// SPDX-License-Identifier: GPL-2.0-only
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"
#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12

#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
	"initial descriptors per channel (default: 64)");
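/*
 * Layout of one DMA descriptor as the controller fetches it in
 * scatter-gather mode, and of the per-channel register block mapped
 * behind BAR1: device address, memory address, size/width word and
 * next-descriptor pointer.
 */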
struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};
struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction	dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};
#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)
struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct dma_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};
#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)
static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}
static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}
static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}
static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}
static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}
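/*
 * Per-channel interrupt enables live in CTL2: bit <chan_id> for
 * channels 0-7 and bit <chan_id + 8> for channels 8-11, which is the
 * pos calculation below.
 */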
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}
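/*
 * Each channel owns a 4-bit field in CTL0 (channels 0-7) or CTL3
 * (channels 8-11): two mode bits plus a direction bit at
 * DMA_CTL0_DIR_SHIFT_BITS.  pdc_set_dir() touches only the direction
 * bit, pdc_set_mode() only the mode bits; the DMA_MASK_CTL*_MODE
 * constants keep the other channels' fields intact.
 */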
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				(DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}
static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}
static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	if (sts == DMA_STATUS_IDLE)
		return true;
	else
		return false;
}
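/*
 * Program the channel and kick the transfer.  A descriptor with an
 * empty tx_list is written directly into the channel registers and run
 * in one-shot mode; otherwise only NEXT is loaded with the chain's DMA
 * address and the channel runs in scatter-gather mode, fetching the
 * descriptors from memory.
 */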
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}
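/*
 * Move a finished descriptor (and any chained descriptors on its
 * tx_list) back to the free list, then invoke the client's completion
 * callback.
 */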
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(txd, &cb);
	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	dmaengine_desc_callback_invoke(&cb, NULL);
}
static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}
static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}
static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}
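/*
 * dmaengine tx_submit hook: start the descriptor immediately if the
 * channel is idle (empty active list), otherwise park it on the queue
 * until pdc_advance_work() gets to it.
 */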
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);

	spin_lock(&pd_chan->lock);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}
static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = dma_pool_zalloc(pd->pool, flags, &addr);
	if (desc) {
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}
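/*
 * Grab an ACKed descriptor from the channel's free list, falling back
 * to an atomic allocation from the DMA pool when none is available.
 */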
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}
static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}
static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				"Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}
static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		dma_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}
static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}
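/*
 * Build a descriptor chain for a slave transfer: one descriptor per
 * scatterlist entry, with the FIFO address taken from the pch_dma_slave
 * data the client hung on chan->private, the transfer width folded into
 * the size word, and the last descriptor marked END (with an IRQ only
 * if DMA_PREP_INTERRUPT was requested).
 *
 * Illustrative client-side sketch only; the peripheral name and its
 * FIFO offsets are made up, and just tx_reg/rx_reg/width are
 * interpreted by this driver:
 *
 *	struct pch_dma_slave param = {
 *		.tx_reg = mapbase + PERIPHERAL_TX_FIFO,
 *		.rx_reg = mapbase + PERIPHERAL_RX_FIFO,
 *		.width  = PCH_DMA_WIDTH_1_BYTE,
 *	};
 *	chan->private = &param;
 *
 * typically done from a dma_request_channel() filter function.
 */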
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction, unsigned long flags,
			void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}
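/*
 * dmaengine terminate_all hook: disable the channel and complete every
 * active and queued descriptor so their resources go back to the free
 * list.
 */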
static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}
static void pdc_tasklet(struct tasklet_struct *t)
{
	struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}
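/*
 * Interrupt handler: STS0 carries the IRQ and error bits for channels
 * 0-7, STS2 those for channels 8-11.  The handler only latches the
 * error state and schedules the per-channel tasklet; the real work
 * happens in pdc_tasklet().
 */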
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}
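/*
 * Suspend/resume helpers: save and restore the global control registers
 * and each channel's descriptor registers around a power transition.
 */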
static void __maybe_unused pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}
static void __maybe_unused pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}
static int __maybe_unused pch_dma_suspend(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_save_regs(pd);

	return 0;
}
static int __maybe_unused pch_dma_resume(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
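/*
 * PCI probe: the channel count comes from the id table's driver_data,
 * the register block is BAR1, and descriptors are carved out of a
 * 4-byte aligned DMA pool.
 */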
static int pch_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);
	pd->dma.dev = &pdev->dev;

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	dma_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}
static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		dma_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}
/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815
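/* driver_data holds the number of DMA channels exposed by each device */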
static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
	{ 0, },
};
static SIMPLE_DEV_PM_OPS(pch_dma_pm_ops, pch_dma_suspend, pch_dma_resume);

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
	.driver.pm	= &pch_dma_pm_ops,
};

module_pci_driver(pch_dma_driver);
MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);