// SPDX-License-Identifier: GPL-2.0-only
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"
#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3
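/*
 * Descriptor encoding as used by this driver: the SIZE value carries the
 * transfer count in its low bits (capped by the DMA_DESC_MAX_COUNT_* limits
 * above) with the access width ORed in at DMA_DESC_WIDTH_SHIFT_BITS, while
 * the low two bits of NEXT select end-of-chain / follow-on behaviour and
 * whether an interrupt is raised; the remaining NEXT bits hold the physical
 * address of the next descriptor in a scatter-gather chain.
 */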
#define MAX_CHAN_NR			12

#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};
#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)
struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct dma_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};
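/*
 * Note that regs and ch_regs above are software shadow copies only: they are
 * filled by pch_dma_save_regs() on suspend and written back by
 * pch_dma_restore_regs() on resume, while the live registers are accessed
 * through membase.
 */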
#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)
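/*
 * As used by this driver, CTL0 and CTL3 hold a 4-bit per-channel field
 * (mode plus direction) for channels 0-7 and 8-11 respectively, CTL2 gates
 * the per-channel interrupt/start bits, and STS0/STS2 report the status of
 * channels 0-7 and 8-11.
 */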
static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}
static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}
static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}
static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}
static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}
static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}
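/*
 * Descriptor lifecycle: pdc_desc_get() pulls ACKed descriptors off
 * free_list (allocating more from the DMA pool if needed), tx_submit
 * places them on active_list or queue, and pdc_chain_complete() returns
 * them to free_list once the hardware is done with them.
 */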
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
						(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}
static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}
static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	if (sts == DMA_STATUS_IDLE)
		return true;
	else
		return false;
}
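/*
 * Kick off a transfer on an idle channel: a single descriptor (empty
 * tx_list) is programmed directly into the channel registers and run in
 * ONESHOT mode, while a chained transfer only loads NEXT with the first
 * descriptor's physical address and lets the hardware walk the chain in
 * scatter-gather mode.
 */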
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(txd, &cb);
	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	dmaengine_desc_callback_invoke(&cb, NULL);
}
static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}
static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}
static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}
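/*
 * tx_submit hook for the dmaengine framework: if the channel has nothing
 * in flight the descriptor is started immediately, otherwise it is parked
 * on the queue and picked up later by pdc_advance_work().
 */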
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);

	spin_lock(&pd_chan->lock);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}
static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = dma_pool_zalloc(pd->pool, flags, &addr);
	if (desc) {
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}
static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}
static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				"Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}
static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		dma_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}
static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}
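/*
 * Build a slave scatter-gather transaction.  The peripheral FIFO address
 * and access width come from a struct pch_dma_slave that the client driver
 * is expected to have attached to chan->private before preparing the
 * transfer.  A rough sketch of the client side (names illustrative; see the
 * real users such as pch_uart for the exact wiring):
 *
 *	struct pch_dma_slave *slave = ...;
 *	slave->tx_reg = io_base + PERIPHERAL_TX_FIFO;
 *	slave->rx_reg = io_base + PERIPHERAL_RX_FIFO;
 *	slave->width = PCH_DMA_WIDTH_1_BYTE;
 *	chan->private = slave;
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 */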
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction, unsigned long flags,
			void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}
static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}
static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}
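/*
 * Shared interrupt handler.  Channel status is split across two registers:
 * STS0 carries the IRQ and error bits for channels 0-7, STS2 those for
 * channels 8-11.  The handler only latches the error state and schedules
 * the per-channel tasklet; the actual descriptor bookkeeping happens in
 * pdc_tasklet().
 */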
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}
#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}
static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}
static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
#endif
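/*
 * All registers live in PCI BAR 1: the global CTL/STS registers sit at the
 * start of the mapping and each channel's descriptor registers are exposed
 * through regs->desc[i], which becomes that channel's membase below.
 */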
static int pch_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	pd->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	dma_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}
static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		dma_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}
/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815
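/*
 * The driver_data field of each entry below is the number of DMA channels
 * the device exposes; pch_dma_probe() reads it via id->driver_data to size
 * the channel array it registers.
 */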
static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
	{ 0, },
};
static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
#ifdef CONFIG_PM
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
#endif
};
module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);