// SPDX-License-Identifier: GPL-2.0-only
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12

#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333

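/*
 * Summary of the register layout implied by the defines above (a reading of
 * this driver, not a datasheet quote): CTL0 packs one 4-bit field per
 * channel for channels 0-7, and CTL3 does the same for channels 8-11.
 * Bits [1:0] of each field select the mode (disable, scatter-gather or
 * one-shot) and bit 2 selects the transfer direction.  Per-channel status
 * is reported as 2-bit fields starting at bit 16 of STS0/STS2.
 */
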
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

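/*
 * Per-channel software state: the exported dmaengine channel, a pointer to
 * this channel's descriptor register window, the current transfer
 * direction, and the descriptor lists (active, queued, free) protected by
 * ->lock.
 */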
struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

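/*
 * Byte offsets of the four per-channel descriptor registers within a
 * channel's register window; pd_chan->membase points at that window
 * (&regs->desc[i], set up in pch_dma_probe()).
 */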
#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct dma_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

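/*
 * Per-channel interrupt enables live in CTL2: bit n for channel n when
 * n < 8, and bit n + 8 for channels 8-11 (see the pos calculation below).
 */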
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

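/*
 * Program the direction bit of this channel's 4-bit field in CTL0 (or CTL3
 * for channels 8-11).  The mask_mode/mask_ctl handling below rewrites only
 * this channel's bits so the other channels' mode fields are preserved.
 */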
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
						(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

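/*
 * Channel status is a 2-bit field per channel, starting at bit 16 of STS0
 * for channels 0-7 and of STS2 for channels 8-11; DMA_STATUS_IDLE means the
 * channel can accept a new transfer.
 */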
static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	if (sts == DMA_STATUS_IDLE)
		return true;
	else
		return false;
}

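/*
 * Kick off a transfer.  A single descriptor (empty tx_list) is programmed
 * directly into the channel registers and started in one-shot mode; for a
 * chained transfer only the DMA address of the first descriptor is written
 * to NEXT and the channel is put into scatter-gather mode.
 */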
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(txd, &cb);
	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

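/*
 * tx_submit callback: if nothing is active the descriptor is started
 * immediately, otherwise it is parked on ->queue and picked up later by
 * pdc_advance_work() from the completion tasklet.
 */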
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);

	spin_lock(&pd_chan->lock);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = dma_pool_zalloc(pd->pool, flags, &addr);
	if (desc) {
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

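/*
 * Grab a descriptor for a new transfer: prefer an already-ACKed entry from
 * the channel's free_list and fall back to an atomic pool allocation when
 * none is available.
 */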
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				"Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		dma_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

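/*
 * Build a slave scatter/gather transaction.  The peripheral's FIFO address
 * and transfer width come from the struct pch_dma_slave that the client
 * driver hangs off chan->private (see linux/pch_dma.h).  A rough usage
 * sketch from the client side -- the register offsets and filter function
 * here are hypothetical; only the pch_dma_slave fields used below
 * (rx_reg/tx_reg/width) are taken from this driver:
 *
 *	struct pch_dma_slave param = {
 *		.tx_reg = mmio_base + TX_FIFO_OFF,	// hypothetical offset
 *		.rx_reg = mmio_base + RX_FIFO_OFF,	// hypothetical offset
 *		.width  = PCH_DMA_WIDTH_1_BYTE,
 *	};
 *	chan = dma_request_channel(mask, my_filter_fn, &param);
 *	chan->private = &param;
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 */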
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction, unsigned long flags,
			void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

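/*
 * Completion handling is deferred from the hard IRQ to this tasklet: it
 * retires finished descriptors (or reruns the chain after an error flagged
 * in ->err_status) with the channel lock held.
 */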
static void pdc_tasklet(struct tasklet_struct *t)
{
	struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

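/*
 * Suspend/resume helpers: the shared CTL0-CTL3 registers and each channel's
 * four descriptor registers are saved on suspend and written back on
 * resume.
 */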
static void __maybe_unused pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void __maybe_unused pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int __maybe_unused pch_dma_suspend(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_save_regs(pd);

	return 0;
}

static int __maybe_unused pch_dma_resume(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}

static int pch_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);
	pd->dma.dev = &pdev->dev;

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	dma_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}

static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		dma_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
	{ 0, },
};

static SIMPLE_DEV_PM_OPS(pch_dma_pm_ops, pch_dma_suspend, pch_dma_resume);

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
	.driver.pm	= &pch_dma_pm_ops,
};

module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);