/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC X100 DMA Driver.
 *
 * Adapted from IOAT dma driver.
 */
#include <linux/module.h>
#include <linux/io.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include "mic_x100_dma.h"

#define MIC_DMA_MAX_XFER_SIZE_CARD  (1 * 1024 * 1024 - \
				     MIC_DMA_ALIGN_BYTES)
#define MIC_DMA_MAX_XFER_SIZE_HOST  (1 * 1024 * 1024 >> 1)
#define MIC_DMA_DESC_TYPE_SHIFT	60
#define MIC_DMA_MEMCPY_LEN_SHIFT 46
#define MIC_DMA_STAT_INTR_SHIFT 59
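
/*
 * Layout of the two 64-bit descriptor words, as implied by the shift
 * definitions above and the descriptor-building helpers below (inferred
 * from this file, not from a h/w datasheet):
 *
 *   qw0: source physical address (memcpy) or immediate data (status) in
 *        the low bits; for memcpy descriptors the transfer length, in
 *        MIC_DMA_ALIGN_BYTES units, starts at bit 46
 *   qw1: destination physical address in the low bits, descriptor type
 *        at bit 60 and, for status descriptors, an interrupt-on-completion
 *        flag at bit 59
 */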
/* high-water mark for pushing dma descriptors */
static int mic_dma_pending_level = 4;

/* Status descriptor is used to write a 64 bit value to a memory location */
enum mic_dma_desc_format_type {
	MIC_DMA_MEMCPY = 1,
	MIC_DMA_STATUS,
};

static inline u32 mic_dma_hw_ring_inc(u32 val)
{
	return (val + 1) % MIC_DMA_DESC_RX_SIZE;
}

static inline u32 mic_dma_hw_ring_dec(u32 val)
{
	return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
}

static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
{
	ch->head = mic_dma_hw_ring_inc(ch->head);
}

/* Prepare a memcpy desc */
static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
	dma_addr_t src_phys, dma_addr_t dst_phys, u64 size)
{
	u64 qw0, qw1;

	qw0 = src_phys;
	qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
	qw1 = MIC_DMA_MEMCPY;
	qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
	qw1 |= dst_phys;
	desc->qw0 = qw0;
	desc->qw1 = qw1;
}

/* Prepare a status desc. with @data to be written at @dst_phys */
static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
	dma_addr_t dst_phys, bool generate_intr)
{
	u64 qw0, qw1;

	qw0 = data;
	qw1 = (u64)MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
	if (generate_intr)
		qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
	desc->qw0 = qw0;
	desc->qw1 = qw1;
}

static void mic_dma_cleanup(struct mic_dma_chan *ch)
{
	struct dma_async_tx_descriptor *tx;
	u32 tail;
	u32 last_tail;

	spin_lock(&ch->cleanup_lock);
	tail = mic_dma_read_cmp_cnt(ch);
	/*
	 * This is the barrier pair for smp_wmb() in fn.
	 * mic_dma_tx_submit_unlock. It's required so that we read the
	 * updated cookie value from tx->cookie.
	 */
	smp_rmb();
	for (last_tail = ch->last_tail; tail != last_tail;) {
		tx = &ch->tx_array[last_tail];
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
		}
		last_tail = mic_dma_hw_ring_inc(last_tail);
	}
	/* finish all completion callbacks before incrementing tail */
	smp_mb();
	ch->last_tail = last_tail;
	spin_unlock(&ch->cleanup_lock);
}

static u32 mic_dma_ring_count(u32 head, u32 tail)
{
	u32 count;

	if (head >= tail)
		count = (tail - 0) + (MIC_DMA_DESC_RX_SIZE - head);
	else
		count = tail - head;
	return count - 1;
}
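
/*
 * Worked example (with an illustrative ring size of 8; the real
 * MIC_DMA_DESC_RX_SIZE comes from mic_x100_dma.h): head = 6, tail = 2
 * gives (2 - 0) + (8 - 6) - 1 = 3 free slots. One slot is always kept
 * in reserve so a completely full ring is not mistaken for an empty one.
 */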
/* Returns the num. of free descriptors on success, -ENOMEM on failure */
static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
{
	struct device *dev = mic_dma_ch_to_device(ch);
	u32 count;

	count = mic_dma_ring_count(ch->head, ch->last_tail);
	if (count < required) {
		mic_dma_cleanup(ch);
		count = mic_dma_ring_count(ch->head, ch->last_tail);
	}

	if (count < required) {
		dev_dbg(dev, "Not enough desc space");
		dev_dbg(dev, "%s %d required=%u, avail=%u\n",
			__func__, __LINE__, required, count);
		return -ENOMEM;
	} else {
		return count;
	}
}

/* Program memcpy descriptors into the descriptor ring and update s/w head ptr */
static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
				    dma_addr_t dst, size_t len)
{
	size_t current_transfer_len;
	size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
	/* 3 is added to make sure we have enough space for status desc */
	int num_desc = len / max_xfer_size + 3;
	int ret;

	if (len % max_xfer_size)
		num_desc++;

	ret = mic_dma_avail_desc_ring_space(ch, num_desc);
	if (ret < 0)
		return ret;
	do {
		current_transfer_len = min(len, max_xfer_size);
		mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
				    src, dst, current_transfer_len);
		mic_dma_hw_ring_inc_head(ch);
		len -= current_transfer_len;
		dst = dst + current_transfer_len;
		src = src + current_transfer_len;
	} while (len > 0);
	return 0;
}
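
/*
 * Example using the constants above: on the host, max_xfer_size is
 * MIC_DMA_MAX_XFER_SIZE_HOST (512K), so len = 1280K reserves
 * 1280K/512K + 3 + 1 = 6 slots and the loop emits three memcpy
 * descriptors of 512K, 512K and 256K; the extra slots are headroom for
 * the status descriptors a caller may append afterwards.
 */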
/*
 * H/w quirk: the h/w consumes two status descriptors for every
 * interrupting status update, so program a non-interrupting descriptor
 * followed by an interrupting one.
 */
static void mic_dma_prog_intr(struct mic_dma_chan *ch)
{
	mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
				 ch->status_dest_micpa, false);
	mic_dma_hw_ring_inc_head(ch);
	mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
				 ch->status_dest_micpa, true);
	mic_dma_hw_ring_inc_head(ch);
}

/* Wrapper function to program memcpy descriptors/status descriptors */
static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
			  dma_addr_t dst, size_t len)
{
	if (len && -ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len)) {
		return -ENOMEM;
	} else {
		/* 3 is the maximum number of status descriptors */
		int ret = mic_dma_avail_desc_ring_space(ch, 3);

		if (ret < 0)
			return ret;
	}

	/* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
	if (flags & DMA_PREP_FENCE) {
		mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
					 ch->status_dest_micpa, false);
		mic_dma_hw_ring_inc_head(ch);
	}

	if (flags & DMA_PREP_INTERRUPT)
		mic_dma_prog_intr(ch);

	return 0;
}
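
/*
 * Worst-case status-descriptor budget for mic_dma_do_dma() above: one
 * fence descriptor for DMA_PREP_FENCE plus the pair written by
 * mic_dma_prog_intr() for DMA_PREP_INTERRUPT, i.e. the "3" it reserves.
 */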
static inline void mic_dma_issue_pending(struct dma_chan *ch)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	spin_lock(&mic_ch->issue_lock);
	/*
	 * Write to head triggers h/w to act on the descriptors.
	 * On MIC, writing the same head value twice causes
	 * a h/w error. On second write, h/w assumes we filled
	 * the entire ring & overwrote some of the descriptors.
	 */
	if (mic_ch->issued == mic_ch->submitted)
		goto out;
	mic_ch->issued = mic_ch->submitted;
	/*
	 * make descriptor updates visible before advancing head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();
	mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
out:
	spin_unlock(&mic_ch->issue_lock);
}
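
/*
 * The four per-channel ring indices, in the order they chase each other
 * around the ring:
 *
 *   head      - next free slot; advanced under prep_lock while
 *               descriptors are programmed
 *   submitted - set to head in tx_submit, publishing the new descriptors
 *   issued    - last value written to the h/w head pointer (DHPR)
 *   last_tail - s/w shadow of the h/w completion count, advanced by
 *               mic_dma_cleanup()
 */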
static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
{
	if (mic_dma_ring_count(ch->issued, ch->submitted)
			> mic_dma_pending_level)
		mic_dma_issue_pending(&ch->api_ch);
}

static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
	dma_cookie_t cookie;

	dma_cookie_assign(tx);
	cookie = tx->cookie;
	/*
	 * We need an smp write barrier here because another CPU might see
	 * an update to submitted and update h/w head even before we
	 * assigned a cookie to this tx.
	 */
	smp_wmb();
	mic_ch->submitted = mic_ch->head;
	spin_unlock(&mic_ch->prep_lock);
	mic_dma_update_pending(mic_ch);
	return cookie;
}
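
/*
 * The tx slot for a transaction lives at head - 1: by the time
 * allocate_tx() runs, the descriptors have been programmed and head
 * already points past them. Called with prep_lock held; the lock is
 * released in mic_dma_tx_submit_unlock().
 */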
static inline struct dma_async_tx_descriptor *
allocate_tx(struct mic_dma_chan *ch)
{
	u32 idx = mic_dma_hw_ring_dec(ch->head);
	struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];

	dma_async_tx_descriptor_init(tx, &ch->api_ch);
	tx->tx_submit = mic_dma_tx_submit_unlock;
	return tx;
}

/* Program a status descriptor with dst as address and value to be written */
static struct dma_async_tx_descriptor *
mic_dma_prep_status_lock(struct dma_chan *ch, dma_addr_t dst, u64 src_val,
			 unsigned long flags)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
	int result;

	spin_lock(&mic_ch->prep_lock);
	result = mic_dma_avail_desc_ring_space(mic_ch, 4);
	if (result < 0)
		goto error;
	mic_dma_prep_status_desc(&mic_ch->desc_ring[mic_ch->head], src_val, dst,
				 false);
	mic_dma_hw_ring_inc_head(mic_ch);
	result = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
	if (result < 0)
		goto error;

	return allocate_tx(mic_ch);
error:
	dev_err(mic_dma_ch_to_device(mic_ch),
		"Error enqueueing dma status descriptor, error=%d\n", result);
	spin_unlock(&mic_ch->prep_lock);
	return NULL;
}
/*
 * Prepare a memcpy descriptor to be added to the ring.
 * Building the descriptor in a temporary and then copying it into the
 * ring would add extra overhead, so we build directly in the descriptor
 * ring.
 */
static struct dma_async_tx_descriptor *
mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
	struct device *dev = mic_dma_ch_to_device(mic_ch);
	int result;

	if (!len && !flags)
		return NULL;

	spin_lock(&mic_ch->prep_lock);
	result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
	if (result >= 0)
		return allocate_tx(mic_ch);
	dev_err(dev, "Error enqueueing dma, error=%d\n", result);
	spin_unlock(&mic_ch->prep_lock);
	return NULL;
}
static struct dma_async_tx_descriptor *
mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
	int ret;

	spin_lock(&mic_ch->prep_lock);
	ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
	if (!ret)
		return allocate_tx(mic_ch);
	spin_unlock(&mic_ch->prep_lock);
	return NULL;
}

/* Return the status of the transaction */
static enum dma_status
mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate))
		mic_dma_cleanup(mic_ch);

	return dma_cookie_status(ch, cookie, txstate);
}

static irqreturn_t mic_dma_thread_fn(int irq, void *data)
{
	mic_dma_cleanup((struct mic_dma_chan *)data);
	return IRQ_HANDLED;
}

static irqreturn_t mic_dma_intr_handler(int irq, void *data)
{
	struct mic_dma_chan *ch = ((struct mic_dma_chan *)data);

	mic_dma_ack_interrupt(ch);
	return IRQ_WAKE_THREAD;
}

static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch)
{
	u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
	struct device *dev = &to_mbus_device(ch)->dev;

	desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
	ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL);

	if (!ch->desc_ring)
		return -ENOMEM;

	ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring,
					     desc_ring_size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ch->desc_ring_micpa))
		goto map_error;

	ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array));
	if (!ch->tx_array)
		goto tx_error;
	return 0;
tx_error:
	dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size,
			 DMA_BIDIRECTIONAL);
map_error:
	kfree(ch->desc_ring);
	return -ENOMEM;
}

static void mic_dma_free_desc_ring(struct mic_dma_chan *ch)
{
	u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);

	vfree(ch->tx_array);
	desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
	dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa,
			 desc_ring_size, DMA_BIDIRECTIONAL);
	kfree(ch->desc_ring);
	ch->desc_ring = NULL;
}

static void mic_dma_free_status_dest(struct mic_dma_chan *ch)
{
	dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa,
			 L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
	kfree(ch->status_dest);
}

static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch)
{
	struct device *dev = &to_mbus_device(ch)->dev;

	ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL);
	if (!ch->status_dest)
		return -ENOMEM;
	ch->status_dest_micpa = dma_map_single(dev, ch->status_dest,
					       L1_CACHE_BYTES,
					       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ch->status_dest_micpa)) {
		kfree(ch->status_dest);
		ch->status_dest = NULL;
		return -ENOMEM;
	}
	return 0;
}

static int mic_dma_check_chan(struct mic_dma_chan *ch)
{
	if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) ||
	    mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) {
		mic_dma_disable_chan(ch);
		mic_dma_chan_mask_intr(ch);
		dev_err(mic_dma_ch_to_device(ch),
			"%s %d error setting up mic dma chan %d\n",
			__func__, __LINE__, ch->ch_num);
		return -EBUSY;
	}
	return 0;
}

static int mic_dma_chan_setup(struct mic_dma_chan *ch)
{
	if (MIC_DMA_CHAN_MIC == ch->owner)
		mic_dma_chan_set_owner(ch);
	mic_dma_disable_chan(ch);
	mic_dma_chan_mask_intr(ch);
	mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0);
	mic_dma_chan_set_desc_ring(ch);
	ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR);
	ch->head = ch->last_tail;
	ch->issued = 0;
	mic_dma_chan_unmask_intr(ch);
	mic_dma_enable_chan(ch);
	return mic_dma_check_chan(ch);
}

static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
{
	mic_dma_disable_chan(ch);
	mic_dma_chan_mask_intr(ch);
}

static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
{
	dma_async_device_unregister(&mic_dma_dev->dma_dev);
}

static int mic_dma_setup_irq(struct mic_dma_chan *ch)
{
	ch->cookie =
		to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
			mic_dma_intr_handler, mic_dma_thread_fn,
			"mic dma_channel", ch, ch->ch_num);
	if (IS_ERR(ch->cookie))
		return PTR_ERR(ch->cookie);
	return 0;
}

static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
{
	to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch);
}

static int mic_dma_chan_init(struct mic_dma_chan *ch)
{
	int ret = mic_dma_alloc_desc_ring(ch);

	if (ret)
		goto ring_error;
	ret = mic_dma_alloc_status_dest(ch);
	if (ret)
		goto status_error;
	ret = mic_dma_chan_setup(ch);
	if (ret)
		goto chan_error;
	return ret;
chan_error:
	mic_dma_free_status_dest(ch);
status_error:
	mic_dma_free_desc_ring(ch);
ring_error:
	return ret;
}
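
/*
 * Drain the channel by queueing a zero-length, fenced transaction and
 * synchronously waiting for it; the ring completes in order, so anything
 * submitted earlier is done once this transaction is.
 */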
static int mic_dma_drain_chan(struct mic_dma_chan *ch)
{
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	dma_cookie_t cookie;

	tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		goto error;
	}

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		err = -ENOMEM;
	else
		err = dma_sync_wait(&ch->api_ch, cookie);
	if (err) {
		dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n",
			__func__, __LINE__, ch->ch_num);
		err = -EIO;
	}
error:
	mic_dma_cleanup(ch);
	return err;
}

static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
{
	mic_dma_chan_destroy(ch);
	mic_dma_cleanup(ch);
	mic_dma_free_status_dest(ch);
	mic_dma_free_desc_ring(ch);
}
static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
			enum mic_dma_chan_owner owner)
{
	int i, first_chan = mic_dma_dev->start_ch;
	struct mic_dma_chan *ch;
	int ret;

	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		ch = &mic_dma_dev->mic_ch[i];
		ch->ch_num = i;
		ch->owner = owner;
		spin_lock_init(&ch->cleanup_lock);
		spin_lock_init(&ch->prep_lock);
		spin_lock_init(&ch->issue_lock);
		ret = mic_dma_setup_irq(ch);
		if (ret)
			goto error;
	}
	return 0;
error:
	/* unwind only the channels whose irq setup succeeded */
	for (i = i - 1; i >= first_chan; i--)
		mic_dma_free_irq(&mic_dma_dev->mic_ch[i]);
	return ret;
}
static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev)
{
	int i, first_chan = mic_dma_dev->start_ch;
	struct mic_dma_chan *ch;

	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		ch = &mic_dma_dev->mic_ch[i];
		mic_dma_free_irq(ch);
	}
}

static int mic_dma_alloc_chan_resources(struct dma_chan *ch)
{
	int ret = mic_dma_chan_init(to_mic_dma_chan(ch));

	if (ret)
		return ret;
	return MIC_DMA_DESC_RX_SIZE;
}

static void mic_dma_free_chan_resources(struct dma_chan *ch)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	mic_dma_drain_chan(mic_ch);
	mic_dma_chan_uninit(mic_ch);
}

/* Set the fn. handlers and register the dma device with dma api */
static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
				       enum mic_dma_chan_owner owner)
{
	int i, first_chan = mic_dma_dev->start_ch;

	dma_cap_zero(mic_dma_dev->dma_dev.cap_mask);
	/*
	 * This dma engine is not capable of host memory to host memory
	 * transfers
	 */
	dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask);

	if (MIC_DMA_CHAN_HOST == owner)
		dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask);
	mic_dma_dev->dma_dev.device_alloc_chan_resources =
		mic_dma_alloc_chan_resources;
	mic_dma_dev->dma_dev.device_free_chan_resources =
		mic_dma_free_chan_resources;
	mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
	mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
	mic_dma_dev->dma_dev.device_prep_dma_imm_data =
		mic_dma_prep_status_lock;
	mic_dma_dev->dma_dev.device_prep_dma_interrupt =
		mic_dma_prep_interrupt_lock;
	mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
	mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT;
	INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels);
	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev;
		dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch);
		list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
			      &mic_dma_dev->dma_dev.channels);
	}
	return dma_async_device_register(&mic_dma_dev->dma_dev);
}
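
/*
 * Minimal sketch of how a dmaengine client drives one of these channels
 * through the handlers registered above (illustrative only; dst, src and
 * len are assumed to be DMA-mapped addresses and a length prepared by
 * the caller, and error handling is omitted):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *	dma_release_channel(chan);
 */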
/*
 * Initializes dma channels and registers the dma device with the
 * dma engine api.
 */
static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
					      enum mic_dma_chan_owner owner)
{
	struct mic_dma_device *mic_dma_dev;
	int ret;
	struct device *dev = &mbdev->dev;

	mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
	if (!mic_dma_dev) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	mic_dma_dev->mbdev = mbdev;
	mic_dma_dev->dma_dev.dev = dev;
	mic_dma_dev->mmio = mbdev->mmio_va;
	if (MIC_DMA_CHAN_HOST == owner) {
		mic_dma_dev->start_ch = 0;
		mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST;
	} else {
		mic_dma_dev->start_ch = 4;
		mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD;
	}
	ret = mic_dma_init(mic_dma_dev, owner);
	if (ret)
		goto init_error;
	ret = mic_dma_register_dma_device(mic_dma_dev, owner);
	if (ret)
		goto reg_error;
	return mic_dma_dev;
reg_error:
	mic_dma_uninit(mic_dma_dev);
init_error:
	kfree(mic_dma_dev);
	mic_dma_dev = NULL;
alloc_error:
	dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
	return mic_dma_dev;
}

static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
	mic_dma_unregister_dma_device(mic_dma_dev);
	mic_dma_uninit(mic_dma_dev);
	kfree(mic_dma_dev);
}
/* DEBUGFS CODE */
static int mic_dma_reg_seq_show(struct seq_file *s, void *pos)
{
	struct mic_dma_device *mic_dma_dev = s->private;
	int i, chan_num, first_chan = mic_dma_dev->start_ch;
	struct mic_dma_chan *ch;

	seq_printf(s, "SBOX_DCR: %#x\n",
		   mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan],
				     MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR));
	seq_puts(s, "DMA Channel Registers\n");
	seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s",
		   "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
	seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		ch = &mic_dma_dev->mic_ch[i];
		chan_num = ch->ch_num;
		seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x",
			   chan_num,
			   mic_dma_read_reg(ch, MIC_DMA_REG_DCAR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DTPR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DHPR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI));
		seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n",
			   mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT));
	}
	return 0;
}

static int mic_dma_reg_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, mic_dma_reg_seq_show, inode->i_private);
}

static int mic_dma_reg_debug_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}

static const struct file_operations mic_dma_reg_ops = {
	.owner = THIS_MODULE,
	.open = mic_dma_reg_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = mic_dma_reg_debug_release
};

/* Debugfs parent dir */
static struct dentry *mic_dma_dbg;
static int mic_dma_driver_probe(struct mbus_device *mbdev)
{
	struct mic_dma_device *mic_dma_dev;
	enum mic_dma_chan_owner owner;

	if (MBUS_DEV_DMA_MIC == mbdev->id.device)
		owner = MIC_DMA_CHAN_MIC;
	else
		owner = MIC_DMA_CHAN_HOST;

	mic_dma_dev = mic_dma_dev_reg(mbdev, owner);
	/* mic_dma_dev_reg() returns NULL on failure */
	if (!mic_dma_dev)
		return -ENODEV;
	dev_set_drvdata(&mbdev->dev, mic_dma_dev);

	if (mic_dma_dbg) {
		mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
							  mic_dma_dbg);
		if (mic_dma_dev->dbg_dir)
			debugfs_create_file("mic_dma_reg", 0444,
					    mic_dma_dev->dbg_dir, mic_dma_dev,
					    &mic_dma_reg_ops);
	}
	return 0;
}
static void mic_dma_driver_remove(struct mbus_device *mbdev)
{
	struct mic_dma_device *mic_dma_dev;

	mic_dma_dev = dev_get_drvdata(&mbdev->dev);
	debugfs_remove_recursive(mic_dma_dev->dbg_dir);
	mic_dma_dev_unreg(mic_dma_dev);
}

static struct mbus_device_id id_table[] = {
	{MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID},
	{MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID},
	{0},
};

static struct mbus_driver mic_dma_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = mic_dma_driver_probe,
	.remove = mic_dma_driver_remove,
};

static int __init mic_x100_dma_init(void)
{
	int rc = mbus_register_driver(&mic_dma_driver);

	if (rc)
		return rc;
	mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
	return 0;
}

static void __exit mic_x100_dma_exit(void)
{
	debugfs_remove_recursive(mic_dma_dbg);
	mbus_unregister_driver(&mic_dma_driver);
}

module_init(mic_x100_dma_init);
module_exit(mic_x100_dma_exit);

MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
MODULE_LICENSE("GPL v2");