/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"
static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list)

#define for_each_instance(idx, inst, kdev)			\
	for (idx = 0, inst = kdev->instances;			\
	     idx < (kdev)->num_queues_in_use;			\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))
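
/*
 * Illustrative note (not driver code): instance storage is one flat
 * allocation indexed with a shift instead of a multiply.  If, say,
 * sizeof(struct knav_queue_inst) were 88 bytes, inst_shift would be
 * order_base_2(88) == 7, so instance idx lives at
 * kdev->instances + (idx << 7).  The size here is only an assumed
 * example; see knav_queue_init_queues() below for the real setup.
 */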
/**
 * knav_queue_notify: qmss queue notifier call
 *
 * @inst:		qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		atomic_inc(&qh->stats.notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}
static int knav_queue_setup_irq(struct knav_range_info *range,
			  struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	unsigned long cpu_map;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		cpu_map = range->irqs[queue].cpu_map;
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (cpu_map) {
			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}
static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}
static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	bool ret = false;
	struct knav_queue *tmp;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		   (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		   !(inst->range->flags &
		     (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}
static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
					    const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret) {
			devm_kfree(inst->kdev->dev, qh);
			return ERR_PTR(ret);
		}
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;
}
static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						  unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		break;
	}

	mutex_unlock(&knav_dev_lock);
	return qh;
}
static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
				   struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}
static int knav_gp_set_notify(struct knav_range_info *range,
			      struct knav_queue_inst *inst,
			      bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
			      struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};
static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					   struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		seq_printf(s, "\t\thandle %p: ", qh);
		seq_printf(s, "pushes %8d, ",
			   atomic_read(&qh->stats.pushes));
		seq_printf(s, "pops %8d, ",
			   atomic_read(&qh->stats.pops));
		seq_printf(s, "count %8d, ",
			   knav_queue_get_count(qh));
		seq_printf(s, "notifies %8d, ",
			   atomic_read(&qh->stats.notifies));
		seq_printf(s, "push errors %8d, ",
			   atomic_read(&qh->stats.push_errors));
		seq_printf(s, "pop errors %8d\n",
			   atomic_read(&qh->stats.pop_errors));
	}
}
static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

static int knav_queue_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, knav_queue_debug_show, NULL);
}

static const struct file_operations knav_queue_debug_ops = {
	.open		= knav_queue_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
				       u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}

static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}
/**
 * knav_queue_open()	- open a hardware queue
 * @name		- name to give the queue handle
 * @id			- desired queue number if any or specifies the type
 *			  of queue
 * @flags		- the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			    exclusive by default.
 *			    Subsequent attempts to open a shared queue should
 *			    also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
		      unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);
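
/*
 * A minimal usage sketch (illustrative only, not part of this driver):
 * open a general-purpose queue by type, then a specific queue by number.
 * The handle names and the queue number 645 are assumptions for the
 * example.
 *
 *	void *gp_q, *rx_q;
 *
 *	gp_q = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
 *	if (IS_ERR(gp_q))
 *		return PTR_ERR(gp_q);
 *
 *	rx_q = knav_queue_open("my-rx-queue", 645, KNAV_QUEUE_SHARED);
 *	if (IS_ERR(rx_q)) {
 *		knav_queue_close(gp_q);
 *		return PTR_ERR(rx_q);
 *	}
 */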
/**
 * knav_queue_close()	- close a hardware queue handle
 * @qh			- handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);
/**
 * knav_queue_device_control()	- Perform control operations on a queue
 * @qh				- queue handle
 * @cmd				- control commands
 * @arg				- command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);
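
/*
 * Notifier setup sketch (illustrative): a client attaches a callback
 * with KNAV_QUEUE_SET_NOTIFIER.  my_queue_notify() and priv are
 * hypothetical client-side names, not part of this driver.
 *
 *	static void my_queue_notify(void *arg)
 *	{
 *		// e.g. schedule NAPI or wake a worker; called via
 *		// knav_queue_notify() from the queue's range ops
 *	}
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_queue_notify,
 *		.fn_arg	= priv,
 *	};
 *	knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
 *				  (unsigned long)&cfg);
 *
 * Setting a non-NULL fn over a NULL one enables notification implicitly
 * (see knav_queue_set_notifier() above); KNAV_QUEUE_ENABLE_NOTIFY and
 * KNAV_QUEUE_DISABLE_NOTIFY adjust the per-handle count afterwards.
 */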
/**
 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 * @qh			- hardware queue handle
 * @dma			- DMA data to push
 * @size		- size of data to push
 * @flags		- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
		    unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	atomic_inc(&qh->stats.pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);

/**
 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 * @qh			- hardware queue handle
 * @size		- (optional) size of the data pop'ed.
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx  = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	atomic_inc(&qh->stats.pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);
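
/*
 * Push/pop round-trip sketch (illustrative, error handling elided).
 * The descriptor address desc_dma and the 64-byte size are assumptions
 * for the example; sizes are encoded in 16-byte units, so they should
 * be multiples of 16.
 *
 *	dma_addr_t dma;
 *	unsigned sz;
 *
 *	knav_queue_push(qh, desc_dma, 64, 0);
 *	...
 *	dma = knav_queue_pop(qh, &sz);
 *	if (!dma)
 *		return;		// queue was empty
 */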
/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;
		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}
/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;
	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;
	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
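
/*
 * Worked example (values assumed): with region->virt_start == 0xc0000000
 * and region->dma_start == 0x80000000, a descriptor at virtual address
 * 0xc0000400 maps to DMA address 0x80000400 and back again.  Both
 * helpers are plain offset arithmetic within one contiguous region, so
 * they are only valid for addresses inside that region.
 */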
/**
 * knav_pool_create()	- Create a pool of descriptors
 * @name		- name to give the pool handle
 * @num_desc		- numbers of descriptors in the pool
 * @region_id		- QMSS region id from which the descriptors are to be
 *			  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
		       int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi;
	struct list_head *node;
	unsigned last_offset;
	bool slot_found;
	int ret;

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR_OR_NULL(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Region maintains a sorted (by region offset) list of pools
	 * use the first free slot which is large enough to accommodate
	 * the request
	 */
	last_offset = 0;
	slot_found = false;
	node = &region->pools;
	list_for_each_entry(pi, &region->pools, region_inst) {
		if ((pi->region_offset - last_offset) >= num_desc) {
			slot_found = true;
			break;
		}
		last_offset = pi->region_offset + pi->num_desc;
	}
	node = &pi->region_inst;

	if (slot_found) {
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);
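
/*
 * Pool usage sketch (illustrative): carve 512 descriptors out of QMSS
 * region 12.  The pool name, descriptor count and region id are
 * assumptions for the example.
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("my-pool", 512, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 *	...
 *	knav_pool_destroy(pool);
 */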
/**
 * knav_pool_destroy()	- Free a pool of descriptors
 * @pool		- pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);

/**
 * knav_pool_desc_get()	- Get a descriptor from the pool
 * @pool		- pool handle
 *
 * Returns descriptor from the pool.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put()	- return a descriptor to the pool
 * @pool		- pool handle
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);
/**
 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 * @pool		- pool handle
 * @desc		- address of descriptor to map
 * @size		- size of descriptor to map
 * @dma			- DMA address return pointer
 * @dma_sz		- adjusted return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
		       dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;
	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

/**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 * @pool		- pool handle
 * @dma			- DMA address of descriptor to unmap
 * @dma_sz		- size of descriptor to unmap
 *
 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 * error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
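
/*
 * Descriptor lifecycle sketch (illustrative, error handling elided):
 * get a descriptor, fill it, map it for the hardware, then reverse the
 * steps on completion.  fill_desc(), tx_q and done_q are hypothetical
 * client-side names.
 *
 *	void *desc = knav_pool_desc_get(pool);
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *
 *	fill_desc(desc);
 *	knav_pool_desc_map(pool, desc, desc_size, &dma, &dma_sz);
 *	knav_queue_push(tx_q, dma, dma_sz, 0);
 *	...
 *	dma = knav_queue_pop(done_q, &dma_sz);
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 *	knav_pool_desc_put(pool, desc);
 */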
/**
 * knav_pool_count()	- Get the number of descriptors in pool.
 * @pool		- pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;
	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);
static void knav_queue_setup_region(struct knav_device *kdev,
				    struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem  *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
					       GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end  = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, phys:%08x-%08x, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, region->dma_start, region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed(region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
			       DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}
static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
				    struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc  = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}
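
/*
 * Device tree sketch for one descriptor region (values are illustrative
 * only; the QMSS binding documentation is authoritative).  "region-spec"
 * carries <num_desc desc_size>, matching the parsing above:
 *
 *	descriptor-regions {
 *		region-12 {
 *			id = <12>;
 *			region-spec = <8192 128>;
 *			link-index = <0x4000>;
 *		};
 *	};
 */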
static int knav_get_link_ram(struct knav_device *kdev,
			     const char *name,
			     struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40bits in hardware, we treat them as
	 * 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used. For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64-bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->phys = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
						8 * block->size, &block->phys,
						GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: phys:%x, virt:%p, size:%x\n",
			block->phys, block->virt, block->size);
		writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base0);
		writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);

		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
			block->phys, block->virt, block->size);
		writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}
static int knav_setup_queue_range(struct knav_device *kdev,
				  struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (oirq.args_count == 3)
			range->irqs[i].cpu_map =
				(oirq.args[2] & 0x0000ff00) >> 8;
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_get_property(node, "qalloc-by-id", NULL))
		range->flags |= RANGE_RESERVED;

	if (of_get_property(node, "accumulator", NULL)) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end   = min(qmgr->start_queue + qmgr->num_queues,
			    range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}
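
/*
 * Device tree sketch for queue ranges (illustrative values; the binding
 * documentation is authoritative).  A "qrange" gives base and count;
 * an "accumulator" property selects the PDSP-accumulated range ops and
 * "qalloc-by-id" marks a range reserved for open-by-id only:
 *
 *	queue-pools {
 *		qpend {
 *			qpend-0 {
 *				qrange = <658 8>;
 *				interrupts = <0 40 0xf04>;
 *			};
 *		};
 *		general-purpose {
 *			gp-0 {
 *				qrange = <4000 64>;
 *			};
 *		};
 *	};
 *
 * With the 3-cell interrupt form above, the cpu_map parsed by
 * knav_setup_queue_range() is (0xf04 & 0xff00) >> 8 == 0xf.
 */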
static int knav_setup_queue_pools(struct knav_device *kdev,
				  struct device_node *queue_pools)
{
	struct device_node *type, *range;
	int ret;

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			ret = knav_setup_queue_range(kdev, range);
			/* return value ignored, we init the rest... */
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges)) {
		dev_err(kdev->dev, "no valid queue range found\n");
		return -ENODEV;
	}
	return 0;
}

static void knav_free_queue_range(struct knav_device *kdev,
				  struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}
static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}
static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
					struct device_node *node, int index)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
			node->name, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
			index, node->name);
	return regs;
}
*kdev
,
1358 struct device_node
*qmgrs
)
1360 struct device
*dev
= kdev
->dev
;
1361 struct knav_qmgr_info
*qmgr
;
1362 struct device_node
*child
;
1366 for_each_child_of_node(qmgrs
, child
) {
1367 qmgr
= devm_kzalloc(dev
, sizeof(*qmgr
), GFP_KERNEL
);
1369 dev_err(dev
, "out of memory allocating qmgr\n");
1373 ret
= of_property_read_u32_array(child
, "managed-queues",
1376 qmgr
->start_queue
= temp
[0];
1377 qmgr
->num_queues
= temp
[1];
1379 dev_err(dev
, "invalid qmgr queue range\n");
1380 devm_kfree(dev
, qmgr
);
1384 dev_info(dev
, "qmgr start queue %d, number of queues %d\n",
1385 qmgr
->start_queue
, qmgr
->num_queues
);
1388 knav_queue_map_reg(kdev
, child
,
1389 KNAV_QUEUE_PEEK_REG_INDEX
);
1391 knav_queue_map_reg(kdev
, child
,
1392 KNAV_QUEUE_STATUS_REG_INDEX
);
1394 knav_queue_map_reg(kdev
, child
,
1395 KNAV_QUEUE_CONFIG_REG_INDEX
);
1397 knav_queue_map_reg(kdev
, child
,
1398 KNAV_QUEUE_REGION_REG_INDEX
);
1400 knav_queue_map_reg(kdev
, child
,
1401 KNAV_QUEUE_PUSH_REG_INDEX
);
1403 knav_queue_map_reg(kdev
, child
,
1404 KNAV_QUEUE_POP_REG_INDEX
);
1406 if (IS_ERR(qmgr
->reg_peek
) || IS_ERR(qmgr
->reg_status
) ||
1407 IS_ERR(qmgr
->reg_config
) || IS_ERR(qmgr
->reg_region
) ||
1408 IS_ERR(qmgr
->reg_push
) || IS_ERR(qmgr
->reg_pop
)) {
1409 dev_err(dev
, "failed to map qmgr regs\n");
1410 if (!IS_ERR(qmgr
->reg_peek
))
1411 devm_iounmap(dev
, qmgr
->reg_peek
);
1412 if (!IS_ERR(qmgr
->reg_status
))
1413 devm_iounmap(dev
, qmgr
->reg_status
);
1414 if (!IS_ERR(qmgr
->reg_config
))
1415 devm_iounmap(dev
, qmgr
->reg_config
);
1416 if (!IS_ERR(qmgr
->reg_region
))
1417 devm_iounmap(dev
, qmgr
->reg_region
);
1418 if (!IS_ERR(qmgr
->reg_push
))
1419 devm_iounmap(dev
, qmgr
->reg_push
);
1420 if (!IS_ERR(qmgr
->reg_pop
))
1421 devm_iounmap(dev
, qmgr
->reg_pop
);
1422 devm_kfree(dev
, qmgr
);
1426 list_add_tail(&qmgr
->list
, &kdev
->qmgrs
);
1427 dev_info(dev
, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1428 qmgr
->start_queue
, qmgr
->num_queues
,
1429 qmgr
->reg_peek
, qmgr
->reg_status
,
1430 qmgr
->reg_config
, qmgr
->reg_region
,
1431 qmgr
->reg_push
, qmgr
->reg_pop
);
static int knav_queue_init_pdsps(struct knav_device *kdev,
				 struct device_node *pdsps)
{
	struct device *dev = kdev->dev;
	struct knav_pdsp_info *pdsp;
	struct device_node *child;
	int ret;

	for_each_child_of_node(pdsps, child) {
		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
		if (!pdsp) {
			dev_err(dev, "out of memory allocating pdsp\n");
			return -ENOMEM;
		}
		pdsp->name = knav_queue_find_name(child);
		ret = of_property_read_string(child, "firmware",
					      &pdsp->firmware);
		if (ret < 0 || !pdsp->firmware) {
			dev_err(dev, "unknown firmware for pdsp %s\n",
				pdsp->name);
			devm_kfree(dev, pdsp);
			continue;
		}
		dev_dbg(dev, "pdsp name %s fw name :%s\n", pdsp->name,
			pdsp->firmware);

		pdsp->iram =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
		pdsp->regs =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
		pdsp->intd =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
		pdsp->command =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);

		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
			dev_err(dev, "failed to map pdsp %s regs\n",
				pdsp->name);
			if (!IS_ERR(pdsp->command))
				devm_iounmap(dev, pdsp->command);
			if (!IS_ERR(pdsp->iram))
				devm_iounmap(dev, pdsp->iram);
			if (!IS_ERR(pdsp->regs))
				devm_iounmap(dev, pdsp->regs);
			if (!IS_ERR(pdsp->intd))
				devm_iounmap(dev, pdsp->intd);
			devm_kfree(dev, pdsp);
			continue;
		}
		of_property_read_u32(child, "id", &pdsp->id);
		list_add_tail(&pdsp->list, &kdev->pdsps);
		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p, firmware %s\n",
			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
			pdsp->intd, pdsp->firmware);
	}
	return 0;
}
static int knav_queue_stop_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
				   PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	return 0;
}

static int knav_queue_load_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	int i, ret, fwlen;
	const struct firmware *fw;
	u32 *fwdata;

	ret = request_firmware(&fw, pdsp->firmware, kdev->dev);
	if (ret) {
		dev_err(kdev->dev, "failed to get firmware %s for pdsp %s\n",
			pdsp->firmware, pdsp->name);
		return ret;
	}
	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
	/* download the firmware */
	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}
static int knav_queue_start_pdsp(struct knav_device *kdev,
				 struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val  = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}

static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);
	/* now load them all */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (ret < 0)
			return ret;
	}

	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_start_pdsp(kdev, pdsp);
		WARN_ON(ret);
	}
	return 0;
}
static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

static int knav_queue_init_queue(struct knav_device *kdev,
				 struct knav_range_info *range,
				 struct knav_queue_inst *inst,
				 unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];
	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}
static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/* round this up to a power of 2, keep the index to instance
	 * arithmetic fast.
	 * */
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
					knav_queue_idx_to_inst(kdev, idx), id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return 0;
}
*pdev
)
1673 struct device_node
*node
= pdev
->dev
.of_node
;
1674 struct device_node
*qmgrs
, *queue_pools
, *regions
, *pdsps
;
1675 struct device
*dev
= &pdev
->dev
;
1680 dev_err(dev
, "device tree info unavailable\n");
1684 kdev
= devm_kzalloc(dev
, sizeof(struct knav_device
), GFP_KERNEL
);
1686 dev_err(dev
, "memory allocation failed\n");
1690 platform_set_drvdata(pdev
, kdev
);
1692 INIT_LIST_HEAD(&kdev
->queue_ranges
);
1693 INIT_LIST_HEAD(&kdev
->qmgrs
);
1694 INIT_LIST_HEAD(&kdev
->pools
);
1695 INIT_LIST_HEAD(&kdev
->regions
);
1696 INIT_LIST_HEAD(&kdev
->pdsps
);
1698 pm_runtime_enable(&pdev
->dev
);
1699 ret
= pm_runtime_get_sync(&pdev
->dev
);
1701 dev_err(dev
, "Failed to enable QMSS\n");
1705 if (of_property_read_u32_array(node
, "queue-range", temp
, 2)) {
1706 dev_err(dev
, "queue-range not specified\n");
1710 kdev
->base_id
= temp
[0];
1711 kdev
->num_queues
= temp
[1];
1713 /* Initialize queue managers using device tree configuration */
1714 qmgrs
= of_get_child_by_name(node
, "qmgrs");
1716 dev_err(dev
, "queue manager info not specified\n");
1720 ret
= knav_queue_init_qmgrs(kdev
, qmgrs
);
1725 /* get pdsp configuration values from device tree */
1726 pdsps
= of_get_child_by_name(node
, "pdsps");
1728 ret
= knav_queue_init_pdsps(kdev
, pdsps
);
1732 ret
= knav_queue_start_pdsps(kdev
);
1738 /* get usable queue range values from device tree */
1739 queue_pools
= of_get_child_by_name(node
, "queue-pools");
1741 dev_err(dev
, "queue-pools not specified\n");
1745 ret
= knav_setup_queue_pools(kdev
, queue_pools
);
1746 of_node_put(queue_pools
);
1750 ret
= knav_get_link_ram(kdev
, "linkram0", &kdev
->link_rams
[0]);
1752 dev_err(kdev
->dev
, "could not setup linking ram\n");
1756 ret
= knav_get_link_ram(kdev
, "linkram1", &kdev
->link_rams
[1]);
1759 * nothing really, we have one linking ram already, so we just
1760 * live within our means
1764 ret
= knav_queue_setup_link_ram(kdev
);
1768 regions
= of_get_child_by_name(node
, "descriptor-regions");
1770 dev_err(dev
, "descriptor-regions not specified\n");
1773 ret
= knav_queue_setup_regions(kdev
, regions
);
1774 of_node_put(regions
);
1778 ret
= knav_queue_init_queues(kdev
);
1780 dev_err(dev
, "hwqueue initialization failed\n");
1784 debugfs_create_file("qmss", S_IFREG
| S_IRUGO
, NULL
, NULL
,
1785 &knav_queue_debug_ops
);
1789 knav_queue_stop_pdsps(kdev
);
1790 knav_queue_free_regions(kdev
);
1791 knav_free_queue_ranges(kdev
);
1792 pm_runtime_put_sync(&pdev
->dev
);
1793 pm_runtime_disable(&pdev
->dev
);
static int knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

/* Match table for of_platform binding */
static struct of_device_id keystone_qmss_of_match[] = {
	{ .compatible = "ti,keystone-navigator-qmss", },
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);

static struct platform_driver keystone_qmss_driver = {
	.probe		= knav_queue_probe,
	.remove		= knav_queue_remove,
	.driver		= {
		.name	= "keystone-navigator-qmss",
		.of_match_table = keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");