// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <uapi/misc/fastrpc.h>
#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4 /* adsp, mdsp, sdsp, cdsp */
#define FASTRPC_MAX_SESSIONS	9 /* 8 compute, 1 cpz */
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)	((p) & 0xffffffff)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"
#define ADSP_MMAP_ADD_PAGES 0x1000
/* Retrieves the number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)

/* Retrieves the number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)

/* Retrieves the number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)

/* Retrieves the number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)
#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +   \
					 REMOTE_SCALARS_OUTBUFS(sc) +  \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout)	\
				(((attr & 0x07) << 29) |		\
				((method & 0x1f) << 24) |		\
				((in & 0xff) << 16) |			\
				((out & 0xff) << 8) |			\
				((oin & 0x0f) << 4) |			\
				(oout & 0x0f))
#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
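
/*
 * The scalars word thus packs, from MSB to LSB: attr[31:29],
 * method[28:24], inbufs[23:16], outbufs[15:8], inhandles[7:4] and
 * outhandles[3:0].  As a worked example derived from the macros above,
 * FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0) evaluates to
 * (6 << 24) | (4 << 16) == 0x06040000: remote method 6 with four input
 * buffers and no output buffers or handles.
 */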
#define FASTRPC_CREATE_PROCESS_NARGS	6
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_MMAP		4
#define FASTRPC_RMID_INIT_MUNMAP	5
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8
/* Protection Domain (PD) ids */
#define AUDIO_PD	(0) /* also GUEST_OS PD? */
#define USER_PD		(1)
#define SENSORS_PD	(2)
#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp"};
struct fastrpc_phy_page {
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};
struct fastrpc_invoke_buf {
	u32 num;		/* number of contiguous regions */
	u32 pgidx;		/* index to start of contiguous region */
};
struct fastrpc_remote_arg {
	u64 pv;
	u64 len;
};
struct fastrpc_mmap_rsp_msg {
	u64 vaddr;
};
struct fastrpc_mmap_req_msg {
	s32 pgid;
	u32 flags;
	u64 vaddr;
	s32 num;
};
struct fastrpc_munmap_req_msg {
	s32 pgid;
	u64 vaddr;
	u64 size;
};
struct fastrpc_msg {
	int pid;		/* process group id */
	int tid;		/* thread id */
	u64 ctx;		/* invoke caller context */
	u32 handle;		/* handle to invoke */
	u32 sc;			/* scalars structure describing the data */
	u64 addr;		/* physical address */
	u64 size;		/* size of contiguous region */
};
struct fastrpc_invoke_rsp {
	u64 ctx;		/* invoke caller context */
	int retval;		/* invoke return value */
};
struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};
struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;
	/* Lock for dma buf attachments */
	struct mutex lock;
	struct list_head attachments;
	/* mmap support */
	struct list_head node; /* list of user requested mmaps */
	uintptr_t raddr;
};
struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};
struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	struct kref refcount;
};
struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node; /* list of ctxs */
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};
struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};
struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct miscdevice miscdev;
	struct kref refcount;
};
struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head mmaps;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;
	/* lock for allocations */
	spinlock_t lock;
	/* lock for maps */
	struct mutex mutex;
};
static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}
static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}
static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}
static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			fastrpc_map_get(map);
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}
static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}
static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	INIT_LIST_HEAD(&buf->node);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;
	buf->raddr = 0;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}
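
/*
 * Folding the session id into bits above the 32-bit physical address
 * makes each buffer address unique per session; FASTRPC_PHYS() strips
 * it again before the address reaches the DMA API.  The remote side
 * presumably uses the sid to select the matching SMMU context bank,
 * but that is firmware behaviour and not visible in this driver.
 */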
static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}
static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}
static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}
static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}
static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}
static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}
static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}
#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
	struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
	struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
	/* sort with lowest starting buffer first */
	int st = CMP(pa->start, pb->start);
	/* sort with highest ending buffer first */
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}
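
/*
 * With olaps_cmp() as the comparator, fastrpc_get_buff_overlaps() below
 * sorts the buffers by start address and walks them once, clipping each
 * range against the furthest end seen so far (max_end) so overlapping
 * user buffers occupy payload space only once.  For example, ranges
 * [0, 100) and [50, 80) leave the second with mstart == mend == 0 (it
 * is wholly contained), while [0, 100) and [50, 120) leave the second
 * with mstart == 100 and mend == 120: only its 20-byte tail is new.
 */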
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
		/* Falling inside previous range */
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}
static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}

	/* Released in fastrpc_context_put() */
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}
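
/*
 * The idr id is shifted left by four, leaving the low bits of ctxid
 * free: fastrpc_invoke_send() ORs the destination PD into them
 * (msg->ctx = ctx->ctxid | fl->pd) and fastrpc_rpmsg_callback()
 * recovers the id with (rsp->ctx & FASTRPC_CTXID_MASK) >> 4.  With
 * FASTRPC_CTXID_MASK of 0xFF0 that bounds usable ids to 255, which
 * matches the FASTRPC_CTX_MAX limit passed to idr_alloc_cyclic().
 */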
static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;
	int ret;

	table = &a->sgt;

	/* dma_map_sgtable() returns 0 on success, so don't negate the test */
	ret = dma_map_sgtable(attachment->dev, table, dir, 0);
	if (ret)
		table = ERR_PTR(ret);

	return table;
}
static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sgtable(attach->dev, table, dir, 0);
}
static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}
static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}
static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
				    struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}
static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	dma_buf_map_set_vaddr(map, buf->virt);

	return 0;
}
static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}
static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detatch,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}
/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(struct fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 */
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_arg) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}
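
/*
 * A rough worked example: on an LP64 build with no struct padding,
 * fastrpc_remote_arg is 16 bytes, fastrpc_invoke_buf 8 and
 * fastrpc_phy_page 16, so a call with two scalars needs
 * 2 * 40 + 8 * 16 + 4 * 64 = 464 bytes of metadata before the inline
 * argument data begins (which is then aligned to FASTRPC_ALIGN).
 */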
static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int i;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (i = 0; i < ctx->nscalars; i++) {
		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[i].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
		}
	}

	return size;
}
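
/*
 * Because the loop above adds mend - mstart rather than the raw buffer
 * length, a buffer that fastrpc_get_buff_overlaps() found to lie
 * entirely inside an earlier one (mstart == mend == 0) costs no payload
 * space, and only the non-overlapping tail of a partially overlapping
 * buffer is counted.
 */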
static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {
		/* Make sure reserved field is set to 0 */
		if (ctx->args[i].reserved)
			return -EINVAL;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}
	}
	return 0;
}
static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
	pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
		sizeof(*rpra));
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;

	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].pv = 0;
		rpra[i].len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;

		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;

		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}

	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		rpra[i].pv = (u64) ctx->args[i].ptr;
		rpra[i].len = ctx->args[i].length;
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}
static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	struct fastrpc_remote_arg *rpra = ctx->rpra;
	int i, inbufs;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		void *src = (void *)(uintptr_t)rpra[i].pv;
		void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
		u64 len = rpra[i].len;

		if (!kernel) {
			if (copy_to_user((void __user *)dst, src, len))
				return -EFAULT;
		} else {
			memcpy(dst, src, len);
		}
	}

	return 0;
}
static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, uint32_t handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;
	int ret;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));

	if (ret)
		fastrpc_context_put(ctx);

	return ret;
}
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}

	/* make sure that all CPU memory writes are seen by DSP */
	dma_wmb();
	/* Send invoke buffer to remote dsp */
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;

	if (kernel) {
		if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
			err = -ETIMEDOUT;
	} else {
		err = wait_for_completion_interruptible(&ctx->work);
	}

	if (err)
		goto bail;

	/* Check the response from remote dsp */
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
		/* make sure that all memory writes by DSP are seen by CPU */
		dma_rmb();
		/* populate all the output buffers with results */
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
	if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
		/* We are done with this compute context */
		spin_lock(&fl->lock);
		list_del(&ctx->node);
		spin_unlock(&fl->lock);
		fastrpc_context_put(ctx);
	}
	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}
static int fastrpc_init_create_process(struct fastrpc_user *fl,
					char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = USER_PD;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	if (map) {
		spin_lock(&fl->lock);
		list_del(&map->node);
		spin_unlock(&fl->lock);
		fastrpc_map_put(map);
	}
err:
	kfree(args);

	return err;
}
static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}
static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	struct fastrpc_buf *buf, *b;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node) {
		list_del(&map->node);
		fastrpc_map_put(map);
	}

	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		list_del(&buf->node);
		fastrpc_buf_free(buf);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	/* Released in fastrpc_device_release() */
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->mmaps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}
static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		dma_buf_put(buf->dmabuf);
		return -EFAULT;
	}

	return 0;
}
static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = pd;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}
static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;

	/* nscalars is truncated here to max supported value */
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}
static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
				   struct fastrpc_req_munmap *req)
{
	struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
	struct fastrpc_buf *buf, *b;
	struct fastrpc_munmap_req_msg req_msg;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	spin_lock(&fl->lock);
	list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
		if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
			break;
		buf = NULL;
	}
	spin_unlock(&fl->lock);

	if (!buf) {
		dev_err(dev, "mmap not in list\n");
		return -EINVAL;
	}

	req_msg.pgid = fl->tgid;
	req_msg.size = buf->size;
	req_msg.vaddr = buf->raddr;

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (!err) {
		dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
		spin_lock(&fl->lock);
		list_del(&buf->node);
		spin_unlock(&fl->lock);
		fastrpc_buf_free(buf);
	} else {
		dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
	}

	return err;
}
static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_req_munmap req;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	return fastrpc_req_munmap_impl(fl, &req);
}
static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
	struct fastrpc_buf *buf = NULL;
	struct fastrpc_mmap_req_msg req_msg;
	struct fastrpc_mmap_rsp_msg rsp_msg;
	struct fastrpc_req_munmap req_unmap;
	struct fastrpc_phy_page pages;
	struct fastrpc_req_mmap req;
	struct device *dev = fl->sctx->dev;
	int err;
	u32 sc;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	if (req.flags != ADSP_MMAP_ADD_PAGES) {
		dev_err(dev, "flag not supported 0x%x\n", req.flags);
		return -EINVAL;
	}

	if (req.vaddrin) {
		dev_err(dev, "adding user allocated pages is not supported\n");
		return -EINVAL;
	}

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
	if (err) {
		dev_err(dev, "failed to allocate buffer\n");
		return err;
	}

	req_msg.pgid = fl->tgid;
	req_msg.flags = req.flags;
	req_msg.vaddr = req.vaddrin;
	req_msg.num = sizeof(pages);

	args[0].ptr = (u64) (uintptr_t) &req_msg;
	args[0].length = sizeof(req_msg);

	pages.addr = buf->phys;
	pages.size = buf->size;

	args[1].ptr = (u64) (uintptr_t) &pages;
	args[1].length = sizeof(pages);

	args[2].ptr = (u64) (uintptr_t) &rsp_msg;
	args[2].length = sizeof(rsp_msg);

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
				      &args[0]);
	if (err) {
		dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
		goto err_invoke;
	}

	/* update the buffer to be able to deallocate the memory on the DSP */
	buf->raddr = (uintptr_t) rsp_msg.vaddr;

	/* let the client know the address to use */
	req.vaddrout = rsp_msg.vaddr;

	spin_lock(&fl->lock);
	list_add_tail(&buf->node, &fl->mmaps);
	spin_unlock(&fl->lock);

	if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
		/* unmap the memory and release the buffer */
		req_unmap.vaddrout = buf->raddr;
		req_unmap.size = buf->size;
		fastrpc_req_munmap_impl(fl, &req_unmap);
		return -EFAULT;
	}

	dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
		buf->raddr, buf->size);

	return 0;

err_invoke:
	fastrpc_buf_free(buf);

	return err;
}
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl, AUDIO_PD);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH_SNS:
		err = fastrpc_init_attach(fl, SENSORS_PD);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	case FASTRPC_IOCTL_MMAP:
		err = fastrpc_req_mmap(fl, argp);
		break;
	case FASTRPC_IOCTL_MUNMAP:
		err = fastrpc_req_munmap(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}
static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};
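
/*
 * A minimal userspace sketch of driving this interface, assuming the
 * uapi layout of struct fastrpc_alloc_dma_buf (fd/flags/size members);
 * error handling is elided:
 *
 *	int fd = open("/dev/fastrpc-adsp", O_RDONLY);
 *	struct fastrpc_alloc_dma_buf buf = { .size = 4096 };
 *
 *	ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH, 0);	// attach to AUDIO_PD
 *	ioctl(fd, FASTRPC_IOCTL_ALLOC_DMA_BUFF, &buf);	// buf.fd: dma-buf fd
 */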
static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	sess = &cctx->session[cctx->sesscount];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	cctx->sesscount++;
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}
static int fastrpc_cb_remove(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
	struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
		if (cctx->session[i].sid == sess->sid) {
			cctx->session[i].valid = false;
			cctx->sesscount--;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};
static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1;
	const char *domain;

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->miscdev.minor = MISC_DYNAMIC_MINOR;
	data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
					    domains[domain_id]);
	data->miscdev.fops = &fastrpc_fops;
	err = misc_register(&data->miscdev);
	if (err) {
		kfree(data);
		return err;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}
static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}
static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	misc_deregister(&cctx->miscdev);
	of_platform_depopulate(&rpdev->dev);

	cctx->rpdev = NULL;
	fastrpc_channel_ctx_put(cctx);
}
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);

	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */
	schedule_work(&ctx->put_work);

	return 0;
}
static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};
static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);
static void fastrpc_exit(void)
{
	platform_driver_unregister(&fastrpc_cb_driver);
	unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);
MODULE_LICENSE("GPL v2");