// SPDX-License-Identifier: GPL-2.0-only
/*
 * Arm Firmware Framework for ARMv8-A(FFA) interface driver
 *
 * The Arm FFA specification[1] describes a software architecture to
 * leverage the virtualization extension to isolate software images
 * provided by an ecosystem of vendors from each other and describes
 * interfaces that standardize communication between the various software
 * images including communication between images in the Secure world and
 * Normal world. Any Hypervisor could use the FFA interfaces to enable
 * communication between VMs it manages.
 *
 * The Hypervisor a.k.a Partition managers in FFA terminology can assign
 * system resources(Memory regions, Devices, CPU cycles) to the partitions
 * and manage isolation amongst them.
 *
 * [1] https://developer.arm.com/docs/den0077/latest
 *
 * Copyright (C) 2021 ARM Ltd.
 */
#define DRIVER_NAME "ARM FF-A"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
#include <linux/acpi.h>
#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_irq.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

#include "common.h"
#define FFA_DRIVER_VERSION	FFA_VERSION_1_1
#define FFA_MIN_VERSION		FFA_VERSION_1_0

#define SENDER_ID_MASK		GENMASK(31, 16)
#define RECEIVER_ID_MASK	GENMASK(15, 0)
#define SENDER_ID(x)		((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
#define RECEIVER_ID(x)		((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
#define PACK_TARGET_INFO(s, r)		\
	(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
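/*
 * Example (illustrative): PACK_TARGET_INFO(0x8001, 0x8002) places the
 * sender ID in bits [31:16] and the receiver ID in bits [15:0], giving
 * 0x80018002; SENDER_ID() and RECEIVER_ID() invert the packing.
 */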
#define RXTX_MAP_MIN_BUFSZ_MASK	GENMASK(1, 0)
#define RXTX_MAP_MIN_BUFSZ(x)	((x) & RXTX_MAP_MIN_BUFSZ_MASK)

#define FFA_MAX_NOTIFICATIONS		64
static ffa_fn *invoke_ffa_fn;
static const int ffa_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,		/* FFA_RET_SUCCESS */
	-EOPNOTSUPP,	/* FFA_RET_NOT_SUPPORTED */
	-EINVAL,	/* FFA_RET_INVALID_PARAMETERS */
	-ENOMEM,	/* FFA_RET_NO_MEMORY */
	-EBUSY,		/* FFA_RET_BUSY */
	-EINTR,		/* FFA_RET_INTERRUPTED */
	-EACCES,	/* FFA_RET_DENIED */
	-EAGAIN,	/* FFA_RET_RETRY */
	-ECANCELED,	/* FFA_RET_ABORTED */
	-ENODATA,	/* FFA_RET_NO_DATA */
	-EAGAIN,	/* FFA_RET_NOT_READY */
};
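/*
 * Illustrative mapping, relying on the spec-defined consecutive status
 * values: FFA_RET_BUSY is -4, so err_idx = 4 below selects -EBUSY from
 * the table above.
 */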
static inline int ffa_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
		return ffa_linux_errmap[err_idx];
	return -EINVAL;
}
struct ffa_pcpu_irq {
	struct ffa_drv_info *info;
};
struct ffa_drv_info {
	u32 version;
	u16 vm_id;
	struct mutex rx_lock; /* lock to protect Rx buffer */
	struct mutex tx_lock; /* lock to protect Tx buffer */
	void *rx_buffer;
	void *tx_buffer;
	size_t rxtx_bufsz;
	bool mem_ops_native;
	bool msg_direct_req2_supp;
	bool bitmap_created;
	bool notif_enabled;
	unsigned int sched_recv_irq;
	unsigned int notif_pend_irq;
	unsigned int cpuhp_state;
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	struct workqueue_struct *notif_pcpu_wq;
	struct work_struct notif_pcpu_work;
	struct work_struct sched_recv_irq_work;
	struct xarray partition_info;
	DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
	struct mutex notify_lock; /* lock to protect notifier hashtable */
};
static struct ffa_drv_info *drv_info;
static void ffa_partitions_cleanup(void);
/*
 * The driver must be able to support all the versions from the earliest
 * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
 * The specification states that if firmware supports a FFA implementation
 * that is incompatible with and at a greater version number than specified
 * by the caller(FFA_DRIVER_VERSION passed as parameter to FFA_VERSION),
 * it must return the NOT_SUPPORTED error code.
 */
static u32 ffa_compatible_version_find(u32 version)
{
	u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
	u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
	u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);

	if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
		return version;

	pr_info("Firmware version higher than driver version, downgrading\n");
	return FFA_DRIVER_VERSION;
}
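/*
 * Negotiation example (illustrative): firmware reporting a version
 * higher than FFA_DRIVER_VERSION (say v1.2 against a v1.1 driver) is
 * downgraded to FFA_DRIVER_VERSION; anything between FFA_MIN_VERSION
 * and the driver version is adopted as-is.
 */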
static int ffa_version_check(u32 *version)
{
	ffa_value_t ver;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
		      }, &ver);

	if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
		pr_info("FFA_VERSION returned not supported\n");
		return -EOPNOTSUPP;
	}

	if (ver.a0 < FFA_MIN_VERSION) {
		pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
		       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
		       FFA_MAJOR_VERSION(FFA_MIN_VERSION),
		       FFA_MINOR_VERSION(FFA_MIN_VERSION));
		return -EINVAL;
	}

	pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
		FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
	pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
		FFA_MINOR_VERSION(ver.a0));
	*version = ffa_compatible_version_find(ver.a0);

	return 0;
}
static int ffa_rx_release(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RX_RELEASE,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	/* check for ret.a0 == FFA_RX_RELEASE ? */

	return 0;
}
static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FN_NATIVE(RXTX_MAP),
		      .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}
static int ffa_rxtx_unmap(u16 vm_id)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}
static int ffa_features(u32 func_feat_id, u32 input_props,
			u32 *if_props_1, u32 *if_props_2)
{
	ffa_value_t id;

	if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
		pr_err("%s: Invalid Parameters: %x, %x", __func__,
		       func_feat_id, input_props);
		return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	if (if_props_1)
		*if_props_1 = id.a2;
	if (if_props_2)
		*if_props_2 = id.a3;

	return 0;
}
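/*
 * Usage sketch (illustrative): feature queries drive the optional parts
 * of this driver, e.g. ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0,
 * &intid, NULL) in ffa_irq_map() below retrieves the SGI number donated
 * for the Schedule Receiver interrupt.
 */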
#define PARTITION_INFO_GET_RETURN_COUNT_ONLY	BIT(0)

/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
			 struct ffa_partition_info *buffer, int num_partitions)
{
	int idx, count, flags = 0, sz, buf_sz;
	ffa_value_t partition_info;

	if (drv_info->version > FFA_VERSION_1_0 &&
	    (!buffer || !num_partitions)) /* Just get the count for now */
		flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;

	mutex_lock(&drv_info->rx_lock);
	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_PARTITION_INFO_GET,
		      .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
		      .a5 = flags,
		      }, &partition_info);

	if (partition_info.a0 == FFA_ERROR) {
		mutex_unlock(&drv_info->rx_lock);
		return ffa_to_linux_errno((int)partition_info.a2);
	}

	count = partition_info.a2;

	if (drv_info->version > FFA_VERSION_1_0) {
		buf_sz = sz = partition_info.a3;
		if (sz > sizeof(*buffer))
			buf_sz = sizeof(*buffer);
	} else {
		/* FFA_VERSION_1_0 lacks size in the response */
		buf_sz = sz = 8;
	}

	if (buffer && count <= num_partitions)
		for (idx = 0; idx < count; idx++)
			memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
			       buf_sz);

	ffa_rx_release();

	mutex_unlock(&drv_info->rx_lock);

	return count;
}
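/*
 * The register-based variant below avoids the RxTx buffers entirely:
 * a2 of the response packs the last index, current index, a UUID info
 * tag and the per-entry size (see the masks that follow), while the
 * partition descriptors themselves are returned in the registers from
 * a3 onwards and copied out directly.
 */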
#define LAST_INDEX_MASK		GENMASK(15, 0)
#define CURRENT_INDEX_MASK	GENMASK(31, 16)
#define UUID_INFO_TAG_MASK	GENMASK(47, 32)
#define PARTITION_INFO_SZ_MASK	GENMASK(63, 48)
#define PARTITION_COUNT(x)	((u16)(FIELD_GET(LAST_INDEX_MASK, (x))) + 1)
#define CURRENT_INDEX(x)	((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
#define UUID_INFO_TAG(x)	((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
#define PARTITION_INFO_SZ(x)	((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
static int
__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
			      struct ffa_partition_info *buffer, int num_parts)
{
	u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
	ffa_value_t partition_info;

	do {
		start_idx = prev_idx ? prev_idx + 1 : 0;

		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_PARTITION_INFO_GET_REGS,
			      .a1 = (u64)uuid1 << 32 | uuid0,
			      .a2 = (u64)uuid3 << 32 | uuid2,
			      .a3 = start_idx | tag << 16,
			      }, &partition_info);

		if (partition_info.a0 == FFA_ERROR)
			return ffa_to_linux_errno((int)partition_info.a2);

		if (!count)
			count = PARTITION_COUNT(partition_info.a2);
		if (!buffer || !num_parts) /* count only */
			return count;

		cur_idx = CURRENT_INDEX(partition_info.a2);
		tag = UUID_INFO_TAG(partition_info.a2);
		buf_sz = PARTITION_INFO_SZ(partition_info.a2);
		if (buf_sz > sizeof(*buffer))
			buf_sz = sizeof(*buffer);

		memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
		       (cur_idx - start_idx + 1) * buf_sz);
		prev_idx = cur_idx;

	} while (cur_idx < (count - 1));

	return count;
}
/* buffer is allocated and caller must free the same if returned count > 0 */
static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
{
	int count;
	u32 uuid0_4[4];
	bool reg_mode = false;
	struct ffa_partition_info *pbuf;

	if (!ffa_features(FFA_PARTITION_INFO_GET_REGS, 0, NULL, NULL))
		reg_mode = true;

	export_uuid((u8 *)uuid0_4, uuid);
	if (reg_mode)
		count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
						      uuid0_4[2], uuid0_4[3],
						      NULL, 0);
	else
		count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
						 uuid0_4[2], uuid0_4[3],
						 NULL, 0);
	if (count <= 0)
		return count;

	pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
	if (!pbuf)
		return -ENOMEM;

	if (reg_mode)
		count = __ffa_partition_info_get_regs(uuid0_4[0], uuid0_4[1],
						      uuid0_4[2], uuid0_4[3],
						      pbuf, count);
	else
		count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1],
						 uuid0_4[2], uuid0_4[3],
						 pbuf, count);
	if (count <= 0)
		kfree(pbuf);
	else
		*buffer = pbuf;

	return count;
}
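/*
 * Usage sketch (illustrative): ffa_partition_probe(&uuid_null, &pbuf)
 * enumerates all partitions, as done in ffa_setup_partitions() below;
 * when the returned count is positive the caller owns pbuf and must
 * kfree() it.
 */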
#define VM_ID_MASK	GENMASK(15, 0)
static int ffa_id_get(u16 *vm_id)
{
	ffa_value_t id;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_ID_GET,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	*vm_id = FIELD_GET(VM_ID_MASK, (id.a2));

	return 0;
}
static inline void ffa_msg_send_wait_for_completion(ffa_value_t *ret)
{
	while (ret->a0 == FFA_INTERRUPT || ret->a0 == FFA_YIELD) {
		if (ret->a0 == FFA_YIELD)
			fsleep(1000);

		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_RUN, .a1 = ret->a1,
			      }, ret);
	}
}
static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
				   struct ffa_send_direct_data *data)
{
	u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
	ffa_value_t ret;

	if (mode_32bit) {
		req_id = FFA_MSG_SEND_DIRECT_REQ;
		resp_id = FFA_MSG_SEND_DIRECT_RESP;
	} else {
		req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
		resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
		      .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
		      .a6 = data->data3, .a7 = data->data4,
		      }, &ret);

	ffa_msg_send_wait_for_completion(&ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == resp_id) {
		data->data0 = ret.a3;
		data->data1 = ret.a4;
		data->data2 = ret.a5;
		data->data3 = ret.a6;
		data->data4 = ret.a7;
		return 0;
	}

	return -EINVAL;
}
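/*
 * Note on the direct request above (illustrative): the payload occupies
 * the same registers in both directions, data0..data4 mapping to a3..a7
 * of the request and of the matching direct response; a2 is sent as
 * zero here.
 */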
static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
{
	u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
	struct ffa_indirect_msg_hdr *msg;
	ffa_value_t ret;
	int retval = 0;

	if (sz > (drv_info->rxtx_bufsz - sizeof(*msg)))
		return -ERANGE;

	mutex_lock(&drv_info->tx_lock);

	msg = drv_info->tx_buffer;
	msg->flags = 0;
	msg->res0 = 0;
	msg->offset = sizeof(*msg);
	msg->send_recv_id = src_dst_ids;
	msg->size = sz;
	memcpy((u8 *)msg + msg->offset, buf, sz);

	/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		retval = ffa_to_linux_errno((int)ret.a2);

	mutex_unlock(&drv_info->tx_lock);
	return retval;
}
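/*
 * Note on FFA_MSG_SEND2 above: the Tx buffer carries
 * [ struct ffa_indirect_msg_hdr | payload of sz bytes ], with
 * msg->offset pointing just past the header; the receiver is identified
 * by the packed send_recv_id in the header rather than by arguments to
 * the call itself.
 */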
static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid,
				    struct ffa_send_direct_data2 *data)
{
	u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
	union {
		uuid_t uuid;
		__le64 regs[2];
	} uuid_regs = { .uuid = *uuid };
	ffa_value_t ret, args = {
		.a0 = FFA_MSG_SEND_DIRECT_REQ2,
		.a1 = src_dst_ids,
		.a2 = le64_to_cpu(uuid_regs.regs[0]),
		.a3 = le64_to_cpu(uuid_regs.regs[1]),
	};
	memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data));

	invoke_ffa_fn(args, &ret);

	ffa_msg_send_wait_for_completion(&ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) {
		memcpy(data, (void *)&ret + offsetof(ffa_value_t, a4), sizeof(*data));
		return 0;
	}

	return -EINVAL;
}
static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
			      u32 frag_len, u32 len, u64 *handle)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func_id, .a1 = len, .a2 = frag_len,
		      .a3 = buf, .a4 = buf_sz,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_SUCCESS) {
		if (handle)
			*handle = PACK_HANDLE(ret.a2, ret.a3);
	} else if (ret.a0 == FFA_MEM_FRAG_RX) {
		if (handle)
			*handle = PACK_HANDLE(ret.a1, ret.a2);
	} else {
		return -EOPNOTSUPP;
	}

	return frag_len;
}
static int ffa_mem_next_frag(u64 handle, u32 frag_len)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_FRAG_TX,
		      .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
		      .a3 = frag_len,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_MEM_FRAG_RX)
		return ret.a3;
	else if (ret.a0 == FFA_SUCCESS)
		return 0;

	return -EOPNOTSUPP;
}
static int
ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
		      u32 len, u64 *handle, bool first)
{
	if (!first)
		return ffa_mem_next_frag(*handle, frag_len);

	return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
}
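/*
 * Fragmentation in brief: the first fragment is sent through the
 * MEM_SHARE/MEM_LEND function itself and yields the global handle;
 * any remaining fragments are pushed with FFA_MEM_FRAG_TX against that
 * handle until the full descriptor of 'len' bytes has been transmitted.
 */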
static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
{
	u32 num_pages = 0;

	do {
		num_pages += sg->length / FFA_PAGE_SIZE;
	} while ((sg = sg_next(sg)));

	return num_pages;
}
static u16 ffa_memory_attributes_get(u32 func_id)
{
	/*
	 * For the memory lend or donate operation, if the receiver is a PE or
	 * a proxy endpoint, the owner/sender must not specify the attributes.
	 */
	if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
	    func_id == FFA_MEM_LEND)
		return 0;

	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
		       struct ffa_mem_ops_args *args)
{
	int rc = 0;
	bool first = true;
	u32 composite_offset;
	phys_addr_t addr = 0;
	struct ffa_mem_region *mem_region = buffer;
	struct ffa_composite_mem_region *composite;
	struct ffa_mem_region_addr_range *constituents;
	struct ffa_mem_region_attributes *ep_mem_access;
	u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);

	mem_region->tag = args->tag;
	mem_region->flags = args->flags;
	mem_region->sender_id = drv_info->vm_id;
	mem_region->attributes = ffa_memory_attributes_get(func_id);
	ep_mem_access = buffer +
			ffa_mem_desc_offset(buffer, 0, drv_info->version);
	composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
					       drv_info->version);

	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
		ep_mem_access->receiver = args->attrs[idx].receiver;
		ep_mem_access->attrs = args->attrs[idx].attrs;
		ep_mem_access->composite_off = composite_offset;
		ep_mem_access->flag = 0;
		ep_mem_access->reserved = 0;
	}
	mem_region->handle = 0;
	mem_region->ep_count = args->nattrs;
	if (drv_info->version <= FFA_VERSION_1_0) {
		mem_region->ep_mem_size = 0;
	} else {
		mem_region->ep_mem_size = sizeof(*ep_mem_access);
		mem_region->ep_mem_offset = sizeof(*mem_region);
		memset(mem_region->reserved, 0, 12);
	}

	composite = buffer + composite_offset;
	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
	composite->addr_range_cnt = num_entries;
	composite->reserved = 0;

	length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
	frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
	if (frag_len > max_fragsize)
		return -ENXIO;

	if (!args->use_txbuf) {
		addr = virt_to_phys(buffer);
		buf_sz = max_fragsize / FFA_PAGE_SIZE;
	}

	constituents = buffer + frag_len;
	do {
		if (frag_len == max_fragsize) {
			rc = ffa_transmit_fragment(func_id, addr, buf_sz,
						   frag_len, length,
						   &args->g_handle, first);
			if (rc < 0)
				return -ENXIO;

			first = false;
			frag_len = 0;
			constituents = buffer;
		}

		if ((void *)constituents - buffer > max_fragsize) {
			pr_err("Memory Region Fragment > Tx Buffer size\n");
			return -EFAULT;
		}

		constituents->address = sg_phys(args->sg);
		constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
		constituents->reserved = 0;
		constituents++;
		frag_len += sizeof(struct ffa_mem_region_addr_range);
	} while ((args->sg = sg_next(args->sg)));

	return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
				     length, &args->g_handle, first);
}
static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
{
	int ret;
	void *buffer;
	size_t rxtx_bufsz = drv_info->rxtx_bufsz;

	if (!args->use_txbuf) {
		buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;
	} else {
		buffer = drv_info->tx_buffer;
		mutex_lock(&drv_info->tx_lock);
	}

	ret = ffa_setup_and_transmit(func_id, buffer, rxtx_bufsz, args);

	if (args->use_txbuf)
		mutex_unlock(&drv_info->tx_lock);
	else
		free_pages_exact(buffer, rxtx_bufsz);

	return ret < 0 ? ret : 0;
}
static int ffa_memory_reclaim(u64 g_handle, u32 flags)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_RECLAIM,
		      .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
		      .a3 = flags,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}
static int ffa_notification_bitmap_create(void)
{
	ffa_value_t ret;
	u16 vcpu_count = nr_cpu_ids;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
		      .a1 = drv_info->vm_id, .a2 = vcpu_count,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}
static int ffa_notification_bitmap_destroy(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
		      .a1 = drv_info->vm_id,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}
#define NOTIFICATION_LOW_MASK		GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK		GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x)	\
		((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
#define NOTIFICATION_BITMAP_LOW(x)	\
		((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
#define PACK_NOTIFICATION_BITMAP(low, high)	\
	(FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) |	\
	 FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))
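/*
 * Example (illustrative): a 64-bit notification bitmap travels as two
 * 32-bit register values; PACK_NOTIFICATION_BITMAP(0x1, 0x2)
 * reassembles them into 0x0000000200000001.
 */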
#define RECEIVER_VCPU_MASK		GENMASK(31, 16)
#define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r)	\
	(FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) |	\
	 FIELD_PREP(RECEIVER_ID_MASK, (r)))

#define NOTIFICATION_INFO_GET_MORE_PEND_MASK	BIT(0)
#define NOTIFICATION_INFO_GET_ID_COUNT		GENMASK(11, 7)
#define ID_LIST_MASK_64				GENMASK(51, 12)
#define ID_LIST_MASK_32				GENMASK(31, 12)
#define MAX_IDS_64				20
#define MAX_IDS_32				10

#define PER_VCPU_NOTIFICATION_FLAG		BIT(0)
#define SECURE_PARTITION_BITMAP			BIT(0)
#define NON_SECURE_VM_BITMAP			BIT(1)
#define SPM_FRAMEWORK_BITMAP			BIT(2)
#define NS_HYP_FRAMEWORK_BITMAP			BIT(3)
static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
					u32 flags, bool is_bind)
{
	ffa_value_t ret;
	u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);

	func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}
int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
{
	ffa_value_t ret;
	u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);

	invoke_ffa_fn((ffa_value_t) {
		      .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}
{
839 static int ffa_notification_get(u32 flags
, struct ffa_notify_bitmaps
*notify
)
842 u16 src_id
= drv_info
->vm_id
;
843 u16 cpu_id
= smp_processor_id();
844 u32 rec_vcpu_ids
= PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id
, src_id
);
846 invoke_ffa_fn((ffa_value_t
){
847 .a0
= FFA_NOTIFICATION_GET
, .a1
= rec_vcpu_ids
, .a2
= flags
,
850 if (ret
.a0
== FFA_ERROR
)
851 return ffa_to_linux_errno((int)ret
.a2
);
852 else if (ret
.a0
!= FFA_SUCCESS
)
853 return -EINVAL
; /* Something else went wrong. */
855 notify
->sp_map
= PACK_NOTIFICATION_BITMAP(ret
.a2
, ret
.a3
);
856 notify
->vm_map
= PACK_NOTIFICATION_BITMAP(ret
.a4
, ret
.a5
);
857 notify
->arch_map
= PACK_NOTIFICATION_BITMAP(ret
.a6
, ret
.a7
);
struct ffa_dev_part_info {
	ffa_sched_recv_cb callback;
	void *cb_data;
	rwlock_t rw_lock;
};
static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
	struct ffa_dev_part_info *partition;
	ffa_sched_recv_cb callback;
	void *cb_data;

	partition = xa_load(&drv_info->partition_info, part_id);
	if (!partition) {
		pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
		return;
	}

	read_lock(&partition->rw_lock);
	callback = partition->callback;
	cb_data = partition->cb_data;
	read_unlock(&partition->rw_lock);

	if (callback)
		callback(vcpu, is_per_vcpu, cb_data);
}
static void ffa_notification_info_get(void)
{
	int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
	bool is_64b_resp;
	ffa_value_t ret;
	u64 id_list;

	do {
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
			      }, &ret);

		if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
			if (ret.a2 != FFA_RET_NO_DATA)
				pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
				       ret.a0, ret.a2);
			return;
		}

		is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);

		ids_processed = 0;
		lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
		if (is_64b_resp) {
			max_ids = MAX_IDS_64;
			id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
		} else {
			max_ids = MAX_IDS_32;
			id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
		}

		for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
			ids_count[idx] = (id_list & 0x3) + 1;

		/* Process IDs */
		for (list = 0; list < lists_cnt; list++) {
			u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;

			if (ids_processed >= max_ids - 1)
				break;

			part_id = packed_id_list[ids_processed++];

			if (ids_count[list] == 1) { /* Global Notification */
				__do_sched_recv_cb(part_id, 0, false);
				continue;
			}

			/* Per vCPU Notification */
			for (idx = 0; idx < ids_count[list]; idx++) {
				if (ids_processed >= max_ids - 1)
					break;

				vcpu_id = packed_id_list[ids_processed++];

				__do_sched_recv_cb(part_id, vcpu_id, true);
			}
		}
	} while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
}
static int ffa_run(struct ffa_device *dev, u16 vcpu)
{
	ffa_value_t ret;
	u32 target = dev->vm_id << 16 | vcpu;

	invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);

	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
			      &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}
static void ffa_drvinfo_flags_init(void)
{
	if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
	    !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
		drv_info->mem_ops_native = true;

	if (!ffa_features(FFA_MSG_SEND_DIRECT_REQ2, 0, NULL, NULL) ||
	    !ffa_features(FFA_MSG_SEND_DIRECT_RESP2, 0, NULL, NULL))
		drv_info->msg_direct_req2_supp = true;
}
static u32 ffa_api_version_get(void)
{
	return drv_info->version;
}
static int ffa_partition_info_get(const char *uuid_str,
				  struct ffa_partition_info *buffer)
{
	int count;
	uuid_t uuid;
	struct ffa_partition_info *pbuf;

	if (uuid_parse(uuid_str, &uuid)) {
		pr_err("invalid uuid (%s)\n", uuid_str);
		return -ENODEV;
	}

	count = ffa_partition_probe(&uuid, &pbuf);
	if (count <= 0)
		return -ENOENT;

	memcpy(buffer, pbuf, sizeof(*pbuf) * count);
	kfree(pbuf);
	return 0;
}
static void ffa_mode_32bit_set(struct ffa_device *dev)
{
	dev->mode_32bit = true;
}
static int ffa_sync_send_receive(struct ffa_device *dev,
				 struct ffa_send_direct_data *data)
{
	return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
				       dev->mode_32bit, data);
}
static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
{
	return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
}
static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
				  struct ffa_send_direct_data2 *data)
{
	if (!drv_info->msg_direct_req2_supp)
		return -EOPNOTSUPP;

	return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
					uuid, data);
}
static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);

	return ffa_memory_ops(FFA_MEM_SHARE, args);
}

static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
	/* Note that upon a successful MEM_LEND request the caller
	 * must ensure that the memory region specified is not accessed
	 * until a successful MEM_RECLAIM call has been made.
	 * On systems with a hypervisor present this will be enforced,
	 * however on systems without a hypervisor the responsibility
	 * falls to the calling kernel driver to prevent access.
	 */
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);

	return ffa_memory_ops(FFA_MEM_LEND, args);
}
#define FFA_SECURE_PARTITION_ID_FLAG	BIT(15)

#define ffa_notifications_disabled()	(!drv_info->notif_enabled)
enum notify_type {
	NON_SECURE_VM,
	SECURE_PARTITION,
	FRAMEWORK,
};

struct notifier_cb_info {
	struct hlist_node hnode;
	ffa_notifier_cb cb;
	void *cb_data;
	enum notify_type type;
};
static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
				    void *cb_data, bool is_registration)
{
	struct ffa_dev_part_info *partition;
	bool cb_valid;

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	partition = xa_load(&drv_info->partition_info, part_id);
	if (!partition) {
		pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
		return -EINVAL;
	}

	write_lock(&partition->rw_lock);

	cb_valid = !!partition->callback;
	if (!(is_registration ^ cb_valid)) {
		write_unlock(&partition->rw_lock);
		return -EINVAL;
	}

	partition->callback = callback;
	partition->cb_data = cb_data;

	write_unlock(&partition->rw_lock);

	return 0;
}
static int ffa_sched_recv_cb_register(struct ffa_device *dev,
				      ffa_sched_recv_cb cb, void *cb_data)
{
	return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
}

static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
	return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
}
static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
{
	return ffa_notification_bind_common(dst_id, bitmap, flags, true);
}

static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
{
	return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}
/* Should be called while the notify_lock is taken */
static struct notifier_cb_info *
notifier_hash_node_get(u16 notify_id, enum notify_type type)
{
	struct notifier_cb_info *node;

	hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
		if (type == node->type)
			return node;

	return NULL;
}
static int
update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
		   void *cb_data, bool is_registration)
{
	struct notifier_cb_info *cb_info = NULL;
	bool cb_found;

	cb_info = notifier_hash_node_get(notify_id, type);
	cb_found = !!cb_info;

	if (!(is_registration ^ cb_found))
		return -EINVAL;

	if (is_registration) {
		cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
		if (!cb_info)
			return -ENOMEM;

		cb_info->type = type;
		cb_info->cb = cb;
		cb_info->cb_data = cb_data;

		hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
	} else {
		hash_del(&cb_info->hnode);
		kfree(cb_info);
	}

	return 0;
}
static enum notify_type ffa_notify_type_get(u16 vm_id)
{
	if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
		return SECURE_PARTITION;

	return NON_SECURE_VM;
}
static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
{
	int rc;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
	if (rc) {
		pr_err("Could not unregister notification callback\n");
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));

	mutex_unlock(&drv_info->notify_lock);

	return rc;
}
static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
			      ffa_notifier_cb cb, void *cb_data, int notify_id)
{
	int rc;
	u32 flags = 0;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	if (is_per_vcpu)
		flags = PER_VCPU_NOTIFICATION_FLAG;

	rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
	if (rc) {
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
	if (rc) {
		pr_err("Failed to register callback for %d - %d\n",
		       notify_id, rc);
		ffa_notification_unbind(dev->vm_id, BIT(notify_id));
	}

	mutex_unlock(&drv_info->notify_lock);

	return rc;
}
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
			   bool is_per_vcpu, u16 vcpu)
{
	u32 flags = 0;

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (is_per_vcpu)
		flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);

	return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
				    BIT(notify_id));
}
static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
{
	int notify_id;
	struct notifier_cb_info *cb_info = NULL;

	for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
	     notify_id++, bitmap >>= 1) {
		if (!(bitmap & 1))
			continue;

		mutex_lock(&drv_info->notify_lock);
		cb_info = notifier_hash_node_get(notify_id, type);
		mutex_unlock(&drv_info->notify_lock);

		if (cb_info && cb_info->cb)
			cb_info->cb(notify_id, cb_info->cb_data);
	}
}
static void notif_get_and_handle(void *unused)
{
	int rc;
	struct ffa_notify_bitmaps bitmaps;

	rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
				  SPM_FRAMEWORK_BITMAP, &bitmaps);
	if (rc) {
		pr_err("Failed to retrieve notifications with %d!\n", rc);
		return;
	}

	handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
	handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
	handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}
static void
ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
{
	struct ffa_drv_info *info = cb_data;

	if (!is_per_vcpu)
		notif_get_and_handle(info);
	else
		smp_call_function_single(vcpu, notif_get_and_handle, info, 0);
}
static void notif_pcpu_irq_work_fn(struct work_struct *work)
{
	struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
						 notif_pcpu_work);

	ffa_self_notif_handle(smp_processor_id(), true, info);
}
static const struct ffa_info_ops ffa_drv_info_ops = {
	.api_version_get = ffa_api_version_get,
	.partition_info_get = ffa_partition_info_get,
};

static const struct ffa_msg_ops ffa_drv_msg_ops = {
	.mode_32bit_set = ffa_mode_32bit_set,
	.sync_send_receive = ffa_sync_send_receive,
	.indirect_send = ffa_indirect_msg_send,
	.sync_send_receive2 = ffa_sync_send_receive2,
};

static const struct ffa_mem_ops ffa_drv_mem_ops = {
	.memory_reclaim = ffa_memory_reclaim,
	.memory_share = ffa_memory_share,
	.memory_lend = ffa_memory_lend,
};

static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
	.run = ffa_run,
};

static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
	.sched_recv_cb_register = ffa_sched_recv_cb_register,
	.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
	.notify_request = ffa_notify_request,
	.notify_relinquish = ffa_notify_relinquish,
	.notify_send = ffa_notify_send,
};

static const struct ffa_ops ffa_drv_ops = {
	.info_ops = &ffa_drv_info_ops,
	.msg_ops = &ffa_drv_msg_ops,
	.mem_ops = &ffa_drv_mem_ops,
	.cpu_ops = &ffa_drv_cpu_ops,
	.notifier_ops = &ffa_drv_notifier_ops,
};
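/*
 * Client usage sketch (illustrative): a driver bound to a device on the
 * FF-A bus reaches this interface through the ops installed at device
 * registration, e.g.:
 *
 *	struct ffa_send_direct_data data = { .data0 = CMD };
 *	int rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
 *
 * where CMD stands for a hypothetical partition-specific command value.
 */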
void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
	int idx, count;
	struct ffa_partition_info *pbuf, *tpbuf;

	count = ffa_partition_probe(uuid, &pbuf);
	if (count <= 0)
		return;

	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
		if (tpbuf->id == ffa_dev->vm_id)
			uuid_copy(&ffa_dev->uuid, uuid);

	kfree(pbuf);
}
static int
ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct ffa_device *fdev = to_ffa_dev(dev);

	if (action == BUS_NOTIFY_BIND_DRIVER) {
		struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
		const struct ffa_device_id *id_table = ffa_drv->id_table;

		/*
		 * FF-A v1.1 provides UUID for each partition as part of the
		 * discovery API, the discovered UUID must be populated in the
		 * device's UUID and there is no need to workaround by copying
		 * the same from the driver table.
		 */
		if (uuid_is_null(&fdev->uuid))
			ffa_device_match_uuid(fdev, &id_table->uuid);
	}

	return NOTIFY_DONE;
}
static struct notifier_block ffa_bus_nb = {
	.notifier_call = ffa_bus_notifier,
};
static int ffa_setup_partitions(void)
{
	int count, idx, ret;
	uuid_t uuid;
	struct ffa_device *ffa_dev;
	struct ffa_dev_part_info *info;
	struct ffa_partition_info *pbuf, *tpbuf;

	if (drv_info->version == FFA_VERSION_1_0) {
		ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb);
		if (ret)
			pr_err("Failed to register FF-A bus notifiers\n");
	}

	count = ffa_partition_probe(&uuid_null, &pbuf);
	if (count <= 0) {
		pr_info("%s: No partitions found, error %d\n", __func__, count);
		return -EINVAL;
	}

	xa_init(&drv_info->partition_info);
	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
		import_uuid(&uuid, (u8 *)tpbuf->uuid);

		/* Note that if the UUID is uuid_null, ffa_bus_notifier() will
		 * need to find the UUID of this partition ID with the help of
		 * ffa_device_match_uuid(). FF-A v1.1 and above provides the
		 * UUID here for each partition as part of the discovery API
		 * and the same is passed.
		 */
		ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
		if (!ffa_dev) {
			pr_err("%s: failed to register partition ID 0x%x\n",
			       __func__, tpbuf->id);
			continue;
		}

		ffa_dev->properties = tpbuf->properties;

		if (drv_info->version > FFA_VERSION_1_0 &&
		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
			ffa_mode_32bit_set(ffa_dev);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ffa_device_unregister(ffa_dev);
			continue;
		}
		rwlock_init(&info->rw_lock);
		ret = xa_insert(&drv_info->partition_info, tpbuf->id,
				info, GFP_KERNEL);
		if (ret) {
			pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
			       __func__, tpbuf->id, ret);
			ffa_device_unregister(ffa_dev);
			kfree(info);
		}
	}

	kfree(pbuf);

	/* Allocate for the host */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		/* Already registered devices are freed on bus_exit */
		ffa_partitions_cleanup();
		return -ENOMEM;
	}

	rwlock_init(&info->rw_lock);
	ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
			info, GFP_KERNEL);
	if (ret) {
		pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
		       __func__, drv_info->vm_id, ret);
		kfree(info);
		/* Already registered devices are freed on bus_exit */
		ffa_partitions_cleanup();
		return ret;
	}

	return 0;
}
static void ffa_partitions_cleanup(void)
{
	struct ffa_dev_part_info *info;
	unsigned long idx;

	xa_for_each(&drv_info->partition_info, idx, info) {
		xa_erase(&drv_info->partition_info, idx);
		kfree(info);
	}

	xa_destroy(&drv_info->partition_info);
}
/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT	(1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT		(2)
#define FFA_FEAT_MANAGED_EXIT_INT		(3)
static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data)
{
	struct ffa_pcpu_irq *pcpu = irq_data;
	struct ffa_drv_info *info = pcpu->info;

	queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);

	return IRQ_HANDLED;
}
static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
{
	struct ffa_pcpu_irq *pcpu = irq_data;
	struct ffa_drv_info *info = pcpu->info;

	queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
		      &info->notif_pcpu_work);

	return IRQ_HANDLED;
}
static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
{
	ffa_notification_info_get();
}
static int ffa_irq_map(u32 id)
{
	char *err_str;
	int ret, irq, intid;

	if (id == FFA_FEAT_NOTIFICATION_PENDING_INT)
		err_str = "Notification Pending Interrupt";
	else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT)
		err_str = "Schedule Receiver Interrupt";
	else
		err_str = "Unknown ID";

	/* The returned intid is assumed to be SGI donated to NS world */
	ret = ffa_features(id, 0, &intid, NULL);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			pr_err("Failed to retrieve FF-A %s %u\n", err_str, id);
		return ret;
	}

	if (acpi_disabled) {
		struct of_phandle_args oirq = {};
		struct device_node *gic;

		/* Only GICv3 supported currently with the device tree */
		gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
		if (!gic)
			return -ENXIO;

		oirq.np = gic;
		oirq.args_count = 1;
		oirq.args[0] = intid;
		irq = irq_create_of_mapping(&oirq);
		of_node_put(gic);
	} else {
		irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
					ACPI_ACTIVE_HIGH);
	}

	if (irq <= 0) {
		pr_err("Failed to create IRQ mapping!\n");
		return -ENODATA;
	}

	return irq;
}
static void ffa_irq_unmap(unsigned int irq)
{
	if (!irq)
		return;
	irq_dispose_mapping(irq);
}
static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
	if (drv_info->sched_recv_irq)
		enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
	if (drv_info->notif_pend_irq)
		enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
	return 0;
}
static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
	if (drv_info->sched_recv_irq)
		disable_percpu_irq(drv_info->sched_recv_irq);
	if (drv_info->notif_pend_irq)
		disable_percpu_irq(drv_info->notif_pend_irq);
	return 0;
}
static void ffa_uninit_pcpu_irq(void)
{
	if (drv_info->cpuhp_state) {
		cpuhp_remove_state(drv_info->cpuhp_state);
		drv_info->cpuhp_state = 0;
	}

	if (drv_info->notif_pcpu_wq) {
		destroy_workqueue(drv_info->notif_pcpu_wq);
		drv_info->notif_pcpu_wq = NULL;
	}

	if (drv_info->sched_recv_irq)
		free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);

	if (drv_info->notif_pend_irq)
		free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);

	if (drv_info->irq_pcpu) {
		free_percpu(drv_info->irq_pcpu);
		drv_info->irq_pcpu = NULL;
	}
}
static int ffa_init_pcpu_irq(void)
{
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	int ret, cpu;

	irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
	if (!irq_pcpu)
		return -ENOMEM;

	for_each_present_cpu(cpu)
		per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;

	drv_info->irq_pcpu = irq_pcpu;

	if (drv_info->sched_recv_irq) {
		ret = request_percpu_irq(drv_info->sched_recv_irq,
					 ffa_sched_recv_irq_handler,
					 "ARM-FFA-SRI", irq_pcpu);
		if (ret) {
			pr_err("Error registering percpu SRI nIRQ %d : %d\n",
			       drv_info->sched_recv_irq, ret);
			drv_info->sched_recv_irq = 0;
			return ret;
		}
	}

	if (drv_info->notif_pend_irq) {
		ret = request_percpu_irq(drv_info->notif_pend_irq,
					 notif_pend_irq_handler,
					 "ARM-FFA-NPI", irq_pcpu);
		if (ret) {
			pr_err("Error registering percpu NPI nIRQ %d : %d\n",
			       drv_info->notif_pend_irq, ret);
			drv_info->notif_pend_irq = 0;
			return ret;
		}
	}

	INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
	INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
	drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
	if (!drv_info->notif_pcpu_wq)
		return -ENOMEM;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
				ffa_cpuhp_pcpu_irq_enable,
				ffa_cpuhp_pcpu_irq_disable);
	if (ret < 0)
		return ret;

	drv_info->cpuhp_state = ret;

	return 0;
}
static void ffa_notifications_cleanup(void)
{
	ffa_uninit_pcpu_irq();
	ffa_irq_unmap(drv_info->sched_recv_irq);
	drv_info->sched_recv_irq = 0;
	ffa_irq_unmap(drv_info->notif_pend_irq);
	drv_info->notif_pend_irq = 0;

	if (drv_info->bitmap_created) {
		ffa_notification_bitmap_destroy();
		drv_info->bitmap_created = false;
	}
	drv_info->notif_enabled = false;
}
static void ffa_notifications_setup(void)
{
	int ret;

	ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
	if (!ret) {
		ret = ffa_notification_bitmap_create();
		if (ret) {
			pr_err("Notification bitmap create error %d\n", ret);
			return;
		}

		drv_info->bitmap_created = true;
	}

	ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
	if (ret > 0)
		drv_info->sched_recv_irq = ret;

	ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
	if (ret > 0)
		drv_info->notif_pend_irq = ret;

	if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
		goto cleanup;

	ret = ffa_init_pcpu_irq();
	if (ret)
		goto cleanup;

	hash_init(drv_info->notifier_hash);
	mutex_init(&drv_info->notify_lock);

	drv_info->notif_enabled = true;
	return;
cleanup:
	pr_info("Notification setup failed %d, not enabled\n", ret);
	ffa_notifications_cleanup();
}
static int __init ffa_init(void)
{
	int ret;
	u32 buf_sz;
	size_t rxtx_bufsz = SZ_4K;

	ret = ffa_transport_init(&invoke_ffa_fn);
	if (ret)
		return ret;

	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
	if (!drv_info)
		return -ENOMEM;

	ret = ffa_version_check(&drv_info->version);
	if (ret)
		goto free_drv_info;

	if (ffa_id_get(&drv_info->vm_id)) {
		pr_err("failed to obtain VM id for self\n");
		ret = -ENODEV;
		goto free_drv_info;
	}

	ret = ffa_features(FFA_FN_NATIVE(RXTX_MAP), 0, &buf_sz, NULL);
	if (!ret) {
		if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 1)
			rxtx_bufsz = SZ_64K;
		else if (RXTX_MAP_MIN_BUFSZ(buf_sz) == 2)
			rxtx_bufsz = SZ_16K;
	}

	drv_info->rxtx_bufsz = rxtx_bufsz;
	drv_info->rx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
	if (!drv_info->rx_buffer) {
		ret = -ENOMEM;
		goto free_drv_info;
	}

	drv_info->tx_buffer = alloc_pages_exact(rxtx_bufsz, GFP_KERNEL);
	if (!drv_info->tx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
			   virt_to_phys(drv_info->rx_buffer),
			   rxtx_bufsz / FFA_PAGE_SIZE);
	if (ret) {
		pr_err("failed to register FFA RxTx buffers\n");
		goto free_pages;
	}

	mutex_init(&drv_info->rx_lock);
	mutex_init(&drv_info->tx_lock);

	ffa_drvinfo_flags_init();

	ffa_notifications_setup();

	ret = ffa_setup_partitions();
	if (ret) {
		pr_err("failed to setup partitions\n");
		goto cleanup_notifs;
	}

	ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
				       drv_info, true);
	if (ret)
		pr_info("Failed to register driver sched callback %d\n", ret);

	return 0;

cleanup_notifs:
	ffa_notifications_cleanup();
free_pages:
	if (drv_info->tx_buffer)
		free_pages_exact(drv_info->tx_buffer, rxtx_bufsz);
	free_pages_exact(drv_info->rx_buffer, rxtx_bufsz);
free_drv_info:
	kfree(drv_info);
	return ret;
}
module_init(ffa_init);
static void __exit ffa_exit(void)
{
	ffa_notifications_cleanup();
	ffa_partitions_cleanup();
	ffa_rxtx_unmap(drv_info->vm_id);
	free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz);
	free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz);
}
module_exit(ffa_exit);
MODULE_ALIAS("arm-ffa");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Arm FF-A interface driver");
MODULE_LICENSE("GPL v2");