// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mem_encrypt.h>
#include <linux/memblock.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"
#include "irq-msi-lib.h"
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING			(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375		(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144		(1ULL << 2)
#define ITS_FLAGS_FORCE_NON_SHAREABLE			(1ULL << 3)
#define ITS_FLAGS_WORKAROUND_HISILICON_162100801	(1ULL << 4)

#define RD_LOCAL_LPI_ENABLED			BIT(0)
#define RD_LOCAL_PENDTABLE_PREALLOCATED		BIT(1)
#define RD_LOCAL_MEMRESERVE_DONE		BIT(2)
static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
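/*
 * For example (illustrative arithmetic): with lpi_id_bits == 16, the
 * property table needs one byte per possible LPI, i.e. 2^16 = 64KB,
 * and the pending table needs one bit per interrupt ID, i.e.
 * 2^16 / 8 = 8KB, which the ALIGN(..., SZ_64K) above still rounds up
 * to a single 64KB-aligned 64KB allocation.
 */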
static u8 __ro_after_init lpi_prop_prio;
static struct its_node *find_4_1_its(void);
/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
        u64			target_address;
        u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
        void		*base;
        u64		val;
        u32		order;
        u32		psz;
};
/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
        raw_spinlock_t		lock;
        struct mutex		dev_alloc_lock;
        struct list_head	entry;
        void __iomem		*base;
        void __iomem		*sgir_base;
        phys_addr_t		phys_base;
        struct its_cmd_block	*cmd_base;
        struct its_cmd_block	*cmd_write;
        struct its_baser	tables[GITS_BASER_NR_REGS];
        struct its_collection	*collections;
        struct fwnode_handle	*fwnode_handle;
        u64			(*get_msi_base)(struct its_device *its_dev);
        u64			typer;
        struct list_head	its_device_list;
        u64			flags;
        unsigned long		list_nr;
        int			numa_node;
        unsigned int		msi_domain_flags;
        u32			pre_its_base;	/* for Socionext Synquacer */
        int			vlpi_redist_offset;
};
#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS						\
	({								\
		int nvpeid = 16;					\
		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
				      GICD_TYPER2_VID);			\
									\
		nvpeid;							\
	})
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))
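/*
 * Illustrative example: unless a GICv4.1 redistributor advertises
 * GICD_TYPER2.VIL, the expression above evaluates to 16 bits, giving
 * ITS_MAX_VPEID == 65536 possible vPEs; with VIL set, the width comes
 * from GICD_TYPER2.VID + 1 instead.
 */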
/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
struct event_lpi_map {
        unsigned long		*lpi_map;
        u16			*col_map;
        irq_hw_number_t		lpi_base;
        int			nr_lpis;
        raw_spinlock_t		vlpi_lock;
        struct its_vm		*vm;
        struct its_vlpi_map	*vlpi_maps;
        int			nr_vlpis;
};
/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
        struct list_head	entry;
        struct its_node		*its;
        struct event_lpi_map	event_map;
        void			*itt;
        u32			nr_ites;
        u32			device_id;
};
static struct {
        raw_spinlock_t		lock;
        struct its_device	*dev;
        struct its_vpe		**vpes;
        int			next_victim;
} vpe_proxy;
struct cpu_lpi_count {
        atomic_t	managed;
        atomic_t	unmanaged;
};

static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
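/*
 * Note on the SZ_128K offset above: a GICv4 redistributor exposes its
 * vLPI frame after the usual pair of 64KB frames (RD_base and
 * SGI_base), so the vLPI registers sit two 64KB frames above rd_base.
 */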
static struct page *its_alloc_pages_node(int node, gfp_t gfp,
					 unsigned int order)
{
        struct page *page;
        int ret = 0;

        page = alloc_pages_node(node, gfp, order);
        if (!page)
                return NULL;

        ret = set_memory_decrypted((unsigned long)page_address(page),
                                   1 << order);
        /*
         * If set_memory_decrypted() fails then we don't know what state the
         * page is in, so we can't free it. Instead we leak it.
         * set_memory_decrypted() will already have WARNed.
         */
        if (ret)
                return NULL;

        return page;
}

static struct page *its_alloc_pages(gfp_t gfp, unsigned int order)
{
        return its_alloc_pages_node(NUMA_NO_NODE, gfp, order);
}

static void its_free_pages(void *addr, unsigned int order)
{
        /*
         * If the memory cannot be encrypted again then we must leak the pages.
         * set_memory_encrypted() will already have WARNed.
         */
        if (set_memory_encrypted((unsigned long)addr, 1 << order))
                return;
        free_pages((unsigned long)addr, order);
}
248 static struct gen_pool
*itt_pool
;
250 static void *itt_alloc_pool(int node
, int size
)
255 if (size
>= PAGE_SIZE
) {
256 page
= its_alloc_pages_node(node
, GFP_KERNEL
| __GFP_ZERO
, get_order(size
));
258 return page
? page_address(page
) : NULL
;
262 addr
= gen_pool_alloc(itt_pool
, size
);
266 page
= its_alloc_pages_node(node
, GFP_KERNEL
| __GFP_ZERO
, 0);
270 gen_pool_add(itt_pool
, (unsigned long)page_address(page
), PAGE_SIZE
, node
);
276 static void itt_free_pool(void *addr
, int size
)
281 if (size
>= PAGE_SIZE
) {
282 its_free_pages(addr
, get_order(size
));
286 gen_pool_free(itt_pool
, (unsigned long)addr
, size
);
290 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
291 * always have vSGIs mapped.
293 static bool require_its_list_vmovp(struct its_vm
*vm
, struct its_node
*its
)
295 return (gic_rdists
->has_rvpeid
|| vm
->vlpi_count
[its
->list_nr
]);
298 static bool rdists_support_shareable(void)
300 return !(gic_rdists
->flags
& RDIST_FLAGS_FORCE_NON_SHAREABLE
);
303 static u16
get_its_list(struct its_vm
*vm
)
305 struct its_node
*its
;
306 unsigned long its_list
= 0;
308 list_for_each_entry(its
, &its_nodes
, entry
) {
312 if (require_its_list_vmovp(vm
, its
))
313 __set_bit(its
->list_nr
, &its_list
);
316 return (u16
)its_list
;
static inline u32 its_get_event_id(struct irq_data *d)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        return d->hwirq - its_dev->event_map.lpi_base;
}
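/*
 * For example (illustrative numbers): a device whose event_map.lpi_base
 * is 8192 and which owns 32 LPIs maps hwirq 8192..8223 to event IDs
 * 0..31; the event ID is what gets encoded in ITS commands and in the
 * MSI data written to GITS_TRANSLATER.
 */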
325 static struct its_collection
*dev_event_to_col(struct its_device
*its_dev
,
328 struct its_node
*its
= its_dev
->its
;
330 return its
->collections
+ its_dev
->event_map
.col_map
[event
];
333 static struct its_vlpi_map
*dev_event_to_vlpi_map(struct its_device
*its_dev
,
336 if (WARN_ON_ONCE(event
>= its_dev
->event_map
.nr_lpis
))
339 return &its_dev
->event_map
.vlpi_maps
[event
];
342 static struct its_vlpi_map
*get_vlpi_map(struct irq_data
*d
)
344 if (irqd_is_forwarded_to_vcpu(d
)) {
345 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
346 u32 event
= its_get_event_id(d
);
348 return dev_event_to_vlpi_map(its_dev
, event
);
354 static int vpe_to_cpuid_lock(struct its_vpe
*vpe
, unsigned long *flags
)
356 raw_spin_lock_irqsave(&vpe
->vpe_lock
, *flags
);
360 static void vpe_to_cpuid_unlock(struct its_vpe
*vpe
, unsigned long flags
)
362 raw_spin_unlock_irqrestore(&vpe
->vpe_lock
, flags
);
365 static struct irq_chip its_vpe_irq_chip
;
367 static int irq_to_cpuid_lock(struct irq_data
*d
, unsigned long *flags
)
369 struct its_vpe
*vpe
= NULL
;
372 if (d
->chip
== &its_vpe_irq_chip
) {
373 vpe
= irq_data_get_irq_chip_data(d
);
375 struct its_vlpi_map
*map
= get_vlpi_map(d
);
381 cpu
= vpe_to_cpuid_lock(vpe
, flags
);
383 /* Physical LPIs are already locked via the irq_desc lock */
384 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
385 cpu
= its_dev
->event_map
.col_map
[its_get_event_id(d
)];
386 /* Keep GCC quiet... */
393 static void irq_to_cpuid_unlock(struct irq_data
*d
, unsigned long flags
)
395 struct its_vpe
*vpe
= NULL
;
397 if (d
->chip
== &its_vpe_irq_chip
) {
398 vpe
= irq_data_get_irq_chip_data(d
);
400 struct its_vlpi_map
*map
= get_vlpi_map(d
);
406 vpe_to_cpuid_unlock(vpe
, flags
);
409 static struct its_collection
*valid_col(struct its_collection
*col
)
411 if (WARN_ON_ONCE(col
->target_address
& GENMASK_ULL(15, 0)))
417 static struct its_vpe
*valid_vpe(struct its_node
*its
, struct its_vpe
*vpe
)
419 if (valid_col(its
->collections
+ vpe
->col_idx
))
426 * ITS command descriptors - parameters to be encoded in a command
429 struct its_cmd_desc
{
432 struct its_device
*dev
;
437 struct its_device
*dev
;
442 struct its_device
*dev
;
447 struct its_device
*dev
;
452 struct its_collection
*col
;
457 struct its_device
*dev
;
463 struct its_device
*dev
;
464 struct its_collection
*col
;
469 struct its_device
*dev
;
474 struct its_collection
*col
;
483 struct its_collection
*col
;
489 struct its_device
*dev
;
497 struct its_device
*dev
;
504 struct its_collection
*col
;
/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
        union {
                u64	raw_cmd[4];
                __le64	raw_cmd_le[4];
        };
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
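/*
 * Illustrative arithmetic: each its_cmd_block is 4 x 64bit = 32 bytes,
 * so the 64KB command queue holds ITS_CMD_QUEUE_NR_ENTRIES == 2048
 * commands before wrapping.
 */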
typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
        u64 mask = GENMASK_ULL(h, l);

        *raw_cmd &= ~mask;
        *raw_cmd |= (val << l) & mask;
}
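/*
 * A minimal illustration of the field encoders built on top of this
 * helper: encoding a DeviceID places the value in bits [63:32] of the
 * first command word, e.g.
 *
 *	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
 *
 * which masks 'devid' with GENMASK_ULL(63, 32) after shifting it left
 * by 32.
 */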
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
        its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}
557 static void its_encode_devid(struct its_cmd_block
*cmd
, u32 devid
)
559 its_mask_encode(&cmd
->raw_cmd
[0], devid
, 63, 32);
562 static void its_encode_event_id(struct its_cmd_block
*cmd
, u32 id
)
564 its_mask_encode(&cmd
->raw_cmd
[1], id
, 31, 0);
567 static void its_encode_phys_id(struct its_cmd_block
*cmd
, u32 phys_id
)
569 its_mask_encode(&cmd
->raw_cmd
[1], phys_id
, 63, 32);
572 static void its_encode_size(struct its_cmd_block
*cmd
, u8 size
)
574 its_mask_encode(&cmd
->raw_cmd
[1], size
, 4, 0);
577 static void its_encode_itt(struct its_cmd_block
*cmd
, u64 itt_addr
)
579 its_mask_encode(&cmd
->raw_cmd
[2], itt_addr
>> 8, 51, 8);
582 static void its_encode_valid(struct its_cmd_block
*cmd
, int valid
)
584 its_mask_encode(&cmd
->raw_cmd
[2], !!valid
, 63, 63);
587 static void its_encode_target(struct its_cmd_block
*cmd
, u64 target_addr
)
589 its_mask_encode(&cmd
->raw_cmd
[2], target_addr
>> 16, 51, 16);
592 static void its_encode_collection(struct its_cmd_block
*cmd
, u16 col
)
594 its_mask_encode(&cmd
->raw_cmd
[2], col
, 15, 0);
597 static void its_encode_vpeid(struct its_cmd_block
*cmd
, u16 vpeid
)
599 its_mask_encode(&cmd
->raw_cmd
[1], vpeid
, 47, 32);
602 static void its_encode_virt_id(struct its_cmd_block
*cmd
, u32 virt_id
)
604 its_mask_encode(&cmd
->raw_cmd
[2], virt_id
, 31, 0);
607 static void its_encode_db_phys_id(struct its_cmd_block
*cmd
, u32 db_phys_id
)
609 its_mask_encode(&cmd
->raw_cmd
[2], db_phys_id
, 63, 32);
612 static void its_encode_db_valid(struct its_cmd_block
*cmd
, bool db_valid
)
614 its_mask_encode(&cmd
->raw_cmd
[2], db_valid
, 0, 0);
617 static void its_encode_seq_num(struct its_cmd_block
*cmd
, u16 seq_num
)
619 its_mask_encode(&cmd
->raw_cmd
[0], seq_num
, 47, 32);
622 static void its_encode_its_list(struct its_cmd_block
*cmd
, u16 its_list
)
624 its_mask_encode(&cmd
->raw_cmd
[1], its_list
, 15, 0);
627 static void its_encode_vpt_addr(struct its_cmd_block
*cmd
, u64 vpt_pa
)
629 its_mask_encode(&cmd
->raw_cmd
[3], vpt_pa
>> 16, 51, 16);
632 static void its_encode_vpt_size(struct its_cmd_block
*cmd
, u8 vpt_size
)
634 its_mask_encode(&cmd
->raw_cmd
[3], vpt_size
, 4, 0);
637 static void its_encode_vconf_addr(struct its_cmd_block
*cmd
, u64 vconf_pa
)
639 its_mask_encode(&cmd
->raw_cmd
[0], vconf_pa
>> 16, 51, 16);
642 static void its_encode_alloc(struct its_cmd_block
*cmd
, bool alloc
)
644 its_mask_encode(&cmd
->raw_cmd
[0], alloc
, 8, 8);
647 static void its_encode_ptz(struct its_cmd_block
*cmd
, bool ptz
)
649 its_mask_encode(&cmd
->raw_cmd
[0], ptz
, 9, 9);
652 static void its_encode_vmapp_default_db(struct its_cmd_block
*cmd
,
655 its_mask_encode(&cmd
->raw_cmd
[1], vpe_db_lpi
, 31, 0);
658 static void its_encode_vmovp_default_db(struct its_cmd_block
*cmd
,
661 its_mask_encode(&cmd
->raw_cmd
[3], vpe_db_lpi
, 31, 0);
664 static void its_encode_db(struct its_cmd_block
*cmd
, bool db
)
666 its_mask_encode(&cmd
->raw_cmd
[2], db
, 63, 63);
669 static void its_encode_sgi_intid(struct its_cmd_block
*cmd
, u8 sgi
)
671 its_mask_encode(&cmd
->raw_cmd
[0], sgi
, 35, 32);
674 static void its_encode_sgi_priority(struct its_cmd_block
*cmd
, u8 prio
)
676 its_mask_encode(&cmd
->raw_cmd
[0], prio
>> 4, 23, 20);
679 static void its_encode_sgi_group(struct its_cmd_block
*cmd
, bool grp
)
681 its_mask_encode(&cmd
->raw_cmd
[0], grp
, 10, 10);
684 static void its_encode_sgi_clear(struct its_cmd_block
*cmd
, bool clr
)
686 its_mask_encode(&cmd
->raw_cmd
[0], clr
, 9, 9);
689 static void its_encode_sgi_enable(struct its_cmd_block
*cmd
, bool en
)
691 its_mask_encode(&cmd
->raw_cmd
[0], en
, 8, 8);
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
        /* Let's fixup BE commands */
        cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
        cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
        cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
        cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
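/*
 * On a little-endian kernel cpu_to_le64() is a no-op, so the fixup
 * above only costs anything on big-endian builds, where the ITS still
 * expects little-endian command words in memory.
 */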
703 static struct its_collection
*its_build_mapd_cmd(struct its_node
*its
,
704 struct its_cmd_block
*cmd
,
705 struct its_cmd_desc
*desc
)
707 unsigned long itt_addr
;
708 u8 size
= ilog2(desc
->its_mapd_cmd
.dev
->nr_ites
);
710 itt_addr
= virt_to_phys(desc
->its_mapd_cmd
.dev
->itt
);
712 its_encode_cmd(cmd
, GITS_CMD_MAPD
);
713 its_encode_devid(cmd
, desc
->its_mapd_cmd
.dev
->device_id
);
714 its_encode_size(cmd
, size
- 1);
715 its_encode_itt(cmd
, itt_addr
);
716 its_encode_valid(cmd
, desc
->its_mapd_cmd
.valid
);
723 static struct its_collection
*its_build_mapc_cmd(struct its_node
*its
,
724 struct its_cmd_block
*cmd
,
725 struct its_cmd_desc
*desc
)
727 its_encode_cmd(cmd
, GITS_CMD_MAPC
);
728 its_encode_collection(cmd
, desc
->its_mapc_cmd
.col
->col_id
);
729 its_encode_target(cmd
, desc
->its_mapc_cmd
.col
->target_address
);
730 its_encode_valid(cmd
, desc
->its_mapc_cmd
.valid
);
734 return desc
->its_mapc_cmd
.col
;
737 static struct its_collection
*its_build_mapti_cmd(struct its_node
*its
,
738 struct its_cmd_block
*cmd
,
739 struct its_cmd_desc
*desc
)
741 struct its_collection
*col
;
743 col
= dev_event_to_col(desc
->its_mapti_cmd
.dev
,
744 desc
->its_mapti_cmd
.event_id
);
746 its_encode_cmd(cmd
, GITS_CMD_MAPTI
);
747 its_encode_devid(cmd
, desc
->its_mapti_cmd
.dev
->device_id
);
748 its_encode_event_id(cmd
, desc
->its_mapti_cmd
.event_id
);
749 its_encode_phys_id(cmd
, desc
->its_mapti_cmd
.phys_id
);
750 its_encode_collection(cmd
, col
->col_id
);
754 return valid_col(col
);
757 static struct its_collection
*its_build_movi_cmd(struct its_node
*its
,
758 struct its_cmd_block
*cmd
,
759 struct its_cmd_desc
*desc
)
761 struct its_collection
*col
;
763 col
= dev_event_to_col(desc
->its_movi_cmd
.dev
,
764 desc
->its_movi_cmd
.event_id
);
766 its_encode_cmd(cmd
, GITS_CMD_MOVI
);
767 its_encode_devid(cmd
, desc
->its_movi_cmd
.dev
->device_id
);
768 its_encode_event_id(cmd
, desc
->its_movi_cmd
.event_id
);
769 its_encode_collection(cmd
, desc
->its_movi_cmd
.col
->col_id
);
773 return valid_col(col
);
776 static struct its_collection
*its_build_discard_cmd(struct its_node
*its
,
777 struct its_cmd_block
*cmd
,
778 struct its_cmd_desc
*desc
)
780 struct its_collection
*col
;
782 col
= dev_event_to_col(desc
->its_discard_cmd
.dev
,
783 desc
->its_discard_cmd
.event_id
);
785 its_encode_cmd(cmd
, GITS_CMD_DISCARD
);
786 its_encode_devid(cmd
, desc
->its_discard_cmd
.dev
->device_id
);
787 its_encode_event_id(cmd
, desc
->its_discard_cmd
.event_id
);
791 return valid_col(col
);
794 static struct its_collection
*its_build_inv_cmd(struct its_node
*its
,
795 struct its_cmd_block
*cmd
,
796 struct its_cmd_desc
*desc
)
798 struct its_collection
*col
;
800 col
= dev_event_to_col(desc
->its_inv_cmd
.dev
,
801 desc
->its_inv_cmd
.event_id
);
803 its_encode_cmd(cmd
, GITS_CMD_INV
);
804 its_encode_devid(cmd
, desc
->its_inv_cmd
.dev
->device_id
);
805 its_encode_event_id(cmd
, desc
->its_inv_cmd
.event_id
);
809 return valid_col(col
);
812 static struct its_collection
*its_build_int_cmd(struct its_node
*its
,
813 struct its_cmd_block
*cmd
,
814 struct its_cmd_desc
*desc
)
816 struct its_collection
*col
;
818 col
= dev_event_to_col(desc
->its_int_cmd
.dev
,
819 desc
->its_int_cmd
.event_id
);
821 its_encode_cmd(cmd
, GITS_CMD_INT
);
822 its_encode_devid(cmd
, desc
->its_int_cmd
.dev
->device_id
);
823 its_encode_event_id(cmd
, desc
->its_int_cmd
.event_id
);
827 return valid_col(col
);
830 static struct its_collection
*its_build_clear_cmd(struct its_node
*its
,
831 struct its_cmd_block
*cmd
,
832 struct its_cmd_desc
*desc
)
834 struct its_collection
*col
;
836 col
= dev_event_to_col(desc
->its_clear_cmd
.dev
,
837 desc
->its_clear_cmd
.event_id
);
839 its_encode_cmd(cmd
, GITS_CMD_CLEAR
);
840 its_encode_devid(cmd
, desc
->its_clear_cmd
.dev
->device_id
);
841 its_encode_event_id(cmd
, desc
->its_clear_cmd
.event_id
);
845 return valid_col(col
);
848 static struct its_collection
*its_build_invall_cmd(struct its_node
*its
,
849 struct its_cmd_block
*cmd
,
850 struct its_cmd_desc
*desc
)
852 its_encode_cmd(cmd
, GITS_CMD_INVALL
);
853 its_encode_collection(cmd
, desc
->its_invall_cmd
.col
->col_id
);
857 return desc
->its_invall_cmd
.col
;
860 static struct its_vpe
*its_build_vinvall_cmd(struct its_node
*its
,
861 struct its_cmd_block
*cmd
,
862 struct its_cmd_desc
*desc
)
864 its_encode_cmd(cmd
, GITS_CMD_VINVALL
);
865 its_encode_vpeid(cmd
, desc
->its_vinvall_cmd
.vpe
->vpe_id
);
869 return valid_vpe(its
, desc
->its_vinvall_cmd
.vpe
);
872 static struct its_vpe
*its_build_vmapp_cmd(struct its_node
*its
,
873 struct its_cmd_block
*cmd
,
874 struct its_cmd_desc
*desc
)
876 struct its_vpe
*vpe
= valid_vpe(its
, desc
->its_vmapp_cmd
.vpe
);
877 unsigned long vpt_addr
, vconf_addr
;
881 its_encode_cmd(cmd
, GITS_CMD_VMAPP
);
882 its_encode_vpeid(cmd
, desc
->its_vmapp_cmd
.vpe
->vpe_id
);
883 its_encode_valid(cmd
, desc
->its_vmapp_cmd
.valid
);
885 if (!desc
->its_vmapp_cmd
.valid
) {
886 alloc
= !atomic_dec_return(&desc
->its_vmapp_cmd
.vpe
->vmapp_count
);
888 its_encode_alloc(cmd
, alloc
);
890 * Unmapping a VPE is self-synchronizing on GICv4.1,
891 * no need to issue a VSYNC.
899 vpt_addr
= virt_to_phys(page_address(desc
->its_vmapp_cmd
.vpe
->vpt_page
));
900 target
= desc
->its_vmapp_cmd
.col
->target_address
+ its
->vlpi_redist_offset
;
902 its_encode_target(cmd
, target
);
903 its_encode_vpt_addr(cmd
, vpt_addr
);
904 its_encode_vpt_size(cmd
, LPI_NRBITS
- 1);
906 alloc
= !atomic_fetch_inc(&desc
->its_vmapp_cmd
.vpe
->vmapp_count
);
911 vconf_addr
= virt_to_phys(page_address(desc
->its_vmapp_cmd
.vpe
->its_vm
->vprop_page
));
913 its_encode_alloc(cmd
, alloc
);
916 * GICv4.1 provides a way to get the VLPI state, which needs the vPE
917 * to be unmapped first, and in this case, we may remap the vPE
918 * back while the VPT is not empty. So we can't assume that the
919 * VPT is empty on map. This is why we never advertise PTZ.
921 its_encode_ptz(cmd
, false);
922 its_encode_vconf_addr(cmd
, vconf_addr
);
923 its_encode_vmapp_default_db(cmd
, desc
->its_vmapp_cmd
.vpe
->vpe_db_lpi
);
931 static struct its_vpe
*its_build_vmapti_cmd(struct its_node
*its
,
932 struct its_cmd_block
*cmd
,
933 struct its_cmd_desc
*desc
)
937 if (!is_v4_1(its
) && desc
->its_vmapti_cmd
.db_enabled
)
938 db
= desc
->its_vmapti_cmd
.vpe
->vpe_db_lpi
;
942 its_encode_cmd(cmd
, GITS_CMD_VMAPTI
);
943 its_encode_devid(cmd
, desc
->its_vmapti_cmd
.dev
->device_id
);
944 its_encode_vpeid(cmd
, desc
->its_vmapti_cmd
.vpe
->vpe_id
);
945 its_encode_event_id(cmd
, desc
->its_vmapti_cmd
.event_id
);
946 its_encode_db_phys_id(cmd
, db
);
947 its_encode_virt_id(cmd
, desc
->its_vmapti_cmd
.virt_id
);
951 return valid_vpe(its
, desc
->its_vmapti_cmd
.vpe
);
954 static struct its_vpe
*its_build_vmovi_cmd(struct its_node
*its
,
955 struct its_cmd_block
*cmd
,
956 struct its_cmd_desc
*desc
)
960 if (!is_v4_1(its
) && desc
->its_vmovi_cmd
.db_enabled
)
961 db
= desc
->its_vmovi_cmd
.vpe
->vpe_db_lpi
;
965 its_encode_cmd(cmd
, GITS_CMD_VMOVI
);
966 its_encode_devid(cmd
, desc
->its_vmovi_cmd
.dev
->device_id
);
967 its_encode_vpeid(cmd
, desc
->its_vmovi_cmd
.vpe
->vpe_id
);
968 its_encode_event_id(cmd
, desc
->its_vmovi_cmd
.event_id
);
969 its_encode_db_phys_id(cmd
, db
);
970 its_encode_db_valid(cmd
, true);
974 return valid_vpe(its
, desc
->its_vmovi_cmd
.vpe
);
977 static struct its_vpe
*its_build_vmovp_cmd(struct its_node
*its
,
978 struct its_cmd_block
*cmd
,
979 struct its_cmd_desc
*desc
)
983 target
= desc
->its_vmovp_cmd
.col
->target_address
+ its
->vlpi_redist_offset
;
984 its_encode_cmd(cmd
, GITS_CMD_VMOVP
);
985 its_encode_seq_num(cmd
, desc
->its_vmovp_cmd
.seq_num
);
986 its_encode_its_list(cmd
, desc
->its_vmovp_cmd
.its_list
);
987 its_encode_vpeid(cmd
, desc
->its_vmovp_cmd
.vpe
->vpe_id
);
988 its_encode_target(cmd
, target
);
991 its_encode_db(cmd
, true);
992 its_encode_vmovp_default_db(cmd
, desc
->its_vmovp_cmd
.vpe
->vpe_db_lpi
);
997 return valid_vpe(its
, desc
->its_vmovp_cmd
.vpe
);
1000 static struct its_vpe
*its_build_vinv_cmd(struct its_node
*its
,
1001 struct its_cmd_block
*cmd
,
1002 struct its_cmd_desc
*desc
)
1004 struct its_vlpi_map
*map
;
1006 map
= dev_event_to_vlpi_map(desc
->its_inv_cmd
.dev
,
1007 desc
->its_inv_cmd
.event_id
);
1009 its_encode_cmd(cmd
, GITS_CMD_INV
);
1010 its_encode_devid(cmd
, desc
->its_inv_cmd
.dev
->device_id
);
1011 its_encode_event_id(cmd
, desc
->its_inv_cmd
.event_id
);
1015 return valid_vpe(its
, map
->vpe
);
1018 static struct its_vpe
*its_build_vint_cmd(struct its_node
*its
,
1019 struct its_cmd_block
*cmd
,
1020 struct its_cmd_desc
*desc
)
1022 struct its_vlpi_map
*map
;
1024 map
= dev_event_to_vlpi_map(desc
->its_int_cmd
.dev
,
1025 desc
->its_int_cmd
.event_id
);
1027 its_encode_cmd(cmd
, GITS_CMD_INT
);
1028 its_encode_devid(cmd
, desc
->its_int_cmd
.dev
->device_id
);
1029 its_encode_event_id(cmd
, desc
->its_int_cmd
.event_id
);
1033 return valid_vpe(its
, map
->vpe
);
1036 static struct its_vpe
*its_build_vclear_cmd(struct its_node
*its
,
1037 struct its_cmd_block
*cmd
,
1038 struct its_cmd_desc
*desc
)
1040 struct its_vlpi_map
*map
;
1042 map
= dev_event_to_vlpi_map(desc
->its_clear_cmd
.dev
,
1043 desc
->its_clear_cmd
.event_id
);
1045 its_encode_cmd(cmd
, GITS_CMD_CLEAR
);
1046 its_encode_devid(cmd
, desc
->its_clear_cmd
.dev
->device_id
);
1047 its_encode_event_id(cmd
, desc
->its_clear_cmd
.event_id
);
1051 return valid_vpe(its
, map
->vpe
);
1054 static struct its_vpe
*its_build_invdb_cmd(struct its_node
*its
,
1055 struct its_cmd_block
*cmd
,
1056 struct its_cmd_desc
*desc
)
1058 if (WARN_ON(!is_v4_1(its
)))
1061 its_encode_cmd(cmd
, GITS_CMD_INVDB
);
1062 its_encode_vpeid(cmd
, desc
->its_invdb_cmd
.vpe
->vpe_id
);
1066 return valid_vpe(its
, desc
->its_invdb_cmd
.vpe
);
1069 static struct its_vpe
*its_build_vsgi_cmd(struct its_node
*its
,
1070 struct its_cmd_block
*cmd
,
1071 struct its_cmd_desc
*desc
)
1073 if (WARN_ON(!is_v4_1(its
)))
1076 its_encode_cmd(cmd
, GITS_CMD_VSGI
);
1077 its_encode_vpeid(cmd
, desc
->its_vsgi_cmd
.vpe
->vpe_id
);
1078 its_encode_sgi_intid(cmd
, desc
->its_vsgi_cmd
.sgi
);
1079 its_encode_sgi_priority(cmd
, desc
->its_vsgi_cmd
.priority
);
1080 its_encode_sgi_group(cmd
, desc
->its_vsgi_cmd
.group
);
1081 its_encode_sgi_clear(cmd
, desc
->its_vsgi_cmd
.clear
);
1082 its_encode_sgi_enable(cmd
, desc
->its_vsgi_cmd
.enable
);
1086 return valid_vpe(its
, desc
->its_vsgi_cmd
.vpe
);
static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
        return (ptr - its->cmd_base) * sizeof(*ptr);
}
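/*
 * Illustrative example: the fifth command slot (ptr == cmd_base + 4)
 * translates to a byte offset of 4 * sizeof(struct its_cmd_block) ==
 * 128, which is the unit used when programming GITS_CWRITER and when
 * reading GITS_CREADR.
 */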
static int its_queue_full(struct its_node *its)
{
        int widx;
        int ridx;

        widx = its->cmd_write - its->cmd_base;
        ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

        /* This is incredibly unlikely to happen, unless the ITS locks up. */
        if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
                return 1;

        return 0;
}
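/*
 * Illustrative example of the wrap-around check above: with 2048
 * entries, a writer at slot 2047 and a reader at slot 0 means the
 * queue is full, since advancing the write pointer would make it
 * catch up with GITS_CREADR ((2047 + 1) % 2048 == 0).
 */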
1110 static struct its_cmd_block
*its_allocate_entry(struct its_node
*its
)
1112 struct its_cmd_block
*cmd
;
1113 u32 count
= 1000000; /* 1s! */
1115 while (its_queue_full(its
)) {
1118 pr_err_ratelimited("ITS queue not draining\n");
1125 cmd
= its
->cmd_write
++;
1127 /* Handle queue wrapping */
1128 if (its
->cmd_write
== (its
->cmd_base
+ ITS_CMD_QUEUE_NR_ENTRIES
))
1129 its
->cmd_write
= its
->cmd_base
;
1132 cmd
->raw_cmd
[0] = 0;
1133 cmd
->raw_cmd
[1] = 0;
1134 cmd
->raw_cmd
[2] = 0;
1135 cmd
->raw_cmd
[3] = 0;
1140 static struct its_cmd_block
*its_post_commands(struct its_node
*its
)
1142 u64 wr
= its_cmd_ptr_to_offset(its
, its
->cmd_write
);
1144 writel_relaxed(wr
, its
->base
+ GITS_CWRITER
);
1146 return its
->cmd_write
;
1149 static void its_flush_cmd(struct its_node
*its
, struct its_cmd_block
*cmd
)
1152 * Make sure the commands written to memory are observable by
1155 if (its
->flags
& ITS_FLAGS_CMDQ_NEEDS_FLUSHING
)
1156 gic_flush_dcache_to_poc(cmd
, sizeof(*cmd
));
1161 static int its_wait_for_range_completion(struct its_node
*its
,
1163 struct its_cmd_block
*to
)
1165 u64 rd_idx
, to_idx
, linear_idx
;
1166 u32 count
= 1000000; /* 1s! */
1168 /* Linearize to_idx if the command set has wrapped around */
1169 to_idx
= its_cmd_ptr_to_offset(its
, to
);
1170 if (to_idx
< prev_idx
)
1171 to_idx
+= ITS_CMD_QUEUE_SZ
;
1173 linear_idx
= prev_idx
;
1178 rd_idx
= readl_relaxed(its
->base
+ GITS_CREADR
);
1181 * Compute the read pointer progress, taking the
1182 * potential wrap-around into account.
1184 delta
= rd_idx
- prev_idx
;
1185 if (rd_idx
< prev_idx
)
1186 delta
+= ITS_CMD_QUEUE_SZ
;
1188 linear_idx
+= delta
;
1189 if (linear_idx
>= to_idx
)
1194 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1195 to_idx
, linear_idx
);
/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screewed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}
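/*
 * The two users below expand this macro into its_send_single_command()
 * (SYNC'd on a collection) and its_send_single_vcommand() (VSYNC'd on
 * a vPE). Conceptually, each expansion performs, under its->lock:
 *
 *	cmd = its_allocate_entry(its);
 *	sync_obj = builder(its, cmd, desc);
 *	// if the builder returned a target, queue a SYNC/VSYNC as well
 *	next_cmd = its_post_commands(its);
 *	its_wait_for_range_completion(its, rd_idx, next_cmd);
 */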
static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
        its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
        its_encode_target(sync_cmd, sync_col->target_address);

        its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)
1258 static void its_build_vsync_cmd(struct its_node
*its
,
1259 struct its_cmd_block
*sync_cmd
,
1260 struct its_vpe
*sync_vpe
)
1262 its_encode_cmd(sync_cmd
, GITS_CMD_VSYNC
);
1263 its_encode_vpeid(sync_cmd
, sync_vpe
->vpe_id
);
1265 its_fixup_cmd(sync_cmd
);
1268 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand
, its_cmd_vbuilder_t
,
1269 struct its_vpe
, its_build_vsync_cmd
)
1271 static void its_send_int(struct its_device
*dev
, u32 event_id
)
1273 struct its_cmd_desc desc
;
1275 desc
.its_int_cmd
.dev
= dev
;
1276 desc
.its_int_cmd
.event_id
= event_id
;
1278 its_send_single_command(dev
->its
, its_build_int_cmd
, &desc
);
1281 static void its_send_clear(struct its_device
*dev
, u32 event_id
)
1283 struct its_cmd_desc desc
;
1285 desc
.its_clear_cmd
.dev
= dev
;
1286 desc
.its_clear_cmd
.event_id
= event_id
;
1288 its_send_single_command(dev
->its
, its_build_clear_cmd
, &desc
);
1291 static void its_send_inv(struct its_device
*dev
, u32 event_id
)
1293 struct its_cmd_desc desc
;
1295 desc
.its_inv_cmd
.dev
= dev
;
1296 desc
.its_inv_cmd
.event_id
= event_id
;
1298 its_send_single_command(dev
->its
, its_build_inv_cmd
, &desc
);
1301 static void its_send_mapd(struct its_device
*dev
, int valid
)
1303 struct its_cmd_desc desc
;
1305 desc
.its_mapd_cmd
.dev
= dev
;
1306 desc
.its_mapd_cmd
.valid
= !!valid
;
1308 its_send_single_command(dev
->its
, its_build_mapd_cmd
, &desc
);
1311 static void its_send_mapc(struct its_node
*its
, struct its_collection
*col
,
1314 struct its_cmd_desc desc
;
1316 desc
.its_mapc_cmd
.col
= col
;
1317 desc
.its_mapc_cmd
.valid
= !!valid
;
1319 its_send_single_command(its
, its_build_mapc_cmd
, &desc
);
1322 static void its_send_mapti(struct its_device
*dev
, u32 irq_id
, u32 id
)
1324 struct its_cmd_desc desc
;
1326 desc
.its_mapti_cmd
.dev
= dev
;
1327 desc
.its_mapti_cmd
.phys_id
= irq_id
;
1328 desc
.its_mapti_cmd
.event_id
= id
;
1330 its_send_single_command(dev
->its
, its_build_mapti_cmd
, &desc
);
1333 static void its_send_movi(struct its_device
*dev
,
1334 struct its_collection
*col
, u32 id
)
1336 struct its_cmd_desc desc
;
1338 desc
.its_movi_cmd
.dev
= dev
;
1339 desc
.its_movi_cmd
.col
= col
;
1340 desc
.its_movi_cmd
.event_id
= id
;
1342 its_send_single_command(dev
->its
, its_build_movi_cmd
, &desc
);
1345 static void its_send_discard(struct its_device
*dev
, u32 id
)
1347 struct its_cmd_desc desc
;
1349 desc
.its_discard_cmd
.dev
= dev
;
1350 desc
.its_discard_cmd
.event_id
= id
;
1352 its_send_single_command(dev
->its
, its_build_discard_cmd
, &desc
);
1355 static void its_send_invall(struct its_node
*its
, struct its_collection
*col
)
1357 struct its_cmd_desc desc
;
1359 desc
.its_invall_cmd
.col
= col
;
1361 its_send_single_command(its
, its_build_invall_cmd
, &desc
);
1364 static void its_send_vmapti(struct its_device
*dev
, u32 id
)
1366 struct its_vlpi_map
*map
= dev_event_to_vlpi_map(dev
, id
);
1367 struct its_cmd_desc desc
;
1369 desc
.its_vmapti_cmd
.vpe
= map
->vpe
;
1370 desc
.its_vmapti_cmd
.dev
= dev
;
1371 desc
.its_vmapti_cmd
.virt_id
= map
->vintid
;
1372 desc
.its_vmapti_cmd
.event_id
= id
;
1373 desc
.its_vmapti_cmd
.db_enabled
= map
->db_enabled
;
1375 its_send_single_vcommand(dev
->its
, its_build_vmapti_cmd
, &desc
);
1378 static void its_send_vmovi(struct its_device
*dev
, u32 id
)
1380 struct its_vlpi_map
*map
= dev_event_to_vlpi_map(dev
, id
);
1381 struct its_cmd_desc desc
;
1383 desc
.its_vmovi_cmd
.vpe
= map
->vpe
;
1384 desc
.its_vmovi_cmd
.dev
= dev
;
1385 desc
.its_vmovi_cmd
.event_id
= id
;
1386 desc
.its_vmovi_cmd
.db_enabled
= map
->db_enabled
;
1388 its_send_single_vcommand(dev
->its
, its_build_vmovi_cmd
, &desc
);
1391 static void its_send_vmapp(struct its_node
*its
,
1392 struct its_vpe
*vpe
, bool valid
)
1394 struct its_cmd_desc desc
;
1396 desc
.its_vmapp_cmd
.vpe
= vpe
;
1397 desc
.its_vmapp_cmd
.valid
= valid
;
1398 desc
.its_vmapp_cmd
.col
= &its
->collections
[vpe
->col_idx
];
1400 its_send_single_vcommand(its
, its_build_vmapp_cmd
, &desc
);
1403 static void its_send_vmovp(struct its_vpe
*vpe
)
1405 struct its_cmd_desc desc
= {};
1406 struct its_node
*its
;
1407 int col_id
= vpe
->col_idx
;
1409 desc
.its_vmovp_cmd
.vpe
= vpe
;
1411 if (!its_list_map
) {
1412 its
= list_first_entry(&its_nodes
, struct its_node
, entry
);
1413 desc
.its_vmovp_cmd
.col
= &its
->collections
[col_id
];
1414 its_send_single_vcommand(its
, its_build_vmovp_cmd
, &desc
);
1419 * Yet another marvel of the architecture. If using the
1420 * its_list "feature", we need to make sure that all ITSs
1421 * receive all VMOVP commands in the same order. The only way
1422 * to guarantee this is to make vmovp a serialization point.
1426 guard(raw_spinlock
)(&vmovp_lock
);
1427 desc
.its_vmovp_cmd
.seq_num
= vmovp_seq_num
++;
1428 desc
.its_vmovp_cmd
.its_list
= get_its_list(vpe
->its_vm
);
1431 list_for_each_entry(its
, &its_nodes
, entry
) {
1435 if (!require_its_list_vmovp(vpe
->its_vm
, its
))
1438 desc
.its_vmovp_cmd
.col
= &its
->collections
[col_id
];
1439 its_send_single_vcommand(its
, its_build_vmovp_cmd
, &desc
);
1443 static void its_send_vinvall(struct its_node
*its
, struct its_vpe
*vpe
)
1445 struct its_cmd_desc desc
;
1447 desc
.its_vinvall_cmd
.vpe
= vpe
;
1448 its_send_single_vcommand(its
, its_build_vinvall_cmd
, &desc
);
1451 static void its_send_vinv(struct its_device
*dev
, u32 event_id
)
1453 struct its_cmd_desc desc
;
1456 * There is no real VINV command. This is just a normal INV,
1457 * with a VSYNC instead of a SYNC.
1459 desc
.its_inv_cmd
.dev
= dev
;
1460 desc
.its_inv_cmd
.event_id
= event_id
;
1462 its_send_single_vcommand(dev
->its
, its_build_vinv_cmd
, &desc
);
1465 static void its_send_vint(struct its_device
*dev
, u32 event_id
)
1467 struct its_cmd_desc desc
;
1470 * There is no real VINT command. This is just a normal INT,
1471 * with a VSYNC instead of a SYNC.
1473 desc
.its_int_cmd
.dev
= dev
;
1474 desc
.its_int_cmd
.event_id
= event_id
;
1476 its_send_single_vcommand(dev
->its
, its_build_vint_cmd
, &desc
);
1479 static void its_send_vclear(struct its_device
*dev
, u32 event_id
)
1481 struct its_cmd_desc desc
;
1484 * There is no real VCLEAR command. This is just a normal CLEAR,
1485 * with a VSYNC instead of a SYNC.
1487 desc
.its_clear_cmd
.dev
= dev
;
1488 desc
.its_clear_cmd
.event_id
= event_id
;
1490 its_send_single_vcommand(dev
->its
, its_build_vclear_cmd
, &desc
);
1493 static void its_send_invdb(struct its_node
*its
, struct its_vpe
*vpe
)
1495 struct its_cmd_desc desc
;
1497 desc
.its_invdb_cmd
.vpe
= vpe
;
1498 its_send_single_vcommand(its
, its_build_invdb_cmd
, &desc
);
1502 * irqchip functions - assumes MSI, mostly.
1504 static void lpi_write_config(struct irq_data
*d
, u8 clr
, u8 set
)
1506 struct its_vlpi_map
*map
= get_vlpi_map(d
);
1507 irq_hw_number_t hwirq
;
1512 va
= page_address(map
->vm
->vprop_page
);
1513 hwirq
= map
->vintid
;
1515 /* Remember the updated property */
1516 map
->properties
&= ~clr
;
1517 map
->properties
|= set
| LPI_PROP_GROUP1
;
1519 va
= gic_rdists
->prop_table_va
;
1523 cfg
= va
+ hwirq
- 8192;
1525 *cfg
|= set
| LPI_PROP_GROUP1
;
1528 * Make the above write visible to the redistributors.
1529 * And yes, we're flushing exactly: One. Single. Byte.
1532 if (gic_rdists
->flags
& RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING
)
1533 gic_flush_dcache_to_poc(cfg
, sizeof(*cfg
));
1538 static void wait_for_syncr(void __iomem
*rdbase
)
1540 while (readl_relaxed(rdbase
+ GICR_SYNCR
) & 1)
1544 static void __direct_lpi_inv(struct irq_data
*d
, u64 val
)
1546 void __iomem
*rdbase
;
1547 unsigned long flags
;
1550 /* Target the redistributor this LPI is currently routed to */
1551 cpu
= irq_to_cpuid_lock(d
, &flags
);
1552 raw_spin_lock(&gic_data_rdist_cpu(cpu
)->rd_lock
);
1554 rdbase
= per_cpu_ptr(gic_rdists
->rdist
, cpu
)->rd_base
;
1555 gic_write_lpir(val
, rdbase
+ GICR_INVLPIR
);
1556 wait_for_syncr(rdbase
);
1558 raw_spin_unlock(&gic_data_rdist_cpu(cpu
)->rd_lock
);
1559 irq_to_cpuid_unlock(d
, flags
);
1562 static void direct_lpi_inv(struct irq_data
*d
)
1564 struct its_vlpi_map
*map
= get_vlpi_map(d
);
1568 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1570 WARN_ON(!is_v4_1(its_dev
->its
));
1572 val
= GICR_INVLPIR_V
;
1573 val
|= FIELD_PREP(GICR_INVLPIR_VPEID
, map
->vpe
->vpe_id
);
1574 val
|= FIELD_PREP(GICR_INVLPIR_INTID
, map
->vintid
);
1579 __direct_lpi_inv(d
, val
);
1582 static void lpi_update_config(struct irq_data
*d
, u8 clr
, u8 set
)
1584 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1586 lpi_write_config(d
, clr
, set
);
1587 if (gic_rdists
->has_direct_lpi
&&
1588 (is_v4_1(its_dev
->its
) || !irqd_is_forwarded_to_vcpu(d
)))
1590 else if (!irqd_is_forwarded_to_vcpu(d
))
1591 its_send_inv(its_dev
, its_get_event_id(d
));
1593 its_send_vinv(its_dev
, its_get_event_id(d
));
1596 static void its_vlpi_set_doorbell(struct irq_data
*d
, bool enable
)
1598 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1599 u32 event
= its_get_event_id(d
);
1600 struct its_vlpi_map
*map
;
1603 * GICv4.1 does away with the per-LPI nonsense, nothing to do
1606 if (is_v4_1(its_dev
->its
))
1609 map
= dev_event_to_vlpi_map(its_dev
, event
);
1611 if (map
->db_enabled
== enable
)
1614 map
->db_enabled
= enable
;
1617 * More fun with the architecture:
1619 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1620 * value or to 1023, depending on the enable bit. But that
1621 * would be issuing a mapping for an /existing/ DevID+EventID
1622 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1623 * to the /same/ vPE, using this opportunity to adjust the
1624 * doorbell. Mouahahahaha. We loves it, Precious.
1626 its_send_vmovi(its_dev
, event
);
1629 static void its_mask_irq(struct irq_data
*d
)
1631 if (irqd_is_forwarded_to_vcpu(d
))
1632 its_vlpi_set_doorbell(d
, false);
1634 lpi_update_config(d
, LPI_PROP_ENABLED
, 0);
1637 static void its_unmask_irq(struct irq_data
*d
)
1639 if (irqd_is_forwarded_to_vcpu(d
))
1640 its_vlpi_set_doorbell(d
, true);
1642 lpi_update_config(d
, 0, LPI_PROP_ENABLED
);
1645 static __maybe_unused u32
its_read_lpi_count(struct irq_data
*d
, int cpu
)
1647 if (irqd_affinity_is_managed(d
))
1648 return atomic_read(&per_cpu_ptr(&cpu_lpi_count
, cpu
)->managed
);
1650 return atomic_read(&per_cpu_ptr(&cpu_lpi_count
, cpu
)->unmanaged
);
1653 static void its_inc_lpi_count(struct irq_data
*d
, int cpu
)
1655 if (irqd_affinity_is_managed(d
))
1656 atomic_inc(&per_cpu_ptr(&cpu_lpi_count
, cpu
)->managed
);
1658 atomic_inc(&per_cpu_ptr(&cpu_lpi_count
, cpu
)->unmanaged
);
1661 static void its_dec_lpi_count(struct irq_data
*d
, int cpu
)
1663 if (irqd_affinity_is_managed(d
))
1664 atomic_dec(&per_cpu_ptr(&cpu_lpi_count
, cpu
)->managed
);
1666 atomic_dec(&per_cpu_ptr(&cpu_lpi_count
, cpu
)->unmanaged
);
1669 static unsigned int cpumask_pick_least_loaded(struct irq_data
*d
,
1670 const struct cpumask
*cpu_mask
)
1672 unsigned int cpu
= nr_cpu_ids
, tmp
;
1673 int count
= S32_MAX
;
1675 for_each_cpu(tmp
, cpu_mask
) {
1676 int this_count
= its_read_lpi_count(d
, tmp
);
1677 if (this_count
< count
) {
1687 * As suggested by Thomas Gleixner in:
1688 * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de
1690 static int its_select_cpu(struct irq_data
*d
,
1691 const struct cpumask
*aff_mask
)
1693 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1694 static DEFINE_RAW_SPINLOCK(tmpmask_lock
);
1695 static struct cpumask __tmpmask
;
1696 struct cpumask
*tmpmask
;
1697 unsigned long flags
;
1699 node
= its_dev
->its
->numa_node
;
1700 tmpmask
= &__tmpmask
;
1702 raw_spin_lock_irqsave(&tmpmask_lock
, flags
);
1704 if (!irqd_affinity_is_managed(d
)) {
1705 /* First try the NUMA node */
1706 if (node
!= NUMA_NO_NODE
) {
1708 * Try the intersection of the affinity mask and the
1709 * node mask (and the online mask, just to be safe).
1711 cpumask_and(tmpmask
, cpumask_of_node(node
), aff_mask
);
1712 cpumask_and(tmpmask
, tmpmask
, cpu_online_mask
);
			/*
			 * Ideally, we would check if the mask is empty, and
			 * try again on the full node here.
			 *
			 * But it turns out that the way ACPI describes the
			 * affinity for ITSs only deals with memory, and
			 * not target CPUs, so it cannot describe a single
			 * ITS placed next to two NUMA nodes.
			 *
			 * Instead, just fall back on the online mask. This
			 * diverges from Thomas' suggestion above.
			 */
1726 cpu
= cpumask_pick_least_loaded(d
, tmpmask
);
1727 if (cpu
< nr_cpu_ids
)
1730 /* If we can't cross sockets, give up */
1731 if ((its_dev
->its
->flags
& ITS_FLAGS_WORKAROUND_CAVIUM_23144
))
1734 /* If the above failed, expand the search */
1737 /* Try the intersection of the affinity and online masks */
1738 cpumask_and(tmpmask
, aff_mask
, cpu_online_mask
);
1740 /* If that doesn't fly, the online mask is the last resort */
1741 if (cpumask_empty(tmpmask
))
1742 cpumask_copy(tmpmask
, cpu_online_mask
);
1744 cpu
= cpumask_pick_least_loaded(d
, tmpmask
);
1746 cpumask_copy(tmpmask
, aff_mask
);
1748 /* If we cannot cross sockets, limit the search to that node */
1749 if ((its_dev
->its
->flags
& ITS_FLAGS_WORKAROUND_CAVIUM_23144
) &&
1750 node
!= NUMA_NO_NODE
)
1751 cpumask_and(tmpmask
, tmpmask
, cpumask_of_node(node
));
1753 cpu
= cpumask_pick_least_loaded(d
, tmpmask
);
1756 raw_spin_unlock_irqrestore(&tmpmask_lock
, flags
);
1758 pr_debug("IRQ%d -> %*pbl CPU%d\n", d
->irq
, cpumask_pr_args(aff_mask
), cpu
);
1762 static int its_set_affinity(struct irq_data
*d
, const struct cpumask
*mask_val
,
1765 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1766 struct its_collection
*target_col
;
1767 u32 id
= its_get_event_id(d
);
1770 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1771 if (irqd_is_forwarded_to_vcpu(d
))
1774 prev_cpu
= its_dev
->event_map
.col_map
[id
];
1775 its_dec_lpi_count(d
, prev_cpu
);
1778 cpu
= its_select_cpu(d
, mask_val
);
1780 cpu
= cpumask_pick_least_loaded(d
, mask_val
);
1782 if (cpu
< 0 || cpu
>= nr_cpu_ids
)
1785 /* don't set the affinity when the target cpu is same as current one */
1786 if (cpu
!= prev_cpu
) {
1787 target_col
= &its_dev
->its
->collections
[cpu
];
1788 its_send_movi(its_dev
, target_col
, id
);
1789 its_dev
->event_map
.col_map
[id
] = cpu
;
1790 irq_data_update_effective_affinity(d
, cpumask_of(cpu
));
1793 its_inc_lpi_count(d
, cpu
);
1795 return IRQ_SET_MASK_OK_DONE
;
1798 its_inc_lpi_count(d
, prev_cpu
);
1802 static u64
its_irq_get_msi_base(struct its_device
*its_dev
)
1804 struct its_node
*its
= its_dev
->its
;
1806 return its
->phys_base
+ GITS_TRANSLATER
;
1809 static void its_irq_compose_msi_msg(struct irq_data
*d
, struct msi_msg
*msg
)
1811 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1812 struct its_node
*its
;
1816 addr
= its
->get_msi_base(its_dev
);
1818 msg
->address_lo
= lower_32_bits(addr
);
1819 msg
->address_hi
= upper_32_bits(addr
);
1820 msg
->data
= its_get_event_id(d
);
1822 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d
), msg
);
1825 static int its_irq_set_irqchip_state(struct irq_data
*d
,
1826 enum irqchip_irq_state which
,
1829 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1830 u32 event
= its_get_event_id(d
);
1832 if (which
!= IRQCHIP_STATE_PENDING
)
1835 if (irqd_is_forwarded_to_vcpu(d
)) {
1837 its_send_vint(its_dev
, event
);
1839 its_send_vclear(its_dev
, event
);
1842 its_send_int(its_dev
, event
);
1844 its_send_clear(its_dev
, event
);
1850 static int its_irq_retrigger(struct irq_data
*d
)
1852 return !its_irq_set_irqchip_state(d
, IRQCHIP_STATE_PENDING
, true);
1856 * Two favourable cases:
1858 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1861 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1862 * and we're better off mapping all VPEs always
1864 * If neither (a) nor (b) is true, then we map vPEs on demand.
1867 static bool gic_requires_eager_mapping(void)
1869 if (!its_list_map
|| gic_rdists
->has_rvpeid
)
1875 static void its_map_vm(struct its_node
*its
, struct its_vm
*vm
)
1877 if (gic_requires_eager_mapping())
1880 guard(raw_spinlock_irqsave
)(&vm
->vmapp_lock
);
1883 * If the VM wasn't mapped yet, iterate over the vpes and get
1886 vm
->vlpi_count
[its
->list_nr
]++;
1888 if (vm
->vlpi_count
[its
->list_nr
] == 1) {
1891 for (i
= 0; i
< vm
->nr_vpes
; i
++) {
1892 struct its_vpe
*vpe
= vm
->vpes
[i
];
1894 scoped_guard(raw_spinlock
, &vpe
->vpe_lock
)
1895 its_send_vmapp(its
, vpe
, true);
1897 its_send_vinvall(its
, vpe
);
1902 static void its_unmap_vm(struct its_node
*its
, struct its_vm
*vm
)
1904 /* Not using the ITS list? Everything is always mapped. */
1905 if (gic_requires_eager_mapping())
1908 guard(raw_spinlock_irqsave
)(&vm
->vmapp_lock
);
1910 if (!--vm
->vlpi_count
[its
->list_nr
]) {
1913 for (i
= 0; i
< vm
->nr_vpes
; i
++) {
1914 guard(raw_spinlock
)(&vm
->vpes
[i
]->vpe_lock
);
1915 its_send_vmapp(its
, vm
->vpes
[i
], false);
1920 static int its_vlpi_map(struct irq_data
*d
, struct its_cmd_info
*info
)
1922 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1923 u32 event
= its_get_event_id(d
);
1928 if (!its_dev
->event_map
.vm
) {
1929 struct its_vlpi_map
*maps
;
1931 maps
= kcalloc(its_dev
->event_map
.nr_lpis
, sizeof(*maps
),
1936 its_dev
->event_map
.vm
= info
->map
->vm
;
1937 its_dev
->event_map
.vlpi_maps
= maps
;
1938 } else if (its_dev
->event_map
.vm
!= info
->map
->vm
) {
1942 /* Get our private copy of the mapping information */
1943 its_dev
->event_map
.vlpi_maps
[event
] = *info
->map
;
1945 if (irqd_is_forwarded_to_vcpu(d
)) {
1946 /* Already mapped, move it around */
1947 its_send_vmovi(its_dev
, event
);
1949 /* Ensure all the VPEs are mapped on this ITS */
1950 its_map_vm(its_dev
->its
, info
->map
->vm
);
1953 * Flag the interrupt as forwarded so that we can
1954 * start poking the virtual property table.
1956 irqd_set_forwarded_to_vcpu(d
);
1958 /* Write out the property to the prop table */
1959 lpi_write_config(d
, 0xff, info
->map
->properties
);
1961 /* Drop the physical mapping */
1962 its_send_discard(its_dev
, event
);
1964 /* and install the virtual one */
1965 its_send_vmapti(its_dev
, event
);
1967 /* Increment the number of VLPIs */
1968 its_dev
->event_map
.nr_vlpis
++;
1974 static int its_vlpi_get(struct irq_data
*d
, struct its_cmd_info
*info
)
1976 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1977 struct its_vlpi_map
*map
;
1979 map
= get_vlpi_map(d
);
1981 if (!its_dev
->event_map
.vm
|| !map
)
1984 /* Copy our mapping information to the incoming request */
1990 static int its_vlpi_unmap(struct irq_data
*d
)
1992 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
1993 u32 event
= its_get_event_id(d
);
1995 if (!its_dev
->event_map
.vm
|| !irqd_is_forwarded_to_vcpu(d
))
1998 /* Drop the virtual mapping */
1999 its_send_discard(its_dev
, event
);
2001 /* and restore the physical one */
2002 irqd_clr_forwarded_to_vcpu(d
);
2003 its_send_mapti(its_dev
, d
->hwirq
, event
);
2004 lpi_update_config(d
, 0xff, (lpi_prop_prio
|
2008 /* Potentially unmap the VM from this ITS */
2009 its_unmap_vm(its_dev
->its
, its_dev
->event_map
.vm
);
2012 * Drop the refcount and make the device available again if
2013 * this was the last VLPI.
2015 if (!--its_dev
->event_map
.nr_vlpis
) {
2016 its_dev
->event_map
.vm
= NULL
;
2017 kfree(its_dev
->event_map
.vlpi_maps
);
2023 static int its_vlpi_prop_update(struct irq_data
*d
, struct its_cmd_info
*info
)
2025 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
2027 if (!its_dev
->event_map
.vm
|| !irqd_is_forwarded_to_vcpu(d
))
2030 if (info
->cmd_type
== PROP_UPDATE_AND_INV_VLPI
)
2031 lpi_update_config(d
, 0xff, info
->config
);
2033 lpi_write_config(d
, 0xff, info
->config
);
2034 its_vlpi_set_doorbell(d
, !!(info
->config
& LPI_PROP_ENABLED
));
2039 static int its_irq_set_vcpu_affinity(struct irq_data
*d
, void *vcpu_info
)
2041 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
2042 struct its_cmd_info
*info
= vcpu_info
;
2045 if (!is_v4(its_dev
->its
))
2048 guard(raw_spinlock_irq
)(&its_dev
->event_map
.vlpi_lock
);
2050 /* Unmap request? */
2052 return its_vlpi_unmap(d
);
2054 switch (info
->cmd_type
) {
2056 return its_vlpi_map(d
, info
);
2059 return its_vlpi_get(d
, info
);
2061 case PROP_UPDATE_VLPI
:
2062 case PROP_UPDATE_AND_INV_VLPI
:
2063 return its_vlpi_prop_update(d
, info
);
2070 static struct irq_chip its_irq_chip
= {
2072 .irq_mask
= its_mask_irq
,
2073 .irq_unmask
= its_unmask_irq
,
2074 .irq_eoi
= irq_chip_eoi_parent
,
2075 .irq_set_affinity
= its_set_affinity
,
2076 .irq_compose_msi_msg
= its_irq_compose_msi_msg
,
2077 .irq_set_irqchip_state
= its_irq_set_irqchip_state
,
2078 .irq_retrigger
= its_irq_retrigger
,
2079 .irq_set_vcpu_affinity
= its_irq_set_vcpu_affinity
,
/*
 * How we allocate LPIs:
 *
 * lpi_range_list contains ranges of LPIs that are available to
 * allocate from. To allocate LPIs, just pick the first range that
 * fits the required allocation, and reduce it by the required
 * amount. Once empty, remove the range from the list.
 *
 * To free a range of LPIs, add a free range to the list, sort it and
 * merge the result if the new range happens to be adjacent to an
 * already free block.
 *
 * The consequence of the above is that allocation cost is low, but
 * freeing is expensive. We assume that freeing rarely occurs.
 */
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
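/*
 * A small worked example of the behaviour described above (illustrative
 * numbers): with a single free range starting at LPI 8192,
 * alloc_lpi_range(32, &base) returns base == 8192 and shrinks the range
 * to start at 8224; a later free_lpi_range(8192, 32) re-inserts the
 * freed block and merge_lpi_ranges() glues it back onto its neighbour.
 */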
2100 static DEFINE_MUTEX(lpi_range_lock
);
2101 static LIST_HEAD(lpi_range_list
);
2104 struct list_head entry
;
2109 static struct lpi_range
*mk_lpi_range(u32 base
, u32 span
)
2111 struct lpi_range
*range
;
2113 range
= kmalloc(sizeof(*range
), GFP_KERNEL
);
2115 range
->base_id
= base
;
2122 static int alloc_lpi_range(u32 nr_lpis
, u32
*base
)
2124 struct lpi_range
*range
, *tmp
;
2127 mutex_lock(&lpi_range_lock
);
2129 list_for_each_entry_safe(range
, tmp
, &lpi_range_list
, entry
) {
2130 if (range
->span
>= nr_lpis
) {
2131 *base
= range
->base_id
;
2132 range
->base_id
+= nr_lpis
;
2133 range
->span
-= nr_lpis
;
2135 if (range
->span
== 0) {
2136 list_del(&range
->entry
);
2145 mutex_unlock(&lpi_range_lock
);
2147 pr_debug("ITS: alloc %u:%u\n", *base
, nr_lpis
);
2151 static void merge_lpi_ranges(struct lpi_range
*a
, struct lpi_range
*b
)
2153 if (&a
->entry
== &lpi_range_list
|| &b
->entry
== &lpi_range_list
)
2155 if (a
->base_id
+ a
->span
!= b
->base_id
)
2157 b
->base_id
= a
->base_id
;
2159 list_del(&a
->entry
);
2163 static int free_lpi_range(u32 base
, u32 nr_lpis
)
2165 struct lpi_range
*new, *old
;
2167 new = mk_lpi_range(base
, nr_lpis
);
2171 mutex_lock(&lpi_range_lock
);
2173 list_for_each_entry_reverse(old
, &lpi_range_list
, entry
) {
2174 if (old
->base_id
< base
)
	/*
	 * old is the last element with ->base_id smaller than base,
	 * so new goes right after it. If there are no elements with
	 * ->base_id smaller than base, &old->entry ends up pointing
	 * at the head of the list, and inserting new at the start of
	 * the list is the right thing to do in that case as well.
	 */
2184 list_add(&new->entry
, &old
->entry
);
2186 * Now check if we can merge with the preceding and/or
2189 merge_lpi_ranges(old
, new);
2190 merge_lpi_ranges(new, list_next_entry(new, entry
));
2192 mutex_unlock(&lpi_range_lock
);
2196 static int __init
its_lpi_init(u32 id_bits
)
2198 u32 lpis
= (1UL << id_bits
) - 8192;
2202 numlpis
= 1UL << GICD_TYPER_NUM_LPIS(gic_rdists
->gicd_typer
);
2204 if (numlpis
> 2 && !WARN_ON(numlpis
> lpis
)) {
2206 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
2211 * Initializing the allocator is just the same as freeing the
2212 * full range of LPIs.
2214 err
= free_lpi_range(8192, lpis
);
2215 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis
);
2219 static unsigned long *its_lpi_alloc(int nr_irqs
, u32
*base
, int *nr_ids
)
2221 unsigned long *bitmap
= NULL
;
2225 err
= alloc_lpi_range(nr_irqs
, base
);
2230 } while (nr_irqs
> 0);
2238 bitmap
= bitmap_zalloc(nr_irqs
, GFP_ATOMIC
);
2246 *base
= *nr_ids
= 0;
2251 static void its_lpi_free(unsigned long *bitmap
, u32 base
, u32 nr_ids
)
2253 WARN_ON(free_lpi_range(base
, nr_ids
));
2254 bitmap_free(bitmap
);
static void gic_reset_prop_table(void *va)
{
        /* Regular IRQ priority, Group-1, disabled */
        memset(va, lpi_prop_prio | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);

        /* Make sure the GIC will observe the written configuration */
        gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
}
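/*
 * Each byte of the property table describes one LPI: bits [7:2] hold
 * the priority, bit 1 the Group-1 bit (LPI_PROP_GROUP1) and bit 0 the
 * enable bit (LPI_PROP_ENABLED), so the memset above leaves every LPI
 * at the default priority, Group-1 and disabled.
 */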
2266 static struct page
*its_allocate_prop_table(gfp_t gfp_flags
)
2268 struct page
*prop_page
;
2270 prop_page
= its_alloc_pages(gfp_flags
,
2271 get_order(LPI_PROPBASE_SZ
));
2275 gic_reset_prop_table(page_address(prop_page
));
2280 static void its_free_prop_table(struct page
*prop_page
)
2282 its_free_pages(page_address(prop_page
), get_order(LPI_PROPBASE_SZ
));
2285 static bool gic_check_reserved_range(phys_addr_t addr
, unsigned long size
)
2287 phys_addr_t start
, end
, addr_end
;
2291 * We don't bother checking for a kdump kernel as by
2292 * construction, the LPI tables are out of this kernel's
2295 if (is_kdump_kernel())
2298 addr_end
= addr
+ size
- 1;
2300 for_each_reserved_mem_range(i
, &start
, &end
) {
2301 if (addr
>= start
&& addr_end
<= end
)
2305 /* Not found, not a good sign... */
2306 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2308 add_taint(TAINT_CRAP
, LOCKDEP_STILL_OK
);
2312 static int gic_reserve_range(phys_addr_t addr
, unsigned long size
)
2314 if (efi_enabled(EFI_CONFIG_TABLES
))
2315 return efi_mem_reserve_persistent(addr
, size
);
2320 static int __init
its_setup_lpi_prop_table(void)
2322 if (gic_rdists
->flags
& RDIST_FLAGS_RD_TABLES_PREALLOCATED
) {
2325 val
= gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER
);
2326 lpi_id_bits
= (val
& GICR_PROPBASER_IDBITS_MASK
) + 1;
2328 gic_rdists
->prop_table_pa
= val
& GENMASK_ULL(51, 12);
2329 gic_rdists
->prop_table_va
= memremap(gic_rdists
->prop_table_pa
,
2332 gic_reset_prop_table(gic_rdists
->prop_table_va
);
2336 lpi_id_bits
= min_t(u32
,
2337 GICD_TYPER_ID_BITS(gic_rdists
->gicd_typer
),
2338 ITS_MAX_LPI_NRBITS
);
2339 page
= its_allocate_prop_table(GFP_NOWAIT
);
2341 pr_err("Failed to allocate PROPBASE\n");
2345 gic_rdists
->prop_table_pa
= page_to_phys(page
);
2346 gic_rdists
->prop_table_va
= page_address(page
);
2347 WARN_ON(gic_reserve_range(gic_rdists
->prop_table_pa
,
2351 pr_info("GICv3: using LPI property table @%pa\n",
2352 &gic_rdists
->prop_table_pa
);
2354 return its_lpi_init(lpi_id_bits
);
2357 static const char *its_base_type_string
[] = {
2358 [GITS_BASER_TYPE_DEVICE
] = "Devices",
2359 [GITS_BASER_TYPE_VCPU
] = "Virtual CPUs",
2360 [GITS_BASER_TYPE_RESERVED3
] = "Reserved (3)",
2361 [GITS_BASER_TYPE_COLLECTION
] = "Interrupt Collections",
2362 [GITS_BASER_TYPE_RESERVED5
] = "Reserved (5)",
2363 [GITS_BASER_TYPE_RESERVED6
] = "Reserved (6)",
2364 [GITS_BASER_TYPE_RESERVED7
] = "Reserved (7)",
2367 static u64
its_read_baser(struct its_node
*its
, struct its_baser
*baser
)
2369 u32 idx
= baser
- its
->tables
;
2371 return gits_read_baser(its
->base
+ GITS_BASER
+ (idx
<< 3));
2374 static void its_write_baser(struct its_node
*its
, struct its_baser
*baser
,
2377 u32 idx
= baser
- its
->tables
;
2379 gits_write_baser(val
, its
->base
+ GITS_BASER
+ (idx
<< 3));
2380 baser
->val
= its_read_baser(its
, baser
);
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 order, bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u64 baser_phys, tmp;
	u32 alloc_pages, psz;
	struct page *page;
	void *base;

	psz = baser->psz;
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	base = (void *)page_address(page);
	baser_phys = virt_to_phys(base);

	/* Check if the physical address of the memory is above 48bits */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {

		/* 52bit PA is supported only when PageSize=64K */
		if (psz != SZ_64K) {
			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
			its_free_pages(base, order);
			return -ENXIO;
		}

		/* Convert 52bit PA to 48bit field */
		baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
	}

retry_baser:
	val = (baser_phys					 |
		(type << GITS_BASER_TYPE_SHIFT)			 |
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
		cache						 |
		shr						 |
		GITS_BASER_VALID);

	val |= indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	if (!shr)
		gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr)
			cache = GITS_BASER_nC;

		goto retry_baser;
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		       val, tmp);
		its_free_pages(base, order);
		return -ENXIO;
	}

	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}
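/*
 * Rough numbers for orientation: with 64kB ITS pages and 8-byte
 * entries, a single flat page already covers 8192 entries, and the
 * GITS_BASER_PAGES_MAX clamp above bounds a single table to 256 such
 * pages.
 */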
static bool its_parse_indirect_baser(struct its_node *its,
				     struct its_baser *baser,
				     u32 *order, u32 ids)
{
	u64 tmp = its_read_baser(its, baser);
	u64 type = GITS_BASER_TYPE(tmp);
	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
	u32 new_order = *order;
	u32 psz = baser->psz;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
	if ((esz << ids) > (psz * 2)) {
		/*
		 * Find out whether hw supports a single or two-level table
		 * by reading bit at offset '62' after writing '1' to it.
		 */
		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
		indirect = !!(baser->val & GITS_BASER_INDIRECT);

		if (indirect) {
			/*
			 * The size of each lvl2 table is equal to the ITS
			 * page size ('psz'). To size the lvl1 table, subtract
			 * the ID bits covered by a single lvl2 table from
			 * 'ids', and switch to the lvl1 entry size.
			 */
			ids -= ilog2(psz / (int)esz);
			esz = GITS_LVL1_ENTRY_SIZE;
		}
	}

	/*
	 * Allocate as many entries as required to fit the
	 * range of device IDs that the ITS can grok... The ID
	 * space being incredibly sparse, this results in a
	 * massive waste of memory if two-level device table
	 * feature is not supported by hardware.
	 */
	new_order = max_t(u32, get_order(esz << ids), new_order);
	if (new_order > MAX_PAGE_ORDER) {
		new_order = MAX_PAGE_ORDER;
		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
		pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
			&its->phys_base, its_base_type_string[type],
			device_ids(its), ids);
	}

	*order = new_order;

	return indirect;
}
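/*
 * Worked example (illustrative figures only): for a device table with
 * ids = 20, esz = 8 and psz = 64kB, a flat table needs 8 << 20 = 8MB.
 * With indirection, each 64kB level-2 page covers 2^13 IDs, so the
 * level-1 table shrinks to 8 << (20 - 13) = 1kB.
 */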
static u32 compute_common_aff(u64 val)
{
	u32 aff, clpiaff;

	aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
	clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);

	return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
}

static u32 compute_its_aff(struct its_node *its)
{
	u64 val;
	u32 svpet;

	/*
	 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
	 * the resulting affinity. We then use that to see if this matches
	 * our own affinity.
	 */
	svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
	val  = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
	val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
	return compute_common_aff(val);
}
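/*
 * CommonLPIAff describes how much of the affinity is shared by all
 * redistributors that can use the same vPE table: 0 means all of
 * them, 1 keeps Aff3, 2 keeps Aff3.Aff2, and 3 keeps Aff3.Aff2.Aff1.
 * That is what the byte-sized shift in compute_common_aff()
 * implements.
 */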
static struct its_node *find_sibling_its(struct its_node *cur_its)
{
	struct its_node *its;
	u32 aff;

	if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
		return NULL;

	aff = compute_its_aff(cur_its);

	list_for_each_entry(its, &its_nodes, entry) {
		u64 baser;

		if (!is_v4_1(its) || its == cur_its)
			continue;

		if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
			continue;

		if (aff != compute_its_aff(its))
			continue;

		/* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
		baser = its->tables[2].val;
		if (!(baser & GITS_BASER_VALID))
			continue;

		return its;
	}

	return NULL;
}
static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
			its_free_pages(its->tables[i].base,
				       its->tables[i].order);
			its->tables[i].base = NULL;
		}
	}
}
static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
{
	u64 psz = SZ_64K;

	while (psz) {
		u64 val, gpsz;

		val = its_read_baser(its, baser);
		val &= ~GITS_BASER_PAGE_SIZE_MASK;

		switch (psz) {
		case SZ_64K:
			gpsz = GITS_BASER_PAGE_SIZE_64K;
			break;
		case SZ_16K:
			gpsz = GITS_BASER_PAGE_SIZE_16K;
			break;
		case SZ_4K:
		default:
			gpsz = GITS_BASER_PAGE_SIZE_4K;
			break;
		}

		gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;

		val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
		its_write_baser(its, baser, val);

		if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
			break;

		switch (psz) {
		case SZ_64K:
			psz = SZ_16K;
			break;
		case SZ_16K:
			psz = SZ_4K;
			break;
		case SZ_4K:
		default:
			return -1;
		}
	}

	baser->psz = psz;
	return 0;
}
static int its_alloc_tables(struct its_node *its)
{
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_RaWaWb;
	int err, i;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
		/* erratum 24313: ignore memory access type */
		cache = GITS_BASER_nCnB;

	if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
		cache = GITS_BASER_nC;
		shr = 0;
	}

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		u64 type = GITS_BASER_TYPE(val);
		bool indirect = false;
		u32 order;

		if (type == GITS_BASER_TYPE_NONE)
			continue;

		if (its_probe_baser_psz(its, baser)) {
			its_free_tables(its);
			return -ENXIO;
		}

		order = get_order(baser->psz);

		switch (type) {
		case GITS_BASER_TYPE_DEVICE:
			indirect = its_parse_indirect_baser(its, baser, &order,
							    device_ids(its));
			break;

		case GITS_BASER_TYPE_VCPU:
			if (is_v4_1(its)) {
				struct its_node *sibling;

				WARN_ON(i != 2);
				if ((sibling = find_sibling_its(its))) {
					*baser = sibling->tables[2];
					its_write_baser(its, baser, baser->val);
					continue;
				}
			}

			indirect = its_parse_indirect_baser(its, baser, &order,
							    ITS_MAX_VPEID_BITS);
			break;
		}

		err = its_setup_baser(its, baser, cache, shr, order, indirect);
		if (err < 0) {
			its_free_tables(its);
			return err;
		}

		/* Update settings which will be used for next BASERn */
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
	}

	return 0;
}
2735 static u64
inherit_vpe_l1_table_from_its(void)
2737 struct its_node
*its
;
2741 val
= gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER
);
2742 aff
= compute_common_aff(val
);
2744 list_for_each_entry(its
, &its_nodes
, entry
) {
2750 if (!FIELD_GET(GITS_TYPER_SVPET
, its
->typer
))
2753 if (aff
!= compute_its_aff(its
))
2756 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2757 baser
= its
->tables
[2].val
;
2758 if (!(baser
& GITS_BASER_VALID
))
2761 /* We have a winner! */
2762 gic_data_rdist()->vpe_l1_base
= its
->tables
[2].base
;
2764 val
= GICR_VPROPBASER_4_1_VALID
;
2765 if (baser
& GITS_BASER_INDIRECT
)
2766 val
|= GICR_VPROPBASER_4_1_INDIRECT
;
2767 val
|= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE
,
2768 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK
, baser
));
2769 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK
, baser
)) {
2770 case GIC_PAGE_SIZE_64K
:
2771 addr
= GITS_BASER_ADDR_48_to_52(baser
);
2774 addr
= baser
& GENMASK_ULL(47, 12);
2777 val
|= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR
, addr
>> 12);
2778 if (rdists_support_shareable()) {
2779 val
|= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK
,
2780 FIELD_GET(GITS_BASER_SHAREABILITY_MASK
, baser
));
2781 val
|= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK
,
2782 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK
, baser
));
2784 val
|= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE
, GITS_BASER_NR_PAGES(baser
) - 1);
2792 static u64
inherit_vpe_l1_table_from_rd(cpumask_t
**mask
)
2798 val
= gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER
);
2799 aff
= compute_common_aff(val
);
2801 for_each_possible_cpu(cpu
) {
2802 void __iomem
*base
= gic_data_rdist_cpu(cpu
)->rd_base
;
2804 if (!base
|| cpu
== smp_processor_id())
2807 val
= gic_read_typer(base
+ GICR_TYPER
);
2808 if (aff
!= compute_common_aff(val
))
2812 * At this point, we have a victim. This particular CPU
2813 * has already booted, and has an affinity that matches
2814 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2815 * Make sure we don't write the Z bit in that case.
2817 val
= gicr_read_vpropbaser(base
+ SZ_128K
+ GICR_VPROPBASER
);
2818 val
&= ~GICR_VPROPBASER_4_1_Z
;
2820 gic_data_rdist()->vpe_l1_base
= gic_data_rdist_cpu(cpu
)->vpe_l1_base
;
2821 *mask
= gic_data_rdist_cpu(cpu
)->vpe_table_mask
;
2829 static bool allocate_vpe_l2_table(int cpu
, u32 id
)
2831 void __iomem
*base
= gic_data_rdist_cpu(cpu
)->rd_base
;
2832 unsigned int psz
, esz
, idx
, npg
, gpsz
;
2837 if (!gic_rdists
->has_rvpeid
)
2840 /* Skip non-present CPUs */
2844 val
= gicr_read_vpropbaser(base
+ SZ_128K
+ GICR_VPROPBASER
);
2846 esz
= FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE
, val
) + 1;
2847 gpsz
= FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE
, val
);
2848 npg
= FIELD_GET(GICR_VPROPBASER_4_1_SIZE
, val
) + 1;
2854 case GIC_PAGE_SIZE_4K
:
2857 case GIC_PAGE_SIZE_16K
:
2860 case GIC_PAGE_SIZE_64K
:
2865 /* Don't allow vpe_id that exceeds single, flat table limit */
2866 if (!(val
& GICR_VPROPBASER_4_1_INDIRECT
))
2867 return (id
< (npg
* psz
/ (esz
* SZ_8
)));
2869 /* Compute 1st level table index & check if that exceeds table limit */
2870 idx
= id
>> ilog2(psz
/ (esz
* SZ_8
));
2871 if (idx
>= (npg
* psz
/ GITS_LVL1_ENTRY_SIZE
))
2874 table
= gic_data_rdist_cpu(cpu
)->vpe_l1_base
;
2876 /* Allocate memory for 2nd level table */
2878 page
= its_alloc_pages(GFP_KERNEL
| __GFP_ZERO
, get_order(psz
));
2882 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2883 if (!(val
& GICR_VPROPBASER_SHAREABILITY_MASK
))
2884 gic_flush_dcache_to_poc(page_address(page
), psz
);
2886 table
[idx
] = cpu_to_le64(page_to_phys(page
) | GITS_BASER_VALID
);
2888 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2889 if (!(val
& GICR_VPROPBASER_SHAREABILITY_MASK
))
2890 gic_flush_dcache_to_poc(table
+ idx
, GITS_LVL1_ENTRY_SIZE
);
2892 /* Ensure updated table contents are visible to RD hardware */
2899 static int allocate_vpe_l1_table(void)
2901 void __iomem
*vlpi_base
= gic_data_rdist_vlpi_base();
2902 u64 val
, gpsz
, npg
, pa
;
2903 unsigned int psz
= SZ_64K
;
2904 unsigned int np
, epp
, esz
;
2907 if (!gic_rdists
->has_rvpeid
)
2911 * if VPENDBASER.Valid is set, disable any previously programmed
2912 * VPE by setting PendingLast while clearing Valid. This has the
2913 * effect of making sure no doorbell will be generated and we can
2914 * then safely clear VPROPBASER.Valid.
2916 if (gicr_read_vpendbaser(vlpi_base
+ GICR_VPENDBASER
) & GICR_VPENDBASER_Valid
)
2917 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast
,
2918 vlpi_base
+ GICR_VPENDBASER
);
2921 * If we can inherit the configuration from another RD, let's do
2922 * so. Otherwise, we have to go through the allocation process. We
2923 * assume that all RDs have the exact same requirements, as
2924 * nothing will work otherwise.
2926 val
= inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask
);
2927 if (val
& GICR_VPROPBASER_4_1_VALID
)
2930 gic_data_rdist()->vpe_table_mask
= kzalloc(sizeof(cpumask_t
), GFP_ATOMIC
);
2931 if (!gic_data_rdist()->vpe_table_mask
)
2934 val
= inherit_vpe_l1_table_from_its();
2935 if (val
& GICR_VPROPBASER_4_1_VALID
)
2938 /* First probe the page size */
2939 val
= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE
, GIC_PAGE_SIZE_64K
);
2940 gicr_write_vpropbaser(val
, vlpi_base
+ GICR_VPROPBASER
);
2941 val
= gicr_read_vpropbaser(vlpi_base
+ GICR_VPROPBASER
);
2942 gpsz
= FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE
, val
);
2943 esz
= FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE
, val
);
2947 gpsz
= GIC_PAGE_SIZE_4K
;
2949 case GIC_PAGE_SIZE_4K
:
2952 case GIC_PAGE_SIZE_16K
:
2955 case GIC_PAGE_SIZE_64K
:
2961 * Start populating the register from scratch, including RO fields
2962 * (which we want to print in debug cases...)
2965 val
|= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE
, gpsz
);
2966 val
|= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE
, esz
);
2968 /* How many entries per GIC page? */
2970 epp
= psz
/ (esz
* SZ_8
);
2973 * If we need more than just a single L1 page, flag the table
2974 * as indirect and compute the number of required L1 pages.
2976 if (epp
< ITS_MAX_VPEID
) {
2979 val
|= GICR_VPROPBASER_4_1_INDIRECT
;
2981 /* Number of L2 pages required to cover the VPEID space */
2982 nl2
= DIV_ROUND_UP(ITS_MAX_VPEID
, epp
);
2984 /* Number of L1 pages to point to the L2 pages */
2985 npg
= DIV_ROUND_UP(nl2
* SZ_8
, psz
);
2990 val
|= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE
, npg
- 1);
2992 /* Right, that's the number of CPU pages we need for L1 */
2993 np
= DIV_ROUND_UP(npg
* psz
, PAGE_SIZE
);
2995 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2996 np
, npg
, psz
, epp
, esz
);
2997 page
= its_alloc_pages(GFP_ATOMIC
| __GFP_ZERO
, get_order(np
* PAGE_SIZE
));
3001 gic_data_rdist()->vpe_l1_base
= page_address(page
);
3002 pa
= virt_to_phys(page_address(page
));
3003 WARN_ON(!IS_ALIGNED(pa
, psz
));
3005 val
|= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR
, pa
>> 12);
3006 if (rdists_support_shareable()) {
3007 val
|= GICR_VPROPBASER_RaWb
;
3008 val
|= GICR_VPROPBASER_InnerShareable
;
3010 val
|= GICR_VPROPBASER_4_1_Z
;
3011 val
|= GICR_VPROPBASER_4_1_VALID
;
3014 gicr_write_vpropbaser(val
, vlpi_base
+ GICR_VPROPBASER
);
3015 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask
);
3017 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
3018 smp_processor_id(), val
,
3019 cpumask_pr_args(gic_data_rdist()->vpe_table_mask
));
static int its_alloc_collections(struct its_node *its)
{
	int i;

	its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	for (i = 0; i < nr_cpu_ids; i++)
		its->collections[i].target_address = ~0ULL;

	return 0;
}
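/*
 * A target_address of ~0ULL marks a collection that has not been
 * bound to a redistributor yet; its_cpu_init_collection() fills in
 * the real target when the corresponding CPU comes online.
 */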
static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
	struct page *pend_page;

	pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO,
				    get_order(LPI_PENDBASE_SZ));
	if (!pend_page)
		return NULL;

	/* Make sure the GIC will observe the zero-ed page */
	gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);

	return pend_page;
}

static void its_free_pending_table(struct page *pt)
{
	its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ));
}
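/*
 * Zeroing the pending table and flushing it to the PoC before use
 * matters: the redistributor may access the table with non-coherent
 * reads, and must not observe stale lines claiming interrupts are
 * pending.
 */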
/*
 * Booting with kdump and LPIs enabled is generally fine. Any other
 * case is wrong in the absence of firmware/EFI support.
 */
static bool enabled_lpis_allowed(void)
{
	phys_addr_t addr;
	u64 val;

	/* Check whether the property table is in a reserved region */
	val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
	addr = val & GENMASK_ULL(51, 12);

	return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
}
3074 static int __init
allocate_lpi_tables(void)
3080 * If LPIs are enabled while we run this from the boot CPU,
3081 * flag the RD tables as pre-allocated if the stars do align.
3083 val
= readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR
);
3084 if ((val
& GICR_CTLR_ENABLE_LPIS
) && enabled_lpis_allowed()) {
3085 gic_rdists
->flags
|= (RDIST_FLAGS_RD_TABLES_PREALLOCATED
|
3086 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING
);
3087 pr_info("GICv3: Using preallocated redistributor tables\n");
3090 err
= its_setup_lpi_prop_table();
3095 * We allocate all the pending tables anyway, as we may have a
3096 * mix of RDs that have had LPIs enabled, and some that
3097 * don't. We'll free the unused ones as each CPU comes online.
3099 for_each_possible_cpu(cpu
) {
3100 struct page
*pend_page
;
3102 pend_page
= its_allocate_pending_table(GFP_NOWAIT
);
3104 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu
);
3108 gic_data_rdist_cpu(cpu
)->pend_page
= pend_page
;
static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
{
	u32 count = 1000000;	/* 1s! */
	bool clean;
	u64 val;

	do {
		val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
		clean = !(val & GICR_VPENDBASER_Dirty);
		if (!clean) {
			count--;
			cpu_relax();
			udelay(1);
		}
	} while (!clean && count);

	if (unlikely(!clean))
		pr_err_ratelimited("ITS virtual pending table not cleaning\n");

	return val;
}

static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
{
	u64 val;

	/* Make sure we wait until the RD is done with the initial scan */
	val = read_vpend_dirty_clear(vlpi_base);
	val &= ~GICR_VPENDBASER_Valid;
	val &= ~clr;
	val |= set;
	gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);

	val = read_vpend_dirty_clear(vlpi_base);
	if (unlikely(val & GICR_VPENDBASER_Dirty))
		val |= GICR_VPENDBASER_PendingLast;

	return val;
}
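/*
 * The sequence above is a small handshake with the redistributor:
 * wait for any in-flight pending-table scan to finish (Dirty clear),
 * drop Valid, then re-read. If Dirty is still set at that point,
 * PendingLast cannot be trusted, so it is forced to 1 as the
 * conservative "there may be something pending" answer.
 */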
3154 static void its_cpu_init_lpis(void)
3156 void __iomem
*rbase
= gic_data_rdist_rd_base();
3157 struct page
*pend_page
;
3161 if (gic_data_rdist()->flags
& RD_LOCAL_LPI_ENABLED
)
3164 val
= readl_relaxed(rbase
+ GICR_CTLR
);
3165 if ((gic_rdists
->flags
& RDIST_FLAGS_RD_TABLES_PREALLOCATED
) &&
3166 (val
& GICR_CTLR_ENABLE_LPIS
)) {
3168 * Check that we get the same property table on all
3169 * RDs. If we don't, this is hopeless.
3171 paddr
= gicr_read_propbaser(rbase
+ GICR_PROPBASER
);
3172 paddr
&= GENMASK_ULL(51, 12);
3173 if (WARN_ON(gic_rdists
->prop_table_pa
!= paddr
))
3174 add_taint(TAINT_CRAP
, LOCKDEP_STILL_OK
);
3176 paddr
= gicr_read_pendbaser(rbase
+ GICR_PENDBASER
);
3177 paddr
&= GENMASK_ULL(51, 16);
3179 WARN_ON(!gic_check_reserved_range(paddr
, LPI_PENDBASE_SZ
));
3180 gic_data_rdist()->flags
|= RD_LOCAL_PENDTABLE_PREALLOCATED
;
3185 pend_page
= gic_data_rdist()->pend_page
;
3186 paddr
= page_to_phys(pend_page
);
3189 val
= (gic_rdists
->prop_table_pa
|
3190 GICR_PROPBASER_InnerShareable
|
3191 GICR_PROPBASER_RaWaWb
|
3192 ((LPI_NRBITS
- 1) & GICR_PROPBASER_IDBITS_MASK
));
3194 gicr_write_propbaser(val
, rbase
+ GICR_PROPBASER
);
3195 tmp
= gicr_read_propbaser(rbase
+ GICR_PROPBASER
);
3197 if (!rdists_support_shareable())
3198 tmp
&= ~GICR_PROPBASER_SHAREABILITY_MASK
;
3200 if ((tmp
^ val
) & GICR_PROPBASER_SHAREABILITY_MASK
) {
3201 if (!(tmp
& GICR_PROPBASER_SHAREABILITY_MASK
)) {
3203 * The HW reports non-shareable, we must
3204 * remove the cacheability attributes as
3207 val
&= ~(GICR_PROPBASER_SHAREABILITY_MASK
|
3208 GICR_PROPBASER_CACHEABILITY_MASK
);
3209 val
|= GICR_PROPBASER_nC
;
3210 gicr_write_propbaser(val
, rbase
+ GICR_PROPBASER
);
3212 pr_info_once("GIC: using cache flushing for LPI property table\n");
3213 gic_rdists
->flags
|= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING
;
3217 val
= (page_to_phys(pend_page
) |
3218 GICR_PENDBASER_InnerShareable
|
3219 GICR_PENDBASER_RaWaWb
);
3221 gicr_write_pendbaser(val
, rbase
+ GICR_PENDBASER
);
3222 tmp
= gicr_read_pendbaser(rbase
+ GICR_PENDBASER
);
3224 if (!rdists_support_shareable())
3225 tmp
&= ~GICR_PENDBASER_SHAREABILITY_MASK
;
3227 if (!(tmp
& GICR_PENDBASER_SHAREABILITY_MASK
)) {
3229 * The HW reports non-shareable, we must remove the
3230 * cacheability attributes as well.
3232 val
&= ~(GICR_PENDBASER_SHAREABILITY_MASK
|
3233 GICR_PENDBASER_CACHEABILITY_MASK
);
3234 val
|= GICR_PENDBASER_nC
;
3235 gicr_write_pendbaser(val
, rbase
+ GICR_PENDBASER
);
3239 val
= readl_relaxed(rbase
+ GICR_CTLR
);
3240 val
|= GICR_CTLR_ENABLE_LPIS
;
3241 writel_relaxed(val
, rbase
+ GICR_CTLR
);
3244 if (gic_rdists
->has_vlpis
&& !gic_rdists
->has_rvpeid
) {
3245 void __iomem
*vlpi_base
= gic_data_rdist_vlpi_base();
3248 * It's possible for CPU to receive VLPIs before it is
3249 * scheduled as a vPE, especially for the first CPU, and the
3250 * VLPI with INTID larger than 2^(IDbits+1) will be considered
3251 * as out of range and dropped by GIC.
3252 * So we initialize IDbits to known value to avoid VLPI drop.
3254 val
= (LPI_NRBITS
- 1) & GICR_VPROPBASER_IDBITS_MASK
;
3255 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3256 smp_processor_id(), val
);
3257 gicr_write_vpropbaser(val
, vlpi_base
+ GICR_VPROPBASER
);
3260 * Also clear Valid bit of GICR_VPENDBASER, in case some
3261 * ancient programming gets left in and has possibility of
3262 * corrupting memory.
3264 val
= its_clear_vpend_valid(vlpi_base
, 0, 0);
3267 if (allocate_vpe_l1_table()) {
3269 * If the allocation has failed, we're in massive trouble.
3270 * Disable direct injection, and pray that no VM was
3271 * already running...
3273 gic_rdists
->has_rvpeid
= false;
3274 gic_rdists
->has_vlpis
= false;
3277 /* Make sure the GIC has seen the above */
3279 gic_data_rdist()->flags
|= RD_LOCAL_LPI_ENABLED
;
3280 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3282 gic_data_rdist()->flags
& RD_LOCAL_PENDTABLE_PREALLOCATED
?
3283 "reserved" : "allocated",
3287 static void its_cpu_init_collection(struct its_node
*its
)
3289 int cpu
= smp_processor_id();
3292 /* avoid cross node collections and its mapping */
3293 if (its
->flags
& ITS_FLAGS_WORKAROUND_CAVIUM_23144
) {
3294 struct device_node
*cpu_node
;
3296 cpu_node
= of_get_cpu_node(cpu
, NULL
);
3297 if (its
->numa_node
!= NUMA_NO_NODE
&&
3298 its
->numa_node
!= of_node_to_nid(cpu_node
))
3303 * We now have to bind each collection to its target
3306 if (gic_read_typer(its
->base
+ GITS_TYPER
) & GITS_TYPER_PTA
) {
3308 * This ITS wants the physical address of the
3311 target
= gic_data_rdist()->phys_base
;
3313 /* This ITS wants a linear CPU number. */
3314 target
= gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER
);
3315 target
= GICR_TYPER_CPU_NUMBER(target
) << 16;
3318 /* Perform collection mapping */
3319 its
->collections
[cpu
].target_address
= target
;
3320 its
->collections
[cpu
].col_id
= cpu
;
3322 its_send_mapc(its
, &its
->collections
[cpu
], 1);
3323 its_send_invall(its
, &its
->collections
[cpu
]);
static void its_cpu_init_collections(void)
{
	struct its_node *its;

	raw_spin_lock(&its_lock);

	list_for_each_entry(its, &its_nodes, entry)
		its_cpu_init_collection(its);

	raw_spin_unlock(&its_lock);
}
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}

static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (GITS_BASER_TYPE(its->tables[i].val) == type)
			return &its->tables[i];
	}

	return NULL;
}
3369 static bool its_alloc_table_entry(struct its_node
*its
,
3370 struct its_baser
*baser
, u32 id
)
3376 /* Don't allow device id that exceeds single, flat table limit */
3377 esz
= GITS_BASER_ENTRY_SIZE(baser
->val
);
3378 if (!(baser
->val
& GITS_BASER_INDIRECT
))
3379 return (id
< (PAGE_ORDER_TO_SIZE(baser
->order
) / esz
));
3381 /* Compute 1st level table index & check if that exceeds table limit */
3382 idx
= id
>> ilog2(baser
->psz
/ esz
);
3383 if (idx
>= (PAGE_ORDER_TO_SIZE(baser
->order
) / GITS_LVL1_ENTRY_SIZE
))
3386 table
= baser
->base
;
3388 /* Allocate memory for 2nd level table */
3390 page
= its_alloc_pages_node(its
->numa_node
, GFP_KERNEL
| __GFP_ZERO
,
3391 get_order(baser
->psz
));
3395 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3396 if (!(baser
->val
& GITS_BASER_SHAREABILITY_MASK
))
3397 gic_flush_dcache_to_poc(page_address(page
), baser
->psz
);
3399 table
[idx
] = cpu_to_le64(page_to_phys(page
) | GITS_BASER_VALID
);
3401 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3402 if (!(baser
->val
& GITS_BASER_SHAREABILITY_MASK
))
3403 gic_flush_dcache_to_poc(table
+ idx
, GITS_LVL1_ENTRY_SIZE
);
3405 /* Ensure updated table contents are visible to ITS hardware */
static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
	struct its_baser *baser;

	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

	/* Don't allow device id that exceeds ITS hardware limit */
	if (!baser)
		return (ilog2(dev_id) < device_ids(its));

	return its_alloc_table_entry(its, baser, dev_id);
}
3425 static bool its_alloc_vpe_table(u32 vpe_id
)
3427 struct its_node
*its
;
3431 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3432 * could try and only do it on ITSs corresponding to devices
3433 * that have interrupts targeted at this VPE, but the
3434 * complexity becomes crazy (and you have tons of memory
3437 list_for_each_entry(its
, &its_nodes
, entry
) {
3438 struct its_baser
*baser
;
3443 baser
= its_get_baser(its
, GITS_BASER_TYPE_VCPU
);
3447 if (!its_alloc_table_entry(its
, baser
, vpe_id
))
3451 /* Non v4.1? No need to iterate RDs and go back early. */
3452 if (!gic_rdists
->has_rvpeid
)
3456 * Make sure the L2 tables are allocated for all copies of
3457 * the L1 table on *all* v4.1 RDs.
3459 for_each_possible_cpu(cpu
) {
3460 if (!allocate_vpe_l2_table(cpu
, vpe_id
))
3467 static struct its_device
*its_create_device(struct its_node
*its
, u32 dev_id
,
3468 int nvecs
, bool alloc_lpis
)
3470 struct its_device
*dev
;
3471 unsigned long *lpi_map
= NULL
;
3472 unsigned long flags
;
3473 u16
*col_map
= NULL
;
3480 if (!its_alloc_device_table(its
, dev_id
))
3483 if (WARN_ON(!is_power_of_2(nvecs
)))
3484 nvecs
= roundup_pow_of_two(nvecs
);
3487 * Even if the device wants a single LPI, the ITT must be
3488 * sized as a power of two (and you need at least one bit...).
3490 nr_ites
= max(2, nvecs
);
3491 sz
= nr_ites
* (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE
, its
->typer
) + 1);
3492 sz
= max(sz
, ITS_ITT_ALIGN
);
3494 itt
= itt_alloc_pool(its
->numa_node
, sz
);
3496 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
3499 lpi_map
= its_lpi_alloc(nvecs
, &lpi_base
, &nr_lpis
);
3501 col_map
= kcalloc(nr_lpis
, sizeof(*col_map
),
3504 col_map
= kcalloc(nr_ites
, sizeof(*col_map
), GFP_KERNEL
);
3509 if (!dev
|| !itt
|| !col_map
|| (!lpi_map
&& alloc_lpis
)) {
3511 itt_free_pool(itt
, sz
);
3512 bitmap_free(lpi_map
);
3517 gic_flush_dcache_to_poc(itt
, sz
);
3522 dev
->nr_ites
= nr_ites
;
3523 dev
->event_map
.lpi_map
= lpi_map
;
3524 dev
->event_map
.col_map
= col_map
;
3525 dev
->event_map
.lpi_base
= lpi_base
;
3526 dev
->event_map
.nr_lpis
= nr_lpis
;
3527 raw_spin_lock_init(&dev
->event_map
.vlpi_lock
);
3528 dev
->device_id
= dev_id
;
3529 INIT_LIST_HEAD(&dev
->entry
);
3531 raw_spin_lock_irqsave(&its
->lock
, flags
);
3532 list_add(&dev
->entry
, &its
->its_device_list
);
3533 raw_spin_unlock_irqrestore(&its
->lock
, flags
);
3535 /* Map device to its ITT */
3536 its_send_mapd(dev
, 1);
3541 static void its_free_device(struct its_device
*its_dev
)
3543 unsigned long flags
;
3545 raw_spin_lock_irqsave(&its_dev
->its
->lock
, flags
);
3546 list_del(&its_dev
->entry
);
3547 raw_spin_unlock_irqrestore(&its_dev
->its
->lock
, flags
);
3548 kfree(its_dev
->event_map
.col_map
);
3549 itt_free_pool(its_dev
->itt
, its_dev
->itt_sz
);
static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
{
	int idx;

	/* Find a free LPI region in lpi_map and allocate them. */
	idx = bitmap_find_free_region(dev->event_map.lpi_map,
				      dev->event_map.nr_lpis,
				      get_count_order(nvecs));
	if (idx < 0)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;

	return 0;
}
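/*
 * bitmap_find_free_region() hands out naturally aligned power-of-two
 * blocks, so a request for, say, 3 vectors consumes a block of 4
 * event IDs in the device's LPI map (get_count_order(3) == 2).
 */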
3569 static int its_msi_prepare(struct irq_domain
*domain
, struct device
*dev
,
3570 int nvec
, msi_alloc_info_t
*info
)
3572 struct its_node
*its
;
3573 struct its_device
*its_dev
;
3574 struct msi_domain_info
*msi_info
;
3579 * We ignore "dev" entirely, and rely on the dev_id that has
3580 * been passed via the scratchpad. This limits this domain's
3581 * usefulness to upper layers that definitely know that they
3582 * are built on top of the ITS.
3584 dev_id
= info
->scratchpad
[0].ul
;
3586 msi_info
= msi_get_domain_info(domain
);
3587 its
= msi_info
->data
;
3589 if (!gic_rdists
->has_direct_lpi
&&
3591 vpe_proxy
.dev
->its
== its
&&
3592 dev_id
== vpe_proxy
.dev
->device_id
) {
3593 /* Bad luck. Get yourself a better implementation */
3594 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3599 mutex_lock(&its
->dev_alloc_lock
);
3600 its_dev
= its_find_device(its
, dev_id
);
3603 * We already have seen this ID, probably through
3604 * another alias (PCI bridge of some sort). No need to
3605 * create the device.
3607 its_dev
->shared
= true;
3608 pr_debug("Reusing ITT for devID %x\n", dev_id
);
3612 its_dev
= its_create_device(its
, dev_id
, nvec
, true);
3618 if (info
->flags
& MSI_ALLOC_FLAGS_PROXY_DEVICE
)
3619 its_dev
->shared
= true;
3621 pr_debug("ITT %d entries, %d bits\n", nvec
, ilog2(nvec
));
3623 mutex_unlock(&its
->dev_alloc_lock
);
3624 info
->scratchpad
[0].ptr
= its_dev
;
static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};
3632 static int its_irq_gic_domain_alloc(struct irq_domain
*domain
,
3634 irq_hw_number_t hwirq
)
3636 struct irq_fwspec fwspec
;
3638 if (irq_domain_get_of_node(domain
->parent
)) {
3639 fwspec
.fwnode
= domain
->parent
->fwnode
;
3640 fwspec
.param_count
= 3;
3641 fwspec
.param
[0] = GIC_IRQ_TYPE_LPI
;
3642 fwspec
.param
[1] = hwirq
;
3643 fwspec
.param
[2] = IRQ_TYPE_EDGE_RISING
;
3644 } else if (is_fwnode_irqchip(domain
->parent
->fwnode
)) {
3645 fwspec
.fwnode
= domain
->parent
->fwnode
;
3646 fwspec
.param_count
= 2;
3647 fwspec
.param
[0] = hwirq
;
3648 fwspec
.param
[1] = IRQ_TYPE_EDGE_RISING
;
3653 return irq_domain_alloc_irqs_parent(domain
, virq
, 1, &fwspec
);
3656 static int its_irq_domain_alloc(struct irq_domain
*domain
, unsigned int virq
,
3657 unsigned int nr_irqs
, void *args
)
3659 msi_alloc_info_t
*info
= args
;
3660 struct its_device
*its_dev
= info
->scratchpad
[0].ptr
;
3661 struct its_node
*its
= its_dev
->its
;
3662 struct irq_data
*irqd
;
3663 irq_hw_number_t hwirq
;
3667 err
= its_alloc_device_irq(its_dev
, nr_irqs
, &hwirq
);
3671 err
= iommu_dma_prepare_msi(info
->desc
, its
->get_msi_base(its_dev
));
3675 for (i
= 0; i
< nr_irqs
; i
++) {
3676 err
= its_irq_gic_domain_alloc(domain
, virq
+ i
, hwirq
+ i
);
3680 irq_domain_set_hwirq_and_chip(domain
, virq
+ i
,
3681 hwirq
+ i
, &its_irq_chip
, its_dev
);
3682 irqd
= irq_get_irq_data(virq
+ i
);
3683 irqd_set_single_target(irqd
);
3684 irqd_set_affinity_on_activate(irqd
);
3685 irqd_set_resend_when_in_progress(irqd
);
3686 pr_debug("ID:%d pID:%d vID:%d\n",
3687 (int)(hwirq
+ i
- its_dev
->event_map
.lpi_base
),
3688 (int)(hwirq
+ i
), virq
+ i
);
3694 static int its_irq_domain_activate(struct irq_domain
*domain
,
3695 struct irq_data
*d
, bool reserve
)
3697 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
3698 u32 event
= its_get_event_id(d
);
3701 cpu
= its_select_cpu(d
, cpu_online_mask
);
3702 if (cpu
< 0 || cpu
>= nr_cpu_ids
)
3705 its_inc_lpi_count(d
, cpu
);
3706 its_dev
->event_map
.col_map
[event
] = cpu
;
3707 irq_data_update_effective_affinity(d
, cpumask_of(cpu
));
3709 /* Map the GIC IRQ and event to the device */
3710 its_send_mapti(its_dev
, d
->hwirq
, event
);
3714 static void its_irq_domain_deactivate(struct irq_domain
*domain
,
3717 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
3718 u32 event
= its_get_event_id(d
);
3720 its_dec_lpi_count(d
, its_dev
->event_map
.col_map
[event
]);
3721 /* Stop the delivery of interrupts */
3722 its_send_discard(its_dev
, event
);
3725 static void its_irq_domain_free(struct irq_domain
*domain
, unsigned int virq
,
3726 unsigned int nr_irqs
)
3728 struct irq_data
*d
= irq_domain_get_irq_data(domain
, virq
);
3729 struct its_device
*its_dev
= irq_data_get_irq_chip_data(d
);
3730 struct its_node
*its
= its_dev
->its
;
3733 bitmap_release_region(its_dev
->event_map
.lpi_map
,
3734 its_get_event_id(irq_domain_get_irq_data(domain
, virq
)),
3735 get_count_order(nr_irqs
));
3737 for (i
= 0; i
< nr_irqs
; i
++) {
3738 struct irq_data
*data
= irq_domain_get_irq_data(domain
,
3740 /* Nuke the entry in the domain */
3741 irq_domain_reset_irq_data(data
);
3744 mutex_lock(&its
->dev_alloc_lock
);
3747 * If all interrupts have been freed, start mopping the
3748 * floor. This is conditioned on the device not being shared.
3750 if (!its_dev
->shared
&&
3751 bitmap_empty(its_dev
->event_map
.lpi_map
,
3752 its_dev
->event_map
.nr_lpis
)) {
3753 its_lpi_free(its_dev
->event_map
.lpi_map
,
3754 its_dev
->event_map
.lpi_base
,
3755 its_dev
->event_map
.nr_lpis
);
3757 /* Unmap device/itt */
3758 its_send_mapd(its_dev
, 0);
3759 its_free_device(its_dev
);
3762 mutex_unlock(&its
->dev_alloc_lock
);
3764 irq_domain_free_irqs_parent(domain
, virq
, nr_irqs
);
static const struct irq_domain_ops its_domain_ops = {
	.select			= msi_lib_irq_domain_select,
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};
3778 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3779 * likely), the only way to perform an invalidate is to use a fake
3780 * device to issue an INV command, implying that the LPI has first
3781 * been mapped to some event on that device. Since this is not exactly
3782 * cheap, we try to keep that mapping around as long as possible, and
3783 * only issue an UNMAP if we're short on available slots.
3785 * Broken by design(tm).
3787 * GICv4.1, on the other hand, mandates that we're able to invalidate
3788 * by writing to a MMIO register. It doesn't implement the whole of
3789 * DirectLPI, but that's good enough. And most of the time, we don't
3790 * even have to invalidate anything, as the redistributor can be told
3791 * whether to generate a doorbell or not (we thus leave it enabled,
3794 static void its_vpe_db_proxy_unmap_locked(struct its_vpe
*vpe
)
3796 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3797 if (gic_rdists
->has_rvpeid
)
3800 /* Already unmapped? */
3801 if (vpe
->vpe_proxy_event
== -1)
3804 its_send_discard(vpe_proxy
.dev
, vpe
->vpe_proxy_event
);
3805 vpe_proxy
.vpes
[vpe
->vpe_proxy_event
] = NULL
;
3808 * We don't track empty slots at all, so let's move the
3809 * next_victim pointer if we can quickly reuse that slot
3810 * instead of nuking an existing entry. Not clear that this is
3811 * always a win though, and this might just generate a ripple
3812 * effect... Let's just hope VPEs don't migrate too often.
3814 if (vpe_proxy
.vpes
[vpe_proxy
.next_victim
])
3815 vpe_proxy
.next_victim
= vpe
->vpe_proxy_event
;
3817 vpe
->vpe_proxy_event
= -1;
3820 static void its_vpe_db_proxy_unmap(struct its_vpe
*vpe
)
3822 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3823 if (gic_rdists
->has_rvpeid
)
3826 if (!gic_rdists
->has_direct_lpi
) {
3827 unsigned long flags
;
3829 raw_spin_lock_irqsave(&vpe_proxy
.lock
, flags
);
3830 its_vpe_db_proxy_unmap_locked(vpe
);
3831 raw_spin_unlock_irqrestore(&vpe_proxy
.lock
, flags
);
3835 static void its_vpe_db_proxy_map_locked(struct its_vpe
*vpe
)
3837 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3838 if (gic_rdists
->has_rvpeid
)
3841 /* Already mapped? */
3842 if (vpe
->vpe_proxy_event
!= -1)
3845 /* This slot was already allocated. Kick the other VPE out. */
3846 if (vpe_proxy
.vpes
[vpe_proxy
.next_victim
])
3847 its_vpe_db_proxy_unmap_locked(vpe_proxy
.vpes
[vpe_proxy
.next_victim
]);
3849 /* Map the new VPE instead */
3850 vpe_proxy
.vpes
[vpe_proxy
.next_victim
] = vpe
;
3851 vpe
->vpe_proxy_event
= vpe_proxy
.next_victim
;
3852 vpe_proxy
.next_victim
= (vpe_proxy
.next_victim
+ 1) % vpe_proxy
.dev
->nr_ites
;
3854 vpe_proxy
.dev
->event_map
.col_map
[vpe
->vpe_proxy_event
] = vpe
->col_idx
;
3855 its_send_mapti(vpe_proxy
.dev
, vpe
->vpe_db_lpi
, vpe
->vpe_proxy_event
);
3858 static void its_vpe_db_proxy_move(struct its_vpe
*vpe
, int from
, int to
)
3860 unsigned long flags
;
3861 struct its_collection
*target_col
;
3863 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3864 if (gic_rdists
->has_rvpeid
)
3867 if (gic_rdists
->has_direct_lpi
) {
3868 void __iomem
*rdbase
;
3870 rdbase
= per_cpu_ptr(gic_rdists
->rdist
, from
)->rd_base
;
3871 gic_write_lpir(vpe
->vpe_db_lpi
, rdbase
+ GICR_CLRLPIR
);
3872 wait_for_syncr(rdbase
);
3877 raw_spin_lock_irqsave(&vpe_proxy
.lock
, flags
);
3879 its_vpe_db_proxy_map_locked(vpe
);
3881 target_col
= &vpe_proxy
.dev
->its
->collections
[to
];
3882 its_send_movi(vpe_proxy
.dev
, target_col
, vpe
->vpe_proxy_event
);
3883 vpe_proxy
.dev
->event_map
.col_map
[vpe
->vpe_proxy_event
] = to
;
3885 raw_spin_unlock_irqrestore(&vpe_proxy
.lock
, flags
);
3888 static void its_vpe_4_1_invall_locked(int cpu
, struct its_vpe
*vpe
)
3890 void __iomem
*rdbase
;
3893 val
= GICR_INVALLR_V
;
3894 val
|= FIELD_PREP(GICR_INVALLR_VPEID
, vpe
->vpe_id
);
3896 guard(raw_spinlock
)(&gic_data_rdist_cpu(cpu
)->rd_lock
);
3897 rdbase
= per_cpu_ptr(gic_rdists
->rdist
, cpu
)->rd_base
;
3898 gic_write_lpir(val
, rdbase
+ GICR_INVALLR
);
3899 wait_for_syncr(rdbase
);
3902 static int its_vpe_set_affinity(struct irq_data
*d
,
3903 const struct cpumask
*mask_val
,
3906 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
3907 unsigned int from
, cpu
= nr_cpu_ids
;
3908 struct cpumask
*table_mask
;
3909 struct its_node
*its
;
3910 unsigned long flags
;
3913 * Check if we're racing against a VPE being destroyed, for
3914 * which we don't want to allow a VMOVP.
3916 if (!atomic_read(&vpe
->vmapp_count
)) {
3917 if (gic_requires_eager_mapping())
3921 * If we lazily map the VPEs, this isn't an error and
3922 * we can exit cleanly.
3924 cpu
= cpumask_first(mask_val
);
3925 irq_data_update_effective_affinity(d
, cpumask_of(cpu
));
3926 return IRQ_SET_MASK_OK_DONE
;
3930 * Changing affinity is mega expensive, so let's be as lazy as
3931 * we can and only do it if we really have to. Also, if mapped
3932 * into the proxy device, we need to move the doorbell
3933 * interrupt to its new location.
3935 * Another thing is that changing the affinity of a vPE affects
3936 * *other interrupts* such as all the vLPIs that are routed to
3937 * this vPE. This means that the irq_desc lock is not enough to
3938 * protect us, and that we must ensure nobody samples vpe->col_idx
3939 * during the update, hence the lock below which must also be
3940 * taken on any vLPI handling path that evaluates vpe->col_idx.
3942 * Finally, we must protect ourselves against concurrent updates of
3943 * the mapping state on this VM should the ITS list be in use (see
3944 * the shortcut in its_send_vmovp() otherewise).
3947 raw_spin_lock(&vpe
->its_vm
->vmapp_lock
);
3949 from
= vpe_to_cpuid_lock(vpe
, &flags
);
3950 table_mask
= gic_data_rdist_cpu(from
)->vpe_table_mask
;
3953 * If we are offered another CPU in the same GICv4.1 ITS
3954 * affinity, pick this one. Otherwise, any CPU will do.
3957 cpu
= cpumask_any_and(mask_val
, table_mask
);
3958 if (cpu
< nr_cpu_ids
) {
3959 if (cpumask_test_cpu(from
, mask_val
) &&
3960 cpumask_test_cpu(from
, table_mask
))
3963 cpu
= cpumask_first(mask_val
);
3971 its_send_vmovp(vpe
);
3973 its
= find_4_1_its();
3974 if (its
&& its
->flags
& ITS_FLAGS_WORKAROUND_HISILICON_162100801
)
3975 its_vpe_4_1_invall_locked(cpu
, vpe
);
3977 its_vpe_db_proxy_move(vpe
, from
, cpu
);
3980 irq_data_update_effective_affinity(d
, cpumask_of(cpu
));
3981 vpe_to_cpuid_unlock(vpe
, flags
);
3984 raw_spin_unlock(&vpe
->its_vm
->vmapp_lock
);
3986 return IRQ_SET_MASK_OK_DONE
;
3989 static void its_wait_vpt_parse_complete(void)
3991 void __iomem
*vlpi_base
= gic_data_rdist_vlpi_base();
3994 if (!gic_rdists
->has_vpend_valid_dirty
)
3997 WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base
+ GICR_VPENDBASER
,
3999 !(val
& GICR_VPENDBASER_Dirty
),
4003 static void its_vpe_schedule(struct its_vpe
*vpe
)
4005 void __iomem
*vlpi_base
= gic_data_rdist_vlpi_base();
4008 /* Schedule the VPE */
4009 val
= virt_to_phys(page_address(vpe
->its_vm
->vprop_page
)) &
4010 GENMASK_ULL(51, 12);
4011 val
|= (LPI_NRBITS
- 1) & GICR_VPROPBASER_IDBITS_MASK
;
4012 if (rdists_support_shareable()) {
4013 val
|= GICR_VPROPBASER_RaWb
;
4014 val
|= GICR_VPROPBASER_InnerShareable
;
4016 gicr_write_vpropbaser(val
, vlpi_base
+ GICR_VPROPBASER
);
4018 val
= virt_to_phys(page_address(vpe
->vpt_page
)) &
4019 GENMASK_ULL(51, 16);
4020 if (rdists_support_shareable()) {
4021 val
|= GICR_VPENDBASER_RaWaWb
;
4022 val
|= GICR_VPENDBASER_InnerShareable
;
4025 * There is no good way of finding out if the pending table is
4026 * empty as we can race against the doorbell interrupt very
4027 * easily. So in the end, vpe->pending_last is only an
4028 * indication that the vcpu has something pending, not one
4029 * that the pending table is empty. A good implementation
4030 * would be able to read its coarse map pretty quickly anyway,
4031 * making this a tolerable issue.
4033 val
|= GICR_VPENDBASER_PendingLast
;
4034 val
|= vpe
->idai
? GICR_VPENDBASER_IDAI
: 0;
4035 val
|= GICR_VPENDBASER_Valid
;
4036 gicr_write_vpendbaser(val
, vlpi_base
+ GICR_VPENDBASER
);
4039 static void its_vpe_deschedule(struct its_vpe
*vpe
)
4041 void __iomem
*vlpi_base
= gic_data_rdist_vlpi_base();
4044 val
= its_clear_vpend_valid(vlpi_base
, 0, 0);
4046 vpe
->idai
= !!(val
& GICR_VPENDBASER_IDAI
);
4047 vpe
->pending_last
= !!(val
& GICR_VPENDBASER_PendingLast
);
4050 static void its_vpe_invall(struct its_vpe
*vpe
)
4052 struct its_node
*its
;
4054 guard(raw_spinlock_irqsave
)(&vpe
->its_vm
->vmapp_lock
);
4056 list_for_each_entry(its
, &its_nodes
, entry
) {
4060 if (its_list_map
&& !vpe
->its_vm
->vlpi_count
[its
->list_nr
])
4064 * Sending a VINVALL to a single ITS is enough, as all
4065 * we need is to reach the redistributors.
4067 its_send_vinvall(its
, vpe
);
4072 static int its_vpe_set_vcpu_affinity(struct irq_data
*d
, void *vcpu_info
)
4074 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4075 struct its_cmd_info
*info
= vcpu_info
;
4077 switch (info
->cmd_type
) {
4079 its_vpe_schedule(vpe
);
4082 case DESCHEDULE_VPE
:
4083 its_vpe_deschedule(vpe
);
4087 its_wait_vpt_parse_complete();
4091 its_vpe_invall(vpe
);
4099 static void its_vpe_send_cmd(struct its_vpe
*vpe
,
4100 void (*cmd
)(struct its_device
*, u32
))
4102 unsigned long flags
;
4104 raw_spin_lock_irqsave(&vpe_proxy
.lock
, flags
);
4106 its_vpe_db_proxy_map_locked(vpe
);
4107 cmd(vpe_proxy
.dev
, vpe
->vpe_proxy_event
);
4109 raw_spin_unlock_irqrestore(&vpe_proxy
.lock
, flags
);
4112 static void its_vpe_send_inv(struct irq_data
*d
)
4114 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4116 if (gic_rdists
->has_direct_lpi
)
4117 __direct_lpi_inv(d
, d
->parent_data
->hwirq
);
4119 its_vpe_send_cmd(vpe
, its_send_inv
);
4122 static void its_vpe_mask_irq(struct irq_data
*d
)
4125 * We need to unmask the LPI, which is described by the parent
4126 * irq_data. Instead of calling into the parent (which won't
4127 * exactly do the right thing, let's simply use the
4128 * parent_data pointer. Yes, I'm naughty.
4130 lpi_write_config(d
->parent_data
, LPI_PROP_ENABLED
, 0);
4131 its_vpe_send_inv(d
);
4134 static void its_vpe_unmask_irq(struct irq_data
*d
)
4136 /* Same hack as above... */
4137 lpi_write_config(d
->parent_data
, 0, LPI_PROP_ENABLED
);
4138 its_vpe_send_inv(d
);
4141 static int its_vpe_set_irqchip_state(struct irq_data
*d
,
4142 enum irqchip_irq_state which
,
4145 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4147 if (which
!= IRQCHIP_STATE_PENDING
)
4150 if (gic_rdists
->has_direct_lpi
) {
4151 void __iomem
*rdbase
;
4153 rdbase
= per_cpu_ptr(gic_rdists
->rdist
, vpe
->col_idx
)->rd_base
;
4155 gic_write_lpir(vpe
->vpe_db_lpi
, rdbase
+ GICR_SETLPIR
);
4157 gic_write_lpir(vpe
->vpe_db_lpi
, rdbase
+ GICR_CLRLPIR
);
4158 wait_for_syncr(rdbase
);
4162 its_vpe_send_cmd(vpe
, its_send_int
);
4164 its_vpe_send_cmd(vpe
, its_send_clear
);
4170 static int its_vpe_retrigger(struct irq_data
*d
)
4172 return !its_vpe_set_irqchip_state(d
, IRQCHIP_STATE_PENDING
, true);
static struct irq_chip its_vpe_irq_chip = {
	.name			= "GICv4-vpe",
	.irq_mask		= its_vpe_mask_irq,
	.irq_unmask		= its_vpe_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_vpe_set_affinity,
	.irq_retrigger		= its_vpe_retrigger,
	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
};
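/*
 * Note that .irq_retrigger is simply wired to the PENDING irqchip
 * state: retriggering a vPE doorbell means making it pending again,
 * either via GICR_SETLPIR when DirectLPI is available, or via an INT
 * command issued through the proxy device otherwise.
 */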
4186 static struct its_node
*find_4_1_its(void)
4188 static struct its_node
*its
= NULL
;
4191 list_for_each_entry(its
, &its_nodes
, entry
) {
4203 static void its_vpe_4_1_send_inv(struct irq_data
*d
)
4205 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4206 struct its_node
*its
;
4209 * GICv4.1 wants doorbells to be invalidated using the
4210 * INVDB command in order to be broadcast to all RDs. Send
4211 * it to the first valid ITS, and let the HW do its magic.
4213 its
= find_4_1_its();
4215 its_send_invdb(its
, vpe
);
4218 static void its_vpe_4_1_mask_irq(struct irq_data
*d
)
4220 lpi_write_config(d
->parent_data
, LPI_PROP_ENABLED
, 0);
4221 its_vpe_4_1_send_inv(d
);
4224 static void its_vpe_4_1_unmask_irq(struct irq_data
*d
)
4226 lpi_write_config(d
->parent_data
, 0, LPI_PROP_ENABLED
);
4227 its_vpe_4_1_send_inv(d
);
4230 static void its_vpe_4_1_schedule(struct its_vpe
*vpe
,
4231 struct its_cmd_info
*info
)
4233 void __iomem
*vlpi_base
= gic_data_rdist_vlpi_base();
4236 /* Schedule the VPE */
4237 val
|= GICR_VPENDBASER_Valid
;
4238 val
|= info
->g0en
? GICR_VPENDBASER_4_1_VGRP0EN
: 0;
4239 val
|= info
->g1en
? GICR_VPENDBASER_4_1_VGRP1EN
: 0;
4240 val
|= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID
, vpe
->vpe_id
);
4242 gicr_write_vpendbaser(val
, vlpi_base
+ GICR_VPENDBASER
);
4245 static void its_vpe_4_1_deschedule(struct its_vpe
*vpe
,
4246 struct its_cmd_info
*info
)
4248 void __iomem
*vlpi_base
= gic_data_rdist_vlpi_base();
4252 unsigned long flags
;
4255 * vPE is going to block: make the vPE non-resident with
4256 * PendingLast clear and DB set. The GIC guarantees that if
4257 * we read-back PendingLast clear, then a doorbell will be
4258 * delivered when an interrupt comes.
4260 * Note the locking to deal with the concurrent update of
4261 * pending_last from the doorbell interrupt handler that can
4264 raw_spin_lock_irqsave(&vpe
->vpe_lock
, flags
);
4265 val
= its_clear_vpend_valid(vlpi_base
,
4266 GICR_VPENDBASER_PendingLast
,
4267 GICR_VPENDBASER_4_1_DB
);
4268 vpe
->pending_last
= !!(val
& GICR_VPENDBASER_PendingLast
);
4269 raw_spin_unlock_irqrestore(&vpe
->vpe_lock
, flags
);
4272 * We're not blocking, so just make the vPE non-resident
4273 * with PendingLast set, indicating that we'll be back.
4275 val
= its_clear_vpend_valid(vlpi_base
,
4277 GICR_VPENDBASER_PendingLast
);
4278 vpe
->pending_last
= true;
4282 static void its_vpe_4_1_invall(struct its_vpe
*vpe
)
4284 unsigned long flags
;
4287 /* Target the redistributor this vPE is currently known on */
4288 cpu
= vpe_to_cpuid_lock(vpe
, &flags
);
4289 its_vpe_4_1_invall_locked(cpu
, vpe
);
4290 vpe_to_cpuid_unlock(vpe
, flags
);
4293 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data
*d
, void *vcpu_info
)
4295 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4296 struct its_cmd_info
*info
= vcpu_info
;
4298 switch (info
->cmd_type
) {
4300 its_vpe_4_1_schedule(vpe
, info
);
4303 case DESCHEDULE_VPE
:
4304 its_vpe_4_1_deschedule(vpe
, info
);
4308 its_wait_vpt_parse_complete();
4312 its_vpe_4_1_invall(vpe
);
static struct irq_chip its_vpe_4_1_irq_chip = {
	.name			= "GICv4.1-vpe",
	.irq_mask		= its_vpe_4_1_mask_irq,
	.irq_unmask		= its_vpe_4_1_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_vpe_set_affinity,
	.irq_set_vcpu_affinity	= its_vpe_4_1_set_vcpu_affinity,
};
4329 static void its_configure_sgi(struct irq_data
*d
, bool clear
)
4331 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4332 struct its_cmd_desc desc
;
4334 desc
.its_vsgi_cmd
.vpe
= vpe
;
4335 desc
.its_vsgi_cmd
.sgi
= d
->hwirq
;
4336 desc
.its_vsgi_cmd
.priority
= vpe
->sgi_config
[d
->hwirq
].priority
;
4337 desc
.its_vsgi_cmd
.enable
= vpe
->sgi_config
[d
->hwirq
].enabled
;
4338 desc
.its_vsgi_cmd
.group
= vpe
->sgi_config
[d
->hwirq
].group
;
4339 desc
.its_vsgi_cmd
.clear
= clear
;
4342 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4343 * destination VPE is mapped there. Since we map them eagerly at
4344 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4346 its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd
, &desc
);
4349 static void its_sgi_mask_irq(struct irq_data
*d
)
4351 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4353 vpe
->sgi_config
[d
->hwirq
].enabled
= false;
4354 its_configure_sgi(d
, false);
4357 static void its_sgi_unmask_irq(struct irq_data
*d
)
4359 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4361 vpe
->sgi_config
[d
->hwirq
].enabled
= true;
4362 its_configure_sgi(d
, false);
4365 static int its_sgi_set_affinity(struct irq_data
*d
,
4366 const struct cpumask
*mask_val
,
4370 * There is no notion of affinity for virtual SGIs, at least
4371 * not on the host (since they can only be targeting a vPE).
4372 * Tell the kernel we've done whatever it asked for.
4374 irq_data_update_effective_affinity(d
, mask_val
);
4375 return IRQ_SET_MASK_OK
;
4378 static int its_sgi_set_irqchip_state(struct irq_data
*d
,
4379 enum irqchip_irq_state which
,
4382 if (which
!= IRQCHIP_STATE_PENDING
)
4386 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4387 struct its_node
*its
= find_4_1_its();
4390 val
= FIELD_PREP(GITS_SGIR_VPEID
, vpe
->vpe_id
);
4391 val
|= FIELD_PREP(GITS_SGIR_VINTID
, d
->hwirq
);
4392 writeq_relaxed(val
, its
->sgir_base
+ GITS_SGIR
- SZ_128K
);
4394 its_configure_sgi(d
, true);
4400 static int its_sgi_get_irqchip_state(struct irq_data
*d
,
4401 enum irqchip_irq_state which
, bool *val
)
4403 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4405 unsigned long flags
;
4406 u32 count
= 1000000; /* 1s! */
4410 if (which
!= IRQCHIP_STATE_PENDING
)
4414 * Locking galore! We can race against two different events:
4416 * - Concurrent vPE affinity change: we must make sure it cannot
4417 * happen, or we'll talk to the wrong redistributor. This is
4418 * identical to what happens with vLPIs.
4420 * - Concurrent VSGIPENDR access: As it involves accessing two
4421 * MMIO registers, this must be made atomic one way or another.
4423 cpu
= vpe_to_cpuid_lock(vpe
, &flags
);
4424 raw_spin_lock(&gic_data_rdist_cpu(cpu
)->rd_lock
);
4425 base
= gic_data_rdist_cpu(cpu
)->rd_base
+ SZ_128K
;
4426 writel_relaxed(vpe
->vpe_id
, base
+ GICR_VSGIR
);
4428 status
= readl_relaxed(base
+ GICR_VSGIPENDR
);
4429 if (!(status
& GICR_VSGIPENDR_BUSY
))
4434 pr_err_ratelimited("Unable to get SGI status\n");
4442 raw_spin_unlock(&gic_data_rdist_cpu(cpu
)->rd_lock
);
4443 vpe_to_cpuid_unlock(vpe
, flags
);
4448 *val
= !!(status
& (1 << d
->hwirq
));
4453 static int its_sgi_set_vcpu_affinity(struct irq_data
*d
, void *vcpu_info
)
4455 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4456 struct its_cmd_info
*info
= vcpu_info
;
4458 switch (info
->cmd_type
) {
4459 case PROP_UPDATE_VSGI
:
4460 vpe
->sgi_config
[d
->hwirq
].priority
= info
->priority
;
4461 vpe
->sgi_config
[d
->hwirq
].group
= info
->group
;
4462 its_configure_sgi(d
, false);
static struct irq_chip its_sgi_irq_chip = {
	.name			= "GICv4.1-sgi",
	.irq_mask		= its_sgi_mask_irq,
	.irq_unmask		= its_sgi_unmask_irq,
	.irq_set_affinity	= its_sgi_set_affinity,
	.irq_set_irqchip_state	= its_sgi_set_irqchip_state,
	.irq_get_irqchip_state	= its_sgi_get_irqchip_state,
	.irq_set_vcpu_affinity	= its_sgi_set_vcpu_affinity,
};
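/*
 * Unlike the vPE doorbells, vSGIs have no meaningful host-side
 * affinity (they always target a vPE), so .irq_set_affinity above
 * only records what it was asked for and reports success.
 */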
4480 static int its_sgi_irq_domain_alloc(struct irq_domain
*domain
,
4481 unsigned int virq
, unsigned int nr_irqs
,
4484 struct its_vpe
*vpe
= args
;
4487 /* Yes, we do want 16 SGIs */
4488 WARN_ON(nr_irqs
!= 16);
4490 for (i
= 0; i
< 16; i
++) {
4491 vpe
->sgi_config
[i
].priority
= 0;
4492 vpe
->sgi_config
[i
].enabled
= false;
4493 vpe
->sgi_config
[i
].group
= false;
4495 irq_domain_set_hwirq_and_chip(domain
, virq
+ i
, i
,
4496 &its_sgi_irq_chip
, vpe
);
4497 irq_set_status_flags(virq
+ i
, IRQ_DISABLE_UNLAZY
);
4503 static void its_sgi_irq_domain_free(struct irq_domain
*domain
,
4505 unsigned int nr_irqs
)
4510 static int its_sgi_irq_domain_activate(struct irq_domain
*domain
,
4511 struct irq_data
*d
, bool reserve
)
4513 /* Write out the initial SGI configuration */
4514 its_configure_sgi(d
, false);
4518 static void its_sgi_irq_domain_deactivate(struct irq_domain
*domain
,
4521 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(d
);
4524 * The VSGI command is awkward:
4526 * - To change the configuration, CLEAR must be set to false,
4527 * leaving the pending bit unchanged.
4528 * - To clear the pending bit, CLEAR must be set to true, leaving
4529 * the configuration unchanged.
4531 * You just can't do both at once, hence the two commands below.
4533 vpe
->sgi_config
[d
->hwirq
].enabled
= false;
4534 its_configure_sgi(d
, false);
4535 its_configure_sgi(d
, true);
static const struct irq_domain_ops its_sgi_domain_ops = {
	.alloc		= its_sgi_irq_domain_alloc,
	.free		= its_sgi_irq_domain_free,
	.activate	= its_sgi_irq_domain_activate,
	.deactivate	= its_sgi_irq_domain_deactivate,
};
static int its_vpe_id_alloc(void)
{
	return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL);
}

static void its_vpe_id_free(u16 id)
{
	ida_free(&its_vpeid_ida, id);
}
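/*
 * vPE IDs come from a single IDA bounded by ITS_MAX_VPEID, so the ID
 * space is shared by every VM on the host and freed IDs are
 * immediately available for reuse.
 */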
4555 static int its_vpe_init(struct its_vpe
*vpe
)
4557 struct page
*vpt_page
;
4560 /* Allocate vpe_id */
4561 vpe_id
= its_vpe_id_alloc();
4566 vpt_page
= its_allocate_pending_table(GFP_KERNEL
);
4568 its_vpe_id_free(vpe_id
);
4572 if (!its_alloc_vpe_table(vpe_id
)) {
4573 its_vpe_id_free(vpe_id
);
4574 its_free_pending_table(vpt_page
);
4578 raw_spin_lock_init(&vpe
->vpe_lock
);
4579 vpe
->vpe_id
= vpe_id
;
4580 vpe
->vpt_page
= vpt_page
;
4581 atomic_set(&vpe
->vmapp_count
, 0);
4582 if (!gic_rdists
->has_rvpeid
)
4583 vpe
->vpe_proxy_event
= -1;
4588 static void its_vpe_teardown(struct its_vpe
*vpe
)
4590 its_vpe_db_proxy_unmap(vpe
);
4591 its_vpe_id_free(vpe
->vpe_id
);
4592 its_free_pending_table(vpe
->vpt_page
);
4595 static void its_vpe_irq_domain_free(struct irq_domain
*domain
,
4597 unsigned int nr_irqs
)
4599 struct its_vm
*vm
= domain
->host_data
;
4602 irq_domain_free_irqs_parent(domain
, virq
, nr_irqs
);
4604 for (i
= 0; i
< nr_irqs
; i
++) {
4605 struct irq_data
*data
= irq_domain_get_irq_data(domain
,
4607 struct its_vpe
*vpe
= irq_data_get_irq_chip_data(data
);
4609 BUG_ON(vm
!= vpe
->its_vm
);
4611 clear_bit(data
->hwirq
, vm
->db_bitmap
);
4612 its_vpe_teardown(vpe
);
4613 irq_domain_reset_irq_data(data
);
4616 if (bitmap_empty(vm
->db_bitmap
, vm
->nr_db_lpis
)) {
4617 its_lpi_free(vm
->db_bitmap
, vm
->db_lpi_base
, vm
->nr_db_lpis
);
4618 its_free_prop_table(vm
->vprop_page
);
4622 static int its_vpe_irq_domain_alloc(struct irq_domain
*domain
, unsigned int virq
,
4623 unsigned int nr_irqs
, void *args
)
4625 struct irq_chip
*irqchip
= &its_vpe_irq_chip
;
4626 struct its_vm
*vm
= args
;
4627 unsigned long *bitmap
;
4628 struct page
*vprop_page
;
4629 int base
, nr_ids
, i
, err
= 0;
4631 bitmap
= its_lpi_alloc(roundup_pow_of_two(nr_irqs
), &base
, &nr_ids
);
4635 if (nr_ids
< nr_irqs
) {
4636 its_lpi_free(bitmap
, base
, nr_ids
);
4640 vprop_page
= its_allocate_prop_table(GFP_KERNEL
);
4642 its_lpi_free(bitmap
, base
, nr_ids
);
4646 vm
->db_bitmap
= bitmap
;
4647 vm
->db_lpi_base
= base
;
4648 vm
->nr_db_lpis
= nr_ids
;
4649 vm
->vprop_page
= vprop_page
;
4650 raw_spin_lock_init(&vm
->vmapp_lock
);
4652 if (gic_rdists
->has_rvpeid
)
4653 irqchip
= &its_vpe_4_1_irq_chip
;
4655 for (i
= 0; i
< nr_irqs
; i
++) {
4656 vm
->vpes
[i
]->vpe_db_lpi
= base
+ i
;
4657 err
= its_vpe_init(vm
->vpes
[i
]);
4660 err
= its_irq_gic_domain_alloc(domain
, virq
+ i
,
4661 vm
->vpes
[i
]->vpe_db_lpi
);
4664 irq_domain_set_hwirq_and_chip(domain
, virq
+ i
, i
,
4665 irqchip
, vm
->vpes
[i
]);
4667 irqd_set_resend_when_in_progress(irq_get_irq_data(virq
+ i
));
4671 its_vpe_irq_domain_free(domain
, virq
, i
);
static int its_vpe_irq_domain_activate(struct irq_domain *domain,
				       struct irq_data *d, bool reserve)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/* Map the VPE to the first possible CPU */
	vpe->col_idx = cpumask_first(cpu_online_mask);
	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

	/*
	 * If we use the list map, we issue VMAPP on demand... Unless
	 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
	 * so that VSGIs can work.
	 */
	if (!gic_requires_eager_mapping())
		return 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		its_send_vmapp(its, vpe, true);
		its_send_vinvall(its, vpe);
	}

	return 0;
}

static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
					  struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/*
	 * If we use the list map on GICv4.0, we unmap the VPE once no
	 * VLPIs are associated with the VM.
	 */
	if (!gic_requires_eager_mapping())
		return;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		its_send_vmapp(its, vpe, false);
	}

	/*
	 * There may be a direct read to the VPT after unmapping the
	 * vPE, to guarantee the validity of this, we make the VPT
	 * memory coherent with the CPU caches here.
	 */
	if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
		gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
					LPI_PENDBASE_SZ);
}
static const struct irq_domain_ops its_vpe_domain_ops = {
	.alloc			= its_vpe_irq_domain_alloc,
	.free			= its_vpe_irq_domain_free,
	.activate		= its_vpe_irq_domain_activate,
	.deactivate		= its_vpe_irq_domain_deactivate,
};
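/*
 * These ops (together with its_sgi_domain_ops on GICv4.1) are handed to
 * its_init_v4() from its_init() at the bottom of this file, which is how
 * the GICv4 layer ends up allocating vPEs and their doorbells through
 * this domain.
 */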
static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	/*
	 * GIC architecture specification requires the ITS to be both
	 * disabled and quiescent for writes to GITS_BASER<n> or
	 * GITS_CBASER to not have UNPREDICTABLE results.
	 */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}
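/*
 * its_force_quiescent() is relied upon wherever CBASER/BASER<n> are
 * about to be (re)programmed: at probe time via its_map_one() and again
 * on suspend in its_save_disable(). The poll above gives the ITS roughly
 * one second (1,000,000 iterations of udelay(1)) to drain before
 * giving up.
 */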
static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	/* erratum 22375: only alloc 8MB table size (20 bits) */
	its->typer &= ~GITS_TYPER_DEVBITS;
	its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;

	return true;
}

static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;

	return true;
}

static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
	struct its_node *its = data;

	/* On QDF2400, the size of the ITE is 16Bytes */
	its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
	its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);

	return true;
}
static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	/*
	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
	 * which maps 32-bit writes targeted at a separate window of
	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
	 * with device ID taken from bits [device_id_bits + 1:2] of
	 * the window offset.
	 */
	return its->pre_its_base + (its_dev->device_id << 2);
}
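/*
 * For instance, a device with device_id 5 gets its doorbell at
 * pre_its_base + (5 << 2) = pre_its_base + 0x14, and the pre-ITS
 * recovers the device ID from bits [device_id_bits + 1:2] of that
 * window offset, i.e. 0x14 >> 2 = 5.
 */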
static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
{
	struct its_node *its = data;
	u32 pre_its_window[2];
	u32 ids;

	if (!fwnode_property_read_u32_array(its->fwnode_handle,
					    "socionext,synquacer-pre-its",
					    pre_its_window,
					    ARRAY_SIZE(pre_its_window))) {

		its->pre_its_base = pre_its_window[0];
		its->get_msi_base = its_irq_get_msi_base_pre_its;

		ids = ilog2(pre_its_window[1]) - 2;
		if (device_ids(its) > ids) {
			its->typer &= ~GITS_TYPER_DEVBITS;
			its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
		}

		/* the pre-ITS breaks isolation, so disable MSI remapping */
		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
		return true;
	}
	return false;
}
static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
{
	struct its_node *its = data;

	/*
	 * Hip07 insists on using the wrong address for the VLPI
	 * page. Trick it into doing the right thing...
	 */
	its->vlpi_redist_offset = SZ_128K;
	return true;
}

static bool __maybe_unused its_enable_rk3588001(void *data)
{
	struct its_node *its = data;

	if (!of_machine_is_compatible("rockchip,rk3588") &&
	    !of_machine_is_compatible("rockchip,rk3588s"))
		return false;

	its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
	gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

	return true;
}

static bool its_set_non_coherent(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
	return true;
}

static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_HISILICON_162100801;

	return true;
}
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
	{
		.desc	= "ITS: QDF2400 erratum 0065",
		.iidr	= 0x00001070,	/* QDF2400 ITS rev 1.x */
		.init	= its_enable_quirk_qdf2400_e0065,
	},
#endif
#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
	{
		/*
		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
		 * implementation, but with a 'pre-ITS' added that requires
		 * special handling in software.
		 */
		.desc	= "ITS: Socionext Synquacer pre-ITS",
		.init	= its_enable_quirk_socionext_synquacer,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161600802
	{
		.desc	= "ITS: Hip07 erratum 161600802",
		.init	= its_enable_quirk_hip07_161600802,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_162100801
	{
		.desc	= "ITS: Hip09 erratum 162100801",
		.init	= its_enable_quirk_hip09_162100801,
	},
#endif
#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001
	{
		.desc	= "ITS: Rockchip erratum RK3588001",
		.init	= its_enable_rk3588001,
	},
#endif
	{
		.desc	= "ITS: non-coherent attribute",
		.property = "dma-noncoherent",
		.init	= its_set_non_coherent,
	},
	{
	}
};
static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);

	if (is_of_node(its->fwnode_handle))
		gic_enable_of_quirks(to_of_node(its->fwnode_handle),
				     its_quirks, its);
}
static int its_save_disable(void)
{
	struct its_node *its;
	int err = 0;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;

		base = its->base;
		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
		err = its_force_quiescent(base);
		if (err) {
			pr_err("ITS@%pa: failed to quiesce: %d\n",
			       &its->phys_base, err);
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
			goto err;
		}

		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
	}

err:
	if (err) {
		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
			void __iomem *base;

			base = its->base;
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
		}
	}
	raw_spin_unlock(&its_lock);

	return err;
}
static void its_restore_enable(void)
{
	struct its_node *its;
	int ret;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;
		int i;

		base = its->base;

		/*
		 * Make sure that the ITS is disabled. If it fails to quiesce,
		 * don't restore it since writing to CBASER or BASER<n>
		 * registers is undefined according to the GIC v3 ITS
		 * Specification.
		 *
		 * Firmware resuming with the ITS enabled is terminally broken.
		 */
		WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
		ret = its_force_quiescent(base);
		if (ret) {
			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
			       &its->phys_base, ret);
			continue;
		}

		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);

		/*
		 * Writing CBASER resets CREADR to 0, so make CWRITER and
		 * cmd_write line up with it.
		 */
		its->cmd_write = its->cmd_base;
		gits_write_cwriter(0, base + GITS_CWRITER);

		/* Restore GITS_BASER from the value cache. */
		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
			struct its_baser *baser = &its->tables[i];

			if (!(baser->val & GITS_BASER_VALID))
				continue;

			its_write_baser(its, baser, baser->val);
		}
		writel_relaxed(its->ctlr_save, base + GITS_CTLR);

		/*
		 * Reinit the collection if it's stored in the ITS. This is
		 * indicated by the col_id being less than the HCC field.
		 * CID < HCC as specified in the GIC v3 Documentation.
		 */
		if (its->collections[smp_processor_id()].col_id <
		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
			its_cpu_init_collection(its);
	}
	raw_spin_unlock(&its_lock);
}
static struct syscore_ops its_syscore_ops = {
	.suspend = its_save_disable,
	.resume = its_restore_enable,
};
static void __init __iomem *its_map_one(struct resource *res, int *err)
{
	void __iomem *its_base;
	u32 val;

	its_base = ioremap(res->start, SZ_64K);
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		*err = -ENOMEM;
		return NULL;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		*err = -ENODEV;
		goto out_unmap;
	}

	*err = its_force_quiescent(its_base);
	if (*err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	return its_base;

out_unmap:
	iounmap(its_base);
	return NULL;
}
static int its_init_domain(struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->ops = &its_msi_domain_ops;
	info->data = its;

	inner_domain = irq_domain_create_hierarchy(its_parent,
						   its->msi_domain_flags, 0,
						   its->fwnode_handle, &its_domain_ops,
						   info);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);

	inner_domain->msi_parent_ops = &gic_v3_its_msi_parent_ops;
	inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;

	return 0;
}
static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes)
		return -ENOMEM;

	/* Use the last possible DevID */
	devid = GENMASK(device_ids(its) - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries > vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);

	return 0;
}
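/*
 * Sizing note: the proxy device gets roundup_pow_of_two(nr_cpu_ids)
 * event slots, so for instance a 6-CPU system ends up with 8 slots -
 * one potential doorbell proxy per CPU plus rounding. The BUG_ON above
 * guards the assumption that the ITS actually provisioned that many
 * ITEs for the proxy device.
 */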
static int __init its_compute_its_list_map(struct its_node *its)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no
	 * locking. Should this change, we should address
	 * this.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &its->phys_base);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its->base + GITS_CTLR);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &its->phys_base, its_number);
		return -EINVAL;
	}

	return its_number;
}

static int __init its_probe_one(struct its_node *its)
{
	u64 baser, tmp;
	struct page *page;
	u32 ctlr;
	int err;

	its_enable_quirks(its);

	if (is_v4(its)) {
		if (!(its->typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(its);
			if (err < 0)
				goto out;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&its->phys_base, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
		}

		if (is_v4_1(its)) {
			u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);

			its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
			if (!its->sgir_base) {
				err = -ENOMEM;
				goto out;
			}

			its->mpidr = readl_relaxed(its->base + GITS_MPIDR);

			pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
				&its->phys_base, its->mpidr, svpet);
		}
	}

	page = its_alloc_pages_node(its->numa_node,
				    GFP_KERNEL | __GFP_ZERO,
				    get_order(ITS_CMD_QUEUE_SZ));
	if (!page) {
		err = -ENOMEM;
		goto out_unmap_sgir;
	}
	its->cmd_base = (void *)page_address(page);
	its->cmd_write = its->cmd_base;

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
		tmp &= ~GITS_CBASER_SHAREABILITY_MASK;

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (is_v4(its))
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	err = its_init_domain(its);
	if (err)
		goto out_free_tables;

	raw_spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	raw_spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
	if (its->sgir_base)
		iounmap(its->sgir_base);
out:
	pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
	return err;
}
static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	/*
	 * If coming via a CPU hotplug event, we don't need to disable
	 * LPIs before trying to re-enable them. They are already
	 * configured and all is well in the world.
	 *
	 * If running with preallocated tables, there is nothing to do.
	 */
	if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
		return 0;

	/*
	 * From that point on, we only try to do some damage control.
	 */
	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret;

		ret = redist_disable_lpis();
		if (ret)
			return ret;

		its_cpu_init_lpis();
		its_cpu_init_collections();
	}

	return 0;
}
static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
{
	cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
}

static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
		    rdist_memreserve_cpuhp_cleanup_workfn);
static int its_cpu_memreserve_lpi(unsigned int cpu)
{
	struct page *pend_page;
	int ret = 0;

	/* This gets to run exactly once per CPU */
	if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
		return 0;

	pend_page = gic_data_rdist()->pend_page;
	if (WARN_ON(!pend_page)) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * If the pending table was pre-programmed, free the memory we
	 * preemptively allocated. Otherwise, reserve that memory for
	 * later kexecs.
	 */
	if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
		its_free_pending_table(pend_page);
		gic_data_rdist()->pend_page = NULL;
	} else {
		phys_addr_t paddr = page_to_phys(pend_page);
		WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
	}

out:
	/* Last CPU being brought up gets to issue the cleanup */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
		schedule_work(&rdist_memreserve_cpuhp_cleanup_work);

	gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
	return ret;
}
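/*
 * In short: if firmware (or a previous kernel) had already programmed
 * the pending table, the page we allocated speculatively is returned;
 * otherwise the page we are actually using is recorded via
 * gic_reserve_range() so that a later kexec/crash kernel will not hand
 * it out again.
 */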
/* Mark all the BASER registers as invalid before they get reprogrammed */
static int __init its_reset_one(struct resource *res)
{
	void __iomem *its_base;
	int err, i;

	its_base = its_map_one(res, &err);
	if (!its_base)
		return err;

	for (i = 0; i < GITS_BASER_NR_REGS; i++)
		gits_write_baser(0, its_base + GITS_BASER + (i << 3));

	iounmap(its_base);
	return 0;
}
static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};
static struct its_node __init *its_node_init(struct resource *res,
					     struct fwnode_handle *handle, int numa_node)
{
	void __iomem *its_base;
	struct its_node *its;
	int err;

	its_base = its_map_one(res, &err);
	if (!its_base)
		return NULL;

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its)
		goto out_unmap;

	raw_spin_lock_init(&its->lock);
	mutex_init(&its->dev_alloc_lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);

	its->typer = gic_read_typer(its_base + GITS_TYPER);
	its->base = its_base;
	its->phys_base = res->start;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;

	its->numa_node = numa_node;
	its->fwnode_handle = handle;

	return its;

out_unmap:
	iounmap(its_base);
	return NULL;
}
static void its_node_destroy(struct its_node *its)
{
	iounmap(its->base);
	kfree(its);
}
static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;
	int err;

	/*
	 * Make sure *all* the ITS are reset before we probe any, as
	 * they may be sharing memory. If any of the ITS fails to
	 * reset, don't even try to go any further, as this could
	 * result in something even worse.
	 */
	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np) ||
		    !of_property_read_bool(np, "msi-controller") ||
		    of_address_to_resource(np, 0, &res))
			continue;

		err = its_reset_one(&res);
		if (err) {
			of_node_put(np);
			return err;
		}
	}

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		struct its_node *its;

		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
		if (!its)
			continue;

		err = its_probe_one(its);
		if (err)
			its_node_destroy(its);
	}
	return 0;
}
#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE		(SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}
static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
		       its_affinity->header.length);
		return -EINVAL;
	}

	/*
	 * Note that in theory a new proximity node could be created by this
	 * entry as it is an SRAT resource allocation structure.
	 * We do not currently support doing so.
	 */
	node = pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}
static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
					 sizeof(struct acpi_table_srat),
					 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
					 gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps)
		return;

	acpi_table_parse_entries(ACPI_SIG_SRAT,
				 sizeof(struct acpi_table_srat),
				 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
				 gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif
static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct its_node *its;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode(&res.start);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	its = its_node_init(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!its) {
		err = -ENOMEM;
		goto node_err;
	}

	if (acpi_get_madt_revision() >= 7 &&
	    (its_entry->flags & ACPI_MADT_ITS_NON_COHERENT))
		its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;

	err = its_probe_one(its);
	if (!err)
		return 0;

node_err:
	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}
static int __init its_acpi_reset(union acpi_subtable_headers *header,
				 const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct resource res;

	its_entry = (struct acpi_madt_generic_translator *)header;
	res = (struct resource) {
		.start	= its_entry->base_address,
		.end	= its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	};

	return its_reset_one(&res);
}
static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	/*
	 * Make sure *all* the ITS are reset before we probe any, as
	 * they may be sharing memory. If any of the ITS fails to
	 * reset, don't even try to go any further, as this could
	 * result in something even worse.
	 */
	if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
				  its_acpi_reset, 0) > 0)
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
				      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif
int __init its_lpi_memreserve_init(void)
{
	int state;

	if (!efi_enabled(EFI_CONFIG_TABLES))
		return 0;

	if (list_empty(&its_nodes))
		return 0;

	gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "irqchip/arm/gicv3/memreserve:online",
				  its_cpu_memreserve_lpi,
				  NULL);
	if (state < 0)
		return state;

	gic_rdists->cpuhp_memreserve_state = state;

	return 0;
}
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain, u8 irq_prio)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	bool has_v4_1 = false;
	int err;

	itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1);
	if (!itt_pool)
		return -ENOMEM;

	gic_rdists = rdists;

	lpi_prop_prio = irq_prio;
	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry) {
		has_v4 |= is_v4(its);
		has_v4_1 |= is_v4_1(its);
	}

	/* Don't bother with inconsistent systems */
	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
		rdists->has_rvpeid = false;

	if (has_v4 & rdists->has_vlpis) {
		const struct irq_domain_ops *sgi_ops;

		if (has_v4_1)
			sgi_ops = &its_sgi_domain_ops;
		else
			sgi_ops = NULL;

		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}