// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Linaro Limited
 * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>

/*
 * This is the driver for Qualcomm MPM (MSM Power Manager) interrupt controller,
 * which is commonly found on Qualcomm SoCs built on the RPM architecture.
 * Sitting in always-on domain, MPM monitors the wakeup interrupts when SoC is
 * asleep, and wakes up the AP when one of those interrupts occurs. This driver
 * doesn't directly access physical MPM registers though. Instead, the access
 * is bridged via a piece of internal memory (SRAM) that is accessible to both
 * AP and RPM. This piece of memory is called 'vMPM' in the driver.
 *
 * When SoC is awake, the vMPM is owned by AP and the register setup by this
 * driver all happens on vMPM. When AP is about to get power collapsed, the
 * driver sends a mailbox notification to RPM, which will take over the vMPM
 * ownership and dump vMPM into physical MPM registers. On wakeup, AP is woken
 * up by a MPM pin/interrupt, and RPM will copy STATUS registers into vMPM.
 * Then AP starts owning vMPM again.
 *
 * vMPM register map:
 *
 *    31                              0
 *    +--------------------------------+
 *    |            TIMER0              | 0x00
 *    +--------------------------------+
 *    |            TIMER1              | 0x04
 *    +--------------------------------+
 *    |            ENABLE0             | 0x08
 *    +--------------------------------+
 *    |              ...               | ...
 *    +--------------------------------+
 *    |            ENABLEn             |
 *    +--------------------------------+
 *    |          FALLING_EDGE0         |
 *    +--------------------------------+
 *    |              ...               |
 *    +--------------------------------+
 *    |            STATUSn             |
 *    +--------------------------------+
 *
 *    n = DIV_ROUND_UP(pin_cnt, 32)
 */

#define MPM_REG_ENABLE		0
#define MPM_REG_FALLING_EDGE	1
#define MPM_REG_RISING_EDGE	2
#define MPM_REG_POLARITY	3
#define MPM_REG_STATUS		4

/* MPM pin map to GIC hwirq */
struct mpm_gic_map {
	int pin;
	irq_hw_number_t hwirq;
};

struct qcom_mpm_priv {
	void __iomem *base;
	raw_spinlock_t lock;
	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;
	struct mpm_gic_map *maps;
	unsigned int map_cnt;
	unsigned int reg_stride;
	struct irq_domain *domain;
	struct generic_pm_domain genpd;
};

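/*
 * vMPM is an array of 32-bit words: the first two words (TIMER0/TIMER1 in the
 * layout above) are skipped, then each register bank (ENABLE, FALLING_EDGE,
 * RISING_EDGE, POLARITY, STATUS) occupies reg_stride words, one bit per pin.
 */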
static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg,
			 unsigned int index)
{
	unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;

	return readl_relaxed(priv->base + offset);
}

static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg,
			   unsigned int index, u32 val)
{
	unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;

	writel_relaxed(val, priv->base + offset);

	/* Ensure the write is completed */
	wmb();
}

static void qcom_mpm_enable_irq(struct irq_data *d, bool en)
{
	struct qcom_mpm_priv *priv = d->chip_data;
	unsigned int pin = d->hwirq;
	unsigned int index = pin / 32;
	unsigned int shift = pin % 32;
	unsigned long flags, val;

	raw_spin_lock_irqsave(&priv->lock, flags);

	val = qcom_mpm_read(priv, MPM_REG_ENABLE, index);
	__assign_bit(shift, &val, en);
	qcom_mpm_write(priv, MPM_REG_ENABLE, index, val);

	raw_spin_unlock_irqrestore(&priv->lock, flags);
}

static void qcom_mpm_mask(struct irq_data *d)
{
	qcom_mpm_enable_irq(d, false);

	if (d->parent_data)
		irq_chip_mask_parent(d);
}

static void qcom_mpm_unmask(struct irq_data *d)
{
	qcom_mpm_enable_irq(d, true);

	if (d->parent_data)
		irq_chip_unmask_parent(d);
}

static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg,
			 unsigned int index, unsigned int shift)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&priv->lock, flags);

	val = qcom_mpm_read(priv, reg, index);
	__assign_bit(shift, &val, set);
	qcom_mpm_write(priv, reg, index, val);

	raw_spin_unlock_irqrestore(&priv->lock, flags);
}

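/*
 * MPM keeps rising edge, falling edge and level polarity in separate banks,
 * so all three banks are programmed from the requested type; the type is
 * then squashed to something the parent GIC understands (edge rising or
 * level high) before being passed up the hierarchy.
 */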
static int qcom_mpm_set_type(struct irq_data *d, unsigned int type)
{
	struct qcom_mpm_priv *priv = d->chip_data;
	unsigned int pin = d->hwirq;
	unsigned int index = pin / 32;
	unsigned int shift = pin % 32;

	if (type & IRQ_TYPE_EDGE_RISING)
		mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift);

	if (type & IRQ_TYPE_EDGE_FALLING)
		mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift);

	if (type & IRQ_TYPE_LEVEL_HIGH)
		mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift);
	else
		mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift);

	if (!d->parent_data)
		return 0;

	if (type & IRQ_TYPE_EDGE_BOTH)
		type = IRQ_TYPE_EDGE_RISING;

	if (type & IRQ_TYPE_LEVEL_MASK)
		type = IRQ_TYPE_LEVEL_HIGH;

	return irq_chip_set_type_parent(d, type);
}

static struct irq_chip qcom_mpm_chip = {
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= qcom_mpm_mask,
	.irq_unmask		= qcom_mpm_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= qcom_mpm_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SKIP_SET_WAKE,
};

static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin)
{
	struct mpm_gic_map *maps = priv->maps;
	int i;

	for (i = 0; i < priv->map_cnt; i++) {
		if (maps[i].pin == pin)
			return &maps[i];
	}

	return NULL;
}

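/*
 * Consumers describe a wakeup interrupt with two cells (MPM pin, trigger
 * type). The pin becomes the hwirq in this domain; if the pin has a GIC
 * mapping in qcom,mpm-pin-map it is also allocated in the parent domain as a
 * 3-cell GIC SPI fwspec, otherwise the parent hierarchy is disconnected and
 * the pin is handled by MPM alone.
 */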
static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
			  unsigned int nr_irqs, void *data)
{
	struct qcom_mpm_priv *priv = domain->host_data;
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	struct mpm_gic_map *map;
	irq_hw_number_t pin;
	unsigned int type;
	int ret;

	ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type);
	if (ret)
		return ret;

	ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
					    &qcom_mpm_chip, priv);
	if (ret)
		return ret;

	map = get_mpm_gic_map(priv, pin);
	if (map == NULL)
		return irq_domain_disconnect_hierarchy(domain->parent, virq);

	if (type & IRQ_TYPE_EDGE_BOTH)
		type = IRQ_TYPE_EDGE_RISING;

	if (type & IRQ_TYPE_LEVEL_MASK)
		type = IRQ_TYPE_LEVEL_HIGH;

	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 3;
	parent_fwspec.param[0] = 0;
	parent_fwspec.param[1] = map->hwirq;
	parent_fwspec.param[2] = type;

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops qcom_mpm_ops = {
	.alloc		= qcom_mpm_alloc,
	.free		= irq_domain_free_irqs_common,
	.translate	= irq_domain_translate_twocell,
};

/* Triggered by RPM when system resumes from deep sleep */
static irqreturn_t qcom_mpm_handler(int irq, void *dev_id)
{
	struct qcom_mpm_priv *priv = dev_id;
	unsigned long enable, pending;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	int i, j;

	for (i = 0; i < priv->reg_stride; i++) {
		raw_spin_lock_irqsave(&priv->lock, flags);
		enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i);
		pending = qcom_mpm_read(priv, MPM_REG_STATUS, i);
		pending &= enable;
		raw_spin_unlock_irqrestore(&priv->lock, flags);

		for_each_set_bit(j, &pending, 32) {
			unsigned int pin = 32 * i + j;
			struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin);
			struct irq_data *d = &desc->irq_data;

			if (!irqd_is_level_type(d))
				irq_set_irqchip_state(d->irq,
						IRQCHIP_STATE_PENDING, true);
			ret = IRQ_HANDLED;
		}
	}

	return ret;
}

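/*
 * genpd power_off hook: clear stale STATUS bits and ask RPM (via the mailbox)
 * to latch the vMPM contents into the physical MPM before power collapse.
 */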
static int mpm_pd_power_off(struct generic_pm_domain *genpd)
{
	struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv,
						  genpd);
	int i, ret;

	for (i = 0; i < priv->reg_stride; i++)
		qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);

	/* Notify RPM to write vMPM into HW */
	ret = mbox_send_message(priv->mbox_chan, NULL);
	if (ret < 0)
		return ret;

	return 0;
}

static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
{
	int i;

	for (i = 0; i < cnt; i++)
		if (maps[i].hwirq == hwirq)
			return true;

	return false;
}

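/*
 * Probed via IRQCHIP_PLATFORM_DRIVER below: parse qcom,mpm-pin-count and
 * qcom,mpm-pin-map, map the vMPM region (RPM message RAM or plain MMIO),
 * register the wakeup genpd and IRQ domain, and hook the resume interrupt.
 */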
static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	struct device *dev = &pdev->dev;
	struct irq_domain *parent_domain;
	struct generic_pm_domain *genpd;
	struct device_node *msgram_np;
	struct qcom_mpm_priv *priv;
	unsigned int pin_cnt;
	struct resource res;
	int i, irq;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt);
	if (ret) {
		dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret);
		return ret;
	}

	priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32);

	ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map");
	if (ret < 0) {
		dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret);
		return ret;
	}

	if (ret % 2) {
		dev_err(dev, "invalid qcom,mpm-pin-map\n");
		return -EINVAL;
	}

	priv->map_cnt = ret / 2;
	priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps),
				  GFP_KERNEL);
	if (!priv->maps)
		return -ENOMEM;

	for (i = 0; i < priv->map_cnt; i++) {
		u32 pin, hwirq;

		of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin);
		of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq);

		if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) {
			dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n",
				 pin, hwirq);
			continue;
		}

		priv->maps[i].pin = pin;
		priv->maps[i].hwirq = hwirq;
	}

	raw_spin_lock_init(&priv->lock);

	/* If we have a handle to an RPM message ram partition, use it. */
	msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
	if (msgram_np) {
		ret = of_address_to_resource(msgram_np, 0, &res);
		if (ret) {
			of_node_put(msgram_np);
			return ret;
		}

		/* Don't use devm_ioremap_resource, as we're accessing a shared region. */
		priv->base = devm_ioremap(dev, res.start, resource_size(&res));
		of_node_put(msgram_np);
		if (!priv->base)
			return -ENOMEM;
	} else {
		/* Otherwise, fall back to simple MMIO. */
		priv->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(priv->base))
			return PTR_ERR(priv->base);
	}

	for (i = 0; i < priv->reg_stride; i++) {
		qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
		qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0);
		qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0);
		qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0);
		qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	genpd = &priv->genpd;
	genpd->flags = GENPD_FLAG_IRQ_SAFE;
	genpd->power_off = mpm_pd_power_off;

	genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev));
	if (!genpd->name)
		return -ENOMEM;

	ret = pm_genpd_init(genpd, NULL, false);
	if (ret) {
		dev_err(dev, "failed to init genpd: %d\n", ret);
		return ret;
	}

	ret = of_genpd_add_provider_simple(np, genpd);
	if (ret) {
		dev_err(dev, "failed to add genpd provider: %d\n", ret);
		goto remove_genpd;
	}

	priv->mbox_client.dev = dev;
	priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0);
	if (IS_ERR(priv->mbox_chan)) {
		ret = PTR_ERR(priv->mbox_chan);
		dev_err(dev, "failed to acquire IPC channel: %d\n", ret);
		goto remove_genpd;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		dev_err(dev, "failed to find MPM parent domain\n");
		ret = -ENXIO;
		goto free_mbox;
	}

	priv->domain = irq_domain_create_hierarchy(parent_domain,
				IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
				of_node_to_fwnode(np), &qcom_mpm_ops, priv);
	if (!priv->domain) {
		dev_err(dev, "failed to create MPM domain\n");
		ret = -ENOMEM;
		goto free_mbox;
	}

	irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP);

	ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND,
			       "qcom_mpm", priv);
	if (ret) {
		dev_err(dev, "failed to request irq: %d\n", ret);
		goto remove_domain;
	}

	return 0;

remove_domain:
	irq_domain_remove(priv->domain);
free_mbox:
	mbox_free_channel(priv->mbox_chan);
remove_genpd:
	pm_genpd_remove(genpd);

	return ret;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
MODULE_LICENSE("GPL v2");