// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

/*
 * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
 * for communicating single bit state information to remote processors.
 *
 * The implementation is based on two sections of shared memory; the first
 * holding the state bits and the second holding a matrix of subscription bits.
 *
 * The state bits are structured in entries of 32 bits, each belonging to one
 * system in the SoC. The entry belonging to the local system is considered
 * read-write, while the rest should be considered read-only.
 *
 * The subscription matrix consists of N bitmaps per entry, denoting interest
 * in updates of the entry for each of the N hosts. Upon updating a state bit
 * each host's subscription bitmap should be queried and the remote system
 * should be interrupted if it has requested so.
 *
 * The subscription matrix is laid out in entry-major order:
 *      entry0: [host0 ... hostN]
 *      .
 *      .
 *      entryM: [host0 ... hostN]
 *
 * A third, optional, shared memory region might contain information regarding
 * the number of entries in the state bitmap as well as the number of columns
 * in the subscription matrix.
 */

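/*
 * With this layout the word holding a given state or subscription value can
 * be found by simple pointer arithmetic, which is what the probe function
 * below does when it sets up its pointers. As a sketch, with states and
 * intr_mask being the bases of the two shared memory regions:
 *
 *      state word of entry m:                 states + m
 *      subscription word of entry m, host n:  intr_mask + m * num_hosts + n
 *
 * e.g. entry->subscription = intr_mask + id * smsm->num_hosts in
 * qcom_smsm_probe().
 */
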
/*
 * Shared memory identifiers, used to acquire handles to respective memory
 * region.
 */
#define SMEM_SMSM_SHARED_STATE          85
#define SMEM_SMSM_CPU_INTR_MASK         333
#define SMEM_SMSM_SIZE_INFO             419

/*
 * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
 */
#define SMSM_DEFAULT_NUM_ENTRIES        8
#define SMSM_DEFAULT_NUM_HOSTS          3

struct smsm_entry;
struct smsm_host;

/**
 * struct qcom_smsm - smsm driver context
 * @dev: smsm device pointer
 * @local_host: column in the subscription matrix representing this system
 * @num_hosts: number of columns in the subscription matrix
 * @num_entries: number of entries in the state map and rows in the subscription
 *      matrix
 * @local_state: pointer to the local processor's state bits
 * @subscription: pointer to local processor's row in subscription matrix
 * @state: smem state handle
 * @lock: spinlock for read-modify-write of the outgoing state
 * @entries: context for each of the entries
 * @hosts: context for each of the hosts
 * @mbox_client: mailbox client handle
 */
struct qcom_smsm {
        struct device *dev;

        u32 local_host;

        u32 num_hosts;
        u32 num_entries;

        u32 *local_state;
        u32 *subscription;
        struct qcom_smem_state *state;

        spinlock_t lock;

        struct smsm_entry *entries;
        struct smsm_host *hosts;

        struct mbox_client mbox_client;
};

/**
 * struct smsm_entry - per remote processor entry context
 * @smsm: back-reference to driver context
 * @domain: IRQ domain for this entry, if representing a remote system
 * @irq_enabled: bitmap of which state bits IRQs are enabled
 * @irq_rising: bitmap tracking if rising bits should be propagated
 * @irq_falling: bitmap tracking if falling bits should be propagated
 * @last_value: snapshot of state bits last time the interrupts were propagated
 * @remote_state: pointer to this entry's state bits
 * @subscription: pointer to a row in the subscription matrix representing this
 *      entry
 */
struct smsm_entry {
        struct qcom_smsm *smsm;

        struct irq_domain *domain;
        DECLARE_BITMAP(irq_enabled, 32);
        DECLARE_BITMAP(irq_rising, 32);
        DECLARE_BITMAP(irq_falling, 32);
        unsigned long last_value;

        u32 *remote_state;
        u32 *subscription;
};

/**
 * struct smsm_host - representation of a remote host
 * @ipc_regmap: regmap for outgoing interrupt
 * @ipc_offset: offset in @ipc_regmap for outgoing interrupt
 * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
 * @mbox_chan: apcs ipc mailbox channel handle
 */
struct smsm_host {
        struct regmap *ipc_regmap;
        int ipc_offset;
        int ipc_bit;

        struct mbox_chan *mbox_chan;
};

/**
 * smsm_update_bits() - change bit in outgoing entry and inform subscribers
 * @data: smsm context pointer
 * @mask: value mask
 * @value: new value
 *
 * Used to set and clear the bits in the outgoing/local entry and inform
 * subscribers about the change.
 */
static int smsm_update_bits(void *data, u32 mask, u32 value)
{
        struct qcom_smsm *smsm = data;
        struct smsm_host *hostp;
        unsigned long flags;
        u32 changes;
        u32 host;
        u32 orig;
        u32 val;

        spin_lock_irqsave(&smsm->lock, flags);

        /* Update the entry */
        val = orig = readl(smsm->local_state);
        val &= ~mask;
        val |= value;

        /* Don't signal if we didn't change the value */
        changes = val ^ orig;
        if (!changes) {
                spin_unlock_irqrestore(&smsm->lock, flags);
                goto done;
        }

        /* Write out the new value */
        writel(val, smsm->local_state);
        spin_unlock_irqrestore(&smsm->lock, flags);

        /* Make sure the value update is ordered before any kicks */
        wmb();

        /* Iterate over all hosts to check who wants a kick */
        for (host = 0; host < smsm->num_hosts; host++) {
                hostp = &smsm->hosts[host];

                val = readl(smsm->subscription + host);
                if (!(val & changes))
                        continue;

                if (hostp->mbox_chan) {
                        mbox_send_message(hostp->mbox_chan, NULL);
                        mbox_client_txdone(hostp->mbox_chan, 0);
                } else if (hostp->ipc_regmap) {
                        regmap_write(hostp->ipc_regmap,
                                     hostp->ipc_offset,
                                     BIT(hostp->ipc_bit));
                }
        }

done:
        return 0;
}

static const struct qcom_smem_state_ops smsm_state_ops = {
        .update_bits = smsm_update_bits,
};

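/*
 * Consumers never call smsm_update_bits() directly; they use the generic
 * smem-state API, which dispatches through the ops above. A minimal,
 * hypothetical consumer sketch (the device pointer and the "wlan-enable"
 * state name are made up for illustration):
 *
 *      struct qcom_smem_state *state;
 *      unsigned bit;
 *
 *      state = qcom_smem_state_get(dev, "wlan-enable", &bit);
 *      if (IS_ERR(state))
 *              return PTR_ERR(state);
 *
 *      qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
 *
 * The last call ends up in smsm_update_bits() above, which writes the bit and
 * kicks any subscribed remote hosts.
 */
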
/**
 * smsm_intr() - cascading IRQ handler for SMSM
 * @irq: unused
 * @data: entry related to this IRQ
 *
 * This function cascades an incoming interrupt from a remote system, based on
 * the state bits and configuration.
 */
static irqreturn_t smsm_intr(int irq, void *data)
{
        struct smsm_entry *entry = data;
        unsigned i;
        int irq_pin;
        u32 changed;
        u32 val;

        val = readl(entry->remote_state);
        changed = val ^ xchg(&entry->last_value, val);

        for_each_set_bit(i, entry->irq_enabled, 32) {
                if (!(changed & BIT(i)))
                        continue;

                if (val & BIT(i)) {
                        if (test_bit(i, entry->irq_rising)) {
                                irq_pin = irq_find_mapping(entry->domain, i);
                                handle_nested_irq(irq_pin);
                        }
                } else {
                        if (test_bit(i, entry->irq_falling)) {
                                irq_pin = irq_find_mapping(entry->domain, i);
                                handle_nested_irq(irq_pin);
                        }
                }
        }

        return IRQ_HANDLED;
}

/**
 * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be masked
 *
 * This un-subscribes the local CPU from interrupts upon changes to the defined
 * status bit. The bit is also cleared from cascading.
 */
static void smsm_mask_irq(struct irq_data *irqd)
{
        struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
        irq_hw_number_t irq = irqd_to_hwirq(irqd);
        struct qcom_smsm *smsm = entry->smsm;
        u32 val;

        if (entry->subscription) {
                val = readl(entry->subscription + smsm->local_host);
                val &= ~BIT(irq);
                writel(val, entry->subscription + smsm->local_host);
        }

        clear_bit(irq, entry->irq_enabled);
}

/**
 * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be unmasked
 *
 * This subscribes the local CPU to interrupts upon changes to the defined
 * status bit. The bit is also marked for cascading.
 */
static void smsm_unmask_irq(struct irq_data *irqd)
{
        struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
        irq_hw_number_t irq = irqd_to_hwirq(irqd);
        struct qcom_smsm *smsm = entry->smsm;
        u32 val;

        /* Make sure our last cached state is up-to-date */
        if (readl(entry->remote_state) & BIT(irq))
                set_bit(irq, &entry->last_value);
        else
                clear_bit(irq, &entry->last_value);

        set_bit(irq, entry->irq_enabled);

        if (entry->subscription) {
                val = readl(entry->subscription + smsm->local_host);
                val |= BIT(irq);
                writel(val, entry->subscription + smsm->local_host);
        }
}

/**
 * smsm_set_irq_type() - updates the requested IRQ type for the cascading
 * @irqd: consumer interrupt handle
 * @type: requested flags
 */
static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
{
        struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
        irq_hw_number_t irq = irqd_to_hwirq(irqd);

        if (!(type & IRQ_TYPE_EDGE_BOTH))
                return -EINVAL;

        if (type & IRQ_TYPE_EDGE_RISING)
                set_bit(irq, entry->irq_rising);
        else
                clear_bit(irq, entry->irq_rising);

        if (type & IRQ_TYPE_EDGE_FALLING)
                set_bit(irq, entry->irq_falling);
        else
                clear_bit(irq, entry->irq_falling);

        return 0;
}

static int smsm_get_irqchip_state(struct irq_data *irqd,
                                  enum irqchip_irq_state which, bool *state)
{
        struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
        irq_hw_number_t irq = irqd_to_hwirq(irqd);
        u32 val;

        if (which != IRQCHIP_STATE_LINE_LEVEL)
                return -EINVAL;

        val = readl(entry->remote_state);
        *state = !!(val & BIT(irq));

        return 0;
}

static struct irq_chip smsm_irq_chip = {
        .name           = "smsm",
        .irq_mask       = smsm_mask_irq,
        .irq_unmask     = smsm_unmask_irq,
        .irq_set_type   = smsm_set_irq_type,
        .irq_get_irqchip_state = smsm_get_irqchip_state,
};

/**
 * smsm_irq_map() - sets up a mapping for a cascaded IRQ
 * @d: IRQ domain representing an entry
 * @irq: IRQ to set up
 * @hw: unused
 */
static int smsm_irq_map(struct irq_domain *d,
                        unsigned int irq,
                        irq_hw_number_t hw)
{
        struct smsm_entry *entry = d->host_data;

        irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
        irq_set_chip_data(irq, entry);
        irq_set_nested_thread(irq, 1);

        return 0;
}

static const struct irq_domain_ops smsm_irq_ops = {
        .map = smsm_irq_map,
        .xlate = irq_domain_xlate_twocell,
};

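/*
 * The two-cell translation means a consumer can refer to a state bit of a
 * remote entry like any other interrupt: the first cell is the bit number,
 * the second the trigger type handled by smsm_set_irq_type(). A hypothetical
 * device tree fragment (node label and bit number are illustrative only):
 *
 *      interrupts-extended = <&modem_smsm 1 IRQ_TYPE_EDGE_RISING>;
 */
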
/**
 * smsm_parse_mbox() - requests an mbox channel
 * @smsm: smsm driver context
 * @host_id: index of the remote host to be resolved
 *
 * Requests the desired channel using the mbox interface which is needed for
 * sending the outgoing interrupts to a remote host, identified by @host_id.
 */
static int smsm_parse_mbox(struct qcom_smsm *smsm, unsigned int host_id)
{
        struct smsm_host *host = &smsm->hosts[host_id];
        int ret = 0;

        host->mbox_chan = mbox_request_channel(&smsm->mbox_client, host_id);
        if (IS_ERR(host->mbox_chan)) {
                ret = PTR_ERR(host->mbox_chan);
                host->mbox_chan = NULL;
        }

        return ret;
}

/**
 * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
 * @smsm: smsm driver context
 * @host_id: index of the remote host to be resolved
 *
 * Parses device tree to acquire the information needed for sending the
 * outgoing interrupts to a remote host, identified by @host_id.
 */
static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
{
        struct device_node *syscon;
        struct device_node *node = smsm->dev->of_node;
        struct smsm_host *host = &smsm->hosts[host_id];
        char key[16];
        int ret;

        snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
        syscon = of_parse_phandle(node, key, 0);
        if (!syscon)
                return 0;

        host->ipc_regmap = syscon_node_to_regmap(syscon);
        of_node_put(syscon);
        if (IS_ERR(host->ipc_regmap))
                return PTR_ERR(host->ipc_regmap);

        ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
        if (ret < 0) {
                dev_err(smsm->dev, "no offset in %s\n", key);
                return -EINVAL;
        }

        ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
        if (ret < 0) {
                dev_err(smsm->dev, "no bit in %s\n", key);
                return -EINVAL;
        }

        return 0;
}

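/*
 * The qcom,ipc-%d properties parsed above carry three cells: a phandle to the
 * IPC syscon, the register offset and the bit to poke. A hypothetical device
 * tree fragment for host 3 (the actual values vary per platform):
 *
 *      qcom,ipc-3 = <&apcs 8 19>;
 */
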
/**
 * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
 * @smsm: smsm driver context
 * @entry: entry context to be set up
 * @node: dt node containing the entry's properties
 */
static int smsm_inbound_entry(struct qcom_smsm *smsm,
                              struct smsm_entry *entry,
                              struct device_node *node)
{
        int ret;
        int irq;

        irq = irq_of_parse_and_map(node, 0);
        if (!irq) {
                dev_err(smsm->dev, "failed to parse smsm interrupt\n");
                return -EINVAL;
        }

        ret = devm_request_threaded_irq(smsm->dev, irq,
                                        NULL, smsm_intr,
                                        IRQF_ONESHOT,
                                        "smsm", (void *)entry);
        if (ret) {
                dev_err(smsm->dev, "failed to request interrupt\n");
                return ret;
        }

        entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
        if (!entry->domain) {
                dev_err(smsm->dev, "failed to add irq_domain\n");
                return -ENOMEM;
        }

        return 0;
}

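/*
 * Each remote entry handled here is a child node of the smsm node, carrying
 * the summary interrupt from the remote system. A hypothetical device tree
 * fragment (unit address and interrupt specifier are illustrative only):
 *
 *      wcnss_smsm: wcnss@7 {
 *              reg = <7>;
 *              interrupts = <GIC_SPI 144 IRQ_TYPE_EDGE_RISING>;
 *              interrupt-controller;
 *              #interrupt-cells = <2>;
 *      };
 */
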
/**
 * smsm_get_size_info() - parse the optional memory segment for sizes
 * @smsm: smsm driver context
 *
 * Attempt to acquire the number of hosts and entries from the optional shared
 * memory location. Not being able to find this segment should indicate that
 * we're on an older system where these values were hard-coded to
 * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int smsm_get_size_info(struct qcom_smsm *smsm)
{
        size_t size;
        struct {
                u32 num_hosts;
                u32 num_entries;
                u32 reserved0;
                u32 reserved1;
        } *info;

        info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
        if (IS_ERR(info) && PTR_ERR(info) != -ENOENT)
                return dev_err_probe(smsm->dev, PTR_ERR(info),
                                     "unable to retrieve smsm size info\n");
        else if (IS_ERR(info) || size != sizeof(*info)) {
                dev_warn(smsm->dev, "no smsm size info, using defaults\n");
                smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
                smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
                return 0;
        }

        smsm->num_entries = info->num_entries;
        smsm->num_hosts = info->num_hosts;

        dev_dbg(smsm->dev,
                "found custom size of smsm: %d entries %d hosts\n",
                smsm->num_entries, smsm->num_hosts);

        return 0;
}

static int qcom_smsm_probe(struct platform_device *pdev)
{
        struct device_node *local_node;
        struct device_node *node;
        struct smsm_entry *entry;
        struct qcom_smsm *smsm;
        u32 *intr_mask;
        size_t size;
        u32 *states;
        u32 id;
        int ret;

        smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
        if (!smsm)
                return -ENOMEM;
        smsm->dev = &pdev->dev;
        spin_lock_init(&smsm->lock);

        ret = smsm_get_size_info(smsm);
        if (ret)
                return ret;

        smsm->entries = devm_kcalloc(&pdev->dev,
                                     smsm->num_entries,
                                     sizeof(struct smsm_entry),
                                     GFP_KERNEL);
        if (!smsm->entries)
                return -ENOMEM;

        smsm->hosts = devm_kcalloc(&pdev->dev,
                                   smsm->num_hosts,
                                   sizeof(struct smsm_host),
                                   GFP_KERNEL);
        if (!smsm->hosts)
                return -ENOMEM;

        for_each_child_of_node(pdev->dev.of_node, local_node) {
                if (of_property_present(local_node, "#qcom,smem-state-cells"))
                        break;
        }
        if (!local_node) {
                dev_err(&pdev->dev, "no state entry\n");
                return -EINVAL;
        }

        of_property_read_u32(pdev->dev.of_node,
                             "qcom,local-host",
                             &smsm->local_host);

        smsm->mbox_client.dev = &pdev->dev;
        smsm->mbox_client.knows_txdone = true;

        /* Parse the host properties */
        for (id = 0; id < smsm->num_hosts; id++) {
                /* Try using mbox interface first, otherwise fall back to syscon */
                ret = smsm_parse_mbox(smsm, id);
                if (!ret)
                        continue;

                ret = smsm_parse_ipc(smsm, id);
                if (ret < 0)
                        goto out_put;
        }

        /* Acquire the main SMSM state vector */
        ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
                              smsm->num_entries * sizeof(u32));
        if (ret < 0 && ret != -EEXIST) {
                dev_err(&pdev->dev, "unable to allocate shared state entry\n");
                goto out_put;
        }

        states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
        if (IS_ERR(states)) {
                dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
                ret = PTR_ERR(states);
                goto out_put;
        }

        /* Acquire the list of interrupt mask vectors */
        size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
        ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
        if (ret < 0 && ret != -EEXIST) {
                dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
                goto out_put;
        }

        intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
        if (IS_ERR(intr_mask)) {
                dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
                ret = PTR_ERR(intr_mask);
                goto out_put;
        }

        /* Set up the reference to the local state bits */
        smsm->local_state = states + smsm->local_host;
        smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;

        /* Register the outgoing state */
        smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
        if (IS_ERR(smsm->state)) {
                dev_err(smsm->dev, "failed to register qcom_smem_state\n");
                ret = PTR_ERR(smsm->state);
                goto out_put;
        }

        /* Register handlers for remote processor entries of interest. */
        for_each_available_child_of_node(pdev->dev.of_node, node) {
                if (!of_property_read_bool(node, "interrupt-controller"))
                        continue;

                ret = of_property_read_u32(node, "reg", &id);
                if (ret || id >= smsm->num_entries) {
                        dev_err(&pdev->dev, "invalid reg of entry\n");
                        if (!ret)
                                ret = -EINVAL;
                        goto unwind_interfaces;
                }
                entry = &smsm->entries[id];

                entry->smsm = smsm;
                entry->remote_state = states + id;

                /* Set up subscription pointers and unsubscribe from any kicks */
                entry->subscription = intr_mask + id * smsm->num_hosts;
                writel(0, entry->subscription + smsm->local_host);

                ret = smsm_inbound_entry(smsm, entry, node);
                if (ret < 0)
                        goto unwind_interfaces;
        }

        platform_set_drvdata(pdev, smsm);
        of_node_put(local_node);

        return 0;

unwind_interfaces:
        of_node_put(node);
        for (id = 0; id < smsm->num_entries; id++)
                if (smsm->entries[id].domain)
                        irq_domain_remove(smsm->entries[id].domain);

        qcom_smem_state_unregister(smsm->state);
out_put:
        for (id = 0; id < smsm->num_hosts; id++)
                mbox_free_channel(smsm->hosts[id].mbox_chan);

        of_node_put(local_node);
        return ret;
}

static void qcom_smsm_remove(struct platform_device *pdev)
{
        struct qcom_smsm *smsm = platform_get_drvdata(pdev);
        unsigned id;

        for (id = 0; id < smsm->num_entries; id++)
                if (smsm->entries[id].domain)
                        irq_domain_remove(smsm->entries[id].domain);

        for (id = 0; id < smsm->num_hosts; id++)
                mbox_free_channel(smsm->hosts[id].mbox_chan);

        qcom_smem_state_unregister(smsm->state);
}

static const struct of_device_id qcom_smsm_of_match[] = {
        { .compatible = "qcom,smsm" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);

static struct platform_driver qcom_smsm_driver = {
        .probe = qcom_smsm_probe,
        .remove = qcom_smsm_remove,
        .driver = {
                .name = "qcom-smsm",
                .of_match_table = qcom_smsm_of_match,
        },
};
module_platform_driver(qcom_smsm_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
MODULE_LICENSE("GPL v2");