// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors. Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor. By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux GPIO and interrupt framework to expose a virtual
 * GPIO for each outbound entry and a virtual interrupt controller for each
 * inbound entry.
 */
#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1

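/* The magic word reads as ASCII "$SMP" when the value is stored little-endian */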
#define SMP2P_MAGIC 0x504d5324

/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:		magic number
 * @version:		version - must be 1
 * @features:		features flag - currently unused
 * @local_pid:		processor id of sending end
 * @remote_pid:		processor id of receiving end
 * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries:	number of allocated entries
 * @flags:		reserved flags - currently unused
 * @entries:		individual communication entries
 *     @name:		name of the entry
 *     @value:		content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;

/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled: bitmap to track enabled irq bits
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling: bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;

	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};

#define SMP2P_INBOUND	0
#define SMP2P_OUTBOUND	1

/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
 * @mbox_client: mailbox client handle
 * @mbox_chan:	apcs ipc mailbox channel handle
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	u32 ipc_offset;
	u32 ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head inbound;
	struct list_head outbound;
};

static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();

	if (smp2p->mbox_chan) {
		mbox_send_message(smp2p->mbox_chan, NULL);
		mbox_client_txdone(smp2p->mbox_chan, 0);
	} else {
		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
	}
}

/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side to handle newly allocated entries
 * or any changes to the state bits of existing entries.
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	struct qcom_smp2p *smp2p = data;
	unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned pid = smp2p->remote_pid;
	size_t size;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			return IRQ_HANDLED;
		}

		smp2p->in = in;
	}

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		/* No changes of this entry? */
		if (!status)
			continue;

		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}

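/*
 * irq_chip callbacks for the per-entry virtual interrupt controller. Masking
 * and trigger-type configuration only update the local bitmaps that
 * qcom_smp2p_intr() consults when deciding which nested interrupts to fire.
 */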
static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}

static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}

static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smp2p_irq_chip = {
	.name		= "smp2p",
	.irq_mask	= smp2p_mask_irq,
	.irq_unmask	= smp2p_unmask_irq,
	.irq_set_type	= smp2p_set_irq_type,
};

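/* Set up each hwirq (one per entry bit) as a nested-threaded, non-probeable interrupt */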
static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}

static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

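/* Two-cell interrupt specifiers from DT: first cell selects the entry bit, second the IRQ_TYPE_* sense */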
static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	u32 orig;
	u32 val;

	spin_lock(&entry->lock);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock(&entry->lock);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}

static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};

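/*
 * Outbound entries are exposed to other drivers through the qcom_smem_state
 * API registered below. As a rough, illustrative sketch (the "stop" con_id
 * and the consumer device are hypothetical, not defined by this driver), a
 * consumer referencing an entry via "qcom,smem-states" could signal the
 * remote processor like this:
 *
 *	struct qcom_smem_state *state;
 *	unsigned int bit;
 *
 *	state = qcom_smem_state_get(dev, "stop", &bit);
 *	if (!IS_ERR(state))
 *		qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
 */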
static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}

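/*
 * Allocate and initialize the outbound SMEM item for this edge. The item is
 * only marked valid (version = 1) after the rest of the header is visible to
 * the remote processor.
 */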
static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST) {
		if (ret != -EPROBE_DEFER)
			dev_err(smp2p->dev,
				"unable to allocate local smp2p item\n");
		return ret;
	}

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}

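/*
 * Legacy fallback used when no mailbox channel is available: resolve the
 * "qcom,ipc" syscon phandle plus the register offset and bit used to kick
 * the remote processor.
 */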
static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

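/*
 * The probe routine reads the edge description from the devicetree: the pair
 * of SMEM item ids in "qcom,smem", the processor ids in "qcom,local-pid" and
 * "qcom,remote-pid", and one child node per entry carrying "qcom,entry-name"
 * plus an optional "interrupt-controller" property marking it as inbound.
 */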
static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct device_node *node;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
		return irq;
	}

	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

	for_each_available_child_of_node(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0)
			goto unwind_interfaces;

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					"smp2p", (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	return 0;

unwind_interfaces:
	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	smp2p->out->valid_entries = 0;

release_mbox:
	mbox_free_channel(smp2p->mbox_chan);

	return ret;

report_read_failure:
	dev_err(&pdev->dev, "failed to read %s\n", key);
	return -EINVAL;
}

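/*
 * Undo probe: remove the inbound irq domains, unregister the outbound smem
 * state handles, release the mailbox channel and invalidate the outbound
 * entries.
 */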
static int qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;

	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	mbox_free_channel(smp2p->mbox_chan);

	smp2p->out->valid_entries = 0;

	return 0;
}

static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);

static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove = qcom_smp2p_remove,
	.driver  = {
		.name = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
	},
};
module_platform_driver(qcom_smp2p_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");