// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors. Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor. By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux GPIO and interrupt framework to expose a virtual
 * GPIO for each outbound entry and a virtual interrupt controller for each
 * inbound entry.
 */
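/*
 * For reference, a minimal sketch of how a consumer typically drives an
 * outbound entry through the generic smem-state API (illustrative only, not
 * part of this driver; the device pointer, the "stop" state name and the bit
 * index are assumptions):
 *
 *	struct qcom_smem_state *state;
 *	unsigned int stop_bit;
 *
 *	state = qcom_smem_state_get(dev, "stop", &stop_bit);
 *	if (!IS_ERR(state))
 *		qcom_smem_state_update_bits(state, BIT(stop_bit), BIT(stop_bit));
 *
 * Such an update reaches this driver through smp2p_update_bits() below.
 */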
#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1

#define SMP2P_MAGIC 0x504d5324
/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:		magic number
 * @version:		version - must be 1
 * @features:		features flag - currently unused
 * @local_pid:		processor id of sending end
 * @remote_pid:		processor id of receiving end
 * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries:	number of allocated entries
 * @flags:		unused
 * @entries:		individual communication entries
 *     @name:		name of the entry
 *     @value:		content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;
/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled: bitmap to track enabled irq bits
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling: bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;

	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};
#define SMP2P_INBOUND	0
#define SMP2P_OUTBOUND	1
/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
 * @mbox_client: mailbox client handle
 * @mbox_chan:	apcs ipc mailbox channel handle
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head inbound;
	struct list_head outbound;
};
static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();

	if (smp2p->mbox_chan) {
		mbox_send_message(smp2p->mbox_chan, NULL);
		mbox_client_txdone(smp2p->mbox_chan, 0);
	} else {
		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset,
			     BIT(smp2p->ipc_bit));
	}
}
/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side to handle newly allocated entries
 * or any changes to the state bits of existing entries.
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	struct qcom_smp2p *smp2p = data;
	unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned pid = smp2p->remote_pid;
	size_t size;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			return IRQ_HANDLED;
		}

		smp2p->in = in;
	}

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;
	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		/* No changes of this entry? */
		if (!status)
			continue;

		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}
static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}
static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}
static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}
static struct irq_chip smp2p_irq_chip = {
	.name           = "smp2p",
	.irq_mask       = smp2p_mask_irq,
	.irq_unmask     = smp2p_unmask_irq,
	.irq_set_type	= smp2p_set_irq_type,
};
static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}
static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};
static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}
static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	unsigned long flags;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&entry->lock, flags);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock_irqrestore(&entry->lock, flags);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}
static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};
static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}
static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST) {
		if (ret != -EPROBE_DEFER)
			dev_err(smp2p->dev,
				"unable to allocate local smp2p item\n");
		return ret;
	}

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}
static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}
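/*
 * Illustrative sketch of the "qcom,ipc" property parsed above (the phandle
 * and the numbers are assumptions, not taken from a real board file):
 *
 *	qcom,ipc = <&apcs 8 18>;
 *
 * i.e. a syscon phandle, a register offset and a bit number; the kick in
 * qcom_smp2p_kick() then amounts to writing BIT(18) at offset 8 of the
 * referenced regmap.
 */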
static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct device_node *node;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}
	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

	for_each_available_child_of_node(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0)
			goto unwind_interfaces;

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);
	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					"smp2p", (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	return 0;

unwind_interfaces:
	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	smp2p->out->valid_entries = 0;

release_mbox:
	mbox_free_channel(smp2p->mbox_chan);

	return ret;

report_read_failure:
	dev_err(&pdev->dev, "failed to read %s\n", key);
	return -EINVAL;
}
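/*
 * Illustrative sketch of a device tree node consumed by the probe routine
 * above (labels, smem item ids, pids, interrupt and mailbox specifiers are
 * placeholders, not taken from a real board file):
 *
 *	smp2p-remote {
 *		compatible = "qcom,smp2p";
 *		qcom,smem = <443>, <429>;
 *		interrupts = <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>;
 *		mboxes = <&apcs 14>;
 *		qcom,local-pid = <0>;
 *		qcom,remote-pid = <4>;
 *
 *		remote_smp2p_out: master-kernel {
 *			qcom,entry-name = "master-kernel";
 *			#qcom,smem-state-cells = <1>;
 *		};
 *
 *		remote_smp2p_in: slave-kernel {
 *			qcom,entry-name = "slave-kernel";
 *			interrupt-controller;
 *			#interrupt-cells = <2>;
 *		};
 *	};
 */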
static int qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;

	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	mbox_free_channel(smp2p->mbox_chan);

	smp2p->out->valid_entries = 0;

	return 0;
}
static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);
static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove = qcom_smp2p_remove,
	.driver  = {
		.name = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
	},
};
module_platform_driver(qcom_smp2p_driver);
MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");