// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Intel SCU IPC mechanism
 *
 * (C) Copyright 2008-2010,2015 Intel Corporation
 * Author: Sreedhara DS (sreedhara.ds@intel.com)
 *
 * The SCU running on the ARC processor communicates with other entities
 * running on the IA core through an IPC mechanism which provides messaging
 * between the IA core and the SCU. The SCU has two IPC mechanisms, IPC-1 and
 * IPC-2. IPC-1 is used between IA32 and the SCU, whereas IPC-2 is used
 * between the P-Unit and the SCU. This driver deals with IPC-1 and provides
 * an API for power control unit registers (e.g. MSIC) along with other APIs.
 */

#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/platform_data/x86/intel_scu_ipc.h>

/* IPC defines the following message types */
#define IPCMSG_PCNTRL		0xff /* Power controller unit read/write */

/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W	0 /* Register write */
#define IPC_CMD_PCNTRL_R	1 /* Register read */
#define IPC_CMD_PCNTRL_M	2 /* Register read-modify-write */

/*
 * IPC register summary
 *
 * IPC register blocks are memory mapped at a fixed address in PCI BAR 0.
 * To read or write information to the SCU, the driver writes to the IPC-1
 * memory mapped registers. The IPC mechanism is as follows:
 *
 * 1. IA core cDMI interface claims this transaction and converts it to a
 *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 *
 * 2. South Complex cDMI block receives this message and writes it to
 *    the IPC-1 register block, causing an interrupt to the SCU.
 *
 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
 *    message handler is called within firmware.
 */

#define IPC_WWBUF_SIZE		20	/* IPC Write buffer Size */
#define IPC_RWBUF_SIZE		20	/* IPC Read buffer Size */
#define IPC_IOC			0x100	/* IPC command register IOC bit */

struct intel_scu_ipc_dev {
	struct device dev;
	struct module *owner;
	void __iomem *ipc_base;
	struct completion cmd_complete;

	struct intel_scu_ipc_data data;
};

#define IPC_STATUS		0x04
#define IPC_STATUS_IRQ		BIT(2)
#define IPC_STATUS_ERR		BIT(1)
#define IPC_STATUS_BUSY		BIT(0)

/*
 * IPC Write/Read Buffers:
 * 16 byte buffer for sending and receiving data to and from the SCU.
 */
#define IPC_WRITE_BUFFER	0x80
#define IPC_READ_BUFFER		0x90

/* Timeout in jiffies */
#define IPC_TIMEOUT		(10 * HZ)

static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* Lock used to prevent multiple calls to the SCU */

static struct class intel_scu_ipc_class = {
	.name = "intel_scu_ipc",
};

/**
 * intel_scu_ipc_dev_get() - Get SCU IPC instance
 *
 * The recommended new API takes SCU IPC instance as parameter and this
 * function can be called by a driver to get the instance. This also makes
 * sure the driver providing the IPC functionality cannot be unloaded
 * while the caller has the instance.
 *
 * Call intel_scu_ipc_dev_put() to release the instance.
 *
 * Returns %NULL if SCU IPC is not currently available.
 */
struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
{
	guard(mutex)(&ipclock);

	if (ipcdev) {
		get_device(&ipcdev->dev);
		/*
		 * Prevent the IPC provider from being unloaded while it
		 * is being used.
		 */
		if (!try_module_get(ipcdev->owner))
			put_device(&ipcdev->dev);
		else
			return ipcdev;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);

/**
 * intel_scu_ipc_dev_put() - Put SCU IPC instance
 * @scu: SCU IPC instance
 *
 * This function releases the SCU IPC instance retrieved from
 * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
 * unloaded.
 */
void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
{
	if (scu) {
		module_put(scu->owner);
		put_device(&scu->dev);
	}
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
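
/*
 * Usage sketch (not compiled): how a consumer might pair
 * intel_scu_ipc_dev_get() with intel_scu_ipc_dev_put(). The register
 * address 0x30 is purely illustrative and does not refer to a real
 * PMIC/MSIC register.
 */
#if 0
static int example_read_pmic_reg(u8 *val)
{
	struct intel_scu_ipc_dev *scu;
	int ret;

	scu = intel_scu_ipc_dev_get();
	if (!scu)
		return -EPROBE_DEFER;	/* IPC provider not registered (yet) */

	ret = intel_scu_ipc_dev_ioread8(scu, 0x30, val);

	intel_scu_ipc_dev_put(scu);	/* drops module and device references */
	return ret;
}
#endif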

struct intel_scu_ipc_devres {
	struct intel_scu_ipc_dev *scu;
};

static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
{
	struct intel_scu_ipc_devres *dr = res;
	struct intel_scu_ipc_dev *scu = dr->scu;

	intel_scu_ipc_dev_put(scu);
}

/**
 * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
 * @dev: Device requesting the SCU IPC device
 *
 * The recommended new API takes SCU IPC instance as parameter and this
 * function can be called by a driver to get the instance. This also makes
 * sure the driver providing the IPC functionality cannot be unloaded
 * while the caller has the instance.
 *
 * Returns %NULL if SCU IPC is not currently available.
 */
struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
{
	struct intel_scu_ipc_devres *dr;
	struct intel_scu_ipc_dev *scu;

	dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	scu = intel_scu_ipc_dev_get();
	if (!scu) {
		devres_free(dr);
		return NULL;
	}

	dr->scu = scu;
	devres_add(dev, dr);

	return scu;
}
EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
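
/*
 * Usage sketch (not compiled): the managed variant ties the reference to the
 * lifetime of the consumer device, so no explicit put is needed. The device
 * pointer is whatever the consumer driver already has at probe time.
 */
#if 0
static int example_consumer_probe(struct device *dev)
{
	struct intel_scu_ipc_dev *scu;

	scu = devm_intel_scu_ipc_dev_get(dev);
	if (!scu)
		return -EPROBE_DEFER;	/* reference dropped automatically on detach */

	/* use scu for IPC calls for the lifetime of @dev */
	return 0;
}
#endif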

/*
 * Send ipc command
 * Command Register (Write Only):
 * A write to this register results in an interrupt to the SCU core processor
 * Format:
 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
 */
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
	reinit_completion(&scu->cmd_complete);
	writel(cmd | IPC_IOC, scu->ipc_base);
}

/*
 * Write ipc data
 * IPC Write Buffer (Write Only):
 * 16-byte buffer for sending data associated with IPC command to
 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
 */
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
	writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}

/*
 * Status Register (Read Only):
 * Driver will read this register to get the ready/busy status of the IPC
 * block and error status of the IPC command that was just processed by SCU
 * Format:
 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
 */
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
	return __raw_readl(scu->ipc_base + IPC_STATUS);
}

/* Read ipc u32 data */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
}

/* Poll till the SCU status is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
	u8 status;
	int err;

	err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
				 100, jiffies_to_usecs(IPC_TIMEOUT));
	if (err)
		return err;

	return (status & IPC_STATUS_ERR) ? -EIO : 0;
}

/* Wait till the ipc ioc interrupt is received or the timeout expires */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
	u8 status;

	wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);

	status = ipc_read_status(scu);
	if (status & IPC_STATUS_BUSY)
		return -ETIMEDOUT;

	if (status & IPC_STATUS_ERR)
		return -EIO;

	return 0;
}

static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
{
	return scu->data.irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}

static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
{
	u8 status;

	if (!scu)
		scu = ipcdev;
	if (!scu)
		return ERR_PTR(-ENODEV);

	status = ipc_read_status(scu);
	if (status & IPC_STATUS_BUSY) {
		dev_dbg(&scu->dev, "device is busy\n");
		return ERR_PTR(-EBUSY);
	}

	return scu;
}

/* Read/Write power control (PMIC in Langwell, MSIC in PenWell) registers */
static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			u32 count, u32 op, u32 id)
{
	int nc;
	u32 offset = 0;
	int err;
	u8 cbuf[IPC_WWBUF_SIZE];
	u32 *wbuf = (u32 *)&cbuf;

	memset(cbuf, 0, sizeof(cbuf));

	guard(mutex)(&ipclock);

	scu = intel_scu_ipc_get(scu);
	if (IS_ERR(scu))
		return PTR_ERR(scu);

	for (nc = 0; nc < count; nc++, offset += 2) {
		cbuf[offset] = addr[nc];
		cbuf[offset + 1] = addr[nc] >> 8;
	}

	if (id == IPC_CMD_PCNTRL_R) {
		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
			ipc_data_writel(scu, wbuf[nc], offset);
		ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
	} else if (id == IPC_CMD_PCNTRL_W) {
		for (nc = 0; nc < count; nc++, offset += 1)
			cbuf[offset] = data[nc];
		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
			ipc_data_writel(scu, wbuf[nc], offset);
		ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
	} else if (id == IPC_CMD_PCNTRL_M) {
		cbuf[offset] = data[0];
		cbuf[offset + 1] = data[1];
		ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
		ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
	}

	err = intel_scu_ipc_check_status(scu);
	if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
		for (nc = 0, offset = 0; nc < 4; nc++, offset += 4)
			wbuf[nc] = ipc_data_readl(scu, offset);
		memcpy(data, wbuf, count);
	}

	return err;
}

/**
 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
 * @scu: Optional SCU IPC instance
 * @addr: Register on SCU
 * @data: Return pointer for read byte
 *
 * Read a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
{
	return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);

/**
 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
 * @scu: Optional SCU IPC instance
 * @addr: Register on SCU
 * @data: Byte to write
 *
 * Write a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
{
	return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
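
/*
 * Usage sketch (not compiled): single-register read and write through the
 * power control unit path. The register address and bit value are purely
 * illustrative and do not refer to a real PMIC/MSIC register.
 */
#if 0
static int example_toggle_bit(struct intel_scu_ipc_dev *scu)
{
	u8 val;
	int ret;

	ret = intel_scu_ipc_dev_ioread8(scu, 0x30, &val);	/* read register 0x30 */
	if (ret)
		return ret;

	return intel_scu_ipc_dev_iowrite8(scu, 0x30, val | BIT(0));	/* write it back */
}
#endif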

/**
 * intel_scu_ipc_dev_readv() - Read a set of registers
 * @scu: Optional SCU IPC instance
 * @addr: Register list
 * @data: Bytes to return
 * @len: Length of array
 *
 * Read registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			    size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_readv);

/**
 * intel_scu_ipc_dev_writev() - Write a set of registers
 * @scu: Optional SCU IPC instance
 * @addr: Register list
 * @data: Bytes to write
 * @len: Length of array
 *
 * Write registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			     size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
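
/*
 * Usage sketch (not compiled): vectored access reads or writes several
 * scattered registers in one IPC transaction. The addresses below are
 * illustrative; the hardware limit of 5 items per call applies and @out
 * must provide one byte per listed register.
 */
#if 0
static int example_read_three_regs(struct intel_scu_ipc_dev *scu, u8 *out)
{
	u16 regs[3] = { 0x30, 0x31, 0x4a };	/* hypothetical register list */

	return intel_scu_ipc_dev_readv(scu, regs, out, ARRAY_SIZE(regs));
}
#endif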

/**
 * intel_scu_ipc_dev_update() - Update a register
 * @scu: Optional SCU IPC instance
 * @addr: Register address
 * @data: Bits to update
 * @mask: Mask of bits to update
 *
 * Read-modify-write a power control unit register. The first data argument
 * is the register value and the second is the mask of bits to update: a %0
 * bit is left untouched, a %1 bit is updated from @data. Returns %0 on
 * success or an error code.
 *
 * This function may sleep. Locking between SCU accesses is handled
 * for the caller.
 */
int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
			     u8 mask)
{
	u8 tmp[2] = { data, mask };
	return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_update);
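
/*
 * Usage sketch (not compiled): set bit 3 of a register without disturbing
 * the other bits. Only the bits set in the mask are modified by the SCU.
 * The register address is illustrative.
 */
#if 0
static int example_set_bit3(struct intel_scu_ipc_dev *scu)
{
	/* data = BIT(3), mask = BIT(3): write 1 to bit 3, leave the rest alone */
	return intel_scu_ipc_dev_update(scu, 0x30, BIT(3), BIT(3));
}
#endif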

/**
 * intel_scu_ipc_dev_simple_command() - Send a simple command
 * @scu: Optional SCU IPC instance
 * @cmd: Command
 * @sub: Sub type
 *
 * Issue a simple command to the SCU. Do not use this interface if you must
 * then access data as any data values may be overwritten by another SCU
 * access by the time this function returns.
 *
 * This function may sleep. Locking for SCU accesses is handled for the
 * caller.
 */
int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
				     int sub)
{
	u32 cmdval;
	int err;

	guard(mutex)(&ipclock);

	scu = intel_scu_ipc_get(scu);
	if (IS_ERR(scu))
		return PTR_ERR(scu);

	cmdval = sub << 12 | cmd;
	ipc_command(scu, cmdval);
	err = intel_scu_ipc_check_status(scu);
	if (err)
		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);

	return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
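
/*
 * Usage sketch (not compiled): fire-and-check command with no data payload.
 * The command and sub-command values are hypothetical; real values come from
 * the platform's SCU firmware interface definitions.
 */
#if 0
static int example_simple_cmd(struct intel_scu_ipc_dev *scu)
{
	/* The command word becomes (sub << 12) | cmd, here 0x1 << 12 | 0xa2 */
	return intel_scu_ipc_dev_simple_command(scu, 0xa2, 0x1);
}
#endif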

/**
 * intel_scu_ipc_dev_command_with_size() - Command with data
 * @scu: Optional SCU IPC instance
 * @cmd: Command
 * @sub: Sub type
 * @in: Input data
 * @inlen: Input length in bytes
 * @size: Input size written to the IPC command register in whatever
 *	  units (dword, byte) the particular firmware requires. Normally
 *	  should be the same as @inlen.
 * @out: Output data
 * @outlen: Output length in bytes
 *
 * Issue a command to the SCU which involves data transfers. Do the
 * data copies under the lock but leave it for the caller to interpret.
 *
 * Returns %0 on success or an error code.
 */
int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
					int sub, const void *in, size_t inlen,
					size_t size, void *out, size_t outlen)
{
	size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
	size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
	u32 cmdval, inbuf[4] = {}, outbuf[4] = {};
	int i, err;

	if (inbuflen > 4 || outbuflen > 4)
		return -EINVAL;

	guard(mutex)(&ipclock);

	scu = intel_scu_ipc_get(scu);
	if (IS_ERR(scu))
		return PTR_ERR(scu);

	memcpy(inbuf, in, inlen);
	for (i = 0; i < inbuflen; i++)
		ipc_data_writel(scu, inbuf[i], 4 * i);

	cmdval = (size << 16) | (sub << 12) | cmd;
	ipc_command(scu, cmdval);
	err = intel_scu_ipc_check_status(scu);
	if (err) {
		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
		return err;
	}

	for (i = 0; i < outbuflen; i++)
		outbuf[i] = ipc_data_readl(scu, 4 * i);

	memcpy(out, outbuf, outlen);

	return 0;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
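
/*
 * Usage sketch (not compiled): command with a small input payload and a
 * 32-bit reply. The command, sub-command and payload are hypothetical; @size
 * is passed in whatever units the firmware expects (here, bytes == inlen).
 */
#if 0
static int example_cmd_with_data(struct intel_scu_ipc_dev *scu, u32 *reply)
{
	u32 arg = 0x1234;	/* illustrative input payload */

	return intel_scu_ipc_dev_command_with_size(scu, 0xa3, 0x0,
						   &arg, sizeof(arg), sizeof(arg),
						   reply, sizeof(*reply));
}
#endif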

/*
 * The interrupt handler is called when the IOC bit of IPC_COMMAND_REG is set.
 * In that case the caller waits for this handler to run; completing the
 * command here unblocks the waiter in ipc_wait_for_interrupt().
 *
 * This is edge triggered so no action is needed to clear anything.
 */
static irqreturn_t ioc(int irq, void *dev_id)
{
	struct intel_scu_ipc_dev *scu = dev_id;
	int status = ipc_read_status(scu);

	writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
	complete(&scu->cmd_complete);

	return IRQ_HANDLED;
}

static void intel_scu_ipc_release(struct device *dev)
{
	struct intel_scu_ipc_dev *scu = container_of(dev, struct intel_scu_ipc_dev, dev);
	struct intel_scu_ipc_data *data = &scu->data;

	if (data->irq > 0)
		free_irq(data->irq, scu);
	iounmap(scu->ipc_base);
	release_mem_region(data->mem.start, resource_size(&data->mem));
	kfree(scu);
}

/**
 * __intel_scu_ipc_register() - Register SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register SCU IPC mechanism under @parent.
 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
 * failure. The caller may use the returned instance if it needs to do
 * SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__intel_scu_ipc_register(struct device *parent,
			 const struct intel_scu_ipc_data *scu_data,
			 struct module *owner)
{
	int err;
	struct intel_scu_ipc_data *data;
	struct intel_scu_ipc_dev *scu;
	void __iomem *ipc_base;

	guard(mutex)(&ipclock);

	/* We support only one IPC */
	if (ipcdev)
		return ERR_PTR(-EBUSY);

	scu = kzalloc(sizeof(*scu), GFP_KERNEL);
	if (!scu)
		return ERR_PTR(-ENOMEM);

	scu->owner = owner;
	scu->dev.parent = parent;
	scu->dev.class = &intel_scu_ipc_class;
	scu->dev.release = intel_scu_ipc_release;

	memcpy(&scu->data, scu_data, sizeof(scu->data));
	data = &scu->data;

	if (!request_mem_region(data->mem.start, resource_size(&data->mem), "intel_scu_ipc")) {
		err = -EBUSY;
		goto err_free;
	}

	ipc_base = ioremap(data->mem.start, resource_size(&data->mem));
	if (!ipc_base) {
		err = -ENOMEM;
		goto err_release;
	}

	scu->ipc_base = ipc_base;
	init_completion(&scu->cmd_complete);

	if (data->irq > 0) {
		err = request_irq(data->irq, ioc, 0, "intel_scu_ipc", scu);
		if (err)
			goto err_unmap;
	}

	/*
	 * After this point intel_scu_ipc_release() takes care of
	 * releasing the SCU IPC resources once refcount drops to zero.
	 */
	dev_set_name(&scu->dev, "intel_scu_ipc");
	err = device_register(&scu->dev);
	if (err) {
		put_device(&scu->dev);
		return ERR_PTR(err);
	}

	/* Assign device at last */
	ipcdev = scu;

	return scu;

err_unmap:
	iounmap(ipc_base);
err_release:
	release_mem_region(data->mem.start, resource_size(&data->mem));
err_free:
	kfree(scu);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
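
/*
 * Usage sketch (not compiled): how a platform driver providing the SCU IPC
 * resources might register the mechanism. The memory range and IRQ number
 * are hypothetical placeholders; real values come from the platform
 * description of the SCU IPC-1 register block.
 */
#if 0
static struct intel_scu_ipc_dev *example_register_provider(struct device *parent)
{
	struct intel_scu_ipc_data scu_data = {
		.mem = DEFINE_RES_MEM(0xff103000, 0x1000),	/* hypothetical register window */
		.irq = 30,					/* hypothetical IRQ, 0 selects polling */
	};

	return __intel_scu_ipc_register(parent, &scu_data, THIS_MODULE);
}
#endif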

/**
 * intel_scu_ipc_unregister() - Unregister SCU IPC
 * @scu: SCU IPC handle
 *
 * This unregisters the SCU IPC device and releases the acquired
 * resources once the refcount goes to zero.
 */
void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
{
	guard(mutex)(&ipclock);

	if (!WARN_ON(!ipcdev)) {
		ipcdev = NULL;
		device_unregister(&scu->dev);
	}
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);

static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
{
	struct intel_scu_ipc_devres *dr = res;
	struct intel_scu_ipc_dev *scu = dr->scu;

	intel_scu_ipc_unregister(scu);
}

/**
 * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register managed SCU IPC mechanism under
 * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
 * case of failure. The caller may use the returned instance if it needs
 * to do SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__devm_intel_scu_ipc_register(struct device *parent,
			      const struct intel_scu_ipc_data *scu_data,
			      struct module *owner)
{
	struct intel_scu_ipc_devres *dr;
	struct intel_scu_ipc_dev *scu;

	dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	scu = __intel_scu_ipc_register(parent, scu_data, owner);
	if (IS_ERR(scu)) {
		devres_free(dr);
		return scu;
	}

	dr->scu = scu;
	devres_add(parent, dr);

	return scu;
}
EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);

static int __init intel_scu_ipc_init(void)
{
	return class_register(&intel_scu_ipc_class);
}
subsys_initcall(intel_scu_ipc_init);

static void __exit intel_scu_ipc_exit(void)
{
	class_unregister(&intel_scu_ipc_class);
}
module_exit(intel_scu_ipc_exit);