arch/powerpc/platforms/powernv/opal-prd.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * OPAL Runtime Diagnostics interface driver
 * Supported on POWERNV platform
 *
 * Copyright IBM Corporation 2015
 */
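
/*
 * This driver exposes a misc character device ("opal-prd") through which a
 * userspace processor-runtime-diagnostics (PRD) application talks to OPAL
 * firmware:
 *
 *  - read()/poll() deliver PRD messages queued from OPAL notifications
 *  - write() passes PRD messages from userspace down to OPAL
 *  - mmap() maps the firmware-reserved PRD ranges described by
 *    /reserved-memory children carrying an "ibm,prd-label" property
 *  - ioctl() provides an interface-version query and SCOM read/write access
 *
 * Only one process may have the device open at a time; see opal_prd_open().
 */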

#define pr_fmt(fmt) "opal-prd: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/opal-prd.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <linux/uaccess.h>

/*
 * The msg member must be at the end of the struct, as it's followed by the
 * message data.
 */
struct opal_prd_msg_queue_item {
	struct list_head		list;
	struct opal_prd_msg_header	msg;
};
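
/*
 * Items are sized and allocated in opal_prd_msg_notifier(): the incoming
 * header plus payload is copied into 'msg', with the payload occupying the
 * trailing space beyond the struct.
 */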

static struct device_node *prd_node;
static LIST_HEAD(opal_prd_msg_queue);
static DEFINE_SPINLOCK(opal_prd_msg_queue_lock);
static DECLARE_WAIT_QUEUE_HEAD(opal_prd_msg_wait);
static atomic_t prd_usage;
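
/*
 * Check that [addr, addr + size) lies entirely within one of the PRD ranges
 * that firmware describes as children of /reserved-memory and tags with an
 * "ibm,prd-label" property.
 */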
static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size)
{
	struct device_node *parent, *node;
	bool found;

	if (addr + size < addr)
		return false;

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent)
		return false;

	found = false;

	for_each_child_of_node(parent, node) {
		uint64_t range_addr, range_size, range_end;
		const __be32 *addrp;
		const char *label;

		addrp = of_get_address(node, 0, &range_size, NULL);

		range_addr = of_read_number(addrp, 2);
		range_end = range_addr + range_size;

		label = of_get_property(node, "ibm,prd-label", NULL);

		/* PRD ranges need a label */
		if (!label)
			continue;

		if (range_end <= range_addr)
			continue;

		if (addr >= range_addr && addr + size <= range_end) {
			found = true;
			of_node_put(node);
			break;
		}
	}

	of_node_put(parent);
	return found;
}

static int opal_prd_open(struct inode *inode, struct file *file)
{
	/*
	 * Prevent multiple (separate) processes from concurrent interactions
	 * with the FW PRD channel
	 */
	if (atomic_xchg(&prd_usage, 1) == 1)
		return -EBUSY;

	return 0;
}

/*
 * opal_prd_mmap - maps firmware-provided ranges into userspace
 * @file: file structure for the device
 * @vma: VMA to map the registers into
 */

static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t addr, size;
	pgprot_t page_prot;
	int rc;

	pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
			vma->vm_start, vma->vm_end, vma->vm_pgoff,
			vma->vm_flags);

	addr = vma->vm_pgoff << PAGE_SHIFT;
	size = vma->vm_end - vma->vm_start;

	/* ensure we're mapping within one of the allowable ranges */
	if (!opal_prd_range_is_valid(addr, size))
		return -EINVAL;

	page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
					 size, vma->vm_page_prot);

	rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
				page_prot);

	return rc;
}

static bool opal_msg_queue_empty(void)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	ret = list_empty(&opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	return ret;
}

static __poll_t opal_prd_poll(struct file *file,
		struct poll_table_struct *wait)
{
	poll_wait(file, &opal_prd_msg_wait, wait);

	if (!opal_msg_queue_empty())
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}
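
/*
 * Deliver one queued PRD message per read. The buffer must be large enough
 * for at least a message header; if the queued message doesn't fit, it is
 * re-queued and -EINVAL is returned. Blocks until a message is available
 * unless the file was opened O_NONBLOCK.
 */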
static ssize_t opal_prd_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_queue_item *item;
	unsigned long flags;
	ssize_t size, err;
	int rc;

	/* we need at least a header's worth of data */
	if (count < sizeof(item->msg))
		return -EINVAL;

	if (*ppos)
		return -ESPIPE;

	item = NULL;

	for (;;) {

		spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
		if (!list_empty(&opal_prd_msg_queue)) {
			item = list_first_entry(&opal_prd_msg_queue,
					struct opal_prd_msg_queue_item, list);
			list_del(&item->list);
		}
		spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

		if (item)
			break;

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(opal_prd_msg_wait,
				!opal_msg_queue_empty());
		if (rc)
			return -EINTR;
	}

	size = be16_to_cpu(item->msg.size);
	if (size > count) {
		err = -EINVAL;
		goto err_requeue;
	}

	rc = copy_to_user(buf, &item->msg, size);
	if (rc) {
		err = -EFAULT;
		goto err_requeue;
	}

	kfree(item);

	return size;

err_requeue:
	/* eep! re-queue at the head of the list */
	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
	return err;
}
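
/*
 * Pass a PRD message from userspace to OPAL. The buffer must hold a complete
 * message; its length is taken from the 'size' field of the
 * opal_prd_msg_header at the start of the buffer.
 */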
static ssize_t opal_prd_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct opal_prd_msg_header hdr;
	ssize_t size;
	void *msg;
	int rc;

	size = sizeof(hdr);

	if (count < size)
		return -EINVAL;

	/* grab the header */
	rc = copy_from_user(&hdr, buf, sizeof(hdr));
	if (rc)
		return -EFAULT;

	size = be16_to_cpu(hdr.size);

	msg = memdup_user(buf, size);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	rc = opal_prd_msg(msg);
	if (rc) {
		pr_warn("write: opal_prd_msg returned %d\n", rc);
		size = -EIO;
	}

	kfree(msg);

	return size;
}
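
/*
 * On release, tell OPAL that the userspace consumer has gone away
 * (OPAL_PRD_MSG_TYPE_FINI) and drop the single-user claim taken in
 * opal_prd_open().
 */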
static int opal_prd_release(struct inode *inode, struct file *file)
{
	struct opal_prd_msg_header msg;

	msg.size = cpu_to_be16(sizeof(msg));
	msg.type = OPAL_PRD_MSG_TYPE_FINI;

	opal_prd_msg((struct opal_prd_msg *)&msg);

	atomic_xchg(&prd_usage, 0);

	return 0;
}
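
/*
 * ioctl interface: OPAL_PRD_GET_INFO reports the kernel-side interface
 * version, OPAL_PRD_SCOM_READ and OPAL_PRD_SCOM_WRITE perform xscom accesses
 * on behalf of userspace via struct opal_prd_scom.
 *
 * Illustrative userspace sketch (not part of this driver; assumes the uapi
 * definitions from <asm/opal-prd.h>, a /dev/opal-prd device node, and a
 * placeholder chip id / scom address):
 *
 *	struct opal_prd_scom scom = { .chip = 0, .addr = some_scom_addr };
 *	int fd = open("/dev/opal-prd", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, OPAL_PRD_SCOM_READ, &scom) == 0)
 *		printf("data %016llx rc %lld\n",
 *		       (unsigned long long)scom.data, (long long)scom.rc);
 */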
static long opal_prd_ioctl(struct file *file, unsigned int cmd,
		unsigned long param)
{
	struct opal_prd_info info;
	struct opal_prd_scom scom;
	int rc = 0;

	switch (cmd) {
	case OPAL_PRD_GET_INFO:
		memset(&info, 0, sizeof(info));
		info.version = OPAL_PRD_KERNEL_VERSION;
		rc = copy_to_user((void __user *)param, &info, sizeof(info));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_READ:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_read(scom.chip, scom.addr,
				(__be64 *)&scom.data);
		scom.data = be64_to_cpu(scom.data);
		pr_devel("ioctl SCOM_READ: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	case OPAL_PRD_SCOM_WRITE:
		rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
		if (rc)
			return -EFAULT;

		scom.rc = opal_xscom_write(scom.chip, scom.addr, scom.data);
		pr_devel("ioctl SCOM_WRITE: chip %llx addr %016llx data %016llx rc %lld\n",
				scom.chip, scom.addr, scom.data, scom.rc);

		rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
		if (rc)
			return -EFAULT;
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}

static const struct file_operations opal_prd_fops = {
	.open		= opal_prd_open,
	.mmap		= opal_prd_mmap,
	.poll		= opal_prd_poll,
	.read		= opal_prd_read,
	.write		= opal_prd_write,
	.unlocked_ioctl	= opal_prd_ioctl,
	.release	= opal_prd_release,
	.owner		= THIS_MODULE,
};
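
/* Dynamic misc device; udev typically exposes it as /dev/opal-prd. */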
static struct miscdevice opal_prd_dev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "opal-prd",
	.fops		= &opal_prd_fops,
};

/* opal interface */
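/*
 * Notifier callback for OPAL_MSG_PRD/OPAL_MSG_PRD2 messages. Runs in atomic
 * context, so the queue item is allocated with GFP_ATOMIC, the message is
 * copied out of the OPAL message parameters, queued, and any reader blocked
 * in read()/poll() is woken.
 */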
static int opal_prd_msg_notifier(struct notifier_block *nb,
		unsigned long msg_type, void *_msg)
{
	struct opal_prd_msg_queue_item *item;
	struct opal_prd_msg_header *hdr;
	struct opal_msg *msg = _msg;
	int msg_size, item_size;
	unsigned long flags;

	if (msg_type != OPAL_MSG_PRD && msg_type != OPAL_MSG_PRD2)
		return 0;

	/* Calculate total size of the message and item we need to store. The
	 * 'size' field in the header includes the header itself. */
	hdr = (void *)msg->params;
	msg_size = be16_to_cpu(hdr->size);
	item_size = msg_size + sizeof(*item) - sizeof(item->msg);

	item = kzalloc(item_size, GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	memcpy(&item->msg, msg->params, msg_size);

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add_tail(&item->list, &opal_prd_msg_queue);
	spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);

	wake_up_interruptible(&opal_prd_msg_wait);

	return 0;
}

static struct notifier_block opal_prd_event_nb = {
	.notifier_call	= opal_prd_msg_notifier,
	.next		= NULL,
	.priority	= 0,
};
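
/*
 * Probe is bound via the "ibm,opal-prd" compatible node. It registers for
 * both PRD message types and creates the misc device; only a single instance
 * is allowed, tracked via prd_node.
 */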
static int opal_prd_probe(struct platform_device *pdev)
{
	int rc;

	if (!pdev || !pdev->dev.of_node)
		return -ENODEV;

	/* We should only have one prd driver instance per machine; ensure
	 * that we only get a valid probe on a single OF node.
	 */
	if (prd_node)
		return -EBUSY;

	prd_node = pdev->dev.of_node;

	rc = opal_message_notifier_register(OPAL_MSG_PRD, &opal_prd_event_nb);
	if (rc) {
		pr_err("Couldn't register event notifier\n");
		return rc;
	}

	rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb);
	if (rc) {
		pr_err("Couldn't register PRD2 event notifier\n");
		return rc;
	}

	rc = misc_register(&opal_prd_dev);
	if (rc) {
		pr_err("failed to register miscdev\n");
		opal_message_notifier_unregister(OPAL_MSG_PRD,
				&opal_prd_event_nb);
		return rc;
	}

	return 0;
}

static int opal_prd_remove(struct platform_device *pdev)
{
	misc_deregister(&opal_prd_dev);
	opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
	return 0;
}

static const struct of_device_id opal_prd_match[] = {
	{ .compatible = "ibm,opal-prd" },
	{ },
};

static struct platform_driver opal_prd_driver = {
	.driver = {
		.name		= "opal-prd",
		.of_match_table	= opal_prd_match,
	},
	.probe	= opal_prd_probe,
	.remove	= opal_prd_remove,
};

module_platform_driver(opal_prd_driver);

MODULE_DEVICE_TABLE(of, opal_prd_match);
MODULE_DESCRIPTION("PowerNV OPAL runtime diagnostic driver");
MODULE_LICENSE("GPL");