/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */
8 #define KMSG_COMPONENT "zpci"
9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 #include <linux/compat.h>
12 #include <linux/kernel.h>
13 #include <linux/miscdevice.h>
14 #include <linux/slab.h>
15 #include <linux/err.h>
16 #include <linux/delay.h>
17 #include <linux/pci.h>
18 #include <linux/uaccess.h>
19 #include <asm/pci_debug.h>
20 #include <asm/pci_clp.h>
21 #include <asm/compat.h>
23 #include <uapi/asm/clp.h>
25 static inline void zpci_err_clp(unsigned int rsp
, int rc
)
30 } __packed data
= {rsp
, rc
};
32 zpci_err_hex(&data
, sizeof(data
));
36 * Call Logical Processor with c=1, lps=0 and command 1
37 * to get the bit mask of installed logical processors
39 static inline int clp_get_ilp(unsigned long *ilp
)
45 " .insn rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
50 : [cc
] "+d" (cc
), [mask
] "=d" (mask
) : [cmd
] "a" (1)
57 * Call Logical Processor with c=0, the give constant lps and an lpcb request.
59 static inline int clp_req(void *data
, unsigned int lps
)
61 struct { u8 _
[CLP_BLK_SIZE
]; } *req
= data
;
66 " .insn rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
71 : [cc
] "+d" (cc
), [ign
] "=d" (ignored
), "+m" (*req
)
72 : [req
] "a" (req
), [lps
] "i" (lps
)
77 static void *clp_alloc_block(gfp_t gfp_mask
)
79 return (void *) __get_free_pages(gfp_mask
, get_order(CLP_BLK_SIZE
));
82 static void clp_free_block(void *ptr
)
84 free_pages((unsigned long) ptr
, get_order(CLP_BLK_SIZE
));
87 static void clp_store_query_pci_fngrp(struct zpci_dev
*zdev
,
88 struct clp_rsp_query_pci_grp
*response
)
90 zdev
->tlb_refresh
= response
->refresh
;
91 zdev
->dma_mask
= response
->dasm
;
92 zdev
->msi_addr
= response
->msia
;
93 zdev
->max_msi
= response
->noi
;
94 zdev
->fmb_update
= response
->mui
;
96 switch (response
->version
) {
98 zdev
->max_bus_speed
= PCIE_SPEED_5_0GT
;
101 zdev
->max_bus_speed
= PCI_SPEED_UNKNOWN
;
106 static int clp_query_pci_fngrp(struct zpci_dev
*zdev
, u8 pfgid
)
108 struct clp_req_rsp_query_pci_grp
*rrb
;
111 rrb
= clp_alloc_block(GFP_KERNEL
);
115 memset(rrb
, 0, sizeof(*rrb
));
116 rrb
->request
.hdr
.len
= sizeof(rrb
->request
);
117 rrb
->request
.hdr
.cmd
= CLP_QUERY_PCI_FNGRP
;
118 rrb
->response
.hdr
.len
= sizeof(rrb
->response
);
119 rrb
->request
.pfgid
= pfgid
;
121 rc
= clp_req(rrb
, CLP_LPS_PCI
);
122 if (!rc
&& rrb
->response
.hdr
.rsp
== CLP_RC_OK
)
123 clp_store_query_pci_fngrp(zdev
, &rrb
->response
);
125 zpci_err("Q PCI FGRP:\n");
126 zpci_err_clp(rrb
->response
.hdr
.rsp
, rc
);
133 static int clp_store_query_pci_fn(struct zpci_dev
*zdev
,
134 struct clp_rsp_query_pci
*response
)
138 for (i
= 0; i
< PCI_BAR_COUNT
; i
++) {
139 zdev
->bars
[i
].val
= le32_to_cpu(response
->bar
[i
]);
140 zdev
->bars
[i
].size
= response
->bar_size
[i
];
142 zdev
->start_dma
= response
->sdma
;
143 zdev
->end_dma
= response
->edma
;
144 zdev
->pchid
= response
->pchid
;
145 zdev
->pfgid
= response
->pfgid
;
146 zdev
->pft
= response
->pft
;
147 zdev
->vfn
= response
->vfn
;
148 zdev
->uid
= response
->uid
;
150 memcpy(zdev
->pfip
, response
->pfip
, sizeof(zdev
->pfip
));
151 if (response
->util_str_avail
) {
152 memcpy(zdev
->util_str
, response
->util_str
,
153 sizeof(zdev
->util_str
));
159 static int clp_query_pci_fn(struct zpci_dev
*zdev
, u32 fh
)
161 struct clp_req_rsp_query_pci
*rrb
;
164 rrb
= clp_alloc_block(GFP_KERNEL
);
168 memset(rrb
, 0, sizeof(*rrb
));
169 rrb
->request
.hdr
.len
= sizeof(rrb
->request
);
170 rrb
->request
.hdr
.cmd
= CLP_QUERY_PCI_FN
;
171 rrb
->response
.hdr
.len
= sizeof(rrb
->response
);
172 rrb
->request
.fh
= fh
;
174 rc
= clp_req(rrb
, CLP_LPS_PCI
);
175 if (!rc
&& rrb
->response
.hdr
.rsp
== CLP_RC_OK
) {
176 rc
= clp_store_query_pci_fn(zdev
, &rrb
->response
);
179 rc
= clp_query_pci_fngrp(zdev
, rrb
->response
.pfgid
);
181 zpci_err("Q PCI FN:\n");
182 zpci_err_clp(rrb
->response
.hdr
.rsp
, rc
);
190 int clp_add_pci_device(u32 fid
, u32 fh
, int configured
)
192 struct zpci_dev
*zdev
;
195 zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid
, fh
, configured
);
196 zdev
= kzalloc(sizeof(*zdev
), GFP_KERNEL
);
203 /* Query function properties and update zdev */
204 rc
= clp_query_pci_fn(zdev
, fh
);
209 zdev
->state
= ZPCI_FN_STATE_CONFIGURED
;
211 zdev
->state
= ZPCI_FN_STATE_STANDBY
;
213 rc
= zpci_create_device(zdev
);
224 * Enable/Disable a given PCI function defined by its function handle.
226 static int clp_set_pci_fn(u32
*fh
, u8 nr_dma_as
, u8 command
)
228 struct clp_req_rsp_set_pci
*rrb
;
229 int rc
, retries
= 100;
231 rrb
= clp_alloc_block(GFP_KERNEL
);
236 memset(rrb
, 0, sizeof(*rrb
));
237 rrb
->request
.hdr
.len
= sizeof(rrb
->request
);
238 rrb
->request
.hdr
.cmd
= CLP_SET_PCI_FN
;
239 rrb
->response
.hdr
.len
= sizeof(rrb
->response
);
240 rrb
->request
.fh
= *fh
;
241 rrb
->request
.oc
= command
;
242 rrb
->request
.ndas
= nr_dma_as
;
244 rc
= clp_req(rrb
, CLP_LPS_PCI
);
245 if (rrb
->response
.hdr
.rsp
== CLP_RC_SETPCIFN_BUSY
) {
251 } while (rrb
->response
.hdr
.rsp
== CLP_RC_SETPCIFN_BUSY
);
253 if (!rc
&& rrb
->response
.hdr
.rsp
== CLP_RC_OK
)
254 *fh
= rrb
->response
.fh
;
256 zpci_err("Set PCI FN:\n");
257 zpci_err_clp(rrb
->response
.hdr
.rsp
, rc
);
264 int clp_enable_fh(struct zpci_dev
*zdev
, u8 nr_dma_as
)
269 rc
= clp_set_pci_fn(&fh
, nr_dma_as
, CLP_SET_ENABLE_PCI_FN
);
271 /* Success -> store enabled handle in zdev */
274 zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev
->fid
, zdev
->fh
, rc
);
278 int clp_disable_fh(struct zpci_dev
*zdev
)
283 if (!zdev_enabled(zdev
))
286 rc
= clp_set_pci_fn(&fh
, 0, CLP_SET_DISABLE_PCI_FN
);
288 /* Success -> store disabled handle in zdev */
291 zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev
->fid
, zdev
->fh
, rc
);
295 static int clp_list_pci(struct clp_req_rsp_list_pci
*rrb
,
296 void (*cb
)(struct clp_fh_list_entry
*entry
))
298 u64 resume_token
= 0;
302 memset(rrb
, 0, sizeof(*rrb
));
303 rrb
->request
.hdr
.len
= sizeof(rrb
->request
);
304 rrb
->request
.hdr
.cmd
= CLP_LIST_PCI
;
305 /* store as many entries as possible */
306 rrb
->response
.hdr
.len
= CLP_BLK_SIZE
- LIST_PCI_HDR_LEN
;
307 rrb
->request
.resume_token
= resume_token
;
309 /* Get PCI function handle list */
310 rc
= clp_req(rrb
, CLP_LPS_PCI
);
311 if (rc
|| rrb
->response
.hdr
.rsp
!= CLP_RC_OK
) {
312 zpci_err("List PCI FN:\n");
313 zpci_err_clp(rrb
->response
.hdr
.rsp
, rc
);
318 WARN_ON_ONCE(rrb
->response
.entry_size
!=
319 sizeof(struct clp_fh_list_entry
));
321 entries
= (rrb
->response
.hdr
.len
- LIST_PCI_HDR_LEN
) /
322 rrb
->response
.entry_size
;
324 resume_token
= rrb
->response
.resume_token
;
325 for (i
= 0; i
< entries
; i
++)
326 cb(&rrb
->response
.fh_list
[i
]);
327 } while (resume_token
);
332 static void __clp_add(struct clp_fh_list_entry
*entry
)
334 if (!entry
->vendor_id
)
337 clp_add_pci_device(entry
->fid
, entry
->fh
, entry
->config_state
);
340 static void __clp_rescan(struct clp_fh_list_entry
*entry
)
342 struct zpci_dev
*zdev
;
344 if (!entry
->vendor_id
)
347 zdev
= get_zdev_by_fid(entry
->fid
);
349 clp_add_pci_device(entry
->fid
, entry
->fh
, entry
->config_state
);
353 if (!entry
->config_state
) {
355 * The handle is already disabled, that means no iota/irq freeing via
356 * the firmware interfaces anymore. Need to free resources manually
357 * (DMA memory, debug, sysfs)...
359 zpci_stop_device(zdev
);
363 static void __clp_update(struct clp_fh_list_entry
*entry
)
365 struct zpci_dev
*zdev
;
367 if (!entry
->vendor_id
)
370 zdev
= get_zdev_by_fid(entry
->fid
);
374 zdev
->fh
= entry
->fh
;
377 int clp_scan_pci_devices(void)
379 struct clp_req_rsp_list_pci
*rrb
;
382 rrb
= clp_alloc_block(GFP_KERNEL
);
386 rc
= clp_list_pci(rrb
, __clp_add
);
392 int clp_rescan_pci_devices(void)
394 struct clp_req_rsp_list_pci
*rrb
;
397 rrb
= clp_alloc_block(GFP_KERNEL
);
401 rc
= clp_list_pci(rrb
, __clp_rescan
);
407 int clp_rescan_pci_devices_simple(void)
409 struct clp_req_rsp_list_pci
*rrb
;
412 rrb
= clp_alloc_block(GFP_NOWAIT
);
416 rc
= clp_list_pci(rrb
, __clp_update
);
422 static int clp_base_slpc(struct clp_req
*req
, struct clp_req_rsp_slpc
*lpcb
)
424 unsigned long limit
= PAGE_SIZE
- sizeof(lpcb
->request
);
426 if (lpcb
->request
.hdr
.len
!= sizeof(lpcb
->request
) ||
427 lpcb
->response
.hdr
.len
> limit
)
429 return clp_req(lpcb
, CLP_LPS_BASE
) ? -EOPNOTSUPP
: 0;
432 static int clp_base_command(struct clp_req
*req
, struct clp_req_hdr
*lpcb
)
435 case 0x0001: /* store logical-processor characteristics */
436 return clp_base_slpc(req
, (void *) lpcb
);
442 static int clp_pci_slpc(struct clp_req
*req
, struct clp_req_rsp_slpc
*lpcb
)
444 unsigned long limit
= PAGE_SIZE
- sizeof(lpcb
->request
);
446 if (lpcb
->request
.hdr
.len
!= sizeof(lpcb
->request
) ||
447 lpcb
->response
.hdr
.len
> limit
)
449 return clp_req(lpcb
, CLP_LPS_PCI
) ? -EOPNOTSUPP
: 0;
452 static int clp_pci_list(struct clp_req
*req
, struct clp_req_rsp_list_pci
*lpcb
)
454 unsigned long limit
= PAGE_SIZE
- sizeof(lpcb
->request
);
456 if (lpcb
->request
.hdr
.len
!= sizeof(lpcb
->request
) ||
457 lpcb
->response
.hdr
.len
> limit
)
459 if (lpcb
->request
.reserved2
!= 0)
461 return clp_req(lpcb
, CLP_LPS_PCI
) ? -EOPNOTSUPP
: 0;
464 static int clp_pci_query(struct clp_req
*req
,
465 struct clp_req_rsp_query_pci
*lpcb
)
467 unsigned long limit
= PAGE_SIZE
- sizeof(lpcb
->request
);
469 if (lpcb
->request
.hdr
.len
!= sizeof(lpcb
->request
) ||
470 lpcb
->response
.hdr
.len
> limit
)
472 if (lpcb
->request
.reserved2
!= 0 || lpcb
->request
.reserved3
!= 0)
474 return clp_req(lpcb
, CLP_LPS_PCI
) ? -EOPNOTSUPP
: 0;
477 static int clp_pci_query_grp(struct clp_req
*req
,
478 struct clp_req_rsp_query_pci_grp
*lpcb
)
480 unsigned long limit
= PAGE_SIZE
- sizeof(lpcb
->request
);
482 if (lpcb
->request
.hdr
.len
!= sizeof(lpcb
->request
) ||
483 lpcb
->response
.hdr
.len
> limit
)
485 if (lpcb
->request
.reserved2
!= 0 || lpcb
->request
.reserved3
!= 0 ||
486 lpcb
->request
.reserved4
!= 0)
488 return clp_req(lpcb
, CLP_LPS_PCI
) ? -EOPNOTSUPP
: 0;
491 static int clp_pci_command(struct clp_req
*req
, struct clp_req_hdr
*lpcb
)
494 case 0x0001: /* store logical-processor characteristics */
495 return clp_pci_slpc(req
, (void *) lpcb
);
496 case 0x0002: /* list PCI functions */
497 return clp_pci_list(req
, (void *) lpcb
);
498 case 0x0003: /* query PCI function */
499 return clp_pci_query(req
, (void *) lpcb
);
500 case 0x0004: /* query PCI function group */
501 return clp_pci_query_grp(req
, (void *) lpcb
);
507 static int clp_normal_command(struct clp_req
*req
)
509 struct clp_req_hdr
*lpcb
;
514 if (req
->lps
!= 0 && req
->lps
!= 2)
518 lpcb
= clp_alloc_block(GFP_KERNEL
);
523 uptr
= (void __force __user
*)(unsigned long) req
->data_p
;
524 if (copy_from_user(lpcb
, uptr
, PAGE_SIZE
) != 0)
528 if (lpcb
->fmt
!= 0 || lpcb
->reserved1
!= 0 || lpcb
->reserved2
!= 0)
533 rc
= clp_base_command(req
, lpcb
);
536 rc
= clp_pci_command(req
, lpcb
);
543 if (copy_to_user(uptr
, lpcb
, PAGE_SIZE
) != 0)
549 clp_free_block(lpcb
);
554 static int clp_immediate_command(struct clp_req
*req
)
560 if (req
->cmd
> 1 || clp_get_ilp(&ilp
) != 0)
563 uptr
= (void __force __user
*)(unsigned long) req
->data_p
;
565 /* Command code 0: test for a specific processor */
566 exists
= test_bit_inv(req
->lps
, &ilp
);
567 return put_user(exists
, (int __user
*) uptr
);
569 /* Command code 1: return bit mask of installed processors */
570 return put_user(ilp
, (unsigned long __user
*) uptr
);
573 static long clp_misc_ioctl(struct file
*filp
, unsigned int cmd
,
582 argp
= is_compat_task() ? compat_ptr(arg
) : (void __user
*) arg
;
583 if (copy_from_user(&req
, argp
, sizeof(req
)))
587 return req
.c
? clp_immediate_command(&req
) : clp_normal_command(&req
);
/* Nothing to clean up on close of the clp misc device. */
static int clp_misc_release(struct inode *inode, struct file *filp)
{
	return 0;
}
595 static const struct file_operations clp_misc_fops
= {
596 .owner
= THIS_MODULE
,
597 .open
= nonseekable_open
,
598 .release
= clp_misc_release
,
599 .unlocked_ioctl
= clp_misc_ioctl
,
600 .compat_ioctl
= clp_misc_ioctl
,
604 static struct miscdevice clp_misc_device
= {
605 .minor
= MISC_DYNAMIC_MINOR
,
607 .fops
= &clp_misc_fops
,
610 static int __init
clp_misc_init(void)
612 return misc_register(&clp_misc_device
);
615 device_initcall(clp_misc_init
);