// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/clp.h>
#include <uapi/asm/clp.h>
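
/*
 * The List PCI CLP response reports whether function UIDs are checked for
 * uniqueness; update_uid_checking() records that state in zpci_unique_uid.
 */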
bool zpci_unique_uid;

void update_uid_checking(bool new)
{
	if (zpci_unique_uid != new)
		zpci_dbg(1, "uid checking:%d\n", new);

	zpci_unique_uid = new;
}
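
/* Dump a CLP response code together with the clp_req() return code */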
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
	struct {
		unsigned int rsp;
		int rc;
	} __packed data = {rsp, rc};

	zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
	unsigned long mask;
	int cc = 3;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
		: "cc");
	*ilp = mask;
	return cc;
}

/*
 * Call Logical Processor with c=0, the given constant lps and an lpcb request.
 */
static __always_inline int clp_req(void *data, unsigned int lps)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	int cc = 3;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req), [lps] "i" (lps)
		: "cc");
	return cc;
}
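
/* CLP request/response blocks span CLP_BLK_SIZE bytes of page memory */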
static void *clp_alloc_block(gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}
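
/* Copy the Query PCI Function Group response into the zpci_dev */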
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->max_msi = response->noi;
	zdev->fmb_update = response->mui;
	zdev->version = response->version;

	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}

static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	else {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}
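
/*
 * Copy the Query PCI Function response into the zpci_dev: BAR values and
 * sizes, DMA range, IDs, utility string and, when reported, the MIO
 * addresses of each BAR.
 */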
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	zdev->pft = response->pft;
	zdev->vfn = response->vfn;
	zdev->port = response->port;
	zdev->uid = response->uid;
	zdev->fmb_length = sizeof(u32) * response->fmb_len;
	zdev->rid_available = response->rid_avail;
	zdev->is_physfn = response->is_physfn;
	if (!s390_pci_no_rid && zdev->rid_available)
		zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
		memcpy(zdev->util_str, response->util_str,
		       sizeof(zdev->util_str));
		zdev->util_str_avail = 1;
	}
	zdev->mio_capable = response->mio_addr_avail;
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
			continue;

		zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
		zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
	}
	return 0;
}

static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = fh;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		zpci_err("Q PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}

int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
	struct zpci_dev *zdev;
	int rc = -ENOMEM;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		goto error;

	zdev->fh = fh;
	zdev->fid = fid;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev, fh);
	if (rc)
		goto error;

	if (configured)
		zdev->state = ZPCI_FN_STATE_CONFIGURED;
	else
		zdev->state = ZPCI_FN_STATE_STANDBY;

	rc = zpci_create_device(zdev);
	if (rc)
		goto error;
	return 0;

error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return rc;
}

static int clp_refresh_fh(u32 fid);
/*
 * Enable/Disable a given PCI function and update its function handle if
 * necessary
 */
static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;
	u32 fid = zdev->fid;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = zdev->fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
	}

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		zdev->fh = rrb->response.fh;
	} else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
			rrb->response.fh == 0) {
		/* Function is already in desired state - update handle */
		rc = clp_refresh_fh(fid);
	}
	clp_free_block(rrb);
	return rc;
}
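
/*
 * Query the position of the MIO write-back bit via the SLPC command and,
 * if the platform reports it as valid, record it in mio_wb_bit_mask.
 */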
int clp_setup_writeback_mio(void)
{
	struct clp_req_rsp_slpc_pci *rrb;
	u8  wb_bit_pos;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_SLPC;
	rrb->response.hdr.len = sizeof(rrb->response);

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		if (rrb->response.vwb) {
			wb_bit_pos = rrb->response.mio_wb;
			set_bit_inv(wb_bit_pos, &mio_wb_bit_mask);
			zpci_dbg(3, "wb bit: %d\n", wb_bit_pos);
		} else {
			zpci_dbg(3, "wb bit: n.a.\n");
		}
	} else {
		zpci_err("SLPC PCI:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	int rc;

	rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	if (rc)
		goto out;

	if (zpci_use_mio(zdev)) {
		rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
		zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
				zdev->fid, zdev->fh, rc);
		if (rc)
			clp_disable_fh(zdev);
	}
out:
	return rc;
}

int clp_disable_fh(struct zpci_dev *zdev)
{
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}
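
/*
 * Walk the List PCI response and invoke @cb for every function handle list
 * entry, resuming with the returned resume_token until the list is complete.
 */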
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
			void (*cb)(struct clp_fh_list_entry *, void *))
{
	u64 resume_token = 0;
	int entries, i, rc;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			zpci_err("List PCI FN:\n");
			zpci_err_clp(rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		update_uid_checking(rrb->response.uid_checking);
		WARN_ON_ONCE(rrb->response.entry_size !=
			sizeof(struct clp_fh_list_entry));

		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			rrb->response.entry_size;

		resume_token = rrb->response.resume_token;
		for (i = 0; i < entries; i++)
			cb(&rrb->response.fh_list[i], data);
	} while (resume_token);
out:
	return rc;
}
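
/* clp_list_pci() callback: add functions that are not yet known */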
static void __clp_add(struct clp_fh_list_entry *entry, void *data)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev)
		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

int clp_scan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, NULL, __clp_add);

	clp_free_block(rrb);
	return rc;
}

static void __clp_refresh_fh(struct clp_fh_list_entry *entry, void *data)
{
	struct zpci_dev *zdev;
	u32 fid = *((u32 *)data);

	if (!entry->vendor_id || fid != entry->fid)
		return;

	zdev = get_zdev_by_fid(fid);
	if (!zdev)
		return;

	zdev->fh = entry->fh;
}

/*
 * Refresh the function handle of the function matching @fid
 */
static int clp_refresh_fh(u32 fid)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, &fid, __clp_refresh_fh);

	clp_free_block(rrb);
	return rc;
}
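
/* Helper state for looking up the configuration state of a function by fid */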
struct clp_state_data {
	u32 fid;
	enum zpci_state state;
};

static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
{
	struct clp_state_data *sd = data;

	if (entry->fid != sd->fid)
		return;

	sd->state = entry->config_state;
}

int clp_get_state(u32 fid, enum zpci_state *state)
{
	struct clp_req_rsp_list_pci *rrb;
	struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
	int rc;

	rrb = clp_alloc_block(GFP_ATOMIC);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, &sd, __clp_get_state);
	if (!rc)
		*state = sd.state;

	clp_free_block(rrb);
	return rc;
}
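
/*
 * The handlers below validate CLP request blocks passed in from userspace
 * through the misc device ioctl before forwarding them to the hardware.
 */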
static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}

static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_base_slpc(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query(struct clp_req *req,
			 struct clp_req_rsp_query_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query_grp(struct clp_req *req,
			     struct clp_req_rsp_query_pci_grp *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
	    lpcb->request.reserved4 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_pci_slpc(req, (void *) lpcb);
	case 0x0002: /* list PCI functions */
		return clp_pci_list(req, (void *) lpcb);
	case 0x0003: /* query PCI function */
		return clp_pci_query(req, (void *) lpcb);
	case 0x0004: /* query PCI function group */
		return clp_pci_query_grp(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

static int clp_normal_command(struct clp_req *req)
{
	struct clp_req_hdr *lpcb;
	void __user *uptr;
	int rc;

	rc = -EINVAL;
	if (req->lps != 0 && req->lps != 2)
		goto out;

	rc = -ENOMEM;
	lpcb = clp_alloc_block(GFP_KERNEL);
	if (!lpcb)
		goto out;

	rc = -EFAULT;
	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
		goto out_free;

	rc = -EINVAL;
	if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
		goto out_free;

	switch (req->lps) {
	case 0:
		rc = clp_base_command(req, lpcb);
		break;
	case 2:
		rc = clp_pci_command(req, lpcb);
		break;
	}
	if (rc)
		goto out_free;

	rc = -EFAULT;
	if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
		goto out_free;

	rc = 0;

out_free:
	clp_free_block(lpcb);
out:
	return rc;
}

static int clp_immediate_command(struct clp_req *req)
{
	void __user *uptr;
	unsigned long ilp;
	int exists;

	if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
		return -EINVAL;

	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (req->cmd == 0) {
		/* Command code 0: test for a specific processor */
		exists = test_bit_inv(req->lps, &ilp);
		return put_user(exists, (int __user *) uptr);
	}
	/* Command code 1: return bit mask of installed processors */
	return put_user(ilp, (unsigned long __user *) uptr);
}

static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct clp_req req;
	void __user *argp;

	if (cmd != CLP_SYNC)
		return -EINVAL;

	argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;
	if (req.r != 0)
		return -EINVAL;
	return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}

static int clp_misc_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations clp_misc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.release = clp_misc_release,
	.unlocked_ioctl = clp_misc_ioctl,
	.compat_ioctl = clp_misc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice clp_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "clp",
	.fops = &clp_misc_fops,
};

static int __init clp_misc_init(void)
{
	return misc_register(&clp_misc_device);
}

device_initcall(clp_misc_init);