// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI Intel Graphics support
 *
 * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Register a device specific region through which to provide read-only
 * access to the Intel IGD opregion.  The register defining the opregion
 * address is also virtualized to prevent user modification.
 */
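
/*
 * Userspace typically discovers the regions registered here by walking
 * the device's regions with VFIO_DEVICE_GET_REGION_INFO and matching the
 * VFIO_REGION_INFO_CAP_TYPE capability against the vendor type
 * (PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE) and the
 * subtypes registered below.
 */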

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_pci_priv.h"

#define OPREGION_SIGNATURE	"IntelGraphicsMem"
#define OPREGION_SIZE		(8 * 1024)
#define OPREGION_PCI_ADDR	0xfc

#define OPREGION_RVDA		0x3ba
#define OPREGION_RVDS		0x3c2
#define OPREGION_VERSION	0x16
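
/*
 * Offsets consumed below: the OpRegion starts with a 16-byte signature,
 * followed by the total OpRegion size in KB as a dword at offset 0x10;
 * the version word lives at OPREGION_VERSION and the extended VBT
 * address/size fields (RVDA/RVDS) at the offsets defined above.
 */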

struct igd_opregion_vbt {
	void *opregion;
	void *vbt_ex;
};

/**
 * igd_opregion_shift_copy() - Copy OpRegion to user buffer and shift position.
 * @dst: User buffer ptr to copy to.
 * @off: Offset to user buffer ptr. Increased by bytes on return.
 * @src: Source buffer to copy from.
 * @pos: Increased by bytes on return.
 * @remaining: Decreased by bytes on return.
 * @bytes: Bytes to copy and adjust off, pos and remaining.
 *
 * Copy OpRegion to offset from specific source ptr and shift the offset.
 *
 * Return: 0 on success, -EFAULT otherwise.
 */
static inline unsigned long igd_opregion_shift_copy(char __user *dst,
						    loff_t *off,
						    void *src,
						    loff_t *pos,
						    size_t *remaining,
						    size_t bytes)
{
	if (copy_to_user(dst + (*off), src, bytes))
		return -EFAULT;

	*off += bytes;
	*pos += bytes;
	*remaining -= bytes;

	return 0;
}
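
/*
 * Assemble the user's view of the region from up to five pieces: the
 * OpRegion bytes before the version word, a version word patched from
 * 2.0 to 2.1 when an extended VBT is mapped, the bytes up to RVDA, an
 * RVDA value rewritten to point just past the 8KB OpRegion, and the
 * OpRegion remainder followed by the extended VBT itself.
 */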
static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
			       char __user *buf, size_t count, loff_t *ppos,
			       bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct igd_opregion_vbt *opregionvbt = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK, off = 0;
	size_t remaining;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	count = min_t(size_t, count, vdev->region[i].size - pos);
	remaining = count;

	/* Copy until OpRegion version */
	if (remaining && pos < OPREGION_VERSION) {
		size_t bytes = min_t(size_t, remaining, OPREGION_VERSION - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy patched (if necessary) OpRegion version */
	if (remaining && pos < OPREGION_VERSION + sizeof(__le16)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_VERSION + sizeof(__le16) - pos);
		__le16 version = *(__le16 *)(opregionvbt->opregion +
					     OPREGION_VERSION);

		/* Patch to 2.1 if OpRegion 2.0 has extended VBT */
		if (le16_to_cpu(version) == 0x0200 && opregionvbt->vbt_ex)
			version = cpu_to_le16(0x0201);

		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&version +
					    (pos - OPREGION_VERSION),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy until RVDA */
	if (remaining && pos < OPREGION_RVDA) {
		size_t bytes = min_t(size_t, remaining, OPREGION_RVDA - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy modified (if necessary) RVDA */
	if (remaining && pos < OPREGION_RVDA + sizeof(__le64)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_RVDA + sizeof(__le64) - pos);
		__le64 rvda = cpu_to_le64(opregionvbt->vbt_ex ?
					  OPREGION_SIZE : 0);

		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&rvda + (pos - OPREGION_RVDA),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy the rest of OpRegion */
	if (remaining && pos < OPREGION_SIZE) {
		size_t bytes = min_t(size_t, remaining, OPREGION_SIZE - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy extended VBT if exists */
	if (remaining &&
	    copy_to_user(buf + off, opregionvbt->vbt_ex + (pos - OPREGION_SIZE),
			 remaining))
		return -EFAULT;

	*ppos += count;

	return count;
}

static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
				 struct vfio_pci_region *region)
{
	struct igd_opregion_vbt *opregionvbt = region->data;

	if (opregionvbt->vbt_ex)
		memunmap(opregionvbt->vbt_ex);

	memunmap(opregionvbt->opregion);
	kfree(opregionvbt);
}

static const struct vfio_pci_regops vfio_pci_igd_regops = {
	.rw		= vfio_pci_igd_rw,
	.release	= vfio_pci_igd_release,
};

static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
{
	__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
	u32 addr, size;
	struct igd_opregion_vbt *opregionvbt;
	int ret;
	u16 version;

	ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
	if (ret)
		return ret;
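
	/*
	 * An OpRegion address (ASLS) of all zeros or all ones indicates
	 * the register is unimplemented or was never initialized by
	 * firmware, so there is no OpRegion to expose.
	 */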
	if (!addr || !(~addr))
		return -ENODEV;

	opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL_ACCOUNT);
	if (!opregionvbt)
		return -ENOMEM;
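
	/*
	 * The OpRegion lives in firmware-reserved system RAM, hence
	 * memremap() rather than ioremap().
	 */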
	opregionvbt->opregion = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
	if (!opregionvbt->opregion) {
		kfree(opregionvbt);
		return -ENOMEM;
	}

	if (memcmp(opregionvbt->opregion, OPREGION_SIGNATURE, 16)) {
		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return -EINVAL;
	}

	size = le32_to_cpu(*(__le32 *)(opregionvbt->opregion + 16));
	if (!size) {
		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return -EINVAL;
	}

	size *= 1024; /* In KB */

	/*
	 * OpRegion and VBT:
	 * When VBT data doesn't exceed 6KB, it's stored in Mailbox #4.
	 * When VBT data exceeds 6KB, Mailbox #4 is no longer large enough to
	 * hold it, so the Extended VBT region was introduced in OpRegion 2.0,
	 * along with the RVDA/RVDS fields that define the extended VBT data
	 * location and size.
	 * OpRegion 2.0: RVDA defines the absolute physical address of the
	 *   extended VBT data, RVDS defines the VBT data size.
	 * OpRegion 2.1 and above: RVDA defines the address of the extended
	 *   VBT data relative to the OpRegion base, RVDS defines the VBT
	 *   data size.
	 *
	 * Due to the RVDA definition difference (also the only difference
	 * between 2.0 and 2.1), exposing the OpRegion and VBT as a contiguous
	 * range for OpRegion 2.0 and above makes it possible to support a
	 * non-contiguous VBT through a single vfio region. From the r/w ops'
	 * view, only a contiguous VBT following the OpRegion with version
	 * 2.1+ is exposed, regardless of whether the host OpRegion is 2.0 or
	 * non-contiguous 2.1+. The r/w ops shift the actual offset into the
	 * VBT on the fly so that data at the correct position is returned to
	 * the requester.
	 */
	version = le16_to_cpu(*(__le16 *)(opregionvbt->opregion +
					  OPREGION_VERSION));
	if (version >= 0x0200) {
		u64 rvda = le64_to_cpu(*(__le64 *)(opregionvbt->opregion +
						   OPREGION_RVDA));
		u32 rvds = le32_to_cpu(*(__le32 *)(opregionvbt->opregion +
						   OPREGION_RVDS));

		/* The extended VBT is valid only when RVDA/RVDS are non-zero */
		if (rvda && rvds) {
			size += rvds;

			/*
			 * Extended VBT location by RVDA:
			 * Absolute physical addr for 2.0.
			 * Relative addr to OpRegion header for 2.1+.
			 */
			if (version == 0x0200)
				addr = rvda;
			else
				addr += rvda;

			opregionvbt->vbt_ex = memremap(addr, rvds, MEMREMAP_WB);
			if (!opregionvbt->vbt_ex) {
				memunmap(opregionvbt->opregion);
				kfree(opregionvbt);
				return -ENOMEM;
			}
		}
	}
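
	/*
	 * Example with hypothetical numbers: an 8KB version 2.0 OpRegion
	 * with RVDA = 0x90000000 and RVDS = 0x1800 is exposed as a single
	 * 14KB region; reads beyond 8KB are serviced from the 6KB extended
	 * VBT mapping and the user sees version 2.1 with RVDA = 0x2000.
	 */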

	ret = vfio_pci_core_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
		size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
	if (ret) {
		if (opregionvbt->vbt_ex)
			memunmap(opregionvbt->vbt_ex);

		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return ret;
	}

	/* Fill vconfig with the hw value and virtualize register */
	*dwordp = cpu_to_le32(addr);
	memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
	       PCI_CAP_ID_INVALID_VIRT, 4);

	return ret;
}
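
/*
 * In addition to the OpRegion, guest drivers for Intel graphics probe
 * the config space of the host bridge (00:00.0) and LPC bridge (00:1f.0)
 * to identify the platform, so read-only views of those are exposed as
 * well.
 */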
static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
				   char __user *buf, size_t count, loff_t *ppos,
				   bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct pci_dev *pdev = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t size;
	int ret;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	size = count = min(count, (size_t)(vdev->region[i].size - pos));
	if ((pos & 1) && size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	if ((pos & 3) && size > 2) {
		u16 val;
		__le16 lval;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &lval, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;
		__le32 lval;

		ret = pci_user_read_config_dword(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le32(val);
		if (copy_to_user(buf + count - size, &lval, 4))
			return -EFAULT;

		pos += 4;
		size -= 4;
	}

	while (size >= 2) {
		u16 val;
		__le16 lval;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		lval = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &lval, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	while (size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	*ppos += count;

	return count;
}

static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
				     struct vfio_pci_region *region)
{
	struct pci_dev *pdev = region->data;

	pci_dev_put(pdev);
}

static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
	.rw		= vfio_pci_igd_cfg_rw,
	.release	= vfio_pci_igd_cfg_release,
};
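
/*
 * Register read-only views of the host bridge and LPC bridge config
 * space; each registered region holds a reference on its pci_dev,
 * dropped via vfio_pci_igd_cfg_release() when the region is torn down.
 */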
static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *host_bridge, *lpc_bridge;
	int ret;

	host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!host_bridge)
		return -ENODEV;

	if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
		pci_dev_put(host_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_core_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
		&vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, host_bridge);
	if (ret) {
		pci_dev_put(host_bridge);
		return ret;
	}

	lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
	if (!lpc_bridge)
		return -ENODEV;

	if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
		pci_dev_put(lpc_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_core_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
		&vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
	if (ret) {
		pci_dev_put(lpc_bridge);
		return ret;
	}

	return 0;
}
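
/*
 * Entry point for IGD support: set up the virtualized OpRegion and the
 * companion host/LPC bridge config regions for an assigned Intel
 * graphics device.
 */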
int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
	int ret;

	ret = vfio_pci_igd_opregion_init(vdev);
	if (ret)
		return ret;

	ret = vfio_pci_igd_cfg_init(vdev);
	if (ret)
		return ret;

	return 0;
}