/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
23 #include <linux/kernel.h>
24 #include <linux/pci.h>
25 #include <linux/smp.h>
26 #include <linux/interrupt.h>
27 #include <linux/dca.h>
29 /* either a kernel change is needed, or we need something like this in kernel */
32 #undef cpu_physical_id
33 #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
37 #include "registers.h"
/*
 * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag. If the valid
 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
 */
#define DCA_TAG_MAP_VALID 0x80

/* I/OAT ver.3 tag map entry flags: select/invert an APIC ID bit, or literal */
#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1

#define DCA_TAG_MAP_MASK 0xDF

/* expected tag map bytes for I/OAT ver.2 */
#define DCA2_TAG_MAP_BYTE0 0x80
#define DCA2_TAG_MAP_BYTE1 0x0
#define DCA2_TAG_MAP_BYTE2 0x81
#define DCA2_TAG_MAP_BYTE3 0x82
#define DCA2_TAG_MAP_BYTE4 0x82
59 /* verify if tag map matches expected values */
60 static inline int dca2_tag_map_valid(u8
*tag_map
)
62 return ((tag_map
[0] == DCA2_TAG_MAP_BYTE0
) &&
63 (tag_map
[1] == DCA2_TAG_MAP_BYTE1
) &&
64 (tag_map
[2] == DCA2_TAG_MAP_BYTE2
) &&
65 (tag_map
[3] == DCA2_TAG_MAP_BYTE3
) &&
66 (tag_map
[4] == DCA2_TAG_MAP_BYTE4
));
70 * "Legacy" DCA systems do not implement the DCA register set in the
71 * I/OAT device. Software needs direct support for their tag mappings.
74 #define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x))
75 #define IOAT_TAG_MAP_LEN 8
77 static u8 ioat_tag_map_BNB
[IOAT_TAG_MAP_LEN
] = {
78 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
79 static u8 ioat_tag_map_SCNB
[IOAT_TAG_MAP_LEN
] = {
80 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
81 static u8 ioat_tag_map_CNB
[IOAT_TAG_MAP_LEN
] = {
82 1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
83 static u8 ioat_tag_map_UNISYS
[IOAT_TAG_MAP_LEN
] = { 0 };
85 /* pack PCI B/D/F into a u16 */
86 static inline u16
dcaid_from_pcidev(struct pci_dev
*pci
)
88 return (pci
->bus
->number
<< 8) | pci
->devfn
;
91 static int dca_enabled_in_bios(struct pci_dev
*pdev
)
93 /* CPUID level 9 returns DCA configuration */
94 /* Bit 0 indicates DCA enabled by the BIOS */
95 unsigned long cpuid_level_9
;
98 cpuid_level_9
= cpuid_eax(9);
99 res
= test_bit(0, &cpuid_level_9
);
101 dev_dbg(&pdev
->dev
, "DCA is disabled in BIOS\n");
106 int system_has_dca_enabled(struct pci_dev
*pdev
)
108 if (boot_cpu_has(X86_FEATURE_DCA
))
109 return dca_enabled_in_bios(pdev
);
111 dev_dbg(&pdev
->dev
, "boot cpu doesn't have X86_FEATURE_DCA\n");
115 struct ioat_dca_slot
{
116 struct pci_dev
*pdev
; /* requester device */
117 u16 rid
; /* requester id, as used by IOAT */
120 #define IOAT_DCA_MAX_REQ 6
121 #define IOAT3_DCA_MAX_REQ 2
123 struct ioat_dca_priv
{
124 void __iomem
*iobase
;
125 void __iomem
*dca_base
;
128 u8 tag_map
[IOAT_TAG_MAP_LEN
];
129 struct ioat_dca_slot req_slots
[0];
132 /* 5000 series chipset DCA Port Requester ID Table Entry Format
133 * [15:8] PCI-Express Bus Number
134 * [7:3] PCI-Express Device Number
135 * [2:0] PCI-Express Function Number
137 * 5000 series chipset DCA control register format
139 * [0] Ignore Function Number
142 static int ioat_dca_add_requester(struct dca_provider
*dca
, struct device
*dev
)
144 struct ioat_dca_priv
*ioatdca
= dca_priv(dca
);
145 struct pci_dev
*pdev
;
149 /* This implementation only supports PCI-Express */
150 if (dev
->bus
!= &pci_bus_type
)
152 pdev
= to_pci_dev(dev
);
153 id
= dcaid_from_pcidev(pdev
);
155 if (ioatdca
->requester_count
== ioatdca
->max_requesters
)
158 for (i
= 0; i
< ioatdca
->max_requesters
; i
++) {
159 if (ioatdca
->req_slots
[i
].pdev
== NULL
) {
160 /* found an empty slot */
161 ioatdca
->requester_count
++;
162 ioatdca
->req_slots
[i
].pdev
= pdev
;
163 ioatdca
->req_slots
[i
].rid
= id
;
164 writew(id
, ioatdca
->dca_base
+ (i
* 4));
165 /* make sure the ignore function bit is off */
166 writeb(0, ioatdca
->dca_base
+ (i
* 4) + 2);
170 /* Error, ioatdma->requester_count is out of whack */
174 static int ioat_dca_remove_requester(struct dca_provider
*dca
,
177 struct ioat_dca_priv
*ioatdca
= dca_priv(dca
);
178 struct pci_dev
*pdev
;
181 /* This implementation only supports PCI-Express */
182 if (dev
->bus
!= &pci_bus_type
)
184 pdev
= to_pci_dev(dev
);
186 for (i
= 0; i
< ioatdca
->max_requesters
; i
++) {
187 if (ioatdca
->req_slots
[i
].pdev
== pdev
) {
188 writew(0, ioatdca
->dca_base
+ (i
* 4));
189 ioatdca
->req_slots
[i
].pdev
= NULL
;
190 ioatdca
->req_slots
[i
].rid
= 0;
191 ioatdca
->requester_count
--;
198 static u8
ioat_dca_get_tag(struct dca_provider
*dca
,
202 struct ioat_dca_priv
*ioatdca
= dca_priv(dca
);
203 int i
, apic_id
, bit
, value
;
207 apic_id
= cpu_physical_id(cpu
);
209 for (i
= 0; i
< IOAT_TAG_MAP_LEN
; i
++) {
210 entry
= ioatdca
->tag_map
[i
];
211 if (entry
& DCA_TAG_MAP_VALID
) {
212 bit
= entry
& ~DCA_TAG_MAP_VALID
;
213 value
= (apic_id
& (1 << bit
)) ? 1 : 0;
215 value
= entry
? 1 : 0;
222 static int ioat_dca_dev_managed(struct dca_provider
*dca
,
225 struct ioat_dca_priv
*ioatdca
= dca_priv(dca
);
226 struct pci_dev
*pdev
;
229 pdev
= to_pci_dev(dev
);
230 for (i
= 0; i
< ioatdca
->max_requesters
; i
++) {
231 if (ioatdca
->req_slots
[i
].pdev
== pdev
)
237 static struct dca_ops ioat_dca_ops
= {
238 .add_requester
= ioat_dca_add_requester
,
239 .remove_requester
= ioat_dca_remove_requester
,
240 .get_tag
= ioat_dca_get_tag
,
241 .dev_managed
= ioat_dca_dev_managed
,
245 struct dca_provider
* __devinit
246 ioat_dca_init(struct pci_dev
*pdev
, void __iomem
*iobase
)
248 struct dca_provider
*dca
;
249 struct ioat_dca_priv
*ioatdca
;
256 if (!system_has_dca_enabled(pdev
))
259 /* I/OAT v1 systems must have a known tag_map to support DCA */
260 switch (pdev
->vendor
) {
261 case PCI_VENDOR_ID_INTEL
:
262 switch (pdev
->device
) {
263 case PCI_DEVICE_ID_INTEL_IOAT
:
264 tag_map
= ioat_tag_map_BNB
;
266 case PCI_DEVICE_ID_INTEL_IOAT_CNB
:
267 tag_map
= ioat_tag_map_CNB
;
269 case PCI_DEVICE_ID_INTEL_IOAT_SCNB
:
270 tag_map
= ioat_tag_map_SCNB
;
274 case PCI_VENDOR_ID_UNISYS
:
275 switch (pdev
->device
) {
276 case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR
:
277 tag_map
= ioat_tag_map_UNISYS
;
285 version
= readb(iobase
+ IOAT_VER_OFFSET
);
286 if (version
== IOAT_VER_3_0
)
287 max_requesters
= IOAT3_DCA_MAX_REQ
;
289 max_requesters
= IOAT_DCA_MAX_REQ
;
291 dca
= alloc_dca_provider(&ioat_dca_ops
,
293 (sizeof(struct ioat_dca_slot
) * max_requesters
));
297 ioatdca
= dca_priv(dca
);
298 ioatdca
->max_requesters
= max_requesters
;
299 ioatdca
->dca_base
= iobase
+ 0x54;
301 /* copy over the APIC ID to DCA tag mapping */
302 for (i
= 0; i
< IOAT_TAG_MAP_LEN
; i
++)
303 ioatdca
->tag_map
[i
] = tag_map
[i
];
305 err
= register_dca_provider(dca
, &pdev
->dev
);
307 free_dca_provider(dca
);
315 static int ioat2_dca_add_requester(struct dca_provider
*dca
, struct device
*dev
)
317 struct ioat_dca_priv
*ioatdca
= dca_priv(dca
);
318 struct pci_dev
*pdev
;
321 u16 global_req_table
;
323 /* This implementation only supports PCI-Express */
324 if (dev
->bus
!= &pci_bus_type
)
326 pdev
= to_pci_dev(dev
);
327 id
= dcaid_from_pcidev(pdev
);
329 if (ioatdca
->requester_count
== ioatdca
->max_requesters
)
332 for (i
= 0; i
< ioatdca
->max_requesters
; i
++) {
333 if (ioatdca
->req_slots
[i
].pdev
== NULL
) {
334 /* found an empty slot */
335 ioatdca
->requester_count
++;
336 ioatdca
->req_slots
[i
].pdev
= pdev
;
337 ioatdca
->req_slots
[i
].rid
= id
;
339 readw(ioatdca
->dca_base
+ IOAT_DCA_GREQID_OFFSET
);
340 writel(id
| IOAT_DCA_GREQID_VALID
,
341 ioatdca
->iobase
+ global_req_table
+ (i
* 4));
345 /* Error, ioatdma->requester_count is out of whack */
349 static int ioat2_dca_remove_requester(struct dca_provider
*dca
,
352 struct ioat_dca_priv
*ioatdca
= dca_priv(dca
);
353 struct pci_dev
*pdev
;
355 u16 global_req_table
;
357 /* This implementation only supports PCI-Express */
358 if (dev
->bus
!= &pci_bus_type
)
360 pdev
= to_pci_dev(dev
);
362 for (i
= 0; i
< ioatdca
->max_requesters
; i
++) {
363 if (ioatdca
->req_slots
[i
].pdev
== pdev
) {
365 readw(ioatdca
->dca_base
+ IOAT_DCA_GREQID_OFFSET
);
366 writel(0, ioatdca
->iobase
+ global_req_table
+ (i
* 4));
367 ioatdca
->req_slots
[i
].pdev
= NULL
;
368 ioatdca
->req_slots
[i
].rid
= 0;
369 ioatdca
->requester_count
--;
376 static u8
ioat2_dca_get_tag(struct dca_provider
*dca
,
382 tag
= ioat_dca_get_tag(dca
, dev
, cpu
);
387 static struct dca_ops ioat2_dca_ops
= {
388 .add_requester
= ioat2_dca_add_requester
,
389 .remove_requester
= ioat2_dca_remove_requester
,
390 .get_tag
= ioat2_dca_get_tag
,
391 .dev_managed
= ioat_dca_dev_managed
,
394 static int ioat2_dca_count_dca_slots(void __iomem
*iobase
, u16 dca_offset
)
398 u16 global_req_table
;
400 global_req_table
= readw(iobase
+ dca_offset
+ IOAT_DCA_GREQID_OFFSET
);
401 if (global_req_table
== 0)
404 req
= readl(iobase
+ global_req_table
+ (slots
* sizeof(u32
)));
406 } while ((req
& IOAT_DCA_GREQID_LASTID
) == 0);
411 struct dca_provider
* __devinit
412 ioat2_dca_init(struct pci_dev
*pdev
, void __iomem
*iobase
)
414 struct dca_provider
*dca
;
415 struct ioat_dca_priv
*ioatdca
;
425 if (!system_has_dca_enabled(pdev
))
428 dca_offset
= readw(iobase
+ IOAT_DCAOFFSET_OFFSET
);
432 slots
= ioat2_dca_count_dca_slots(iobase
, dca_offset
);
436 dca
= alloc_dca_provider(&ioat2_dca_ops
,
438 + (sizeof(struct ioat_dca_slot
) * slots
));
442 ioatdca
= dca_priv(dca
);
443 ioatdca
->iobase
= iobase
;
444 ioatdca
->dca_base
= iobase
+ dca_offset
;
445 ioatdca
->max_requesters
= slots
;
447 /* some bios might not know to turn these on */
448 csi_fsb_control
= readw(ioatdca
->dca_base
+ IOAT_FSB_CAP_ENABLE_OFFSET
);
449 if ((csi_fsb_control
& IOAT_FSB_CAP_ENABLE_PREFETCH
) == 0) {
450 csi_fsb_control
|= IOAT_FSB_CAP_ENABLE_PREFETCH
;
451 writew(csi_fsb_control
,
452 ioatdca
->dca_base
+ IOAT_FSB_CAP_ENABLE_OFFSET
);
454 pcie_control
= readw(ioatdca
->dca_base
+ IOAT_PCI_CAP_ENABLE_OFFSET
);
455 if ((pcie_control
& IOAT_PCI_CAP_ENABLE_MEMWR
) == 0) {
456 pcie_control
|= IOAT_PCI_CAP_ENABLE_MEMWR
;
458 ioatdca
->dca_base
+ IOAT_PCI_CAP_ENABLE_OFFSET
);
462 /* TODO version, compatibility and configuration checks */
464 /* copy out the APIC to DCA tag map */
465 tag_map
= readl(ioatdca
->dca_base
+ IOAT_APICID_TAG_MAP_OFFSET
);
466 for (i
= 0; i
< 5; i
++) {
467 bit
= (tag_map
>> (4 * i
)) & 0x0f;
469 ioatdca
->tag_map
[i
] = bit
| DCA_TAG_MAP_VALID
;
471 ioatdca
->tag_map
[i
] = 0;
474 if (!dca2_tag_map_valid(ioatdca
->tag_map
)) {
475 dev_err(&pdev
->dev
, "APICID_TAG_MAP set incorrectly by BIOS, "
477 free_dca_provider(dca
);
481 err
= register_dca_provider(dca
, &pdev
->dev
);
483 free_dca_provider(dca
);
490 static int ioat3_dca_add_requester(struct dca_provider
*dca
, struct device
*dev
)
492 struct ioat_dca_priv
*ioatdca
= dca_priv(dca
);
493 struct pci_dev
*pdev
;
496 u16 global_req_table
;
498 /* This implementation only supports PCI-Express */
499 if (dev
->bus
!= &pci_bus_type
)
501 pdev
= to_pci_dev(dev
);
502 id
= dcaid_from_pcidev(pdev
);
504 if (ioatdca
->requester_count
== ioatdca
->max_requesters
)
507 for (i
= 0; i
< ioatdca
->max_requesters
; i
++) {
508 if (ioatdca
->req_slots
[i
].pdev
== NULL
) {
509 /* found an empty slot */
510 ioatdca
->requester_count
++;
511 ioatdca
->req_slots
[i
].pdev
= pdev
;
512 ioatdca
->req_slots
[i
].rid
= id
;
514 readw(ioatdca
->dca_base
+ IOAT3_DCA_GREQID_OFFSET
);
515 writel(id
| IOAT_DCA_GREQID_VALID
,
516 ioatdca
->iobase
+ global_req_table
+ (i
* 4));
520 /* Error, ioatdma->requester_count is out of whack */
524 static int ioat3_dca_remove_requester(struct dca_provider
*dca
,
527 struct ioat_dca_priv
*ioatdca
= dca_priv(dca
);
528 struct pci_dev
*pdev
;
530 u16 global_req_table
;
532 /* This implementation only supports PCI-Express */
533 if (dev
->bus
!= &pci_bus_type
)
535 pdev
= to_pci_dev(dev
);
537 for (i
= 0; i
< ioatdca
->max_requesters
; i
++) {
538 if (ioatdca
->req_slots
[i
].pdev
== pdev
) {
540 readw(ioatdca
->dca_base
+ IOAT3_DCA_GREQID_OFFSET
);
541 writel(0, ioatdca
->iobase
+ global_req_table
+ (i
* 4));
542 ioatdca
->req_slots
[i
].pdev
= NULL
;
543 ioatdca
->req_slots
[i
].rid
= 0;
544 ioatdca
->requester_count
--;
551 static u8
ioat3_dca_get_tag(struct dca_provider
*dca
,
557 struct ioat_dca_priv
*ioatdca
= dca_priv(dca
);
558 int i
, apic_id
, bit
, value
;
562 apic_id
= cpu_physical_id(cpu
);
564 for (i
= 0; i
< IOAT_TAG_MAP_LEN
; i
++) {
565 entry
= ioatdca
->tag_map
[i
];
566 if (entry
& DCA3_TAG_MAP_BIT_TO_SEL
) {
568 ~(DCA3_TAG_MAP_BIT_TO_SEL
| DCA3_TAG_MAP_BIT_TO_INV
);
569 value
= (apic_id
& (1 << bit
)) ? 1 : 0;
570 } else if (entry
& DCA3_TAG_MAP_BIT_TO_INV
) {
571 bit
= entry
& ~DCA3_TAG_MAP_BIT_TO_INV
;
572 value
= (apic_id
& (1 << bit
)) ? 0 : 1;
574 value
= (entry
& DCA3_TAG_MAP_LITERAL_VAL
) ? 1 : 0;
582 static struct dca_ops ioat3_dca_ops
= {
583 .add_requester
= ioat3_dca_add_requester
,
584 .remove_requester
= ioat3_dca_remove_requester
,
585 .get_tag
= ioat3_dca_get_tag
,
586 .dev_managed
= ioat_dca_dev_managed
,
589 static int ioat3_dca_count_dca_slots(void *iobase
, u16 dca_offset
)
593 u16 global_req_table
;
595 global_req_table
= readw(iobase
+ dca_offset
+ IOAT3_DCA_GREQID_OFFSET
);
596 if (global_req_table
== 0)
600 req
= readl(iobase
+ global_req_table
+ (slots
* sizeof(u32
)));
602 } while ((req
& IOAT_DCA_GREQID_LASTID
) == 0);
607 struct dca_provider
* __devinit
608 ioat3_dca_init(struct pci_dev
*pdev
, void __iomem
*iobase
)
610 struct dca_provider
*dca
;
611 struct ioat_dca_priv
*ioatdca
;
628 if (!system_has_dca_enabled(pdev
))
631 dca_offset
= readw(iobase
+ IOAT_DCAOFFSET_OFFSET
);
635 slots
= ioat3_dca_count_dca_slots(iobase
, dca_offset
);
639 dca
= alloc_dca_provider(&ioat3_dca_ops
,
641 + (sizeof(struct ioat_dca_slot
) * slots
));
645 ioatdca
= dca_priv(dca
);
646 ioatdca
->iobase
= iobase
;
647 ioatdca
->dca_base
= iobase
+ dca_offset
;
648 ioatdca
->max_requesters
= slots
;
650 /* some bios might not know to turn these on */
651 csi_fsb_control
= readw(ioatdca
->dca_base
+ IOAT3_CSI_CONTROL_OFFSET
);
652 if ((csi_fsb_control
& IOAT3_CSI_CONTROL_PREFETCH
) == 0) {
653 csi_fsb_control
|= IOAT3_CSI_CONTROL_PREFETCH
;
654 writew(csi_fsb_control
,
655 ioatdca
->dca_base
+ IOAT3_CSI_CONTROL_OFFSET
);
657 pcie_control
= readw(ioatdca
->dca_base
+ IOAT3_PCI_CONTROL_OFFSET
);
658 if ((pcie_control
& IOAT3_PCI_CONTROL_MEMWR
) == 0) {
659 pcie_control
|= IOAT3_PCI_CONTROL_MEMWR
;
661 ioatdca
->dca_base
+ IOAT3_PCI_CONTROL_OFFSET
);
665 /* TODO version, compatibility and configuration checks */
667 /* copy out the APIC to DCA tag map */
669 readl(ioatdca
->dca_base
+ IOAT3_APICID_TAG_MAP_OFFSET_LOW
);
671 readl(ioatdca
->dca_base
+ IOAT3_APICID_TAG_MAP_OFFSET_HIGH
);
672 for (i
= 0; i
< 8; i
++) {
673 bit
= tag_map
.full
>> (8 * i
);
674 ioatdca
->tag_map
[i
] = bit
& DCA_TAG_MAP_MASK
;
677 err
= register_dca_provider(dca
, &pdev
->dev
);
679 free_dca_provider(dca
);