// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>
#include <linux/log2.h>

#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"
#include "ntb_hw_gen4.h"

static int gen4_poll_link(struct intel_ntb_dev *ndev);
static int gen4_link_is_up(struct intel_ntb_dev *ndev);

static const struct intel_ntb_reg gen4_reg = {
	.poll_link		= gen4_poll_link,
	.link_is_up		= gen4_link_is_up,
	.db_ioread		= gen3_db_ioread,
	.db_iowrite		= gen3_db_iowrite,
	.db_size		= sizeof(u32),
	.ntb_ctl		= GEN4_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},
};

static const struct intel_ntb_alt_reg gen4_pri_reg = {
	.db_clear		= GEN4_IM_INT_STATUS_OFFSET,
	.db_mask		= GEN4_IM_INT_DISABLE_OFFSET,
	.spad			= GEN4_IM_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg gen4_sec_xlat = {
	.bar2_limit		= GEN4_IM23XLMT_OFFSET,
	.bar2_xlat		= GEN4_IM23XBASE_OFFSET,
	.bar2_idx		= GEN4_IM23XBASEIDX_OFFSET,
};
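
/*
 * Only the BAR2 register offsets are stored here; intel_ntb4_mw_set_trans()
 * reaches the BAR4 equivalents by adding idx * 0x10 to the base/limit
 * offsets and idx * 0x2 to the index register offset.
 */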

static const struct intel_ntb_alt_reg gen4_b2b_reg = {
	.db_bell		= GEN4_IM_DOORBELL_OFFSET,
	.spad			= GEN4_EM_SPAD_OFFSET,
};

static int gen4_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;

	/*
	 * We need to write to DLLSCS bit in the SLOTSTS before we
	 * can clear the hardware link interrupt on ICX NTB.
	 */
	iowrite16(GEN4_SLOTSTS_DLLSCS, ndev->self_mmio + GEN4_SLOTSTS);
	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_clear);

	reg_val = ioread16(ndev->self_mmio + GEN4_LINK_STATUS_OFFSET);
	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}
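
/*
 * As with the gen1/gen3 poll_link implementations, a nonzero return means
 * the cached link status changed since the last poll; the common interrupt
 * path uses this to decide whether to raise an ntb_link_event().
 */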

static int gen4_link_is_up(struct intel_ntb_dev *ndev)
{
	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

static int gen4_init_isr(struct intel_ntb_dev *ndev)
{
	int i;

	/*
	 * The MSIX vectors and the interrupt status bits are not lined up
	 * on Gen3 (Skylake) and Gen4. By default the link status bit is bit
	 * 32, however it is by default MSIX vector0. We need to fixup to
	 * line them up. The vectors at reset is 1-32,0. We need to reprogram
	 * to 0-32.
	 */
	for (i = 0; i < GEN4_DB_MSIX_VECTOR_COUNT; i++)
		iowrite8(i, ndev->self_mmio + GEN4_INTVEC_OFFSET + i);
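
	/*
	 * After the remap above, interrupt-vector entry i routes status
	 * bit i to MSI-X vector i, so the link status bit lands on the
	 * last vector instead of vector 0.
	 */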
	return ndev_init_isr(ndev, GEN4_DB_MSIX_VECTOR_COUNT,
			     GEN4_DB_MSIX_VECTOR_COUNT,
			     GEN4_DB_MSIX_VECTOR_SHIFT,
			     GEN4_DB_TOTAL_SHIFT);
}

static int gen4_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	phys_addr_t bar_addr;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	/* setup incoming bar limits == base addrs (zero length windows) */
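	/*
	 * A limit equal to the base yields a zero-length window, so no
	 * incoming translation is usable until mw_set_trans() programs
	 * a real range.
	 */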
	bar_addr = addr->bar2_addr64;
	iowrite64(bar_addr, mmio + GEN4_IM23XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IM23XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64;
	iowrite64(bar_addr, mmio + GEN4_IM45XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IM45XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + GEN4_IM23XBASE_OFFSET);
	iowrite64(0, mmio + GEN4_IM45XBASE_OFFSET);

	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}

static int gen4_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;

	ndev->mw_count = XEON_MW_COUNT;
	ndev->spad_count = GEN4_SPAD_COUNT;
	ndev->db_count = GEN4_DB_COUNT;
	ndev->db_link_mask = GEN4_DB_LINK_BIT;

	ndev->self_reg = &gen4_pri_reg;
	ndev->xlat_reg = &gen4_sec_xlat;
	ndev->peer_reg = &gen4_b2b_reg;

	if (ndev->ntb.topo == NTB_TOPO_B2B_USD)
		rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_dsd_addr,
				       &xeon_b2b_usd_addr);
	else
		rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_usd_addr,
				       &xeon_b2b_dsd_addr);
	if (rc)
		return rc;

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
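
	/* start with every doorbell bit masked; clients unmask via db_clear_mask */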
	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
	switch (ppd & GEN4_PPD_TOPO_MASK) {
	case GEN4_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;
	case GEN4_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;
	}

	return NTB_TOPO_NONE;
}

static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
	switch (ppd & SPR_PPD_TOPO_MASK) {
	case SPR_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;
	case SPR_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;
	}

	return NTB_TOPO_NONE;
}
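
/*
 * ICX and SPR encode the B2B topology in different PPD1 bit fields, hence
 * the two decoders above; gen4_init_dev() selects one by PCI device ID.
 */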

int gen4_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev->ntb.pdev;
	u32 ppd1;
	u16 lnkctl;
	int rc = 0;

	ndev->reg = &gen4_reg;

	if (pdev_is_ICX(pdev)) {
		ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN;
		ndev->hwerr_flags |= NTB_HWERR_LTR_BAD;
	}

	ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
	if (pdev_is_ICX(pdev))
		ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
	else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev))
		ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	rc = gen4_init_ntb(ndev);
	if (rc)
		return rc;

	/* init link setup */
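	/*
	 * The link is left disabled here; intel_ntb4_link_enable() clears
	 * GEN4_LINK_CTRL_LINK_DISABLE again when a client brings it up.
	 */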
	lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
	lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
	iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);

	return gen4_init_isr(ndev);
}

ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);
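	/* cap the report at 2 KiB regardless of the caller's buffer size */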

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA (cached) -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + GEN4_IM23XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM23XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN4_IM45XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM45XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM23XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM45XLMT -\t\t\t%#018llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	if (!pci_read_config_word(ndev->ntb.pdev,
				  GEN4_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	u.v16 = ioread16(mmio + GEN4_LINK_STATUS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN4_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN4_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static int intel_ntb4_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				   dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg, idx_reg;
	unsigned short base_idx, reg_val16;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
		/* hardware requires that addr is aligned to bar size */
		if (addr & (bar_size - 1))
			return -EINVAL;
	} else {
		if (addr & (PAGE_SIZE - 1))
			return -EINVAL;
	}
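
	/*
	 * Example: with NTB_HWERR_BAR_ALIGN set (ICX) and a 64 MiB BAR,
	 * addr must be a 64 MiB multiple; otherwise page alignment is
	 * sufficient.
	 */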

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
	base = pci_resource_start(ndev->ntb.pdev, bar);

	/* Set the limit if supported, if size is not mw_size */
	if (limit_reg && size != mw_size) {
		limit = base + size;
		base_idx = __ilog2_u64(size);
	} else {
		limit = base + mw_size;
		base_idx = __ilog2_u64(mw_size);
	}
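
	/*
	 * base_idx is the log2 of the usable window size; on parts with
	 * NTB_HWERR_BAR_ALIGN it is written to the IMBASEIDX register
	 * below, which takes the window size as a power-of-two exponent.
	 */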

	/* set and verify setting the translation address */
	iowrite64(addr, mmio + xlat_reg);
	reg_val = ioread64(mmio + xlat_reg);
	if (reg_val != addr) {
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMXBASE: %#Lx\n", bar, reg_val);

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMXLMT: %#Lx\n", bar, reg_val);

	if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
		idx_reg = ndev->xlat_reg->bar2_idx + (idx * 0x2);
		iowrite16(base_idx, mmio + idx_reg);
		reg_val16 = ioread16(mmio + idx_reg);
		if (reg_val16 != base_idx) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			iowrite16(0, mmio + idx_reg);
			return -EIO;
		}
		dev_dbg(&ntb->pdev->dev, "BAR %d IMBASEIDX: %#x\n", bar, reg_val16);
	}

	return 0;
}

static int intel_ntb4_link_enable(struct ntb_dev *ntb,
				  enum ntb_speed max_speed, enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl, ppd0;
	u16 lnkctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(&ntb->pdev->dev,
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);

	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(&ntb->pdev->dev,
			"ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(&ntb->pdev->dev,
			"ignoring max_width %d\n", max_width);

	if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD)) {
		u32 ltr;

		/* Setup active snoop LTR values */
		ltr = NTB_LTR_ACTIVE_REQMNT | NTB_LTR_ACTIVE_VAL | NTB_LTR_ACTIVE_LATSCALE;
		/* Setup active non-snoop values */
		ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
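		/* (the shift-and-OR mirrors the snoop encoding into the
		 * non-snoop half of the same register)
		 */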
		iowrite32(ltr, ndev->self_mmio + GEN4_LTR_ACTIVE_OFFSET);

		/* Setup idle snoop LTR values */
		ltr = NTB_LTR_IDLE_VAL | NTB_LTR_IDLE_LATSCALE | NTB_LTR_IDLE_REQMNT;
		/* Setup idle non-snoop values */
		ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
		iowrite32(ltr, ndev->self_mmio + GEN4_LTR_IDLE_OFFSET);

		/* setup PCIe LTR to active */
		iowrite8(NTB_LTR_SWSEL_ACTIVE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);
	}

	ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP;
	ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
	lnkctl &= ~GEN4_LINK_CTRL_LINK_DISABLE;
	iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);

	/* start link training in PPD0 */
	ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
	ppd0 |= GEN4_PPD_LINKTRN;
	iowrite32(ppd0, ndev->self_mmio + GEN4_PPD0_OFFSET);

	/* make sure link training has started */
	ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
	if (!(ppd0 & GEN4_PPD_LINKTRN)) {
		dev_warn(&ntb->pdev->dev, "Link is not training\n");
		return -ENXIO;
	}

	ndev->dev_up = 1;

	return 0;
}

static int intel_ntb4_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;
	u16 lnkctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(&ntb->pdev->dev, "Disabling link\n");

	/* clear the snoop bits */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP);
	ntb_cntl &= ~(NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP);
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
	lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
	iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);

	/* set LTR to idle */
	if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD))
		iowrite8(NTB_LTR_SWSEL_IDLE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);

	ndev->dev_up = 0;

	return 0;
}

static int intel_ntb4_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
				   resource_size_t *addr_align,
				   resource_size_t *size_align,
				   resource_size_t *size_max)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	resource_size_t bar_size, mw_size;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	if (addr_align) {
		if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN)
			*addr_align = pci_resource_len(ndev->ntb.pdev, bar);
		else
			*addr_align = PAGE_SIZE;
	}

	if (size_align)
		*size_align = 1;

	if (size_max)
		*size_max = mw_size;

	return 0;
}
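
/*
 * The op table below reuses the gen3 doorbell helpers and the generic
 * intel_ntb_* helpers shared with earlier generations; only window
 * translation and link control are gen4-specific.
 */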
const struct ntb_dev_ops intel_ntb4_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_align		= intel_ntb4_mw_get_align,
	.mw_set_trans		= intel_ntb4_mw_set_trans,
	.peer_mw_count		= intel_ntb_peer_mw_count,
	.peer_mw_get_addr	= intel_ntb_peer_mw_get_addr,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb4_link_enable,
	.link_disable		= intel_ntb4_link_disable,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb3_db_read,
	.db_clear		= intel_ntb3_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb3_peer_db_addr,
	.peer_db_set		= intel_ntb3_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};