/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe GEN3 NTB Linux driver
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"
static int gen3_poll_link(struct intel_ntb_dev *ndev);

static const struct intel_ntb_reg gen3_reg = {
	.poll_link		= gen3_poll_link,
	.link_is_up		= xeon_link_is_up,
	.db_ioread		= gen3_db_ioread,
	.db_iowrite		= gen3_db_iowrite,
	.db_size		= sizeof(u32),
	.ntb_ctl		= GEN3_NTBCNTL_OFFSET,
};
static const struct intel_ntb_alt_reg gen3_pri_reg = {
	.db_bell		= GEN3_EM_DOORBELL_OFFSET,
	.db_clear		= GEN3_IM_INT_STATUS_OFFSET,
	.db_mask		= GEN3_IM_INT_DISABLE_OFFSET,
	.spad			= GEN3_IM_SPAD_OFFSET,
};
static const struct intel_ntb_alt_reg gen3_b2b_reg = {
	.db_bell		= GEN3_IM_DOORBELL_OFFSET,
	.db_clear		= GEN3_EM_INT_STATUS_OFFSET,
	.db_mask		= GEN3_EM_INT_DISABLE_OFFSET,
	.spad			= GEN3_B2B_SPAD_OFFSET,
};
static const struct intel_ntb_xlat_reg gen3_sec_xlat = {
/*	.bar0_base		= GEN3_EMBAR0_OFFSET, */
	.bar2_limit		= GEN3_IMBAR1XLMT_OFFSET,
	.bar2_xlat		= GEN3_IMBAR1XBASE_OFFSET,
};
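
/*
 * Check whether the PCIe link status has changed since the last poll;
 * returns 1 when the cached link status is updated, 0 otherwise.
 */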
static int gen3_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_clear);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  GEN3_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}
static int gen3_init_isr(struct intel_ntb_dev *ndev)
{
	int i;

	/*
	 * The MSIX vectors and the interrupt status bits are not lined up
	 * on Skylake.  By default the link status bit is bit 32, but it is
	 * routed to MSIX vector 0.  Fix them up so they line up: the
	 * vectors at reset are 1-32,0 and we reprogram them to 0-32.
	 */
	for (i = 0; i < GEN3_DB_MSIX_VECTOR_COUNT; i++)
		iowrite8(i, ndev->self_mmio + GEN3_INTVEC_OFFSET + i);

	/* move link status down one as workaround */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
		iowrite8(GEN3_DB_MSIX_VECTOR_COUNT - 2,
			 ndev->self_mmio + GEN3_INTVEC_OFFSET +
			 (GEN3_DB_MSIX_VECTOR_COUNT - 1));
	}

	return ndev_init_isr(ndev, GEN3_DB_MSIX_VECTOR_COUNT,
			     GEN3_DB_MSIX_VECTOR_COUNT,
			     GEN3_DB_MSIX_VECTOR_SHIFT,
			     GEN3_DB_TOTAL_SHIFT);
}
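
/*
 * Set the incoming BAR limits equal to the configured base addresses so
 * both windows start with zero usable length, and clear the incoming
 * translation addresses.  Gen3 exposes both sides through the same MMIO
 * space, so peer_mmio simply aliases self_mmio.
 */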
static int gen3_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	phys_addr_t bar_addr;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64;
	iowrite64(bar_addr, mmio + GEN3_IMBAR1XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64;
	iowrite64(bar_addr, mmio + GEN3_IMBAR2XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + GEN3_IMBAR1XBASE_OFFSET);
	iowrite64(0, mmio + GEN3_IMBAR2XBASE_OFFSET);

	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}
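
/*
 * Fill in the gen3 device capabilities (memory window, scratchpad and
 * doorbell counts), select the register sets for the detected B2B
 * topology, and leave every doorbell masked until a client unmasks it.
 */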
static int gen3_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;

	ndev->mw_count = XEON_MW_COUNT;
	ndev->spad_count = GEN3_SPAD_COUNT;
	ndev->db_count = GEN3_DB_COUNT;
	ndev->db_link_mask = GEN3_DB_LINK_BIT;

	/* DB fixup for using 31 right now */
	if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
		ndev->db_link_mask |= BIT_ULL(31);

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &gen3_pri_reg;
		ndev->peer_reg = &gen3_b2b_reg;
		ndev->xlat_reg = &gen3_sec_xlat;

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = gen3_setup_b2b_mw(ndev,
					       &xeon_b2b_dsd_addr,
					       &xeon_b2b_usd_addr);
		} else {
			rc = gen3_setup_b2b_mw(ndev,
					       &xeon_b2b_usd_addr,
					       &xeon_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + GEN3_SPCICMD_OFFSET);
		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}
int gen3_init_dev(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	u8 ppd;
	int rc;

	pdev = ndev->ntb.pdev;

	ndev->reg = &gen3_reg;

	rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
	dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
		ntb_topo_string(ndev->ntb.topo));
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;

	rc = gen3_init_ntb(ndev);
	if (rc)
		return rc;

	return gen3_init_isr(ndev);
}
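
/*
 * Debugfs read handler: format a snapshot of the device state (topology,
 * link status, doorbell masks, incoming and outgoing XLAT registers, and
 * error status registers) into a temporary buffer and copy it to userspace.
 */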
ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + GEN3_IMBAR1XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN3_IMBAR2XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN3_IMBAR1XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN3_IMBAR2XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);

	if (ntb_topo_is_b2b(ndev->ntb.topo)) {
		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Outgoing B2B XLAT:\n");

		u.v64 = ioread64(mmio + GEN3_EMBAR1XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR2XBASE_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR1XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR2XLMT_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);

		off += scnprintf(buf + off, buf_size - off,
				 "\nNTB Secondary BAR:\n");

		u.v64 = ioread64(mmio + GEN3_EMBAR0_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR0 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR1_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR1 -\t\t%#018llx\n", u.v64);

		u.v64 = ioread64(mmio + GEN3_EMBAR2_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "EMBAR2 -\t\t%#018llx\n", u.v64);
	}

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	u.v16 = ioread16(mmio + GEN3_USMEMMISS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "Upstream Memory Miss -\t%u\n", u.v16);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	if (!pci_read_config_word(ndev->ntb.pdev,
				  GEN3_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_word(ndev->ntb.pdev,
				  GEN3_LINK_STATUS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN3_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN3_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}
int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed,
			   enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(&ntb->pdev->dev,
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);

	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}
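
/*
 * Program the inbound translation for one memory window: write the
 * IMBARnXBASE translation address and the IMBARnXLMT limit, reading each
 * register back to verify it stuck, then mirror the limit on the EP side.
 */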
static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				   dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
	base = pci_resource_start(ndev->ntb.pdev, bar);

	/* Set the limit if supported, if size is not mw_size */
	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the translation address */
	iowrite64(addr, mmio + xlat_reg);
	reg_val = ioread64(mmio + xlat_reg);
	if (reg_val != addr) {
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);

	/* setup the EP */
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
	base = ioread64(mmio + GEN3_EMBAR1_OFFSET + (8 * idx));
	base &= ~0xf;

	if (limit_reg && size != mw_size)
		limit = base + size;
	else
		limit = base + mw_size;

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);

	return 0;
}
int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
			    resource_size_t *db_size,
			    u64 *db_data, int db_bit)
{
	phys_addr_t db_addr_base;
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (unlikely(db_bit >= BITS_PER_LONG_LONG))
		return -EINVAL;

	if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask))
		return -EINVAL;

	ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr,
		     ndev->peer_reg->db_bell);

	if (db_addr) {
		*db_addr = db_addr_base + (db_bit * 4);
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n",
			*db_addr, db_bit);
	}

	if (db_data) {
		*db_data = 1;
		dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n",
			*db_data, db_bit);
	}

	return 0;
}
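
/*
 * Ring the peer's doorbells: write a 1 to each requested bit's private
 * 4-byte doorbell register in the peer MMIO space.
 */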
int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bit;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	while (db_bits) {
		bit = __ffs(db_bits);
		iowrite32(1, ndev->peer_mmio +
			  ndev->peer_reg->db_bell + (bit * 4));
		db_bits &= db_bits - 1;
	}

	return 0;
}
u64 intel_ntb3_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_clear);
}
int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_clear);
}
const struct ntb_dev_ops intel_ntb3_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_align		= intel_ntb_mw_get_align,
	.mw_set_trans		= intel_ntb3_mw_set_trans,
	.peer_mw_count		= intel_ntb_peer_mw_count,
	.peer_mw_get_addr	= intel_ntb_peer_mw_get_addr,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb3_link_enable,
	.link_disable		= intel_ntb_link_disable,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb3_db_read,
	.db_clear		= intel_ntb3_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb3_peer_db_addr,
	.peer_db_set		= intel_ntb3_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};