/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
	octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

	/* Wait for 10ms as Octeon resets. */
	mdelay(100);

	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	return 0;
}
void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
{
	u32 val;

	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
	if (val & 0x000c0000) {
		dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
			val & 0x000c0000);
	}

	val |= 0xf; /* Enable Link error reporting */

	dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
}
void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
			       enum octeon_pcie_mps mps)
{
	u32 val;
	u64 r64;

	/* Read config register for MPS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mps == PCIE_MPS_DEFAULT) {
		mps = ((val & (0x7 << 5)) >> 5);
	} else {
		val &= ~(0x7 << 5); /* Turn off any MPS bits */
		val |= (mps << 5);  /* Set MPS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= (mps << 4);
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}
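
/* Example (illustrative values): the MPS encoding lives in DEVCTL bits 7:5,
 * where the payload size is 128 << mps bytes. A DEVCTL read of val = 0x2810
 * yields mps = (0x2810 & (0x7 << 5)) >> 5 = 0, i.e. a 128-byte max payload;
 * programming mps = 1 would select 256 bytes.
 */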
void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
				enum octeon_pcie_mrrs mrrs)
{
	u32 val;
	u64 r64;

	/* Read config register for MRRS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mrrs == PCIE_MRRS_DEFAULT) {
		mrrs = ((val & (0x7 << 12)) >> 12);
	} else {
		val &= ~(0x7 << 12); /* Turn off any MRRS bits */
		val |= (mrrs << 12); /* Set MRRS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
	r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
	r64 |= mrrs;
	octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);

	/* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= mrrs;
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}
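
/* Example (illustrative values): the MRRS encoding lives in DEVCTL bits
 * 14:12, with read-request size 128 << mrrs bytes, so PCIE_MRRS_512B
 * corresponds to an encoding of 2 (128 << 2 = 512 bytes).
 */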
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
{
	/* Bits 29:24 of MIO_RST_BOOT holds the ref. clock multiplier
	 * for SLI.
	 */
	return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
}
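
/* Example (illustrative values): a multiplier field of 12 in MIO_RST_BOOT
 * bits 29:24 gives a coprocessor clock of 12 * 50 = 600 MHz.
 */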
u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
			    u32 time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);

	/* core clock per us / oq ticks will be fractional. To avoid that
	 * we use the method below.
	 */

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next 2 steps gives the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;

	return oqticks_per_us;
}
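
/* Worked example (illustrative values): with a 600 MHz coprocessor clock,
 * oqticks_per_us starts at 600, becomes 600000 clock cycles per ms, then
 * 600000 / 1024 = 585 oq ticks per ms. For time_intr_in_us = 100 this
 * returns (585 * 100) / 1000 = 58 oq ticks.
 */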
void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
	/* Select Round-Robin Arb, ES, RO, NS for Input Queues */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
			 CN6XXX_INPUT_CTL_MASK);

	/* Instruction Read Size - Max 4 instructions per PCIE Read */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
			   0xFFFFFFFFFFFFFFFFULL);

	/* Select PCIE Port for all Input rings. */
	octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
			   (oct->pcie_port * 0x5555555555555555ULL));
}
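
/* Note: each input ring has a 2-bit PCIE-port field in SLI_IN_PCIE_PORT.
 * Multiplying the port number by 0x5555555555555555ULL replicates it into
 * every 2-bit field at once: port 0 writes all-zeros, port 1 writes
 * 0x5555555555555555 (binary 01 in each field).
 */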
static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
	u64 pktctl;

	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

	/* 66XX SPECIFIC */
	if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
		/* Disable RING_EN if only up to 4 rings are used. */
		pktctl &= ~(1 << 4);
	else
		pktctl |= (1 << 4);

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
		pktctl |= 0xF;
	else
		/* Disable per-port backpressure. */
		pktctl &= ~0xF;
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}
void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
	u32 time_threshold;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* / Select PCI-E Port for all Output queues */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
			   (oct->pcie_port * 0x5555555555555555ULL));

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
	} else {
		/* / Set Output queue watermark to 0 to disable backpressure */
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
	}

	/* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

	/* Select ES, RO, NS setting from register for Output Queue Packet
	 * Address
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output
	 * Queue ScatterList
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

	/* / ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
#ifdef __BIG_ENDIAN_BITFIELD
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
			   0x5555555555555555ULL);
#else
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

	/* / No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
			   0x5555555555555555ULL);

	/* / Set up interrupt packet and time threshold */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
			 (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
	time_threshold =
		lio_cn6xxx_get_oq_ticks(oct, (u32)
					CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}
static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn66xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid a host hang
	 * when a read accesses an invalid register
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);

	return 0;
}
void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr for this
	 * queue
	 */
	iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio[0].hw_addr
			   + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter
	 * (used in flush_iq calculation)
	 */
	iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}
static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	lio_cn6xxx_setup_iq_regs(oct, iq_no);

	/* Backpressure for this queue - WMARK set to all F's. This effectively
	 * disables the backpressure mechanism.
	 */
	octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
			   (0xFFFFFFFFULL << 32));
}
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	u32 intr;
	struct octeon_droq *droq = oct->droq[oq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 droq->buffer_size);

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

	/* Enable this output queue to generate Packet Timer Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

	/* Enable this output queue to generate Packet Count Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}
int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
	u32 mask;

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
	mask |= oct->io_qmask.iq64B;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask |= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask |= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	return 0;
}
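
/* Example (illustrative values): with oct->io_qmask.iq = 0x3, the
 * read-modify-write sequence in lio_cn6xxx_enable_io_queues() sets the
 * enable bits for rings 0 and 1 while leaving any bits already set for
 * other rings untouched.
 */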
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
	int i;
	u32 mask, loop = HZ;
	u32 d32;

	/* Reset the Enable bits for Input Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask ^= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	/* Wait until hardware indicates that the queues are out of reset. */
	mask = (u32)oct->io_qmask.iq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Input queue. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
	}

	/* Reset the Enable bits for Output Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask ^= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	/* Wait until hardware indicates that the queues are out of reset. */
	loop = HZ;
	mask = (u32)oct->io_qmask.oq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Output queue. */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));

		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
	}

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}
void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
			  u64 core_addr,
			  u32 idx,
			  int valid)
{
	u64 bar1;

	if (valid == 0) {
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
			       CN6XXX_BAR1_REG(idx, oct->pcie_port));
		bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
		return;
	}

	/* Bits 17:4 of the PCI_BAR1_INDEXx stores bits 35:22 of
	 * the Core Addr
	 */
	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
		       CN6XXX_BAR1_REG(idx, oct->pcie_port));

	bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
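
/* Worked example (illustrative values): each BAR1 index maps a 4 MB
 * (1 << 22 byte) window of the core address space. For
 * core_addr = 0x12345678, (core_addr >> 22) = 0x48 selects the window base,
 * and (0x48 << 4) | PCI_BAR1_MASK places it in bits 17:4 with the mask
 * bits set.
 */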
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
			       u32 idx,
			       u32 mask)
{
	lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
	return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 new_idx = readl(iq->inst_cnt_reg);

	/* The new instr cnt reg is a 32-bit counter that can roll over. We have
	 * noted the counter's initial value at init time into
	 * reset_instr_cnt
	 */
	if (iq->reset_instr_cnt < new_idx)
		new_idx -= iq->reset_instr_cnt;
	else
		new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;

	/* Modulo of the new index with the IQ size will give us
	 * the new index.
	 */
	new_idx %= iq->max_count;

	return new_idx;
}
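
/* Worked example (illustrative values): if reset_instr_cnt = 0xfffffff0 was
 * latched at init and the counter has since wrapped to readl() = 0x10, the
 * else branch computes 0x10 + (0xffffffff - 0xfffffff0) + 1 = 0x20, i.e. 32
 * instructions fetched since init; the modulo then folds that into a ring
 * index below max_count.
 */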
void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
				 u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

	/* Enable Interrupt */
	writeq(mask, cn6xxx->intr_enb_reg64);
}
void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
				  u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Disable Interrupts */
	writeq(0, cn6xxx->intr_enb_reg64);
}
static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
	/* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
	 * to determine the PCIE port #
	 */
	oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

	dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}
static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
	dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
		CVM_CAST64(intr64));
}
static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
	struct octeon_droq *droq;
	int oq_no;
	u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
	u32 droq_cnt_enb, droq_cnt_mask;

	droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	droq_mask = droq_cnt_mask & droq_cnt_enb;

	droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	droq_mask |= (droq_time_mask & droq_int_enb);

	droq_mask &= oct->io_qmask.oq;

	oct->droq_intr = 0;

	for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
		if (!(droq_mask & BIT_ULL(oq_no)))
			continue;

		droq = oct->droq[oq_no];
		pkt_count = octeon_droq_check_hw_for_pkts(droq);
		if (pkt_count) {
			oct->droq_intr |= BIT_ULL(oq_no);
			if (droq->ops.poll_mode) {
				u32 value;
				u32 reg;

				struct octeon_cn6xxx *cn6xxx =
					(struct octeon_cn6xxx *)oct->chip;

				/* disable interrupts for this droq */
				spin_lock(&cn6xxx->lock_for_droq_int_enb_reg);
				reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);
				reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);

				spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
			}
		}
	}

	droq_time_mask &= oct->io_qmask.oq;
	droq_cnt_mask &= oct->io_qmask.oq;

	/* Reset the PKT_CNT/TIME_INT registers. */
	if (droq_time_mask)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

	if (droq_cnt_mask)	/* reset PKT_CNT register:66xx */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

	return 0;
}
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 intr64;

	intr64 = readq(cn6xxx->intr_sum_reg64);

	/* If our device has interrupted, then proceed.
	 * Also check for all f's if interrupt was triggered on an error
	 * and the PCI read fails.
	 */
	if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
		return IRQ_NONE;

	oct->int_status = 0;

	if (intr64 & CN6XXX_INTR_ERR)
		lio_cn6xxx_process_pcie_error_intr(oct, intr64);

	if (intr64 & CN6XXX_INTR_PKT_DATA) {
		lio_cn6xxx_process_droq_intr_regs(oct);
		oct->int_status |= OCT_DEV_INTR_PKT_DATA;
	}

	if (intr64 & CN6XXX_INTR_DMA0_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

	if (intr64 & CN6XXX_INTR_DMA1_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	writeq(intr64, cn6xxx->intr_sum_reg64);

	return IRQ_HANDLED;
}
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
				  void *chip,
				  struct octeon_reg_list *reg_list)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

	reg_list->pci_win_wr_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
	reg_list->pci_win_wr_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
	reg_list->pci_win_wr_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

	reg_list->pci_win_rd_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
	reg_list->pci_win_rd_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
	reg_list->pci_win_rd_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

	reg_list->pci_win_wr_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
	reg_list->pci_win_wr_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
	reg_list->pci_win_wr_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

	reg_list->pci_win_rd_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
	reg_list->pci_win_rd_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
	reg_list->pci_win_rd_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

	lio_cn6xxx_get_pcie_qlmport(oct);

	cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
	cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
	cn6xxx->intr_enb_reg64 =
		bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}
int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

	oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	cn6xxx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, LIO_210SV);
	if (!cn6xxx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}
	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	return 0;
}
int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
				    struct octeon_config *conf6xxx)
{
	if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_IQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_INPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_OQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_OUTPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
	    CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
			__func__);
		return 1;
	}

	if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
			__func__);
		return 1;
	}

	if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
		dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
			__func__);
		return 1;
	}

	return 0;
}