/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic.h"

#include <linux/slab.h>
#include <linux/bitops.h>

#define MASK(n)			((1ULL<<(n))-1)
#define OCM_WIN_P3P(addr)	(addr & 0xffc0000)

#define GET_MEM_OFFS_2M(addr)	(addr & MASK(18))

#define CRB_BLK(off)		((off >> 20) & 0x3f)
#define CRB_SUBBLK(off)		((off >> 16) & 0xf)
#define CRB_WINDOW_2M		(0x130060)
#define CRB_HI(off)		((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
#define CRB_INDIRECT_2M		(0x1e0000UL)
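/*
 * A CRB offset in the legacy 128MB map is decomposed by CRB_BLK()
 * (bits 25:20, the 1MB block) and CRB_SUBBLK() (bits 19:16, the 64KB
 * sub-block).  CRB_HI() combines the hub/agent id from crb_hub_agt[]
 * with the 64KB-aligned offset to form the value written into the
 * CRB_WINDOW_2M register for indirect (windowed) access.
 */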
static inline u64 readq(void __iomem *addr)
{
	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
}

static inline void writeq(u64 val, void __iomem *addr)
{
	writel(((u32) (val)), (addr));
	writel(((u32) (val >> 32)), (addr + 4));
}
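/*
 * readq()/writeq() emulate 64-bit MMIO with two 32-bit accesses:
 * the low word lives at 'addr' and the high word at 'addr + 4'.
 * The two halves are not read or written atomically.
 */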
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
	{{{0, 0, 0, 0} } },		/* 0: PCI */
	{{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
	  {1, 0x0110000, 0x0120000, 0x130000},
	  {1, 0x0120000, 0x0122000, 0x124000},
	  {1, 0x0130000, 0x0132000, 0x126000},
	  {1, 0x0140000, 0x0142000, 0x128000},
	  {1, 0x0150000, 0x0152000, 0x12a000},
	  {1, 0x0160000, 0x0170000, 0x110000},
	  {1, 0x0170000, 0x0172000, 0x12e000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x01e0000, 0x01e0800, 0x122000},
	  {0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
	{{{0, 0, 0, 0} } },			/* 3: */
	{{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
	{{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
	{{{1, 0x0800000, 0x0802000, 0x170000},	/* 8: SQM0 */
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1*/
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2*/
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3*/
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
	{{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
	{{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
	{{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
	{{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
	{{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
	{{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
	{{{0, 0, 0, 0} } },			/* 23: */
	{{{0, 0, 0, 0} } },			/* 24: */
	{{{0, 0, 0, 0} } },			/* 25: */
	{{{0, 0, 0, 0} } },			/* 26: */
	{{{0, 0, 0, 0} } },			/* 27: */
	{{{0, 0, 0, 0} } },			/* 28: */
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
	{{{0} } },				/* 32: PCI */
	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
	  {1, 0x2110000, 0x2120000, 0x130000},
	  {1, 0x2120000, 0x2122000, 0x124000},
	  {1, 0x2130000, 0x2132000, 0x126000},
	  {1, 0x2140000, 0x2142000, 0x128000},
	  {1, 0x2150000, 0x2152000, 0x12a000},
	  {1, 0x2160000, 0x2170000, 0x110000},
	  {1, 0x2170000, 0x2172000, 0x12e000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
	{{{0} } },				/* 35: */
	{{{0} } },				/* 36: */
	{{{0} } },				/* 37: */
	{{{0} } },				/* 38: */
	{{{0} } },				/* 39: */
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
	{{{1, 0x3400000, 0x3404000, 0x1bc000} } },/* 52: */
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
	{{{0} } },				/* 59: I2C0 */
	{{{0} } },				/* 60: I2C1 */
	{{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
};
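/*
 * Each crb_128M_2M_map entry describes how one 1MB block of the legacy
 * 128MB CRB address space maps into the 2MB PCI BAR: a sub-block marked
 * valid can be accessed directly at 'start_2M', anything else has to go
 * through the CRB window (see qlcnic_pci_get_crb_addr_2M() below).
 */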
/*
 * top 12 bits of crb internal address (hub, agent)
 */
static const unsigned crb_hub_agt[64] = {
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
	QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
	QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};
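/*
 * crb_hub_agt[] supplies the hub/agent id that CRB_HI() places in the
 * upper bits of the CRB window value; a zero entry yields a window
 * value of 0, which qlcnic_pci_set_crbwindow_2M() rejects as an
 * invalid offset.
 */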
/* PCI Windowing for DDR regions. */

#define QLCNIC_PCIE_SEM_TIMEOUT	10000
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
	int done = 0, timeout = 0;

	while (!done) {
		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
		if (done == 1)
			break;
		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
			dev_err(&adapter->pdev->dev,
				"Failed to acquire sem=%d lock; holdby=%d\n",
				sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
			return -EIO;
		}
		msleep(1);
	}

	if (id_reg)
		QLCWR32(adapter, id_reg, adapter->portnum);

	return 0;
}

void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
	QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
static int
qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
		struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
	u32 i, producer, consumer;
	struct qlcnic_cmd_buffer *pbuf;
	struct cmd_desc_type0 *cmd_desc;
	struct qlcnic_host_tx_ring *tx_ring;

	i = 0;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return -EIO;

	tx_ring = adapter->tx_ring;
	__netif_tx_lock_bh(tx_ring->txq);

	producer = tx_ring->producer;
	consumer = tx_ring->sw_consumer;

	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
		netif_tx_stop_queue(tx_ring->txq);
		smp_mb();
		if (qlcnic_tx_avail(tx_ring) > nr_desc) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
				netif_tx_wake_queue(tx_ring->txq);
		} else {
			adapter->stats.xmit_off++;
			__netif_tx_unlock_bh(tx_ring->txq);
			return -EBUSY;
		}
	}

	do {
		cmd_desc = &cmd_desc_arr[i];

		pbuf = &tx_ring->cmd_buf_arr[producer];
		pbuf->skb = NULL;
		pbuf->frag_count = 0;

		memcpy(&tx_ring->desc_head[producer],
			&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));

		producer = get_next_index(producer, tx_ring->num_desc);
		i++;

	} while (i != nr_desc);

	tx_ring->producer = producer;

	qlcnic_update_cmd_producer(adapter, tx_ring);

	__netif_tx_unlock_bh(tx_ring->txq);

	return 0;
}
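/*
 * The request helpers below (MAC filter updates, receive mode, LRO,
 * interrupt coalescing, and so on) all build a struct qlcnic_nic_req
 * and hand it to qlcnic_send_cmd_descs(): firmware configuration
 * commands travel as ordinary command descriptors on the TX ring.
 */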
static int
qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
				__le16 vlan_id, unsigned op)
{
	struct qlcnic_nic_req req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&req.words[0];
	mac_req->op = op;
	memcpy(mac_req->mac_addr, addr, 6);

	vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
	vlan_req->vlan_id = vlan_id;

	return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
	struct list_head *head;
	struct qlcnic_mac_list_s *cur;

	/* look up if already exists */
	list_for_each(head, &adapter->mac_list) {
		cur = list_entry(head, struct qlcnic_mac_list_s, list);
		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
			return 0;
	}

	cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
	if (cur == NULL) {
		dev_err(&adapter->netdev->dev,
			"failed to add mac address filter\n");
		return -ENOMEM;
	}
	memcpy(cur->mac_addr, addr, ETH_ALEN);

	if (qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
		kfree(cur);
		return -EIO;
	}

	list_add_tail(&cur->list, &adapter->mac_list);
	return 0;
}
void qlcnic_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	u32 mode = VPORT_MISS_MODE_DROP;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return;

	qlcnic_nic_add_mac(adapter, adapter->mac_addr);
	qlcnic_nic_add_mac(adapter, bcast_addr);

	if (netdev->flags & IFF_PROMISC) {
		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
			mode = VPORT_MISS_MODE_ACCEPT_ALL;
		goto send_fw_cmd;
	}

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > adapter->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
		goto send_fw_cmd;
	}

	if (!netdev_mc_empty(netdev)) {
		netdev_for_each_mc_addr(ha, netdev) {
			qlcnic_nic_add_mac(adapter, ha->addr);
		}
	}

send_fw_cmd:
	if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
		qlcnic_alloc_lb_filters_mem(adapter);
		adapter->mac_learn = 1;
	} else {
		adapter->mac_learn = 0;
	}

	qlcnic_nic_set_promisc(adapter, mode);
}
int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
	struct qlcnic_nic_req req;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
			((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(mode);

	return qlcnic_send_cmd_descs(adapter,
				(struct cmd_desc_type0 *)&req, 1);
}
void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mac_list_s *cur;
	struct list_head *head = &adapter->mac_list;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
		qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, 0, QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
{
	struct qlcnic_filter *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	int i;

	for (i = 0; i < adapter->fhash.fmax; i++) {
		head = &(adapter->fhash.fhead[i]);

		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
			if (jiffies >
			    (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
				qlcnic_sre_macaddr_change(adapter,
					tmp_fil->faddr, tmp_fil->vlan_id,
					tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
					QLCNIC_MAC_DEL);
				spin_lock_bh(&adapter->mac_learn_lock);
				adapter->fhash.fnum--;
				hlist_del(&tmp_fil->fnode);
				spin_unlock_bh(&adapter->mac_learn_lock);
				kfree(tmp_fil);
			}
		}
	}
}
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
{
	struct qlcnic_filter *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	int i;

	for (i = 0; i < adapter->fhash.fmax; i++) {
		head = &(adapter->fhash.fhead[i]);

		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
			qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
				tmp_fil->vlan_id, tmp_fil->vlan_id ?
				QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
			spin_lock_bh(&adapter->mac_learn_lock);
			adapter->fhash.fnum--;
			hlist_del(&tmp_fil->fnode);
			spin_unlock_bh(&adapter->mac_learn_lock);
			kfree(tmp_fil);
		}
	}
}
int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
{
	struct qlcnic_nic_req req;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
	req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
		((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));

	req.words[0] = cpu_to_le64(flag);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv)
		dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
				flag ? "Set" : "Reset");
	return rv;
}
int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
	if (qlcnic_set_fw_loopback(adapter, mode))
		return -EIO;

	if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
		qlcnic_set_fw_loopback(adapter, 0);
		return -EIO;
	}

	msleep(1000);
	return 0;
}
void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
{
	int mode = VPORT_MISS_MODE_DROP;
	struct net_device *netdev = adapter->netdev;

	qlcnic_set_fw_loopback(adapter, 0);

	if (netdev->flags & IFF_PROMISC)
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
	else if (netdev->flags & IFF_ALLMULTI)
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;

	qlcnic_nic_set_promisc(adapter, mode);
	msleep(1000);
}
/*
 * Send the interrupt coalescing parameter set by ethtool to the card.
 */
int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
		((u64) adapter->portnum << 16));

	req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
	req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
			((u64) adapter->ahw->coal.rx_time_us) << 16);
	req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
			((u64) adapter->ahw->coal.type) << 32 |
			((u64) adapter->ahw->coal.sts_ring_mask) << 40);
	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send interrupt coalescing parameters\n");
	return rv;
}
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send configure hw lro request\n");

	return rv;
}
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
		((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send configure bridge mode request\n");

	adapter->flags ^= QLCNIC_BRIDGE_ENABLED;

	return rv;
}
#define RSS_HASHTYPE_IP_TCP	0x3

int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int i, rv;

	static const u64 key[] = {
		0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
		0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
		0x255b0ec26d5a56daULL
	};

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	/*
	 * RSS request:
	 * bits 3-0: hash_method
	 *      5-4: hash_type_ipv4
	 *      7-6: hash_type_ipv6
	 *        8: enable
	 *        9: use indirection table
	 *    47-10: reserved
	 *    63-48: indirection table mask
	 */
	word =  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
		((u64)(enable & 0x1) << 8) |
		((0x7ULL) << 48);
	req.words[0] = cpu_to_le64(word);
	for (i = 0; i < 5; i++)
		req.words[i+1] = cpu_to_le64(key[i]);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev, "could not configure RSS\n");

	return rv;
}
int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
{
	struct qlcnic_nic_req req;
	struct qlcnic_ipaddr *ipa;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(cmd);
	ipa = (struct qlcnic_ipaddr *)&req.words[1];
	ipa->ipv4 = ip;

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
				"could not notify %s IP 0x%x request\n",
				(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);

	return rv;
}
int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);
	req.words[0] = cpu_to_le64(enable | (enable << 8));

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
				"could not configure link notification\n");

	return rv;
}
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
		((u64)adapter->portnum << 16) |
		((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);

	req.req_hdr = cpu_to_le64(word);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"could not cleanup lro flows\n");

	return rv;
}
/*
 * qlcnic_change_mtu - Change the Maximum Transfer Unit
 * @returns 0 on success, negative on failure
 */
int qlcnic_change_mtu(struct net_device *netdev, int mtu)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
		dev_err(&adapter->netdev->dev,
			"%d bytes < mtu < %d bytes not supported\n",
			P3P_MAX_MTU, P3P_MIN_MTU);
		return -EINVAL;
	}

	rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);

	if (!rc)
		netdev->mtu = mtu;

	return rc;
}


netdev_features_t qlcnic_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		netdev_features_t changed = features ^ netdev->features;
		features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
	}

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}


int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;

	if (!(changed & NETIF_F_LRO))
		return 0;

	netdev->features = features ^ NETIF_F_LRO;

	if (qlcnic_config_hw_lro(adapter, hw_lro))
		return -EIO;

	if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
		return -EIO;

	return 0;
}
/*
 * Changes the CRB window to the specified window.
 */
/* Returns < 0 if off is not valid,
 *	 1 if window access is needed. 'off' is set to offset from
 *	   CRB space in 128M pci map
 *	 0 if no window access is needed. 'off' is set to 2M addr
 * In: 'off' is offset from base in 128M pci map
 */
static int
qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
		ulong off, void __iomem **addr)
{
	const struct crb_128M_2M_sub_block_map *m;

	if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
		/* not in CRB */
		return -EINVAL;

	off -= QLCNIC_PCI_CRBSPACE;

	/*
	 * Try direct map
	 */
	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];

	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
		*addr = adapter->ahw->pci_base0 + m->start_2M +
			(off - m->start_128M);
		return 0;
	}

	/*
	 * Not in direct map, use crb window
	 */
	*addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
	return 1;
}
/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static int
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
	u32 window;
	void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;

	off -= QLCNIC_PCI_CRBSPACE;

	window = CRB_HI(off);
	if (window == 0) {
		dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
		return -EIO;
	}

	writel(window, addr);
	if (readl(addr) != window) {
		if (printk_ratelimit())
			dev_warn(&adapter->pdev->dev,
				"failed to set CRB window to %d off 0x%lx\n",
				window, off);
		return -EIO;
	}
	return 0;
}
int
qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
{
	unsigned long flags;
	int rv;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0) {
		writel(data, addr);
		return 0;
	}

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw->crb_lock, flags);
		crb_win_lock(adapter);
		rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
		if (!rv)
			writel(data, addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
		return rv;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	return -EIO;
}
int
qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
	unsigned long flags;
	int rv;
	u32 data = -1;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0)
		return readl(addr);

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw->crb_lock, flags);
		crb_win_lock(adapter);
		if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
			data = readl(addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
		return data;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	return -1;
}
void __iomem *
qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
{
	void __iomem *addr = NULL;

	WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));

	return addr;
}
static int
qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
		u64 addr, u32 *start)
{
	u32 window;

	window = OCM_WIN_P3P(addr);

	writel(window, adapter->ahw->ocm_win_crb);
	/* read back to flush */
	readl(adapter->ahw->ocm_win_crb);

	*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
	return 0;
}
static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
		u64 *data, int op)
{
	void __iomem *addr;
	int ret;
	u32 start;

	mutex_lock(&adapter->ahw->mem_lock);

	ret = qlcnic_pci_set_window_2M(adapter, off, &start);
	if (ret != 0)
		goto unlock;

	addr = adapter->ahw->pci_base0 + start;

	if (op == 0)	/* read */
		*data = readq(addr);
	else		/* write */
		writeq(*data, addr);

unlock:
	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
void
qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
{
	void __iomem *addr = adapter->ahw->pci_base0 +
		QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);

	mutex_lock(&adapter->ahw->mem_lock);
	*data = readq(addr);
	mutex_unlock(&adapter->ahw->mem_lock);
}
void
qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
{
	void __iomem *addr = adapter->ahw->pci_base0 +
		QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);

	mutex_lock(&adapter->ahw->mem_lock);
	writeq(data, addr);
	mutex_unlock(&adapter->ahw->mem_lock);
}
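/*
 * The routines below access adapter memory (DDR/QDR) that is not
 * directly mapped in the 2MB BAR: the address and data are staged
 * through the MIU test agent registers and TEST_AGT_CTRL is polled
 * (up to MAX_CTL_CHECK iterations) until the agent clears TA_CTL_BUSY.
 * OCM addresses are still handled by qlcnic_pci_mem_access_direct().
 */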
#define MAX_CTL_CHECK	1000

int
qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 data)
{
	int i, j, ret;
	u32 temp, off8;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
		return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);

	return -EIO;

correct:
	off8 = off & ~0xf;

	mutex_lock(&adapter->ahw->mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));

	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE),
			(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		ret = -EIO;
		goto done;
	}

	i = (off & 0xf) ? 0 : 2;
	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));
	i = (off & 0xf) ? 2 : 0;

	writel(data & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel((data >> 32) & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));

	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
			(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to write through agent\n");
		ret = -EIO;
	} else
		ret = 0;

done:
	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
int
qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 *data)
{
	int j, ret;
	u32 temp, off8;
	u64 val;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
		return qlcnic_pci_mem_access_direct(adapter,
				off, data, 0);
	}

	return -EIO;

correct:
	off8 = off & ~0xf;

	mutex_lock(&adapter->ahw->mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
		ret = -EIO;
	} else {
		off8 = MIU_TEST_AGT_RDDATA_LO;
		if (off & 0xf)
			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;

		temp = readl(mem_crb + off8 + 4);
		val = (u64)temp << 32;
		val |= readl(mem_crb + off8);

		*data = val;
		ret = 0;
	}

	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
{
	int offset, board_type, magic;
	struct pci_dev *pdev = adapter->pdev;

	offset = QLCNIC_FW_MAGIC_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &magic))
		return -EIO;

	if (magic != QLCNIC_BDINFO_MAGIC) {
		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
			magic);
		return -EIO;
	}

	offset = QLCNIC_BRDTYPE_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &board_type))
		return -EIO;

	adapter->ahw->board_type = board_type;

	if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
		u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
		if ((gpio & 0x8000) == 0)
			board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
	}

	switch (board_type) {
	case QLCNIC_BRDTYPE_P3P_HMEZ:
	case QLCNIC_BRDTYPE_P3P_XG_LOM:
	case QLCNIC_BRDTYPE_P3P_10G_CX4:
	case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3P_IMEZ:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
	case QLCNIC_BRDTYPE_P3P_10G_XFP:
	case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
		adapter->ahw->port_type = QLCNIC_XGBE;
		break;
	case QLCNIC_BRDTYPE_P3P_REF_QG:
	case QLCNIC_BRDTYPE_P3P_4_GB:
	case QLCNIC_BRDTYPE_P3P_4_GB_MM:
		adapter->ahw->port_type = QLCNIC_GBE;
		break;
	case QLCNIC_BRDTYPE_P3P_10G_TP:
		adapter->ahw->port_type = (adapter->portnum < 2) ?
			QLCNIC_XGBE : QLCNIC_GBE;
		break;
	default:
		dev_err(&pdev->dev, "unknown board type %x\n", board_type);
		adapter->ahw->port_type = QLCNIC_XGBE;
		break;
	}

	return 0;
}
static int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
	u32 wol_cfg;

	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
	if (wol_cfg & (1UL << adapter->portnum)) {
		wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
		if (wol_cfg & (1 << adapter->portnum))
			return 1;
	}

	return 0;
}
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
	struct qlcnic_nic_req req;
	int rv;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64((u64)rate << 32);
	req.words[1] = cpu_to_le64(state);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv)
		dev_err(&adapter->pdev->dev, "LED configuration failed.\n");

	return rv;
}
/* FW dump related functions */
static u32
qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
		u32 *buffer)
{
	int i;
	u32 addr, data;
	struct __crb *crb = &entry->region.crb;
	void __iomem *base = adapter->ahw->pci_base0;

	addr = crb->addr;
	for (i = 0; i < crb->no_ops; i++) {
		QLCNIC_RD_DUMP_REG(addr, base, &data);
		*buffer++ = cpu_to_le32(addr);
		*buffer++ = cpu_to_le32(data);
		addr += crb->stride;
	}
	return crb->no_ops * 2 * sizeof(u32);
}
static u32
qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i, k, timeout = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	u32 addr, data;
	u8 no_ops;
	struct __ctrl *ctr = &entry->region.ctrl;
	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;

	addr = ctr->addr;
	no_ops = ctr->no_ops;

	for (i = 0; i < no_ops; i++) {
		for (k = 0; k < 8; k++) {
			if (!(ctr->opcode & (1 << k)))
				continue;
			switch (1 << k) {
			case QLCNIC_DUMP_WCRB:
				QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
				break;
			case QLCNIC_DUMP_RWCRB:
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				QLCNIC_WR_DUMP_REG(addr, base, data);
				break;
			case QLCNIC_DUMP_ANDCRB:
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				QLCNIC_WR_DUMP_REG(addr, base,
					(data & ctr->val2));
				break;
			case QLCNIC_DUMP_ORCRB:
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				QLCNIC_WR_DUMP_REG(addr, base,
					(data | ctr->val3));
				break;
			case QLCNIC_DUMP_POLLCRB:
				while (timeout <= ctr->timeout) {
					QLCNIC_RD_DUMP_REG(addr, base, &data);
					if ((data & ctr->val2) == ctr->val1)
						break;
					msleep(1);
					timeout++;
				}
				if (timeout > ctr->timeout) {
					dev_info(&adapter->pdev->dev,
					"Timed out, aborting poll CRB\n");
					return -EINVAL;
				}
				break;
			case QLCNIC_DUMP_RD_SAVE:
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			case QLCNIC_DUMP_WRT_SAVED:
				if (ctr->index_v)
					data = t_hdr->saved_state[ctr->index_v];
				else
					data = ctr->val1;
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				QLCNIC_WR_DUMP_REG(addr, base, data);
				break;
			case QLCNIC_DUMP_MOD_SAVE_ST:
				data = t_hdr->saved_state[ctr->index_v];
				data <<= ctr->shl_val;
				data >>= ctr->shr_val;
				if (ctr->val2)
					data &= ctr->val2;
				data |= ctr->val3;
				data += ctr->val1;
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			default:
				dev_info(&adapter->pdev->dev,
					"Unknown opcode\n");
				break;
			}
		}
		addr += ctr->stride;
	}
	return 0;
}
static u32
qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
	u32 *buffer)
{
	int loop;
	u32 val, data = 0;
	struct __mux *mux = &entry->region.mux;
	void __iomem *base = adapter->ahw->pci_base0;

	val = mux->sel_val;
	for (loop = 0; loop < mux->no_ops; loop++) {
		QLCNIC_WR_DUMP_REG(mux->addr, base, val);
		QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
		*buffer++ = cpu_to_le32(val);
		*buffer++ = cpu_to_le32(data);
		val += mux->val_stride;
	}
	return 2 * mux->no_ops * sizeof(u32);
}
static u32
qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
	u32 *buffer)
{
	int i, loop;
	u32 cnt, addr, data, que_id = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __queue *que = &entry->region.que;

	addr = que->read_addr;
	cnt = que->read_addr_cnt;

	for (loop = 0; loop < que->no_ops; loop++) {
		QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
		addr = que->read_addr;
		for (i = 0; i < cnt; i++) {
			QLCNIC_RD_DUMP_REG(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += que->read_addr_stride;
		}
		que_id += que->stride;
	}
	return que->no_ops * cnt * sizeof(u32);
}
static u32
qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
	u32 *buffer)
{
	int i;
	u32 data;
	void __iomem *addr;
	struct __ocm *ocm = &entry->region.ocm;

	addr = adapter->ahw->pci_base0 + ocm->read_addr;
	for (i = 0; i < ocm->no_ops; i++) {
		data = readl(addr);
		*buffer++ = cpu_to_le32(data);
		addr += ocm->read_addr_stride;
	}
	return ocm->no_ops * sizeof(u32);
}
static u32
qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
	u32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;
	void __iomem *base = adapter->ahw->pci_base0;

	fl_addr = rom->addr;
	size = rom->size / 4;
lock_try:
	lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		msleep(10);
		count++;
		goto lock_try;
	}
	writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
	for (i = 0; i < size; i++) {
		addr = fl_addr & 0xFFFF0000;
		QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		QLCNIC_RD_DUMP_REG(addr, base, &val);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	readl(base + QLCNIC_FLASH_SEM2_ULK);
	return rom->size;
}
static u32
qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __cache *l1 = &entry->region.cache;

	val = l1->init_tag_val;

	for (i = 0; i < l1->no_ops; i++) {
		QLCNIC_WR_DUMP_REG(l1->addr, base, val);
		QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
		addr = l1->read_addr;
		cnt = l1->read_addr_num;
		while (cnt) {
			QLCNIC_RD_DUMP_REG(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += l1->read_addr_stride;
			cnt--;
		}
		val += l1->stride;
	}
	return l1->no_ops * l1->read_addr_num * sizeof(u32);
}
static u32
qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	u8 poll_mask, poll_to, time_out = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __cache *l2 = &entry->region.cache;

	val = l2->init_tag_val;
	poll_mask = LSB(MSW(l2->ctrl_val));
	poll_to = MSB(MSW(l2->ctrl_val));

	for (i = 0; i < l2->no_ops; i++) {
		QLCNIC_WR_DUMP_REG(l2->addr, base, val);
		if (LSW(l2->ctrl_val))
			QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
				LSW(l2->ctrl_val));
		if (!poll_mask)
			goto skip_poll;
		do {
			QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
			if (!(data & poll_mask))
				break;
			msleep(1);
			time_out++;
		} while (time_out <= poll_to);

		if (time_out > poll_to) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return -EINVAL;
		}
skip_poll:
		addr = l2->read_addr;
		cnt = l2->read_addr_num;
		while (cnt) {
			QLCNIC_RD_DUMP_REG(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += l2->read_addr_stride;
			cnt--;
		}
		val += l2->stride;
	}
	return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
static u32
qlcnic_read_memory(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	u32 addr, data, test, ret = 0;
	int i, reg_read;
	struct __mem *mem = &entry->region.mem;
	void __iomem *base = adapter->ahw->pci_base0;

	reg_read = mem->size;
	addr = mem->addr;
	/* check for data size of multiple of 16 and 16 byte alignment */
	if ((addr & 0xf) || (reg_read % 16)) {
		dev_info(&adapter->pdev->dev,
			"Unaligned memory addr:0x%x size:0x%x\n",
			addr, reg_read);
		return -EINVAL;
	}

	mutex_lock(&adapter->ahw->mem_lock);

	while (reg_read != 0) {
		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
		QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
			TA_CTL_ENABLE | TA_CTL_START);

		for (i = 0; i < MAX_CTL_CHECK; i++) {
			QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
			if (!(test & TA_CTL_BUSY))
				break;
		}
		if (i == MAX_CTL_CHECK) {
			if (printk_ratelimit()) {
				dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
				ret = -EINVAL;
				goto out;
			}
		}
		for (i = 0; i < 4; i++) {
			QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
			*buffer++ = cpu_to_le32(data);
		}
		addr += 16;
		reg_read -= 16;
		ret += 16;
	}
out:
	mutex_unlock(&adapter->ahw->mem_lock);
	return mem->size;
}
static u32
qlcnic_dump_nop(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
	return 0;
}
struct qlcnic_dump_operations fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
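/*
 * qlcnic_dump_fw() walks the dump template and uses this table to map
 * each entry's hdr.type to the handler that captures that region.
 */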
/* Walk the template and collect dump for each entry in the dump template */
static int
qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
	u32 size)
{
	int ret = 1;

	if (size != entry->hdr.cap_size) {
		dev_info(dev,
		"Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
		entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
		dev_info(dev, "Aborting further dump capture\n");
		ret = 0;
	}

	return ret;
}
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	u32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};
	int i, k, ops_cnt, ops_index, dump_size = 0;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	struct qlcnic_dump_entry *entry;
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			"Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & tmpl_hdr->drv_cap_mask)
			dump_size += tmpl_hdr->cap_sizes[k];
	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data) {
		dev_info(&adapter->pdev->dev,
			"Unable to allocate (%d KB) for fw dump\n",
			dump_size / 1024);
		return -ENOMEM;
	}
	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = tmpl_hdr->num_entries;
	ops_cnt = ARRAY_SIZE(fw_dump_ops);
	entry_offset = tmpl_hdr->offset;
	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
	tmpl_hdr->sys_info[1] = adapter->fw_version;

	for (i = 0; i < no_entries; i++) {
		entry = (void *)tmpl_hdr + entry_offset;
		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}
		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}
		if (ops_index == ops_cnt) {
			dev_info(&adapter->pdev->dev,
				"Invalid entry type %d, exiting dump\n",
				entry->hdr.type);
			goto error;
		}
		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
			dump))
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}
	if (dump_size != buf_offset) {
		dev_info(&adapter->pdev->dev,
			"Captured(%d) and expected size(%d) do not match\n",
			buf_offset, dump_size);
		goto error;
	} else {
		fw_dump->clr = 1;
		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
			adapter->netdev->name);
		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
			fw_dump->size);
		/* Send a udev event to notify availability of FW dump */
		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
		return 0;
	}
error:
	vfree(fw_dump->data);
	return -EINVAL;
}