/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/slab.h>
#include <linux/bitops.h>

#define MASK(n)			((1ULL<<(n))-1)
#define OCM_WIN_P3P(addr)	(addr & 0xffc0000)

#define GET_MEM_OFFS_2M(addr)	(addr & MASK(18))

#define CRB_BLK(off)	((off >> 20) & 0x3f)
#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
#define CRB_WINDOW_2M	(0x130060)
#define CRB_HI(off)	((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
#define CRB_INDIRECT_2M	(0x1e0000UL)
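/*
 * Illustrative note (added, not part of the original source): a 128M-map
 * CRB offset is split by the macros above into a block index (bits 25:20),
 * a sub-block index (bits 19:16) and a low 16-bit offset.  For example,
 * with off = 0x0e10000 (the ROMUSB block):
 *
 *	CRB_BLK(off)    = (0x0e10000 >> 20) & 0x3f = 14
 *	CRB_SUBBLK(off) = (0x0e10000 >> 16) & 0xf  = 1
 *	CRB_HI(off)     = (crb_hub_agt[14] << 20) | 0x10000
 *
 * CRB_HI() is the value programmed into the CRB_WINDOW_2M register for
 * indirect access; the low 16 bits of the offset are then applied inside
 * the CRB_INDIRECT_2M region.
 */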
static inline u64 readq(void __iomem *addr)
{
	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
}

static inline void writeq(u64 val, void __iomem *addr)
{
	writel(((u32) (val)), (addr));
	writel(((u32) (val >> 32)), (addr + 4));
}
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
	{{{0, 0, 0, 0} } },		/* 0: PCI */
	{{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
		{1, 0x0110000, 0x0120000, 0x130000},
		{1, 0x0120000, 0x0122000, 0x124000},
		{1, 0x0130000, 0x0132000, 0x126000},
		{1, 0x0140000, 0x0142000, 0x128000},
		{1, 0x0150000, 0x0152000, 0x12a000},
		{1, 0x0160000, 0x0170000, 0x110000},
		{1, 0x0170000, 0x0172000, 0x12e000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x01e0000, 0x01e0800, 0x122000},
		{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
	{{{0, 0, 0, 0} } },	    /* 3: */
	{{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
	{{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE   */
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU   */
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM    */
	{{{1, 0x0800000, 0x0802000, 0x170000},	/* 8: SQM0  */
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1*/
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2*/
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3*/
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
	{{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
	{{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
	{{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
	{{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
	{{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
	{{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
	{{{0, 0, 0, 0} } },	/* 23: */
	{{{0, 0, 0, 0} } },	/* 24: */
	{{{0, 0, 0, 0} } },	/* 25: */
	{{{0, 0, 0, 0} } },	/* 26: */
	{{{0, 0, 0, 0} } },	/* 27: */
	{{{0, 0, 0, 0} } },	/* 28: */
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
	{{{0} } },				/* 32: PCI */
	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
		{1, 0x2110000, 0x2120000, 0x130000},
		{1, 0x2120000, 0x2122000, 0x124000},
		{1, 0x2130000, 0x2132000, 0x126000},
		{1, 0x2140000, 0x2142000, 0x128000},
		{1, 0x2150000, 0x2152000, 0x12a000},
		{1, 0x2160000, 0x2170000, 0x110000},
		{1, 0x2170000, 0x2172000, 0x12e000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
	{{{0} } },	/* 35: */
	{{{0} } },	/* 36: */
	{{{0} } },	/* 37: */
	{{{0} } },	/* 38: */
	{{{0} } },	/* 39: */
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
	{{{0} } },	/* 52: */
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
	{{{0} } },	/* 59: I2C0 */
	{{{0} } },	/* 60: I2C1 */
	{{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
};
/*
 * top 12 bits of crb internal address (hub, agent)
 */
static const unsigned crb_hub_agt[64] = {
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
	QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
	QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};
/*  PCI Windowing for DDR regions.  */

#define QLCNIC_PCIE_SEM_TIMEOUT	10000

int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
	int done = 0, timeout = 0;

	while (!done) {
		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
		if (done == 1)
			break;
		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
			dev_err(&adapter->pdev->dev,
				"Failed to acquire sem=%d lock; holdby=%d\n",
				sem,
				id_reg ? QLCRD32(adapter, id_reg) : -1);
			return -EIO;
		}
		msleep(1);
	}

	if (id_reg)
		QLCWR32(adapter, id_reg, adapter->portnum);

	return 0;
}

void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
	QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
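/*
 * Typical pairing (illustrative sketch only; the semaphore index and id
 * register below are hypothetical placeholders, not taken from this file):
 *
 *	if (qlcnic_pcie_sem_lock(adapter, sem, id_reg))
 *		return -EIO;
 *	...access the resource guarded by PCIE semaphore 'sem'...
 *	qlcnic_pcie_sem_unlock(adapter, sem);
 */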
static int
qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
		struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
	u32 i, producer, consumer;
	struct qlcnic_cmd_buffer *pbuf;
	struct cmd_desc_type0 *cmd_desc;
	struct qlcnic_host_tx_ring *tx_ring;

	i = 0;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return -EIO;

	tx_ring = adapter->tx_ring;
	__netif_tx_lock_bh(tx_ring->txq);

	producer = tx_ring->producer;
	consumer = tx_ring->sw_consumer;

	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
		netif_tx_stop_queue(tx_ring->txq);
		if (qlcnic_tx_avail(tx_ring) > nr_desc) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
				netif_tx_wake_queue(tx_ring->txq);
		} else {
			adapter->stats.xmit_off++;
			__netif_tx_unlock_bh(tx_ring->txq);
			return -EBUSY;
		}
	}

	do {
		cmd_desc = &cmd_desc_arr[i];

		pbuf = &tx_ring->cmd_buf_arr[producer];
		pbuf->skb = NULL;
		pbuf->frag_count = 0;

		memcpy(&tx_ring->desc_head[producer],
			&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));

		producer = get_next_index(producer, tx_ring->num_desc);
		i++;

	} while (i != nr_desc);

	tx_ring->producer = producer;

	qlcnic_update_cmd_producer(adapter, tx_ring);

	__netif_tx_unlock_bh(tx_ring->txq);

	return 0;
}
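/*
 * Note (added comment): qlcnic_send_cmd_descs() queues firmware control
 * requests on the regular transmit ring, so it takes __netif_tx_lock_bh()
 * and checks qlcnic_tx_avail() just like the data-path transmit routine
 * before copying the descriptors and advancing the producer index.
 */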
static int
qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
				__le16 vlan_id, unsigned op)
{
	struct qlcnic_nic_req req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&req.words[0];
	mac_req->op = op;
	memcpy(mac_req->mac_addr, addr, 6);

	vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
	vlan_req->vlan_id = vlan_id;

	return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
	struct list_head *head;
	struct qlcnic_mac_list_s *cur;

	/* look up if already exists */
	list_for_each(head, &adapter->mac_list) {
		cur = list_entry(head, struct qlcnic_mac_list_s, list);
		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
			return 0;
	}

	cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
	if (cur == NULL) {
		dev_err(&adapter->netdev->dev,
			"failed to add mac address filter\n");
		return -ENOMEM;
	}

	memcpy(cur->mac_addr, addr, ETH_ALEN);

	if (qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
		kfree(cur);
		return -EIO;
	}

	list_add_tail(&cur->list, &adapter->mac_list);
	return 0;
}
void qlcnic_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	u32 mode = VPORT_MISS_MODE_DROP;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return;

	qlcnic_nic_add_mac(adapter, adapter->mac_addr);
	qlcnic_nic_add_mac(adapter, bcast_addr);

	if (netdev->flags & IFF_PROMISC) {
		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
			mode = VPORT_MISS_MODE_ACCEPT_ALL;
		goto send_fw_cmd;
	}

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > adapter->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
		goto send_fw_cmd;
	}

	if (!netdev_mc_empty(netdev)) {
		netdev_for_each_mc_addr(ha, netdev) {
			qlcnic_nic_add_mac(adapter, ha->addr);
		}
	}

send_fw_cmd:
	if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
		qlcnic_alloc_lb_filters_mem(adapter);
		adapter->mac_learn = 1;
	} else {
		adapter->mac_learn = 0;
	}

	qlcnic_nic_set_promisc(adapter, mode);
}
int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
	struct qlcnic_nic_req req;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
			((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(mode);

	return qlcnic_send_cmd_descs(adapter,
				(struct cmd_desc_type0 *)&req, 1);
}
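/*
 * Note (added comment): the configuration helpers below all follow the
 * same pattern as qlcnic_nic_set_promisc() above - zero a qlcnic_nic_req,
 * set qhdr to QLCNIC_HOST_REQUEST << 23, encode the opcode and port number
 * in req_hdr, fill req.words[], and hand the request to
 * qlcnic_send_cmd_descs() as a single command descriptor.
 */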
void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mac_list_s *cur;
	struct list_head *head = &adapter->mac_list;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
		qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, 0, QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
{
	struct qlcnic_filter *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	int i;

	for (i = 0; i < adapter->fhash.fmax; i++) {
		head = &(adapter->fhash.fhead[i]);

		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
			if (jiffies >
			    (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
				qlcnic_sre_macaddr_change(adapter,
					tmp_fil->faddr, tmp_fil->vlan_id,
					tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
					QLCNIC_MAC_DEL);
				spin_lock_bh(&adapter->mac_learn_lock);
				adapter->fhash.fnum--;
				hlist_del(&tmp_fil->fnode);
				spin_unlock_bh(&adapter->mac_learn_lock);
				kfree(tmp_fil);
			}
		}
	}
}
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
{
	struct qlcnic_filter *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	int i;

	for (i = 0; i < adapter->fhash.fmax; i++) {
		head = &(adapter->fhash.fhead[i]);

		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
			qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
				tmp_fil->vlan_id, tmp_fil->vlan_id ?
				QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
			spin_lock_bh(&adapter->mac_learn_lock);
			adapter->fhash.fnum--;
			hlist_del(&tmp_fil->fnode);
			spin_unlock_bh(&adapter->mac_learn_lock);
			kfree(tmp_fil);
		}
	}
}
int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
{
	struct qlcnic_nic_req req;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
	req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
		((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));

	req.words[0] = cpu_to_le64(flag);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv)
		dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
				flag ? "Set" : "Reset");
	return rv;
}
int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
	if (qlcnic_set_fw_loopback(adapter, mode))
		return -EIO;

	if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
		qlcnic_set_fw_loopback(adapter, mode);
		return -EIO;
	}

	return 0;
}
void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
{
	int mode = VPORT_MISS_MODE_DROP;
	struct net_device *netdev = adapter->netdev;

	qlcnic_set_fw_loopback(adapter, 0);

	if (netdev->flags & IFF_PROMISC)
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
	else if (netdev->flags & IFF_ALLMULTI)
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;

	qlcnic_nic_set_promisc(adapter, mode);
}
/*
 * Send the interrupt coalescing parameter set by ethtool to the card.
 */
int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
		((u64) adapter->portnum << 16));

	req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
	req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
			((u64) adapter->ahw->coal.rx_time_us) << 16);
	req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
			((u64) adapter->ahw->coal.type) << 32 |
			((u64) adapter->ahw->coal.sts_ring_mask) << 40);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send interrupt coalescing parameters\n");

	return rv;
}
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send configure hw lro request\n");

	return rv;
}
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
		((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send configure bridge mode request\n");

	adapter->flags ^= QLCNIC_BRIDGE_ENABLED;

	return rv;
}
#define RSS_HASHTYPE_IP_TCP	0x3

int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int i, rv;

	static const u64 key[] = {
		0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
		0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
		0x255b0ec26d5a56daULL
	};

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	/*
	 * RSS request:
	 * bits 3-0: hash_method
	 *	5-4: hash_type_ipv4
	 *	7-6: hash_type_ipv6
	 *	  8: enable
	 *	  9: use indirection table
	 *	63-48: indirection table mask
	 */
	word =  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
		((u64)(enable & 0x1) << 8) |
		((0x7ULL) << 48);
	req.words[0] = cpu_to_le64(word);
	for (i = 0; i < 5; i++)
		req.words[i+1] = cpu_to_le64(key[i]);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev, "could not configure RSS\n");

	return rv;
}
int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
{
	struct qlcnic_nic_req req;
	struct qlcnic_ipaddr *ipa;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(cmd);
	ipa = (struct qlcnic_ipaddr *)&req.words[1];
	ipa->ipv4 = ip;

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
				"could not notify %s IP 0x%x request\n",
				(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);

	return rv;
}
int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);
	req.words[0] = cpu_to_le64(enable | (enable << 8));

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
				"could not configure link notification\n");

	return rv;
}
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
		((u64)adapter->portnum << 16) |
		((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);

	req.req_hdr = cpu_to_le64(word);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
				"could not cleanup lro flows\n");

	return rv;
}
/*
 * qlcnic_change_mtu - Change the Maximum Transfer Unit
 * @returns 0 on success, negative on failure
 */

int qlcnic_change_mtu(struct net_device *netdev, int mtu)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
		dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
			" not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
		return -EINVAL;
	}

	rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);

	if (!rc)
		netdev->mtu = mtu;

	return rc;
}
u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		u32 changed = features ^ netdev->features;
		features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
	}

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}
int qlcnic_set_features(struct net_device *netdev, u32 features)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	u32 changed = netdev->features ^ features;
	int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;

	if (!(changed & NETIF_F_LRO))
		return 0;

	netdev->features = features ^ NETIF_F_LRO;

	if (qlcnic_config_hw_lro(adapter, hw_lro))
		return -EIO;

	if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
		return -EIO;

	return 0;
}
/*
 * Changes the CRB window to the specified window.
 */
/* Returns < 0 if off is not valid,
 *	 1 if window access is needed. 'off' is set to offset from
 *	   CRB space in 128M pci map
 *	 0 if no window access is needed. 'off' is set to 2M addr
 * In: 'off' is offset from base in 128M pci map
 */
static int
qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
		ulong off, void __iomem **addr)
{
	const struct crb_128M_2M_sub_block_map *m;

	if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
		return -EINVAL;

	off -= QLCNIC_PCI_CRBSPACE;

	/*
	 * Try direct map
	 */
	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];

	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
		*addr = adapter->ahw->pci_base0 + m->start_2M +
			(off - m->start_128M);
		return 0;
	}

	/*
	 * Not in direct map, use crb window
	 */
	*addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
	return 1;
}
/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static int
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
	u32 window;
	void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;

	off -= QLCNIC_PCI_CRBSPACE;

	window = CRB_HI(off);
	if (window == 0) {
		dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
		return -EIO;
	}

	writel(window, addr);
	if (readl(addr) != window) {
		if (printk_ratelimit())
			dev_warn(&adapter->pdev->dev,
				"failed to set CRB window to %d off 0x%lx\n",
				window, off);
		return -EIO;
	}
	return 0;
}
int
qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
{
	unsigned long flags;
	int rv;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0) {
		writel(data, addr);
		return 0;
	}

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw->crb_lock, flags);
		crb_win_lock(adapter);
		rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
		if (!rv)
			writel(data, addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
		return rv;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	return -EIO;
}
int
qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
	unsigned long flags;
	int rv;
	u32 data = -1;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0)
		return readl(addr);

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw->crb_lock, flags);
		crb_win_lock(adapter);
		if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
			data = readl(addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
		return data;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	return -1;
}
void __iomem *
qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
{
	void __iomem *addr = NULL;

	WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));

	return addr;
}
static int
qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
		u64 addr, u32 *start)
{
	u32 window;

	window = OCM_WIN_P3P(addr);

	writel(window, adapter->ahw->ocm_win_crb);
	/* read back to flush */
	readl(adapter->ahw->ocm_win_crb);

	*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
	return 0;
}
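/*
 * Illustrative example (added, not part of the original source): for an
 * OCM offset addr = 0x123456, OCM_WIN_P3P(addr) = addr & 0xffc0000 =
 * 0x100000 selects the window, while GET_MEM_OFFS_2M(addr) =
 * addr & MASK(18) = 0x23456 is the offset within that window, so *start
 * becomes QLCNIC_PCI_OCM0_2M + 0x23456.
 */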
static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
		u64 *data, int op)
{
	void __iomem *addr;
	int ret;
	u32 start;

	mutex_lock(&adapter->ahw->mem_lock);

	ret = qlcnic_pci_set_window_2M(adapter, off, &start);
	if (ret != 0)
		goto unlock;

	addr = adapter->ahw->pci_base0 + start;

	if (op == 0)	/* read */
		*data = readq(addr);
	else		/* write */
		writeq(*data, addr);

unlock:
	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
void
qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
{
	void __iomem *addr = adapter->ahw->pci_base0 +
		QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);

	mutex_lock(&adapter->ahw->mem_lock);
	*data = readq(addr);
	mutex_unlock(&adapter->ahw->mem_lock);
}
void
qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
{
	void __iomem *addr = adapter->ahw->pci_base0 +
		QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);

	mutex_lock(&adapter->ahw->mem_lock);
	writeq(data, addr);
	mutex_unlock(&adapter->ahw->mem_lock);
}
#define MAX_CTL_CHECK	1000

int
qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 data)
{
	int i, j, ret;
	u32 temp, off8;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
		return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);

	return -EIO;

correct:
	off8 = off & ~0xf;

	mutex_lock(&adapter->ahw->mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));

	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE),
			(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		ret = -EIO;
		goto done;
	}

	i = (off & 0xf) ? 0 : 2;
	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));
	i = (off & 0xf) ? 2 : 0;

	writel(data & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel((data >> 32) & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));

	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
			(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to write through agent\n");
		ret = -EIO;
	} else
		ret = 0;

done:
	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
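/*
 * Note (added comment): the MIU test agent transfers a 16-byte line, so an
 * 8-byte write is done as read-modify-write - the first agent cycle reads
 * the line, the RDDATA half that is not being written is copied back into
 * WRDATA, the addressed half is replaced with 'data', and a second agent
 * cycle with TA_CTL_WRITE pushes the merged line back to memory.
 */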
int
qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 *data)
{
	int j, ret;
	u32 temp, off8;
	u64 val;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
		return qlcnic_pci_mem_access_direct(adapter,
				off, data, 0);
	}

	return -EIO;

correct:
	off8 = off & ~0xf;

	mutex_lock(&adapter->ahw->mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
		ret = -EIO;
	} else {
		off8 = MIU_TEST_AGT_RDDATA_LO;
		if (off & 0xf)
			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;

		temp = readl(mem_crb + off8 + 4);
		val = (u64)temp << 32;
		val |= readl(mem_crb + off8);

		*data = val;
		ret = 0;
	}

	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
{
	int offset, board_type, magic;
	struct pci_dev *pdev = adapter->pdev;

	offset = QLCNIC_FW_MAGIC_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &magic))
		return -EIO;

	if (magic != QLCNIC_BDINFO_MAGIC) {
		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
			magic);
		return -EIO;
	}

	offset = QLCNIC_BRDTYPE_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &board_type))
		return -EIO;

	adapter->ahw->board_type = board_type;

	if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
		u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
		if ((gpio & 0x8000) == 0)
			board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
	}

	switch (board_type) {
	case QLCNIC_BRDTYPE_P3P_HMEZ:
	case QLCNIC_BRDTYPE_P3P_XG_LOM:
	case QLCNIC_BRDTYPE_P3P_10G_CX4:
	case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3P_IMEZ:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
	case QLCNIC_BRDTYPE_P3P_10G_XFP:
	case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
		adapter->ahw->port_type = QLCNIC_XGBE;
		break;
	case QLCNIC_BRDTYPE_P3P_REF_QG:
	case QLCNIC_BRDTYPE_P3P_4_GB:
	case QLCNIC_BRDTYPE_P3P_4_GB_MM:
		adapter->ahw->port_type = QLCNIC_GBE;
		break;
	case QLCNIC_BRDTYPE_P3P_10G_TP:
		adapter->ahw->port_type = (adapter->portnum < 2) ?
			QLCNIC_XGBE : QLCNIC_GBE;
		break;
	default:
		dev_err(&pdev->dev, "unknown board type %x\n", board_type);
		adapter->ahw->port_type = QLCNIC_XGBE;
		break;
	}

	return 0;
}
static int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
	u32 wol_cfg;

	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
	if (wol_cfg & (1UL << adapter->portnum)) {
		wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
		if (wol_cfg & (1 << adapter->portnum))
			return 1;
	}

	return 0;
}
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64((u64)rate << 32);
	req.words[1] = cpu_to_le64(state);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv)
		dev_err(&adapter->pdev->dev, "LED configuration failed.\n");

	return rv;
}
/* FW dump related functions */

static u32
qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
		u32 *buffer)
{
	int i;
	u32 addr, data;
	struct __crb *crb = &entry->region.crb;
	void __iomem *base = adapter->ahw->pci_base0;

	addr = crb->addr;

	for (i = 0; i < crb->no_ops; i++) {
		QLCNIC_RD_DUMP_REG(addr, base, &data);
		*buffer++ = cpu_to_le32(addr);
		*buffer++ = cpu_to_le32(data);
		addr += crb->stride;
	}
	return crb->no_ops * 2 * sizeof(u32);
}
static u32
qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i, k, timeout = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	u32 addr, data;
	u8 no_ops;
	struct __ctrl *ctr = &entry->region.ctrl;
	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;

	addr = ctr->addr;
	no_ops = ctr->no_ops;

	for (i = 0; i < no_ops; i++) {
		for (k = 0; k < 8; k++) {
			if (!(ctr->opcode & (1 << k)))
				continue;
			switch (1 << k) {
			case QLCNIC_DUMP_WCRB:
				QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
				break;
			case QLCNIC_DUMP_RWCRB:
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				QLCNIC_WR_DUMP_REG(addr, base, data);
				break;
			case QLCNIC_DUMP_ANDCRB:
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				QLCNIC_WR_DUMP_REG(addr, base,
					(data & ctr->val2));
				break;
			case QLCNIC_DUMP_ORCRB:
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				QLCNIC_WR_DUMP_REG(addr, base,
					(data | ctr->val3));
				break;
			case QLCNIC_DUMP_POLLCRB:
				while (timeout <= ctr->timeout) {
					QLCNIC_RD_DUMP_REG(addr, base, &data);
					if ((data & ctr->val2) == ctr->val1)
						break;
					msleep(1);
					timeout++;
				}
				if (timeout > ctr->timeout) {
					dev_info(&adapter->pdev->dev,
						"Timed out, aborting poll CRB\n");
					return -EINVAL;
				}
				break;
			case QLCNIC_DUMP_RD_SAVE:
				addr = t_hdr->saved_state[ctr->index_a];
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			case QLCNIC_DUMP_WRT_SAVED:
				data = t_hdr->saved_state[ctr->index_v];
				addr = t_hdr->saved_state[ctr->index_a];
				QLCNIC_WR_DUMP_REG(addr, base, data);
				break;
			case QLCNIC_DUMP_MOD_SAVE_ST:
				data = t_hdr->saved_state[ctr->index_v];
				data <<= ctr->shl_val;
				data >>= ctr->shr_val;
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			default:
				dev_info(&adapter->pdev->dev,
					"Unknown opcode\n");
				break;
			}
		}
		addr += ctr->stride;
	}
	return 0;
}
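/*
 * Note (added comment): in qlcnic_dump_ctrl() above, ctr->opcode is a
 * bitmask, so the inner k-loop applies every operation whose bit is set
 * to the same CRB address before the address advances by ctr->stride.
 */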
static u32
qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
		u32 *buffer)
{
	int loop;
	u32 val, data = 0;
	struct __mux *mux = &entry->region.mux;
	void __iomem *base = adapter->ahw->pci_base0;

	val = mux->val;
	for (loop = 0; loop < mux->no_ops; loop++) {
		QLCNIC_WR_DUMP_REG(mux->addr, base, val);
		QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
		*buffer++ = cpu_to_le32(val);
		*buffer++ = cpu_to_le32(data);
		val += mux->val_stride;
	}
	return 2 * mux->no_ops * sizeof(u32);
}
static u32
qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
		u32 *buffer)
{
	int i, loop;
	u32 cnt, addr, data, que_id = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __queue *que = &entry->region.que;

	addr = que->read_addr;
	cnt = que->read_addr_cnt;

	for (loop = 0; loop < que->no_ops; loop++) {
		QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
		addr = que->read_addr;
		for (i = 0; i < cnt; i++) {
			QLCNIC_RD_DUMP_REG(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += que->read_addr_stride;
		}
		que_id += que->stride;
	}
	return que->no_ops * cnt * sizeof(u32);
}
static u32
qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
		u32 *buffer)
{
	int i;
	u32 data;
	void __iomem *addr;
	struct __ocm *ocm = &entry->region.ocm;

	addr = adapter->ahw->pci_base0 + ocm->read_addr;
	for (i = 0; i < ocm->no_ops; i++) {
		data = readl(addr);
		*buffer++ = cpu_to_le32(data);
		addr += ocm->read_addr_stride;
	}
	return ocm->no_ops * sizeof(u32);
}
static u32
qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
		u32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;
	void __iomem *base = adapter->ahw->pci_base0;

	fl_addr = rom->addr;
	size = rom->size / 4;
lock_try:
	lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		msleep(10);
		count++;
		goto lock_try;
	}
	writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
	for (i = 0; i < size; i++) {
		addr = fl_addr & 0xFFFF0000;
		QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		QLCNIC_RD_DUMP_REG(addr, base, &val);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	readl(base + QLCNIC_FLASH_SEM2_ULK);
	return rom->size;
}
static u32
qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __cache *l1 = &entry->region.cache;

	val = l1->init_tag_val;

	for (i = 0; i < l1->no_ops; i++) {
		QLCNIC_WR_DUMP_REG(l1->addr, base, val);
		QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
		addr = l1->read_addr;
		cnt = l1->read_addr_num;
		while (cnt) {
			QLCNIC_RD_DUMP_REG(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += l1->read_addr_stride;
			cnt--;
		}
		val += l1->stride;
	}
	return l1->no_ops * l1->read_addr_num * sizeof(u32);
}
static u32
qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	u8 poll_mask, poll_to, time_out = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __cache *l2 = &entry->region.cache;

	val = l2->init_tag_val;
	poll_mask = LSB(MSW(l2->ctrl_val));
	poll_to = MSB(MSW(l2->ctrl_val));

	for (i = 0; i < l2->no_ops; i++) {
		QLCNIC_WR_DUMP_REG(l2->addr, base, val);
		if (LSW(l2->ctrl_val))
			QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
				LSW(l2->ctrl_val));
		time_out = 0;
		do {
			QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
			if (!(data & poll_mask))
				break;
			msleep(1);
			time_out++;
		} while (time_out <= poll_to);

		if (time_out > poll_to) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return -EINVAL;
		}

		addr = l2->read_addr;
		cnt = l2->read_addr_num;
		while (cnt) {
			QLCNIC_RD_DUMP_REG(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += l2->read_addr_stride;
			cnt--;
		}
		val += l2->stride;
	}
	return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
static u32
qlcnic_read_memory(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	u32 addr, data, test, ret = 0;
	int i, reg_read;
	struct __mem *mem = &entry->region.mem;
	void __iomem *base = adapter->ahw->pci_base0;

	reg_read = mem->size;
	addr = mem->addr;
	/* check for data size of multiple of 16 and 16 byte alignment */
	if ((addr & 0xf) || (reg_read % 16)) {
		dev_info(&adapter->pdev->dev,
			"Unaligned memory addr:0x%x size:0x%x\n",
			addr, reg_read);
		return -EINVAL;
	}

	mutex_lock(&adapter->ahw->mem_lock);

	while (reg_read != 0) {
		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
		QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
			TA_CTL_ENABLE | TA_CTL_START);

		for (i = 0; i < MAX_CTL_CHECK; i++) {
			QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
			if (!(test & TA_CTL_BUSY))
				break;
		}
		if (i == MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
			break;
		}
		for (i = 0; i < 4; i++) {
			QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
			*buffer++ = cpu_to_le32(data);
		}
		addr += 16;
		reg_read -= 16;
		ret += 16;
	}

	mutex_unlock(&adapter->ahw->mem_lock);
	return ret;
}
static u32
qlcnic_dump_nop(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
	return 0;
}
struct qlcnic_dump_operations fw_dump_ops[] = {
	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
};
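/*
 * Note (added comment): qlcnic_dump_fw() below walks the dump template and
 * matches each entry's hdr.type against the opcode column of fw_dump_ops[]
 * to find the handler that captures that region into the dump buffer.
 */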
/* Walk the template and collect dump for each entry in the dump template */
static int
qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
	u32 size)
{
	int ret = 1;

	if (size != entry->hdr.cap_size) {
		dev_info(dev,
		"Invalid dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
		entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
		dev_info(dev, "Aborting further dump capture\n");
		ret = 0;
	}

	return ret;
}
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	u32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};
	int i, k, ops_cnt, ops_index, dump_size = 0;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	struct qlcnic_dump_entry *entry;
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			"Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}

	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & tmpl_hdr->drv_cap_mask)
			dump_size += tmpl_hdr->cap_sizes[k];
	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data) {
		dev_info(&adapter->pdev->dev,
			"Unable to allocate (%d KB) for fw dump\n",
			dump_size / 1024);
		return -ENOMEM;
	}

	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = tmpl_hdr->num_entries;
	ops_cnt = ARRAY_SIZE(fw_dump_ops);
	entry_offset = tmpl_hdr->offset;
	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
	tmpl_hdr->sys_info[1] = adapter->fw_version;

	for (i = 0; i < no_entries; i++) {
		entry = (void *)tmpl_hdr + entry_offset;
		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}
		if (ops_index == ops_cnt) {
			dev_info(&adapter->pdev->dev,
				"Invalid entry type %d, exiting dump\n",
				entry->hdr.type);
			goto error;
		}

		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
			dump))
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}

	if (dump_size != buf_offset) {
		dev_info(&adapter->pdev->dev,
			"Captured(%d) and expected size(%d) do not match\n",
			buf_offset, dump_size);
		goto error;
	}

	fw_dump->clr = 1;
	snprintf(mesg, sizeof(mesg), "FW dump for device: %d\n",
		adapter->pdev->devfn);
	dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
		fw_dump->size);
	/* Send a udev event to notify availability of FW dump */
	kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
	return 0;

error:
	vfree(fw_dump->data);
	return -EINVAL;
}