/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */
#include "qlcnic.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#define MASK(n)			((1ULL<<(n))-1)
#define OCM_WIN_P3P(addr)	(addr & 0xffc0000)

#define GET_MEM_OFFS_2M(addr)	(addr & MASK(18))

#define CRB_BLK(off)		((off >> 20) & 0x3f)
#define CRB_SUBBLK(off)		((off >> 16) & 0xf)
#define CRB_WINDOW_2M		(0x130060)
#define CRB_HI(off)		((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
#define CRB_INDIRECT_2M		(0x1e0000UL)
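/*
 * A CRB offset in the 128M map carries a 6-bit block number in bits 25:20
 * (CRB_BLK) and a 4-bit sub-block in bits 19:16 (CRB_SUBBLK).  Offsets that
 * are not covered by the direct map below are reached through a 64KB
 * indirect region at CRB_INDIRECT_2M, steered by the value written to the
 * CRB_WINDOW_2M register.
 */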
static inline u64 readq(void __iomem *addr)
{
	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
}

static inline void writeq(u64 val, void __iomem *addr)
{
	writel(((u32) (val)), (addr));
	writel(((u32) (val >> 32)), (addr + 4));
}
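/*
 * These local readq/writeq helpers emulate a 64-bit MMIO access with two
 * 32-bit accesses (low word first, then high word at addr + 4); the
 * combined 64-bit transfer is therefore not atomic.
 */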
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
	{{{0, 0, 0, 0} } },			/* 0: PCI */
	{{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
	  {1, 0x0110000, 0x0120000, 0x130000},
	  {1, 0x0120000, 0x0122000, 0x124000},
	  {1, 0x0130000, 0x0132000, 0x126000},
	  {1, 0x0140000, 0x0142000, 0x128000},
	  {1, 0x0150000, 0x0152000, 0x12a000},
	  {1, 0x0160000, 0x0170000, 0x110000},
	  {1, 0x0170000, 0x0172000, 0x12e000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x01e0000, 0x01e0800, 0x122000},
	  {0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
	{{{0, 0, 0, 0} } },			/* 3: */
	{{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
	{{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
	{{{1, 0x0800000, 0x0802000, 0x170000},	/* 8: SQM0 */
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1*/
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2*/
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3*/
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
	{{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
	{{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
	{{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
	{{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
	{{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
	{{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
	{{{0, 0, 0, 0} } },			/* 23: */
	{{{0, 0, 0, 0} } },			/* 24: */
	{{{0, 0, 0, 0} } },			/* 25: */
	{{{0, 0, 0, 0} } },			/* 26: */
	{{{0, 0, 0, 0} } },			/* 27: */
	{{{0, 0, 0, 0} } },			/* 28: */
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
	{{{0} } },				/* 32: PCI */
	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
	  {1, 0x2110000, 0x2120000, 0x130000},
	  {1, 0x2120000, 0x2122000, 0x124000},
	  {1, 0x2130000, 0x2132000, 0x126000},
	  {1, 0x2140000, 0x2142000, 0x128000},
	  {1, 0x2150000, 0x2152000, 0x12a000},
	  {1, 0x2160000, 0x2170000, 0x110000},
	  {1, 0x2170000, 0x2172000, 0x12e000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000},
	  {0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
	{{{0} } },				/* 35: */
	{{{0} } },				/* 36: */
	{{{0} } },				/* 37: */
	{{{0} } },				/* 38: */
	{{{0} } },				/* 39: */
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
	{{{0} } },				/* 52: */
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
	{{{0} } },				/* 59: I2C0 */
	{{{0} } },				/* 60: I2C1 */
	{{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
};
/*
 * top 12 bits of crb internal address (hub, agent)
 */
static const unsigned crb_hub_agt[64] = {
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
	QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
	QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};
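/*
 * Illustrative sketch (not part of the original driver): how the value
 * programmed into CRB_WINDOW_2M is derived for an offset that falls outside
 * the direct map.  The helper name is hypothetical; it only combines the
 * macros and the crb_hub_agt[] table above, mirroring what
 * qlcnic_pci_set_crbwindow_2M() does later in this file.  'off' is assumed
 * to be relative to the start of CRB space.
 */
static inline void qlcnic_crb_window_sketch(ulong off, u32 *window, u32 *win_off)
{
	*window = CRB_HI(off);		/* hub/agent id | bits 19:16 of off */
	*win_off = off & MASK(16);	/* byte offset inside the 64KB window */
}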
/* PCI Windowing for DDR regions. */

#define QLCNIC_PCIE_SEM_TIMEOUT	10000
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
	int done = 0, timeout = 0;

	while (!done) {
		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
		if (done == 1)
			break;
		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
			dev_err(&adapter->pdev->dev,
				"Failed to acquire sem=%d lock; holdby=%d\n",
				sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
			return -EIO;
		}
		msleep(1);
	}

	if (id_reg)
		QLCWR32(adapter, id_reg, adapter->portnum);

	return 0;
}
void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
	QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
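/*
 * Usage sketch (illustrative only, not upstream code): the hardware
 * semaphores above are taken around any multi-step register sequence that
 * is shared between PCI functions.  The semaphore index and id register a
 * real caller would use are placeholders here.
 */
static inline int qlcnic_sem_protected_op_sketch(struct qlcnic_adapter *adapter,
						 int sem, u32 id_reg)
{
	int err = qlcnic_pcie_sem_lock(adapter, sem, id_reg);

	if (err)
		return err;

	/* ... access registers owned by this semaphore ... */

	qlcnic_pcie_sem_unlock(adapter, sem);
	return 0;
}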
static int
qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
		struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
	u32 i, producer, consumer;
	struct qlcnic_cmd_buffer *pbuf;
	struct cmd_desc_type0 *cmd_desc;
	struct qlcnic_host_tx_ring *tx_ring;

	i = 0;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return -EIO;

	tx_ring = adapter->tx_ring;
	__netif_tx_lock_bh(tx_ring->txq);

	producer = tx_ring->producer;
	consumer = tx_ring->sw_consumer;

	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
		netif_tx_stop_queue(tx_ring->txq);
		smp_mb();
		if (qlcnic_tx_avail(tx_ring) > nr_desc) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
				netif_tx_wake_queue(tx_ring->txq);
		} else {
			adapter->stats.xmit_off++;
			__netif_tx_unlock_bh(tx_ring->txq);
			return -EBUSY;
		}
	}

	do {
		cmd_desc = &cmd_desc_arr[i];

		pbuf = &tx_ring->cmd_buf_arr[producer];
		pbuf->skb = NULL;
		pbuf->frag_count = 0;

		memcpy(&tx_ring->desc_head[producer],
			&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));

		producer = get_next_index(producer, tx_ring->num_desc);
		i++;

	} while (i != nr_desc);

	tx_ring->producer = producer;

	qlcnic_update_cmd_producer(adapter, tx_ring);

	__netif_tx_unlock_bh(tx_ring->txq);

	return 0;
}
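/*
 * The configuration helpers below all follow the same pattern: build a
 * qlcnic_nic_req on the stack, encode the request opcode and port number in
 * req_hdr, put the payload in req.words[], and post it to the firmware as a
 * single command descriptor through qlcnic_send_cmd_descs().
 */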
static int
qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
			__le16 vlan_id, unsigned op)
{
	struct qlcnic_nic_req req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&req.words[0];
	mac_req->op = op;
	memcpy(mac_req->mac_addr, addr, 6);

	vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
	vlan_req->vlan_id = vlan_id;

	return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
	struct list_head *head;
	struct qlcnic_mac_list_s *cur;

	/* look up if already exists */
	list_for_each(head, &adapter->mac_list) {
		cur = list_entry(head, struct qlcnic_mac_list_s, list);
		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
			return 0;
	}

	cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
	if (cur == NULL) {
		dev_err(&adapter->netdev->dev,
			"failed to add mac address filter\n");
		return -ENOMEM;
	}
	memcpy(cur->mac_addr, addr, ETH_ALEN);

	if (qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
		kfree(cur);
		return -EIO;
	}

	list_add_tail(&cur->list, &adapter->mac_list);
	return 0;
}
void qlcnic_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	u32 mode = VPORT_MISS_MODE_DROP;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return;

	qlcnic_nic_add_mac(adapter, adapter->mac_addr);
	qlcnic_nic_add_mac(adapter, bcast_addr);

	if (netdev->flags & IFF_PROMISC) {
		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
			mode = VPORT_MISS_MODE_ACCEPT_ALL;
		goto send_fw_cmd;
	}

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > adapter->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
		goto send_fw_cmd;
	}

	if (!netdev_mc_empty(netdev)) {
		netdev_for_each_mc_addr(ha, netdev) {
			qlcnic_nic_add_mac(adapter, ha->addr);
		}
	}

send_fw_cmd:
	qlcnic_nic_set_promisc(adapter, mode);
}
int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
	struct qlcnic_nic_req req;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
			((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(mode);

	return qlcnic_send_cmd_descs(adapter,
				(struct cmd_desc_type0 *)&req, 1);
}
void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mac_list_s *cur;
	struct list_head *head = &adapter->mac_list;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
		qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, 0, QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
{
	struct qlcnic_filter *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	int i;

	for (i = 0; i < adapter->fhash.fmax; i++) {
		head = &(adapter->fhash.fhead[i]);

		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
			/* delete filters that have aged out (jiffies based) */
			if (jiffies >
			    (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
				qlcnic_sre_macaddr_change(adapter,
					tmp_fil->faddr, tmp_fil->vlan_id,
					tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
					QLCNIC_MAC_DEL);
				spin_lock_bh(&adapter->mac_learn_lock);
				adapter->fhash.fnum--;
				hlist_del(&tmp_fil->fnode);
				spin_unlock_bh(&adapter->mac_learn_lock);
				kfree(tmp_fil);
			}
		}
	}
}
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
{
	struct qlcnic_filter *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	int i;

	for (i = 0; i < adapter->fhash.fmax; i++) {
		head = &(adapter->fhash.fhead[i]);

		hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
			qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
				tmp_fil->vlan_id, tmp_fil->vlan_id ?
				QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
			spin_lock_bh(&adapter->mac_learn_lock);
			adapter->fhash.fnum--;
			hlist_del(&tmp_fil->fnode);
			spin_unlock_bh(&adapter->mac_learn_lock);
			kfree(tmp_fil);
		}
	}
}
/*
 * Send the interrupt coalescing parameter set by ethtool to the card.
 */
int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
		((u64) adapter->portnum << 16));

	req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
	req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
			((u64) adapter->ahw->coal.rx_time_us) << 16);
	req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
			((u64) adapter->ahw->coal.type) << 32 |
			((u64) adapter->ahw->coal.sts_ring_mask) << 40);
	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send interrupt coalescing parameters\n");
	return rv;
}
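/*
 * Illustrative sketch (not upstream code): an ethtool ->set_coalesce
 * handler would update the fields consumed by qlcnic_config_intr_coalesce()
 * and then push them to the firmware.  Only fields referenced above are
 * touched; struct ethtool_coalesce is the standard ethtool API and is
 * assumed to be visible through the driver headers.
 */
static inline int qlcnic_set_rx_coalesce_sketch(struct qlcnic_adapter *adapter,
						struct ethtool_coalesce *ethcoal)
{
	adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs;
	adapter->ahw->coal.rx_packets = ethcoal->rx_max_coalesced_frames;

	return qlcnic_config_intr_coalesce(adapter);
}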
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send configure hw lro request\n");

	return rv;
}
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
		((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send configure bridge mode request\n");

	adapter->flags ^= QLCNIC_BRIDGE_ENABLED;

	return rv;
}
#define RSS_HASHTYPE_IP_TCP	0x3

int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int i, rv;

	static const u64 key[] = {
		0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
		0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
		0x255b0ec26d5a56daULL
	};

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	/*
	 * RSS request:
	 * bits 3-0: hash_method
	 *      5-4: hash_type_ipv4
	 *      7-6: hash_type_ipv6
	 *        8: enable
	 *        9: use indirection table
	 *    63-48: indirection table mask
	 */
	word =  ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
		((u64)(enable & 0x1) << 8) |
		((0x7ULL) << 48);
	req.words[0] = cpu_to_le64(word);
	for (i = 0; i < 5; i++)
		req.words[i+1] = cpu_to_le64(key[i]);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev, "could not configure RSS\n");

	return rv;
}
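/*
 * Worked example for the request word above: with enable = 1, the hash-type
 * and enable fields contribute (0x3 << 4) | (0x3 << 6) | (1 << 8) = 0x1f0;
 * the indirection-table mask then sits in bits 63:48 on top of that.
 */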
int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
{
	struct qlcnic_nic_req req;
	struct qlcnic_ipaddr *ipa;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(cmd);
	ipa = (struct qlcnic_ipaddr *)&req.words[1];
	ipa->ipv4 = ip;

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"could not notify %s IP 0x%x request\n",
			(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);

	return rv;
}
int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);
	req.words[0] = cpu_to_le64(enable | (enable << 8));

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"could not configure link notification\n");

	return rv;
}
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
		((u64)adapter->portnum << 16) |
		((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);

	req.req_hdr = cpu_to_le64(word);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"could not cleanup lro flows\n");

	return rv;
}
/*
 * qlcnic_change_mtu - Change the Maximum Transfer Unit
 * @returns 0 on success, negative on failure
 */

int qlcnic_change_mtu(struct net_device *netdev, int mtu)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
		dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
			" not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
		return -EINVAL;
	}

	rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);

	if (!rc)
		netdev->mtu = mtu;

	return rc;
}
u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
		u32 changed = features ^ netdev->features;
		features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
	}

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	return features;
}
int qlcnic_set_features(struct net_device *netdev, u32 features)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	u32 changed = netdev->features ^ features;
	int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;

	if (!(changed & NETIF_F_LRO))
		return 0;

	netdev->features = features ^ NETIF_F_LRO;

	if (qlcnic_config_hw_lro(adapter, hw_lro))
		return -EIO;

	if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
		return -EIO;

	return 0;
}
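/*
 * Illustrative sketch (not part of this file): how the helpers above are
 * typically wired into net_device_ops.  The real table lives in
 * qlcnic_main.c; the member names follow the same kernel generation as the
 * u32-based feature hooks used here.
 */
static const struct net_device_ops qlcnic_netdev_ops_sketch __maybe_unused = {
	.ndo_set_rx_mode	= qlcnic_set_multi,
	.ndo_change_mtu		= qlcnic_change_mtu,
	.ndo_fix_features	= qlcnic_fix_features,
	.ndo_set_features	= qlcnic_set_features,
};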
/*
 * Changes the CRB window to the specified window.
 */
/* Returns < 0 if off is not valid,
 *	 1 if window access is needed. 'off' is set to offset from
 *	   CRB space in 128M pci map
 *	 0 if no window access is needed. 'off' is set to 2M addr
 * In: 'off' is offset from base in 128M pci map
 */
static int
qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
		ulong off, void __iomem **addr)
{
	const struct crb_128M_2M_sub_block_map *m;

	if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
		return -EINVAL;

	off -= QLCNIC_PCI_CRBSPACE;

	/*
	 * Try direct map
	 */
	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];

	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
		*addr = adapter->ahw->pci_base0 + m->start_2M +
			(off - m->start_128M);
		return 0;
	}

	/*
	 * Not in direct map, use crb window
	 */
	*addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
	return 1;
}
/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static int
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
	u32 window;
	void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;

	off -= QLCNIC_PCI_CRBSPACE;

	window = CRB_HI(off);
	if (window == 0) {
		dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
		return -EIO;
	}

	writel(window, addr);
	if (readl(addr) != window) {
		if (printk_ratelimit())
			dev_warn(&adapter->pdev->dev,
				"failed to set CRB window to %d off 0x%lx\n",
				window, off);
		return -EIO;
	}
	return 0;
}
int
qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
{
	unsigned long flags;
	int rv;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0) {
		writel(data, addr);
		return 0;
	}

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw->crb_lock, flags);
		crb_win_lock(adapter);
		rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
		if (!rv)
			writel(data, addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
		return rv;
	}

	dev_err(&adapter->pdev->dev,
		"%s: invalid offset: 0x%016lx\n", __func__, off);
	return -EIO;
}
u32
qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
	unsigned long flags;
	int rv;
	u32 data = -1;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0)
		return readl(addr);

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw->crb_lock, flags);
		crb_win_lock(adapter);
		if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
			data = readl(addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
		return data;
	}

	dev_err(&adapter->pdev->dev,
		"%s: invalid offset: 0x%016lx\n", __func__, off);
	return -1;
}
void __iomem *
qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
{
	void __iomem *addr = NULL;

	WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));

	return addr;
}
static int
qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
		u64 addr, u32 *start)
{
	u32 window;

	window = OCM_WIN_P3P(addr);

	writel(window, adapter->ahw->ocm_win_crb);
	/* read back to flush */
	readl(adapter->ahw->ocm_win_crb);

	*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
	return 0;
}
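/*
 * OCM_WIN_P3P() keeps bits 27:18 of the 64-bit OCM address as the window
 * select written to ocm_win_crb, and GET_MEM_OFFS_2M() keeps the low 18
 * bits (a 256KB page), which are added to QLCNIC_PCI_OCM0_2M to form the
 * offset into the 2M BAR.
 */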
static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
		u64 *data, int op)
{
	void __iomem *addr;
	int ret;
	u32 start;

	mutex_lock(&adapter->ahw->mem_lock);

	ret = qlcnic_pci_set_window_2M(adapter, off, &start);
	if (ret != 0)
		goto unlock;

	addr = adapter->ahw->pci_base0 + start;

	if (op == 0)	/* read */
		*data = readq(addr);
	else		/* write */
		writeq(*data, addr);

unlock:
	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
void
qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
{
	void __iomem *addr = adapter->ahw->pci_base0 +
		QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);

	mutex_lock(&adapter->ahw->mem_lock);
	*data = readq(addr);
	mutex_unlock(&adapter->ahw->mem_lock);
}
void
qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
{
	void __iomem *addr = adapter->ahw->pci_base0 +
		QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);

	mutex_lock(&adapter->ahw->mem_lock);
	writeq(data, addr);
	mutex_unlock(&adapter->ahw->mem_lock);
}
#define MAX_CTL_CHECK	1000

static int
qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 data)
{
	int i, j, ret;
	u32 temp, off8;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
		return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);

	return -EIO;

correct:
	off8 = off & ~0xf;

	mutex_lock(&adapter->ahw->mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));

	/* read back the 16-byte line the 8-byte write falls in */
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE),
			(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		ret = -EIO;
		goto done;
	}

	/* preserve the untouched half of the 16-byte line ... */
	i = (off & 0xf) ? 0 : 2;
	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));
	/* ... and place the new 64-bit value in the other half */
	i = (off & 0xf) ? 2 : 0;

	writel(data & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel((data >> 32) & 0xffffffff,
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));

	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
			(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
				"failed to write through agent\n");
		ret = -EIO;
	} else
		ret = 0;

done:
	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
static int
qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 *data)
{
	int j, ret;
	u32 temp, off8;
	u64 val;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
		return qlcnic_pci_mem_access_direct(adapter,
				off, data, 0);
	}

	return -EIO;

correct:
	off8 = off & ~0xf;

	mutex_lock(&adapter->ahw->mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
				"failed to read through agent\n");
		ret = -EIO;
	} else {
		off8 = MIU_TEST_AGT_RDDATA_LO;
		if (off & 0xf)
			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;

		temp = readl(mem_crb + off8 + 4);
		val = (u64)temp << 32;
		val |= readl(mem_crb + off8);

		*data = val;
		ret = 0;
	}

	mutex_unlock(&adapter->ahw->mem_lock);

	return ret;
}
int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
{
	int offset, board_type, magic;
	struct pci_dev *pdev = adapter->pdev;

	offset = QLCNIC_FW_MAGIC_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &magic))
		return -EIO;

	if (magic != QLCNIC_BDINFO_MAGIC) {
		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
			magic);
		return -EIO;
	}

	offset = QLCNIC_BRDTYPE_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &board_type))
		return -EIO;

	adapter->ahw->board_type = board_type;

	if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
		u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
		if ((gpio & 0x8000) == 0)
			board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
	}

	switch (board_type) {
	case QLCNIC_BRDTYPE_P3P_HMEZ:
	case QLCNIC_BRDTYPE_P3P_XG_LOM:
	case QLCNIC_BRDTYPE_P3P_10G_CX4:
	case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3P_IMEZ:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
	case QLCNIC_BRDTYPE_P3P_10G_XFP:
	case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
		adapter->ahw->port_type = QLCNIC_XGBE;
		break;
	case QLCNIC_BRDTYPE_P3P_REF_QG:
	case QLCNIC_BRDTYPE_P3P_4_GB:
	case QLCNIC_BRDTYPE_P3P_4_GB_MM:
		adapter->ahw->port_type = QLCNIC_GBE;
		break;
	case QLCNIC_BRDTYPE_P3P_10G_TP:
		adapter->ahw->port_type = (adapter->portnum < 2) ?
			QLCNIC_XGBE : QLCNIC_GBE;
		break;
	default:
		dev_err(&pdev->dev, "unknown board type %x\n", board_type);
		adapter->ahw->port_type = QLCNIC_XGBE;
		break;
	}

	return 0;
}
int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
	u32 wol_cfg;

	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
	if (wol_cfg & (1UL << adapter->portnum)) {
		wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
		if (wol_cfg & (1 << adapter->portnum))
			return 1;
	}

	return 0;
}
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64((u64)rate << 32);
	req.words[1] = cpu_to_le64(state);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv)
		dev_err(&adapter->pdev->dev, "LED configuration failed.\n");

	return rv;
}
/* FW dump related functions */
static u32
qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
		u32 *buffer)
{
	int i;
	u32 addr, data;
	struct __crb *crb = &entry->region.crb;
	void __iomem *base = adapter->ahw->pci_base0;

	addr = crb->addr;

	for (i = 0; i < crb->no_ops; i++) {
		QLCNIC_RD_DUMP_REG(addr, base, &data);
		*buffer++ = cpu_to_le32(addr);
		*buffer++ = cpu_to_le32(data);
		addr += crb->stride;
	}
	return crb->no_ops * 2 * sizeof(u32);
}
static u32
qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i, k, timeout = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	u32 addr, data;
	u8 no_ops;
	struct __ctrl *ctr = &entry->region.ctrl;
	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;

	addr = ctr->addr;
	no_ops = ctr->no_ops;

	for (i = 0; i < no_ops; i++) {
		/* each bit in ctr->opcode selects one sub-operation */
		for (k = 0; k < 8; k++) {
			if (!(ctr->opcode & (1 << k)))
				continue;
			switch (1 << k) {
			case QLCNIC_DUMP_WCRB:
				QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
				break;
			case QLCNIC_DUMP_RWCRB:
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				QLCNIC_WR_DUMP_REG(addr, base, data);
				break;
			case QLCNIC_DUMP_ANDCRB:
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				QLCNIC_WR_DUMP_REG(addr, base,
					(data & ctr->val2));
				break;
			case QLCNIC_DUMP_ORCRB:
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				QLCNIC_WR_DUMP_REG(addr, base,
					(data | ctr->val3));
				break;
			case QLCNIC_DUMP_POLLCRB:
				while (timeout <= ctr->timeout) {
					QLCNIC_RD_DUMP_REG(addr, base, &data);
					if ((data & ctr->val2) == ctr->val1)
						break;
					msleep(1);
					timeout++;
				}
				if (timeout > ctr->timeout) {
					dev_info(&adapter->pdev->dev,
						"Timed out, aborting poll CRB\n");
					return -EINVAL;
				}
				break;
			case QLCNIC_DUMP_RD_SAVE:
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				QLCNIC_RD_DUMP_REG(addr, base, &data);
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			case QLCNIC_DUMP_WRT_SAVED:
				if (ctr->index_v)
					data = t_hdr->saved_state[ctr->index_v];
				else
					data = ctr->val1;
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				QLCNIC_WR_DUMP_REG(addr, base, data);
				break;
			case QLCNIC_DUMP_MOD_SAVE_ST:
				data = t_hdr->saved_state[ctr->index_v];
				data <<= ctr->shl_val;
				data >>= ctr->shr_val;
				if (ctr->val2)
					data &= ctr->val2;
				data |= ctr->val3;
				data += ctr->val1;
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			default:
				dev_info(&adapter->pdev->dev,
					"Unknown opcode\n");
				break;
			}
		}
		addr += ctr->stride;
	}
	return 0;
}
static u32
qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
	u32 *buffer)
{
	int loop;
	u32 val, data = 0;
	struct __mux *mux = &entry->region.mux;
	void __iomem *base = adapter->ahw->pci_base0;

	val = mux->val;
	for (loop = 0; loop < mux->no_ops; loop++) {
		QLCNIC_WR_DUMP_REG(mux->addr, base, val);
		QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
		*buffer++ = cpu_to_le32(val);
		*buffer++ = cpu_to_le32(data);
		val += mux->val_stride;
	}
	return 2 * mux->no_ops * sizeof(u32);
}
static u32
qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
	u32 *buffer)
{
	int loop, i;
	u32 cnt, addr, data, que_id = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __queue *que = &entry->region.que;

	addr = que->read_addr;
	cnt = que->read_addr_cnt;

	for (loop = 0; loop < que->no_ops; loop++) {
		QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
		for (i = 0; i < cnt; i++) {
			QLCNIC_RD_DUMP_REG(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += que->read_addr_stride;
		}
		que_id += que->stride;
	}
	return que->no_ops * cnt * sizeof(u32);
}
static u32
qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
	u32 *buffer)
{
	int i;
	u32 data;
	void __iomem *addr;
	struct __ocm *ocm = &entry->region.ocm;

	addr = adapter->ahw->pci_base0 + ocm->read_addr;
	for (i = 0; i < ocm->no_ops; i++) {
		data = readl(addr);
		*buffer++ = cpu_to_le32(data);
		addr += ocm->read_addr_stride;
	}
	return ocm->no_ops * sizeof(u32);
}
static u32
qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
	u32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;
	void __iomem *base = adapter->ahw->pci_base0;

	fl_addr = rom->addr;
	size = rom->size / 4;
lock_try:
	lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		msleep(10);
		count++;
		goto lock_try;
	}
	writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
	for (i = 0; i < size; i++) {
		addr = fl_addr & 0xFFFF0000;
		QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		QLCNIC_RD_DUMP_REG(addr, base, &val);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	readl(base + QLCNIC_FLASH_SEM2_ULK);
	return rom->size;
}
static u32
qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __cache *l1 = &entry->region.cache;

	val = l1->init_tag_val;

	for (i = 0; i < l1->no_ops; i++) {
		QLCNIC_WR_DUMP_REG(l1->addr, base, val);
		QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
		addr = l1->read_addr;
		cnt = l1->read_addr_num;
		while (cnt) {
			QLCNIC_RD_DUMP_REG(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += l1->read_addr_stride;
			cnt--;
		}
		val += l1->stride;
	}
	return l1->no_ops * l1->read_addr_num * sizeof(u32);
}
static u32
qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	u8 poll_mask, poll_to, time_out = 0;
	void __iomem *base = adapter->ahw->pci_base0;
	struct __cache *l2 = &entry->region.cache;

	val = l2->init_tag_val;
	poll_mask = LSB(MSW(l2->ctrl_val));
	poll_to = MSB(MSW(l2->ctrl_val));

	for (i = 0; i < l2->no_ops; i++) {
		QLCNIC_WR_DUMP_REG(l2->addr, base, val);
		do {
			QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
				LSW(l2->ctrl_val));
			QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
			if (!(data & poll_mask))
				break;
			msleep(1);
			time_out++;
		} while (time_out <= poll_to);
		if (time_out > poll_to)
			return -EINVAL;

		addr = l2->read_addr;
		cnt = l2->read_addr_num;
		while (cnt) {
			QLCNIC_RD_DUMP_REG(addr, base, &data);
			*buffer++ = cpu_to_le32(data);
			addr += l2->read_addr_stride;
			cnt--;
		}
		val += l2->stride;
	}
	return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
static u32
qlcnic_read_memory(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	u32 addr, data, test, ret = 0;
	int i, reg_read;
	struct __mem *mem = &entry->region.mem;
	void __iomem *base = adapter->ahw->pci_base0;

	reg_read = mem->size;
	addr = mem->addr;
	/* check for data size of multiple of 16 and 16 byte alignment */
	if ((addr & 0xf) || (reg_read % 16)) {
		dev_info(&adapter->pdev->dev,
			"Unaligned memory addr:0x%x size:0x%x\n",
			addr, reg_read);
		return -EINVAL;
	}

	mutex_lock(&adapter->ahw->mem_lock);

	while (reg_read != 0) {
		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
		QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
		QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
			TA_CTL_ENABLE | TA_CTL_START);

		for (i = 0; i < MAX_CTL_CHECK; i++) {
			QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
			if (!(test & TA_CTL_BUSY))
				break;
		}
		if (i == MAX_CTL_CHECK) {
			if (printk_ratelimit()) {
				dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
				ret = -EINVAL;
				goto out;
			}
		}
		for (i = 0; i < 4; i++) {
			QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
			*buffer++ = cpu_to_le32(data);
		}
		addr += 16;
		reg_read -= 16;
		ret += 16;
	}
out:
	mutex_unlock(&adapter->ahw->mem_lock);
	return mem->size;
}
static u32
qlcnic_dump_nop(struct qlcnic_adapter *adapter,
	struct qlcnic_dump_entry *entry, u32 *buffer)
{
	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
	return 0;
}
struct qlcnic_dump_operations fw_dump_ops[] = {
	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
};
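/*
 * Each template entry carries an opcode in entry->hdr.type; qlcnic_dump_fw()
 * below matches it against fw_dump_ops[].opcode and calls the handler,
 * which returns the number of bytes it wrote into the dump buffer (0 for
 * entries that are skipped).
 */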
/* Walk the template and collect dump for each entry in the dump template */
static int
qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
	u32 size)
{
	int ret = 1;
	if (size != entry->hdr.cap_size) {
		dev_info(dev,
		"Invalidate dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
		entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
		dev_info(dev, "Aborting further dump capture\n");
		ret = 0;
	}
	return ret;
}
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	u32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};
	int i, k, ops_cnt, ops_index, dump_size = 0;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	struct qlcnic_dump_entry *entry;
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			"Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & tmpl_hdr->drv_cap_mask)
			dump_size += tmpl_hdr->cap_sizes[k];
	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data) {
		dev_info(&adapter->pdev->dev,
			"Unable to allocate (%d KB) for fw dump\n",
			dump_size / 1024);
		return -ENOMEM;
	}
	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = tmpl_hdr->num_entries;
	ops_cnt = ARRAY_SIZE(fw_dump_ops);
	entry_offset = tmpl_hdr->offset;
	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
	tmpl_hdr->sys_info[1] = adapter->fw_version;

	for (i = 0; i < no_entries; i++) {
		entry = (struct qlcnic_dump_entry *) ((void *) tmpl_hdr +
			entry_offset);
		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}
		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}
		if (ops_index == ops_cnt) {
			dev_info(&adapter->pdev->dev,
				"Invalid entry type %d, exiting dump\n",
				entry->hdr.type);
			goto error;
		}
		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
			dump))
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}
	if (dump_size != buf_offset) {
		dev_info(&adapter->pdev->dev,
			"Captured(%d) and expected size(%d) do not match\n",
			buf_offset, dump_size);
		goto error;
	} else {
		fw_dump->clr = 1;
		snprintf(mesg, sizeof(mesg), "FW dump for device: %d\n",
			adapter->pdev->devfn);
		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
			fw_dump->size);
		/* Send a udev event to notify availability of FW dump */
		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
		return 0;
	}
error:
	vfree(fw_dump->data);
	return -EINVAL;
}