/* drivers/net/qlcnic/qlcnic_hw.c */
/*
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */
#include "qlcnic.h"

#include <linux/slab.h>
#include <net/ip.h>

#define MASK(n) ((1ULL<<(n))-1)
#define OCM_WIN_P3P(addr) (addr & 0xffc0000)

#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))

#define CRB_BLK(off)	((off >> 20) & 0x3f)
#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
#define CRB_WINDOW_2M	(0x130060)
#define CRB_HI(off)	((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
#define CRB_INDIRECT_2M	(0x1e0000UL)
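
/*
 * As the macros above encode it, a CRB offset in the legacy 128MB map
 * decomposes as follows: bits 25:20 select the CRB block, bits 19:16 the
 * 64KB sub-block within it, and the low 16 bits address registers inside
 * that sub-block.  For example, off = 0x0610230 gives CRB_BLK = 6 (the NIU
 * block), CRB_SUBBLK = 1, and CRB_HI = (crb_hub_agt[6] << 20) | 0x10000,
 * which is the value programmed into CRB_WINDOW_2M for indirect access.
 */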
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
	return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
	writel(((u32) (val)), (addr));
	writel(((u32) (val >> 32)), (addr + 4));
}
#endif
#define ADDR_IN_RANGE(addr, low, high)	\
	(((addr) < (high)) && ((addr) >= (low)))

#define PCI_OFFSET_FIRST_RANGE(adapter, off)	\
	((adapter)->ahw.pci_base0 + (off))

static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
					    unsigned long off)
{
	if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
		return PCI_OFFSET_FIRST_RANGE(adapter, off);

	return NULL;
}
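
/*
 * crb_128M_2M_map translates the legacy 128MB CRB layout into the 2MB BAR
 * used by this driver.  Each of the 64 CRB blocks carries up to 16
 * sub-block entries of the form {valid, start_128M, end_128M, start_2M}:
 * when a 128MB offset falls inside [start_128M, end_128M) of a valid entry
 * it maps directly to start_2M plus the offset within the sub-block (see
 * qlcnic_pci_get_crb_addr_2M() below); otherwise the indirect CRB window
 * has to be used.
 */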
static const struct crb_128M_2M_block_map
crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
	{{{0, 0,         0,         0} } },		/* 0: PCI */
	{{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
		{1, 0x0110000, 0x0120000, 0x130000},
		{1, 0x0120000, 0x0122000, 0x124000},
		{1, 0x0130000, 0x0132000, 0x126000},
		{1, 0x0140000, 0x0142000, 0x128000},
		{1, 0x0150000, 0x0152000, 0x12a000},
		{1, 0x0160000, 0x0170000, 0x110000},
		{1, 0x0170000, 0x0172000, 0x12e000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x01e0000, 0x01e0800, 0x122000},
		{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
	{{{0, 0, 0, 0} } },			/* 3: */
	{{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
	{{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
	{{{1, 0x0800000, 0x0802000, 0x170000},	/* 8: SQM0 */
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1*/
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2*/
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3*/
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
	{{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
	{{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
	{{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
	{{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
	{{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
	{{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
	{{{0, 0, 0, 0} } },			/* 23: */
	{{{0, 0, 0, 0} } },			/* 24: */
	{{{0, 0, 0, 0} } },			/* 25: */
	{{{0, 0, 0, 0} } },			/* 26: */
	{{{0, 0, 0, 0} } },			/* 27: */
	{{{0, 0, 0, 0} } },			/* 28: */
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
	{{{0} } },				/* 32: PCI */
	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
		{1, 0x2110000, 0x2120000, 0x130000},
		{1, 0x2120000, 0x2122000, 0x124000},
		{1, 0x2130000, 0x2132000, 0x126000},
		{1, 0x2140000, 0x2142000, 0x128000},
		{1, 0x2150000, 0x2152000, 0x12a000},
		{1, 0x2160000, 0x2170000, 0x110000},
		{1, 0x2170000, 0x2172000, 0x12e000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
	{{{0} } },				/* 35: */
	{{{0} } },				/* 36: */
	{{{0} } },				/* 37: */
	{{{0} } },				/* 38: */
	{{{0} } },				/* 39: */
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
	{{{0} } },				/* 52: */
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
	{{{0} } },				/* 59: I2C0 */
	{{{0} } },				/* 60: I2C1 */
	{{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
};
/*
 * top 12 bits of crb internal address (hub, agent)
 */
static const unsigned crb_hub_agt[64] = {
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
	QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
	QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
	QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
	QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};
/* PCI Windowing for DDR regions. */

#define QLCNIC_PCIE_SEM_TIMEOUT	10000
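
/*
 * Hardware semaphore helpers.  Reading PCIE_SEM_LOCK(sem) returns 1 once the
 * semaphore has been granted to this function; the lock path polls for that,
 * sleeping 1 ms per attempt, for at most QLCNIC_PCIE_SEM_TIMEOUT tries before
 * giving up with -EIO.  When id_reg is non-zero the owning port number is
 * recorded there.  Reading PCIE_SEM_UNLOCK(sem) releases the semaphore again.
 */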
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
	int done = 0, timeout = 0;

	while (!done) {
		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
		if (done == 1)
			break;
		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
			return -EIO;
		msleep(1);
	}

	if (id_reg)
		QLCWR32(adapter, id_reg, adapter->portnum);

	return 0;
}
void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
	QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
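
/*
 * qlcnic_send_cmd_descs() copies one or more pre-built control descriptors
 * into the Tx ring and advances the hardware producer index.  It runs under
 * the netdev Tx lock, and backs off with -EBUSY (stopping the queue) when the
 * ring does not have room for all nr_desc entries.  The firmware request
 * helpers below all funnel through this routine.
 */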
static int
qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
		struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
{
	u32 i, producer, consumer;
	struct qlcnic_cmd_buffer *pbuf;
	struct cmd_desc_type0 *cmd_desc;
	struct qlcnic_host_tx_ring *tx_ring;

	i = 0;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return -EIO;

	tx_ring = adapter->tx_ring;
	__netif_tx_lock_bh(tx_ring->txq);

	producer = tx_ring->producer;
	consumer = tx_ring->sw_consumer;

	if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
		netif_tx_stop_queue(tx_ring->txq);
		__netif_tx_unlock_bh(tx_ring->txq);
		adapter->stats.xmit_off++;
		return -EBUSY;
	}

	do {
		cmd_desc = &cmd_desc_arr[i];

		pbuf = &tx_ring->cmd_buf_arr[producer];
		pbuf->skb = NULL;
		pbuf->frag_count = 0;

		memcpy(&tx_ring->desc_head[producer],
			&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));

		producer = get_next_index(producer, tx_ring->num_desc);
		i++;

	} while (i != nr_desc);

	tx_ring->producer = producer;

	qlcnic_update_cmd_producer(adapter, tx_ring);

	__netif_tx_unlock_bh(tx_ring->txq);

	return 0;
}
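
/*
 * Build a QLCNIC_MAC_EVENT request that adds or removes the given MAC
 * address from the firmware's filter for this port; 'op' is QLCNIC_MAC_ADD
 * or QLCNIC_MAC_DEL, as used by the callers below.
 */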
static int
qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
			unsigned op)
{
	struct qlcnic_nic_req req;
	struct qlcnic_mac_req *mac_req;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&req.words[0];
	mac_req->op = op;
	memcpy(mac_req->mac_addr, addr, 6);

	return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
{
	struct list_head *head;
	struct qlcnic_mac_list_s *cur;

	/* look up if already exists */
	list_for_each(head, &adapter->mac_list) {
		cur = list_entry(head, struct qlcnic_mac_list_s, list);
		if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
			return 0;
	}

	cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
	if (cur == NULL) {
		dev_err(&adapter->netdev->dev,
			"failed to add mac address filter\n");
		return -ENOMEM;
	}
	memcpy(cur->mac_addr, addr, ETH_ALEN);
	list_add_tail(&cur->list, &adapter->mac_list);

	return qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, QLCNIC_MAC_ADD);
}
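
/*
 * Recompute the receive filter for the interface: the unicast and broadcast
 * addresses are always programmed, promiscuous mode accepts everything,
 * IFF_ALLMULTI (or an over-long multicast list) accepts all multicast, and
 * otherwise each multicast address is added to the firmware filter before
 * the miss mode is sent down.
 */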
void qlcnic_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct dev_mc_list *mc_ptr;
	u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u32 mode = VPORT_MISS_MODE_DROP;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	qlcnic_nic_add_mac(adapter, adapter->mac_addr);
	qlcnic_nic_add_mac(adapter, bcast_addr);

	if (netdev->flags & IFF_PROMISC) {
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
		goto send_fw_cmd;
	}

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > adapter->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
		goto send_fw_cmd;
	}

	if (!netdev_mc_empty(netdev)) {
		netdev_for_each_mc_addr(mc_ptr, netdev) {
			qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr);
		}
	}

send_fw_cmd:
	qlcnic_nic_set_promisc(adapter, mode);
}
int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
	struct qlcnic_nic_req req;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
			((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(mode);

	return qlcnic_send_cmd_descs(adapter,
				(struct cmd_desc_type0 *)&req, 1);
}
void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mac_list_s *cur;
	struct list_head *head = &adapter->mac_list;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
		qlcnic_sre_macaddr_change(adapter,
				cur->mac_addr, QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}
#define QLCNIC_CONFIG_INTR_COALESCE	3

/*
 * Send the interrupt coalescing parameter set by ethtool to the card.
 */
int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	u64 word[6];
	int rv, i;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word[0]);

	memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
	for (i = 0; i < 6; i++)
		req.words[i] = cpu_to_le64(word[i]);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send interrupt coalescing parameters\n");

	return rv;
}
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send configure hw lro request\n");

	adapter->flags ^= QLCNIC_LRO_ENABLED;

	return rv;
}
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
		return 0;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));

	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
		((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(enable);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"Could not send configure bridge mode request\n");

	adapter->flags ^= QLCNIC_BRIDGE_ENABLED;

	return rv;
}
#define RSS_HASHTYPE_IP_TCP	0x3
int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int i, rv;

	const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	/*
	 * RSS request:
	 * bits 3-0: hash_method
	 *      5-4: hash_type_ipv4
	 *      7-6: hash_type_ipv6
	 *        8: enable
	 *        9: use indirection table
	 *    47-10: reserved
	 *    63-48: indirection table mask
	 */
	word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
		((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
		((u64)(enable & 0x1) << 8) |
		((0x7ULL) << 48);
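	/*
	 * Worked example with enable = 1: hash_method stays 0, both hash
	 * types are RSS_HASHTYPE_IP_TCP (0x3 << 4 and 0x3 << 6), which
	 * together with the enable bit gives 0x1f0, and the indirection
	 * table mask is 0x7, so the request word is 0x00070000000001f0.
	 */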
	req.words[0] = cpu_to_le64(word);
	for (i = 0; i < 5; i++)
		req.words[i+1] = cpu_to_le64(key[i]);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev, "could not configure RSS\n");

	return rv;
}
int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64(cmd);
	req.words[1] = cpu_to_le64(ip);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
				"could not notify %s IP 0x%x request\n",
				(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);

	return rv;
}
int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);
	req.words[0] = cpu_to_le64(enable | (enable << 8));

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"could not configure link notification\n");

	return rv;
}
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_nic_req req;
	u64 word;
	int rv;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
		((u64)adapter->portnum << 16) |
		((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);

	req.req_hdr = cpu_to_le64(word);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv != 0)
		dev_err(&adapter->netdev->dev,
			"could not cleanup lro flows\n");

	return rv;
}
/*
 * qlcnic_change_mtu - Change the Maximum Transmission Unit
 * @returns 0 on success, negative on failure
 */
int qlcnic_change_mtu(struct net_device *netdev, int mtu)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	if (mtu > P3_MAX_MTU) {
		dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
			P3_MAX_MTU);
		return -EINVAL;
	}

	rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);

	if (!rc)
		netdev->mtu = mtu;

	return rc;
}
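
/*
 * Read the MAC address assigned to this PCI function from the CRB MAC block.
 * Each pair of functions shares three consecutive 32-bit registers, so the
 * address of an odd-numbered function starts 16 bits into the pair's block;
 * the two 32-bit halves read below are stitched together accordingly.
 */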
int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
{
	u32 crbaddr, mac_hi, mac_lo;
	int pci_func = adapter->ahw.pci_func;

	crbaddr = CRB_MAC_BLOCK_START +
		(4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));

	mac_lo = QLCRD32(adapter, crbaddr);
	mac_hi = QLCRD32(adapter, crbaddr+4);

	if (pci_func & 1)
		*mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
	else
		*mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));

	return 0;
}
/*
 * Changes the CRB window to the specified window.
 */

/* Returns < 0 if off is not valid,
 *	 1 if window access is needed. 'off' is set to offset from
 *	   CRB space in 128M pci map
 *	 0 if no window access is needed. 'off' is set to 2M addr
 * In: 'off' is offset from base in 128M pci map
 */
static int
qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
		ulong off, void __iomem **addr)
{
	const struct crb_128M_2M_sub_block_map *m;

	if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
		return -EINVAL;

	off -= QLCNIC_PCI_CRBSPACE;

	/*
	 * Try direct map
	 */
	m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];

	if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
		*addr = adapter->ahw.pci_base0 + m->start_2M +
			(off - m->start_128M);
		return 0;
	}

	/*
	 * Not in direct map, use crb window
	 */
	*addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
	return 1;
}
/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static void
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
	u32 window;
	void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;

	off -= QLCNIC_PCI_CRBSPACE;

	window = CRB_HI(off);

	if (adapter->ahw.crb_win == window)
		return;

	writel(window, addr);
	if (readl(addr) != window) {
		if (printk_ratelimit())
			dev_warn(&adapter->pdev->dev,
				"failed to set CRB window to %d off 0x%lx\n",
				window, off);
	}
	adapter->ahw.crb_win = window;
}
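
/*
 * CRB register accessors.  When qlcnic_pci_get_crb_addr_2M() reports a
 * direct mapping, the register is read or written straight through the BAR;
 * otherwise the access goes through the indirect window, serialized by the
 * adapter's crb_lock and by crb_win_lock()/crb_win_unlock() while the window
 * register is repointed.
 */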
int
qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
{
	unsigned long flags;
	int rv;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0) {
		writel(data, addr);
		return 0;
	}

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		crb_win_lock(adapter);
		qlcnic_pci_set_crbwindow_2M(adapter, off);
		writel(data, addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
		return 0;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	dump_stack();
	return -EIO;
}
int
qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
	unsigned long flags;
	int rv;
	u32 data;
	void __iomem *addr = NULL;

	rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);

	if (rv == 0)
		return readl(addr);

	if (rv > 0) {
		/* indirect access */
		write_lock_irqsave(&adapter->ahw.crb_lock, flags);
		crb_win_lock(adapter);
		qlcnic_pci_set_crbwindow_2M(adapter, off);
		data = readl(addr);
		crb_win_unlock(adapter);
		write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
		return data;
	}

	dev_err(&adapter->pdev->dev,
			"%s: invalid offset: 0x%016lx\n", __func__, off);
	dump_stack();
	return -1;
}
void __iomem *
qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
{
	void __iomem *addr = NULL;

	WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));

	return addr;
}
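
/*
 * Point the OCM (on-chip memory) window at the 256KB-aligned region that
 * contains 'addr' and return, via *start, where that region appears inside
 * the 2M BAR (QLCNIC_PCI_OCM0_2M plus the low 18 bits of the address).  The
 * window register is read back to make sure the write has reached the chip.
 * Addresses matching the QM pattern checked below cannot be reached through
 * this window and are rejected with -EIO.
 */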
static int
qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
		u64 addr, u32 *start)
{
	u32 window;
	struct pci_dev *pdev = adapter->pdev;

	if ((addr & 0x00ff800) == 0xff800) {
		if (printk_ratelimit())
			dev_warn(&pdev->dev, "QM access not handled\n");
		return -EIO;
	}

	window = OCM_WIN_P3P(addr);

	writel(window, adapter->ahw.ocm_win_crb);
	/* read back to flush */
	readl(adapter->ahw.ocm_win_crb);

	adapter->ahw.ocm_win = window;
	*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
	return 0;
}
static int
qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
		u64 *data, int op)
{
	void __iomem *addr, *mem_ptr = NULL;
	resource_size_t mem_base;
	int ret;
	u32 start;

	mutex_lock(&adapter->ahw.mem_lock);

	ret = qlcnic_pci_set_window_2M(adapter, off, &start);
	if (ret != 0)
		goto unlock;

	addr = pci_base_offset(adapter, start);
	if (addr)
		goto noremap;

	mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);

	mem_ptr = ioremap(mem_base, PAGE_SIZE);
	if (mem_ptr == NULL) {
		ret = -EIO;
		goto unlock;
	}

	addr = mem_ptr + (start & (PAGE_SIZE - 1));

noremap:
	if (op == 0) /* read */
		*data = readq(addr);
	else /* write */
		writeq(*data, addr);

unlock:
	mutex_unlock(&adapter->ahw.mem_lock);

	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}
#define MAX_CTL_CHECK	1000
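
/*
 * The MIU/SIU test agent provides indirect access to the DDR and QDR memory
 * ranges: the target address is programmed into MIU_TEST_AGT_ADDR_LO/HI, the
 * operation is kicked off by setting TA_CTL_START | TA_CTL_ENABLE in
 * TEST_AGT_CTRL (plus TA_CTL_WRITE for stores), and completion is detected
 * by polling TA_CTL_BUSY for up to MAX_CTL_CHECK iterations.  Data moves
 * through the MIU_TEST_AGT_WRDATA/RDDATA registers; on P3P parts the agent
 * works on a 16-byte stride, so an 8-byte write first reads the other half
 * of the line back and rewrites it unchanged.
 */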
int
qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 data)
{
	int i, j, ret;
	u32 temp, off8;
	u64 stride;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX_P3)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
		return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);

	return -EIO;

correct:
	stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;

	off8 = off & ~(stride-1);

	mutex_lock(&adapter->ahw.mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));

	i = 0;
	if (stride == 16) {
		writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
		writel((TA_CTL_START | TA_CTL_ENABLE),
				(mem_crb + TEST_AGT_CTRL));

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = readl(mem_crb + TEST_AGT_CTRL);
			if ((temp & TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			ret = -EIO;
			goto done;
		}

		i = (off & 0xf) ? 0 : 2;
		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
			mem_crb + MIU_TEST_AGT_WRDATA(i));
		writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
			mem_crb + MIU_TEST_AGT_WRDATA(i+1));
		i = (off & 0xf) ? 2 : 0;
	}

	writel(data & 0xffffffff,
		mem_crb + MIU_TEST_AGT_WRDATA(i));
	writel((data >> 32) & 0xffffffff,
		mem_crb + MIU_TEST_AGT_WRDATA(i+1));

	writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
		(mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to write through agent\n");
		ret = -EIO;
	} else
		ret = 0;

done:
	mutex_unlock(&adapter->ahw.mem_lock);

	return ret;
}
int
qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
		u64 off, u64 *data)
{
	int j, ret;
	u32 temp, off8;
	u64 val, stride;
	void __iomem *mem_crb;

	/* Only 64-bit aligned access */
	if (off & 7)
		return -EIO;

	/* P3 onward, test agent base for MIU and SIU is same */
	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
				QLCNIC_ADDR_QDR_NET_MAX_P3)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
		mem_crb = qlcnic_get_ioaddr(adapter,
				QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
		goto correct;
	}

	if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
		return qlcnic_pci_mem_access_direct(adapter,
				off, data, 0);
	}

	return -EIO;

correct:
	stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;

	off8 = off & ~(stride-1);

	mutex_lock(&adapter->ahw.mem_lock);

	writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
	writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
	writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
	writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));

	for (j = 0; j < MAX_CTL_CHECK; j++) {
		temp = readl(mem_crb + TEST_AGT_CTRL);
		if ((temp & TA_CTL_BUSY) == 0)
			break;
	}

	if (j >= MAX_CTL_CHECK) {
		if (printk_ratelimit())
			dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
		ret = -EIO;
	} else {
		off8 = MIU_TEST_AGT_RDDATA_LO;
		if ((stride == 16) && (off & 0xf))
			off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;

		temp = readl(mem_crb + off8 + 4);
		val = (u64)temp << 32;
		val |= readl(mem_crb + off8);
		*data = val;
		ret = 0;
	}

	mutex_unlock(&adapter->ahw.mem_lock);

	return ret;
}
int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
{
	int offset, board_type, magic;
	struct pci_dev *pdev = adapter->pdev;

	offset = QLCNIC_FW_MAGIC_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &magic))
		return -EIO;

	if (magic != QLCNIC_BDINFO_MAGIC) {
		dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
			magic);
		return -EIO;
	}

	offset = QLCNIC_BRDTYPE_OFFSET;
	if (qlcnic_rom_fast_read(adapter, offset, &board_type))
		return -EIO;

	adapter->ahw.board_type = board_type;

	if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
		u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
		if ((gpio & 0x8000) == 0)
			board_type = QLCNIC_BRDTYPE_P3_10G_TP;
	}

	switch (board_type) {
	case QLCNIC_BRDTYPE_P3_HMEZ:
	case QLCNIC_BRDTYPE_P3_XG_LOM:
	case QLCNIC_BRDTYPE_P3_10G_CX4:
	case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
	case QLCNIC_BRDTYPE_P3_IMEZ:
	case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
	case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
	case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
	case QLCNIC_BRDTYPE_P3_10G_XFP:
	case QLCNIC_BRDTYPE_P3_10000_BASE_T:
		adapter->ahw.port_type = QLCNIC_XGBE;
		break;
	case QLCNIC_BRDTYPE_P3_REF_QG:
	case QLCNIC_BRDTYPE_P3_4_GB:
	case QLCNIC_BRDTYPE_P3_4_GB_MM:
		adapter->ahw.port_type = QLCNIC_GBE;
		break;
	case QLCNIC_BRDTYPE_P3_10G_TP:
		adapter->ahw.port_type = (adapter->portnum < 2) ?
			QLCNIC_XGBE : QLCNIC_GBE;
		break;
	default:
		dev_err(&pdev->dev, "unknown board type %x\n", board_type);
		adapter->ahw.port_type = QLCNIC_XGBE;
		break;
	}

	return 0;
}
int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
	u32 wol_cfg;

	wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
	if (wol_cfg & (1UL << adapter->portnum)) {
		wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
		if (wol_cfg & (1 << adapter->portnum))
			return 1;
	}

	return 0;
}
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
	struct qlcnic_nic_req req;
	int rv;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);

	req.words[0] = cpu_to_le64((u64)rate << 32);
	req.words[1] = cpu_to_le64(state);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv)
		dev_err(&adapter->pdev->dev, "LED configuration failed.\n");

	return rv;
}
static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
{
	struct qlcnic_nic_req req;
	int rv;
	u64 word;

	memset(&req, 0, sizeof(struct qlcnic_nic_req));
	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);

	word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
		((u64)adapter->portnum << 16);
	req.req_hdr = cpu_to_le64(word);
	req.words[0] = cpu_to_le64(flag);

	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
	if (rv)
		dev_err(&adapter->pdev->dev,
			"%sting loopback mode failed.\n",
			flag ? "Set" : "Reset");
	return rv;
}
int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
{
	if (qlcnic_set_fw_loopback(adapter, 1))
		return -EIO;

	if (qlcnic_nic_set_promisc(adapter,
				VPORT_MISS_MODE_ACCEPT_ALL)) {
		qlcnic_set_fw_loopback(adapter, 0);
		return -EIO;
	}

	msleep(1000);
	return 0;
}
void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
{
	int mode = VPORT_MISS_MODE_DROP;
	struct net_device *netdev = adapter->netdev;

	qlcnic_set_fw_loopback(adapter, 0);

	if (netdev->flags & IFF_PROMISC)
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
	else if (netdev->flags & IFF_ALLMULTI)
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;

	qlcnic_nic_set_promisc(adapter, mode);
}