/* bnx2x_init.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 */

#ifndef BNX2X_INIT_H
#define BNX2X_INIT_H
#define COMMON				0x1
#define PORT0				0x2
#define PORT1				0x4

#define INIT_EMULATION			0x1
#define INIT_FPGA			0x2
#define INIT_ASIC			0x4
#define INIT_HARDWARE			0x7

#define STORM_INTMEM_SIZE		(0x5800 / 4)
#define TSTORM_INTMEM_ADDR		0x1a0000
#define CSTORM_INTMEM_ADDR		0x220000
#define XSTORM_INTMEM_ADDR		0x2a0000
#define USTORM_INTMEM_ADDR		0x320000
/* Init operation types and structures */

#define OP_RD			0x1	/* read single register */
#define OP_WR			0x2	/* write single register */
#define OP_IW			0x3	/* write single register using mailbox */
#define OP_SW			0x4	/* copy a string to the device */
#define OP_SI			0x5	/* copy a string using mailbox */
#define OP_ZR			0x6	/* clear memory */
#define OP_ZP			0x7	/* unzip then copy with DMAE */
#define OP_WB			0x8	/* copy a string using DMAE */
struct raw_op {
	u32 op:8;
	u32 offset:24;
	u32 raw_data;
};

struct op_read {
	u32 op:8;
	u32 offset:24;
	u32 pad;
};

struct op_write {
	u32 op:8;
	u32 offset:24;
	u32 val;
};

struct op_string_write {
	u32 op:8;
	u32 offset:24;
#ifdef __LITTLE_ENDIAN
	u16 data_off;
	u16 data_len;
#else /* __BIG_ENDIAN */
	u16 data_len;
	u16 data_off;
#endif
};

struct op_zero {
	u32 op:8;
	u32 offset:24;
	u32 len;
};

union init_op {
	struct op_read		read;
	struct op_write		write;
	struct op_string_write	str_wr;
	struct op_zero		zero;
	struct raw_op		raw;
};
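/*
 * Every init_op member starts with the same op:8/offset:24 header, so
 * bnx2x_init_block() below can read op->str_wr.op for any entry before
 * deciding which member is valid.  For example, a raw entry
 * { .op = OP_WR, .offset = <reg>, .raw_data = <val> } is applied as
 * REG_WR(bp, <reg>, <val>)  (<reg>/<val> are placeholders, not values
 * taken from this file).
 */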
#include "bnx2x_init_values.h"

static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);

static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
			     u32 dst_addr, u32 len32);

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len);
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
			      u32 len)
{
	int i;

	for (i = 0; i < len; i++) {
		REG_WR(bp, addr + i*4, data[i]);

		/* avoid soft-lockup warnings on very long writes */
		if (!(i % 10000)) {
			touch_softlockup_watchdog();
			cpu_relax();
		}
	}
}

#define INIT_MEM_WR(reg, data, reg_off, len) \
	bnx2x_init_str_wr(bp, reg + reg_off*4, data, len)

static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
			      u16 len)
{
	int i;

	for (i = 0; i < len; i++) {
		REG_WR_IND(bp, addr + i*4, data[i]);

		if (!(i % 10000)) {
			touch_softlockup_watchdog();
			cpu_relax();
		}
	}
}
static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
			     u32 len, int gunzip)
{
	int offset = 0;

	if (gunzip) {
		int rc;
#ifdef __BIG_ENDIAN
		int i, size;
		u32 *temp;

		temp = kmalloc(len, GFP_KERNEL);
		size = (len / 4) + ((len % 4) ? 1 : 0);
		for (i = 0; i < size; i++)
			temp[i] = swab32(data[i]);
		data = temp;
#endif
		rc = bnx2x_gunzip(bp, (u8 *)data, len);
		if (rc) {
			DP(NETIF_MSG_HW, "gunzip failed ! rc %d\n", rc);
			return;
		}
		len = bp->gunzip_outlen;
#ifdef __BIG_ENDIAN
		kfree(temp);
		for (i = 0; i < len; i++)
			((u32 *)bp->gunzip_buf)[i] =
				swab32(((u32 *)bp->gunzip_buf)[i]);
#endif
	} else {
		if ((len * 4) > FW_BUF_SIZE) {
			BNX2X_ERR("LARGE DMAE OPERATION ! len 0x%x\n", len*4);
			return;
		}
		memcpy(bp->gunzip_buf, data, len * 4);
	}

	/* a DMAE command copies at most DMAE_LEN32_MAX dwords,
	   so issue the buffer in chunks */
	while (len > DMAE_LEN32_MAX) {
		bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
				 addr + offset, DMAE_LEN32_MAX);
		offset += DMAE_LEN32_MAX * 4;
		len -= DMAE_LEN32_MAX;
	}
	bnx2x_write_dmae(bp, bp->gunzip_mapping + offset, addr + offset, len);
}
#define INIT_MEM_WB(reg, data, reg_off, len) \
	bnx2x_init_wr_wb(bp, reg + reg_off*4, data, len, 0)

#define INIT_GUNZIP_DMAE(reg, data, reg_off, len) \
	bnx2x_init_wr_wb(bp, reg + reg_off*4, data, len, 1)
static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
	int offset = 0;

	if ((len * 4) > FW_BUF_SIZE) {
		BNX2X_ERR("LARGE DMAE OPERATION ! len 0x%x\n", len * 4);
		return;
	}
	memset(bp->gunzip_buf, fill, len * 4);

	while (len > DMAE_LEN32_MAX) {
		bnx2x_write_dmae(bp, bp->gunzip_mapping + offset,
				 addr + offset, DMAE_LEN32_MAX);
		offset += DMAE_LEN32_MAX * 4;
		len -= DMAE_LEN32_MAX;
	}
	bnx2x_write_dmae(bp, bp->gunzip_mapping + offset, addr + offset, len);
}
static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
{
	int i;
	union init_op *op;
	u32 op_type, addr, len;
	const u32 *data;

	for (i = op_start; i < op_end; i++) {

		op = (union init_op *)&(init_ops[i]);

		op_type = op->str_wr.op;
		addr = op->str_wr.offset;
		len = op->str_wr.data_len;
		data = init_data + op->str_wr.data_off;

		switch (op_type) {
		case OP_RD:
			REG_RD(bp, addr);
			break;
		case OP_WR:
			REG_WR(bp, addr, op->write.val);
			break;
		case OP_SW:
			bnx2x_init_str_wr(bp, addr, data, len);
			break;
		case OP_WB:
			bnx2x_init_wr_wb(bp, addr, data, len, 0);
			break;
		case OP_SI:
			bnx2x_init_ind_wr(bp, addr, data, len);
			break;
		case OP_ZR:
			bnx2x_init_fill(bp, addr, 0, op->zero.len);
			break;
		case OP_ZP:
			bnx2x_init_wr_wb(bp, addr, data, len, 1);
			break;
		default:
			BNX2X_ERR("BAD init operation!\n");
			break;
		}
	}
}
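/*
 * Illustrative usage (marker names are an assumption, not taken from this
 * file): callers pass a [start, end) index range into the init_ops[] table
 * generated in bnx2x_init_values.h, e.g.
 *
 *	bnx2x_init_block(bp, <BLOCK>_COMMON_START, <BLOCK>_COMMON_END);
 */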
/****************************************************************************
* PXP
****************************************************************************/

/*
 * This code configures the PCI read/write arbiter
 * which implements a weighted round-robin
 * between the virtual queues in the chip.
 *
 * The values were derived for each PCI max payload and max request size.
 * Since max payload and max request size are only known at run time,
 * this is done as a separate init stage.
 */
#define NUM_WR_Q			13
#define NUM_RD_Q			29
#define MAX_RD_ORD			3
#define MAX_WR_ORD			2
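/*
 * The "order" indices come from the PCIe Device Control register read in
 * bnx2x_init_pxp() below: w_order from Max_Payload_Size, r_order from
 * Max_Read_Request_Size.  Assuming the standard PCIe encoding
 * (0 = 128B, 1 = 256B, 2 = 512B, 3 = 1024B, ...), MAX_RD_ORD caps the
 * table lookup at a 1024-byte read request and MAX_WR_ORD at a
 * 512-byte payload.
 */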
/* configuration for one arbiter queue */
struct arb_line {
	int l;
	int add;
	int ubound;
};
/* derived configuration for each read queue for each max request size */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
	{{8 , 64 , 25}, {16 , 64 , 25}, {32 , 64 , 25}, {64 , 64 , 41} },
	{{4 , 8 , 4},  {4 , 8 , 4},  {4 , 8 , 4},  {4 , 8 , 4} },
	{{4 , 3 , 3},  {4 , 3 , 3},  {4 , 3 , 3},  {4 , 3 , 3} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {16 , 3 , 11}, {16 , 3 , 11} },
	{{8 , 64 , 25}, {16 , 64 , 25}, {32 , 64 , 25}, {64 , 64 , 41} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {64 , 3 , 41} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {64 , 3 , 41} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {64 , 3 , 41} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {64 , 3 , 41} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 3 , 6},  {16 , 3 , 11}, {32 , 3 , 21}, {32 , 3 , 21} },
	{{8 , 64 , 25}, {16 , 64 , 41}, {32 , 64 , 81}, {64 , 64 , 120} }
};
/* derived configuration for each write queue for each max request size */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
	{{4 , 6 , 3},  {4 , 6 , 3},  {4 , 6 , 3} },
	{{4 , 2 , 3},  {4 , 2 , 3},  {4 , 2 , 3} },
	{{8 , 2 , 6},  {16 , 2 , 11}, {16 , 2 , 11} },
	{{8 , 2 , 6},  {16 , 2 , 11}, {32 , 2 , 21} },
	{{8 , 2 , 6},  {16 , 2 , 11}, {32 , 2 , 21} },
	{{8 , 2 , 6},  {16 , 2 , 11}, {32 , 2 , 21} },
	{{8 , 64 , 25}, {16 , 64 , 25}, {32 , 64 , 25} },
	{{8 , 2 , 6},  {16 , 2 , 11}, {16 , 2 , 11} },
	{{8 , 2 , 6},  {16 , 2 , 11}, {16 , 2 , 11} },
	{{8 , 9 , 6},  {16 , 9 , 11}, {32 , 9 , 21} },
	{{8 , 47 , 19}, {16 , 47 , 19}, {32 , 47 , 21} },
	{{8 , 9 , 6},  {16 , 9 , 11}, {16 , 9 , 11} },
	{{8 , 64 , 25}, {16 , 64 , 41}, {32 , 64 , 81} }
};
/* register addresses for read queues */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
		PXP2_REG_RQ_BW_RD_UBOUND0},
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
		PXP2_REG_RQ_BW_RD_UBOUND4},
	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
		PXP2_REG_RQ_BW_RD_UBOUND5},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
		PXP2_REG_RQ_BW_RD_UBOUND12},
	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
		PXP2_REG_RQ_BW_RD_UBOUND13},
	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
		PXP2_REG_RQ_BW_RD_UBOUND14},
	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
		PXP2_REG_RQ_BW_RD_UBOUND15},
	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
		PXP2_REG_RQ_BW_RD_UBOUND16},
	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
		PXP2_REG_RQ_BW_RD_UBOUND17},
	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
		PXP2_REG_RQ_BW_RD_UBOUND18},
	{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
		PXP2_REG_RQ_BW_RD_UBOUND19},
	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
		PXP2_REG_RQ_BW_RD_UBOUND20},
	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
		PXP2_REG_RQ_BW_RD_UBOUND22},
	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
		PXP2_REG_RQ_BW_RD_UBOUND23},
	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
		PXP2_REG_RQ_BW_RD_UBOUND24},
	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
		PXP2_REG_RQ_BW_RD_UBOUND25},
	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
		PXP2_REG_RQ_BW_RD_UBOUND26},
	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
		PXP2_REG_RQ_BW_RD_UBOUND27},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28}
};
/* register addresses for write queues */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
		PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
		PXP2_REG_RQ_BW_WR_UBOUND30}
};
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	int r_order, w_order;
	u32 val, i;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", (u16)val);
	w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12);

	if (r_order > MAX_RD_ORD) {
		DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
		   r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	}
	if (w_order > MAX_WR_ORD) {
		DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
		   w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;
	}
	DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);

	for (i = 0; i < NUM_RD_Q-1; i++) {
		REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
		REG_WR(bp, read_arb_addr[i].add,
		       read_arb_data[i][r_order].add);
		REG_WR(bp, read_arb_addr[i].ubound,
		       read_arb_data[i][r_order].ubound);
	}

	for (i = 0; i < NUM_WR_Q-1; i++) {
		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {

			REG_WR(bp, write_arb_addr[i].l,
			       write_arb_data[i][w_order].l);

			REG_WR(bp, write_arb_addr[i].add,
			       write_arb_data[i][w_order].add);

			REG_WR(bp, write_arb_addr[i].ubound,
			       write_arb_data[i][w_order].ubound);
		} else {

			val = REG_RD(bp, write_arb_addr[i].l);
			REG_WR(bp, write_arb_addr[i].l,
			       val | (write_arb_data[i][w_order].l << 10));

			val = REG_RD(bp, write_arb_addr[i].add);
			REG_WR(bp, write_arb_addr[i].add,
			       val | (write_arb_data[i][w_order].add << 10));

			val = REG_RD(bp, write_arb_addr[i].ubound);
			REG_WR(bp, write_arb_addr[i].ubound,
			       val | (write_arb_data[i][w_order].ubound << 7));
		}
	}

	val = write_arb_data[NUM_WR_Q-1][w_order].add;
	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);

	val = read_arb_data[NUM_RD_Q-1][r_order].add;
	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);

	REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);

	if (r_order == MAX_RD_ORD)
		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
	REG_WR(bp, PXP2_REG_WR_DMAE_TH, (128 << w_order)/16);
}
/****************************************************************************
* CDU
****************************************************************************/

#define CDU_REGION_NUMBER_XCM_AG	2
#define CDU_REGION_NUMBER_UCM_AG	4

/*
 * String-to-compress [31:8] = CID (all 24 bits)
 * String-to-compress [7:4]  = Region
 * String-to-compress [3:0]  = Type
 */
#define CDU_VALID_DATA(_cid, _region, _type) \
	(((_cid) << 8) | (((_region) & 0xf) << 4) | (((_type) & 0xf)))
#define CDU_CRC8(_cid, _region, _type) \
	calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff)
#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
	(0x80 | (CDU_CRC8(_cid, _region, _type) & 0x7f))
#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type) \
	(0x80 | (((_type) & 0xf) << 3) | ((_crc) & 0x7))
#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
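/*
 * Worked example (follows directly from the macros above):
 * CDU_VALID_DATA(0x10, CDU_REGION_NUMBER_XCM_AG, 1)
 *	= (0x10 << 8) | ((2 & 0xf) << 4) | (1 & 0xf) = 0x1021;
 * CDU_CRC8() then runs that 32-bit word through calc_crc8() below with an
 * initial CRC of 0xff, and CDU_RSRVD_VALUE_TYPE_A() keeps the low 7 bits
 * of the result ORed with the 0x80 valid bit.
 */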
/*****************************************************************************
 * Description:
 *	Calculates crc 8 on a word value: polynomial 0-1-2-8
 *	Code was translated from Verilog.
 ****************************************************************************/
static u8 calc_crc8(u32 data, u8 crc)
{
	u8 D[32];
	u8 NewCRC[8];
	u8 C[8];
	u8 crc_res;
	u8 i;

	/* split the data into 32 bits */
	for (i = 0; i < 32; i++) {
		D[i] = data & 1;
		data = data >> 1;
	}

	/* split the crc into 8 bits */
	for (i = 0; i < 8; i++) {
		C[i] = crc & 1;
		crc = crc >> 1;
	}

	NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
		    D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
		    C[6] ^ C[7];
	NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
		    D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
		    D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
	NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
		    D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
		    C[0] ^ C[1] ^ C[4] ^ C[5];
	NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
		    D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
		    C[1] ^ C[2] ^ C[5] ^ C[6];
	NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
		    D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
		    C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
	NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
		    D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
		    C[3] ^ C[4] ^ C[7];
	NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
		    D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
		    C[5];
	NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
		    D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
		    C[6];

	crc_res = 0;
	for (i = 0; i < 8; i++)
		crc_res |= (NewCRC[i] << i);

	return crc_res;
}
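/*
 * Usage note: calc_crc8() is used through the CDU_CRC8() macro above; for
 * example, CDU_RSRVD_VALUE_TYPE_A(cid, region, type) expands to
 *	0x80 | (calc_crc8(CDU_VALID_DATA(cid, region, type), 0xff) & 0x7f)
 * i.e. the context-valid bit ORed with the low 7 bits of the CRC.
 */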
#endif /* BNX2X_INIT_H */