/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */
#include "xgehal-ring.h"
#include "xgehal-device.h"
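/*
 * __hal_ring_item_dma_offset - byte offset of an RxD block within its
 * owning memblock. Compiled in only when descriptors live in streaming
 * DMA memory and must be explicitly synced to the device.
 */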
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
                           void *item)
{
    int memblock_idx;
    void *memblock;

    /* get owner memblock index */
    memblock_idx = __hal_ring_block_memblock_idx(item);

    /* get owner memblock by memblock index */
    memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

    return (char *)item - (char *)memblock;
}
#endif
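/*
 * __hal_ring_item_dma_addr - return the DMA (bus) address of an RxD block
 * and the DMA handle of its owning memblock. The block's offset within the
 * memblock is added to the memblock's DMA base address.
 */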
static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
                         pci_dma_h *dma_handle)
{
    int memblock_idx;
    void *memblock;
    xge_hal_mempool_dma_t *memblock_dma_object;
    ptrdiff_t dma_item_offset;

    /* get owner memblock index */
    memblock_idx = __hal_ring_block_memblock_idx(
                       (xge_hal_ring_block_t *)item);

    /* get owner memblock by memblock index */
    memblock = __hal_mempool_memblock((xge_hal_mempool_t *)mempoolh,
                                      memblock_idx);

    /* get memblock DMA object by memblock index */
    memblock_dma_object =
        __hal_mempool_memblock_dma((xge_hal_mempool_t *)mempoolh,
                                   memblock_idx);

    /* calculate offset in the memblock of this item */
    dma_item_offset = (char *)item - (char *)memblock;

    *dma_handle = memblock_dma_object->handle;

    return memblock_dma_object->addr + dma_item_offset;
}
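/*
 * __hal_ring_rxdblock_link - chain RxD block "from" to RxD block "to" by
 * writing "to"'s DMA start address into "from"'s next-block pointer, then
 * (when streaming DMA sync is required) sync the pointer so the hardware
 * sees it.
 */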
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
                         xge_hal_ring_t *ring, int from, int to)
{
    xge_hal_ring_block_t *to_item, *from_item;
    dma_addr_t to_dma, from_dma __unused;
    pci_dma_h to_dma_handle, from_dma_handle;

    /* get "from" RxD block */
    from_item = (xge_hal_ring_block_t *)
        __hal_mempool_item((xge_hal_mempool_t *)mempoolh, from);
    xge_assert(from_item);

    /* get "to" RxD block */
    to_item = (xge_hal_ring_block_t *)
        __hal_mempool_item((xge_hal_mempool_t *)mempoolh, to);
    xge_assert(to_item);

    /* get DMA start address of the "to" (previous) RxD block */
    to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

    /* set next pointer for this RxD block to point on
     * previous item's DMA start address */
    __hal_ring_block_next_pointer_set(from_item, to_dma);

    /* get "from" RxD block's DMA start address */
    from_dma =
        __hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
    /* we must sync "from" RxD block, so hardware will see it */
    xge_os_dma_sync(ring->channel.pdev,
                    from_dma_handle,
                    from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
                    __hal_ring_item_dma_offset(mempoolh, from_item) +
                        XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
                    sizeof(u64),
                    XGE_OS_DMA_DIR_TODEVICE);
#endif

    xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
                   from, (unsigned long long)from_dma, to,
                   (unsigned long long)to_dma);
}
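/*
 * __hal_ring_mempool_item_alloc - mempool item-alloc callback. For every
 * newly allocated RxD block it fills reserved_rxds_arr (in reverse order
 * within the block), pre-formats each RxD's private area and Host_Control,
 * and links the block into the chain: block N points to block N-1, and
 * block 0 points to the last one.
 */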
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
                              void *memblock,
                              int memblock_index,
                              xge_hal_mempool_dma_t *dma_object,
                              void *item,
                              int index,
                              int is_last,
                              void *userdata)
{
    int i;
    xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;

    xge_assert(item);
    xge_assert(ring);

    /* format rxds array */
    for (i = ring->rxds_per_block - 1; i >= 0; i--) {
        void *rxdblock_priv;
        xge_hal_ring_rxd_priv_t *rxd_priv;
        xge_hal_ring_rxd_1_t *rxdp;
        int reserve_index = index * ring->rxds_per_block + i;
        int memblock_item_idx;

        ring->reserved_rxds_arr[reserve_index] = (char *)item +
            (ring->rxds_per_block - 1 - i) * ring->rxd_size;

        /* Note: memblock_item_idx is index of the item within
         * the memblock. For instance, in case of three RxD-blocks
         * per memblock this value can be 0, 1 or 2. */
        rxdblock_priv =
            __hal_mempool_item_priv((xge_hal_mempool_t *)mempoolh,
                                    memblock_index, item,
                                    &memblock_item_idx);
        rxdp = (xge_hal_ring_rxd_1_t *)
            ring->reserved_rxds_arr[reserve_index];
        rxd_priv = (xge_hal_ring_rxd_priv_t *)(void *)
            ((char *)rxdblock_priv + ring->rxd_priv_size * i);

        /* pre-format per-RxD Ring's private */
        rxd_priv->dma_offset = (char *)rxdp - (char *)memblock;
        rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
        rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
        rxd_priv->dma_object = dma_object;
#endif

        /* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
        if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
            xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
            xge_assert(memblock_index <= 0xFFFF);
            xge_assert(i <= 0xFFFF);
            /* store memblock's index */
            rxdp_5->host_control = (u32)memblock_index << 16;
            /* store index of memblock's private */
            rxdp_5->host_control |= (u32)(memblock_item_idx *
                                          ring->rxds_per_block + i);
#else
            /* 32-bit case: the private pointer itself fits */
            rxdp_5->host_control = (u32)rxd_priv;
#endif
        } else {
            /* 1b and 3b modes */
            rxdp->host_control = (u64)(ulong_t)rxd_priv;
        }
#else
        /* 1b and 3b modes */
        rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
    }

    __hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *)item,
                                      memblock_index);

    if (is_last) {
        /* link last one with first one */
        __hal_ring_rxdblock_link(mempoolh, ring, 0, index);
    }

    if (index > 0) {
        /* link this RxD block with previous one */
        __hal_ring_rxdblock_link(mempoolh, ring, index, index - 1);
    }

    return XGE_HAL_OK;
}
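/*
 * __hal_ring_initial_replenish - reserve every available descriptor,
 * let the ULD initialize it via the dtr_init callback (if provided),
 * and post it to the ring.
 */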
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
                             xge_hal_channel_reopen_e reopen)
{
    xge_hal_dtr_h dtr = NULL;

    while (xge_hal_channel_dtr_count(channel) > 0) {
        xge_hal_status_e status;

        status = xge_hal_ring_dtr_reserve(channel, &dtr);
        xge_assert(status == XGE_HAL_OK);

        if (channel->dtr_init) {
            status = channel->dtr_init(channel,
                                       dtr, channel->reserve_length,
                                       channel->userdata,
                                       reopen);
            if (status != XGE_HAL_OK) {
                xge_hal_ring_dtr_free(channel, dtr);
                xge_hal_channel_abort(channel,
                                      XGE_HAL_CHANNEL_OC_NORMAL);
                return status;
            }
        }

        xge_hal_ring_dtr_post(channel, dtr);
    }

    return XGE_HAL_OK;
}
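/*
 * __hal_ring_open - open the ring channel: allocate the reserved-RxDs
 * array and the RxD-block mempool, initialize the channel, and, if the
 * ULD supplied a dtr_init callback, perform the initial replenish.
 */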
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
    xge_hal_status_e status;
    xge_hal_device_t *hldev;
    xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
    xge_hal_ring_queue_t *queue;

    /* Note: at this point we have channel.devh and channel.pdev
     * pre-set only! */

    hldev = (xge_hal_device_t *)ring->channel.devh;
    ring->config = &hldev->config.ring;
    queue = &ring->config->queue[attr->post_qid];
    ring->indicate_max_pkts = queue->indicate_max_pkts;
    ring->buffer_mode = queue->buffer_mode;

    xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
    xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
    xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
    xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
    xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

    ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
    ring->rxd_priv_size =
        sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

    /* how many RxDs can fit into one block. Depends on configured
     * buffer_mode. */
    ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

    /* calculate actual RxD block private size */
    ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

    ring->reserved_rxds_arr = (void **)xge_os_malloc(ring->channel.pdev,
        sizeof(void *) * queue->max * ring->rxds_per_block);

    if (ring->reserved_rxds_arr == NULL) {
        __hal_ring_close(channelh);
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }

    ring->mempool = __hal_mempool_create(
                        hldev->pdev,
                        ring->config->memblock_size,
                        XGE_HAL_RING_RXDBLOCK_SIZE,
                        ring->rxdblock_priv_size,
                        queue->initial, queue->max,
                        __hal_ring_mempool_item_alloc,
                        NULL, /* nothing to free */
                        ring);
    if (ring->mempool == NULL) {
        __hal_ring_close(channelh);
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }

    status = __hal_channel_initialize(channelh,
                                      attr,
                                      ring->reserved_rxds_arr,
                                      queue->initial * ring->rxds_per_block,
                                      queue->max * ring->rxds_per_block,
                                      0 /* no threshold for ring! */);
    if (status != XGE_HAL_OK) {
        __hal_ring_close(channelh);
        return status;
    }

    /* sanity check that everything formatted ok */
    xge_assert(ring->reserved_rxds_arr[0] ==
               (char *)ring->mempool->items_arr[0] +
               (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

    /* Note:
     * Specifying dtr_init callback means two things:
     * 1) dtrs need to be initialized by ULD at channel-open time;
     * 2) dtrs need to be posted at channel-open time
     *    (that's what the initial_replenish() below does).
     * Currently we don't have a case when 1) is done without 2).
     */
    if (ring->channel.dtr_init) {
        if ((status = __hal_ring_initial_replenish(
                          (xge_hal_channel_t *)channelh,
                          XGE_HAL_CHANNEL_OC_NORMAL)) != XGE_HAL_OK) {
            __hal_ring_close(channelh);
            return status;
        }
    }

    /* initial replenish will increment the counter in its post() routine,
     * we have to reset it */
    ring->channel.usage_cnt = 0;

    return XGE_HAL_OK;
}
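/*
 * __hal_ring_close - undo __hal_ring_open: destroy the mempool, free the
 * reserved-RxDs array, terminate the channel and destroy its locks.
 */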
void
__hal_ring_close(xge_hal_channel_h channelh)
{
    xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
    xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
    defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
    xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

    xge_assert(ring->channel.pdev);

    queue = &ring->config->queue[ring->channel.post_qid];

    if (ring->mempool) {
        __hal_mempool_destroy(ring->mempool);
    }

    if (ring->reserved_rxds_arr) {
        xge_os_free(ring->channel.pdev,
                    ring->reserved_rxds_arr,
                    sizeof(void *) * queue->max * ring->rxds_per_block);
    }

    __hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
    xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
    xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
    xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
    xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}
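/*
 * __hal_ring_prc_enable - program the PRC for this ring's queue: point it
 * at the first RxD block, select the buffer mode and backoff interval,
 * enable receive, and configure Receive Protocol Assist.
 */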
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
    xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
    xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
    xge_hal_pci_bar0_t *bar0;
    u64 val64;
    void *first_block;
    int block_num;
    xge_hal_ring_queue_t *queue;
    pci_dma_h dma_handle;

    xge_assert(ring->channel.pdev);
    bar0 = (xge_hal_pci_bar0_t *)(void *)
        ((xge_hal_device_t *)ring->channel.devh)->bar0;

    queue = &ring->config->queue[ring->channel.post_qid];
    xge_assert(queue->buffer_mode == 1 ||
               queue->buffer_mode == 3 ||
               queue->buffer_mode == 5);

    /* last block in fact becomes first. This is just the way it
     * is filled up and linked by item_alloc() */

    block_num = queue->initial;
    first_block = __hal_mempool_item(ring->mempool, block_num - 1);
    val64 = __hal_ring_item_dma_addr(ring->mempool,
                                     first_block, &dma_handle);
    xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
                           val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

    xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
                   ring->channel.post_qid, (unsigned long long)val64);

    val64 = xge_os_pio_mem_read64(ring->channel.pdev,
                ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
        !queue->rth_en) {
        val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
    }
    val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

    val64 |= vBIT((queue->buffer_mode >> 1), 14, 2); /* 1,3 or 5 => 0,1 or 2 */
    val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
    val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
        (hldev->config.pci_freq_mherz * queue->backoff_interval_us));

    /* Beware: no snoop by the bridge if (no_snoop_bits) */
    val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

    /* Herc: always use group_reads */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
        val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

    if (hldev->config.bimodal_interrupts)
        if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
            val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;

    xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
                           val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

    /* Configure Receive Protocol Assist */
    val64 = xge_os_pio_mem_read64(ring->channel.pdev,
                ring->channel.regh0, &bar0->rx_pa_cfg);
    val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
    val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
    /* Clean STRIP_VLAN_TAG bit and set as config from upper layer */
    val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
    val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

    xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
                           val64, &bar0->rx_pa_cfg);

    xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
                   ring->channel.post_qid, queue->buffer_mode);
}
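/*
 * __hal_ring_prc_disable - stop receive on this ring's queue by clearing
 * the RC_ENABLED bit in its PRC control register.
 */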
void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
    xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
    xge_hal_pci_bar0_t *bar0;
    u64 val64;

    xge_assert(ring->channel.pdev);
    bar0 = (xge_hal_pci_bar0_t *)(void *)
        ((xge_hal_device_t *)ring->channel.devh)->bar0;

    val64 = xge_os_pio_mem_read64(ring->channel.pdev,
                ring->channel.regh0,
                &bar0->prc_ctrl_n[ring->channel.post_qid]);
    val64 &= ~((u64)XGE_HAL_PRC_CTRL_RC_ENABLED);
    xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
                           val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}
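/*
 * __hal_ring_hw_initialize - per-device Rx initialization: ring priorities,
 * per-queue DRAM partitioning, default QoS-based steering (when no RTS
 * steering is configured), MC-RLDRAM enable, and MSI-X vector assignment
 * for the ring channels.
 */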
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
    xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 val64;
    int i, j;

    /* Rx DMA initialization. */

    /* Configure the prioritization of the Rx rings */
    val64 = 0;
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (!hldev->config.ring.queue[i].configured)
            continue;
        val64 |= vBIT(hldev->config.ring.queue[i].priority,
                      (5 + (i * 8)), 3);
    }
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                           &bar0->rx_queue_priority);
    xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
                   (unsigned long long)val64);

    /* Configuring ring queues according to per-ring configuration */
    val64 = 0;
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (!hldev->config.ring.queue[i].configured)
            continue;
        val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb, (i * 8), 8);
    }
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                           &bar0->rx_queue_cfg);
    xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
                   (unsigned long long)val64);

    if (!hldev->config.rts_qos_en &&
        !hldev->config.rts_port_en &&
        !hldev->config.rts_mac_en) {

        /*
         * Activate default (QoS-based) Rx steering
         */

        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                      &bar0->rts_qos_steering);
        for (j = 0; j < 8 /* QoS max */; j++) {
            for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
                if (!hldev->config.ring.queue[i].configured)
                    continue;
                if (!hldev->config.ring.queue[i].rth_en)
                    val64 |= (BIT(i) >> (j * 8));
            }
        }
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                               &bar0->rts_qos_steering);
        xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
                       (unsigned long long)val64);
    }

    /* Note: If a queue does not exist, it should be assigned a maximum
     * length of zero. Otherwise, packet loss could occur.
     *
     * All configured rings will be properly set at device open time
     * by utilizing device_mtu_set() API call. */
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (hldev->config.ring.queue[i].configured)
            continue;
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
                               &bar0->rts_frm_len_n[i]);
    }

#ifdef XGE_HAL_HERC_EMULATION
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                  ((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
    val64 |= 0x0000000000010000;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                           ((u8 *)bar0 + 0x2e60));

    val64 |= 0x003a000000000000;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                           ((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
#endif

    /* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                  &bar0->mc_rldram_mrs);
    val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
             XGE_HAL_MC_RLDRAM_MRS_ENABLE;
    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64 >> 32),
                                &bar0->mc_rldram_mrs);
    xge_os_wmb();
    __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
                                &bar0->mc_rldram_mrs);

    /* RLDRAM initialization procedure requires 500us to complete */
    xge_os_mdelay(1);

    /* Temporary fixes for Herc RLDRAM */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
        val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                               &bar0->mc_rldram_ref_per_herc);

        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                      &bar0->mc_rldram_mrs_herc);
        xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
                       (unsigned long long)val64);

        val64 = 0x0003570003010300ULL;
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                               &bar0->mc_rldram_mrs_herc);
    }

    if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
        return;

    /*
     * Assign MSI-X vectors
     */
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        xge_list_t *item;
        xge_hal_channel_t *channel = NULL;

        if (!hldev->config.ring.queue[i].configured ||
            !hldev->config.ring.queue[i].intr_vector)
            continue;

        /* find the ring channel that posts to this queue */
        xge_list_for_each(item, &hldev->free_channels) {
            xge_hal_channel_t *tmp;
            tmp = xge_container_of(item, xge_hal_channel_t,
                                   item);
            if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING &&
                tmp->post_qid == i) {
                channel = tmp;
                break;
            }
        }

        if (channel) {
            (void) xge_hal_channel_msix_set(channel,
                hldev->config.ring.queue[i].intr_vector);
        }
    }

    xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}
void
__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
{
    int i;
    xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (!hldev->config.ring.queue[i].configured)
            continue;
        if (hldev->config.ring.queue[i].max_frm_len !=
            XGE_HAL_RING_USE_MTU) {
            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                XGE_HAL_MAC_RTS_FRM_LEN_SET(
                    hldev->config.ring.queue[i].max_frm_len),
                &bar0->rts_frm_len_n[i]);
        } else {
            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
                &bar0->rts_frm_len_n[i]);
        }
    }

    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                           XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
                           &bar0->rmac_max_pyld_len);
}