/*
 * haiku.git: src/add-ons/kernel/drivers/network/bcm440x/b44um.c
 */
/*
 * Copyright 2006-2007, Nathan Whitehorn.
 * Distributed under the terms of the GPL License.
 */
#include "b44mm.h"
#include "b44lm.h"
#include "mempool.h"

#include <ether_driver.h>
#ifdef HAIKU_TARGET_PLATFORM_HAIKU
#	include <net/if_media.h>
#endif

#include <PCI.h>
#include <Drivers.h>
#include <KernelExport.h>

#include <malloc.h>
#include <stdio.h>
#include <string.h>
/* PCI bus manager module; also grabbed lazily by the b44_MM_* hooks below,
   which may run before init_driver() has published it. */
struct pci_module_info *pci = NULL;

#define MAX_CARDS 4

/* NULL-terminated list of published device names, one slot per card plus
   the terminator; filled in by init_driver(). */
static char *sDeviceNames[MAX_CARDS + 1];
static int sCardsFound = 0;
struct be_b44_dev be_b44_dev_cards[MAX_CARDS];

/* Per-packet bookkeeping size the LM (lower module) layer allocates for us. */
int b44_Packet_Desc_Size = sizeof(struct B_UM_PACKET);
34 #define ROUND_UP_TO_PAGE(size) ((size % 4096 != 0) ? 4096 - (size % 4096) + size : size)
/* -------- BeOS Driver Hooks ------------ */

status_t b44_open(const char *name, uint32 flags, void **cookie);
status_t b44_close(void *cookie);
status_t b44_free(void *cookie);
status_t b44_ioctl(void *cookie, uint32 op, void *data, size_t len);
status_t b44_read(void *cookie, off_t pos, void *data, size_t *numBytes);
status_t b44_write(void *cookie, off_t pos, const void *data, size_t *numBytes);
int32 b44_interrupt(void *cookie);
/* Reclaims transmitted packets; called inline from b44_write() despite the
   thread-style name/signature. */
int32 tx_cleanup_thread(void *us);

/* Required export: tells the devfs layer which driver API we were built for. */
int32 api_version = B_CUR_DRIVER_API_VERSION;
51 status_t
52 init_hardware(void)
54 return B_OK;
58 const char **
59 publish_devices()
61 return (const char **)sDeviceNames;
65 device_hooks *
66 find_device(const char *name)
68 static device_hooks b44_hooks = {
69 b44_open,
70 b44_close,
71 b44_free,
72 b44_ioctl,
73 b44_read,
74 b44_write,
75 NULL,
76 NULL,
77 NULL,
78 NULL
80 return &b44_hooks;
84 status_t
85 init_driver(void)
87 int i = 0;
88 pci_info dev_info;
90 if (get_module(B_PCI_MODULE_NAME,(module_info **)&pci) < B_OK)
91 return ENOSYS;
93 while (pci->get_nth_pci_info(i++, &dev_info) == 0) {
94 if (dev_info.class_base != PCI_network
95 || dev_info.class_sub != PCI_ethernet
96 || dev_info.vendor_id != 0x14e4
97 || (dev_info.device_id != 0x4401
98 && dev_info.device_id != 0x4402
99 && dev_info.device_id != 0x170c))
100 continue;
102 if (sCardsFound >= MAX_CARDS)
103 break;
105 sDeviceNames[sCardsFound] = (char *)malloc(16 /* net/bcm440x/xx */);
106 sprintf(sDeviceNames[sCardsFound], "net/bcm440x/%d", sCardsFound);
107 be_b44_dev_cards[sCardsFound].pci_data = dev_info;
108 be_b44_dev_cards[sCardsFound].packet_release_sem = create_sem(0,
109 sDeviceNames[sCardsFound]);
110 be_b44_dev_cards[sCardsFound].mem_list_num = 0;
111 be_b44_dev_cards[sCardsFound].lockmem_list_num = 0;
112 be_b44_dev_cards[sCardsFound].opened = 0;
113 be_b44_dev_cards[sCardsFound].block = 1;
114 be_b44_dev_cards[sCardsFound].lock = 0;
115 #ifdef HAIKU_TARGET_PLATFORM_HAIKU
116 be_b44_dev_cards[sCardsFound].linkChangeSem = -1;
117 #endif
119 if (b44_LM_GetAdapterInfo(&be_b44_dev_cards[sCardsFound].lm_dev) != LM_STATUS_SUCCESS) {
120 for (i = 0; i < sCardsFound; i++) {
121 free((void *)sDeviceNames[i]);
122 delete_sem(be_b44_dev_cards[i].packet_release_sem);
124 put_module(B_PCI_MODULE_NAME);
125 return ENODEV;
128 QQ_InitQueue(&be_b44_dev_cards[sCardsFound].RxPacketReadQ.Container,
129 MAX_RX_PACKET_DESC_COUNT);
131 sCardsFound++;
134 mempool_init((MAX_RX_PACKET_DESC_COUNT+10) * sCardsFound);
136 sDeviceNames[sCardsFound] = NULL;
137 return B_OK;
141 void
142 uninit_driver(void)
144 struct be_b44_dev *pUmDevice;
145 int i, j;
147 for (j = 0; j < sCardsFound; j++) {
148 pUmDevice = &be_b44_dev_cards[j];
149 for (i = 0; i < pUmDevice->mem_list_num; i++)
150 free(pUmDevice->mem_list[i]);
151 for (i = 0; i < pUmDevice->lockmem_list_num; i++)
152 delete_area(pUmDevice->lockmem_list[i]);
154 delete_area(pUmDevice->mem_base);
156 delete_sem(be_b44_dev_cards[j].packet_release_sem);
157 free((void *)sDeviceNames[j]);
160 mempool_exit();
164 status_t
165 b44_open(const char *name, uint32 flags, void **cookie)
167 struct be_b44_dev *pDevice = NULL;
168 int i;
170 *cookie = NULL;
171 for (i = 0; i < sCardsFound; i++) {
172 if (strcmp(sDeviceNames[i], name) == 0) {
173 *cookie = pDevice = &be_b44_dev_cards[i];
174 break;
178 if (*cookie == NULL)
179 return B_FILE_NOT_FOUND;
181 if (atomic_or(&pDevice->opened,1)) {
182 *cookie = pDevice = NULL;
183 return B_BUSY;
186 install_io_interrupt_handler(pDevice->pci_data.u.h0.interrupt_line,
187 b44_interrupt, *cookie, 0);
188 if (b44_LM_InitializeAdapter(&pDevice->lm_dev) != LM_STATUS_SUCCESS) {
189 atomic_and(&pDevice->opened, 0);
190 remove_io_interrupt_handler(pDevice->pci_data.u.h0.interrupt_line,
191 b44_interrupt, *cookie);
192 *cookie = NULL;
193 return B_ERROR;
196 /*QQ_InitQueue(&pDevice->rx_out_of_buf_q.Container,
197 MAX_RX_PACKET_DESC_COUNT);*/
199 b44_LM_EnableInterrupt(&pDevice->lm_dev);
200 return B_OK;
204 status_t
205 b44_close(void *cookie)
207 struct be_b44_dev *pUmDevice = (struct be_b44_dev *)cookie;
208 if (cookie == NULL)
209 return B_OK;
211 atomic_and(&pUmDevice->opened,0);
212 b44_LM_Halt(&pUmDevice->lm_dev);
214 return B_OK;
218 status_t
219 b44_free(void *cookie)
221 struct be_b44_dev *pUmDevice = (struct be_b44_dev *)cookie;
222 if (cookie == NULL)
223 return B_OK;
225 remove_io_interrupt_handler(pUmDevice->pci_data.u.h0.interrupt_line,
226 b44_interrupt, cookie);
227 return B_OK;
231 status_t
232 b44_ioctl(void *cookie,uint32 op, void *data, size_t len)
234 struct be_b44_dev *pUmDevice = (struct be_b44_dev *)cookie;
236 switch (op) {
237 case ETHER_INIT:
238 return B_OK;
239 case ETHER_GETADDR:
240 if (data == NULL)
241 return B_ERROR;
243 memcpy(data, pUmDevice->lm_dev.NodeAddress, 6);
244 return B_OK;
245 case ETHER_NONBLOCK:
246 pUmDevice->block = !*((uint8 *)data);
247 return B_OK;
248 case ETHER_ADDMULTI:
249 return (b44_LM_MulticastAdd(&pUmDevice->lm_dev,(PLM_UINT8)(data)) == LM_STATUS_SUCCESS) ? B_OK : B_ERROR;
250 case ETHER_REMMULTI:
251 return (b44_LM_MulticastDel(&pUmDevice->lm_dev,(PLM_UINT8)(data)) == LM_STATUS_SUCCESS) ? B_OK : B_ERROR;
252 case ETHER_SETPROMISC:
253 if (*((uint8 *)(data))) {
254 b44_LM_SetReceiveMask(&pUmDevice->lm_dev,
255 pUmDevice->lm_dev.ReceiveMask | LM_PROMISCUOUS_MODE);
256 } else {
257 b44_LM_SetReceiveMask(&pUmDevice->lm_dev,
258 pUmDevice->lm_dev.ReceiveMask & ~LM_PROMISCUOUS_MODE);
260 return B_OK;
261 #ifndef HAIKU_TARGET_PLATFORM_HAIKU
262 case ETHER_GETLINKSTATE:
264 ether_link_state_t *state_buffer = (ether_link_state_t *)(data);
265 state_buffer->link_speed = (pUmDevice->lm_dev.LineSpeed == LM_LINE_SPEED_10MBPS) ? 10 : 100;
266 state_buffer->link_quality = (pUmDevice->lm_dev.LinkStatus == LM_STATUS_LINK_DOWN) ? 0.0 : 1.0;
267 state_buffer->duplex_mode = (pUmDevice->lm_dev.DuplexMode == LM_DUPLEX_MODE_FULL);
268 return B_OK;
270 #else
271 case ETHER_GET_LINK_STATE:
273 ether_link_state_t state;
275 if (pUmDevice->lm_dev.corerev < 7) {
276 b44_LM_PollLink(&pUmDevice->lm_dev);
278 state.media = (pUmDevice->lm_dev.LinkStatus
279 == LM_STATUS_LINK_DOWN ? 0 : IFM_ACTIVE) | IFM_ETHER;
280 switch (pUmDevice->lm_dev.LineSpeed) {
281 case LM_LINE_SPEED_10MBPS:
282 state.media |= IFM_10_T;
283 state.speed = 10000000;
284 break;
285 case LM_LINE_SPEED_100MBPS:
286 state.media |= IFM_100_TX;
287 state.speed = 100000000;
288 break;
289 default:
290 state.speed = 0;
292 state.media |= (pUmDevice->lm_dev.DuplexMode
293 == LM_DUPLEX_MODE_FULL ? IFM_FULL_DUPLEX : IFM_HALF_DUPLEX);
294 state.quality = 1000;
296 return user_memcpy(data, &state, sizeof(ether_link_state_t));
299 case ETHER_SET_LINK_STATE_SEM:
301 if (user_memcpy(&pUmDevice->linkChangeSem, data, sizeof(sem_id)) < B_OK) {
302 pUmDevice->linkChangeSem = -1;
303 return B_BAD_ADDRESS;
305 return B_OK;
308 #endif
310 return B_ERROR;
314 int32
315 b44_interrupt(void *cookie)
317 struct be_b44_dev *pUmDevice = (struct be_b44_dev *)cookie;
318 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
320 if (!pDevice->InitDone)
321 return B_UNHANDLED_INTERRUPT;
323 if (b44_LM_ServiceInterrupts(pDevice) == 12)
324 return B_UNHANDLED_INTERRUPT;
326 if (QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container)) {
327 b44_LM_QueueRxPackets(pDevice);
328 return B_INVOKE_SCHEDULER;
331 return B_HANDLED_INTERRUPT;
335 status_t
336 b44_read(void *cookie, off_t pos, void *data, size_t *numBytes)
338 struct be_b44_dev *pUmDevice = (struct be_b44_dev *)cookie;
339 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
340 PLM_PACKET pPacket;
341 struct B_UM_PACKET *pUmPacket;
342 cpu_status cpu;
344 if (pUmDevice->block)
345 acquire_sem(pUmDevice->packet_release_sem);
346 else
347 acquire_sem_etc(pUmDevice->packet_release_sem,1,B_RELATIVE_TIMEOUT,0); // Decrement the receive sem anyway, but don't block
349 cpu = disable_interrupts();
350 acquire_spinlock(&pUmDevice->lock);
352 pPacket = (PLM_PACKET)
353 QQ_PopHead(&pUmDevice->RxPacketReadQ.Container);
355 release_spinlock(&pUmDevice->lock);
356 restore_interrupts(cpu);
358 if (pPacket == 0) {
359 *numBytes = -1;
360 return B_ERROR;
363 pUmPacket = (struct B_UM_PACKET *)pPacket;
364 if (pPacket->PacketStatus != LM_STATUS_SUCCESS
365 || pPacket->PacketSize > 1518) {
366 cpu = disable_interrupts();
367 acquire_spinlock(&pUmDevice->lock);
369 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
371 release_spinlock(&pUmDevice->lock);
372 restore_interrupts(cpu);
373 *numBytes = -1;
374 return B_ERROR;
377 if (pPacket->PacketSize/*-pDevice->rxoffset*/ < *numBytes)
378 *numBytes = pPacket->PacketSize/*-pDevice->rxoffset*/;
380 memcpy(data, pUmPacket->data + pDevice->rxoffset, *numBytes);
381 cpu = disable_interrupts();
382 acquire_spinlock(&pUmDevice->lock);
384 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
386 release_spinlock(&pUmDevice->lock);
387 restore_interrupts(cpu);
389 return B_OK;
393 status_t
394 b44_write(void *cookie, off_t pos, const void *data, size_t *numBytes)
396 struct be_b44_dev *pUmDevice = (struct be_b44_dev *)cookie;
397 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
398 PLM_PACKET pPacket;
399 struct B_UM_PACKET *pUmPacket;
401 /*if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) || !pDevice->InitDone)
403 return ENETDOWN;
406 pPacket = (PLM_PACKET)
407 QQ_PopHead(&pDevice->TxPacketFreeQ.Container);
408 if (pPacket == 0)
409 return B_ERROR;
411 pUmPacket = (struct B_UM_PACKET *)pPacket;
412 pUmPacket->data = chunk_pool_get();
414 memcpy(pUmPacket->data/*+pDevice->dataoffset*/,data,*numBytes); /* no guarantee data is contiguous, so we have to copy */
415 pPacket->PacketSize = pUmPacket->size = *numBytes/*+pDevice->rxoffset*/;
417 pPacket->u.Tx.FragCount = 1;
419 tx_cleanup_thread(pUmDevice);
421 b44_LM_SendPacket(pDevice, pPacket);
422 return B_OK;
// #pragma mark - Broadcom MM hooks
429 LM_STATUS
430 b44_MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
431 LM_UINT16 *pValue16)
433 if (pci == NULL)
434 get_module(B_PCI_MODULE_NAME,(module_info **)&pci);
436 *pValue16 = (LM_UINT16)pci->read_pci_config(((struct be_b44_dev *)(pDevice))->pci_data.bus,((struct be_b44_dev *)(pDevice))->pci_data.device,((struct be_b44_dev *)(pDevice))->pci_data.function,(uchar)Offset,sizeof(LM_UINT16));
437 return LM_STATUS_SUCCESS;
441 LM_STATUS
442 b44_MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
443 LM_UINT16 Value16)
445 if (pci == NULL)
446 get_module(B_PCI_MODULE_NAME,(module_info **)&pci);
448 pci->write_pci_config(((struct be_b44_dev *)(pDevice))->pci_data.bus,((struct be_b44_dev *)(pDevice))->pci_data.device,((struct be_b44_dev *)(pDevice))->pci_data.function,(uchar)Offset,sizeof(LM_UINT16),(uint32)Value16);
449 return LM_STATUS_SUCCESS;
453 LM_STATUS
454 b44_MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
455 LM_UINT32 *pValue32)
457 if (pci == NULL)
458 get_module(B_PCI_MODULE_NAME,(module_info **)&pci);
460 *pValue32 = (LM_UINT32)pci->read_pci_config(((struct be_b44_dev *)(pDevice))->pci_data.bus,((struct be_b44_dev *)(pDevice))->pci_data.device,((struct be_b44_dev *)(pDevice))->pci_data.function,(uchar)Offset,sizeof(LM_UINT32));
461 return LM_STATUS_SUCCESS;
465 LM_STATUS
466 b44_MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
467 LM_UINT32 Value32)
469 if (pci == NULL)
470 get_module(B_PCI_MODULE_NAME,(module_info **)&pci);
472 pci->write_pci_config(((struct be_b44_dev *)(pDevice))->pci_data.bus,((struct be_b44_dev *)(pDevice))->pci_data.device,((struct be_b44_dev *)(pDevice))->pci_data.function,(uchar)Offset,sizeof(LM_UINT32),(uint32)Value32);
473 return LM_STATUS_SUCCESS;
477 LM_STATUS
478 b44_MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
480 struct be_b44_dev *pUmDevice = (struct be_b44_dev *)(pDevice);
481 size_t size = pUmDevice->pci_data.u.h0.base_register_sizes[0];
483 if (pci == NULL)
484 get_module(B_PCI_MODULE_NAME,(module_info **)&pci);
486 size = ROUNDUP(size,B_PAGE_SIZE);
487 pUmDevice->mem_base = map_physical_memory("bcm440x_regs",
488 pUmDevice->pci_data.u.h0.base_registers[0], size,
489 B_ANY_KERNEL_BLOCK_ADDRESS, B_READ_AREA | B_WRITE_AREA,
490 (void **)(&pDevice->pMappedMemBase));
492 return LM_STATUS_SUCCESS;
497 LM_STATUS
498 b44_MM_MapIoBase(PLM_DEVICE_BLOCK pDevice)
500 if (pci == NULL)
501 get_module(B_PCI_MODULE_NAME,(module_info **)&pci);
503 pDevice->pMappedMemBase = pci->ram_address(((struct be_b44_dev *)(pDevice))->pci_data.memory_base);
504 return LM_STATUS_SUCCESS;
509 LM_STATUS
510 b44_MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice)
512 struct be_b44_dev *dev = (struct be_b44_dev *)pDevice;
513 PLM_PACKET pPacket;
515 while (1) {
516 pPacket = (PLM_PACKET)
517 QQ_PopHead(&pDevice->RxPacketReceivedQ.Container);
518 if (pPacket == 0)
519 break;
521 acquire_spinlock(&dev->lock);
522 release_sem_etc(dev->packet_release_sem, 1, B_DO_NOT_RESCHEDULE);
523 release_spinlock(&dev->lock);
524 QQ_PushTail(&dev->RxPacketReadQ.Container, pPacket);
527 return LM_STATUS_SUCCESS;
531 LM_STATUS
532 b44_MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice)
534 return LM_STATUS_SUCCESS;
538 int32
539 tx_cleanup_thread(void *us)
541 PLM_PACKET pPacket;
542 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK)(us);
543 struct be_b44_dev *pUmDevice = (struct be_b44_dev *)(us);
544 struct B_UM_PACKET *pUmPacket;
545 cpu_status cpu;
547 while (1) {
548 cpu = disable_interrupts();
549 acquire_spinlock(&pUmDevice->lock);
551 pPacket = (PLM_PACKET)
552 QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);
554 release_spinlock(&pUmDevice->lock);
555 restore_interrupts(cpu);
556 if (pPacket == 0)
557 break;
559 pUmPacket = (struct B_UM_PACKET *)(pPacket);
560 chunk_pool_put(pUmPacket->data);
561 pUmPacket->data = NULL;
563 cpu = disable_interrupts();
564 acquire_spinlock(&pUmDevice->lock);
565 QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
566 release_spinlock(&pUmDevice->lock);
567 restore_interrupts(cpu);
569 return LM_STATUS_SUCCESS;
/*LM_STATUS b44_MM_StartTxDma(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket);
LM_STATUS b44_MM_CompleteTxDma(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket);*/
576 LM_STATUS
577 b44_MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
578 PLM_VOID *pMemoryBlockVirt)
580 struct be_b44_dev *dev = (struct be_b44_dev *)(pDevice);
582 if (dev->mem_list_num == 16)
583 return LM_STATUS_FAILURE;
585 *pMemoryBlockVirt = dev->mem_list[(dev->mem_list_num)++] = (void *)malloc(BlockSize);
586 return LM_STATUS_SUCCESS;
590 LM_STATUS
591 b44_MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
592 PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy)
594 struct be_b44_dev *dev;
595 void *pvirt = NULL;
596 area_id area_desc;
597 physical_entry entry;
599 dev = (struct be_b44_dev *)(pDevice);
600 area_desc = dev->lockmem_list[dev->lockmem_list_num++] = create_area("broadcom_shared_mem",
601 &pvirt, B_ANY_KERNEL_ADDRESS, ROUND_UP_TO_PAGE(BlockSize),
602 B_32_BIT_FULL_LOCK, B_READ_AREA | B_WRITE_AREA);
603 if (area_desc < B_OK)
604 return LM_STATUS_FAILURE;
606 memset(pvirt, 0, BlockSize);
607 *pMemoryBlockVirt = (PLM_VOID) pvirt;
609 get_memory_map(pvirt,BlockSize,&entry,1);
610 *pMemoryBlockPhy = entry.address;
612 return LM_STATUS_SUCCESS;
616 LM_STATUS
617 b44_MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
619 pDevice->DisableAutoNeg = FALSE;
620 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
621 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
622 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
623 //pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[DEFAULT_TX_PACKET_DESC_COUNT];
624 pDevice->RxPacketDescCnt = DEFAULT_RX_PACKET_DESC_COUNT;
626 return LM_STATUS_SUCCESS;
630 LM_STATUS
631 b44_MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
633 #ifdef HAIKU_TARGET_PLATFORM_HAIKU
634 struct be_b44_dev *pUmDevice = (struct be_b44_dev *)pDevice;
636 if (pUmDevice->linkChangeSem != -1)
637 release_sem_etc(pUmDevice->linkChangeSem, 1,
638 B_DO_NOT_RESCHEDULE);
639 #endif
641 return LM_STATUS_SUCCESS;
645 LM_STATUS
646 b44_MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice)
648 int i;
649 struct B_UM_PACKET *pUmPacket;
650 PLM_PACKET pPacket;
652 for (i = 0; i < pDevice->RxPacketDescCnt; i++) {
653 pPacket = QQ_PopHead(&pDevice->RxPacketFreeQ.Container);
654 pUmPacket = (struct B_UM_PACKET *) pPacket;
655 pUmPacket->data = chunk_pool_get();
656 /*if (pUmPacket->data == 0) {
657 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
658 continue;
660 pPacket->u.Rx.pRxBufferVirt = pUmPacket->data;
661 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
664 return LM_STATUS_SUCCESS;
668 LM_STATUS
669 b44_MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
671 struct B_UM_PACKET *pUmPacket;
672 pUmPacket = (struct B_UM_PACKET *)pPacket;
673 chunk_pool_put(pUmPacket->data);
674 pUmPacket->data = NULL;
675 return LM_STATUS_SUCCESS;