2 * Copyright 2006-2007, Nathan Whitehorn.
3 * Distributed under the terms of the GPL License.
11 #include <ether_driver.h>
12 #ifdef HAIKU_TARGET_PLATFORM_HAIKU
13 # include <net/if_media.h>
18 #include <KernelExport.h>
/*
 * File-scope driver state.
 * NOTE(review): this chunk is a garbled extraction -- original source
 * line numbers (24, 28, ...) are fused into the text and statements are
 * split across lines.  Kept byte-identical; restore from upstream
 * before compiling.
 */
/* PCI bus manager module, resolved via get_module(B_PCI_MODULE_NAME). */
24 struct pci_module_info
*pci
= NULL
;
/* Published device names ("net/bcm440x/N"); slot after the last card is NULL. */
28 static char *sDeviceNames
[MAX_CARDS
+ 1];
/* Number of supported BCM440x cards found during driver init. */
29 static int sCardsFound
= 0;
/* Per-card soft state, indexed in step with sDeviceNames. */
30 struct be_b44_dev be_b44_dev_cards
[MAX_CARDS
];
/* Size of the per-packet bookkeeping struct used by the LM layer. */
32 int b44_Packet_Desc_Size
= sizeof(struct B_UM_PACKET
);
/* Round `size` up to the next multiple of the 4096-byte page size
 * (0 stays 0).  Rewritten: the original macro left its argument
 * unparenthesized (operator-precedence hazard for expression
 * arguments) and evaluated it up to three times. */
#define ROUND_UP_TO_PAGE(size) ((((size) + 4095) / 4096) * 4096)
36 /* -------- BeOS Driver Hooks ------------ */
/* Forward declarations for the hook functions wired into the
 * device_hooks table below; definitions appear later in this file.
 * NOTE(review): garbled extraction -- stray fused line numbers and
 * split statements kept byte-identical. */
38 status_t
b44_open(const char *name
, uint32 flags
, void **cookie
);
39 status_t
b44_close(void *cookie
);
40 status_t
b44_free(void *cookie
);
41 status_t
b44_ioctl(void *cookie
,uint32 op
,void *data
,size_t len
);
42 status_t
b44_read(void *cookie
,off_t pos
,void *data
,size_t *numBytes
);
43 status_t
b44_write(void *cookie
,off_t pos
,const void *data
,size_t *numBytes
);
/* Interrupt handler, registered with install_io_interrupt_handler(). */
44 int32
b44_interrupt(void *cookie
);
/* Reclaims transmitted packets back onto the TX free queue. */
45 int32
tx_cleanup_thread(void *us
);
/* Required kernel driver API version export. */
48 int32 api_version
= B_CUR_DRIVER_API_VERSION
;
/* Fragment of publish_devices(): returns the NULL-terminated list of
 * device names built in init_driver().
 * NOTE(review): the enclosing function header was lost in extraction. */
61 return (const char **)sDeviceNames
;
/* find_device(): maps a published name to the hook table (body lost). */
66 find_device(const char *name
)
/* Device hook table handed to the kernel (initializer contents lost). */
68 static device_hooks b44_hooks
= {
/*
 * Fragment of init_driver(): probe the PCI bus for supported Broadcom
 * BCM440x chips and set up one be_b44_dev slot per card.
 * NOTE(review): garbled extraction -- the function header, loop braces
 * and several statements are missing; kept byte-identical.
 */
/* Acquire the PCI bus manager; bail out if unavailable. */
90 if (get_module(B_PCI_MODULE_NAME
,(module_info
**)&pci
) < B_OK
)
/* Walk all PCI devices, matching Broadcom (0x14e4) ethernet chips
 * with the three supported device IDs. */
93 while (pci
->get_nth_pci_info(i
++, &dev_info
) == 0) {
94 if (dev_info
.class_base
!= PCI_network
95 || dev_info
.class_sub
!= PCI_ethernet
96 || dev_info
.vendor_id
!= 0x14e4
97 || (dev_info
.device_id
!= 0x4401
98 && dev_info
.device_id
!= 0x4402
99 && dev_info
.device_id
!= 0x170c))
/* Stop publishing once the static card table is full. */
102 if (sCardsFound
>= MAX_CARDS
)
/* Build the published name and initialize the per-card state. */
105 sDeviceNames
[sCardsFound
] = (char *)malloc(16 /* net/bcm440x/xx */);
106 sprintf(sDeviceNames
[sCardsFound
], "net/bcm440x/%d", sCardsFound
);
107 be_b44_dev_cards
[sCardsFound
].pci_data
= dev_info
;
108 be_b44_dev_cards
[sCardsFound
].packet_release_sem
= create_sem(0,
109 sDeviceNames
[sCardsFound
]);
110 be_b44_dev_cards
[sCardsFound
].mem_list_num
= 0;
111 be_b44_dev_cards
[sCardsFound
].lockmem_list_num
= 0;
112 be_b44_dev_cards
[sCardsFound
].opened
= 0;
113 be_b44_dev_cards
[sCardsFound
].block
= 1;
114 be_b44_dev_cards
[sCardsFound
].lock
= 0;
115 #ifdef HAIKU_TARGET_PLATFORM_HAIKU
/* -1 means "no listener"; set by ETHER_SET_LINK_STATE_SEM ioctl. */
116 be_b44_dev_cards
[sCardsFound
].linkChangeSem
= -1;
/* If the LM layer rejects the adapter, undo everything done so far. */
119 if (b44_LM_GetAdapterInfo(&be_b44_dev_cards
[sCardsFound
].lm_dev
) != LM_STATUS_SUCCESS
) {
120 for (i
= 0; i
< sCardsFound
; i
++) {
121 free((void *)sDeviceNames
[i
]);
122 delete_sem(be_b44_dev_cards
[i
].packet_release_sem
);
124 put_module(B_PCI_MODULE_NAME
);
128 QQ_InitQueue(&be_b44_dev_cards
[sCardsFound
].RxPacketReadQ
.Container
,
129 MAX_RX_PACKET_DESC_COUNT
);
/* Shared buffer pool sized for all cards found so far. */
134 mempool_init((MAX_RX_PACKET_DESC_COUNT
+10) * sCardsFound
);
/* NULL-terminate the published name list. */
136 sDeviceNames
[sCardsFound
] = NULL
;
/*
 * Fragment of uninit_driver(): release every per-card resource that
 * init_driver() and the MM hooks allocated.
 * NOTE(review): garbled extraction -- the function header and closing
 * lines are missing; kept byte-identical.
 */
144 struct be_b44_dev
*pUmDevice
;
147 for (j
= 0; j
< sCardsFound
; j
++) {
148 pUmDevice
= &be_b44_dev_cards
[j
];
/* Free malloc'd blocks handed out by b44_MM_AllocateMemory(). */
149 for (i
= 0; i
< pUmDevice
->mem_list_num
; i
++)
150 free(pUmDevice
->mem_list
[i
]);
/* Delete areas created by b44_MM_AllocateSharedMemory(). */
151 for (i
= 0; i
< pUmDevice
->lockmem_list_num
; i
++)
152 delete_area(pUmDevice
->lockmem_list
[i
]);
/* Unmap the register window mapped in b44_MM_MapMemBase(). */
154 delete_area(pUmDevice
->mem_base
);
156 delete_sem(be_b44_dev_cards
[j
].packet_release_sem
);
157 free((void *)sDeviceNames
[j
]);
/*
 * b44_open(): open hook -- look up the card by published name, claim
 * it exclusively, hook the interrupt line and bring the adapter up.
 * NOTE(review): garbled extraction -- return type line, braces, and
 * the error-path/return statements are missing; kept byte-identical.
 */
165 b44_open(const char *name
, uint32 flags
, void **cookie
)
167 struct be_b44_dev
*pDevice
= NULL
;
/* Match the requested name against the published device list. */
171 for (i
= 0; i
< sCardsFound
; i
++) {
172 if (strcmp(sDeviceNames
[i
], name
) == 0) {
173 *cookie
= pDevice
= &be_b44_dev_cards
[i
];
179 return B_FILE_NOT_FOUND
;
/* Single-open policy: atomic_or returns the previous value, so a
 * nonzero result means the card is already open. */
181 if (atomic_or(&pDevice
->opened
,1)) {
182 *cookie
= pDevice
= NULL
;
186 install_io_interrupt_handler(pDevice
->pci_data
.u
.h0
.interrupt_line
,
187 b44_interrupt
, *cookie
, 0);
/* Roll back the claim and the interrupt hook if the LM init fails. */
188 if (b44_LM_InitializeAdapter(&pDevice
->lm_dev
) != LM_STATUS_SUCCESS
) {
189 atomic_and(&pDevice
->opened
, 0);
190 remove_io_interrupt_handler(pDevice
->pci_data
.u
.h0
.interrupt_line
,
191 b44_interrupt
, *cookie
);
196 /*QQ_InitQueue(&pDevice->rx_out_of_buf_q.Container,
197 MAX_RX_PACKET_DESC_COUNT);*/
199 b44_LM_EnableInterrupt(&pDevice
->lm_dev
);
/*
 * b44_close(): close hook -- release the single-open claim and halt
 * the adapter.
 * NOTE(review): garbled extraction -- return type, braces and the
 * lines between the cast and atomic_and (possibly a NULL-cookie guard
 * or interrupt disable) are missing; kept byte-identical.
 */
205 b44_close(void *cookie
)
207 struct be_b44_dev
*pUmDevice
= (struct be_b44_dev
*)cookie
;
211 atomic_and(&pUmDevice
->opened
,0);
212 b44_LM_Halt(&pUmDevice
->lm_dev
);
/*
 * b44_free(): free hook -- unhook the interrupt handler installed in
 * b44_open().
 * NOTE(review): garbled extraction -- return type, braces and some
 * interior lines are missing; kept byte-identical.
 */
219 b44_free(void *cookie
)
221 struct be_b44_dev
*pUmDevice
= (struct be_b44_dev
*)cookie
;
225 remove_io_interrupt_handler(pUmDevice
->pci_data
.u
.h0
.interrupt_line
,
226 b44_interrupt
, cookie
);
/*
 * b44_ioctl(): control hook dispatching ETHER_* opcodes.
 * NOTE(review): garbled extraction -- the switch(op) skeleton (most
 * case labels, breaks, braces, default) is missing; only fragments of
 * the individual handlers remain.  Kept byte-identical; restore from
 * upstream before use.
 */
232 b44_ioctl(void *cookie
,uint32 op
, void *data
, size_t len
)
234 struct be_b44_dev
*pUmDevice
= (struct be_b44_dev
*)cookie
;
/* ETHER_GETADDR handler fragment: copy the 6-byte MAC to the caller. */
243 memcpy(data
, pUmDevice
->lm_dev
.NodeAddress
, 6);
/* ETHER_NONBLOCK handler fragment: nonzero *data disables blocking. */
246 pUmDevice
->block
= !*((uint8
*)data
);
/* ETHER_ADDMULTI / ETHER_REMMULTI handler fragments. */
249 return (b44_LM_MulticastAdd(&pUmDevice
->lm_dev
,(PLM_UINT8
)(data
)) == LM_STATUS_SUCCESS
) ? B_OK
: B_ERROR
;
251 return (b44_LM_MulticastDel(&pUmDevice
->lm_dev
,(PLM_UINT8
)(data
)) == LM_STATUS_SUCCESS
) ? B_OK
: B_ERROR
;
/* Toggle promiscuous mode in the LM receive mask. */
252 case ETHER_SETPROMISC
:
253 if (*((uint8
*)(data
))) {
254 b44_LM_SetReceiveMask(&pUmDevice
->lm_dev
,
255 pUmDevice
->lm_dev
.ReceiveMask
| LM_PROMISCUOUS_MODE
);
257 b44_LM_SetReceiveMask(&pUmDevice
->lm_dev
,
258 pUmDevice
->lm_dev
.ReceiveMask
& ~LM_PROMISCUOUS_MODE
);
/* Legacy (non-Haiku) link state report written straight into *data. */
261 #ifndef HAIKU_TARGET_PLATFORM_HAIKU
262 case ETHER_GETLINKSTATE
:
264 ether_link_state_t
*state_buffer
= (ether_link_state_t
*)(data
);
265 state_buffer
->link_speed
= (pUmDevice
->lm_dev
.LineSpeed
== LM_LINE_SPEED_10MBPS
) ? 10 : 100;
266 state_buffer
->link_quality
= (pUmDevice
->lm_dev
.LinkStatus
== LM_STATUS_LINK_DOWN
) ? 0.0 : 1.0;
267 state_buffer
->duplex_mode
= (pUmDevice
->lm_dev
.DuplexMode
== LM_DUPLEX_MODE_FULL
);
/* Haiku link state: IFM_* media flags copied out via user_memcpy. */
271 case ETHER_GET_LINK_STATE
:
273 ether_link_state_t state
;
/* Older cores apparently need an explicit poll -- TODO confirm. */
275 if (pUmDevice
->lm_dev
.corerev
< 7) {
276 b44_LM_PollLink(&pUmDevice
->lm_dev
);
278 state
.media
= (pUmDevice
->lm_dev
.LinkStatus
279 == LM_STATUS_LINK_DOWN
? 0 : IFM_ACTIVE
) | IFM_ETHER
;
280 switch (pUmDevice
->lm_dev
.LineSpeed
) {
281 case LM_LINE_SPEED_10MBPS
:
282 state
.media
|= IFM_10_T
;
283 state
.speed
= 10000000;
285 case LM_LINE_SPEED_100MBPS
:
286 state
.media
|= IFM_100_TX
;
287 state
.speed
= 100000000;
292 state
.media
|= (pUmDevice
->lm_dev
.DuplexMode
293 == LM_DUPLEX_MODE_FULL
? IFM_FULL_DUPLEX
: IFM_HALF_DUPLEX
);
294 state
.quality
= 1000;
296 return user_memcpy(data
, &state
, sizeof(ether_link_state_t
));
/* Register a semaphore to be released on link changes. */
299 case ETHER_SET_LINK_STATE_SEM
:
301 if (user_memcpy(&pUmDevice
->linkChangeSem
, data
, sizeof(sem_id
)) < B_OK
) {
302 pUmDevice
->linkChangeSem
= -1;
303 return B_BAD_ADDRESS
;
315 b44_interrupt(void *cookie
)
317 struct be_b44_dev
*pUmDevice
= (struct be_b44_dev
*)cookie
;
318 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
320 if (!pDevice
->InitDone
)
321 return B_UNHANDLED_INTERRUPT
;
323 if (b44_LM_ServiceInterrupts(pDevice
) == 12)
324 return B_UNHANDLED_INTERRUPT
;
326 if (QQ_GetEntryCnt(&pDevice
->RxPacketFreeQ
.Container
)) {
327 b44_LM_QueueRxPackets(pDevice
);
328 return B_INVOKE_SCHEDULER
;
331 return B_HANDLED_INTERRUPT
;
/*
 * b44_read(): read hook -- hand one received frame to the caller.
 * Blocks on packet_release_sem unless ETHER_NONBLOCK disabled blocking.
 * NOTE(review): garbled extraction -- declarations (pPacket, cpu),
 * the empty-queue/error return paths and the final return are missing;
 * kept byte-identical.
 */
336 b44_read(void *cookie
, off_t pos
, void *data
, size_t *numBytes
)
338 struct be_b44_dev
*pUmDevice
= (struct be_b44_dev
*)cookie
;
339 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
341 struct B_UM_PACKET
*pUmPacket
;
/* Wait for a packet (blocking mode) ... */
344 if (pUmDevice
->block
)
345 acquire_sem(pUmDevice
->packet_release_sem
);
347 acquire_sem_etc(pUmDevice
->packet_release_sem
,1,B_RELATIVE_TIMEOUT
,0); // Decrement the receive sem anyway, but don't block
/* Pop the next received packet under the interrupt spinlock. */
349 cpu
= disable_interrupts();
350 acquire_spinlock(&pUmDevice
->lock
);
352 pPacket
= (PLM_PACKET
)
353 QQ_PopHead(&pUmDevice
->RxPacketReadQ
.Container
);
355 release_spinlock(&pUmDevice
->lock
);
356 restore_interrupts(cpu
);
363 pUmPacket
= (struct B_UM_PACKET
*)pPacket
;
/* Bad or oversized (> 1518 bytes) frame: recycle the descriptor. */
364 if (pPacket
->PacketStatus
!= LM_STATUS_SUCCESS
365 || pPacket
->PacketSize
> 1518) {
366 cpu
= disable_interrupts();
367 acquire_spinlock(&pUmDevice
->lock
);
369 QQ_PushTail(&pDevice
->RxPacketFreeQ
.Container
, pPacket
);
371 release_spinlock(&pUmDevice
->lock
);
372 restore_interrupts(cpu
);
/* Clamp the copy length to the actual packet size. */
377 if (pPacket
->PacketSize
/*-pDevice->rxoffset*/ < *numBytes
)
378 *numBytes
= pPacket
->PacketSize
/*-pDevice->rxoffset*/;
380 memcpy(data
, pUmPacket
->data
+ pDevice
->rxoffset
, *numBytes
);
/* Return the descriptor to the free queue. */
381 cpu
= disable_interrupts();
382 acquire_spinlock(&pUmDevice
->lock
);
384 QQ_PushTail(&pDevice
->RxPacketFreeQ
.Container
, pPacket
);
386 release_spinlock(&pUmDevice
->lock
);
387 restore_interrupts(cpu
);
/*
 * b44_write(): write hook -- copy the caller's frame into a driver
 * buffer and hand it to the LM layer for transmission.
 * NOTE(review): garbled extraction -- declarations (pPacket), the
 * link-down guard, the no-free-descriptor path and the final return
 * are missing; kept byte-identical.
 */
394 b44_write(void *cookie
, off_t pos
, const void *data
, size_t *numBytes
)
396 struct be_b44_dev
*pUmDevice
= (struct be_b44_dev
*)cookie
;
397 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
399 struct B_UM_PACKET
*pUmPacket
;
401 /*if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) || !pDevice->InitDone)
/* Grab a free TX descriptor. */
406 pPacket
= (PLM_PACKET
)
407 QQ_PopHead(&pDevice
->TxPacketFreeQ
.Container
);
411 pUmPacket
= (struct B_UM_PACKET
*)pPacket
;
412 pUmPacket
->data
= chunk_pool_get();
414 memcpy(pUmPacket
->data
/*+pDevice->dataoffset*/,data
,*numBytes
); /* no guarantee data is contiguous, so we have to copy */
415 pPacket
->PacketSize
= pUmPacket
->size
= *numBytes
/*+pDevice->rxoffset*/;
417 pPacket
->u
.Tx
.FragCount
= 1;
/* Reclaim already-transmitted packets before queueing a new one. */
419 tx_cleanup_thread(pUmDevice
);
421 b44_LM_SendPacket(pDevice
, pPacket
);
426 // #pragma mark - Broadcom MM hooks
430 b44_MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 Offset
,
434 get_module(B_PCI_MODULE_NAME
,(module_info
**)&pci
);
436 *pValue16
= (LM_UINT16
)pci
->read_pci_config(((struct be_b44_dev
*)(pDevice
))->pci_data
.bus
,((struct be_b44_dev
*)(pDevice
))->pci_data
.device
,((struct be_b44_dev
*)(pDevice
))->pci_data
.function
,(uchar
)Offset
,sizeof(LM_UINT16
));
437 return LM_STATUS_SUCCESS
;
442 b44_MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 Offset
,
446 get_module(B_PCI_MODULE_NAME
,(module_info
**)&pci
);
448 pci
->write_pci_config(((struct be_b44_dev
*)(pDevice
))->pci_data
.bus
,((struct be_b44_dev
*)(pDevice
))->pci_data
.device
,((struct be_b44_dev
*)(pDevice
))->pci_data
.function
,(uchar
)Offset
,sizeof(LM_UINT16
),(uint32
)Value16
);
449 return LM_STATUS_SUCCESS
;
454 b44_MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 Offset
,
458 get_module(B_PCI_MODULE_NAME
,(module_info
**)&pci
);
460 *pValue32
= (LM_UINT32
)pci
->read_pci_config(((struct be_b44_dev
*)(pDevice
))->pci_data
.bus
,((struct be_b44_dev
*)(pDevice
))->pci_data
.device
,((struct be_b44_dev
*)(pDevice
))->pci_data
.function
,(uchar
)Offset
,sizeof(LM_UINT32
));
461 return LM_STATUS_SUCCESS
;
466 b44_MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 Offset
,
470 get_module(B_PCI_MODULE_NAME
,(module_info
**)&pci
);
472 pci
->write_pci_config(((struct be_b44_dev
*)(pDevice
))->pci_data
.bus
,((struct be_b44_dev
*)(pDevice
))->pci_data
.device
,((struct be_b44_dev
*)(pDevice
))->pci_data
.function
,(uchar
)Offset
,sizeof(LM_UINT32
),(uint32
)Value32
);
473 return LM_STATUS_SUCCESS
;
478 b44_MM_MapMemBase(PLM_DEVICE_BLOCK pDevice
)
480 struct be_b44_dev
*pUmDevice
= (struct be_b44_dev
*)(pDevice
);
481 size_t size
= pUmDevice
->pci_data
.u
.h0
.base_register_sizes
[0];
484 get_module(B_PCI_MODULE_NAME
,(module_info
**)&pci
);
486 size
= ROUNDUP(size
,B_PAGE_SIZE
);
487 pUmDevice
->mem_base
= map_physical_memory("bcm440x_regs",
488 pUmDevice
->pci_data
.u
.h0
.base_registers
[0], size
,
489 B_ANY_KERNEL_BLOCK_ADDRESS
, B_READ_AREA
| B_WRITE_AREA
,
490 (void **)(&pDevice
->pMappedMemBase
));
492 return LM_STATUS_SUCCESS
;
498 b44_MM_MapIoBase(PLM_DEVICE_BLOCK pDevice)
501 get_module(B_PCI_MODULE_NAME,(module_info **)&pci);
503 pDevice->pMappedMemBase = pci->ram_address(((struct be_b44_dev *)(pDevice))->pci_data.memory_base);
504 return LM_STATUS_SUCCESS;
510 b44_MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice
)
512 struct be_b44_dev
*dev
= (struct be_b44_dev
*)pDevice
;
516 pPacket
= (PLM_PACKET
)
517 QQ_PopHead(&pDevice
->RxPacketReceivedQ
.Container
);
521 acquire_spinlock(&dev
->lock
);
522 release_sem_etc(dev
->packet_release_sem
, 1, B_DO_NOT_RESCHEDULE
);
523 release_spinlock(&dev
->lock
);
524 QQ_PushTail(&dev
->RxPacketReadQ
.Container
, pPacket
);
527 return LM_STATUS_SUCCESS
;
532 b44_MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice
)
534 return LM_STATUS_SUCCESS
;
539 tx_cleanup_thread(void *us
)
542 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
)(us
);
543 struct be_b44_dev
*pUmDevice
= (struct be_b44_dev
*)(us
);
544 struct B_UM_PACKET
*pUmPacket
;
548 cpu
= disable_interrupts();
549 acquire_spinlock(&pUmDevice
->lock
);
551 pPacket
= (PLM_PACKET
)
552 QQ_PopHead(&pDevice
->TxPacketXmittedQ
.Container
);
554 release_spinlock(&pUmDevice
->lock
);
555 restore_interrupts(cpu
);
559 pUmPacket
= (struct B_UM_PACKET
*)(pPacket
);
560 chunk_pool_put(pUmPacket
->data
);
561 pUmPacket
->data
= NULL
;
563 cpu
= disable_interrupts();
564 acquire_spinlock(&pUmDevice
->lock
);
565 QQ_PushTail(&pDevice
->TxPacketFreeQ
.Container
, pPacket
);
566 release_spinlock(&pUmDevice
->lock
);
567 restore_interrupts(cpu
);
569 return LM_STATUS_SUCCESS
;
572 /*LM_STATUS b44_MM_StartTxDma(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket);
573 LM_STATUS b44_MM_CompleteTxDma(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket);*/
577 b44_MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 BlockSize
,
578 PLM_VOID
*pMemoryBlockVirt
)
580 struct be_b44_dev
*dev
= (struct be_b44_dev
*)(pDevice
);
582 if (dev
->mem_list_num
== 16)
583 return LM_STATUS_FAILURE
;
585 *pMemoryBlockVirt
= dev
->mem_list
[(dev
->mem_list_num
)++] = (void *)malloc(BlockSize
);
586 return LM_STATUS_SUCCESS
;
591 b44_MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 BlockSize
,
592 PLM_VOID
*pMemoryBlockVirt
, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy
)
594 struct be_b44_dev
*dev
;
597 physical_entry entry
;
599 dev
= (struct be_b44_dev
*)(pDevice
);
600 area_desc
= dev
->lockmem_list
[dev
->lockmem_list_num
++] = create_area("broadcom_shared_mem",
601 &pvirt
, B_ANY_KERNEL_ADDRESS
, ROUND_UP_TO_PAGE(BlockSize
),
602 B_32_BIT_FULL_LOCK
, B_READ_AREA
| B_WRITE_AREA
);
603 if (area_desc
< B_OK
)
604 return LM_STATUS_FAILURE
;
606 memset(pvirt
, 0, BlockSize
);
607 *pMemoryBlockVirt
= (PLM_VOID
) pvirt
;
609 get_memory_map(pvirt
,BlockSize
,&entry
,1);
610 *pMemoryBlockPhy
= entry
.address
;
612 return LM_STATUS_SUCCESS
;
617 b44_MM_GetConfig(PLM_DEVICE_BLOCK pDevice
)
619 pDevice
->DisableAutoNeg
= FALSE
;
620 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_AUTO
;
621 pDevice
->RequestedDuplexMode
= LM_DUPLEX_MODE_FULL
;
622 pDevice
->FlowControlCap
|= LM_FLOW_CONTROL_AUTO_PAUSE
;
623 //pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[DEFAULT_TX_PACKET_DESC_COUNT];
624 pDevice
->RxPacketDescCnt
= DEFAULT_RX_PACKET_DESC_COUNT
;
626 return LM_STATUS_SUCCESS
;
631 b44_MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice
, LM_STATUS Status
)
633 #ifdef HAIKU_TARGET_PLATFORM_HAIKU
634 struct be_b44_dev
*pUmDevice
= (struct be_b44_dev
*)pDevice
;
636 if (pUmDevice
->linkChangeSem
!= -1)
637 release_sem_etc(pUmDevice
->linkChangeSem
, 1,
638 B_DO_NOT_RESCHEDULE
);
641 return LM_STATUS_SUCCESS
;
646 b44_MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice
)
649 struct B_UM_PACKET
*pUmPacket
;
652 for (i
= 0; i
< pDevice
->RxPacketDescCnt
; i
++) {
653 pPacket
= QQ_PopHead(&pDevice
->RxPacketFreeQ
.Container
);
654 pUmPacket
= (struct B_UM_PACKET
*) pPacket
;
655 pUmPacket
->data
= chunk_pool_get();
656 /*if (pUmPacket->data == 0) {
657 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
660 pPacket
->u
.Rx
.pRxBufferVirt
= pUmPacket
->data
;
661 QQ_PushTail(&pDevice
->RxPacketFreeQ
.Container
, pPacket
);
664 return LM_STATUS_SUCCESS
;
669 b44_MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice
, PLM_PACKET pPacket
)
671 struct B_UM_PACKET
*pUmPacket
;
672 pUmPacket
= (struct B_UM_PACKET
*)pPacket
;
673 chunk_pool_put(pUmPacket
->data
);
674 pUmPacket
->data
= NULL
;
675 return LM_STATUS_SUCCESS
;