/**
 * Airgo MIMO wireless driver
 *
 * Copyright (c) 2007 Li YanBo <dreamfly281@gmail.com>
 *
 * Thanks to Jeff Williams <angelbane@gmail.com> for the reverse engineering
 * work and for publishing the SPECS at http://airgo.wdwconsulting.net/mymoin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/pci.h>
#include <linux/delay.h>
#include "agnx.h"
#include "debug.h"
#include "phy.h"

unsigned int rx_frame_cnt = 0;
/* unsigned int local_tx_sent_cnt = 0; */

static inline void disable_rx_engine(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;

	iowrite32(0x100, ctl + AGNX_CIR_RXCTL);
	/* Wait for RX Control to have the Disable Rx Interrupt (0x100) set */
	ioread32(ctl + AGNX_CIR_RXCTL);
}

static inline void enable_rx_engine(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;

	iowrite32(0x80, ctl + AGNX_CIR_RXCTL);
	ioread32(ctl + AGNX_CIR_RXCTL);
}

inline void disable_rx_interrupt(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	disable_rx_engine(priv);
	reg = ioread32(ctl + AGNX_CIR_RXCFG);
	reg &= ~ENABLE_RX_INTERRUPT;
	iowrite32(reg, ctl + AGNX_CIR_RXCFG);
	ioread32(ctl + AGNX_CIR_RXCFG);
}

inline void enable_rx_interrupt(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 reg;

	reg = ioread32(ctl + AGNX_CIR_RXCFG);
	reg |= ENABLE_RX_INTERRUPT;
	iowrite32(reg, ctl + AGNX_CIR_RXCFG);
	ioread32(ctl + AGNX_CIR_RXCFG);
	enable_rx_engine(priv);
}

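/*
 * RX descriptor helpers: each RX slot pairs an agnx_desc, which is shared
 * with the card, with an agnx_info bookkeeping entry.  rx_desc_init() arms a
 * slot by allocating an skb large enough for a maximum-sized frame plus the
 * agnx_hdr the card prepends, mapping it for DMA and handing the descriptor
 * to the card by setting the OWNER bit.
 */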
static inline void rx_desc_init(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->rx.desc + idx;
	struct agnx_info *info = priv->rx.info + idx;

	memset(info, 0, sizeof(*info));

	info->dma_len = IEEE80211_MAX_RTS_THRESHOLD + sizeof(struct agnx_hdr);
	info->skb = dev_alloc_skb(info->dma_len);
	if (info->skb == NULL)
		agnx_bug("refill err");

	info->mapping = pci_map_single(priv->pdev, skb_tail_pointer(info->skb),
				       info->dma_len, PCI_DMA_FROMDEVICE);
	memset(desc, 0, sizeof(*desc));
	desc->dma_addr = cpu_to_be32(info->mapping);
	/* Set the owner to the card */
	desc->frag = cpu_to_be32(be32_to_cpu(desc->frag) | OWNER);
}

static inline void rx_desc_reinit(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_info *info = priv->rx.info + idx;

	/* mac80211 will free the skb buffer, so we needn't free it here */
	pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_FROMDEVICE);
	rx_desc_init(priv, idx);
}

static inline void rx_desc_reusing(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->rx.desc + idx;
	struct agnx_info *info = priv->rx.info + idx;

	memset(desc, 0, sizeof(*desc));
	desc->dma_addr = cpu_to_be32(info->mapping);
	/* Set the owner to the card */
	desc->frag = cpu_to_be32(be32_to_cpu(desc->frag) | OWNER);
}

static void rx_desc_free(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->rx.desc + idx;
	struct agnx_info *info = priv->rx.info + idx;

	BUG_ON(!desc || !info);

	pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb(info->skb);
	memset(info, 0, sizeof(*info));
	memset(desc, 0, sizeof(*desc));
}

static inline void __tx_desc_free(struct agnx_priv *priv,
				  struct agnx_desc *desc, struct agnx_info *info)
{
	BUG_ON(!desc || !info);
	/* TODO make sure mapping, skb and len are consistent */
	pci_unmap_single(priv->pdev, info->mapping,
			 info->dma_len, PCI_DMA_TODEVICE);
	if (info->type == PACKET)
		dev_kfree_skb(info->skb);
	memset(info, 0, sizeof(*info));
	memset(desc, 0, sizeof(*desc));
}

static void txm_desc_free(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->txm.desc + idx;
	struct agnx_info *info = priv->txm.info + idx;

	__tx_desc_free(priv, desc, info);
}

static void txd_desc_free(struct agnx_priv *priv, unsigned int idx)
{
	struct agnx_desc *desc = priv->txd.desc + idx;
	struct agnx_info *info = priv->txd.info + idx;

	__tx_desc_free(priv, desc, info);
}

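/*
 * Ring setup: reset the software indices, arm every RX slot, clear the TXM
 * and TXD rings, then program the ring start/end addresses into the card's
 * CIR registers.
 */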
int fill_rings(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	unsigned int i;
	u32 reg;

	priv->txd.idx_sent = priv->txm.idx_sent = 0;
	priv->rx.idx = priv->txm.idx = priv->txd.idx = 0;

	for (i = 0; i < priv->rx.size; i++)
		rx_desc_init(priv, i);
	for (i = 0; i < priv->txm.size; i++) {
		memset(priv->txm.desc + i, 0, sizeof(struct agnx_desc));
		memset(priv->txm.info + i, 0, sizeof(struct agnx_info));
	}
	for (i = 0; i < priv->txd.size; i++) {
		memset(priv->txd.desc + i, 0, sizeof(struct agnx_desc));
		memset(priv->txd.info + i, 0, sizeof(struct agnx_info));
	}

	/* FIXME Set the card RX TXM and TXD address */
	agnx_write32(ctl, AGNX_CIR_RXCMSTART, priv->rx.dma);
	agnx_write32(ctl, AGNX_CIR_RXCMEND, priv->txm.dma);

	agnx_write32(ctl, AGNX_CIR_TXMSTART, priv->txm.dma);
	agnx_write32(ctl, AGNX_CIR_TXMEND, priv->txd.dma);

	agnx_write32(ctl, AGNX_CIR_TXDSTART, priv->txd.dma);
	agnx_write32(ctl, AGNX_CIR_TXDEND, priv->txd.dma +
		     sizeof(struct agnx_desc) * priv->txd.size);

	/* FIXME Relinquish control of rings to card */
	reg = agnx_read32(ctl, AGNX_CIR_BLKCTL);
	reg &= ~0x800;
	agnx_write32(ctl, AGNX_CIR_BLKCTL, reg);
	return 0;
} /* fill_rings */

void unfill_rings(struct agnx_priv *priv)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&priv->lock, flags);

	for (i = 0; i < priv->rx.size; i++)
		rx_desc_free(priv, i);
	for (i = 0; i < priv->txm.size; i++)
		txm_desc_free(priv, i);
	for (i = 0; i < priv->txd.size; i++)
		txd_desc_free(priv, i);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Extract the bitrate out of a CCK PLCP header.
   Copied from the bcm43xx driver. */
static inline u8 agnx_plcp_get_bitrate_cck(__be32 *phyhdr_11b)
{
	/* The CCK SIGNAL field carries the rate in units of 100 kbit/s */
	switch (*(u8 *)phyhdr_11b) {
	case 0x0A:	/* 1 Mb/s */
		return 0;
	case 0x14:	/* 2 Mb/s */
		return 1;
	case 0x37:	/* 5.5 Mb/s */
		return 2;
	case 0x6E:	/* 11 Mb/s */
		return 3;
	}
	agnx_bug("Wrong plcp rate");
	return 0;
}

static inline u8 agnx_plcp_get_bitrate_ofdm(__be32 *phyhdr_11g)
{
	u8 rate = *(u8 *)phyhdr_11g & 0xF;

	printk(PFX "G mode rate is 0x%x\n", rate);
	return rate;
}

static void get_rx_stats(struct agnx_priv *priv, struct agnx_hdr *hdr,
			 struct ieee80211_rx_status *stat)
{
	void __iomem *ctl = priv->ctl;
	u8 *rssi;
	u32 noise;
	/* FIXME just for test */
	int snr = 40;		/* signal-to-noise ratio */

	memset(stat, 0, sizeof(*stat));

	rssi = (u8 *)&hdr->phy_stats_lo;
/*	stat->ssi = (rssi[0] + rssi[1] + rssi[2]) / 3; */

	noise = ioread32(ctl + AGNX_GCR_NOISE0);
	noise += ioread32(ctl + AGNX_GCR_NOISE1);
	noise += ioread32(ctl + AGNX_GCR_NOISE2);
	stat->noise = noise / 3;

/*	snr = stat->ssi - stat->noise; */
	if (snr >= 0 && snr < 40)
		stat->signal = 5 * snr / 2;
	else if (snr >= 40)
		stat->signal = 100;
	else
		stat->signal = 0;

	if (hdr->_11b0 && !hdr->_11g0) {
		stat->rate_idx = agnx_plcp_get_bitrate_cck(&hdr->_11b0);
	} else if (!hdr->_11b0 && hdr->_11g0) {
		printk(PFX "RX: Found G mode packet\n");
		stat->rate_idx = agnx_plcp_get_bitrate_ofdm(&hdr->_11g0);
	} else
		agnx_bug("Unknown packets type");

	stat->band = IEEE80211_BAND_2GHZ;
	stat->freq = agnx_channels[priv->channel - 1].center_freq;
/*	stat->antenna = 3; */
/*	stat->mactime = be32_to_cpu(hdr->time_stamp); */
/*	stat->channel = priv->channel; */
}

/* Put the 802.11 header back in front of the payload fragment */
static inline void combine_hdr_frag(struct ieee80211_hdr *ieeehdr,
				    struct sk_buff *skb)
{
	u16 fctl;
	unsigned int hdrlen;

	fctl = le16_to_cpu(ieeehdr->frame_control);
	hdrlen = ieee80211_hdrlen(fctl);

	if (hdrlen < (2 + 2 + 6)/* minimum hdr */ ||
	    hdrlen > sizeof(struct ieee80211_mgmt)) {
		printk(KERN_ERR PFX "hdr len is %d\n", hdrlen);
		agnx_bug("Wrong ieee80211 hdr detected");
	}
	skb_push(skb, hdrlen);
	memcpy(skb->data, ieeehdr, hdrlen);
} /* combine_hdr_frag */

static inline int agnx_packet_check(struct agnx_priv *priv, struct agnx_hdr *agnxhdr,
				    unsigned packet_len)
{
	if (agnx_get_bits(CRC_FAIL, CRC_FAIL_SHIFT, be32_to_cpu(agnxhdr->reg1)) == 1) {
		printk(PFX "RX: CRC check fail\n");
		goto drop;
	}
	if (packet_len > 2048) {
		printk(PFX "RX: Too long packet detected\n");
		goto drop;
	}

	/* FIXME Only usable in Promiscuous Mode; for Managed mode exclude the FCS */
/*	if (packet_len - sizeof(*agnxhdr) < FCS_LEN) { */
/*		printk(PFX "RX: Too short packet detected\n"); */
/*		goto drop; */
/*	} */
	return 0;
drop:
	priv->stats.dot11FCSErrorCount++;
	return -1;
}

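/*
 * RX interrupt handling: walk the RX ring starting at priv->rx.idx.  A
 * descriptor whose OWNER bit is still set has not been filled by the card
 * yet, so stop there.  For completed frames, sanity-check the agnx header,
 * strip it, push the original 802.11 header back in front of the payload,
 * hand the skb to mac80211 and then re-arm the slot with a fresh skb.
 */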
void handle_rx_irq(struct agnx_priv *priv)
{
	struct ieee80211_rx_status status;

	do {
		struct agnx_desc *desc;
		struct agnx_info *info;
		struct agnx_hdr *hdr;
		struct sk_buff *skb;
		u32 frag;
		u16 fctl;
		unsigned int len;
		unsigned int i = priv->rx.idx % priv->rx.size;

		desc = priv->rx.desc + i;
		frag = be32_to_cpu(desc->frag);
		if (frag & OWNER)	/* the card hasn't filled this slot yet */
			break;

		info = priv->rx.info + i;
		skb = info->skb;
		hdr = (struct agnx_hdr *)(skb->data);

		len = (frag & PACKET_LEN) >> PACKET_LEN_SHIFT;
		if (agnx_packet_check(priv, hdr, len) == -1) {
			rx_desc_reusing(priv, i);
			continue;
		}
		skb_put(skb, len);

		fctl = le16_to_cpu(((struct ieee80211_hdr *)hdr->mac_hdr)->frame_control);
		if ((fctl & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_BEACON)
			dump_ieee80211_hdr((struct ieee80211_hdr *)hdr->mac_hdr, "RX");

		if (hdr->_11b0 && !hdr->_11g0) {
			/* B mode packet */
/*			u16 fctl = le16_to_cpu(((struct ieee80211_hdr *)hdr->mac_hdr) */
/*					       ->frame_control); */
/*			if ((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { */
/*				agnx_print_rx_hdr(hdr); */
/*				agnx_print_sta(priv, BSSID_STAID); */
/*				for (j = 0; j < 8; j++) */
/*					agnx_print_sta_tx_wq(priv, BSSID_STAID, j); */
/*			} */
			get_rx_stats(priv, hdr, &status);
			skb_pull(skb, sizeof(*hdr));
			combine_hdr_frag((struct ieee80211_hdr *)hdr->mac_hdr, skb);
		} else if (!hdr->_11b0 && hdr->_11g0) {
			/* G mode packet */
			agnx_print_rx_hdr(hdr);
			agnx_print_sta(priv, BSSID_STAID);
/*			for (j = 0; j < 8; j++) */
			agnx_print_sta_tx_wq(priv, BSSID_STAID, 0);

			print_hex_dump_bytes("agnx: RX_PACKET: ", DUMP_PREFIX_NONE,
					     skb->data, skb->len + 8);

/*			if (agnx_plcp_get_bitrate_ofdm(&hdr->_11g0) == 0) */
			get_rx_stats(priv, hdr, &status);
			skb_pull(skb, sizeof(*hdr));
			combine_hdr_frag((struct ieee80211_hdr *)
					 ((void *)&hdr->mac_hdr), skb);
/*			dump_ieee80211_hdr((struct ieee80211_hdr *)skb->data, "RX G"); */
		} else
			agnx_bug("Unknown packets type");

		ieee80211_rx_irqsafe(priv->hw, skb, &status);
		rx_desc_reinit(priv, i);

	} while (priv->rx.idx++);
} /* handle_rx_irq */

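/*
 * TX completion: every transmitted frame occupies two consecutive
 * descriptors, a HEADER descriptor covering the agnx_hdr and a PACKET
 * descriptor covering the payload.  Walk the ring from idx_sent to idx, stop
 * at a HEADER descriptor the card still owns, unmap each completed fragment
 * and, for PACKET fragments, restore the 802.11 header and report the TX
 * status to mac80211.
 */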
static inline void handle_tx_irq(struct agnx_priv *priv, struct agnx_ring *ring)
{
	struct agnx_desc *desc;
	struct agnx_info *info;
	unsigned int idx;

	for (idx = ring->idx_sent; idx < ring->idx; idx++) {
		unsigned int i = idx % ring->size;
		unsigned int len;
		u32 frag;

		desc = ring->desc + i;
		info = ring->info + i;

		frag = be32_to_cpu(desc->frag);
		if (frag & OWNER) {
			if (info->type == HEADER)
				break;	/* the card hasn't sent this frame yet */
			else
				agnx_bug("TX error");
		}

		pci_unmap_single(priv->pdev, info->mapping, info->dma_len, PCI_DMA_TODEVICE);

		/* only used by the commented-out debug code below */
		len = info->skb->len - sizeof(struct agnx_hdr) + info->hdr_len;

/*		agnx_print_desc(desc); */
		if (info->type == PACKET) {
/*			agnx_print_tx_hdr((struct agnx_hdr *)info->skb->data); */
/*			agnx_print_sta_power(priv, LOCAL_STAID); */
/*			agnx_print_sta(priv, LOCAL_STAID); */
/*			for (j = 0; j < 8; j++) */
/*				agnx_print_sta_tx_wq(priv, LOCAL_STAID, 0); */
/*			agnx_print_sta_power(priv, BSSID_STAID); */
/*			agnx_print_sta(priv, BSSID_STAID); */
/*			for (j = 0; j < 8; j++) */
/*				agnx_print_sta_tx_wq(priv, BSSID_STAID, 0); */
		}

		if (info->type == PACKET) {
/*			dump_txm_registers(priv); */
/*			dump_rxm_registers(priv); */
/*			dump_bm_registers(priv); */
/*			dump_cir_registers(priv); */
		}

		if (info->type == PACKET) {
/*			struct ieee80211_hdr *hdr; */
			struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(info->skb);

			/* Strip the agnx header and put the 802.11 header back */
			skb_pull(info->skb, sizeof(struct agnx_hdr));
			memcpy(skb_push(info->skb, info->hdr_len), &info->hdr, info->hdr_len);

/*			dump_ieee80211_hdr((struct ieee80211_hdr *)info->skb->data, "TX_HANDLE"); */
/*			print_hex_dump_bytes("agnx: TX_HANDLE: ", DUMP_PREFIX_NONE, */
/*					     info->skb->data, info->skb->len); */

			if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK))
				txi->flags |= IEEE80211_TX_STAT_ACK;

			ieee80211_tx_status_irqsafe(priv->hw, info->skb);
		}

/*		info->tx_status.queue_number = (ring->size - i) / 2; */
/*		ieee80211_tx_status_irqsafe(priv->hw, info->skb, &(info->tx_status)); */

/*		dev_kfree_skb_irq(info->skb); */

		memset(desc, 0, sizeof(*desc));
		memset(info, 0, sizeof(*info));
	}

	ring->idx_sent = idx;
	/* TODO fill the priv->low_level_stats */

/*	ieee80211_wake_queue(priv->hw, 0); */
}

void handle_txm_irq(struct agnx_priv *priv)
{
	handle_tx_irq(priv, &priv->txm);
}

void handle_txd_irq(struct agnx_priv *priv)
{
	handle_tx_irq(priv, &priv->txd);
}

void handle_other_irq(struct agnx_priv *priv)
{
	void __iomem *ctl = priv->ctl;
	u32 status = priv->irq_status;
	u32 reg;

	if (status & IRQ_TX_BEACON) {
		iowrite32(IRQ_TX_BEACON, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: TX Beacon control is 0X%.8X\n", ioread32(ctl + AGNX_TXM_BEACON_CTL));
		printk(PFX "IRQ: TX Beacon rx frame num: %d\n", rx_frame_cnt);
	}
	if (status & IRQ_TX_RETRY) {
		reg = ioread32(ctl + AGNX_TXM_RETRYSTAID);
		printk(PFX "IRQ: TX Retry, RETRY STA ID is %x\n", reg);
	}
	if (status & IRQ_TX_ACTIVITY)
		printk(PFX "IRQ: TX Activity\n");
	if (status & IRQ_RX_ACTIVITY)
		printk(PFX "IRQ: RX Activity\n");
	if (status & IRQ_RX_X)
		printk(PFX "IRQ: RX X\n");
	if (status & IRQ_RX_Y) {
		reg = ioread32(ctl + AGNX_INT_MASK);
		reg &= ~IRQ_RX_Y;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(IRQ_RX_Y, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: RX Y\n");
	}
	if (status & IRQ_RX_HASHHIT) {
		reg = ioread32(ctl + AGNX_INT_MASK);
		reg &= ~IRQ_RX_HASHHIT;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(IRQ_RX_HASHHIT, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: RX Hash Hit\n");
	}
	if (status & IRQ_RX_FRAME) {
		reg = ioread32(ctl + AGNX_INT_MASK);
		reg &= ~IRQ_RX_FRAME;
		iowrite32(reg, ctl + AGNX_INT_MASK);
		iowrite32(IRQ_RX_FRAME, ctl + AGNX_INT_STAT);
		printk(PFX "IRQ: RX Frame\n");
		rx_frame_cnt++;
	}
	if (status & IRQ_ERR_INT) {
		iowrite32(IRQ_ERR_INT, ctl + AGNX_INT_STAT);
/*		agnx_hw_reset(priv); */
		printk(PFX "IRQ: Error Interrupt\n");
	}
	if (status & IRQ_TX_QUE_FULL)
		printk(PFX "IRQ: TX Workqueue Full\n");
	if (status & IRQ_BANDMAN_ERR)
		printk(PFX "IRQ: Bandwidth Management Error\n");
	if (status & IRQ_TX_DISABLE)
		printk(PFX "IRQ: TX Disable\n");
	if (status & IRQ_RX_IVASESKEY)
		printk(PFX "IRQ: RX Invalid Session Key\n");
	if (status & IRQ_REP_THHIT)
		printk(PFX "IRQ: Replay Threshold Hit\n");
	if (status & IRQ_TIMER1)
		printk(PFX "IRQ: Timer1\n");
	if (status & IRQ_TIMER_CNT)
		printk(PFX "IRQ: Timer Count\n");
	if (status & IRQ_PHY_FASTINT)
		printk(PFX "IRQ: Phy Fast Interrupt\n");
	if (status & IRQ_PHY_SLOWINT)
		printk(PFX "IRQ: Phy Slow Interrupt\n");
	if (status & IRQ_OTHER)
		printk(PFX "IRQ: 0x80000000\n");
} /* handle_other_irq */

static inline void route_flag_set(struct agnx_hdr *txhdr)
{
/*	reg = (0x7 << ROUTE_COMPRESSION_SHIFT) & ROUTE_COMPRESSION; */
/*	txhdr->reg5 = cpu_to_be32(reg); */
	txhdr->reg5 = (0xa << 0x0) | (0x7 << 0x18);
/*	txhdr->reg5 = cpu_to_be32((0xa << 0x0) | (0x7 << 0x18)); */
/*	txhdr->reg5 = cpu_to_be32(0x7 << 0x0); */
}

/* Return 0 if no match.  The rate is given in units of 100 kbit/s. */
static inline unsigned int get_power_level(unsigned int rate, unsigned int antennas_num)
{
	unsigned int power_level;

	switch (rate) {
	case 10:
	case 20:
	case 55:
	case 60:
	case 90:
	case 120: power_level = 22; break;
	case 180: power_level = 19; break;
	case 240: power_level = 18; break;
	case 360: power_level = 16; break;
	case 480: power_level = 15; break;
	case 540: power_level = 14; break;
	default:
		agnx_bug("Error rate setting\n");
	}

	if (power_level && (antennas_num == 2))
		power_level -= 3;

	return power_level;
}

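/*
 * Build the agnx_hdr that precedes every TX frame: station and workqueue
 * IDs, a copy of the 802.11 MAC header, ACK/multicast flags, retry limits
 * and the fragment/payload lengths the card expects.
 */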
static inline void fill_agnx_hdr(struct agnx_priv *priv, struct agnx_info *tx_info)
{
	struct agnx_hdr *txhdr = (struct agnx_hdr *)tx_info->skb->data;
	size_t len;
	u16 fc = le16_to_cpu(*(__le16 *)&tx_info->hdr);
	u32 reg;

	memset(txhdr, 0, sizeof(*txhdr));

/*	reg = agnx_set_bits(STATION_ID, STATION_ID_SHIFT, LOCAL_STAID); */
	reg = agnx_set_bits(STATION_ID, STATION_ID_SHIFT, BSSID_STAID);
	reg |= agnx_set_bits(WORKQUEUE_ID, WORKQUEUE_ID_SHIFT, 0);
	txhdr->reg4 = cpu_to_be32(reg);

	/* Set the Hardware Sequence Number to 1? */
	reg = agnx_set_bits(SEQUENCE_NUMBER, SEQUENCE_NUMBER_SHIFT, 0);
/*	reg = agnx_set_bits(SEQUENCE_NUMBER, SEQUENCE_NUMBER_SHIFT, 1); */
	reg |= agnx_set_bits(MAC_HDR_LEN, MAC_HDR_LEN_SHIFT, tx_info->hdr_len);
	txhdr->reg1 = cpu_to_be32(reg);
	/* Set the agnx_hdr's MAC header */
	memcpy(txhdr->mac_hdr, &tx_info->hdr, tx_info->hdr_len);

	reg = agnx_set_bits(ACK, ACK_SHIFT, 1);
/*	reg = agnx_set_bits(ACK, ACK_SHIFT, 0); */
	reg |= agnx_set_bits(MULTICAST, MULTICAST_SHIFT, 0);
/*	reg |= agnx_set_bits(MULTICAST, MULTICAST_SHIFT, 1); */
	reg |= agnx_set_bits(RELAY, RELAY_SHIFT, 0);
	reg |= agnx_set_bits(TM, TM_SHIFT, 0);
	txhdr->reg0 = cpu_to_be32(reg);

	/* Set the long and short retry limits */
	txhdr->tx.short_retry_limit = tx_info->txi->control.rates[0].count;
	txhdr->tx.long_retry_limit = tx_info->txi->control.rates[0].count;

	/* Frame length: 802.11 header + payload + FCS, plus 8 bytes of
	 * security overhead for protected frames */
	len = tx_info->skb->len - sizeof(*txhdr) + tx_info->hdr_len + FCS_LEN;
	if (fc & IEEE80211_FCTL_PROTECTED)
		len += 8;
	reg = agnx_set_bits(FRAG_SIZE, FRAG_SIZE_SHIFT, len);
	len = tx_info->skb->len - sizeof(*txhdr);
	reg |= agnx_set_bits(PAYLOAD_LEN, PAYLOAD_LEN_SHIFT, len);
	txhdr->reg3 = cpu_to_be32(reg);

	route_flag_set(txhdr);
}

static void txm_power_set(struct agnx_priv *priv,
			  struct ieee80211_tx_info *txi)
{
	struct agnx_sta_power power;
	u32 reg;

	if (txi->control.rates[0].idx < 0) {
		/* For B mode Short Preamble */
		reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211B_SHORT);
/*		control->tx_rate = -control->tx_rate; */
	} else
		reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211G);
/*		reg = agnx_set_bits(PHY_MODE, PHY_MODE_SHIFT, AGNX_MODE_80211B_LONG); */
	reg |= agnx_set_bits(SIGNAL, SIGNAL_SHIFT, 0xB);
	reg |= agnx_set_bits(RATE, RATE_SHIFT, 0xB);
/*	reg |= agnx_set_bits(POWER_LEVEL, POWER_LEVEL_SHIFT, 15); */
	reg |= agnx_set_bits(POWER_LEVEL, POWER_LEVEL_SHIFT, 20);
	/* if rate < 11M set it to 0 */
	reg |= agnx_set_bits(NUM_TRANSMITTERS, NUM_TRANSMITTERS_SHIFT, 1);
/*	reg |= agnx_set_bits(EDCF, EDCF_SHIFT, 1); */
/*	reg |= agnx_set_bits(TIFS, TIFS_SHIFT, 1); */

	power.reg = reg;
/*	power.reg = cpu_to_le32(reg); */

/*	set_sta_power(priv, &power, LOCAL_STAID); */
	set_sta_power(priv, &power, BSSID_STAID);
}

static inline int tx_packet_check(struct sk_buff *skb)
{
	unsigned int ieee_len = ieee80211_get_hdrlen_from_skb(skb);

	if (skb->len > 2048) {
		printk(KERN_ERR PFX "length is %d\n", skb->len);
		agnx_bug("Too long TX skb");
		return 1;
	}

	if (skb->len == ieee_len) {
		printk(PFX "A strange TX packet\n");
		return 1;
		/* tx_faile_irqsafe(); */
	}
	return 0;
}

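/*
 * TX submission: __agnx_tx() consumes two ring slots per frame.  The first
 * (HEADER) descriptor maps the agnx_hdr pushed in front of the 802.11
 * header, the second (PACKET) descriptor maps the remaining payload.  RX
 * interrupts are disabled while the frame is queued, and the TXM/TXD engines
 * are kicked once the descriptors have been handed over to the card.
 */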
static int __agnx_tx(struct agnx_priv *priv, struct sk_buff *skb,
		     struct agnx_ring *ring)
{
	struct agnx_desc *hdr_desc, *frag_desc;
	struct agnx_info *hdr_info, *frag_info;
	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
	unsigned long flags;
	unsigned int i;
	u32 frag;
	u32 reg;

	spin_lock_irqsave(&priv->lock, flags);

	/* The RX interrupt needs to stay disabled until this TX packet
	   is handled in the next TX interrupt */
	disable_rx_interrupt(priv);

	i = ring->idx;
	ring->idx += 2;
/*	if (priv->txm_idx - priv->txm_idx_sent == AGNX_TXM_RING_SIZE - 2) */
/*		ieee80211_stop_queue(priv->hw, 0); */

	/* Set the agnx header's info and desc */
	i %= ring->size;
	hdr_desc = ring->desc + i;
	hdr_info = ring->info + i;
	hdr_info->hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	memcpy(&hdr_info->hdr, skb->data, hdr_info->hdr_len);

	/* Add the agnx header to the front of the SKB */
	skb_push(skb, sizeof(struct agnx_hdr) - hdr_info->hdr_len);

	hdr_info->txi = txi;
	hdr_info->dma_len = sizeof(struct agnx_hdr);
	hdr_info->skb = skb;
	hdr_info->type = HEADER;
	fill_agnx_hdr(priv, hdr_info);
	hdr_info->mapping = pci_map_single(priv->pdev, skb->data,
					   hdr_info->dma_len, PCI_DMA_TODEVICE);

	frag = 0;
	frag |= agnx_set_bits(FIRST_FRAG, FIRST_FRAG_SHIFT, 1);
	frag |= agnx_set_bits(LAST_FRAG, LAST_FRAG_SHIFT, 0);
	frag |= agnx_set_bits(PACKET_LEN, PACKET_LEN_SHIFT, skb->len);
	frag |= agnx_set_bits(FIRST_FRAG_LEN, FIRST_FRAG_LEN_SHIFT, 1);
	frag |= agnx_set_bits(OWNER, OWNER_SHIFT, 1);
	hdr_desc->frag = cpu_to_be32(frag);

	hdr_desc->dma_addr = cpu_to_be32(hdr_info->mapping);

	/* Set the frag's info and desc */
	i = (i + 1) % ring->size;
	frag_desc = ring->desc + i;
	frag_info = ring->info + i;
	memcpy(frag_info, hdr_info, sizeof(struct agnx_info));
	frag_info->type = PACKET;
	frag_info->dma_len = skb->len - hdr_info->dma_len;
	frag_info->mapping = pci_map_single(priv->pdev, skb->data + hdr_info->dma_len,
					    frag_info->dma_len, PCI_DMA_TODEVICE);

	frag = 0;
	frag |= agnx_set_bits(FIRST_FRAG, FIRST_FRAG_SHIFT, 0);
	frag |= agnx_set_bits(LAST_FRAG, LAST_FRAG_SHIFT, 1);
	frag |= agnx_set_bits(PACKET_LEN, PACKET_LEN_SHIFT, skb->len);
	frag |= agnx_set_bits(SUB_FRAG_LEN, SUB_FRAG_LEN_SHIFT, frag_info->dma_len);
	frag_desc->frag = cpu_to_be32(frag);

	frag_desc->dma_addr = cpu_to_be32(frag_info->mapping);

	txm_power_set(priv, txi);

/*	len = skb->len - hdr_info->dma_len + hdr_info->hdr_len; */
/*	if (len == 614) { */
/*		agnx_print_desc(hdr_desc); */
/*		agnx_print_desc(frag_desc); */
/*		agnx_print_tx_hdr((struct agnx_hdr *)skb->data); */
/*		agnx_print_sta_power(priv, LOCAL_STAID); */
/*		agnx_print_sta(priv, LOCAL_STAID); */
/*		for (j = 0; j < 8; j++) */
/*			agnx_print_sta_tx_wq(priv, LOCAL_STAID, j); */
/*		agnx_print_sta_power(priv, BSSID_STAID); */
/*		agnx_print_sta(priv, BSSID_STAID); */
/*		for (j = 0; j < 8; j++) */
/*			agnx_print_sta_tx_wq(priv, BSSID_STAID, j); */
/*	} */

	spin_unlock_irqrestore(&priv->lock, flags);

	/* FIXME ugly code */
	/* Kick the TXM engine */
	reg = ioread32(priv->ctl + AGNX_CIR_TXMCTL);
	reg |= 0x8;
	iowrite32(reg, priv->ctl + AGNX_CIR_TXMCTL);

	/* Kick the TXD engine */
	reg = ioread32(priv->ctl + AGNX_CIR_TXDCTL);
	reg |= 0x8;
	iowrite32(reg, priv->ctl + AGNX_CIR_TXDCTL);

	return 0;
}

int _agnx_tx(struct agnx_priv *priv, struct sk_buff *skb)
{
	u16 fctl;

	if (tx_packet_check(skb))
		return 0;

/*	print_hex_dump_bytes("agnx: TX_PACKET: ", DUMP_PREFIX_NONE, */
/*			     skb->data, skb->len); */

	fctl = le16_to_cpu(*((__le16 *)skb->data));

	if ((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
		return __agnx_tx(priv, skb, &priv->txd);
	else
		return __agnx_tx(priv, skb, &priv->txm);
}