/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
#include "cfg80211.h"
struct ath6kl_sdio {
	struct sdio_func *func;

	/* protects access to bus_req_freeq */
	spinlock_t lock;

	/* free list */
	struct list_head bus_req_freeq;

	/* available bus requests */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;

	u8 *dma_buffer;

	/* protects access to dma_buffer */
	struct mutex dma_buffer_mutex;

	/* scatter request list head */
	struct list_head scat_req;

	atomic_t irq_handling;
	wait_queue_head_t irq_wq;

	/* protects access to scat_req */
	spinlock_t scat_lock;

	bool scatter_enabled;

	bool is_disabled;
	const struct sdio_device_id *id;
	struct work_struct wr_async_work;
	struct list_head wr_asyncq;

	/* protects access to wr_asyncq */
	spinlock_t wr_async_lock;
};
#define CMD53_ARG_READ          0
#define CMD53_ARG_WRITE         1
#define CMD53_ARG_BLOCK_BASIS   1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS  1
static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}
/*
 * Macro to check if DMA buffer is WORD-aligned and DMA-able.
 * Most host controllers assume the buffer is DMA'able and will
 * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
 * check fails on stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}
static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	/* EP1 has an extended range */
	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}
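
/*
 * Helpers to build raw CMD52/CMD53 arguments. Per the SDIO
 * specification the argument packs: bit 31 R/W flag, bits 30:28
 * function number, bit 27 block/byte mode, bit 26 the
 * incrementing/fixed-address opcode, bits 25:9 the register address
 * and bits 8:0 the byte/block count, which is exactly what the
 * shifts below implement.
 */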
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
					     u8 mode, u8 opcode, u32 addr,
					     u16 blksz)
{
	*arg = (((rw & 1) << 31) |
		((func & 0x7) << 28) |
		((mode & 1) << 27) |
		((opcode & 1) << 26) |
		((addr & 0x1FFFF) << 9) |
		(blksz & 0x1FF));
}
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
					     unsigned int address,
					     unsigned char val)
{
	const u8 func = 0;

	*arg = ((write & 1) << 31) |
	       ((func & 0x7) << 28) |
	       ((raw & 1) << 27) |
	       (1 << 26) |
	       ((address & 0x1FFFF) << 9) |
	       (1 << 8) |
	       (val & 0xFF);
}
static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}
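
/*
 * Perform one synchronous SDIO transfer while holding the host.
 * Fixed-address requests use the FIFO variants (sdio_readsb/writesb),
 * incrementing-address requests use the memcpy variants.
 */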
static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
			  u8 *buf, u32 len)
{
	int ret = 0;

	sdio_claim_host(func);

	if (request & HIF_WRITE) {
		/* FIXME: looks like ugly workaround for something */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		/* FIXME: this also looks like ugly workaround */
		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
	}

	sdio_release_host(func);

	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
		   request & HIF_WRITE ? "wr" : "rd", addr,
		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
	ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

	return ret;
}
static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
	struct bus_request *bus_req;

	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		spin_unlock_bh(&ar_sdio->lock);
		return NULL;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct bus_request, list);
	list_del(&bus_req->list);

	spin_unlock_bh(&ar_sdio->lock);

	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	return bus_req;
}
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
}
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = scat_req->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = scat_req->sgentries;
	data->sg_len = scat_req->scat_entries;
}
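
/*
 * Issue a scatter request: virtual-scatter requests go out as one
 * linear transfer from the preallocated bounce buffer, true scatter
 * requests are handed to the MMC layer as a raw CMD53 with an SG list.
 */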
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status, len;

	scat_req = req->scat_req;

	if (scat_req->virt_scat) {
		len = scat_req->len;
		if (scat_req->req & HIF_BLOCK_BASIS)
			len = round_down(len, HIF_MBOX_BLOCK_SIZE);

		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
					scat_req->addr, scat_req->virt_dma_buf,
					len);
		goto scat_complete;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/* Fixup the address so that the last byte will fall on MBOX EOM */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	sdio_claim_host(ar_sdio->func);

	mmc_set_data_timeout(&data, ar_sdio->func->card);
	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	sdio_release_host(ar_sdio->func);

	status = cmd.error ? cmd.error : data.error;

scat_complete:
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter write request failed:%d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(ar_sdio->ar->htc_target, scat_req);

	return status;
}
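
/*
 * Preallocate a pool of scatter requests. For virtual scatter each
 * request gets a cache-aligned linear bounce buffer, otherwise an
 * array of SG entries; every request is paired with a bus request
 * and parked in the scatter pool.
 */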
static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
					   int n_scat_entry, int n_scat_req,
					   bool virt_scat)
{
	struct hif_scatter_req *s_req;
	struct bus_request *bus_req;
	int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
	u8 *virt_buf;

	scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;

	if (!virt_scat)
		sg_sz = sizeof(struct scatterlist) * n_scat_entry;
	else
		buf_sz = 2 * L1_CACHE_BYTES +
			 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

	for (i = 0; i < n_scat_req; i++) {
		/* allocate the scatter request */
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
		if (!s_req)
			return -ENOMEM;

		if (virt_scat) {
			virt_buf = kzalloc(buf_sz, GFP_KERNEL);
			if (!virt_buf) {
				kfree(s_req);
				return -ENOMEM;
			}

			s_req->virt_dma_buf =
				(u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
		} else {
			/* allocate sglist */
			s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);

			if (!s_req->sgentries) {
				kfree(s_req);
				return -ENOMEM;
			}
		}

		/* allocate a bus request for this scatter request */
		bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
		if (!bus_req) {
			kfree(s_req->sgentries);
			kfree(s_req->virt_dma_buf);
			kfree(s_req);
			return -ENOMEM;
		}

		/* assign the scatter request to this bus request */
		bus_req->scat_req = s_req;
		s_req->busrequest = bus_req;

		s_req->virt_scat = virt_scat;

		/* add it to the scatter pool */
		hif_scatter_req_add(ar_sdio->ar, s_req);
	}

	return 0;
}
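
/*
 * Synchronous read/write. Buffers that are unaligned or not DMA-able
 * (e.g. on the stack) are staged through the driver's dma_buffer,
 * serialized by dma_buffer_mutex.
 */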
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		mutex_lock(&ar_sdio->dma_buffer_mutex);
		tbuf = ar_sdio->dma_buffer;

		if (request & HIF_WRITE)
			memcpy(tbuf, buf, len);

		bounced = true;
	} else {
		tbuf = buf;
	}

	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);

	if (bounced)
		mutex_unlock(&ar_sdio->dma_buffer_mutex);

	return ret;
}
static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
				      struct bus_request *req)
{
	if (req->scat_req) {
		ath6kl_sdio_scat_rw(ar_sdio, req);
	} else {
		void *context;
		int status;

		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kl_hif_rw_comp_handler(context, status);
	}
}
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}
	spin_unlock_bh(&ar_sdio->wr_async_lock);
}
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);
	/*
	 * Release the host during interrupts so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
	sdio_claim_host(ar_sdio->func);

	atomic_set(&ar_sdio->irq_handling, 0);
	wake_up(&ar_sdio->irq_wq);

	WARN_ON(status && status != -ECANCELED);
}
static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret = 0;

	if (!ar_sdio->is_disabled)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d\n", ret);
		sdio_release_host(func);
		return ret;
	}

	sdio_release_host(func);

	/*
	 * Wait for hardware to initialise. It should take a lot less than
	 * 10 ms but let's be conservative here.
	 */
	msleep(10);

	ar_sdio->is_disabled = false;

	return ret;
}
static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	if (ar_sdio->is_disabled)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);

	if (ret)
		return ret;

	ar_sdio->is_disabled = true;

	return ret;
}
static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
				   u32 length, u32 request,
				   struct htc_packet *packet)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *bus_req;

	bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
	if (!bus_req)
		return -ENOMEM;

	bus_req->address = address;
	bus_req->buffer = buffer;
	bus_req->length = length;
	bus_req->request = request;
	bus_req->packet = packet;

	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);

	return 0;
}
static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}
static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	return !atomic_read(&ar_sdio->irq_handling);
}
static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	int ret;

	sdio_claim_host(ar_sdio->func);

	if (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);

		ret = wait_event_interruptible(ar_sdio->irq_wq,
					       ath6kl_sdio_is_on_irq(ar));
		if (ret)
			return;

		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
}
static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *node = NULL;

	spin_lock_bh(&ar_sdio->scat_lock);

	if (!list_empty(&ar_sdio->scat_req)) {
		node = list_first_entry(&ar_sdio->scat_req,
					struct hif_scatter_req, list);
		list_del(&node->list);

		node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
	}

	spin_unlock_bh(&ar_sdio->scat_lock);

	return node;
}
static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
					struct hif_scatter_req *s_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);

	spin_lock_bh(&ar_sdio->scat_lock);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_bh(&ar_sdio->scat_lock);
}
/* scatter gather read write request */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u32 request = scat_req->req;
	int status = 0;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
	} else {
		spin_lock_bh(&ar_sdio->wr_async_lock);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}
/* clean up scatter support */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req *s_req, *tmp_req;

	/* empty the free list */
	spin_lock_bh(&ar_sdio->scat_lock);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_bh(&ar_sdio->scat_lock);

		/*
		 * FIXME: should we also call completion handler with
		 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
		 * that the packet is properly freed?
		 */
		if (s_req->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_bh(&ar_sdio->scat_lock);
	}
	spin_unlock_bh(&ar_sdio->scat_lock);
}
/* setup of HIF scatter resources */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct htc_target *target = ar->htc_target;
	int ret = 0;
	bool virt_scat = false;

	if (ar_sdio->scatter_enabled)
		return 0;

	ar_sdio->scatter_enabled = true;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		virt_scat = true;
	}

	if (!virt_scat) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				MAX_SCATTER_ENTRIES_PER_REQ,
				MAX_SCATTER_REQUESTS, virt_scat);

		if (!ret) {
			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "hif-scatter enabled requests %d entries %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);

			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
			target->max_xfer_szper_scatreq =
						MAX_SCATTER_REQ_TRANSFER_SIZE;
		} else {
			ath6kl_sdio_cleanup_scatter(ar);
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
		}
	}

	if (virt_scat || ret) {
		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
				ATH6KL_SCATTER_ENTRIES_PER_REQ,
				ATH6KL_SCATTER_REQS, virt_scat);

		if (ret) {
			ath6kl_err("failed to alloc virtual scatter resources !\n");
			ath6kl_sdio_cleanup_scatter(ar);
			return ret;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT,
			   "virtual scatter enabled requests %d entries %d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);

		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
		target->max_xfer_szper_scatreq =
					ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
	}

	return 0;
}
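
/*
 * One-time SDIO function setup: switch AR6003 and later parts to
 * 4-bit asynchronous interrupt mode and set the block size used for
 * all mailbox transfers.
 */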
static int ath6kl_sdio_config(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	int ret;

	sdio_claim_host(func);

	if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
	    MANUFACTURER_ID_AR6003_BASE) {
		/* enable 4-bit ASYNC interrupt on AR6003 or later */
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						CCCR_SDIO_IRQ_MODE_REG,
						SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
		if (ret) {
			ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
				   ret);
			goto out;
		}

		ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
	}

	/* give us some time to enable, in ms */
	func->enable_timeout = 100;

	ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
	if (ret) {
		ath6kl_err("Set sdio block size %d failed: %d\n",
			   HIF_MBOX_BLOCK_SIZE, ret);
		goto out;
	}

out:
	sdio_release_host(func);

	return ret;
}
static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	int ret;

	flags = sdio_get_host_pm_caps(func);

	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);

	if (!(flags & MMC_PM_WAKE_SDIO_IRQ) ||
	    !(flags & MMC_PM_KEEP_POWER))
		return -EINVAL;

	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret) {
		ath6kl_err("set sdio keep pwr flag failed: %d\n", ret);
		return ret;
	}

	/* sdio irq wakes up host */
	ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
	if (ret)
		ath6kl_err("set sdio wake irq flag failed: %d\n", ret);

	return ret;
}
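
/*
 * Suspend policy, in order of preference: keep a scheduled scan
 * alive, then WoW, then deep sleep (host keeps the card powered),
 * and as a last resort cut power entirely.
 */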
static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct sdio_func *func = ar_sdio->func;
	mmc_pm_flag_t flags;
	bool try_deepsleep = false;
	int ret;

	if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");

		ret = ath6kl_set_sdio_pm_caps(ar);
		if (ret)
			goto cut_pwr;

		ret = ath6kl_cfg80211_suspend(ar,
					      ATH6KL_CFG_SUSPEND_SCHED_SCAN,
					      NULL);
		if (ret)
			goto cut_pwr;

		return 0;
	}

	if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
	    (!ar->suspend_mode && wow)) {
		ret = ath6kl_set_sdio_pm_caps(ar);
		if (ret)
			goto cut_pwr;

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
		if (ret && ret != -ENOTCONN)
			ath6kl_err("wow suspend failed: %d\n", ret);

		if (ret &&
		    (!ar->wow_suspend_mode ||
		     ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP))
			try_deepsleep = true;
		else if (ret &&
			 ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR)
			goto cut_pwr;
		if (!ret)
			return 0;
	}

	if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
	    !ar->suspend_mode || try_deepsleep) {

		flags = sdio_get_host_pm_caps(func);
		if (!(flags & MMC_PM_KEEP_POWER))
			goto cut_pwr;

		ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
		if (ret)
			goto cut_pwr;

		/*
		 * Workaround to support Deep Sleep with MSM, set the host pm
		 * flag as MMC_PM_WAKE_SDIO_IRQ to allow SDCC driver to disable
		 * the sdc2_clock and internally allows MSM to enter
		 * TCXO shutdown properly.
		 */
		if ((flags & MMC_PM_WAKE_SDIO_IRQ)) {
			ret = sdio_set_host_pm_flags(func,
						     MMC_PM_WAKE_SDIO_IRQ);
			if (ret)
				goto cut_pwr;
		}

		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
					      NULL);
		if (ret)
			goto cut_pwr;

		return 0;
	}

cut_pwr:
	return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}
static int ath6kl_sdio_resume(struct ath6kl *ar)
{
	switch (ar->state) {
	case ATH6KL_STATE_OFF:
	case ATH6KL_STATE_CUTPOWER:
		ath6kl_dbg(ATH6KL_DBG_SUSPEND,
			   "sdio resume configuring sdio\n");

		/* need to set sdio settings after power is cut from sdio */
		ath6kl_sdio_config(ar);
		break;

	case ATH6KL_STATE_ON:
		break;

	case ATH6KL_STATE_DEEPSLEEP:
		break;

	case ATH6KL_STATE_WOW:
		break;

	case ATH6KL_STATE_SCHED_SCAN:
		break;

	case ATH6KL_STATE_SUSPENDING:
		break;

	case ATH6KL_STATE_RESUMING:
		break;
	}

	ath6kl_cfg80211_resume(ar);

	return 0;
}
/* set the window address register (using 4-byte register access ). */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
	int status;
	u8 addr_val[4];
	s32 i;

	/*
	 * Write bytes 1,2,3 of the register to set the upper address bytes,
	 * the LSB is written last to initiate the access cycle
	 */

	for (i = 1; i <= 3; i++) {
		/*
		 * Fill the buffer with the address byte value we want to
		 * hit 4 times.
		 */
		memset(addr_val, ((u8 *)&addr)[i], 4);

		/*
		 * Hit each byte of the register address with a 4-byte
		 * write operation to the same address, this is a harmless
		 * operation.
		 */
		status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
						     4, HIF_WR_SYNC_BYTE_FIX);
		if (status)
			break;
	}

	if (status) {
		ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
			   __func__, addr, reg_addr);
		return status;
	}

	/*
	 * Write the address register again, this time write the whole
	 * 4-byte value. The effect here is that the LSB write causes the
	 * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
	 * effect since we are writing the same values again
	 */
	status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
					     4, HIF_WR_SYNC_BYTE_INC);

	if (status) {
		ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
			   __func__, addr, reg_addr);
		return status;
	}

	return 0;
}
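
/*
 * Diagnostic window access: write the target address to the window
 * register, then move the 32-bit value through WINDOW_DATA_ADDRESS.
 */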
static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
{
	int status;

	/* set window register to start read cycle */
	status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
					address);
	if (status)
		return status;

	/* read the data */
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
				(u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("%s: failed to read from window data addr\n",
			   __func__);
		return status;
	}

	return status;
}
static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
				    __le32 data)
{
	int status;
	u32 val = (__force u32) data;

	/* set write data */
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
				(u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("%s: failed to write 0x%x to window data addr\n",
			   __func__, data);
		return status;
	}

	/* set window register, which starts the write cycle */
	return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
				      address);
}
static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
{
	u32 addr;
	unsigned long timeout;
	int ret;

	ar->bmi.cmd_credits = 0;

	/* Read the counter register to get the command credits */
	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
		/*
		 * Hit the credit counter with a 4-byte access, the first byte
		 * read will hit the counter and cause a decrement, while the
		 * remaining 3 bytes has no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = ath6kl_sdio_read_write_sync(ar, addr,
						  (u8 *)&ar->bmi.cmd_credits, 4,
						  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("Unable to decrement the command credit count register: %d\n",
				   ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes
		 */
		ar->bmi.cmd_credits &= 0xFF;
	}

	if (!ar->bmi.cmd_credits) {
		ath6kl_err("bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
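
/*
 * Poll RX_LOOKAHEAD_VALID until the target indicates that BMI
 * response data is waiting in the mailbox FIFO, or the BMI timeout
 * expires.
 */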
static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
	unsigned long timeout;
	u32 rx_word = 0;
	int ret = 0;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while ((time_before(jiffies, timeout)) && !rx_word) {
		ret = ath6kl_sdio_read_write_sync(ar,
						  RX_LOOKAHEAD_VALID_ADDRESS,
						  (u8 *)&rx_word, sizeof(rx_word),
						  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= (1 << ENDPOINT1);
	}

	if (!rx_word) {
		ath6kl_err("bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}
static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	ret = ath6kl_sdio_bmi_credits(ar);
	if (ret)
		return ret;

	addr = ar->mbox_info.htc_addr;

	ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
					  HIF_WR_SYNC_BYTE_INC);
	if (ret)
		ath6kl_err("unable to send the bmi data to the device\n");

	return ret;
}
static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	/*
	 * During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers. And on an interconnect
	 * such as Compact Flash (as well as some SDIO masters) which
	 * does not provide any indication on data timeout, it avoids
	 * a potential hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data. So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read. Fortunately, large BMI reads do
	 * not occur in practice -- they're supported for debug/development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 *  CASE 1: length < 4
	 *        Should not happen
	 *
	 *  CASE 2: 4 <= length <= 128
	 *        Wait for first 4 bytes to be in FIFO
	 *        If CONSERVATIVE_BMI_READ is enabled, also wait for
	 *        a BMI command credit, which indicates that the ENTIRE
	 *        response is available in the FIFO
	 *
	 *  CASE 3: length > 128
	 *        Wait for the first 4 bytes to be in FIFO
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to sleep
	 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
	 * a function of Host processor speed.
	 */
	if (len >= 4) { /* NB: Currently, always true */
		ret = ath6kl_bmi_get_rx_lkahd(ar);
		if (ret)
			return ret;
	}

	addr = ar->mbox_info.htc_addr;
	ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
					  HIF_RD_SYNC_BYTE_INC);
	if (ret) {
		ath6kl_err("Unable to read the bmi data from the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}
static void ath6kl_sdio_stop(struct ath6kl *ar)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct bus_request *req, *tmp_req;
	void *context;

	/* FIXME: make sure that wq is not queued again */

	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);

		if (req->scat_req) {
			/* this is a scatter gather request */
			req->scat_req->status = -ECANCELED;
			req->scat_req->complete(ar_sdio->ar->htc_target,
						req->scat_req);
		} else {
			context = req->packet;
			ath6kl_sdio_free_bus_req(ar_sdio, req);
			ath6kl_hif_rw_comp_handler(context, -ECANCELED);
		}
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);

	WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
}
static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.scat_req_rw = ath6kl_sdio_async_rw_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
	.suspend = ath6kl_sdio_suspend,
	.resume = ath6kl_sdio_resume,
	.diag_read32 = ath6kl_sdio_diag_read32,
	.diag_write32 = ath6kl_sdio_diag_write32,
	.bmi_read = ath6kl_sdio_bmi_read,
	.bmi_write = ath6kl_sdio_bmi_write,
	.power_on = ath6kl_sdio_power_on,
	.power_off = ath6kl_sdio_power_off,
	.stop = ath6kl_sdio_stop,
};
#ifdef CONFIG_PM_SLEEP

/*
 * Empty handlers so that mmc subsystem doesn't remove us entirely during
 * suspend. We instead follow cfg80211 suspend/resume handlers.
 */
static int ath6kl_sdio_pm_suspend(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");

	return 0;
}

static int ath6kl_sdio_pm_resume(struct device *device)
{
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");

	return 0;
}

static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
			 ath6kl_sdio_pm_resume);

#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)

#else

#define ATH6KL_SDIO_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
static int ath6kl_sdio_probe(struct sdio_func *func,
			     const struct sdio_device_id *id)
{
	int ret;
	struct ath6kl_sdio *ar_sdio;
	struct ath6kl *ar;
	int count;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
	if (!ar_sdio)
		return -ENOMEM;

	ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!ar_sdio->dma_buffer) {
		ret = -ENOMEM;
		goto err_hif;
	}

	ar_sdio->func = func;
	sdio_set_drvdata(func, ar_sdio);

	ar_sdio->id = id;
	ar_sdio->is_disabled = true;

	spin_lock_init(&ar_sdio->lock);
	spin_lock_init(&ar_sdio->scat_lock);
	spin_lock_init(&ar_sdio->wr_async_lock);
	mutex_init(&ar_sdio->dma_buffer_mutex);

	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);

	INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);

	init_waitqueue_head(&ar_sdio->irq_wq);

	for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);

	ar = ath6kl_core_create(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_dma;
	}

	ar_sdio->ar = ar;
	ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
	ar->hif_priv = ar_sdio;
	ar->hif_ops = &ath6kl_sdio_ops;
	ar->bmi.max_data_size = 256;

	ath6kl_sdio_set_mbox_info(ar);

	ret = ath6kl_sdio_config(ar);
	if (ret) {
		ath6kl_err("Failed to config sdio: %d\n", ret);
		goto err_core_alloc;
	}

	ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
		goto err_core_alloc;
	}

	return ret;

err_core_alloc:
	ath6kl_core_destroy(ar_sdio->ar);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);

	return ret;
}
static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
		   func->num, func->vendor, func->device);

	ar_sdio = sdio_get_drvdata(func);

	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_core_cleanup(ar_sdio->ar);
	ath6kl_core_destroy(ar_sdio->ar);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}
static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
	.drv.pm = ATH6KL_SDIO_PM_OPS,
};
static int __init ath6kl_sdio_init(void)
{
	int ret;

	ret = sdio_register_driver(&ath6kl_sdio_driver);
	if (ret)
		ath6kl_err("sdio driver registration failed: %d\n", ret);

	return ret;
}
static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}
module_init(ath6kl_sdio_init);
module_exit(ath6kl_sdio_exit);

MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);