// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
11 static const char *irq_name
[ATH11K_IRQ_NUM_MAX
] = {
28 "host2reo-re-injection",
30 "host2rxdma-monitor-ring3",
31 "host2rxdma-monitor-ring2",
32 "host2rxdma-monitor-ring1",
34 "wbm2host-rx-release",
36 "reo2host-destination-ring4",
37 "reo2host-destination-ring3",
38 "reo2host-destination-ring2",
39 "reo2host-destination-ring1",
40 "rxdma2host-monitor-destination-mac3",
41 "rxdma2host-monitor-destination-mac2",
42 "rxdma2host-monitor-destination-mac1",
43 "ppdu-end-interrupts-mac3",
44 "ppdu-end-interrupts-mac2",
45 "ppdu-end-interrupts-mac1",
46 "rxdma2host-monitor-status-ring-mac3",
47 "rxdma2host-monitor-status-ring-mac2",
48 "rxdma2host-monitor-status-ring-mac1",
49 "host2rxdma-host-buf-ring-mac3",
50 "host2rxdma-host-buf-ring-mac2",
51 "host2rxdma-host-buf-ring-mac1",
52 "rxdma2host-destination-ring-mac3",
53 "rxdma2host-destination-ring-mac2",
54 "rxdma2host-destination-ring-mac1",
55 "host2tcl-input-ring4",
56 "host2tcl-input-ring3",
57 "host2tcl-input-ring2",
58 "host2tcl-input-ring1",
59 "wbm2host-tx-completions-ring3",
60 "wbm2host-tx-completions-ring2",
61 "wbm2host-tx-completions-ring1",
62 "tcl2host-status-ring",
65 static const struct ath11k_msi_config ath11k_msi_config
[] = {
69 .users
= (struct ath11k_msi_user
[]) {
70 { .name
= "MHI", .num_vectors
= 3, .base_vector
= 0 },
71 { .name
= "CE", .num_vectors
= 10, .base_vector
= 3 },
72 { .name
= "WAKE", .num_vectors
= 1, .base_vector
= 13 },
73 { .name
= "DP", .num_vectors
= 18, .base_vector
= 14 },
75 .hw_rev
= ATH11K_HW_QCA6390_HW20
,
80 .users
= (struct ath11k_msi_user
[]) {
81 { .name
= "MHI", .num_vectors
= 3, .base_vector
= 0 },
82 { .name
= "CE", .num_vectors
= 5, .base_vector
= 3 },
83 { .name
= "DP", .num_vectors
= 8, .base_vector
= 8 },
85 .hw_rev
= ATH11K_HW_QCN9074_HW10
,
90 .users
= (struct ath11k_msi_user
[]) {
91 { .name
= "MHI", .num_vectors
= 3, .base_vector
= 0 },
92 { .name
= "CE", .num_vectors
= 10, .base_vector
= 3 },
93 { .name
= "WAKE", .num_vectors
= 1, .base_vector
= 13 },
94 { .name
= "DP", .num_vectors
= 18, .base_vector
= 14 },
96 .hw_rev
= ATH11K_HW_WCN6855_HW20
,
101 .users
= (struct ath11k_msi_user
[]) {
102 { .name
= "MHI", .num_vectors
= 3, .base_vector
= 0 },
103 { .name
= "CE", .num_vectors
= 10, .base_vector
= 3 },
104 { .name
= "WAKE", .num_vectors
= 1, .base_vector
= 13 },
105 { .name
= "DP", .num_vectors
= 18, .base_vector
= 14 },
107 .hw_rev
= ATH11K_HW_WCN6855_HW21
,
112 .users
= (struct ath11k_msi_user
[]) {
113 { .name
= "CE", .num_vectors
= 10, .base_vector
= 0 },
114 { .name
= "DP", .num_vectors
= 18, .base_vector
= 10 },
116 .hw_rev
= ATH11K_HW_WCN6750_HW10
,
121 .users
= (struct ath11k_msi_user
[]) {
122 { .name
= "MHI", .num_vectors
= 3, .base_vector
= 0 },
123 { .name
= "CE", .num_vectors
= 10, .base_vector
= 3 },
124 { .name
= "WAKE", .num_vectors
= 1, .base_vector
= 13 },
125 { .name
= "DP", .num_vectors
= 18, .base_vector
= 14 },
127 .hw_rev
= ATH11K_HW_QCA2066_HW21
,
131 int ath11k_pcic_init_msi_config(struct ath11k_base
*ab
)
133 const struct ath11k_msi_config
*msi_config
;
136 for (i
= 0; i
< ARRAY_SIZE(ath11k_msi_config
); i
++) {
137 msi_config
= &ath11k_msi_config
[i
];
139 if (msi_config
->hw_rev
== ab
->hw_rev
)
143 if (i
== ARRAY_SIZE(ath11k_msi_config
)) {
144 ath11k_err(ab
, "failed to fetch msi config, unsupported hw version: 0x%x\n",
149 ab
->pci
.msi
.config
= msi_config
;
152 EXPORT_SYMBOL(ath11k_pcic_init_msi_config
);
154 static void __ath11k_pcic_write32(struct ath11k_base
*ab
, u32 offset
, u32 value
)
156 if (offset
< ATH11K_PCI_WINDOW_START
)
157 iowrite32(value
, ab
->mem
+ offset
);
159 ab
->pci
.ops
->window_write32(ab
, offset
, value
);
162 void ath11k_pcic_write32(struct ath11k_base
*ab
, u32 offset
, u32 value
)
165 bool wakeup_required
;
167 /* for offset beyond BAR + 4K - 32, may
168 * need to wakeup the device to access.
170 wakeup_required
= test_bit(ATH11K_FLAG_DEVICE_INIT_DONE
, &ab
->dev_flags
) &&
171 offset
>= ATH11K_PCI_ACCESS_ALWAYS_OFF
;
172 if (wakeup_required
&& ab
->pci
.ops
->wakeup
)
173 ret
= ab
->pci
.ops
->wakeup(ab
);
175 __ath11k_pcic_write32(ab
, offset
, value
);
177 if (wakeup_required
&& !ret
&& ab
->pci
.ops
->release
)
178 ab
->pci
.ops
->release(ab
);
180 EXPORT_SYMBOL(ath11k_pcic_write32
);
182 static u32
__ath11k_pcic_read32(struct ath11k_base
*ab
, u32 offset
)
186 if (offset
< ATH11K_PCI_WINDOW_START
)
187 val
= ioread32(ab
->mem
+ offset
);
189 val
= ab
->pci
.ops
->window_read32(ab
, offset
);
194 u32
ath11k_pcic_read32(struct ath11k_base
*ab
, u32 offset
)
198 bool wakeup_required
;
200 /* for offset beyond BAR + 4K - 32, may
201 * need to wakeup the device to access.
203 wakeup_required
= test_bit(ATH11K_FLAG_DEVICE_INIT_DONE
, &ab
->dev_flags
) &&
204 offset
>= ATH11K_PCI_ACCESS_ALWAYS_OFF
;
205 if (wakeup_required
&& ab
->pci
.ops
->wakeup
)
206 ret
= ab
->pci
.ops
->wakeup(ab
);
208 val
= __ath11k_pcic_read32(ab
, offset
);
210 if (wakeup_required
&& !ret
&& ab
->pci
.ops
->release
)
211 ab
->pci
.ops
->release(ab
);
215 EXPORT_SYMBOL(ath11k_pcic_read32
);
217 int ath11k_pcic_read(struct ath11k_base
*ab
, void *buf
, u32 start
, u32 end
)
220 bool wakeup_required
;
224 /* for offset beyond BAR + 4K - 32, may
225 * need to wakeup the device to access.
227 wakeup_required
= test_bit(ATH11K_FLAG_DEVICE_INIT_DONE
, &ab
->dev_flags
) &&
228 end
>= ATH11K_PCI_ACCESS_ALWAYS_OFF
;
229 if (wakeup_required
&& ab
->pci
.ops
->wakeup
) {
230 ret
= ab
->pci
.ops
->wakeup(ab
);
233 "wakeup failed, data may be invalid: %d",
235 /* Even though wakeup() failed, continue processing rather
236 * than returning because some parts of the data may still
237 * be valid and useful in some cases, e.g. could give us
238 * some clues on firmware crash.
239 * Mislead due to invalid data could be avoided because we
240 * are aware of the wakeup failure.
245 for (i
= start
; i
< end
+ 1; i
+= 4)
246 *data
++ = __ath11k_pcic_read32(ab
, i
);
248 if (wakeup_required
&& ab
->pci
.ops
->release
)
249 ab
->pci
.ops
->release(ab
);
253 EXPORT_SYMBOL(ath11k_pcic_read
);
255 void ath11k_pcic_get_msi_address(struct ath11k_base
*ab
, u32
*msi_addr_lo
,
258 *msi_addr_lo
= ab
->pci
.msi
.addr_lo
;
259 *msi_addr_hi
= ab
->pci
.msi
.addr_hi
;
261 EXPORT_SYMBOL(ath11k_pcic_get_msi_address
);
263 int ath11k_pcic_get_user_msi_assignment(struct ath11k_base
*ab
, char *user_name
,
264 int *num_vectors
, u32
*user_base_data
,
267 const struct ath11k_msi_config
*msi_config
= ab
->pci
.msi
.config
;
270 for (idx
= 0; idx
< msi_config
->total_users
; idx
++) {
271 if (strcmp(user_name
, msi_config
->users
[idx
].name
) == 0) {
272 *num_vectors
= msi_config
->users
[idx
].num_vectors
;
273 *base_vector
= msi_config
->users
[idx
].base_vector
;
274 *user_base_data
= *base_vector
+ ab
->pci
.msi
.ep_base_data
;
276 ath11k_dbg(ab
, ATH11K_DBG_PCI
,
277 "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
278 user_name
, *num_vectors
, *user_base_data
,
285 ath11k_err(ab
, "Failed to find MSI assignment for %s!\n", user_name
);
289 EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment
);
291 void ath11k_pcic_get_ce_msi_idx(struct ath11k_base
*ab
, u32 ce_id
, u32
*msi_idx
)
295 for (i
= 0, msi_data_idx
= 0; i
< ab
->hw_params
.ce_count
; i
++) {
296 if (ath11k_ce_get_attr_flags(ab
, i
) & CE_ATTR_DIS_INTR
)
304 *msi_idx
= msi_data_idx
;
306 EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx
);
308 static void ath11k_pcic_free_ext_irq(struct ath11k_base
*ab
)
312 for (i
= 0; i
< ATH11K_EXT_IRQ_GRP_NUM_MAX
; i
++) {
313 struct ath11k_ext_irq_grp
*irq_grp
= &ab
->ext_irq_grp
[i
];
315 for (j
= 0; j
< irq_grp
->num_irq
; j
++)
316 free_irq(ab
->irq_num
[irq_grp
->irqs
[j
]], irq_grp
);
318 netif_napi_del(&irq_grp
->napi
);
319 free_netdev(irq_grp
->napi_ndev
);
323 void ath11k_pcic_free_irq(struct ath11k_base
*ab
)
327 for (i
= 0; i
< ab
->hw_params
.ce_count
; i
++) {
328 if (ath11k_ce_get_attr_flags(ab
, i
) & CE_ATTR_DIS_INTR
)
330 irq_idx
= ATH11K_PCI_IRQ_CE0_OFFSET
+ i
;
331 free_irq(ab
->irq_num
[irq_idx
], &ab
->ce
.ce_pipe
[i
]);
334 ath11k_pcic_free_ext_irq(ab
);
336 EXPORT_SYMBOL(ath11k_pcic_free_irq
);
338 static void ath11k_pcic_ce_irq_enable(struct ath11k_base
*ab
, u16 ce_id
)
342 /* In case of one MSI vector, we handle irq enable/disable in a
343 * uniform way since we only have one irq
345 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS
, &ab
->dev_flags
))
348 irq_idx
= ATH11K_PCI_IRQ_CE0_OFFSET
+ ce_id
;
349 enable_irq(ab
->irq_num
[irq_idx
]);
352 static void ath11k_pcic_ce_irq_disable(struct ath11k_base
*ab
, u16 ce_id
)
356 /* In case of one MSI vector, we handle irq enable/disable in a
357 * uniform way since we only have one irq
359 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS
, &ab
->dev_flags
))
362 irq_idx
= ATH11K_PCI_IRQ_CE0_OFFSET
+ ce_id
;
363 disable_irq_nosync(ab
->irq_num
[irq_idx
]);
366 static void ath11k_pcic_ce_irqs_disable(struct ath11k_base
*ab
)
370 clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED
, &ab
->dev_flags
);
372 for (i
= 0; i
< ab
->hw_params
.ce_count
; i
++) {
373 if (ath11k_ce_get_attr_flags(ab
, i
) & CE_ATTR_DIS_INTR
)
375 ath11k_pcic_ce_irq_disable(ab
, i
);
379 static void ath11k_pcic_sync_ce_irqs(struct ath11k_base
*ab
)
384 for (i
= 0; i
< ab
->hw_params
.ce_count
; i
++) {
385 if (ath11k_ce_get_attr_flags(ab
, i
) & CE_ATTR_DIS_INTR
)
388 irq_idx
= ATH11K_PCI_IRQ_CE0_OFFSET
+ i
;
389 synchronize_irq(ab
->irq_num
[irq_idx
]);
393 static void ath11k_pcic_ce_tasklet(struct tasklet_struct
*t
)
395 struct ath11k_ce_pipe
*ce_pipe
= from_tasklet(ce_pipe
, t
, intr_tq
);
396 int irq_idx
= ATH11K_PCI_IRQ_CE0_OFFSET
+ ce_pipe
->pipe_num
;
398 ath11k_ce_per_engine_service(ce_pipe
->ab
, ce_pipe
->pipe_num
);
400 enable_irq(ce_pipe
->ab
->irq_num
[irq_idx
]);
403 static irqreturn_t
ath11k_pcic_ce_interrupt_handler(int irq
, void *arg
)
405 struct ath11k_ce_pipe
*ce_pipe
= arg
;
406 struct ath11k_base
*ab
= ce_pipe
->ab
;
407 int irq_idx
= ATH11K_PCI_IRQ_CE0_OFFSET
+ ce_pipe
->pipe_num
;
409 if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED
, &ab
->dev_flags
))
412 /* last interrupt received for this CE */
413 ce_pipe
->timestamp
= jiffies
;
415 disable_irq_nosync(ab
->irq_num
[irq_idx
]);
417 tasklet_schedule(&ce_pipe
->intr_tq
);
422 static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp
*irq_grp
)
424 struct ath11k_base
*ab
= irq_grp
->ab
;
427 /* In case of one MSI vector, we handle irq enable/disable
428 * in a uniform way since we only have one irq
430 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS
, &ab
->dev_flags
))
433 for (i
= 0; i
< irq_grp
->num_irq
; i
++)
434 disable_irq_nosync(irq_grp
->ab
->irq_num
[irq_grp
->irqs
[i
]]);
437 static void __ath11k_pcic_ext_irq_disable(struct ath11k_base
*ab
)
441 clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED
, &ab
->dev_flags
);
443 for (i
= 0; i
< ATH11K_EXT_IRQ_GRP_NUM_MAX
; i
++) {
444 struct ath11k_ext_irq_grp
*irq_grp
= &ab
->ext_irq_grp
[i
];
446 ath11k_pcic_ext_grp_disable(irq_grp
);
448 if (irq_grp
->napi_enabled
) {
449 napi_synchronize(&irq_grp
->napi
);
450 napi_disable(&irq_grp
->napi
);
451 irq_grp
->napi_enabled
= false;
456 static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp
*irq_grp
)
458 struct ath11k_base
*ab
= irq_grp
->ab
;
461 /* In case of one MSI vector, we handle irq enable/disable in a
462 * uniform way since we only have one irq
464 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS
, &ab
->dev_flags
))
467 for (i
= 0; i
< irq_grp
->num_irq
; i
++)
468 enable_irq(irq_grp
->ab
->irq_num
[irq_grp
->irqs
[i
]]);
471 void ath11k_pcic_ext_irq_enable(struct ath11k_base
*ab
)
475 for (i
= 0; i
< ATH11K_EXT_IRQ_GRP_NUM_MAX
; i
++) {
476 struct ath11k_ext_irq_grp
*irq_grp
= &ab
->ext_irq_grp
[i
];
478 if (!irq_grp
->napi_enabled
) {
479 napi_enable(&irq_grp
->napi
);
480 irq_grp
->napi_enabled
= true;
482 ath11k_pcic_ext_grp_enable(irq_grp
);
485 set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED
, &ab
->dev_flags
);
487 EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable
);
489 static void ath11k_pcic_sync_ext_irqs(struct ath11k_base
*ab
)
493 for (i
= 0; i
< ATH11K_EXT_IRQ_GRP_NUM_MAX
; i
++) {
494 struct ath11k_ext_irq_grp
*irq_grp
= &ab
->ext_irq_grp
[i
];
496 for (j
= 0; j
< irq_grp
->num_irq
; j
++) {
497 irq_idx
= irq_grp
->irqs
[j
];
498 synchronize_irq(ab
->irq_num
[irq_idx
]);
/* Disable all DP interrupt groups and wait for in-flight handlers. */
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pcic_ext_irq_disable(ab);
	ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
510 static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct
*napi
, int budget
)
512 struct ath11k_ext_irq_grp
*irq_grp
= container_of(napi
,
513 struct ath11k_ext_irq_grp
,
515 struct ath11k_base
*ab
= irq_grp
->ab
;
519 work_done
= ath11k_dp_service_srng(ab
, irq_grp
, budget
);
520 if (work_done
< budget
) {
521 napi_complete_done(napi
, work_done
);
522 for (i
= 0; i
< irq_grp
->num_irq
; i
++)
523 enable_irq(irq_grp
->ab
->irq_num
[irq_grp
->irqs
[i
]]);
526 if (work_done
> budget
)
532 static irqreturn_t
ath11k_pcic_ext_interrupt_handler(int irq
, void *arg
)
534 struct ath11k_ext_irq_grp
*irq_grp
= arg
;
535 struct ath11k_base
*ab
= irq_grp
->ab
;
538 if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED
, &ab
->dev_flags
))
541 ath11k_dbg(irq_grp
->ab
, ATH11K_DBG_PCI
, "ext irq %d\n", irq
);
543 /* last interrupt received for this group */
544 irq_grp
->timestamp
= jiffies
;
546 for (i
= 0; i
< irq_grp
->num_irq
; i
++)
547 disable_irq_nosync(irq_grp
->ab
->irq_num
[irq_grp
->irqs
[i
]]);
549 napi_schedule(&irq_grp
->napi
);
555 ath11k_pcic_get_msi_irq(struct ath11k_base
*ab
, unsigned int vector
)
557 return ab
->pci
.ops
->get_msi_irq(ab
, vector
);
560 static int ath11k_pcic_ext_irq_config(struct ath11k_base
*ab
)
562 int i
, j
, n
, ret
, num_vectors
= 0;
563 u32 user_base_data
= 0, base_vector
= 0;
564 struct ath11k_ext_irq_grp
*irq_grp
;
565 unsigned long irq_flags
;
567 ret
= ath11k_pcic_get_user_msi_assignment(ab
, "DP", &num_vectors
,
573 irq_flags
= IRQF_SHARED
;
574 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS
, &ab
->dev_flags
))
575 irq_flags
|= IRQF_NOBALANCING
;
577 for (i
= 0; i
< ATH11K_EXT_IRQ_GRP_NUM_MAX
; i
++) {
578 irq_grp
= &ab
->ext_irq_grp
[i
];
583 irq_grp
->napi_ndev
= alloc_netdev_dummy(0);
584 if (!irq_grp
->napi_ndev
) {
589 netif_napi_add(irq_grp
->napi_ndev
, &irq_grp
->napi
,
590 ath11k_pcic_ext_grp_napi_poll
);
592 if (ab
->hw_params
.ring_mask
->tx
[i
] ||
593 ab
->hw_params
.ring_mask
->rx
[i
] ||
594 ab
->hw_params
.ring_mask
->rx_err
[i
] ||
595 ab
->hw_params
.ring_mask
->rx_wbm_rel
[i
] ||
596 ab
->hw_params
.ring_mask
->reo_status
[i
] ||
597 ab
->hw_params
.ring_mask
->rxdma2host
[i
] ||
598 ab
->hw_params
.ring_mask
->host2rxdma
[i
] ||
599 ab
->hw_params
.ring_mask
->rx_mon_status
[i
]) {
603 irq_grp
->num_irq
= num_irq
;
604 irq_grp
->irqs
[0] = ATH11K_PCI_IRQ_DP_OFFSET
+ i
;
606 for (j
= 0; j
< irq_grp
->num_irq
; j
++) {
607 int irq_idx
= irq_grp
->irqs
[j
];
608 int vector
= (i
% num_vectors
) + base_vector
;
609 int irq
= ath11k_pcic_get_msi_irq(ab
, vector
);
616 ab
->irq_num
[irq_idx
] = irq
;
618 ath11k_dbg(ab
, ATH11K_DBG_PCI
,
619 "irq %d group %d\n", irq
, i
);
621 irq_set_status_flags(irq
, IRQ_DISABLE_UNLAZY
);
622 ret
= request_irq(irq
, ath11k_pcic_ext_interrupt_handler
,
623 irq_flags
, "DP_EXT_IRQ", irq_grp
);
625 ath11k_err(ab
, "failed request irq %d: %d\n",
627 for (n
= 0; n
<= i
; n
++) {
628 irq_grp
= &ab
->ext_irq_grp
[n
];
629 free_netdev(irq_grp
->napi_ndev
);
634 ath11k_pcic_ext_grp_disable(irq_grp
);
639 /* i ->napi_ndev was properly allocated. Free it also */
642 for (n
= 0; n
< i
; n
++) {
643 irq_grp
= &ab
->ext_irq_grp
[n
];
644 free_netdev(irq_grp
->napi_ndev
);
649 int ath11k_pcic_config_irq(struct ath11k_base
*ab
)
651 struct ath11k_ce_pipe
*ce_pipe
;
653 u32 msi_data_count
, msi_data_idx
;
655 unsigned int msi_data
;
656 int irq
, i
, ret
, irq_idx
;
657 unsigned long irq_flags
;
659 ret
= ath11k_pcic_get_user_msi_assignment(ab
, "CE", &msi_data_count
,
660 &msi_data_start
, &msi_irq_start
);
664 irq_flags
= IRQF_SHARED
;
665 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS
, &ab
->dev_flags
))
666 irq_flags
|= IRQF_NOBALANCING
;
668 /* Configure CE irqs */
669 for (i
= 0, msi_data_idx
= 0; i
< ab
->hw_params
.ce_count
; i
++) {
670 if (ath11k_ce_get_attr_flags(ab
, i
) & CE_ATTR_DIS_INTR
)
673 msi_data
= (msi_data_idx
% msi_data_count
) + msi_irq_start
;
674 irq
= ath11k_pcic_get_msi_irq(ab
, msi_data
);
678 ce_pipe
= &ab
->ce
.ce_pipe
[i
];
680 irq_idx
= ATH11K_PCI_IRQ_CE0_OFFSET
+ i
;
682 tasklet_setup(&ce_pipe
->intr_tq
, ath11k_pcic_ce_tasklet
);
684 ret
= request_irq(irq
, ath11k_pcic_ce_interrupt_handler
,
685 irq_flags
, irq_name
[irq_idx
], ce_pipe
);
687 ath11k_err(ab
, "failed to request irq %d: %d\n",
692 ab
->irq_num
[irq_idx
] = irq
;
695 ath11k_pcic_ce_irq_disable(ab
, i
);
698 ret
= ath11k_pcic_ext_irq_config(ab
);
704 EXPORT_SYMBOL(ath11k_pcic_config_irq
);
706 void ath11k_pcic_ce_irqs_enable(struct ath11k_base
*ab
)
710 set_bit(ATH11K_FLAG_CE_IRQ_ENABLED
, &ab
->dev_flags
);
712 for (i
= 0; i
< ab
->hw_params
.ce_count
; i
++) {
713 if (ath11k_ce_get_attr_flags(ab
, i
) & CE_ATTR_DIS_INTR
)
715 ath11k_pcic_ce_irq_enable(ab
, i
);
718 EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable
);
720 static void ath11k_pcic_kill_tasklets(struct ath11k_base
*ab
)
724 for (i
= 0; i
< ab
->hw_params
.ce_count
; i
++) {
725 struct ath11k_ce_pipe
*ce_pipe
= &ab
->ce
.ce_pipe
[i
];
727 if (ath11k_ce_get_attr_flags(ab
, i
) & CE_ATTR_DIS_INTR
)
730 tasklet_kill(&ce_pipe
->intr_tq
);
/* Fully quiesce CE interrupts: disable, wait for in-flight handlers, then
 * kill the tasklets.
 */
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irqs_disable(ab);
	ath11k_pcic_sync_ce_irqs(ab);
	ath11k_pcic_kill_tasklets(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
/* Stop the device: quiesce CE interrupts and clean up the CE pipes. */
void ath11k_pcic_stop(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irq_disable_sync(ab);
	ath11k_ce_cleanup_pipes(ab);
}
EXPORT_SYMBOL(ath11k_pcic_stop);
749 int ath11k_pcic_start(struct ath11k_base
*ab
)
751 set_bit(ATH11K_FLAG_DEVICE_INIT_DONE
, &ab
->dev_flags
);
753 ath11k_pcic_ce_irqs_enable(ab
);
754 ath11k_ce_rx_post_buf(ab
);
758 EXPORT_SYMBOL(ath11k_pcic_start
);
760 int ath11k_pcic_map_service_to_pipe(struct ath11k_base
*ab
, u16 service_id
,
761 u8
*ul_pipe
, u8
*dl_pipe
)
763 const struct service_to_pipe
*entry
;
764 bool ul_set
= false, dl_set
= false;
767 for (i
= 0; i
< ab
->hw_params
.svc_to_ce_map_len
; i
++) {
768 entry
= &ab
->hw_params
.svc_to_ce_map
[i
];
770 if (__le32_to_cpu(entry
->service_id
) != service_id
)
773 switch (__le32_to_cpu(entry
->pipedir
)) {
778 *dl_pipe
= __le32_to_cpu(entry
->pipenum
);
783 *ul_pipe
= __le32_to_cpu(entry
->pipenum
);
789 *dl_pipe
= __le32_to_cpu(entry
->pipenum
);
790 *ul_pipe
= __le32_to_cpu(entry
->pipenum
);
797 if (WARN_ON(!ul_set
|| !dl_set
))
802 EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe
);
804 int ath11k_pcic_register_pci_ops(struct ath11k_base
*ab
,
805 const struct ath11k_pci_ops
*pci_ops
)
810 /* Return error if mandatory pci_ops callbacks are missing */
811 if (!pci_ops
->get_msi_irq
|| !pci_ops
->window_write32
||
812 !pci_ops
->window_read32
)
815 ab
->pci
.ops
= pci_ops
;
818 EXPORT_SYMBOL(ath11k_pcic_register_pci_ops
);
820 void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base
*ab
)
824 for (i
= 0; i
< ab
->hw_params
.ce_count
; i
++) {
825 if (ath11k_ce_get_attr_flags(ab
, i
) & CE_ATTR_DIS_INTR
||
826 i
== ATH11K_PCI_CE_WAKE_IRQ
)
828 ath11k_pcic_ce_irq_enable(ab
, i
);
831 EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq
);
833 void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base
*ab
)
837 struct ath11k_ce_pipe
*ce_pipe
;
839 for (i
= 0; i
< ab
->hw_params
.ce_count
; i
++) {
840 ce_pipe
= &ab
->ce
.ce_pipe
[i
];
841 irq_idx
= ATH11K_PCI_IRQ_CE0_OFFSET
+ i
;
843 if (ath11k_ce_get_attr_flags(ab
, i
) & CE_ATTR_DIS_INTR
||
844 i
== ATH11K_PCI_CE_WAKE_IRQ
)
847 disable_irq_nosync(ab
->irq_num
[irq_idx
]);
848 synchronize_irq(ab
->irq_num
[irq_idx
]);
849 tasklet_kill(&ce_pipe
->intr_tq
);
852 EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq
);