// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/bsearch.h>
#include <linux/list.h>

#include "fw/api/tx.h"
#include "iwl-trans.h"

#include <linux/dmapool.h>

#include "fw/api/commands.h"
#include "pcie/internal.h"
#include "iwl-context-info-gen3.h"
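
/*
 * Per-device restart bookkeeping, keyed by device name.  Entries are
 * created on first use, kept on restart_data_list for the lifetime of
 * the module (see iwl_trans_free_restart_list()), and drive the reset
 * escalation in iwl_trans_determine_restart_mode() below.
 */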
struct iwl_trans_dev_restart_data {
	struct list_head list;
	unsigned int restart_count;
	time64_t last_error;
	char name[];
};
static LIST_HEAD(restart_data_list);
static DEFINE_SPINLOCK(restart_data_lock);
static struct iwl_trans_dev_restart_data *
iwl_trans_get_restart_data(struct device *dev)
{
	struct iwl_trans_dev_restart_data *tmp, *data = NULL;
	const char *name = dev_name(dev);

	spin_lock(&restart_data_lock);
	list_for_each_entry(tmp, &restart_data_list, list) {
		if (strcmp(tmp->name, name))
			continue;
		data = tmp;
		break;
	}
	spin_unlock(&restart_data_lock);

	if (data)
		return data;

	data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
	if (!data)
		return NULL;

	strcpy(data->name, name);
	spin_lock(&restart_data_lock);
	list_add_tail(&data->list, &restart_data_list);
	spin_unlock(&restart_data_lock);

	return data;
}
static void iwl_trans_inc_restart_count(struct device *dev)
{
	struct iwl_trans_dev_restart_data *data;

	data = iwl_trans_get_restart_data(dev);
	if (data) {
		data->last_error = ktime_get_boottime_seconds();
		data->restart_count++;
	}
}
void iwl_trans_free_restart_list(void)
{
	struct iwl_trans_dev_restart_data *tmp;

	while ((tmp = list_first_entry_or_null(&restart_data_list,
					       typeof(*tmp), list))) {
		list_del(&tmp->list);
		kfree(tmp);
	}
}
struct iwl_trans_reprobe {
	struct device *dev;
	struct work_struct work;
};
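
/*
 * The reprobe runs from its own work item so it can outlive the
 * transport teardown; it holds a module reference (taken before
 * scheduling in iwl_trans_restart_wk()) and a device reference, both
 * dropped once device_reprobe() has finished.
 */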
static void iwl_trans_reprobe_wk(struct work_struct *wk)
{
	struct iwl_trans_reprobe *reprobe;

	reprobe = container_of(wk, typeof(*reprobe), work);

	if (device_reprobe(reprobe->dev))
		dev_err(reprobe->dev, "reprobe failed!\n");
	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}
#define IWL_TRANS_RESET_OK_TIME	180 /* seconds */
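
/*
 * Escalating recovery: the Nth recent restart picks the Nth entry of
 * escalation_list below, e.g. a first error gets a SW reset and a
 * second one a reprobe, up to a full product reset for repeated
 * failures.  A device that stayed error-free for
 * IWL_TRANS_RESET_OK_TIME seconds starts over at the mildest mode.
 */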
static enum iwl_reset_mode
iwl_trans_determine_restart_mode(struct iwl_trans *trans)
{
	struct iwl_trans_dev_restart_data *data;
	enum iwl_reset_mode at_least = 0;
	unsigned int index;
	static const enum iwl_reset_mode escalation_list[] = {
		IWL_RESET_MODE_SW_RESET,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_REPROBE,
		IWL_RESET_MODE_FUNC_RESET,
		/* FIXME: add TOP reset */
		IWL_RESET_MODE_PROD_RESET,
		/* FIXME: add TOP reset */
		IWL_RESET_MODE_PROD_RESET,
		/* FIXME: add TOP reset */
		IWL_RESET_MODE_PROD_RESET,
	};

	if (trans->restart.during_reset)
		at_least = IWL_RESET_MODE_REPROBE;

	data = iwl_trans_get_restart_data(trans->dev);
	if (!data)
		return at_least;

	if (ktime_get_boottime_seconds() - data->last_error >=
	    IWL_TRANS_RESET_OK_TIME)
		data->restart_count = 0;

	index = data->restart_count;
	if (index >= ARRAY_SIZE(escalation_list))
		index = ARRAY_SIZE(escalation_list) - 1;

	return max(at_least, escalation_list[index]);
}
#define IWL_TRANS_RESET_DELAY	(HZ * 60)
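
/*
 * Restart worker: dump error info through the op mode, then apply the
 * reset mode chosen by iwl_trans_determine_restart_mode().  The early
 * bail-outs below handle races with the op mode stopping the device;
 * see also the comment in iwl_trans_stop_device().
 */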
static void iwl_trans_restart_wk(struct work_struct *wk)
{
	struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk);
	struct iwl_trans_reprobe *reprobe;
	enum iwl_reset_mode mode;

	/* might have been scheduled before marked as dead, re-check */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;

	iwl_op_mode_dump_error(trans->op_mode, &trans->restart.mode);

	/*
	 * If the opmode stopped the device while we were trying to dump and
	 * reset, then we'll have done the dump already (synchronized by the
	 * opmode lock that it will acquire in iwl_op_mode_dump_error()) and
	 * managed that via trans->restart.mode.
	 * Additionally, make sure that in such a case we won't attempt to do
	 * any resets now, since it's no longer requested.
	 */
	if (!test_and_clear_bit(STATUS_RESET_PENDING, &trans->status))
		return;

	if (!iwlwifi_mod_params.fw_restart)
		return;

	mode = iwl_trans_determine_restart_mode(trans);

	iwl_trans_inc_restart_count(trans->dev);

	switch (mode) {
	case IWL_RESET_MODE_SW_RESET:
		IWL_ERR(trans, "Device error - SW reset\n");
		iwl_trans_opmode_sw_reset(trans, trans->restart.mode.type);
		break;
	case IWL_RESET_MODE_REPROBE:
		IWL_ERR(trans, "Device error - reprobe!\n");

		/*
		 * get a module reference to avoid doing this while unloading
		 * anyway and to avoid scheduling a work with code that's
		 * being removed.
		 */
		if (!try_module_get(THIS_MODULE)) {
			IWL_ERR(trans, "Module is being unloaded - abort\n");
			return;
		}

		reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
		if (!reprobe) {
			module_put(THIS_MODULE);
			return;
		}
		reprobe->dev = get_device(trans->dev);
		INIT_WORK(&reprobe->work, iwl_trans_reprobe_wk);
		schedule_work(&reprobe->work);
		break;
	default:
		iwl_trans_pcie_reset(trans, mode);
		break;
	}
}
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __sync_cmd_key;
#endif

	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
	if (!trans)
		return NULL;

	trans->trans_cfg = cfg_trans;

#ifdef CONFIG_LOCKDEP
	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__sync_cmd_key, 0);
#endif

	trans->dev = dev;
	trans->num_rx_queues = 1;

	INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk);

	return trans;
}
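
/*
 * Size the device command slab: the largest firmware TX command plus
 * the command header and the biggest possible 802.11 header.  On gen2
 * the buffer must also fit entirely within one txcmd_align chunk so
 * that a command never crosses a page boundary.
 */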
int iwl_trans_init(struct iwl_trans *trans)
{
	int txcmd_size, txcmd_align;

	if (!trans->trans_cfg->gen2) {
		txcmd_size = sizeof(struct iwl_tx_cmd);
		txcmd_align = sizeof(void *);
	} else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
		txcmd_align = 64;
	} else {
		txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
		txcmd_align = 128;
	}

	txcmd_size += sizeof(struct iwl_cmd_header);
	txcmd_size += 36; /* biggest possible 802.11 header */

	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
	if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
		return -EINVAL;

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  txcmd_size, txcmd_align,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!trans->dev_cmd_pool)
		return -ENOMEM;

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans->wait_command_queue);

	return 0;
}
void iwl_trans_free(struct iwl_trans *trans)
{
	cancel_work_sync(&trans->restart.wk);
	kmem_cache_destroy(trans->dev_cmd_pool);
}
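
/*
 * Send a host command to the firmware.  Synchronous commands (without
 * CMD_ASYNC) are tracked through a lockdep map to catch illegal
 * nesting; RF-kill, D3 and firmware-error states are rejected up front
 * unless the command is explicitly flagged for that state.
 */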
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
		     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
		return -ERFKILL;

	/*
	 * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
	 * bit is set early in the D3 flow, before we send all the commands
	 * that configure the firmware for D3 operation (power, patterns, ...)
	 * and we don't want to flag all those with CMD_SEND_IN_D3.
	 * So use the system_pm_mode instead. The only command sent after
	 * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
	 * CMD_SEND_IN_D3.
	 */
	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
		     !(cmd->flags & CMD_SEND_IN_D3)))
		return -EHOSTDOWN;

	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) {
		if (cmd->id != REPLY_ERROR)
			cmd->id = DEF_ID(cmd->id);
	}

	ret = iwl_trans_pcie_send_hcmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
		return -EIO;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
/* Comparator for struct iwl_hcmd_names.
 * Used in the binary search over a list of host commands.
 *
 * @key: command_id that we're looking for.
 * @elt: struct iwl_hcmd_names candidate for match.
 *
 * @return 0 iff equal.
 */
static int iwl_hcmd_names_cmp(const void *key, const void *elt)
{
	const struct iwl_hcmd_names *name = elt;
	const u8 *cmd1 = key;
	u8 cmd2 = name->cmd_id;

	return (*cmd1 - cmd2);
}
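
/*
 * Illustrative lookup (assuming the usual WIDE_ID() packing of group
 * and opcode from iwl-trans.h):
 *
 *	iwl_get_cmd_string(trans, WIDE_ID(LONG_GROUP, 0x1c))
 *
 * extracts group LONG_GROUP and opcode 0x1c, then bsearches that
 * group's sorted name array with iwl_hcmd_names_cmp() above.
 */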
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
{
	u8 grp, cmd;
	struct iwl_hcmd_names *ret;
	const struct iwl_hcmd_arr *arr;
	size_t size = sizeof(struct iwl_hcmd_names);

	grp = iwl_cmd_groupid(id);
	cmd = iwl_cmd_opcode(id);

	if (!trans->command_groups || grp >= trans->command_groups_size ||
	    !trans->command_groups[grp].arr)
		return "UNKNOWN";

	arr = &trans->command_groups[grp];
	ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
	if (!ret)
		return "UNKNOWN";
	return ret->cmd_name;
}
IWL_EXPORT_SYMBOL(iwl_get_cmd_string);
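
/*
 * bsearch() in iwl_get_cmd_string() requires each command array to be
 * sorted by cmd_id; verify that once at configure time instead of
 * silently returning wrong command names later.
 */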
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
{
	int i, j;
	const struct iwl_hcmd_arr *arr;

	for (i = 0; i < trans->command_groups_size; i++) {
		arr = &trans->command_groups[i];
		if (!arr->arr)
			continue;
		for (j = 0; j < arr->size - 1; j++)
			if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id)
				return -1;
	}
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
void iwl_trans_configure(struct iwl_trans *trans,
			 const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	iwl_trans_pcie_configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
IWL_EXPORT_SYMBOL(iwl_trans_configure);
int iwl_trans_start_hw(struct iwl_trans *trans)
{
	return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);
void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	iwl_trans_pcie_op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);
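
/*
 * Register/memory access wrappers.  iwlwifi currently has a single
 * (PCIe) transport back-end, so these exported helpers forward to the
 * PCIe implementation directly rather than through an ops table.
 */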
void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	iwl_trans_pcie_write8(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write8);

void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	iwl_trans_pcie_write32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write32);

u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read32(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read32);

u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return iwl_trans_pcie_read_prph(trans, ofs);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_prph);

void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
	iwl_trans_pcie_write_prph(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_prph);

int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
		       void *buf, int dwords)
{
	return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_mem);

int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
			const void *buf, int dwords)
{
	return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_mem);
void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);
int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
	return iwl_trans_pcie_sw_reset(trans, retake_ownership);
}
IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);
struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	return iwl_trans_pcie_dump_data(trans, dump_mask,
					sanitize_ops, sanitize_ctx);
}
IWL_EXPORT_SYMBOL(iwl_trans_dump_data);
int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
	return iwl_trans_pcie_d3_suspend(trans, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
			bool test, bool reset)
{
	return iwl_trans_pcie_d3_resume(trans, status, test, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	iwl_trans_pci_interrupts(trans, enable);
}
IWL_EXPORT_SYMBOL(iwl_trans_interrupts);

void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	iwl_trans_pcie_sync_nmi(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);
int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
			    u64 src_addr, u32 byte_cnt)
{
	return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
}
IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);
void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
			     u32 mask, u32 value)
{
	iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);
int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
			    u32 *val)
{
	return iwl_trans_pcie_read_config32(trans, ofs, val);
}
IWL_EXPORT_SYMBOL(iwl_trans_read_config32);
bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
{
	return iwl_trans_pcie_grab_nic_access(trans);
}
IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);
void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	iwl_trans_pcie_release_nic_access(trans);
	__release(nic_access);
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
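
/*
 * Typical NIC-access pattern (a sketch; callers normally use the
 * iwl_trans_grab_nic_access() macro from iwl-trans.h, which adds the
 * matching sparse annotation):
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		... direct register access ...
 *		iwl_trans_release_nic_access(trans);
 *	}
 */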
void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	trans->state = IWL_TRANS_FW_ALIVE;

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_fw_alive(trans);
	else
		iwl_trans_pcie_fw_alive(trans, scd_addr);
}
IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);
int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
		       bool run_in_rfkill)
{
	int ret;

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);

	if (trans->trans_cfg->gen2)
		ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
	else
		ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);

	if (!ret)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_start_fw);
void iwl_trans_stop_device(struct iwl_trans *trans)
{
	/*
	 * See also the comment in iwl_trans_restart_wk().
	 *
	 * When the opmode stops the device while a reset is pending, the
	 * worker (iwl_trans_restart_wk) might not have run yet or, more
	 * likely, will be blocked on the opmode lock. Due to the locking,
	 * we can't just flush the worker.
	 *
	 * If this is the case, then the test_and_clear_bit() ensures that
	 * the worker won't attempt to do anything after the stop.
	 *
	 * The trans->restart.mode is a handshake with the opmode, we set
	 * the context there to ABORT so that when the worker can finally
	 * acquire the lock in the opmode, the code there won't attempt to
	 * do any dumps. Since we'd really like to have the dump though,
	 * also do it inline here (with the opmode locks already held),
	 * but use a separate mode struct to avoid races.
	 */
	if (test_and_clear_bit(STATUS_RESET_PENDING, &trans->status)) {
		struct iwl_fw_error_dump_mode mode;

		mode = trans->restart.mode;
		mode.context = IWL_ERR_CONTEXT_FROM_OPMODE;
		trans->restart.mode.context = IWL_ERR_CONTEXT_ABORT;

		iwl_op_mode_dump_error(trans->op_mode, &mode);
	}

	if (trans->trans_cfg->gen2)
		iwl_trans_pcie_gen2_stop_device(trans);
	else
		iwl_trans_pcie_stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
IWL_EXPORT_SYMBOL(iwl_trans_stop_device);
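
/*
 * Data-path entry points below require a running firmware: each one
 * WARNs and bails out when the transport is not in IWL_TRANS_FW_ALIVE.
 */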
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		 struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	if (trans->trans_cfg->gen2)
		return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);

	return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_tx);
void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
		       struct sk_buff_head *skbs, bool is_flush)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
}
IWL_EXPORT_SYMBOL(iwl_trans_reclaim);
void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
			   bool configure_scd)
{
	iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);
bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			      const struct iwl_trans_txq_scd_cfg *cfg,
			      unsigned int queue_wdg_timeout)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return false;

	return iwl_trans_pcie_txq_enable(trans, queue, ssn,
					 cfg, queue_wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txq_empty(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);
int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
}
IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);
void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
				unsigned long txqs, bool freeze)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
}
IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);
void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
				   int txq_id, bool shared_mode)
{
	iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
{
	iwl_trans_pcie_debugfs_cleanup(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
#endif
void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return;

	iwl_pcie_set_q_ptrs(trans, queue, ptr);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);
int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
			u8 tid, int size, unsigned int wdg_timeout)
{
	if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		      "bad state = %d\n", trans->state))
		return -EIO;

	return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
				 size, wdg_timeout);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);
void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	iwl_txq_dyn_free(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_free);
int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data)
{
	return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
}
IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);
int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);
void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
							      capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);