// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
 */
7 #include <linux/kernel.h>
8 #include <linux/bsearch.h>
10 #include "fw/api/tx.h"
11 #include "iwl-trans.h"
14 #include <linux/dmapool.h>
15 #include "fw/api/commands.h"
16 #include "pcie/internal.h"
17 #include "iwl-context-info-gen3.h"
19 struct iwl_trans
*iwl_trans_alloc(unsigned int priv_size
,
21 const struct iwl_cfg_trans_params
*cfg_trans
)
23 struct iwl_trans
*trans
;
25 static struct lock_class_key __key
;
28 trans
= devm_kzalloc(dev
, sizeof(*trans
) + priv_size
, GFP_KERNEL
);
32 trans
->trans_cfg
= cfg_trans
;
35 lockdep_init_map(&trans
->sync_cmd_lockdep_map
, "sync_cmd_lockdep_map",
40 trans
->num_rx_queues
= 1;
45 int iwl_trans_init(struct iwl_trans
*trans
)
47 int txcmd_size
, txcmd_align
;
49 if (!trans
->trans_cfg
->gen2
) {
50 txcmd_size
= sizeof(struct iwl_tx_cmd
);
51 txcmd_align
= sizeof(void *);
52 } else if (trans
->trans_cfg
->device_family
< IWL_DEVICE_FAMILY_AX210
) {
53 txcmd_size
= sizeof(struct iwl_tx_cmd_gen2
);
56 txcmd_size
= sizeof(struct iwl_tx_cmd_gen3
);
60 txcmd_size
+= sizeof(struct iwl_cmd_header
);
61 txcmd_size
+= 36; /* biggest possible 802.11 header */
63 /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
64 if (WARN_ON(trans
->trans_cfg
->gen2
&& txcmd_size
>= txcmd_align
))
67 snprintf(trans
->dev_cmd_pool_name
, sizeof(trans
->dev_cmd_pool_name
),
68 "iwl_cmd_pool:%s", dev_name(trans
->dev
));
70 kmem_cache_create(trans
->dev_cmd_pool_name
,
71 txcmd_size
, txcmd_align
,
72 SLAB_HWCACHE_ALIGN
, NULL
);
73 if (!trans
->dev_cmd_pool
)
76 /* Initialize the wait queue for commands */
77 init_waitqueue_head(&trans
->wait_command_queue
);
82 void iwl_trans_free(struct iwl_trans
*trans
)
84 kmem_cache_destroy(trans
->dev_cmd_pool
);
87 int iwl_trans_send_cmd(struct iwl_trans
*trans
, struct iwl_host_cmd
*cmd
)
91 if (unlikely(!(cmd
->flags
& CMD_SEND_IN_RFKILL
) &&
92 test_bit(STATUS_RFKILL_OPMODE
, &trans
->status
)))
96 * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
97 * bit is set early in the D3 flow, before we send all the commands
98 * that configure the firmware for D3 operation (power, patterns, ...)
99 * and we don't want to flag all those with CMD_SEND_IN_D3.
100 * So use the system_pm_mode instead. The only command sent after
101 * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
104 if (unlikely(trans
->system_pm_mode
== IWL_PLAT_PM_MODE_D3
&&
105 !(cmd
->flags
& CMD_SEND_IN_D3
)))
108 if (unlikely(test_bit(STATUS_FW_ERROR
, &trans
->status
)))
111 if (WARN_ONCE(trans
->state
!= IWL_TRANS_FW_ALIVE
,
112 "bad state = %d\n", trans
->state
))
115 if (!(cmd
->flags
& CMD_ASYNC
))
116 lock_map_acquire_read(&trans
->sync_cmd_lockdep_map
);
118 if (trans
->wide_cmd_header
&& !iwl_cmd_groupid(cmd
->id
)) {
119 if (cmd
->id
!= REPLY_ERROR
)
120 cmd
->id
= DEF_ID(cmd
->id
);
123 ret
= iwl_trans_pcie_send_hcmd(trans
, cmd
);
125 if (!(cmd
->flags
& CMD_ASYNC
))
126 lock_map_release(&trans
->sync_cmd_lockdep_map
);
128 if (WARN_ON((cmd
->flags
& CMD_WANT_SKB
) && !ret
&& !cmd
->resp_pkt
))
133 IWL_EXPORT_SYMBOL(iwl_trans_send_cmd
);
135 /* Comparator for struct iwl_hcmd_names.
136 * Used in the binary search over a list of host commands.
138 * @key: command_id that we're looking for.
139 * @elt: struct iwl_hcmd_names candidate for match.
141 * @return 0 iff equal.
143 static int iwl_hcmd_names_cmp(const void *key
, const void *elt
)
145 const struct iwl_hcmd_names
*name
= elt
;
146 const u8
*cmd1
= key
;
147 u8 cmd2
= name
->cmd_id
;
149 return (*cmd1
- cmd2
);
152 const char *iwl_get_cmd_string(struct iwl_trans
*trans
, u32 id
)
155 struct iwl_hcmd_names
*ret
;
156 const struct iwl_hcmd_arr
*arr
;
157 size_t size
= sizeof(struct iwl_hcmd_names
);
159 grp
= iwl_cmd_groupid(id
);
160 cmd
= iwl_cmd_opcode(id
);
162 if (!trans
->command_groups
|| grp
>= trans
->command_groups_size
||
163 !trans
->command_groups
[grp
].arr
)
166 arr
= &trans
->command_groups
[grp
];
167 ret
= bsearch(&cmd
, arr
->arr
, arr
->size
, size
, iwl_hcmd_names_cmp
);
170 return ret
->cmd_name
;
172 IWL_EXPORT_SYMBOL(iwl_get_cmd_string
);
174 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config
*trans
)
177 const struct iwl_hcmd_arr
*arr
;
179 for (i
= 0; i
< trans
->command_groups_size
; i
++) {
180 arr
= &trans
->command_groups
[i
];
183 for (j
= 0; j
< arr
->size
- 1; j
++)
184 if (arr
->arr
[j
].cmd_id
> arr
->arr
[j
+ 1].cmd_id
)
189 IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted
);
191 void iwl_trans_configure(struct iwl_trans
*trans
,
192 const struct iwl_trans_config
*trans_cfg
)
194 trans
->op_mode
= trans_cfg
->op_mode
;
196 iwl_trans_pcie_configure(trans
, trans_cfg
);
197 WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg
));
199 IWL_EXPORT_SYMBOL(iwl_trans_configure
);
/* Start the hardware (may sleep); returns 0 or a negative error code */
int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return iwl_trans_pcie_start_hw(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_start_hw);
209 void iwl_trans_op_mode_leave(struct iwl_trans
*trans
)
213 iwl_trans_pcie_op_mode_leave(trans
);
215 trans
->op_mode
= NULL
;
217 trans
->state
= IWL_TRANS_NO_FW
;
219 IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave
);
221 void iwl_trans_write8(struct iwl_trans
*trans
, u32 ofs
, u8 val
)
223 iwl_trans_pcie_write8(trans
, ofs
, val
);
225 IWL_EXPORT_SYMBOL(iwl_trans_write8
);
227 void iwl_trans_write32(struct iwl_trans
*trans
, u32 ofs
, u32 val
)
229 iwl_trans_pcie_write32(trans
, ofs
, val
);
231 IWL_EXPORT_SYMBOL(iwl_trans_write32
);
233 u32
iwl_trans_read32(struct iwl_trans
*trans
, u32 ofs
)
235 return iwl_trans_pcie_read32(trans
, ofs
);
237 IWL_EXPORT_SYMBOL(iwl_trans_read32
);
239 u32
iwl_trans_read_prph(struct iwl_trans
*trans
, u32 ofs
)
241 return iwl_trans_pcie_read_prph(trans
, ofs
);
243 IWL_EXPORT_SYMBOL(iwl_trans_read_prph
);
245 void iwl_trans_write_prph(struct iwl_trans
*trans
, u32 ofs
, u32 val
)
247 return iwl_trans_pcie_write_prph(trans
, ofs
, val
);
249 IWL_EXPORT_SYMBOL(iwl_trans_write_prph
);
251 int iwl_trans_read_mem(struct iwl_trans
*trans
, u32 addr
,
252 void *buf
, int dwords
)
254 return iwl_trans_pcie_read_mem(trans
, addr
, buf
, dwords
);
256 IWL_EXPORT_SYMBOL(iwl_trans_read_mem
);
258 int iwl_trans_write_mem(struct iwl_trans
*trans
, u32 addr
,
259 const void *buf
, int dwords
)
261 return iwl_trans_pcie_write_mem(trans
, addr
, buf
, dwords
);
263 IWL_EXPORT_SYMBOL(iwl_trans_write_mem
);
265 void iwl_trans_set_pmi(struct iwl_trans
*trans
, bool state
)
268 set_bit(STATUS_TPOWER_PMI
, &trans
->status
);
270 clear_bit(STATUS_TPOWER_PMI
, &trans
->status
);
272 IWL_EXPORT_SYMBOL(iwl_trans_set_pmi
);
274 int iwl_trans_sw_reset(struct iwl_trans
*trans
, bool retake_ownership
)
276 return iwl_trans_pcie_sw_reset(trans
, retake_ownership
);
278 IWL_EXPORT_SYMBOL(iwl_trans_sw_reset
);
280 struct iwl_trans_dump_data
*
281 iwl_trans_dump_data(struct iwl_trans
*trans
, u32 dump_mask
,
282 const struct iwl_dump_sanitize_ops
*sanitize_ops
,
285 return iwl_trans_pcie_dump_data(trans
, dump_mask
,
286 sanitize_ops
, sanitize_ctx
);
288 IWL_EXPORT_SYMBOL(iwl_trans_dump_data
);
290 int iwl_trans_d3_suspend(struct iwl_trans
*trans
, bool test
, bool reset
)
294 return iwl_trans_pcie_d3_suspend(trans
, test
, reset
);
296 IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend
);
298 int iwl_trans_d3_resume(struct iwl_trans
*trans
, enum iwl_d3_status
*status
,
299 bool test
, bool reset
)
303 return iwl_trans_pcie_d3_resume(trans
, status
, test
, reset
);
305 IWL_EXPORT_SYMBOL(iwl_trans_d3_resume
);
307 void iwl_trans_interrupts(struct iwl_trans
*trans
, bool enable
)
309 iwl_trans_pci_interrupts(trans
, enable
);
311 IWL_EXPORT_SYMBOL(iwl_trans_interrupts
);
/* Trigger an NMI in the device and wait for it to be processed */
void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	iwl_trans_pcie_sync_nmi(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);
319 int iwl_trans_write_imr_mem(struct iwl_trans
*trans
, u32 dst_addr
,
320 u64 src_addr
, u32 byte_cnt
)
322 return iwl_trans_pcie_copy_imr(trans
, dst_addr
, src_addr
, byte_cnt
);
324 IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem
);
326 void iwl_trans_set_bits_mask(struct iwl_trans
*trans
, u32 reg
,
329 iwl_trans_pcie_set_bits_mask(trans
, reg
, mask
, value
);
331 IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask
);
333 int iwl_trans_read_config32(struct iwl_trans
*trans
, u32 ofs
,
336 return iwl_trans_pcie_read_config32(trans
, ofs
, val
);
338 IWL_EXPORT_SYMBOL(iwl_trans_read_config32
);
340 bool _iwl_trans_grab_nic_access(struct iwl_trans
*trans
)
342 return iwl_trans_pcie_grab_nic_access(trans
);
344 IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access
);
346 void __releases(nic_access
)
347 iwl_trans_release_nic_access(struct iwl_trans
*trans
)
349 iwl_trans_pcie_release_nic_access(trans
);
350 __release(nic_access
);
352 IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access
);
354 void iwl_trans_fw_alive(struct iwl_trans
*trans
, u32 scd_addr
)
358 trans
->state
= IWL_TRANS_FW_ALIVE
;
360 if (trans
->trans_cfg
->gen2
)
361 iwl_trans_pcie_gen2_fw_alive(trans
);
363 iwl_trans_pcie_fw_alive(trans
, scd_addr
);
365 IWL_EXPORT_SYMBOL(iwl_trans_fw_alive
);
367 int iwl_trans_start_fw(struct iwl_trans
*trans
, const struct fw_img
*fw
,
374 WARN_ON_ONCE(!trans
->rx_mpdu_cmd
);
376 clear_bit(STATUS_FW_ERROR
, &trans
->status
);
378 if (trans
->trans_cfg
->gen2
)
379 ret
= iwl_trans_pcie_gen2_start_fw(trans
, fw
, run_in_rfkill
);
381 ret
= iwl_trans_pcie_start_fw(trans
, fw
, run_in_rfkill
);
384 trans
->state
= IWL_TRANS_FW_STARTED
;
388 IWL_EXPORT_SYMBOL(iwl_trans_start_fw
);
390 void iwl_trans_stop_device(struct iwl_trans
*trans
)
394 if (trans
->trans_cfg
->gen2
)
395 iwl_trans_pcie_gen2_stop_device(trans
);
397 iwl_trans_pcie_stop_device(trans
);
399 trans
->state
= IWL_TRANS_NO_FW
;
401 IWL_EXPORT_SYMBOL(iwl_trans_stop_device
);
403 int iwl_trans_tx(struct iwl_trans
*trans
, struct sk_buff
*skb
,
404 struct iwl_device_tx_cmd
*dev_cmd
, int queue
)
406 if (unlikely(test_bit(STATUS_FW_ERROR
, &trans
->status
)))
409 if (WARN_ONCE(trans
->state
!= IWL_TRANS_FW_ALIVE
,
410 "bad state = %d\n", trans
->state
))
413 if (trans
->trans_cfg
->gen2
)
414 return iwl_txq_gen2_tx(trans
, skb
, dev_cmd
, queue
);
416 return iwl_trans_pcie_tx(trans
, skb
, dev_cmd
, queue
);
418 IWL_EXPORT_SYMBOL(iwl_trans_tx
);
420 void iwl_trans_reclaim(struct iwl_trans
*trans
, int queue
, int ssn
,
421 struct sk_buff_head
*skbs
, bool is_flush
)
423 if (WARN_ONCE(trans
->state
!= IWL_TRANS_FW_ALIVE
,
424 "bad state = %d\n", trans
->state
))
427 iwl_pcie_reclaim(trans
, queue
, ssn
, skbs
, is_flush
);
429 IWL_EXPORT_SYMBOL(iwl_trans_reclaim
);
431 void iwl_trans_txq_disable(struct iwl_trans
*trans
, int queue
,
434 iwl_trans_pcie_txq_disable(trans
, queue
, configure_scd
);
436 IWL_EXPORT_SYMBOL(iwl_trans_txq_disable
);
438 bool iwl_trans_txq_enable_cfg(struct iwl_trans
*trans
, int queue
, u16 ssn
,
439 const struct iwl_trans_txq_scd_cfg
*cfg
,
440 unsigned int queue_wdg_timeout
)
444 if (WARN_ONCE(trans
->state
!= IWL_TRANS_FW_ALIVE
,
445 "bad state = %d\n", trans
->state
))
448 return iwl_trans_pcie_txq_enable(trans
, queue
, ssn
,
449 cfg
, queue_wdg_timeout
);
451 IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg
);
453 int iwl_trans_wait_txq_empty(struct iwl_trans
*trans
, int queue
)
455 if (WARN_ONCE(trans
->state
!= IWL_TRANS_FW_ALIVE
,
456 "bad state = %d\n", trans
->state
))
459 return iwl_trans_pcie_wait_txq_empty(trans
, queue
);
461 IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty
);
463 int iwl_trans_wait_tx_queues_empty(struct iwl_trans
*trans
, u32 txqs
)
465 if (WARN_ONCE(trans
->state
!= IWL_TRANS_FW_ALIVE
,
466 "bad state = %d\n", trans
->state
))
469 return iwl_trans_pcie_wait_txqs_empty(trans
, txqs
);
471 IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty
);
473 void iwl_trans_freeze_txq_timer(struct iwl_trans
*trans
,
474 unsigned long txqs
, bool freeze
)
476 if (WARN_ONCE(trans
->state
!= IWL_TRANS_FW_ALIVE
,
477 "bad state = %d\n", trans
->state
))
480 iwl_pcie_freeze_txq_timer(trans
, txqs
, freeze
);
482 IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer
);
484 void iwl_trans_txq_set_shared_mode(struct iwl_trans
*trans
,
485 int txq_id
, bool shared_mode
)
487 iwl_trans_pcie_txq_set_shared_mode(trans
, txq_id
, shared_mode
);
489 IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode
);
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Tear down the transport's debugfs entries */
void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
{
	iwl_trans_pcie_debugfs_cleanup(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
#endif
499 void iwl_trans_set_q_ptrs(struct iwl_trans
*trans
, int queue
, int ptr
)
501 if (WARN_ONCE(trans
->state
!= IWL_TRANS_FW_ALIVE
,
502 "bad state = %d\n", trans
->state
))
505 iwl_pcie_set_q_ptrs(trans
, queue
, ptr
);
507 IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs
);
509 int iwl_trans_txq_alloc(struct iwl_trans
*trans
, u32 flags
, u32 sta_mask
,
510 u8 tid
, int size
, unsigned int wdg_timeout
)
514 if (WARN_ONCE(trans
->state
!= IWL_TRANS_FW_ALIVE
,
515 "bad state = %d\n", trans
->state
))
518 return iwl_txq_dyn_alloc(trans
, flags
, sta_mask
, tid
,
521 IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc
);
/* Free a dynamically allocated TX queue */
void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	iwl_txq_dyn_free(trans, queue);
}
IWL_EXPORT_SYMBOL(iwl_trans_txq_free);
/* Fetch the DMA addresses/pointers of an RX queue into @data */
int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			       struct iwl_trans_rxq_dma_data *data)
{
	return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
}
IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);
/* Load the PNVM image into the device context info */
int iwl_trans_load_pnvm(struct iwl_trans *trans,
			const struct iwl_pnvm_image *pnvm_data,
			const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);
/* Point the device context info at the previously loaded PNVM */
void iwl_trans_set_pnvm(struct iwl_trans *trans,
			const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
/* Load the reduce-power tables into the device context info */
int iwl_trans_load_reduce_power(struct iwl_trans *trans,
				const struct iwl_pnvm_image *payloads,
				const struct iwl_ucode_capabilities *capa)
{
	return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
							      capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
/* Point the device context info at the loaded reduce-power tables */
void iwl_trans_set_reduce_power(struct iwl_trans *trans,
				const struct iwl_ucode_capabilities *capa)
{
	iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
}
IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);