// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020 Intel Corporation
 */
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
#include "queue/tx.h"

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd;
	unsigned long flags;

	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);
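
	/*
	 * First pass over the chunks: record their sizes, pull the leading
	 * bytes into the part of the command that is always copied, and
	 * validate the NOCOPY/DUP flags so that copy_size and cmd_size end
	 * up consistent with how each chunk will be attached below.
	 */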
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

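	/*
	 * From here on the queue lock protects write_ptr and the
	 * entries[] slot that is about to be filled.
	 */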
	spin_lock_bh(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	memset(tfd, 0, sizeof(*tfd));

	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
			    INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);
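
	/*
	 * cmd_pos tracks where the next chunk is written inside out_cmd;
	 * copy_size re-accumulates only the bytes of out_cmd that will be
	 * mapped to the device (the wide header, fully copied chunks, and
	 * the leading bytes of nocopy/dup chunks).
	 */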

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
	iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
			    tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   (u8 *)out_cmd + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
				    copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}
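
	/*
	 * out_meta->tbs is a bitmap of the TBs attached above, so it must
	 * be able to hold one bit per possible TB index.
	 */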
	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}

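/*
 * Synchronous commands wait this long (in jiffies, i.e. two seconds)
 * for the firmware response before the wait is declared timed out.
 */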
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_trans_pcie_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

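/*
 * Entry point for gen2 host commands: drop the command if RF-kill is
 * asserted (unless it may be sent in RF-kill), enqueue-and-return for
 * CMD_ASYNC, otherwise fall through to the synchronous path above.
 */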
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}
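
/*
 * Usage sketch (illustrative only, not part of the driver): an op mode
 * normally reaches this code through the transport API rather than by
 * calling it directly. SOME_GROUP, SOME_CMD and struct iwl_some_cmd are
 * hypothetical names, e.g.:
 *
 *	struct iwl_some_cmd payload = {};
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(SOME_GROUP, SOME_CMD),
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret)
 *		iwl_free_resp(&hcmd);
 */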