/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer. The
 * transport layer doesn't provide any policy, algorithm or anything of this
 * kind, but only mechanisms to make the HW do something. It is not completely
 * stateless, but close to it.
 * We will have an implementation for each different supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe calls the transport layer's allocation function.
 *	   Of course this function is bus specific.
 *	3) This allocation function will spawn the upper layer, which will
 *	   register with mac80211.
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *	5) Then when finished (or reset):
 *	   stop_device
 *	6) Eventually, the free function will be called.
 *
 * A minimal sketch of step 4, as seen from an op_mode, follows this comment.
 */

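/*
 * Illustrative sketch only (not part of the API): the sequence an op_mode
 * might follow at mac80211 start time, using the inline wrappers defined
 * later in this file.
 *
 *	static int example_op_mode_start(struct iwl_trans *trans,
 *					 const struct fw_img *fw)
 *	{
 *		int ret;
 *
 *		ret = iwl_trans_start_hw(trans);
 *		if (ret)
 *			return ret;
 *
 *		ret = iwl_trans_start_fw(trans, fw, false);
 *		if (ret)
 *			iwl_trans_stop_device(trans);
 *
 *		return ret;
 *	}
 */
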
#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK	0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID	0x55550000
#define FH_RSCSR_FRAME_ALIGN	0x40
#define FH_RSCSR_RPA_EN		BIT(25)
#define FH_RSCSR_RADA_EN	BIT(26)
#define FH_RSCSR_RXQ_POS	16
#define FH_RSCSR_RXQ_MASK	0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}

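/*
 * Illustrative sketch only: decoding the RX queue index and the payload
 * length of a received packet with the masks and helpers above.  The
 * function name is hypothetical.
 *
 *	static void example_trace_rx(struct iwl_rx_packet *pkt)
 *	{
 *		u32 rxq = (le32_to_cpu(pkt->len_n_flags) &
 *			   FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS;
 *
 *		pr_debug("rxq %u, payload %u bytes\n",
 *			 rxq, iwl_rx_packet_payload_len(pkt));
 *	}
 */
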
/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}

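/*
 * Illustrative sketch only: a synchronous command whose second chunk is
 * mapped (NOCOPY) rather than copied, releasing the response when done.
 * ECHO_CMD, the buffers and the lengths are placeholders;
 * iwl_trans_send_cmd() is declared later in this file.
 *
 *	static int example_send_big_cmd(struct iwl_trans *trans,
 *					const void *hdr, u16 hdr_len,
 *					const void *big_buf, u16 big_len)
 *	{
 *		struct iwl_host_cmd cmd = {
 *			.id = ECHO_CMD,
 *			.flags = CMD_WANT_SKB,
 *			.data = { hdr, big_buf },
 *			.len = { hdr_len, big_len },
 *			.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *		};
 *		int ret = iwl_trans_send_cmd(trans, &cmd);
 *
 *		if (ret)
 *			return ret;
 *
 *		iwl_free_resp(&cmd);
 *		return 0;
 *	}
 */
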
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	/* mark the page stolen; the caller now owns the reference it takes */
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

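/*
 * Illustrative sketch only: an RX handler reads the packet through rxb_addr()
 * and must call rxb_steal_page() if it keeps the page beyond the handler's
 * lifetime.  The function name is hypothetical.
 *
 *	static u32 example_rx_payload_len(struct iwl_rx_cmd_buffer *rxb)
 *	{
 *		struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *
 *		return iwl_rx_packet_payload_len(pkt);
 *	}
 */
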
#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT		64
#define IWL_MAX_RX_HW_QUEUES	16

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(12 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 12 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }

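/*
 * Illustrative sketch only: how an op_mode typically builds its command-name
 * tables for debugging.  The entries here are placeholders; real arrays must
 * be sorted by cmd_id (see iwl_cmd_groups_verify_sorted() below).
 *
 *	static const struct iwl_hcmd_names example_legacy_names[] = {
 *		HCMD_NAME(ECHO_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr example_command_groups[] = {
 *		[0x0] = HCMD_ARR(example_legacy_names),
 *	};
 */
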
/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
};

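/*
 * Illustrative sketch only: an op_mode filling the configuration before
 * handing it to the transport with iwl_trans_configure() (defined later in
 * this file).  The queue/FIFO numbers are placeholders, not the values any
 * real op_mode uses.
 *
 *	static void example_configure(struct iwl_trans *trans,
 *				      struct iwl_op_mode *op_mode)
 *	{
 *		struct iwl_trans_config cfg = {
 *			.op_mode = op_mode,
 *			.cmd_queue = 0,
 *			.cmd_fifo = 7,
 *			.rx_buf_size = IWL_AMSDU_4K,
 *		};
 *
 *		iwl_trans_configure(trans, &cfg);
 *	}
 */
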
struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
 *	May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kick a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. From that point on, the HW will be stopped but will still issue
 *	an interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not %start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header if it is IPv4.
 *	Must be atomic
 * @reclaim: free packets until ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @read_config32: read a u32 value from the device's config space at
 *	the given offset.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
 *	of the trans debugfs
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	int (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
};

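/*
 * Illustrative sketch only: a bus-specific implementation provides its ops
 * table at allocation time.  The example_* handlers are hypothetical
 * stand-ins for real bus code (compare the PCIe transport), and a real table
 * must implement every mandatory handler, not just the ones shown.
 *
 *	static const struct iwl_trans_ops example_trans_ops = {
 *		.start_hw = example_start_hw,
 *		.op_mode_leave = example_op_mode_leave,
 *		.start_fw = example_start_fw,
 *		.fw_alive = example_fw_alive,
 *		.stop_device = example_stop_device,
 *		.send_cmd = example_send_cmd,
 *		.tx = example_tx,
 *		.reclaim = example_reclaim,
 *	};
 */
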
/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode). The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device. In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @error_event_table_tlv_status: bitmap that indicates what error table
 *	pointers were received via TLV. uses enum &iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than
 *	&IWL_FW_INI_DOMAIN_ALWAYS_ON
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
};

/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

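/*
 * Illustrative sketch only: the alloc/tx/free pairing on the data path.  The
 * queue number is a placeholder; on failure the op_mode still owns dev_cmd
 * and must free it itself.
 *
 *	static int example_tx_skb(struct iwl_trans *trans, struct sk_buff *skb)
 *	{
 *		struct iwl_device_tx_cmd *dev_cmd;
 *		int ret;
 *
 *		dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *		if (!dev_cmd)
 *			return -ENOMEM;
 *
 *		ret = iwl_trans_tx(trans, skb, dev_cmd, 0);
 *		if (ret)
 *			iwl_trans_free_tx_cmd(trans, dev_cmd);
 *
 *		return ret;
 *	}
 */
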
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

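/*
 * Illustrative sketch only: pairing an AC queue enable with the matching
 * disable.  Queue 4, FIFO 2 and the timeout are placeholders.
 *
 *	static void example_queue_cycle(struct iwl_trans *trans)
 *	{
 *		iwl_trans_ac_txq_enable(trans, 4, 2, msecs_to_jiffies(2500));
 *
 *		iwl_trans_txq_disable(trans, 4, true);
 *	}
 */
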
static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

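/*
 * Illustrative sketch only: reading a small SRAM block into a local buffer.
 * The SRAM address is a placeholder; the buffer size must be a whole number
 * of DWORDs or the BUILD_BUG_ON above fires.
 *
 *	static void example_read_sram(struct iwl_trans *trans)
 *	{
 *		u32 buf[4];
 *
 *		iwl_trans_read_mem_bytes(trans, 0x800000, buf, sizeof(buf));
 *	}
 */
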
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,			\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}

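/*
 * Illustrative sketch only: the canonical grab/release pattern around direct
 * register access; sleeping is not allowed between the two calls.
 *
 *	static u32 example_read_reg(struct iwl_trans *trans, u32 reg)
 *	{
 *		unsigned long flags;
 *		u32 val = 0;
 *
 *		if (iwl_trans_grab_nic_access(trans, &flags)) {
 *			val = iwl_trans_read32(trans, reg);
 *			iwl_trans_release_nic_access(trans, &flags);
 *		}
 *
 *		return val;
 *	}
 */
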
static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  unsigned int cmd_pool_size,
				  unsigned int cmd_pool_align);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */