/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2020 Intel Corporation
 */
#ifndef __iwl_trans_queue_tx_h__
#define __iwl_trans_queue_tx_h__
#include "iwl-fh.h"
#include "fw/api/tx.h"

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}

void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	if (trans->trans_cfg->use_tfh)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return txq->tfds + trans->txqs.tfd.size * idx;
}

int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		  bool cmd_queue);
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
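/*
 * Worked example: a mapping starting at phys == 0xfffff000 with
 * len == 0x2000 ends at 0x100001000, so upper_32_bits() of the start and
 * end addresses differ and iwl_txq_crosses_4g_boundary() returns true.
 */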
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);

static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
{
	if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}
}

/*
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 * @index -- current index
 */
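/*
 * The wrap relies on max_tfd_queue_size being a power of two, so the
 * bitwise AND below acts as a modulo; e.g. on a 256-entry queue,
 * incrementing index 255 yields 0.
 */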
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/*
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
 */
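/*
 * The complementary case of the wrap above: on a 256-entry queue,
 * decrementing index 0 yields 255 (the last slot).
 */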
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
{
	int index = iwl_txq_get_cmd_index(q, i);
	int r = iwl_txq_get_cmd_index(q, q->read_ptr);
	int w = iwl_txq_get_cmd_index(q, q->write_ptr);

	/*
	 * A slot is in use if it lies between the read and write pointers;
	 * the second branch handles a window that has wrapped around.
	 */
	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}

void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);

void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);

int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
			struct iwl_tfh_tfd *tfd, dma_addr_t addr,
			u16 len);

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd);

int iwl_txq_dyn_alloc(struct iwl_trans *trans,
		      __le16 flags, u8 sta_id, u8 tid,
		      int cmd_id, int size,
		      unsigned int timeout);

int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id);

void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
		 bool cmd_queue);
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);

struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);

static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
					      void *_tfd)
{
	struct iwl_tfd *tfd;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;

		return le16_to_cpu(tfd->num_tbs) & 0x1f;
	}

	tfd = (struct iwl_tfd *)_tfd;
	return tfd->num_tbs & 0x1f;
}

static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
					      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	}

	tfd = (struct iwl_tfd *)_tfd;
	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index);
void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
				     struct iwl_txq *txq);
void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
				      struct iwl_txq *txq, u16 byte_cnt,
				      int num_tbs);
void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
		     struct sk_buff_head *skbs);
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
				bool freeze);
void iwl_txq_progress(struct iwl_txq *txq);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
#endif /* __iwl_trans_queue_tx_h__ */