gpio: rcar: Fix runtime PM imbalance on error
[linux/fpc-iii.git] / drivers / net / wireless / ath / ath10k / sdio.h
blob33195f49acabc478088026c96802428e8a93b414
/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
 */
8 #ifndef _SDIO_H_
9 #define _SDIO_H_
11 #define ATH10K_HIF_MBOX_BLOCK_SIZE 256
13 #define QCA_MANUFACTURER_ID_BASE GENMASK(11, 8)
14 #define QCA_MANUFACTURER_ID_AR6005_BASE 0x5
15 #define QCA_MANUFACTURER_ID_QCA9377_BASE 0x7
16 #define QCA_SDIO_ID_AR6005_BASE 0x500
17 #define QCA_SDIO_ID_QCA9377_BASE 0x700
18 #define QCA_MANUFACTURER_ID_REV_MASK 0x00FF
19 #define QCA_MANUFACTURER_CODE 0x271 /* Qualcomm/Atheros */
21 #define ATH10K_SDIO_MAX_BUFFER_SIZE 4096 /*Unsure of this constant*/
/* Mailbox address in SDIO address space */
#define ATH10K_HIF_MBOX_BASE_ADDR               0x1000
#define ATH10K_HIF_MBOX_WIDTH                   0x800

#define ATH10K_HIF_MBOX_TOT_WIDTH \
	(ATH10K_HIF_MBOX_NUM_MAX * ATH10K_HIF_MBOX_WIDTH)

/* Extended mailbox windows (larger aperture for bulk transfers) */
#define ATH10K_HIF_MBOX0_EXT_BASE_ADDR          0x5000
#define ATH10K_HIF_MBOX0_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0     (56 * 1024)
#define ATH10K_HIF_MBOX1_EXT_WIDTH              (36 * 1024)
#define ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE        (2 * 1024)

/* Maximum HTC payload that fits in one SDIO buffer after the HTC header */
#define ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH \
	(ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))

#define ATH10K_HIF_MBOX_NUM_MAX                 4
#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM         64

#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)
/* HTC runs over mailbox 0 */
#define ATH10K_HTC_MAILBOX                      0
#define ATH10K_HTC_MAILBOX_MASK                 BIT(ATH10K_HTC_MAILBOX)

/* GMBOX addresses */
#define ATH10K_HIF_GMBOX_BASE_ADDR              0x7000
#define ATH10K_HIF_GMBOX_WIDTH                  0x4000
/* Modified versions of the sdio.h macros.
 * The macros in sdio.h can't be used easily with the FIELD_{PREP|GET}
 * macros in bitfield.h, so we define our own macros here.
 */
#define ATH10K_SDIO_DRIVE_DTSX_MASK \
	(SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT)

#define ATH10K_SDIO_DRIVE_DTSX_TYPE_B           0
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_A           1
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_C           2
#define ATH10K_SDIO_DRIVE_DTSX_TYPE_D           3
/* SDIO CCCR register definitions */
#define CCCR_SDIO_IRQ_MODE_REG                  0xF0
#define CCCR_SDIO_IRQ_MODE_REG_SDIO3            0x16

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR   0xF2

#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A      0x02
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C      0x04
#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D      0x08

#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS       0xF0
#define CCCR_SDIO_ASYNC_INT_DELAY_MASK          0xC0

/* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ            BIT(0)
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3      BIT(1)

#define ATH10K_SDIO_TARGET_DEBUG_INTR_MASK      0x01
/* The theoretical maximum number of RX messages that can be fetched
 * from the mbox interrupt handler in one loop is derived in the following
 * way:
 *
 * Let's assume that each packet in a bundle of the maximum bundle size
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
 * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
 *
 * In this case the driver must allocate
 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skb's.
 */
#define ATH10K_SDIO_MAX_RX_MSGS \
	(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)

#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL                    0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF  0xFFFEFFFF
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON   0x10000
101 struct ath10k_sdio_bus_request {
102 struct list_head list;
104 /* sdio address */
105 u32 address;
107 struct sk_buff *skb;
108 enum ath10k_htc_ep_id eid;
109 int status;
110 /* Specifies if the current request is an HTC message.
111 * If not, the eid is not applicable an the TX completion handler
112 * associated with the endpoint will not be invoked.
114 bool htc_msg;
115 /* Completion that (if set) will be invoked for non HTC requests
116 * (htc_msg == false) when the request has been processed.
118 struct completion *comp;
121 struct ath10k_sdio_rx_data {
122 struct sk_buff *skb;
123 size_t alloc_len;
124 size_t act_len;
125 enum ath10k_htc_ep_id eid;
126 bool part_of_bundle;
127 bool last_in_bundle;
128 bool trailer_only;
131 struct ath10k_sdio_irq_proc_regs {
132 u8 host_int_status;
133 u8 cpu_int_status;
134 u8 error_int_status;
135 u8 counter_int_status;
136 u8 mbox_frame;
137 u8 rx_lookahead_valid;
138 u8 host_int_status2;
139 u8 gmbox_rx_avail;
140 __le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
141 __le32 int_status_enable;
144 struct ath10k_sdio_irq_enable_regs {
145 u8 int_status_en;
146 u8 cpu_int_status_en;
147 u8 err_int_status_en;
148 u8 cntr_int_status_en;
151 struct ath10k_sdio_irq_data {
152 /* protects irq_proc_reg and irq_en_reg below.
153 * We use a mutex here and not a spinlock since we will have the
154 * mutex locked while calling the sdio_memcpy_ functions.
155 * These function require non atomic context, and hence, spinlocks
156 * can be held while calling these functions.
158 struct mutex mtx;
159 struct ath10k_sdio_irq_proc_regs *irq_proc_reg;
160 struct ath10k_sdio_irq_enable_regs *irq_en_reg;
163 struct ath10k_mbox_ext_info {
164 u32 htc_ext_addr;
165 u32 htc_ext_sz;
168 struct ath10k_mbox_info {
169 u32 htc_addr;
170 struct ath10k_mbox_ext_info ext_info[2];
171 u32 block_size;
172 u32 block_mask;
173 u32 gmbox_addr;
174 u32 gmbox_sz;
177 struct ath10k_sdio {
178 struct sdio_func *func;
180 struct ath10k_mbox_info mbox_info;
181 bool swap_mbox;
182 u32 mbox_addr[ATH10K_HTC_EP_COUNT];
183 u32 mbox_size[ATH10K_HTC_EP_COUNT];
185 /* available bus requests */
186 struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
187 /* free list of bus requests */
188 struct list_head bus_req_freeq;
190 struct sk_buff_head rx_head;
192 /* protects access to bus_req_freeq */
193 spinlock_t lock;
195 struct ath10k_sdio_rx_data rx_pkts[ATH10K_SDIO_MAX_RX_MSGS];
196 size_t n_rx_pkts;
198 struct ath10k *ar;
199 struct ath10k_sdio_irq_data irq_data;
201 /* temporary buffer for sdio read.
202 * It is allocated when probe, and used for receive bundled packets,
203 * the read for bundled packets is not parallel, so it does not need
204 * protected.
206 u8 *vsg_buffer;
208 /* temporary buffer for BMI requests */
209 u8 *bmi_buf;
211 bool is_disabled;
213 struct workqueue_struct *workqueue;
214 struct work_struct wr_async_work;
215 struct list_head wr_asyncq;
216 /* protects access to wr_asyncq */
217 spinlock_t wr_async_lock;
219 struct work_struct async_work_rx;
222 static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
224 return (struct ath10k_sdio *)ar->drv_priv;
227 #endif