/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#ifndef _HPTIOP_H_
#define _HPTIOP_H_

struct hpt_iopmu_itl {
	__le32 resrved0[4];
	__le32 inbound_msgaddr0;
	__le32 inbound_msgaddr1;
	__le32 outbound_msgaddr0;
	__le32 outbound_msgaddr1;
	__le32 inbound_doorbell;
	__le32 inbound_intstatus;
	__le32 inbound_intmask;
	__le32 outbound_doorbell;
	__le32 outbound_intstatus;
	__le32 outbound_intmask;
	__le32 reserved1[2];
	__le32 inbound_queue;
	__le32 outbound_queue;
};

#define IOPMU_QUEUE_EMPTY              0xffffffff
#define IOPMU_QUEUE_MASK_HOST_BITS     0xf0000000
#define IOPMU_QUEUE_ADDR_HOST_BIT      0x80000000
#define IOPMU_QUEUE_REQUEST_SIZE_BIT   0x40000000
#define IOPMU_QUEUE_REQUEST_RESULT_BIT 0x40000000

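/*
 * Requests travel through inbound_queue/outbound_queue of hpt_iopmu_itl.
 * A minimal, illustrative sketch of posting a host-allocated request
 * (names are taken from this header; the authoritative logic lives in
 * hptiop.c):
 *
 *	writel(req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
 *	       &hba->u.itl.iop->inbound_queue);
 *
 * Reading IOPMU_QUEUE_EMPTY back from outbound_queue means no completed
 * request is pending.
 */
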
#define IOPMU_OUTBOUND_INT_MSG0      1
#define IOPMU_OUTBOUND_INT_MSG1      2
#define IOPMU_OUTBOUND_INT_DOORBELL  4
#define IOPMU_OUTBOUND_INT_POSTQUEUE 8
#define IOPMU_OUTBOUND_INT_PCI       0x10

#define IOPMU_INBOUND_INT_MSG0       1
#define IOPMU_INBOUND_INT_MSG1       2
#define IOPMU_INBOUND_INT_DOORBELL   4
#define IOPMU_INBOUND_INT_ERROR      8
#define IOPMU_INBOUND_INT_POSTQUEUE  0x10

#define MVIOP_QUEUE_LEN  512

struct hpt_iopmu_mv {
	__le32 inbound_head;
	__le32 inbound_tail;
	__le32 outbound_head;
	__le32 outbound_tail;
	__le32 inbound_msg;
	__le32 outbound_msg;
	__le32 reserve[10];
	__le64 inbound_q[MVIOP_QUEUE_LEN];
	__le64 outbound_q[MVIOP_QUEUE_LEN];
};

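/*
 * The MV message unit exchanges 64-bit request descriptors through the two
 * MVIOP_QUEUE_LEN-deep circular queues above; the head/tail registers are
 * assumed to index inbound_q[]/outbound_q[] (see hptiop.c for the real
 * queue handling).
 */
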
struct hpt_iopmv_regs {
	__le32 reserved[0x20400 / 4];
	__le32 inbound_doorbell;
	__le32 inbound_intmask;
	__le32 outbound_doorbell;
	__le32 outbound_intmask;
};

#pragma pack(1)
struct hpt_iopmu_mvfrey {
	__le32 reserved0[(0x4000 - 0) / 4];
	__le32 inbound_base;
	__le32 inbound_base_high;
	__le32 reserved1[(0x4018 - 0x4008) / 4];
	__le32 inbound_write_ptr;
	__le32 reserved2[(0x402c - 0x401c) / 4];
	__le32 inbound_conf_ctl;
	__le32 reserved3[(0x4050 - 0x4030) / 4];
	__le32 outbound_base;
	__le32 outbound_base_high;
	__le32 outbound_shadow_base;
	__le32 outbound_shadow_base_high;
	__le32 reserved4[(0x4088 - 0x4060) / 4];
	__le32 isr_cause;
	__le32 isr_enable;
	__le32 reserved5[(0x1020c - 0x4090) / 4];
	__le32 pcie_f0_int_enable;
	__le32 reserved6[(0x10400 - 0x10210) / 4];
	__le32 f0_to_cpu_msg_a;
	__le32 reserved7[(0x10420 - 0x10404) / 4];
	__le32 cpu_to_f0_msg_a;
	__le32 reserved8[(0x10480 - 0x10424) / 4];
	__le32 f0_doorbell;
	__le32 f0_doorbell_enable;
};

struct mvfrey_inlist_entry {
	dma_addr_t addr;
	__le32 intrfc_len;
	__le32 reserved;
};

struct mvfrey_outlist_entry {
	__le32 val;
};

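/*
 * For MVFREY the host is assumed to allocate arrays of mvfrey_inlist_entry
 * and mvfrey_outlist_entry (the inlist/outlist members of struct hptiop_hba
 * below) and program their bus addresses into inbound_base/outbound_base of
 * hpt_iopmu_mvfrey; CL_POINTER_TOGGLE flips each time a write pointer wraps.
 * This is a sketch of the intent, not a hardware specification.
 */
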
#pragma pack()

#define MVIOP_MU_QUEUE_ADDR_HOST_MASK   (~(0x1full))
#define MVIOP_MU_QUEUE_ADDR_HOST_BIT    4

#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32        0xffffffff
#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT     1
#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2

#define MVIOP_MU_INBOUND_INT_MSG        1
#define MVIOP_MU_INBOUND_INT_POSTQUEUE  2
#define MVIOP_MU_OUTBOUND_INT_MSG       1
#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2

#define CL_POINTER_TOGGLE        0x00004000
#define CPU_TO_F0_DRBL_MSG_BIT   0x02000000

enum hpt_iopmu_message {
	/* host-to-iop messages */
	IOPMU_INBOUND_MSG0_NOP = 0,
	IOPMU_INBOUND_MSG0_RESET,
	IOPMU_INBOUND_MSG0_FLUSH,
	IOPMU_INBOUND_MSG0_SHUTDOWN,
	IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
	IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
	IOPMU_INBOUND_MSG0_RESET_COMM,
	IOPMU_INBOUND_MSG0_MAX = 0xff,
	/* iop-to-host messages */
	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_MAX = 0x1ff,
	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_0 = 0x200,
	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_MAX = 0x2ff,
	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_0 = 0x300,
	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
};

struct hpt_iop_request_header {
	__le32 size;
	__le32 type;
	__le32 flags;
	__le32 result;
	__le32 context; /* host context */
	__le32 context_hi32;
};

#define IOP_REQUEST_FLAG_SYNC_REQUEST   1
#define IOP_REQUEST_FLAG_BIST_REQUEST   2
#define IOP_REQUEST_FLAG_REMAPPED       4
#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8
#define IOP_REQUEST_FLAG_ADDR_BITS 0x40 /* flags[31:16] is phy_addr[47:32] */

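/*
 * Every host<->IOP request starts with struct hpt_iop_request_header: size
 * covers the whole request, type is one of hpt_iop_request_type below, and
 * the IOP is expected to write an hpt_iop_result_type value back into
 * result on completion (inferred from the field names; see hptiop.c for the
 * authoritative handling).
 */
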
enum hpt_iop_request_type {
	IOP_REQUEST_TYPE_GET_CONFIG = 0,
	IOP_REQUEST_TYPE_SET_CONFIG,
	IOP_REQUEST_TYPE_BLOCK_COMMAND,
	IOP_REQUEST_TYPE_SCSI_COMMAND,
	IOP_REQUEST_TYPE_IOCTL_COMMAND,
	IOP_REQUEST_TYPE_MAX
};

enum hpt_iop_result_type {
	IOP_RESULT_PENDING = 0,
	IOP_RESULT_SUCCESS,
	IOP_RESULT_FAIL,
	IOP_RESULT_BUSY,
	IOP_RESULT_RESET,
	IOP_RESULT_INVALID_REQUEST,
	IOP_RESULT_BAD_TARGET,
	IOP_RESULT_CHECK_CONDITION,
};

struct hpt_iop_request_get_config {
	struct hpt_iop_request_header header;
	__le32 interface_version;
	__le32 firmware_version;
	__le32 max_requests;
	__le32 request_size;
	__le32 max_sg_count;
	__le32 data_transfer_length;
	__le32 alignment_mask;
	__le32 max_devices;
	__le32 sdram_size;
};

struct hpt_iop_request_set_config {
	struct hpt_iop_request_header header;
	__le32 iop_id;
	__le16 vbus_id;
	__le16 max_host_request_size;
	__le32 reserve[6];
};

struct hpt_iopsg {
	__le32 size;
	__le32 eot; /* non-zero: end of table */
	__le64 pci_address;
};

struct hpt_iop_request_block_command {
	struct hpt_iop_request_header header;
	u8     channel;
	u8     target;
	u8     lun;
	u8     pad1;
	__le16 command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */
	__le16 sectors;
	__le64 lba;
	struct hpt_iopsg sg_list[1];
};

#define IOP_BLOCK_COMMAND_READ     1
#define IOP_BLOCK_COMMAND_WRITE    2
#define IOP_BLOCK_COMMAND_VERIFY   3
#define IOP_BLOCK_COMMAND_FLUSH    4
#define IOP_BLOCK_COMMAND_SHUTDOWN 5

struct hpt_iop_request_scsi_command {
	struct hpt_iop_request_header header;
	u8     channel;
	u8     target;
	u8     lun;
	u8     pad1;
	u8     cdb[16];
	__le32 dataxfer_length;
	struct hpt_iopsg sg_list[];
};

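/*
 * A minimal sketch of how a SCSI request might be filled in before being
 * posted (illustrative only; sg_count is a hypothetical local, and the
 * authoritative code is in hptiop.c):
 *
 *	req->header.type   = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
 *	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
 *	req->header.size   = cpu_to_le32(sizeof(*req) +
 *				sg_count * sizeof(struct hpt_iopsg));
 *	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
 *	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
 *
 * The last sg_list[] element must carry a non-zero eot marker.
 */
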
struct hpt_iop_request_ioctl_command {
	struct hpt_iop_request_header header;
	__le32 ioctl_code;
	__le32 inbuf_size;
	__le32 outbuf_size;
	__le32 bytes_returned;
	u8     buf[];
	/* out data should be put at buf[(inbuf_size+3)&~3] */
};

#define HPTIOP_MAX_REQUESTS  256u

struct hptiop_request {
	struct hptiop_request *next;
	void *req_virt;
	u32 req_shifted_phy;
	struct scsi_cmnd *scp;
	int index;
};

struct hpt_cmd_priv {
	int mapped;
	int sgcnt;
	dma_addr_t dma_handle;
};

#define HPT_SCP(scp) ((struct hpt_cmd_priv *)scsi_cmd_priv(scp))

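/*
 * Per-command private data lives in the scsi_cmnd private area, e.g.:
 *
 *	struct hpt_cmd_priv *priv = HPT_SCP(scp);
 *
 *	priv->mapped = 0;
 *	priv->sgcnt  = 0;
 */
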
enum hptiop_family {
	UNKNOWN_BASED_IOP,
	INTEL_BASED_IOP,
	MV_BASED_IOP,
	MVFREY_BASED_IOP
};

struct hptiop_hba {
	struct hptiop_adapter_ops *ops;
	union {
		struct {
			struct hpt_iopmu_itl __iomem *iop;
			void __iomem *plx;
		} itl;
		struct {
			struct hpt_iopmv_regs *regs;
			struct hpt_iopmu_mv __iomem *mu;
			void *internal_req;
			dma_addr_t internal_req_phy;
		} mv;
		struct {
			struct hpt_iop_request_get_config __iomem *config;
			struct hpt_iopmu_mvfrey __iomem *mu;

			int internal_mem_size;
			struct hptiop_request internal_req;
			int list_count;
			struct mvfrey_inlist_entry *inlist;
			dma_addr_t inlist_phy;
			__le32 inlist_wptr;
			struct mvfrey_outlist_entry *outlist;
			dma_addr_t outlist_phy;
			__le32 *outlist_cptr; /* copy pointer shadow */
			dma_addr_t outlist_cptr_phy;
			__le32 outlist_rptr;
		} mvfrey;
	} u;

	struct Scsi_Host *host;
	struct pci_dev *pcidev;

	/* IOP config info */
	u32	interface_version;
	u32	firmware_version;
	u32	sdram_size;
	u32	max_devices;
	u32	max_requests;
	u32	max_request_size;
	u32	max_sg_descriptors;

	u32	req_size; /* host-allocated request buffer size */

	u32	iopintf_v2: 1;
	u32	initialized: 1;
	u32	msg_done: 1;

	struct hptiop_request *req_list;
	struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];

	/* used to free allocated dma area */
	void *dma_coherent[HPTIOP_MAX_REQUESTS];
	dma_addr_t dma_coherent_handle[HPTIOP_MAX_REQUESTS];

	atomic_t reset_count;
	atomic_t resetting;

	wait_queue_head_t reset_wq;
	wait_queue_head_t ioctl_wq;
};

struct hpt_ioctl_k {
	struct hptiop_hba *hba;
	u32 ioctl_code;
	u32 inbuf_size;
	u32 outbuf_size;
	void *inbuf;
	void *outbuf;
	u32 *bytes_returned;
	void (*done)(struct hpt_ioctl_k *);
	int result; /* HPT_IOCTL_RESULT_ */
};

struct hptiop_adapter_ops {
	enum hptiop_family family;
	int  (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
	int  (*internal_memalloc)(struct hptiop_hba *hba);
	int  (*internal_memfree)(struct hptiop_hba *hba);
	int  (*map_pci_bar)(struct hptiop_hba *hba);
	void (*unmap_pci_bar)(struct hptiop_hba *hba);
	void (*enable_intr)(struct hptiop_hba *hba);
	void (*disable_intr)(struct hptiop_hba *hba);
	int  (*get_config)(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config);
	int  (*set_config)(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config);
	int  (*iop_intr)(struct hptiop_hba *hba);
	void (*post_msg)(struct hptiop_hba *hba, u32 msg);
	void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
	int  hw_dma_bit_mask;
	int  (*reset_comm)(struct hptiop_hba *hba);
	__le64 host_phy_flag;
};

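/*
 * Each controller family (ITL, MV, MVFREY) is expected to provide one
 * hptiop_adapter_ops table; hptiop_hba.ops points at the table for the
 * probed device, and the core driver calls through it for BAR mapping,
 * interrupt handling, request posting and configuration.
 */
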
#define HPT_IOCTL_RESULT_OK     0
#define HPT_IOCTL_RESULT_FAILED (-1)

#if 0
#define dprintk(fmt, args...) do { printk(fmt, ##args); } while(0)
#else
#define dprintk(fmt, args...)
#endif

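/*
 * dprintk() compiles away by default; change the "#if 0" above to "#if 1"
 * to get verbose debug output, e.g.:
 *
 *	dprintk("scsi%d: hptiop_probe\n", hba->host->host_no);
 */
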
#endif