drivers/dma/idxd/registers.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_

#include <uapi/linux/idxd.h>

/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_GNRD	0x11fb
#define PCI_DEVICE_ID_INTEL_DSA_DMR	0x1212
#define PCI_DEVICE_ID_INTEL_IAA_DMR	0x1216

#define DEVICE_VERSION_1		0x100
#define DEVICE_VERSION_2		0x200

#define IDXD_MMIO_BAR			0
#define IDXD_WQ_BAR			2
#define IDXD_PORTAL_SIZE		PAGE_SIZE

/* MMIO Device BAR0 Registers */
#define IDXD_VER_OFFSET			0x00
#define IDXD_VER_MAJOR_MASK		0xf0
#define IDXD_VER_MINOR_MASK		0x0f
#define GET_IDXD_VER_MAJOR(x)		(((x) & IDXD_VER_MAJOR_MASK) >> 4)
#define GET_IDXD_VER_MINOR(x)		((x) & IDXD_VER_MINOR_MASK)
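
/*
 * Worked example (illustrative only, not part of the register spec): the
 * version register at IDXD_VER_OFFSET packs major/minor into one byte, so a
 * raw value of 0x21 decodes as major 2, minor 1:
 *
 *	u32 ver = ioread32(reg_base + IDXD_VER_OFFSET);
 *	unsigned int major = GET_IDXD_VER_MAJOR(ver);	// (0x21 & 0xf0) >> 4 == 2
 *	unsigned int minor = GET_IDXD_VER_MINOR(ver);	//  0x21 & 0x0f       == 1
 *
 * reg_base here is assumed to be the ioremapped BAR0 (IDXD_MMIO_BAR).
 */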
union gen_cap_reg {
        struct {
                u64 block_on_fault:1;
                u64 overlap_copy:1;
                u64 cache_control_mem:1;
                u64 cache_control_cache:1;
                u64 cmd_cap:1;
                u64 rsvd:3;
                u64 dest_readback:1;
                u64 drain_readback:1;
                u64 rsvd2:3;
                u64 evl_support:2;
                u64 batch_continuation:1;
                u64 max_xfer_shift:5;
                u64 max_batch_shift:4;
                u64 max_ims_mult:6;
                u64 config_en:1;
                u64 rsvd3:32;
        };
        u64 bits;
} __packed;
#define IDXD_GENCAP_OFFSET		0x10
union wq_cap_reg {
        struct {
                u64 total_wq_size:16;
                u64 num_wqs:8;
                u64 wqcfg_size:4;
                u64 rsvd:20;
                u64 shared_mode:1;
                u64 dedicated_mode:1;
                u64 wq_ats_support:1;
                u64 priority:1;
                u64 occupancy:1;
                u64 occupancy_int:1;
                u64 op_config:1;
                u64 wq_prs_support:1;
                u64 rsvd4:8;
        };
        u64 bits;
} __packed;
#define IDXD_WQCAP_OFFSET		0x20
#define IDXD_WQCFG_MIN			5
union group_cap_reg {
        struct {
                u64 num_groups:8;
                u64 total_rdbufs:8;	/* formerly total_tokens */
                u64 rdbuf_ctrl:1;	/* formerly token_en */
                u64 rdbuf_limit:1;	/* formerly token_limit */
                u64 progress_limit:1;	/* descriptor and batch descriptor */
                u64 rsvd:45;
        };
        u64 bits;
} __packed;
#define IDXD_GRPCAP_OFFSET		0x30
union engine_cap_reg {
        struct {
                u64 num_engines:8;
                u64 rsvd:56;
        };
        u64 bits;
} __packed;
#define IDXD_ENGCAP_OFFSET		0x38
#define IDXD_OPCAP_NOOP			0x0001
#define IDXD_OPCAP_BATCH		0x0002
#define IDXD_OPCAP_MEMMOVE		0x0008
struct opcap {
        u64 bits[4];
};

#define IDXD_MAX_OPCAP_BITS		256U

#define IDXD_OPCAP_OFFSET		0x40
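
/*
 * Illustrative sketch (an assumption consistent with the NOOP/BATCH/MEMMOVE
 * bit definitions above, not stated by this header): OPCAP reads back as a
 * 256-bit bitmap in which bit n set means opcode n is supported.  A check
 * against a populated struct opcap could then look like:
 *
 *	static inline bool opcap_supported(struct opcap *cap, unsigned int op)
 *	{
 *		if (op >= IDXD_MAX_OPCAP_BITS)
 *			return false;
 *		return (cap->bits[op / 64] >> (op % 64)) & 1;
 *	}
 */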
#define IDXD_TABLE_OFFSET		0x60
union offsets_reg {
        struct {
                u64 grpcfg:16;
                u64 wqcfg:16;
                u64 msix_perm:16;
                u64 ims:16;
                u64 perfmon:16;
                u64 rsvd:48;
        };
        u64 bits[2];
} __packed;

#define IDXD_TABLE_MULT			0x100
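
/*
 * Usage note (a sketch of assumed driver usage, not part of the register
 * definition): each offsets_reg field is a table offset in units of
 * IDXD_TABLE_MULT (0x100) bytes from the start of BAR0, e.g.:
 *
 *	union offsets_reg offsets;
 *
 *	offsets.bits[0] = ioread64(reg_base + IDXD_TABLE_OFFSET);
 *	offsets.bits[1] = ioread64(reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
 *	grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
 *	wqcfg_offset  = offsets.wqcfg * IDXD_TABLE_MULT;
 */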
#define IDXD_GENCFG_OFFSET		0x80
union gencfg_reg {
        struct {
                u32 rdbuf_limit:8;
                u32 rsvd:4;
                u32 user_int_en:1;
                u32 evl_en:1;
                u32 rsvd2:18;
        };
        u32 bits;
} __packed;
#define IDXD_GENCTRL_OFFSET		0x88
union genctrl_reg {
        struct {
                u32 softerr_int_en:1;
                u32 halt_int_en:1;
                u32 evl_int_en:1;
                u32 rsvd:29;
        };
        u32 bits;
} __packed;
#define IDXD_GENSTATS_OFFSET		0x90
union gensts_reg {
        struct {
                u32 state:2;
                u32 reset_type:2;
                u32 rsvd:28;
        };
        u32 bits;
} __packed;
enum idxd_device_status_state {
        IDXD_DEVICE_STATE_DISABLED = 0,
        IDXD_DEVICE_STATE_ENABLED,
        IDXD_DEVICE_STATE_DRAIN,
        IDXD_DEVICE_STATE_HALT,
};

enum idxd_device_reset_type {
        IDXD_DEVICE_RESET_SOFTWARE = 0,
        IDXD_DEVICE_RESET_FLR,
        IDXD_DEVICE_RESET_WARM,
        IDXD_DEVICE_RESET_COLD,
};
#define IDXD_INTCAUSE_OFFSET		0x98
#define IDXD_INTC_ERR			0x01
#define IDXD_INTC_CMD			0x02
#define IDXD_INTC_OCCUPY		0x04
#define IDXD_INTC_PERFMON_OVFL		0x08
#define IDXD_INTC_HALT_STATE		0x10
#define IDXD_INTC_EVL			0x20
#define IDXD_INTC_INT_HANDLE_REVOKED	0x80000000

#define IDXD_CMD_OFFSET			0xa0
union idxd_command_reg {
        struct {
                u32 operand:20;
                u32 cmd:5;
                u32 rsvd:6;
                u32 int_req:1;
        };
        u32 bits;
} __packed;
enum idxd_cmd {
        IDXD_CMD_ENABLE_DEVICE = 1,
        IDXD_CMD_DISABLE_DEVICE,
        IDXD_CMD_DRAIN_ALL,
        IDXD_CMD_ABORT_ALL,
        IDXD_CMD_RESET_DEVICE,
        IDXD_CMD_ENABLE_WQ,
        IDXD_CMD_DISABLE_WQ,
        IDXD_CMD_DRAIN_WQ,
        IDXD_CMD_ABORT_WQ,
        IDXD_CMD_RESET_WQ,
        IDXD_CMD_DRAIN_PASID,
        IDXD_CMD_ABORT_PASID,
        IDXD_CMD_REQUEST_INT_HANDLE,
        IDXD_CMD_RELEASE_INT_HANDLE,
};

#define CMD_INT_HANDLE_IMS		0x10000
#define IDXD_CMDSTS_OFFSET		0xa8
union cmdsts_reg {
        struct {
                u8 err;
                u16 result;
                u8 rsvd:7;
                u8 active:1;
        };
        u32 bits;
} __packed;
#define IDXD_CMDSTS_ACTIVE		0x80000000
#define IDXD_CMDSTS_ERR_MASK		0xff
#define IDXD_CMDSTS_RES_SHIFT		8
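
/*
 * Illustrative sketch (assumed usage, not defined by this header): the
 * command-status dword can be polled and decoded with the masks above,
 * matching the cmdsts_reg layout (err in bits 7:0, result in bits 23:8,
 * active in bit 31):
 *
 *	u32 sts = ioread32(reg_base + IDXD_CMDSTS_OFFSET);
 *
 *	if (!(sts & IDXD_CMDSTS_ACTIVE)) {
 *		u8 err = sts & IDXD_CMDSTS_ERR_MASK;
 *		u16 result = sts >> IDXD_CMDSTS_RES_SHIFT;
 *		// err == IDXD_CMDSTS_SUCCESS means the command completed OK
 *	}
 */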
enum idxd_cmdsts_err {
        IDXD_CMDSTS_SUCCESS = 0,
        IDXD_CMDSTS_INVAL_CMD,
        IDXD_CMDSTS_INVAL_WQIDX,
        IDXD_CMDSTS_HW_ERR,
        /* enable device errors */
        IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10,
        IDXD_CMDSTS_ERR_CONFIG,
        IDXD_CMDSTS_ERR_BUSMASTER_EN,
        IDXD_CMDSTS_ERR_PASID_INVAL,
        IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE,
        IDXD_CMDSTS_ERR_GRP_CONFIG,
        IDXD_CMDSTS_ERR_GRP_CONFIG2,
        IDXD_CMDSTS_ERR_GRP_CONFIG3,
        IDXD_CMDSTS_ERR_GRP_CONFIG4,
        /* enable wq errors */
        IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20,
        IDXD_CMDSTS_ERR_WQ_ENABLED,
        IDXD_CMDSTS_ERR_WQ_SIZE,
        IDXD_CMDSTS_ERR_WQ_PRIOR,
        IDXD_CMDSTS_ERR_WQ_MODE,
        IDXD_CMDSTS_ERR_BOF_EN,
        IDXD_CMDSTS_ERR_PASID_EN,
        IDXD_CMDSTS_ERR_MAX_BATCH_SIZE,
        IDXD_CMDSTS_ERR_MAX_XFER_SIZE,
        /* disable device errors */
        IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31,
        /* disable WQ, drain WQ, abort WQ, reset WQ */
        IDXD_CMDSTS_ERR_DEV_NOT_EN,
        /* request interrupt handle */
        IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41,
        IDXD_CMDSTS_ERR_NO_HANDLE,
};
#define IDXD_CMDCAP_OFFSET		0xb0

#define IDXD_SWERR_OFFSET		0xc0
#define IDXD_SWERR_VALID		0x00000001
#define IDXD_SWERR_OVERFLOW		0x00000002
#define IDXD_SWERR_ACK			(IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW)
union sw_err_reg {
        struct {
                u64 valid:1;
                u64 overflow:1;
                u64 desc_valid:1;
                u64 wq_idx_valid:1;
                u64 batch:1;
                u64 fault_rw:1;
                u64 priv:1;
                u64 rsvd:1;
                u64 error:8;
                u64 wq_idx:8;
                u64 rsvd2:8;
                u64 operation:8;
                u64 pasid:20;
                u64 rsvd3:4;

                u64 batch_idx:16;
                u64 rsvd4:16;
                u64 invalid_flags:32;

                u64 fault_addr;

                u64 rsvd5;
        };
        u64 bits[4];
} __packed;
union iaa_cap_reg {
        struct {
                u64 dec_aecs_format_ver:1;
                u64 drop_init_bits:1;
                u64 chaining:1;
                u64 force_array_output_mod:1;
                u64 load_part_aecs:1;
                u64 comp_early_abort:1;
                u64 nested_comp:1;
                u64 diction_comp:1;
                u64 header_gen:1;
                u64 crypto_gcm:1;
                u64 crypto_cfb:1;
                u64 crypto_xts:1;
                u64 rsvd:52;
        };
        u64 bits;
} __packed;

#define IDXD_IAACAP_OFFSET		0x180
#define IDXD_EVLCFG_OFFSET		0xe0
union evlcfg_reg {
        struct {
                u64 pasid_en:1;
                u64 priv:1;
                u64 rsvd:10;
                u64 base_addr:52;

                u64 size:16;
                u64 pasid:20;
                u64 rsvd2:28;
        };
        u64 bits[2];
} __packed;

#define IDXD_EVL_SIZE_MIN		0x0040
#define IDXD_EVL_SIZE_MAX		0xffff
union msix_perm {
        struct {
                u32 rsvd:2;
                u32 ignore:1;
                u32 pasid_en:1;
                u32 rsvd2:8;
                u32 pasid:20;
        };
        u32 bits;
} __packed;
union group_flags {
        struct {
                u64 tc_a:3;
                u64 tc_b:3;
                u64 rsvd:1;
                u64 use_rdbuf_limit:1;
                u64 rdbufs_reserved:8;
                u64 rsvd2:4;
                u64 rdbufs_allowed:8;
                u64 rsvd3:4;
                u64 desc_progress_limit:2;
                u64 rsvd4:2;
                u64 batch_progress_limit:2;
                u64 rsvd5:26;
        };
        u64 bits;
} __packed;
struct grpcfg {
        u64 wqs[4];
        u64 engines;
        union group_flags flags;
} __packed;
union wqcfg {
        struct {
                /* bytes 0-3 */
                u16 wq_size;
                u16 rsvd;

                /* bytes 4-7 */
                u16 wq_thresh;
                u16 rsvd1;

                /* bytes 8-11 */
                u32 mode:1;	/* shared or dedicated */
                u32 bof:1;	/* block on fault */
                u32 wq_ats_disable:1;
                u32 wq_prs_disable:1;
                u32 priority:4;
                u32 pasid:20;
                u32 pasid_en:1;
                u32 priv:1;
                u32 rsvd3:2;

                /* bytes 12-15 */
                u32 max_xfer_shift:5;
                u32 max_batch_shift:4;
                u32 rsvd4:23;

                /* bytes 16-19 */
                u16 occupancy_inth;
                u16 occupancy_table_sel:1;
                u16 rsvd5:15;

                /* bytes 20-23 */
                u16 occupancy_limit;
                u16 occupancy_int_en:1;
                u16 rsvd6:15;

                /* bytes 24-27 */
                u16 occupancy;
                u16 occupancy_int:1;
                u16 rsvd7:12;
                u16 mode_support:1;
                u16 wq_state:2;

                /* bytes 28-31 */
                u32 rsvd8;

                /* bytes 32-63 */
                u64 op_config[4];
        };
        u32 bits[16];
} __packed;

#define WQCFG_PASID_IDX			2
#define WQCFG_PRIVL_IDX			2
#define WQCFG_OCCUP_IDX			6

#define WQCFG_OCCUP_MASK		0xffff
/*
 * This macro calculates the offset into the WQCFG register space.
 * idxd - struct idxd *
 * n - wq id
 * ofs - the index of the 32b dword for the config register
 *
 * The WQCFG register space is divided into a group of registers per wq;
 * the n index selects the group for that particular wq. Each register is
 * 32 bits, and ofs selects which 32-bit register within the group to access.
 */
#define WQCFG_OFFSET(_idxd_dev, n, ofs) \
({ \
	typeof(_idxd_dev) __idxd_dev = (_idxd_dev); \
	(__idxd_dev)->wqcfg_offset + (n) * (__idxd_dev)->wqcfg_size + sizeof(u32) * (ofs); \
})

#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
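
/*
 * A minimal usage sketch (an assumption, not mandated by this header):
 * WQCFG_OFFSET() only requires that its first argument point to a structure
 * with wqcfg_offset and wqcfg_size members; in the idxd driver that is
 * struct idxd_device, which also carries the ioremapped reg_base used below.
 * Reading the whole WQCFG block of workqueue n could then look like:
 *
 *	union wqcfg cfg;
 *	int i;
 *
 *	for (i = 0; i < WQCFG_STRIDES(idxd); i++)
 *		cfg.bits[i] = ioread32(idxd->reg_base +
 *				       WQCFG_OFFSET(idxd, n, i));
 *
 *	// e.g. the occupancy field then sits in cfg.bits[WQCFG_OCCUP_IDX],
 *	// masked with WQCFG_OCCUP_MASK.
 */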
#define GRPCFG_SIZE			64
#define GRPWQCFG_STRIDES		4

/*
 * This macro calculates the offset into the GRPCFG register space.
 * idxd - struct idxd *
 * n - group id
 * ofs - the index of the 64b qword for the config register
 *
 * The GRPCFG register block is divided into three sub-registers, which
 * are GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index allows us to move
 * to the register block that contains the three sub-registers.
 * Each register block is 64 bits, and ofs gives the offset within the
 * GRPWQCFG register to access.
 */
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
					   (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)
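
/*
 * Illustrative sketch (assumed usage, mirroring the WQCFG example above):
 * programming one group's configuration with these offsets, given a device
 * structure that provides grpcfg_offset plus an ioremapped reg_base, and a
 * populated struct grpcfg:
 *
 *	int i;
 *
 *	for (i = 0; i < GRPWQCFG_STRIDES; i++)
 *		iowrite64(grpcfg->wqs[i],
 *			  idxd->reg_base + GRPWQCFG_OFFSET(idxd, n, i));
 *	iowrite64(grpcfg->engines, idxd->reg_base + GRPENGCFG_OFFSET(idxd, n));
 *	iowrite64(grpcfg->flags.bits, idxd->reg_base + GRPFLGCFG_OFFSET(idxd, n));
 */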
/* The following are the performance monitor registers */
#define IDXD_PERFCAP_OFFSET		0x0
union idxd_perfcap {
        struct {
                u64 num_perf_counter:6;
                u64 rsvd1:2;
                u64 counter_width:8;
                u64 num_event_category:4;
                u64 global_event_category:16;
                u64 filter:8;
                u64 rsvd2:8;
                u64 cap_per_counter:1;
                u64 writeable_counter:1;
                u64 counter_freeze:1;
                u64 overflow_interrupt:1;
                u64 rsvd3:8;
        };
        u64 bits;
} __packed;
#define IDXD_EVNTCAP_OFFSET		0x80
union idxd_evntcap {
        struct {
                u64 events:28;
                u64 rsvd:36;
        };
        u64 bits;
} __packed;
struct idxd_event {
        union {
                struct {
                        u32 event_category:4;
                        u32 events:28;
                };
                u32 val;
        };
} __packed;
#define IDXD_CNTRCAP_OFFSET		0x800
struct idxd_cntrcap {
        union {
                struct {
                        u32 counter_width:8;
                        u32 rsvd:20;
                        u32 num_events:4;
                };
                u32 val;
        };
        struct idxd_event events[];
} __packed;
#define IDXD_PERFRST_OFFSET		0x10
union idxd_perfrst {
        struct {
                u32 perfrst_config:1;
                u32 perfrst_counter:1;
                u32 rsvd:30;
        };
        u32 val;
} __packed;
#define IDXD_OVFSTATUS_OFFSET		0x30
#define IDXD_PERFFRZ_OFFSET		0x20
#define IDXD_CNTRCFG_OFFSET		0x100
union idxd_cntrcfg {
        struct {
                u64 enable:1;
                u64 interrupt_ovf:1;
                u64 global_freeze_ovf:1;
                u64 rsvd1:5;
                u64 event_category:4;
                u64 rsvd2:20;
                u64 events:28;
                u64 rsvd3:4;
        };
        u64 val;
} __packed;
#define IDXD_FLTCFG_OFFSET		0x300

#define IDXD_CNTRDATA_OFFSET		0x200
union idxd_cntrdata {
        struct {
                u64 event_count_value;
        };
        u64 val;
} __packed;
union event_cfg {
        struct {
                u64 event_cat:4;
                u64 event_enc:28;
        };
        u64 val;
} __packed;
union filter_cfg {
        struct {
                u64 wq:32;
                u64 tc:8;
                u64 pg_sz:4;
                u64 xfer_sz:8;
                u64 eng:8;
        };
        u64 val;
} __packed;
#define IDXD_EVLSTATUS_OFFSET		0xf0

union evl_status_reg {
        struct {
                u32 head:16;
                u32 rsvd:16;
                u32 tail:16;
                u32 rsvd2:14;
                u32 int_pending:1;
                u32 rsvd3:1;
        };
        struct {
                u32 bits_lower32;
                u32 bits_upper32;
        };
        u64 bits;
} __packed;
#define IDXD_MAX_BATCH_IDENT		256

struct __evl_entry {
        u64 rsvd:2;
        u64 desc_valid:1;
        u64 wq_idx_valid:1;
        u64 batch:1;
        u64 fault_rw:1;
        u64 priv:1;
        u64 err_info_valid:1;
        u64 error:8;
        u64 wq_idx:8;
        u64 batch_id:8;
        u64 operation:8;
        u64 pasid:20;
        u64 rsvd2:4;

        u16 batch_idx;
        u16 rsvd3;
        union {
                /* Invalid Flags 0x11 */
                u32 invalid_flags;
                /* Invalid Int Handle 0x19 */
                /* Page fault 0x1a */
                /* Page fault 0x06, 0x1f, only operand_id */
                /* Page fault before drain or in batch, 0x26, 0x27 */
                struct {
                        u16 int_handle;
                        u16 rci:1;
                        u16 ims:1;
                        u16 rcr:1;
                        u16 first_err_in_batch:1;
                        u16 rsvd4_2:9;
                        u16 operand_id:3;
                };
        };
        u64 fault_addr;
        u64 rsvd5;
} __packed;
struct dsa_evl_entry {
        struct __evl_entry e;
        struct dsa_completion_record cr;
} __packed;

struct iax_evl_entry {
        struct __evl_entry e;
        u64 rsvd[4];
        struct iax_completion_record cr;
} __packed;

#endif