// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Oracle Data Analytics Accelerator (DAX)
 *
 * DAX is a coprocessor which resides on the SPARC M7 (DAX1) and M8
 * (DAX2) processor chips, and has direct access to the CPU's L3
 * caches as well as physical memory. It can perform several
 * operations on data streams with various input and output formats.
 * The driver provides a transport mechanism only and has limited
 * knowledge of the various opcodes and data formats. A user space
 * library provides high level services and translates these into low
 * level commands which are then passed into the driver and
 * subsequently the hypervisor and the coprocessor. The library is
 * the recommended way for applications to use the coprocessor, and
 * the driver interface is not intended for general use.
 *
 * See Documentation/sparc/oradax/oracle-dax.rst for more details.
 */
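/*
 * Illustrative user-space flow (a sketch only -- the authoritative
 * layouts and submission protocol are defined by asm/oradax.h and the
 * documentation referenced above; the CCB contents are normally built
 * by the user library, and "idx" below is an assumed free completion
 * slot):
 *
 *	int fd = open("/dev/oradax1", O_RDWR);
 *	struct dax_cca *ca = mmap(NULL, DAX_MMAP_LEN, PROT_READ,
 *				  MAP_SHARED, fd, 0);
 *	pwrite(fd, ccbs, nccbs * sizeof(struct dax_ccb), idx);
 *	while (ca[idx].status == CCA_STAT_NOT_COMPLETED)
 *		;	(poll the read-only completion area)
 *	If the submission was not fully accepted, a read() of
 *	union ccb_result completes the protocol.
 */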
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/oradax.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for Oracle Data Analytics Accelerator");
#define DAX_DBG_FLG_BASIC	0x01
#define DAX_DBG_FLG_STAT	0x02
#define DAX_DBG_FLG_INFO	0x04
#define DAX_DBG_FLG_ALL		0xff
#define dax_err(fmt, ...)	pr_err("%s: " fmt "\n", __func__, ##__VA_ARGS__)
#define dax_info(fmt, ...)	pr_info("%s: " fmt "\n", __func__, ##__VA_ARGS__)

#define dax_dbg(fmt, ...)	do {					\
					if (dax_debug & DAX_DBG_FLG_BASIC)\
						dax_info(fmt, ##__VA_ARGS__); \
				} while (0)
#define dax_stat_dbg(fmt, ...)	do {					\
					if (dax_debug & DAX_DBG_FLG_STAT) \
						dax_info(fmt, ##__VA_ARGS__); \
				} while (0)
#define dax_info_dbg(fmt, ...)	do {					\
					if (dax_debug & DAX_DBG_FLG_INFO) \
						dax_info(fmt, ##__VA_ARGS__); \
				} while (0)
#define DAX1_MINOR		1
#define DAX1_MAJOR		1
#define DAX2_MINOR		0
#define DAX2_MAJOR		2

#define DAX1_STR		"ORCL,sun4v-dax"
#define DAX2_STR		"ORCL,sun4v-dax2"

#define DAX_CA_ELEMS		(DAX_MMAP_LEN / sizeof(struct dax_cca))

#define DAX_CCB_USEC		100
#define DAX_CCB_RETRIES		10000
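/*
 * dax_ccb_wait() below polls in DAX_CCB_USEC steps for up to
 * DAX_CCB_RETRIES attempts, i.e. a worst-case wait of
 * 100 usec * 10000 = 1 second before an unfinished CCB is killed.
 */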
/* stream types */
enum {
	OUT,
	PRI,
	SEC,
	TBL,
	NUM_STREAM_TYPES
};

/* completion status */
#define CCA_STAT_NOT_COMPLETED	0
#define CCA_STAT_COMPLETED	1
#define CCA_STAT_FAILED		2
#define CCA_STAT_KILLED		3
#define CCA_STAT_NOT_RUN	4
#define CCA_STAT_PIPE_OUT	5
#define CCA_STAT_PIPE_SRC	6
#define CCA_STAT_PIPE_DST	7
/* completion err */
#define CCA_ERR_SUCCESS		0x0	/* no error */
#define CCA_ERR_OVERFLOW	0x1	/* buffer overflow */
#define CCA_ERR_DECODE		0x2	/* CCB decode error */
#define CCA_ERR_PAGE_OVERFLOW	0x3	/* page overflow */
#define CCA_ERR_KILLED		0x7	/* command was killed */
#define CCA_ERR_TIMEOUT		0x8	/* Timeout */
#define CCA_ERR_ADI		0x9	/* ADI error */
#define CCA_ERR_DATA_FMT	0xA	/* data format error */
#define CCA_ERR_OTHER_NO_RETRY	0xE	/* Other error, do not retry */
#define CCA_ERR_OTHER_RETRY	0xF	/* Other error, retry */
#define CCA_ERR_PARTIAL_SYMBOL	0x80	/* QP partial symbol warning */
/* CCB address types */
#define DAX_ADDR_TYPE_NONE	0
#define DAX_ADDR_TYPE_VA_ALT	1	/* secondary context */
#define DAX_ADDR_TYPE_RA	2	/* real address */
#define DAX_ADDR_TYPE_VA	3	/* virtual address */
/* dax_header_t opcode */
#define DAX_OP_SYNC_NOP		0x0
#define DAX_OP_EXTRACT		0x1
#define DAX_OP_SCAN_VALUE	0x2
#define DAX_OP_SCAN_RANGE	0x3
#define DAX_OP_TRANSLATE	0x4
#define DAX_OP_SELECT		0x5
#define DAX_OP_INVERT		0x10	/* OR with translate, scan opcodes */
struct dax_header {
	u32 ccb_version:4;	/* 31:28 CCB Version */
				/* 27:24 Sync Flags */
	u32 pipe:1;		/* Pipeline */
	u32 longccb:1;		/* Longccb. Set for scan with lu2, lu3, lu4. */
	u32 cond:1;		/* Conditional */
	u32 serial:1;		/* Serial */
	u32 opcode:8;		/* 23:16 Opcode */
				/* 15:0 Address Type. */
	u32 reserved:3;		/* 15:13 reserved */
	u32 table_addr_type:2;	/* 12:11 Huffman Table Address Type */
	u32 out_addr_type:3;	/* 10:8 Destination Address Type */
	u32 sec_addr_type:3;	/* 7:5 Secondary Source Address Type */
	u32 pri_addr_type:3;	/* 4:2 Primary Source Address Type */
	u32 cca_addr_type:2;	/* 1:0 Completion Address Type */
};
struct dax_control {
	u32 pri_fmt:4;		/* 31:28 Primary Input Format */
	u32 pri_elem_size:5;	/* 27:23 Primary Input Element Size(less1) */
	u32 pri_offset:3;	/* 22:20 Primary Input Starting Offset */
	u32 sec_encoding:1;	/* 19    Secondary Input Encoding */
				/*       (must be 0 for Select) */
	u32 sec_offset:3;	/* 18:16 Secondary Input Starting Offset */
	u32 sec_elem_size:2;	/* 15:14 Secondary Input Element Size */
				/*       (must be 0 for Select) */
	u32 out_fmt:2;		/* 13:12 Output Format */
	u32 out_elem_size:2;	/* 11:10 Output Element Size */
	u32 misc:10;		/* 9:0 Opcode specific info */
};
struct dax_data_access {
	u64 flow_ctrl:2;	/* 63:62 Flow Control Type */
	u64 pipe_target:2;	/* 61:60 Pipeline Target */
	u64 out_buf_size:20;	/* 59:40 Output Buffer Size */
				/*       (cachelines less 1) */
	u64 unused1:8;		/* 39:32 Reserved, Set to 0 */
	u64 out_alloc:5;	/* 31:27 Output Allocation */
	u64 unused2:1;		/* 26    Reserved */
	u64 pri_len_fmt:2;	/* 25:24 Input Length Format */
	u64 pri_len:24;		/* 23:0  Input Element/Byte/Bit Count */
				/*       (less 1) */
};
struct dax_ccb {
	struct dax_header hdr;	/* CCB Header */
	struct dax_control ctrl;/* Control Word */
	void *ca;		/* Completion Address */
	void *pri;		/* Primary Input Address */
	struct dax_data_access dac; /* Data Access Control */
	void *sec;		/* Secondary Input Address */
	u64 dword5;		/* depends on opcode */
	void *out;		/* Output Address */
	void *tbl;		/* Table Address or bitmap */
};
struct dax_cca {
	u8	status;		/* user may mwait on this address */
	u8	err;		/* user visible error notification */
	u8	rsvd[2];	/* reserved */
	u32	n_remaining;	/* for QP partial symbol warning */
	u32	output_sz;	/* output in bytes */
	u32	rsvd2;		/* reserved */
	u64	run_cycles;	/* run time in OCND2 cycles */
	u64	run_stats;	/* nothing reported in version 1.0 */
	u32	n_processed;	/* number input elements */
	u32	rsvd3[5];	/* reserved */
	u64	retval;		/* command return value */
	u64	rsvd4[8];	/* reserved */
};
/* per thread CCB context */
struct dax_ctx {
	struct dax_ccb		*ccb_buf;
	u64			ccb_buf_ra;	/* cached RA of ccb_buf  */
	struct dax_cca		*ca_buf;
	u64			ca_buf_ra;	/* cached RA of ca_buf   */
	struct page		*pages[DAX_CA_ELEMS][NUM_STREAM_TYPES];
						/* array of locked pages */
	struct task_struct	*owner;		/* thread that owns ctx  */
	struct task_struct	*client;	/* requesting thread     */
	union ccb_result	result;
	u32			ccb_count;
	u32			fail_count;
};
/* driver public entry points */
static int dax_open(struct inode *inode, struct file *file);
static ssize_t dax_read(struct file *filp, char __user *buf,
			size_t count, loff_t *ppos);
static ssize_t dax_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *ppos);
static int dax_devmap(struct file *f, struct vm_area_struct *vma);
static int dax_close(struct inode *i, struct file *f);
static const struct file_operations dax_fops = {
	.owner		= THIS_MODULE,
	.open		= dax_open,
	.read		= dax_read,
	.write		= dax_write,
	.mmap		= dax_devmap,
	.release	= dax_close,
};
static int dax_ccb_exec(struct dax_ctx *ctx, const char __user *buf,
			size_t count, loff_t *ppos);
static int dax_ccb_info(u64 ca, struct ccb_info_result *info);
static int dax_ccb_kill(u64 ca, u16 *kill_res);
static struct cdev c_dev;
static struct class *cl;
static dev_t first;
static int max_ccb_version;
static int dax_debug;
module_param(dax_debug, int, 0644);
MODULE_PARM_DESC(dax_debug, "Debug flags");
static int __init dax_attach(void)
{
	unsigned long dummy, hv_rv, major, minor, minor_requested, max_ccbs;
	struct mdesc_handle *hp = mdesc_grab();
	char *prop, *dax_name;
	bool found = false;
	int len, ret = 0;
	u64 pn;

	if (hp == NULL) {
		dax_err("Unable to grab mdesc");
		return -ENODEV;
	}

	mdesc_for_each_node_by_name(hp, pn, "virtual-device") {
		prop = (char *)mdesc_get_property(hp, pn, "name", &len);
		if (prop == NULL)
			continue;
		if (strncmp(prop, "dax", strlen("dax")))
			continue;
		dax_dbg("Found node 0x%llx = %s", pn, prop);

		prop = (char *)mdesc_get_property(hp, pn, "compatible", &len);
		if (prop == NULL)
			continue;
		dax_dbg("Found node 0x%llx = %s", pn, prop);
		found = true;
		break;
	}

	if (!found) {
		dax_err("No DAX device found");
		ret = -ENODEV;
		goto done;
	}

	if (strncmp(prop, DAX2_STR, strlen(DAX2_STR)) == 0) {
		dax_name = DAX_NAME "2";
		major = DAX2_MAJOR;
		minor_requested = DAX2_MINOR;
		max_ccb_version = 1;
		dax_dbg("MD indicates DAX2 coprocessor");
	} else if (strncmp(prop, DAX1_STR, strlen(DAX1_STR)) == 0) {
		dax_name = DAX_NAME "1";
		major = DAX1_MAJOR;
		minor_requested = DAX1_MINOR;
		max_ccb_version = 0;
		dax_dbg("MD indicates DAX1 coprocessor");
	} else {
		dax_err("Unknown dax type: %s", prop);
		ret = -ENODEV;
		goto done;
	}

	minor = minor_requested;
	dax_dbg("Registering DAX HV api with major %ld minor %ld", major,
		minor);
	if (sun4v_hvapi_register(HV_GRP_DAX, major, &minor)) {
		dax_err("hvapi_register failed");
		ret = -ENODEV;
		goto done;
	}
	dax_dbg("Max minor supported by HV = %ld (major %ld)", minor,
		major);
	minor = min(minor, minor_requested);
	dax_dbg("registered DAX major %ld minor %ld", major, minor);

	/* submit a zero length ccb array to query coprocessor queue size */
	hv_rv = sun4v_ccb_submit(0, 0, HV_CCB_QUERY_CMD, 0, &max_ccbs, &dummy);
	if (hv_rv != 0) {
		dax_err("get_hwqueue_size failed with status=%ld and max_ccbs=%ld",
			hv_rv, max_ccbs);
		ret = -ENODEV;
		goto done;
	}

	if (max_ccbs != DAX_MAX_CCBS) {
		dax_err("HV reports unsupported max_ccbs=%ld", max_ccbs);
		ret = -ENODEV;
		goto done;
	}

	if (alloc_chrdev_region(&first, 0, 1, DAX_NAME) < 0) {
		dax_err("alloc_chrdev_region failed");
		ret = -ENXIO;
		goto done;
	}

	cl = class_create(THIS_MODULE, DAX_NAME);
	if (IS_ERR(cl)) {
		dax_err("class_create failed");
		ret = PTR_ERR(cl);
		goto class_error;
	}

	if (device_create(cl, NULL, first, NULL, dax_name) == NULL) {
		dax_err("device_create failed");
		ret = -ENXIO;
		goto device_error;
	}

	cdev_init(&c_dev, &dax_fops);
	if (cdev_add(&c_dev, first, 1) == -1) {
		dax_err("cdev_add failed");
		ret = -ENXIO;
		goto cdev_error;
	}

	pr_info("Attached DAX module\n");
	goto done;

cdev_error:
	device_destroy(cl, first);
device_error:
	class_destroy(cl);
class_error:
	unregister_chrdev_region(first, 1);
done:
	mdesc_release(hp);
	return ret;
}
module_init(dax_attach);
static void __exit dax_detach(void)
{
	pr_info("Cleaning up DAX module\n");
	cdev_del(&c_dev);
	device_destroy(cl, first);
	class_destroy(cl);
	unregister_chrdev_region(first, 1);
}
module_exit(dax_detach);
/* map completion area */
static int dax_devmap(struct file *f, struct vm_area_struct *vma)
{
	struct dax_ctx *ctx = (struct dax_ctx *)f->private_data;
	size_t len = vma->vm_end - vma->vm_start;

	dax_dbg("len=0x%lx, flags=0x%lx", len, vma->vm_flags);

	if (ctx->owner != current) {
		dax_dbg("devmap called from wrong thread");
		return -EINVAL;
	}

	if (len != DAX_MMAP_LEN) {
		dax_dbg("len(%lu) != DAX_MMAP_LEN(%d)", len, DAX_MMAP_LEN);
		return -EINVAL;
	}

	/* completion area is mapped read-only for user */
	if (vma->vm_flags & VM_WRITE)
		return -EPERM;
	vma->vm_flags &= ~VM_MAYWRITE;

	if (remap_pfn_range(vma, vma->vm_start, ctx->ca_buf_ra >> PAGE_SHIFT,
			    len, vma->vm_page_prot))
		return -EAGAIN;

	dax_dbg("mmapped completion area at uva 0x%lx", vma->vm_start);
	return 0;
}
/* Unlock user pages. Called during dequeue or device close */
static void dax_unlock_pages(struct dax_ctx *ctx, int ccb_index, int nelem)
{
	int i, j;

	for (i = ccb_index; i < ccb_index + nelem; i++) {
		for (j = 0; j < NUM_STREAM_TYPES; j++) {
			struct page *p = ctx->pages[i][j];

			if (p) {
				dax_dbg("freeing page %p", p);
				unpin_user_pages_dirty_lock(&p, 1, j == OUT);
				ctx->pages[i][j] = NULL;
			}
		}
	}
}
static int dax_lock_page(void *va, struct page **p)
{
	int ret;

	dax_dbg("uva %p", va);

	ret = pin_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p);
	if (ret == 1) {
		dax_dbg("locked page %p, for VA %p", *p, va);
		return 0;
	}

	dax_dbg("pin_user_pages failed, va=%p, ret=%d", va, ret);
	return -1;
}
static int dax_lock_pages(struct dax_ctx *ctx, int idx,
			  int nelem, u64 *err_va)
{
	int i;

	for (i = 0; i < nelem; i++) {
		struct dax_ccb *ccbp = &ctx->ccb_buf[i];

		/*
		 * For each address in the CCB whose type is virtual,
		 * lock the page and change the type to virtual alternate
		 * context. On error, return the offending address in
		 * err_va.
		 */
		if (ccbp->hdr.out_addr_type == DAX_ADDR_TYPE_VA) {
			dax_dbg("output");
			if (dax_lock_page(ccbp->out,
					  &ctx->pages[i + idx][OUT]) != 0) {
				*err_va = (u64)ccbp->out;
				goto error;
			}
			ccbp->hdr.out_addr_type = DAX_ADDR_TYPE_VA_ALT;
		}

		if (ccbp->hdr.pri_addr_type == DAX_ADDR_TYPE_VA) {
			dax_dbg("input");
			if (dax_lock_page(ccbp->pri,
					  &ctx->pages[i + idx][PRI]) != 0) {
				*err_va = (u64)ccbp->pri;
				goto error;
			}
			ccbp->hdr.pri_addr_type = DAX_ADDR_TYPE_VA_ALT;
		}

		if (ccbp->hdr.sec_addr_type == DAX_ADDR_TYPE_VA) {
			dax_dbg("sec input");
			if (dax_lock_page(ccbp->sec,
					  &ctx->pages[i + idx][SEC]) != 0) {
				*err_va = (u64)ccbp->sec;
				goto error;
			}
			ccbp->hdr.sec_addr_type = DAX_ADDR_TYPE_VA_ALT;
		}

		if (ccbp->hdr.table_addr_type == DAX_ADDR_TYPE_VA) {
			dax_dbg("tbl");
			if (dax_lock_page(ccbp->tbl,
					  &ctx->pages[i + idx][TBL]) != 0) {
				*err_va = (u64)ccbp->tbl;
				goto error;
			}
			ccbp->hdr.table_addr_type = DAX_ADDR_TYPE_VA_ALT;
		}

		/* skip over 2nd 64 bytes of long CCB */
		if (ccbp->hdr.longccb)
			i++;
	}
	return DAX_SUBMIT_OK;

error:
	dax_unlock_pages(ctx, idx, nelem);
	return DAX_SUBMIT_ERR_NOACCESS;
}
static void dax_ccb_wait(struct dax_ctx *ctx, int idx)
{
	int ret, nretries;
	u16 kill_res;

	dax_dbg("idx=%d", idx);

	for (nretries = 0; nretries < DAX_CCB_RETRIES; nretries++) {
		if (ctx->ca_buf[idx].status == CCA_STAT_NOT_COMPLETED)
			udelay(DAX_CCB_USEC);
		else
			return;
	}
	dax_dbg("ctx (%p): CCB[%d] timed out, wait usec=%d, retries=%d. Killing ccb",
		(void *)ctx, idx, DAX_CCB_USEC, DAX_CCB_RETRIES);

	ret = dax_ccb_kill(ctx->ca_buf_ra + idx * sizeof(struct dax_cca),
			   &kill_res);
	dax_dbg("Kill CCB[%d] %s", idx, ret ? "failed" : "succeeded");
}
static int dax_close(struct inode *ino, struct file *f)
{
	struct dax_ctx *ctx = (struct dax_ctx *)f->private_data;
	int i;

	f->private_data = NULL;

	for (i = 0; i < DAX_CA_ELEMS; i++) {
		if (ctx->ca_buf[i].status == CCA_STAT_NOT_COMPLETED) {
			dax_dbg("CCB[%d] not completed", i);
			dax_ccb_wait(ctx, i);
		}
		dax_unlock_pages(ctx, i, 1);
	}

	kfree(ctx->ccb_buf);
	kfree(ctx->ca_buf);
	dax_stat_dbg("CCBs: %d good, %d bad", ctx->ccb_count, ctx->fail_count);
	kfree(ctx);

	return 0;
}
static ssize_t dax_read(struct file *f, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct dax_ctx *ctx = f->private_data;

	if (ctx->client != current)
		return -EUSERS;

	ctx->client = NULL;

	if (count != sizeof(union ccb_result))
		return -EINVAL;
	if (copy_to_user(buf, &ctx->result, sizeof(union ccb_result)))
		return -EFAULT;
	return count;
}
static ssize_t dax_write(struct file *f, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct dax_ctx *ctx = f->private_data;
	struct dax_command hdr;
	unsigned long ca;
	int i, idx, ret;

	if (ctx->client != NULL)
		return -EINVAL;

	if (count == 0 || count > DAX_MAX_CCBS * sizeof(struct dax_ccb))
		return -EINVAL;

	if (count % sizeof(struct dax_ccb) == 0)
		return dax_ccb_exec(ctx, buf, count, ppos); /* CCB EXEC */

	if (count != sizeof(struct dax_command))
		return -EINVAL;

	/* immediate command */
	if (ctx->owner != current)
		return -EUSERS;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	ca = ctx->ca_buf_ra + hdr.ca_offset;

	switch (hdr.command) {
	case CCB_KILL:
		if (hdr.ca_offset >= DAX_MMAP_LEN) {
			dax_dbg("invalid ca_offset (%d) >= ca_buflen (%d)",
				hdr.ca_offset, DAX_MMAP_LEN);
			return -EINVAL;
		}

		ret = dax_ccb_kill(ca, &ctx->result.kill.action);
		if (ret != 0) {
			dax_dbg("dax_ccb_kill failed (ret=%d)", ret);
			return ret;
		}

		dax_info_dbg("killed (ca_offset %d)", hdr.ca_offset);
		idx = hdr.ca_offset / sizeof(struct dax_cca);
		ctx->ca_buf[idx].status = CCA_STAT_KILLED;
		ctx->ca_buf[idx].err = CCA_ERR_KILLED;
		ctx->client = current;
		return count;

	case CCB_INFO:
		if (hdr.ca_offset >= DAX_MMAP_LEN) {
			dax_dbg("invalid ca_offset (%d) >= ca_buflen (%d)",
				hdr.ca_offset, DAX_MMAP_LEN);
			return -EINVAL;
		}

		ret = dax_ccb_info(ca, &ctx->result.info);
		if (ret != 0) {
			dax_dbg("dax_ccb_info failed (ret=%d)", ret);
			return ret;
		}

		dax_info_dbg("info succeeded on ca_offset %d", hdr.ca_offset);
		ctx->client = current;
		return count;

	case CCB_DEQUEUE:
		for (i = 0; i < DAX_CA_ELEMS; i++) {
			if (ctx->ca_buf[i].status !=
			    CCA_STAT_NOT_COMPLETED)
				dax_unlock_pages(ctx, i, 1);
		}
		return count;

	default:
		return -EINVAL;
	}
}
static int dax_open(struct inode *inode, struct file *f)
{
	struct dax_ctx *ctx = NULL;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		goto done;

	ctx->ccb_buf = kcalloc(DAX_MAX_CCBS, sizeof(struct dax_ccb),
			       GFP_KERNEL);
	if (ctx->ccb_buf == NULL)
		goto done;

	ctx->ccb_buf_ra = virt_to_phys(ctx->ccb_buf);
	dax_dbg("ctx->ccb_buf=0x%p, ccb_buf_ra=0x%llx",
		(void *)ctx->ccb_buf, ctx->ccb_buf_ra);

	/* allocate CCB completion area buffer */
	ctx->ca_buf = kzalloc(DAX_MMAP_LEN, GFP_KERNEL);
	if (ctx->ca_buf == NULL)
		goto alloc_error;
	for (i = 0; i < DAX_CA_ELEMS; i++)
		ctx->ca_buf[i].status = CCA_STAT_COMPLETED;

	ctx->ca_buf_ra = virt_to_phys(ctx->ca_buf);
	dax_dbg("ctx=0x%p, ctx->ca_buf=0x%p, ca_buf_ra=0x%llx",
		(void *)ctx, (void *)ctx->ca_buf, ctx->ca_buf_ra);

	ctx->owner = current;
	f->private_data = ctx;
	return 0;

alloc_error:
	kfree(ctx->ccb_buf);
done:
	kfree(ctx);
	return -ENOMEM;
}
static char *dax_hv_errno(unsigned long hv_ret, int *ret)
{
	switch (hv_ret) {
	case HV_EBADALIGN:
		*ret = -EFAULT;
		return "HV_EBADALIGN";
	case HV_ENORADDR:
		*ret = -EFAULT;
		return "HV_ENORADDR";
	case HV_EINVAL:
		*ret = -EINVAL;
		return "HV_EINVAL";
	case HV_EWOULDBLOCK:
		*ret = -EAGAIN;
		return "HV_EWOULDBLOCK";
	case HV_ENOACCESS:
		*ret = -EPERM;
		return "HV_ENOACCESS";
	default:
		break;
	}

	*ret = -EIO;
	return "UNKNOWN";
}
static int dax_ccb_kill(u64 ca, u16 *kill_res)
{
	unsigned long hv_ret;
	int count, ret = 0;
	char *err_str;

	for (count = 0; count < DAX_CCB_RETRIES; count++) {
		dax_dbg("attempting kill on ca_ra 0x%llx", ca);
		hv_ret = sun4v_ccb_kill(ca, kill_res);

		if (hv_ret == HV_EOK) {
			dax_info_dbg("HV_EOK (ca_ra 0x%llx): %d", ca,
				     *kill_res);
		} else {
			err_str = dax_hv_errno(hv_ret, &ret);
			dax_dbg("%s (ca_ra 0x%llx)", err_str, ca);
		}

		if (hv_ret != HV_EWOULDBLOCK)
			return ret;
		dax_info_dbg("ccb_kill count = %d", count);
		udelay(DAX_CCB_USEC);
	}

	return ret;
}
static int dax_ccb_info(u64 ca, struct ccb_info_result *info)
{
	unsigned long hv_ret;
	char *err_str;
	int ret = 0;

	dax_dbg("attempting info on ca_ra 0x%llx", ca);
	hv_ret = sun4v_ccb_info(ca, info);

	if (hv_ret == HV_EOK) {
		dax_info_dbg("HV_EOK (ca_ra 0x%llx): %d", ca, info->state);
		if (info->state == DAX_CCB_ENQUEUED) {
			dax_info_dbg("dax_unit %d, queue_num %d, queue_pos %d",
				     info->inst_num, info->q_num, info->q_pos);
		}
	} else {
		err_str = dax_hv_errno(hv_ret, &ret);
		dax_dbg("%s (ca_ra 0x%llx)", err_str, ca);
	}

	return ret;
}
static void dax_prt_ccbs(struct dax_ccb *ccb, int nelem)
{
	int i, j;
	u64 *ccbp;

	dax_dbg("ccb buffer:");
	for (i = 0; i < nelem; i++) {
		ccbp = (u64 *)&ccb[i];
		dax_dbg(" %sccb[%d]", ccb[i].hdr.longccb ? "long " : "", i);
		for (j = 0; j < 8; j++)
			dax_dbg("\tccb[%d].dwords[%d]=0x%llx",
				i, j, *(ccbp + j));
	}
}
/*
 * Validates user CCB content. Also sets completion address and address types
 * for all addresses contained in CCB.
 */
static int dax_preprocess_usr_ccbs(struct dax_ctx *ctx, int idx, int nelem)
{
	int i;

	/*
	 * The user is not allowed to specify real address types in
	 * the CCB header. This must be enforced by the kernel before
	 * submitting the CCBs to HV. The only allowed values for all
	 * address fields are VA or IMM.
	 */
	for (i = 0; i < nelem; i++) {
		struct dax_ccb *ccbp = &ctx->ccb_buf[i];
		unsigned long ca_offset;

		if (ccbp->hdr.ccb_version > max_ccb_version)
			return DAX_SUBMIT_ERR_CCB_INVAL;

		switch (ccbp->hdr.opcode) {
		case DAX_OP_SYNC_NOP:
		case DAX_OP_EXTRACT:
		case DAX_OP_SCAN_VALUE:
		case DAX_OP_SCAN_RANGE:
		case DAX_OP_TRANSLATE:
		case DAX_OP_SCAN_VALUE | DAX_OP_INVERT:
		case DAX_OP_SCAN_RANGE | DAX_OP_INVERT:
		case DAX_OP_TRANSLATE | DAX_OP_INVERT:
		case DAX_OP_SELECT:
			break;
		default:
			return DAX_SUBMIT_ERR_CCB_INVAL;
		}

		if (ccbp->hdr.out_addr_type != DAX_ADDR_TYPE_VA &&
		    ccbp->hdr.out_addr_type != DAX_ADDR_TYPE_NONE) {
			dax_dbg("invalid out_addr_type in user CCB[%d]", i);
			return DAX_SUBMIT_ERR_CCB_INVAL;
		}

		if (ccbp->hdr.pri_addr_type != DAX_ADDR_TYPE_VA &&
		    ccbp->hdr.pri_addr_type != DAX_ADDR_TYPE_NONE) {
			dax_dbg("invalid pri_addr_type in user CCB[%d]", i);
			return DAX_SUBMIT_ERR_CCB_INVAL;
		}

		if (ccbp->hdr.sec_addr_type != DAX_ADDR_TYPE_VA &&
		    ccbp->hdr.sec_addr_type != DAX_ADDR_TYPE_NONE) {
			dax_dbg("invalid sec_addr_type in user CCB[%d]", i);
			return DAX_SUBMIT_ERR_CCB_INVAL;
		}

		if (ccbp->hdr.table_addr_type != DAX_ADDR_TYPE_VA &&
		    ccbp->hdr.table_addr_type != DAX_ADDR_TYPE_NONE) {
			dax_dbg("invalid table_addr_type in user CCB[%d]", i);
			return DAX_SUBMIT_ERR_CCB_INVAL;
		}

		/* set completion (real) address and address type */
		ccbp->hdr.cca_addr_type = DAX_ADDR_TYPE_RA;
		ca_offset = (idx + i) * sizeof(struct dax_cca);
		ccbp->ca = (void *)ctx->ca_buf_ra + ca_offset;
		memset(&ctx->ca_buf[idx + i], 0, sizeof(struct dax_cca));

		dax_dbg("ccb[%d]=%p, ca_offset=0x%lx, compl RA=0x%llx",
			i, ccbp, ca_offset, ctx->ca_buf_ra + ca_offset);

		/* skip over 2nd 64 bytes of long CCB */
		if (ccbp->hdr.longccb)
			i++;
	}

	return DAX_SUBMIT_OK;
}
static int dax_ccb_exec(struct dax_ctx *ctx, const char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long accepted_len, hv_rv;
	int i, idx, nccbs, naccepted;

	ctx->client = current;
	idx = *ppos;
	nccbs = count / sizeof(struct dax_ccb);

	if (ctx->owner != current) {
		dax_dbg("wrong thread");
		ctx->result.exec.status = DAX_SUBMIT_ERR_THR_INIT;
		return count;
	}
	dax_dbg("args: ccb_buf_len=%ld, idx=%d", count, idx);

	/* for given index and length, verify ca_buf range exists */
	if (idx < 0 || idx > (DAX_CA_ELEMS - nccbs)) {
		ctx->result.exec.status = DAX_SUBMIT_ERR_NO_CA_AVAIL;
		return count;
	}

	/*
	 * Copy CCBs into kernel buffer to prevent modification by the
	 * user in between validation and submission.
	 */
	if (copy_from_user(ctx->ccb_buf, buf, count)) {
		dax_dbg("copyin of user CCB buffer failed");
		ctx->result.exec.status = DAX_SUBMIT_ERR_CCB_ARR_MMU_MISS;
		return count;
	}

	/* check to see if ca_buf[idx] .. ca_buf[idx + nccbs] are available */
	for (i = idx; i < idx + nccbs; i++) {
		if (ctx->ca_buf[i].status == CCA_STAT_NOT_COMPLETED) {
			dax_dbg("CA range not available, dequeue needed");
			ctx->result.exec.status = DAX_SUBMIT_ERR_NO_CA_AVAIL;
			return count;
		}
	}
	dax_unlock_pages(ctx, idx, nccbs);

	ctx->result.exec.status = dax_preprocess_usr_ccbs(ctx, idx, nccbs);
	if (ctx->result.exec.status != DAX_SUBMIT_OK)
		return count;

	ctx->result.exec.status = dax_lock_pages(ctx, idx, nccbs,
						 &ctx->result.exec.status_data);
	if (ctx->result.exec.status != DAX_SUBMIT_OK)
		return count;

	if (dax_debug & DAX_DBG_FLG_BASIC)
		dax_prt_ccbs(ctx->ccb_buf, nccbs);

	hv_rv = sun4v_ccb_submit(ctx->ccb_buf_ra, count,
				 HV_CCB_QUERY_CMD | HV_CCB_VA_SECONDARY, 0,
				 &accepted_len, &ctx->result.exec.status_data);

	switch (hv_rv) {
	case HV_EOK:
		/*
		 * Hcall succeeded with no errors but the accepted
		 * length may be less than the requested length. The
		 * only way the driver can resubmit the remainder is
		 * to wait for completion of the submitted CCBs since
		 * there is no way to guarantee the ordering semantics
		 * required by the client applications. Therefore we
		 * let the user library deal with resubmissions.
		 */
		ctx->result.exec.status = DAX_SUBMIT_OK;
		break;
	case HV_EWOULDBLOCK:
		/*
		 * This is a transient HV API error. The user library
		 * can retry.
		 */
		dax_dbg("hcall returned HV_EWOULDBLOCK");
		ctx->result.exec.status = DAX_SUBMIT_ERR_WOULDBLOCK;
		break;
	case HV_ENOMAP:
		/*
		 * HV was unable to translate a VA. The VA it could
		 * not translate is returned in the status_data param.
		 */
		dax_dbg("hcall returned HV_ENOMAP");
		ctx->result.exec.status = DAX_SUBMIT_ERR_NOMAP;
		break;
	case HV_EINVAL:
		/*
		 * This is the result of an invalid user CCB as HV is
		 * validating some of the user CCB fields. Pass this
		 * error back to the user. There is no supporting info
		 * to isolate the invalid field.
		 */
		dax_dbg("hcall returned HV_EINVAL");
		ctx->result.exec.status = DAX_SUBMIT_ERR_CCB_INVAL;
		break;
	case HV_ENOACCESS:
		/*
		 * HV found a VA that did not have the appropriate
		 * permissions (such as the w bit). The VA in question
		 * is returned in status_data param.
		 */
		dax_dbg("hcall returned HV_ENOACCESS");
		ctx->result.exec.status = DAX_SUBMIT_ERR_NOACCESS;
		break;
	case HV_EUNAVAILABLE:
		/*
		 * The requested CCB operation could not be performed
		 * at this time. Return the specific unavailable code
		 * in the status_data field.
		 */
		dax_dbg("hcall returned HV_EUNAVAILABLE");
		ctx->result.exec.status = DAX_SUBMIT_ERR_UNAVAIL;
		break;
	default:
		ctx->result.exec.status = DAX_SUBMIT_ERR_INTERNAL;
		dax_dbg("unknown hcall return value (%ld)", hv_rv);
		break;
	}

	/* unlock pages associated with the unaccepted CCBs */
	naccepted = accepted_len / sizeof(struct dax_ccb);
	dax_unlock_pages(ctx, idx + naccepted, nccbs - naccepted);

	/* mark unaccepted CCBs as completed so their CA slots can be reused */
	for (i = idx + naccepted; i < idx + nccbs; i++)
		ctx->ca_buf[i].status = CCA_STAT_COMPLETED;

	ctx->ccb_count += naccepted;
	ctx->fail_count += nccbs - naccepted;

	dax_dbg("hcall rv=%ld, accepted_len=%ld, status_data=0x%llx, ret status=%d",
		hv_rv, accepted_len, ctx->result.exec.status_data,
		ctx->result.exec.status);

	if (count == accepted_len)
		ctx->client = NULL;	/* no read needed to complete protocol */
	return count;
}