// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2022-2024 Oracle.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_parent.h"
#include "xfs_handle.h"
#include "xfs_health.h"
#include "xfs_icache.h"
#include "xfs_export.h"
#include "xfs_xattr.h"
#include "xfs_acl.h"

#include <linux/namei.h>
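/*
 * Size of the fid portion of a file handle, not counting the fid_len field
 * itself.
 */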
static inline size_t
xfs_filehandle_fid_len(void)
{
	struct xfs_handle	*handle = NULL;

	return sizeof(struct xfs_fid) - sizeof(handle->ha_fid.fid_len);
}
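/*
 * Fill out a handle that refers to a specific inode/generation pair and
 * return the number of handle bytes that are valid.
 */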
static inline size_t
xfs_filehandle_init(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	uint32_t		gen,
	struct xfs_handle	*handle)
{
	memcpy(&handle->ha_fsid, mp->m_fixedfsid, sizeof(struct xfs_fsid));

	handle->ha_fid.fid_len = xfs_filehandle_fid_len();
	handle->ha_fid.fid_pad = 0;
	handle->ha_fid.fid_gen = gen;
	handle->ha_fid.fid_ino = ino;

	return sizeof(struct xfs_handle);
}
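/*
 * Fill out a handle that refers to the filesystem as a whole (fsid only,
 * zeroed fid) and return the number of handle bytes that are valid.
 */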
static inline size_t
xfs_fshandle_init(
	struct xfs_mount	*mp,
	struct xfs_handle	*handle)
{
	memcpy(&handle->ha_fsid, mp->m_fixedfsid, sizeof(struct xfs_fsid));
	memset(&handle->ha_fid, 0, sizeof(handle->ha_fid));

	return sizeof(struct xfs_fsid);
}
/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = EMPTY_FD;
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!fd_file(f))
			return -EBADF;
		inode = file_inode(fd_file(f));
	} else {
		error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on a XFS filesystem,
	 * and only for regular files, directories or symbolic links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;

	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE)
		hsize = xfs_fshandle_init(ip->i_mount, &handle);
	else
		hsize = xfs_filehandle_init(ip->i_mount, ip->i_ino,
				inode->i_generation, &handle);

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

 out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}
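/*
 * Example (userspace sketch, not part of this file): obtaining a filesystem
 * handle with XFS_IOC_PATH_TO_FSHANDLE.  Field names follow struct
 * xfs_fsop_handlereq in xfs_fs.h; the ioctl may be issued on any open fd
 * that resides on an XFS filesystem.
 *
 *	xfs_handle_t		handle;
 *	__u32			hlen = sizeof(handle);
 *	xfs_fsop_handlereq_t	hreq = {
 *		.path		= "/mnt/scratch",
 *		.ohandle	= &handle,
 *		.ohandlen	= &hlen,
 *	};
 *
 *	if (ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq) < 0)
 *		perror("XFS_IOC_PATH_TO_FSHANDLE");
 */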
/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}
/* Convert handle already copied to kernel space into a dentry. */
static struct dentry *
xfs_khandle_to_dentry(
	struct file		*file,
	struct xfs_handle	*handle)
{
	struct xfs_fid64	fid = {
		.ino		= handle->ha_fid.fid_ino,
		.gen		= handle->ha_fid.fid_gen,
	};

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(file)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (handle->ha_fid.fid_len != xfs_filehandle_fid_len())
		return ERR_PTR(-EINVAL);

	return exportfs_decode_fh(file->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}
/* Convert handle already copied to kernel space into an xfs_inode. */
static struct xfs_inode *
xfs_khandle_to_inode(
	struct file		*file,
	struct xfs_handle	*handle)
{
	struct xfs_inode	*ip = XFS_I(file_inode(file));
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode;

	if (!S_ISDIR(VFS_I(ip)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (handle->ha_fid.fid_len != xfs_filehandle_fid_len())
		return ERR_PTR(-EINVAL);

	inode = xfs_nfs_get_inode(mp->m_super, handle->ha_fid.fid_ino,
			handle->ha_fid.fid_gen);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	return XFS_I(inode);
}
/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);

	return xfs_khandle_to_dentry(parfilp, &handle);
}
STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}
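/*
 * Open a file by handle on behalf of a privileged caller and return a new
 * file descriptor.  The handle must resolve to a regular file or directory
 * underneath the parent file's mount.
 */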
int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

 out_dput:
	dput(dentry);
	return error;
}
/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later,
 * we may be reading them directly out of a user buffer.
 */
static void
xfs_ioc_attr_put_listent(
	struct xfs_attr_list_context *context,
	int			flags,
	unsigned char		*name,
	int			namelen,
	void			*value,
	int			valuelen)
{
	struct xfs_attrlist	*alist = context->buffer;
	struct xfs_attrlist_ent	*aep;
	int			arraytop;

	ASSERT(!context->seen_enough);
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
		return;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);

	/* decrement by the actual bytes used by the attr */
	context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
			namelen + 1, sizeof(uint32_t));
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return;
	}

	aep = context->buffer + context->firstu;
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
}
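/* Map the XFS_IOC_ATTR_* namespace flags to the on-disk attr namespace. */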
static unsigned int
xfs_attr_filter(
	u32			ioc_flags)
{
	if (ioc_flags & XFS_IOC_ATTR_ROOT)
		return XFS_ATTR_ROOT;
	if (ioc_flags & XFS_IOC_ATTR_SECURE)
		return XFS_ATTR_SECURE;
	return 0;
}
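/* Decide which attr update operation an ioctl request corresponds to. */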
static inline enum xfs_attr_update
xfs_xattr_flags(
	u32			ioc_flags,
	void			*value)
{
	if (!value)
		return XFS_ATTRUPDATE_REMOVE;
	if (ioc_flags & XFS_IOC_ATTR_CREATE)
		return XFS_ATTRUPDATE_CREATE;
	if (ioc_flags & XFS_IOC_ATTR_REPLACE)
		return XFS_ATTRUPDATE_REPLACE;
	return XFS_ATTRUPDATE_UPSERT;
}
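/*
 * List extended attributes for an ioctl caller: validate the flags and the
 * cursor, format the names into a kernel buffer, then copy the buffer and
 * the updated cursor back to userspace.
 */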
int
xfs_ioc_attr_list(
	struct xfs_inode		*dp,
	void __user			*ubuf,
	size_t				bufsize,
	int				flags,
	struct xfs_attrlist_cursor __user *ucursor)
{
	struct xfs_attr_list_context	context = { };
	struct xfs_attrlist		*alist;
	void				*buffer;
	int				error;

	if (bufsize < sizeof(struct xfs_attrlist) ||
	    bufsize > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;
	if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	/*
	 * Validate the cursor.
	 */
	if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
		return -EFAULT;
	if (context.cursor.pad1 || context.cursor.pad2)
		return -EINVAL;
	if (!context.cursor.initted &&
	    (context.cursor.hashval || context.cursor.blkno ||
	     context.cursor.offset))
		return -EINVAL;

	buffer = kvzalloc(bufsize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Initialize the output buffer.
	 */
	context.dp = dp;
	context.resynch = 1;
	context.attr_filter = xfs_attr_filter(flags);
	context.buffer = buffer;
	context.bufsize = round_down(bufsize, sizeof(uint32_t));
	context.firstu = context.bufsize;
	context.put_listent = xfs_ioc_attr_put_listent;

	alist = context.buffer;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list(&context);
	if (error)
		goto out_free;

	if (copy_to_user(ubuf, buffer, bufsize) ||
	    copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
		error = -EFAULT;
out_free:
	kvfree(buffer);
	return error;
}
int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	struct xfs_fsop_attrlist_handlereq __user *p)
{
	struct xfs_fsop_attrlist_handlereq al_hreq;
	struct dentry		*dentry;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
		return -EFAULT;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
				  al_hreq.buflen, al_hreq.flags, &p->pos);
	dput(dentry);
	return error;
}
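/* Retrieve one extended attribute value and copy it to the user's buffer. */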
static int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char __user	*ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.name		= name,
		.namelen	= strlen(name),
		.valuelen	= *len,
	};
	int			error;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	error = xfs_attr_get(&args);
	if (error)
		goto out_kfree;

	*len = args.valuelen;
	if (copy_to_user(ubuf, args.value, args.valuelen))
		error = -EFAULT;

out_kfree:
	kvfree(args.value);
	return error;
}
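/*
 * Set (or, when no value buffer is supplied, remove) one extended attribute
 * on behalf of an ATTRMULTI request.
 */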
static int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char __user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.name		= name,
		.namelen	= strlen(name),
	};
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;

	if (ubuf) {
		if (len > XFS_XATTR_SIZE_MAX)
			return -EINVAL;
		args.value = memdup_user(ubuf, len);
		if (IS_ERR(args.value))
			return PTR_ERR(args.value);
		args.valuelen = len;
	}

	error = xfs_attr_change(&args, xfs_xattr_flags(flags, args.value));
	if (!error && (flags & XFS_IOC_ATTR_ROOT))
		xfs_forget_acl(inode, name);
	kfree(args.value);
	return error;
}
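/*
 * Carry out a single ATTRMULTI operation: copy in the attribute name, then
 * get, set, or remove the value according to the opcode.
 */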
int
xfs_ioc_attrmulti_one(
	struct file		*parfilp,
	struct inode		*inode,
	uint32_t		opcode,
	void __user		*uname,
	void __user		*value,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*name;
	int			error;

	if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	name = strndup_user(uname, MAXNAMELEN);
	if (IS_ERR(name))
		return PTR_ERR(name);

	switch (opcode) {
	case ATTR_OP_GET:
		error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
		break;
	case ATTR_OP_REMOVE:
		value = NULL;
		*len = 0;
		fallthrough;
	case ATTR_OP_SET:
		error = mnt_want_write_file(parfilp);
		if (error)
			break;
		error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
		mnt_drop_write_file(parfilp);
		break;
	default:
		error = -EINVAL;
		break;
	}

	kfree(name);
	return error;
}
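/*
 * Apply a batch of attribute operations to the file referred to by a handle.
 * Each operation records its own result in am_error.
 */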
int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void __user		*arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
				d_inode(dentry), ops[i].am_opcode,
				ops[i].am_attrname, ops[i].am_attrvalue,
				&ops[i].am_length, ops[i].am_flags);
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}
struct xfs_getparents_ctx {
	struct xfs_attr_list_context	context;
	struct xfs_getparents_by_handle	gph;
	struct xfs_inode		*ip;

	/* Internal buffer where we format records */
	void				*krecords;

	/* Last record filled out */
	struct xfs_getparents_rec	*lastrec;

	unsigned int			count;
};
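/*
 * Size of a parent pointer record in the output buffer, including the name,
 * its NUL terminator, and alignment padding.
 */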
static inline unsigned int
xfs_getparents_rec_sizeof(
	unsigned int		namelen)
{
	return round_up(sizeof(struct xfs_getparents_rec) + namelen + 1,
			sizeof(uint64_t));
}
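/*
 * Format a parent pointer attribute into the next getparents record, or
 * signal that the caller's buffer is full.
 */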
static void
xfs_getparents_put_listent(
	struct xfs_attr_list_context	*context,
	int				flags,
	unsigned char			*name,
	int				namelen,
	void				*value,
	int				valuelen)
{
	struct xfs_getparents_ctx	*gpx =
		container_of(context, struct xfs_getparents_ctx, context);
	struct xfs_inode		*ip = context->dp;
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_getparents		*gp = &gpx->gph.gph_request;
	struct xfs_getparents_rec	*gpr = gpx->krecords + context->firstu;
	unsigned short			reclen =
		xfs_getparents_rec_sizeof(namelen);
	xfs_ino_t			ino;
	uint32_t			gen;
	int				error;

	if (!(flags & XFS_ATTR_PARENT))
		return;

	error = xfs_parent_from_attr(mp, flags, name, namelen, value, valuelen,
			&ino, &gen);
	if (error) {
		xfs_inode_mark_sick(ip, XFS_SICK_INO_PARENT);
		context->seen_enough = -EFSCORRUPTED;
		return;
	}

	/*
	 * We found a parent pointer, but we've filled up the buffer.  Signal
	 * to the caller that we did /not/ reach the end of the parent pointer
	 * records.
	 */
	if (context->firstu > context->bufsize - reclen) {
		context->seen_enough = 1;
		return;
	}

	/* Format the parent pointer directly into the caller buffer. */
	gpr->gpr_reclen = reclen;
	xfs_filehandle_init(mp, ino, gen, &gpr->gpr_parent);
	memcpy(gpr->gpr_name, name, namelen);
	gpr->gpr_name[namelen] = 0;

	trace_xfs_getparents_put_listent(ip, gp, context, gpr);

	context->firstu += reclen;
	gpx->count++;
	gpx->lastrec = gpr;
}
/* Expand the last record to fill the rest of the caller's buffer. */
static inline void
xfs_getparents_expand_lastrec(
	struct xfs_getparents_ctx	*gpx)
{
	struct xfs_getparents		*gp = &gpx->gph.gph_request;
	struct xfs_getparents_rec	*gpr = gpx->lastrec;

	if (!gpx->lastrec)
		return;

	gpr->gpr_reclen = gp->gp_bufsize - ((void *)gpr - gpx->krecords);

	trace_xfs_getparents_expand_lastrec(gpx->ip, gp, &gpx->context, gpr);
}
/* Retrieve the parent pointers for a given inode. */
STATIC int
xfs_getparents(
	struct xfs_getparents_ctx	*gpx)
{
	struct xfs_getparents		*gp = &gpx->gph.gph_request;
	struct xfs_inode		*ip = gpx->ip;
	struct xfs_mount		*mp = ip->i_mount;
	size_t				bufsize;
	int				error;

	/* Check size of buffer requested by user */
	if (gp->gp_bufsize > XFS_XATTR_LIST_MAX)
		return -ENOMEM;
	if (gp->gp_bufsize < xfs_getparents_rec_sizeof(1))
		return -EINVAL;

	if (gp->gp_iflags & ~XFS_GETPARENTS_IFLAGS_ALL)
		return -EINVAL;

	bufsize = round_down(gp->gp_bufsize, sizeof(uint64_t));
	gpx->krecords = kvzalloc(bufsize, GFP_KERNEL);
	if (!gpx->krecords) {
		bufsize = min(bufsize, PAGE_SIZE);
		gpx->krecords = kvzalloc(bufsize, GFP_KERNEL);
		if (!gpx->krecords)
			return -ENOMEM;
	}

	gpx->context.dp = ip;
	gpx->context.resynch = 1;
	gpx->context.put_listent = xfs_getparents_put_listent;
	gpx->context.bufsize = bufsize;
	/* firstu is used to track the bytes filled in the buffer */
	gpx->context.firstu = 0;

	/* Copy the cursor provided by caller */
	memcpy(&gpx->context.cursor, &gp->gp_cursor,
			sizeof(struct xfs_attrlist_cursor));
	gpx->count = 0;
	gp->gp_oflags = 0;

	trace_xfs_getparents_begin(ip, gp, &gpx->context.cursor);

	error = xfs_attr_list(&gpx->context);
	if (error)
		goto out_free_buf;
	if (gpx->context.seen_enough < 0) {
		error = gpx->context.seen_enough;
		goto out_free_buf;
	}
	xfs_getparents_expand_lastrec(gpx);

	/* Update the caller with the current cursor position */
	memcpy(&gp->gp_cursor, &gpx->context.cursor,
			sizeof(struct xfs_attrlist_cursor));

	/* Is this the root directory? */
	if (ip->i_ino == mp->m_sb.sb_rootino)
		gp->gp_oflags |= XFS_GETPARENTS_OFLAG_ROOT;

	if (gpx->context.seen_enough == 0) {
		/*
		 * If we did not run out of buffer space, then we reached the
		 * end of the pptr recordset, so set the DONE flag.
		 */
		gp->gp_oflags |= XFS_GETPARENTS_OFLAG_DONE;
	} else if (gpx->count == 0) {
		/*
		 * If we ran out of buffer space before copying any parent
		 * pointers at all, the caller's buffer was too short.  Tell
		 * userspace that, erm, the message is too long.
		 */
		error = -EMSGSIZE;
		goto out_free_buf;
	}

	trace_xfs_getparents_end(ip, gp, &gpx->context.cursor);

	ASSERT(gpx->context.firstu <= gpx->gph.gph_request.gp_bufsize);

	/* Copy the records to userspace. */
	if (copy_to_user(u64_to_user_ptr(gpx->gph.gph_request.gp_buffer),
				gpx->krecords, gpx->context.firstu))
		error = -EFAULT;

out_free_buf:
	kvfree(gpx->krecords);
	gpx->krecords = NULL;
	return error;
}
/* Retrieve the parents of this file and pass them back to userspace. */
int
xfs_ioc_getparents(
	struct file			*file,
	struct xfs_getparents __user	*ureq)
{
	struct xfs_getparents_ctx	gpx = {
		.ip			= XFS_I(file_inode(file)),
	};
	struct xfs_getparents		*kreq = &gpx.gph.gph_request;
	struct xfs_mount		*mp = gpx.ip->i_mount;
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!xfs_has_parent(mp))
		return -EOPNOTSUPP;
	if (copy_from_user(kreq, ureq, sizeof(*kreq)))
		return -EFAULT;

	error = xfs_getparents(&gpx);
	if (error)
		return error;

	if (copy_to_user(ureq, kreq, sizeof(*kreq)))
		return -EFAULT;

	return 0;
}
/* Retrieve the parents of this file handle and pass them back to userspace. */
int
xfs_ioc_getparents_by_handle(
	struct file			*file,
	struct xfs_getparents_by_handle __user	*ureq)
{
	struct xfs_getparents_ctx	gpx = { };
	struct xfs_inode		*ip = XFS_I(file_inode(file));
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_getparents_by_handle	*kreq = &gpx.gph;
	struct xfs_handle		*handle = &kreq->gph_handle;
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!xfs_has_parent(mp))
		return -EOPNOTSUPP;
	if (copy_from_user(kreq, ureq, sizeof(*kreq)))
		return -EFAULT;

	/*
	 * We don't use exportfs_decode_fh because it does too much work here.
	 * If the handle refers to a directory, the exportfs code will walk
	 * upwards through the directory tree to connect the dentries to the
	 * root directory dentry.  For GETPARENTS we don't care about that
	 * because we're not actually going to open a file descriptor; we only
	 * want to open an inode and read its parent pointers.
	 *
	 * Note that xfs_scrub uses GETPARENTS to log that it will try to fix a
	 * corrupted file's metadata.  For this usecase we would really rather
	 * userspace single-step the path reconstruction to avoid loops or
	 * other strange things if the directory tree is corrupt.
	 */
	gpx.ip = xfs_khandle_to_inode(file, handle);
	if (IS_ERR(gpx.ip))
		return PTR_ERR(gpx.ip);

	error = xfs_getparents(&gpx);
	if (error)
		goto out_rele;

	if (copy_to_user(ureq, kreq, sizeof(*kreq)))
		error = -EFAULT;

out_rele:
	xfs_irele(gpx.ip);
	return error;
}