Linux 5.3: Fix switch() fall-through compiler errors
module/zfs/zfs_log.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2018 by Delphix. All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/vfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/byteorder.h>
#include <sys/policy.h>
#include <sys/stat.h>
#include <sys/mode.h>
#include <sys/acl.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/spa.h>
#include <sys/zfs_fuid.h>
#include <sys/dsl_dataset.h>

/*
 * These zfs_log_* functions must be called within a dmu tx, in one
 * of 2 contexts depending on zilog->z_replay:
 *
 * Non replay mode
 * ---------------
 * We need to record the transaction so that if it is committed to
 * the Intent Log then it can be replayed.  An intent log transaction
 * structure (itx_t) is allocated and all the information necessary to
 * possibly replay the transaction is saved in it.  The itx is then assigned
 * a sequence number and inserted in the in-memory list anchored in the zilog.
 *
 * Replay mode
 * -----------
 * We need to mark the intent log record as replayed in the log header.
 * This is done in the same transaction as the replay so that they
 * commit atomically.
 */
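
/*
 * Determine the log record type (TX_CREATE, TX_MKDIR, etc.) for a create
 * operation, based on whether an ACL (vsecp) and/or extended attributes
 * (ATTR_XVATTR in vap->va_mask) accompany the request.
 */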
int
zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap)
{
	int isxvattr = (vap->va_mask & ATTR_XVATTR);
	switch (type) {
	case Z_FILE:
		if (vsecp == NULL && !isxvattr)
			return (TX_CREATE);
		if (vsecp && isxvattr)
			return (TX_CREATE_ACL_ATTR);
		if (vsecp)
			return (TX_CREATE_ACL);
		else
			return (TX_CREATE_ATTR);
		/*NOTREACHED*/
	case Z_DIR:
		if (vsecp == NULL && !isxvattr)
			return (TX_MKDIR);
		if (vsecp && isxvattr)
			return (TX_MKDIR_ACL_ATTR);
		if (vsecp)
			return (TX_MKDIR_ACL);
		else
			return (TX_MKDIR_ATTR);
	case Z_XATTRDIR:
		return (TX_MKXATTR);
	}
	ASSERT(0);
	return (TX_MAX_TYPE);
}

/*
 * Build up the log data necessary for logging an xvattr_t.
 * First the lr_attr_t is initialized.  Following the lr_attr_t
 * are the mapsize and attribute bitmap copied from the xvattr_t.
 * Following the bitmap and bitmapsize, two 64-bit words are reserved
 * for the create time, which may be set.  Following the create time
 * is a single 64-bit integer which holds the attribute bits to set on
 * replay for the xvattr.
 */
static void
zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
{
	uint32_t *bitmap;
	uint64_t *attrs;
	uint64_t *crtime;
	xoptattr_t *xoap;
	void *scanstamp;
	int i;

	xoap = xva_getxoptattr(xvap);
	ASSERT(xoap);

	lrattr->lr_attr_masksize = xvap->xva_mapsize;
	bitmap = &lrattr->lr_attr_bitmap;
	for (i = 0; i != xvap->xva_mapsize; i++, bitmap++) {
		*bitmap = xvap->xva_reqattrmap[i];
	}

	/* Now pack the attributes up in a single uint64_t */
	attrs = (uint64_t *)bitmap;
	crtime = attrs + 1;
	scanstamp = (caddr_t)(crtime + 2);
	*attrs = 0;
	if (XVA_ISSET_REQ(xvap, XAT_READONLY))
		*attrs |= (xoap->xoa_readonly == 0) ? 0 :
		    XAT0_READONLY;
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
		*attrs |= (xoap->xoa_hidden == 0) ? 0 :
		    XAT0_HIDDEN;
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
		*attrs |= (xoap->xoa_system == 0) ? 0 :
		    XAT0_SYSTEM;
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
		*attrs |= (xoap->xoa_archive == 0) ? 0 :
		    XAT0_ARCHIVE;
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
		*attrs |= (xoap->xoa_immutable == 0) ? 0 :
		    XAT0_IMMUTABLE;
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
		*attrs |= (xoap->xoa_nounlink == 0) ? 0 :
		    XAT0_NOUNLINK;
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
		*attrs |= (xoap->xoa_appendonly == 0) ? 0 :
		    XAT0_APPENDONLY;
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
		*attrs |= (xoap->xoa_opaque == 0) ? 0 :
		    XAT0_OPAQUE;
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
		*attrs |= (xoap->xoa_nodump == 0) ? 0 :
		    XAT0_NODUMP;
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
		*attrs |= (xoap->xoa_av_quarantined == 0) ? 0 :
		    XAT0_AV_QUARANTINED;
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
		*attrs |= (xoap->xoa_av_modified == 0) ? 0 :
		    XAT0_AV_MODIFIED;
	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
		ZFS_TIME_ENCODE(&xoap->xoa_createtime, crtime);
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
		ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));

		bcopy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
	} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
		/*
		 * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
		 * at the same time, so we can share the same space.
		 */
		bcopy(&xoap->xoa_projid, scanstamp, sizeof (uint64_t));
	}
	if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
		*attrs |= (xoap->xoa_reparse == 0) ? 0 :
		    XAT0_REPARSE;
	if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
		*attrs |= (xoap->xoa_offline == 0) ? 0 :
		    XAT0_OFFLINE;
	if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
		*attrs |= (xoap->xoa_sparse == 0) ? 0 :
		    XAT0_SPARSE;
	if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
		*attrs |= (xoap->xoa_projinherit == 0) ? 0 :
		    XAT0_PROJINHERIT;
}
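
/*
 * Copy the log-centric FUIDs from fuidp into the log record starting at
 * "start"; returns a pointer just past the last FUID written.
 */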
static void *
zfs_log_fuid_ids(zfs_fuid_info_t *fuidp, void *start)
{
	zfs_fuid_t *zfuid;
	uint64_t *fuidloc = start;

	/* First copy in the ACE FUIDs */
	for (zfuid = list_head(&fuidp->z_fuids); zfuid;
	    zfuid = list_next(&fuidp->z_fuids, zfuid)) {
		*fuidloc++ = zfuid->z_logfuid;
	}
	return (fuidloc);
}
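
/*
 * Append the NUL-terminated FUID domain strings, if any, after the FUID
 * array; returns a pointer just past the last byte written.
 */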
static void *
zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
{
	zfs_fuid_domain_t *zdomain;

	/* now copy in the domain info, if any */
	if (fuidp->z_domain_str_sz != 0) {
		for (zdomain = list_head(&fuidp->z_domains); zdomain;
		    zdomain = list_next(&fuidp->z_domains, zdomain)) {
			bcopy((void *)zdomain->z_domain, start,
			    strlen(zdomain->z_domain) + 1);
			start = (caddr_t)start +
			    strlen(zdomain->z_domain) + 1;
		}
	}
	return (start);
}

/*
 * If zp is an xattr node, check whether the xattr owner is unlinked.
 * We don't want to log anything if the owner is unlinked.
 */
static int
zfs_xattr_owner_unlinked(znode_t *zp)
{
	int unlinked = 0;
	znode_t *dzp;

	igrab(ZTOI(zp));
	/*
	 * if zp is XATTR node, keep walking up via z_xattr_parent until we
	 * get the owner
	 */
	while (zp->z_pflags & ZFS_XATTR) {
		ASSERT3U(zp->z_xattr_parent, !=, 0);
		if (zfs_zget(ZTOZSB(zp), zp->z_xattr_parent, &dzp) != 0) {
			unlinked = 1;
			break;
		}

		iput(ZTOI(zp));
		zp = dzp;
		unlinked = zp->z_unlinked;
	}
	iput(ZTOI(zp));
	return (unlinked);
}

/*
 * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
 * TX_MKXATTR transactions.
 *
 * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
 * domain information appended prior to the name.  In this case the
 * uid/gid in the log record will be a log centric FUID.
 *
 * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
 * may contain attributes, ACL and optional fuid information.
 *
 * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
 * an ACL and normal users/groups in the ACEs.
 *
 * There may be optional xvattr attribute information similar
 * to zfs_log_setattr.
 *
 * Also, "domain" strings may be appended after the file name.
 */
void
zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, char *name, vsecattr_t *vsecp,
    zfs_fuid_info_t *fuidp, vattr_t *vap)
{
	itx_t *itx;
	lr_create_t *lr;
	lr_acl_create_t *lracl;
	size_t aclsize = 0;
	size_t xvatsize = 0;
	size_t txsize;
	xvattr_t *xvap = (xvattr_t *)vap;
	void *end;
	size_t lrsize;
	size_t namesize = strlen(name) + 1;
	size_t fuidsz = 0;

	if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
		return;

	/*
	 * If we have FUIDs present then add in space for
	 * domains and ACE fuid's if any.
	 */
	if (fuidp) {
		fuidsz += fuidp->z_domain_str_sz;
		fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t);
	}

	if (vap->va_mask & ATTR_XVATTR)
		xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize);

	if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR ||
	    (int)txtype == TX_CREATE || (int)txtype == TX_MKDIR ||
	    (int)txtype == TX_MKXATTR) {
		txsize = sizeof (*lr) + namesize + fuidsz + xvatsize;
		lrsize = sizeof (*lr);
	} else {
		txsize =
		    sizeof (lr_acl_create_t) + namesize + fuidsz +
		    ZIL_ACE_LENGTH(aclsize) + xvatsize;
		lrsize = sizeof (lr_acl_create_t);
	}

	itx = zil_itx_create(txtype, txsize);

	lr = (lr_create_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_foid = zp->z_id;
	/* Store dnode slot count in 8 bits above object id. */
	LR_FOID_SET_SLOTS(lr->lr_foid, zp->z_dnodesize >> DNODE_SHIFT);
	lr->lr_mode = zp->z_mode;
	if (!IS_EPHEMERAL(KUID_TO_SUID(ZTOI(zp)->i_uid))) {
		lr->lr_uid = (uint64_t)KUID_TO_SUID(ZTOI(zp)->i_uid);
	} else {
		lr->lr_uid = fuidp->z_fuid_owner;
	}
	if (!IS_EPHEMERAL(KGID_TO_SGID(ZTOI(zp)->i_gid))) {
		lr->lr_gid = (uint64_t)KGID_TO_SGID(ZTOI(zp)->i_gid);
	} else {
		lr->lr_gid = fuidp->z_fuid_group;
	}
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
	    sizeof (uint64_t));
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
	    lr->lr_crtime, sizeof (uint64_t) * 2);

	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(ZTOZSB(zp)), &lr->lr_rdev,
	    sizeof (lr->lr_rdev)) != 0)
		lr->lr_rdev = 0;

	/*
	 * Fill in xvattr info if any
	 */
	if (vap->va_mask & ATTR_XVATTR) {
		zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap);
		end = (caddr_t)lr + lrsize + xvatsize;
	} else {
		end = (caddr_t)lr + lrsize;
	}

	/* Now fill in any ACL info */

	if (vsecp) {
		lracl = (lr_acl_create_t *)&itx->itx_lr;
		lracl->lr_aclcnt = vsecp->vsa_aclcnt;
		lracl->lr_acl_bytes = aclsize;
		lracl->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
		lracl->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
		if (vsecp->vsa_aclflags & VSA_ACE_ACLFLAGS)
			lracl->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
		else
			lracl->lr_acl_flags = 0;

		bcopy(vsecp->vsa_aclentp, end, aclsize);
		end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize);
	}

	/* drop in FUID info */
	if (fuidp) {
		end = zfs_log_fuid_ids(fuidp, end);
		end = zfs_log_fuid_domains(fuidp, end);
	}

	/*
	 * Now place file name in log record
	 */
	bcopy(name, end, namesize);

	zil_itx_assign(zilog, itx, tx);
}
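
/* zil_remove_async() is defined in zil.c but not exposed via zil.h. */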
void zil_remove_async(zilog_t *zilog, uint64_t oid);

/*
 * Handles both TX_REMOVE and TX_RMDIR transactions.
 */
void
zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, char *name, uint64_t foid, boolean_t unlinked)
{
	itx_t *itx;
	lr_remove_t *lr;
	size_t namesize = strlen(name) + 1;

	if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
	lr = (lr_remove_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	bcopy(name, (char *)(lr + 1), namesize);

	itx->itx_oid = foid;

	/*
	 * Object ids can be re-instantiated in the next txg so
	 * remove any async transactions to avoid future leaks.
	 * This can happen if a fsync occurs on the re-instantiated
	 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
	 * the new file data and flushes a write record for the old object.
	 */
	if (unlinked) {
		ASSERT((txtype & ~TX_CI) == TX_REMOVE);
		zil_remove_async(zilog, foid);
	}
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_LINK transactions.
 */
void
zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, char *name)
{
	itx_t *itx;
	lr_link_t *lr;
	size_t namesize = strlen(name) + 1;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
	lr = (lr_link_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_link_obj = zp->z_id;
	bcopy(name, (char *)(lr + 1), namesize);

	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_SYMLINK transactions.
 */
void
zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, char *name, char *link)
{
	itx_t *itx;
	lr_create_t *lr;
	size_t namesize = strlen(name) + 1;
	size_t linksize = strlen(link) + 1;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize + linksize);
	lr = (lr_create_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_foid = zp->z_id;
	lr->lr_uid = KUID_TO_SUID(ZTOI(zp)->i_uid);
	lr->lr_gid = KGID_TO_SGID(ZTOI(zp)->i_gid);
	lr->lr_mode = zp->z_mode;
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
	    sizeof (uint64_t));
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
	    lr->lr_crtime, sizeof (uint64_t) * 2);
	bcopy(name, (char *)(lr + 1), namesize);
	bcopy(link, (char *)(lr + 1) + namesize, linksize);

	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_RENAME transactions.
 */
void
zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *sdzp, char *sname, znode_t *tdzp, char *dname, znode_t *szp)
{
	itx_t *itx;
	lr_rename_t *lr;
	size_t snamesize = strlen(sname) + 1;
	size_t dnamesize = strlen(dname) + 1;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize);
	lr = (lr_rename_t *)&itx->itx_lr;
	lr->lr_sdoid = sdzp->z_id;
	lr->lr_tdoid = tdzp->z_id;
	bcopy(sname, (char *)(lr + 1), snamesize);
	bcopy(dname, (char *)(lr + 1) + snamesize, dnamesize);
	itx->itx_oid = szp->z_id;

	zil_itx_assign(zilog, itx, tx);
}

/*
 * zfs_log_write() handles TX_WRITE transactions. The specified callback is
 * called as soon as the write is on stable storage (be it via a DMU sync or a
 * ZIL commit).
 */
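/*
 * Writes of at least this many bytes are logged indirectly (WR_INDIRECT)
 * when the pool has no separate log device; see zfs_log_write() below.
 */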
long zfs_immediate_write_sz = 32768;

void
zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, offset_t off, ssize_t resid, int ioflag,
    zil_callback_t callback, void *callback_data)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
	uint32_t blocksize = zp->z_blksz;
	itx_wr_state_t write_state;
	uintptr_t fsync_cnt;

	if (zil_replaying(zilog, tx) || zp->z_unlinked ||
	    zfs_xattr_owner_unlinked(zp)) {
		if (callback != NULL)
			callback(callback_data);
		return;
	}
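
	/*
	 * Choose how the data will reach the log:
	 *   WR_INDIRECT  - data stays in the DMU; the record only points at it
	 *   WR_COPIED    - data is copied into the log record right away
	 *   WR_NEED_COPY - data is copied from the DMU at commit time
	 */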
	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
		write_state = WR_INDIRECT;
	else if (!spa_has_slogs(zilog->zl_spa) &&
	    resid >= zfs_immediate_write_sz)
		write_state = WR_INDIRECT;
	else if (ioflag & (FSYNC | FDSYNC))
		write_state = WR_COPIED;
	else
		write_state = WR_NEED_COPY;
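
	/*
	 * zfs_fsyncer_key holds a per-thread count (armed when the file is
	 * fsync()ed); while it is non-zero, decrement it and leave itx_sync
	 * set so these writes still commit synchronously (see the itx_sync
	 * test below).
	 */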
	if ((fsync_cnt = (uintptr_t)tsd_get(zfs_fsyncer_key)) != 0) {
		(void) tsd_set(zfs_fsyncer_key, (void *)(fsync_cnt - 1));
	}

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		itx_wr_state_t wr_state = write_state;
		ssize_t len = resid;

		/*
		 * A WR_COPIED record must fit entirely in one log block.
		 * Large writes can use WR_NEED_COPY, which the ZIL will
		 * split into multiple records across several log blocks
		 * if necessary.
		 */
		if (wr_state == WR_COPIED &&
		    resid > zil_max_copied_data(zilog))
			wr_state = WR_NEED_COPY;
		else if (wr_state == WR_INDIRECT)
			len = MIN(blocksize - P2PHASE(off, blocksize), resid);

		itx = zil_itx_create(txtype, sizeof (*lr) +
		    (wr_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
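
		/*
		 * For WR_COPIED records, copy the data into the record now.
		 * If the read fails, fall back to a data-less WR_NEED_COPY
		 * record and let the ZIL copy the data at commit time.
		 */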
		DB_DNODE_ENTER(db);
		if (wr_state == WR_COPIED && dmu_read_by_dnode(DB_DNODE(db),
		    off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(txtype, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			wr_state = WR_NEED_COPY;
		}
		DB_DNODE_EXIT(db);

		itx->itx_wr_state = wr_state;
		lr->lr_foid = zp->z_id;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = ZTOZSB(zp);

		if (!(ioflag & (FSYNC | FDSYNC)) && (zp->z_sync_cnt == 0) &&
		    (fsync_cnt == 0))
			itx->itx_sync = B_FALSE;

		itx->itx_callback = callback;
		itx->itx_callback_data = callback_data;
		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

/*
 * Handles TX_TRUNCATE transactions.
 */
void
zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, uint64_t off, uint64_t len)
{
	itx_t *itx;
	lr_truncate_t *lr;

	if (zil_replaying(zilog, tx) || zp->z_unlinked ||
	    zfs_xattr_owner_unlinked(zp))
		return;

	itx = zil_itx_create(txtype, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_SETATTR transactions.
 */
void
zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp)
{
	itx_t *itx;
	lr_setattr_t *lr;
	xvattr_t *xvap = (xvattr_t *)vap;
	size_t recsize = sizeof (lr_setattr_t);
	void *start;

	if (zil_replaying(zilog, tx) || zp->z_unlinked)
		return;

	/*
	 * If XVATTR is set, then the log record size needs to allow
	 * for lr_attr_t + xvattr mask, mapsize and create time,
	 * plus the actual attribute values.
	 */
	if (vap->va_mask & ATTR_XVATTR)
		recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize);

	if (fuidp)
		recsize += fuidp->z_domain_str_sz;

	itx = zil_itx_create(txtype, recsize);
	lr = (lr_setattr_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	lr->lr_mask = (uint64_t)mask_applied;
	lr->lr_mode = (uint64_t)vap->va_mode;
	if ((mask_applied & ATTR_UID) && IS_EPHEMERAL(vap->va_uid))
		lr->lr_uid = fuidp->z_fuid_owner;
	else
		lr->lr_uid = (uint64_t)vap->va_uid;

	if ((mask_applied & ATTR_GID) && IS_EPHEMERAL(vap->va_gid))
		lr->lr_gid = fuidp->z_fuid_group;
	else
		lr->lr_gid = (uint64_t)vap->va_gid;

	lr->lr_size = (uint64_t)vap->va_size;
	ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime);
	ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime);
	start = (lr_setattr_t *)(lr + 1);
	if (vap->va_mask & ATTR_XVATTR) {
		zfs_log_xvattr((lr_attr_t *)start, xvap);
		start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize);
	}

	/*
	 * Now stick the domain information, if any, on the end.
	 */
	if (fuidp)
		(void) zfs_log_fuid_domains(fuidp, start);

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Handles TX_ACL transactions.
 */
void
zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
    vsecattr_t *vsecp, zfs_fuid_info_t *fuidp)
{
	itx_t *itx;
	lr_acl_v0_t *lrv0;
	lr_acl_t *lr;
	int txtype;
	int lrsize;
	size_t txsize;
	size_t aclbytes = vsecp->vsa_aclentsz;

	if (zil_replaying(zilog, tx) || zp->z_unlinked)
		return;
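
	/*
	 * Filesystems that predate ZPL_VERSION_FUID use the original v0 ACL
	 * record; newer ones use lr_acl_t, which can also carry FUID and
	 * domain information.
	 */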
	txtype = (ZTOZSB(zp)->z_version < ZPL_VERSION_FUID) ?
	    TX_ACL_V0 : TX_ACL;

	if (txtype == TX_ACL)
		lrsize = sizeof (*lr);
	else
		lrsize = sizeof (*lrv0);

	txsize = lrsize +
	    ((txtype == TX_ACL) ? ZIL_ACE_LENGTH(aclbytes) : aclbytes) +
	    (fuidp ? fuidp->z_domain_str_sz : 0) +
	    sizeof (uint64_t) * (fuidp ? fuidp->z_fuid_cnt : 0);

	itx = zil_itx_create(txtype, txsize);

	lr = (lr_acl_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	if (txtype == TX_ACL) {
		lr->lr_acl_bytes = aclbytes;
		lr->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
		lr->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
		if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS)
			lr->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
		else
			lr->lr_acl_flags = 0;
	}
	lr->lr_aclcnt = (uint64_t)vsecp->vsa_aclcnt;

	if (txtype == TX_ACL_V0) {
		lrv0 = (lr_acl_v0_t *)lr;
		bcopy(vsecp->vsa_aclentp, (ace_t *)(lrv0 + 1), aclbytes);
	} else {
		void *start = (ace_t *)(lr + 1);

		bcopy(vsecp->vsa_aclentp, start, aclbytes);

		start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);

		if (fuidp) {
			start = zfs_log_fuid_ids(fuidp, start);
			(void) zfs_log_fuid_domains(fuidp, start);
		}
	}

	itx->itx_sync = (zp->z_sync_cnt != 0);
	zil_itx_assign(zilog, itx, tx);
}

#if defined(_KERNEL)
module_param(zfs_immediate_write_sz, long, 0644);
MODULE_PARM_DESC(zfs_immediate_write_sz, "Largest data block to write to zil");
#endif