fs/xfs/xfs_pnfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_pnfs.h"
/*
 * Ensure that we do not have any outstanding pNFS layouts that can be used by
 * clients to directly read from or write to this inode. This must be called
 * before every operation that can remove blocks from the extent map.
 * Additionally we call it during the write operation, where we aren't
 * concerned about exposing unallocated blocks but just want to provide basic
 * synchronization between a local writer and pNFS clients. mmap writes would
 * also benefit from this sort of synchronization, but due to the tricky
 * locking rules in the page fault path we don't bother.
 */
int
xfs_break_leased_layouts(
	struct inode		*inode,
	uint			*iolock,
	bool			*did_unlock)
{
	struct xfs_inode	*ip = XFS_I(inode);
	int			error;
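	/*
	 * break_layout() returns -EWOULDBLOCK while layouts are still
	 * outstanding: drop our iolock so the lease can be recalled, wait for
	 * the layouts to go away, then retake the lock exclusively and retry.
	 */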
	while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
		xfs_iunlock(ip, *iolock);
		*did_unlock = true;
		error = break_layout(inode, true);
		*iolock &= ~XFS_IOLOCK_SHARED;
		*iolock |= XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
	}

	return error;
}
/*
 * Get a unique ID including its location so that the client can identify
 * the exported device.
 */
int
xfs_fs_get_uuid(
	struct super_block	*sb,
	u8			*buf,
	u32			*len,
	u64			*offset)
{
	struct xfs_mount	*mp = XFS_M(sb);

	printk_once(KERN_NOTICE
"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
		mp->m_fsname);

	if (*len < sizeof(uuid_t))
		return -EINVAL;
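	/*
	 * Return the filesystem UUID and the on-disk byte offset at which it
	 * is stored; the pNFS client uses this pair as a signature to locate
	 * the exported block device.
	 */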
	memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
	*len = sizeof(uuid_t);
	*offset = offsetof(struct xfs_dsb, sb_uuid);
	return 0;
}
/*
 * Get a layout for the pNFS client.
 */
int
xfs_fs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	u64			length,
	struct iomap		*iomap,
	bool			write,
	u32			*device_generation)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	loff_t			limit;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;
	uint			lock_flags;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;
	/*
	 * We can't export inodes residing on the realtime device. The realtime
	 * device doesn't have a UUID to identify it, so the client has no way
	 * to find it.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		return -ENXIO;
	/*
	 * The pNFS block layout spec actually supports reflink like
	 * functionality, but the Linux pNFS server doesn't implement it yet.
	 */
	if (xfs_is_reflink_inode(ip))
		return -ENXIO;
	/*
	 * Lock out any other I/O before we flush and invalidate the pagecache,
	 * and then hand out a layout to the remote system. This is very
	 * similar to direct I/O, except that the synchronization is much more
	 * complicated. See the comment near xfs_break_leased_layouts
	 * for a detailed explanation.
	 */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
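	/*
	 * Reject offsets beyond the largest supported file size, and trim the
	 * requested length so the mapping does not extend past that limit.
	 */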
	error = -EINVAL;
	limit = mp->m_super->s_maxbytes;
	if (!write)
		limit = max(limit, round_up(i_size_read(inode),
				     inode->i_sb->s_blocksize));
	if (offset > limit)
		goto out_unlock;
	if (offset > limit - length)
		length = limit - offset;
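	/*
	 * Write back the dirty pagecache and invalidate it, so the client
	 * reads data from disk rather than racing with stale cached pages.
	 */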
	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (WARN_ON_ONCE(error))
		goto out_unlock;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	lock_flags = xfs_ilock_data_map_shared(ip);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, lock_flags);

	if (error)
		goto out_unlock;
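	/*
	 * For a write layout, make sure real blocks back the requested range:
	 * if the lookup came back as a hole, allocate blocks now so the
	 * client has somewhere to write.
	 */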
	if (write) {
		enum xfs_prealloc_flags	flags = 0;

		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);

		if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * xfs_iomap_write_direct() expects to take ownership
			 * of the shared ilock.
			 */
			xfs_ilock(ip, XFS_ILOCK_SHARED);
			error = xfs_iomap_write_direct(ip, offset, length,
						       &imap, nimaps);
			if (error)
				goto out_unlock;

			/*
			 * Ensure the next transaction is committed
			 * synchronously so that the blocks allocated and
			 * handed out to the client are guaranteed to be
			 * present even after a server crash.
			 */
			flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
		}

		error = xfs_update_prealloc_flags(ip, flags);
		if (error)
			goto out_unlock;
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	xfs_bmbt_to_iomap(ip, iomap, &imap);
	*device_generation = mp->m_generation;
	return error;
out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
/*
 * Ensure the size update falls into a valid allocated block.
 */
static int
xfs_pnfs_validate_isize(
	struct xfs_inode	*ip,
	xfs_off_t		isize)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	int			error = 0;
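	/*
	 * Look up the extent covering the last byte of the new size: if it is
	 * a hole, delayed allocation, or still unwritten, extending i_size to
	 * this point would expose blocks that were never written.
	 */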
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
				&imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	if (imap.br_startblock == HOLESTARTBLOCK ||
	    imap.br_startblock == DELAYSTARTBLOCK ||
	    imap.br_state == XFS_EXT_UNWRITTEN)
		return -EIO;
	return 0;
}
/*
 * Make sure the blocks described by maps are stable on disk. This includes
 * converting any unwritten extents, flushing the disk cache and updating the
 * time stamps.
 *
 * Note that we rely on the caller to always send us a timestamp update so
 * that we always commit a transaction here. If that stops being true we will
 * have to manually flush the cache here similar to what the fsync code path
 * does for datasyncs on files that have no dirty metadata.
 */
int
xfs_fs_commit_blocks(
	struct inode		*inode,
	struct iomap		*maps,
	int			nr_maps,
	struct iattr		*iattr)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	bool			update_isize = false;
	int			error, i;
	loff_t			size;

	ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	size = i_size_read(inode);
	if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
		update_isize = true;
		size = iattr->ia_size;
	}
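	/*
	 * Walk the committed maps, clamping each range to the new file size
	 * and skipping any range that lies entirely beyond it.
	 */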
	for (i = 0; i < nr_maps; i++) {
		u64 start, length, end;

		start = maps[i].offset;
		if (start > size)
			continue;

		end = start + maps[i].length;
		if (end > size)
			end = size;

		length = end - start;
		if (!length)
			continue;

		/*
		 * Make sure reads through the pagecache see the new data.
		 */
		error = invalidate_inode_pages2_range(inode->i_mapping,
					start >> PAGE_SHIFT,
					(end - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(error);

		error = xfs_iomap_write_unwritten(ip, start, length, false);
		if (error)
			goto out_drop_iolock;
	}
	if (update_isize) {
		error = xfs_pnfs_validate_isize(ip, size);
		if (error)
			goto out_drop_iolock;
	}
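	/*
	 * Log the timestamp (and possibly the size) update in a synchronous
	 * transaction so the metadata is on stable storage before we reply
	 * to the client.
	 */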
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_drop_iolock;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	xfs_setattr_time(ip, iattr);
	if (update_isize) {
		i_size_write(inode, iattr->ia_size);
		ip->i_d.di_size = iattr->ia_size;
	}

	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);

out_drop_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}