fs/xfs/xfs_pnfs.c
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_pnfs.h"
/*
 * Ensure that we do not have any outstanding pNFS layouts that can be used by
 * clients to directly read from or write to this inode. This must be called
 * before every operation that can remove blocks from the extent map.
 * Additionally we call it during the write operation, where we aren't
 * concerned about exposing unallocated blocks but just want to provide basic
 * synchronization between a local writer and pNFS clients. mmap writes would
 * also benefit from this sort of synchronization, but due to the tricky
 * locking rules in the page fault path we don't bother.
 */
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	bool			with_imutex)
{
	struct xfs_inode	*ip = XFS_I(inode);
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
		xfs_iunlock(ip, *iolock);
		if (with_imutex && (*iolock & XFS_IOLOCK_EXCL))
			inode_unlock(inode);
		error = break_layout(inode, true);
		*iolock = XFS_IOLOCK_EXCL;
		if (with_imutex)
			inode_lock(inode);
		xfs_ilock(ip, *iolock);
	}

	return error;
}
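
/*
 * Illustrative sketch, not part of the original file: a typical caller takes
 * the IOLOCK first and then breaks any outstanding layouts before removing
 * blocks from the extent map. The helper name below is hypothetical and only
 * demonstrates the calling convention of xfs_break_layouts().
 */
static int example_break_before_punch(struct inode *inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	uint			iolock = XFS_IOLOCK_EXCL;
	int			error;

	xfs_ilock(ip, iolock);
	/* false: this hypothetical caller does not hold the inode mutex */
	error = xfs_break_layouts(inode, &iolock, false);
	if (error) {
		xfs_iunlock(ip, iolock);
		return error;
	}

	/* ... remove blocks (truncate/punch), then drop the iolock ... */
	xfs_iunlock(ip, iolock);
	return 0;
}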
/*
 * Get a unique ID including its location so that the client can identify
 * the exported device.
 */
int
xfs_fs_get_uuid(
	struct super_block	*sb,
	u8			*buf,
	u32			*len,
	u64			*offset)
{
	struct xfs_mount	*mp = XFS_M(sb);

	printk_once(KERN_NOTICE
"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
		mp->m_fsname);

	if (*len < sizeof(uuid_t))
		return -EINVAL;

	memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
	*len = sizeof(uuid_t);
	*offset = offsetof(struct xfs_dsb, sb_uuid);
	return 0;
}
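
/*
 * For context, a hedged sketch that is not part of this file: the three pNFS
 * handlers defined here are wired up through the filesystem's
 * export_operations (see fs/xfs/xfs_export.c); the exact config guard around
 * them varies by kernel version. Kept under #if 0 as illustration only.
 */
#if 0
static const struct export_operations example_export_operations = {
	.get_uuid	= xfs_fs_get_uuid,
	.map_blocks	= xfs_fs_map_blocks,
	.commit_blocks	= xfs_fs_commit_blocks,
};
#endif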
/*
 * Get a layout for the pNFS client.
 */
int
xfs_fs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	u64			length,
	struct iomap		*iomap,
	bool			write,
	u32			*device_generation)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	loff_t			limit;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;
	uint			lock_flags;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * We can't export inodes residing on the realtime device. The realtime
	 * device doesn't have a UUID to identify it, so the client has no way
	 * to find it.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		return -ENXIO;

	/*
	 * Lock out any other I/O before we flush and invalidate the pagecache,
	 * and then hand out a layout to the remote system. This is very
	 * similar to direct I/O, except that the synchronization is much more
	 * complicated. See the comment near xfs_break_layouts for a detailed
	 * explanation.
	 */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	error = -EINVAL;
	limit = mp->m_super->s_maxbytes;
	if (!write)
		limit = max(limit, round_up(i_size_read(inode),
					    inode->i_sb->s_blocksize));
	if (offset > limit)
		goto out_unlock;
	if (offset > limit - length)
		length = limit - offset;

	error = filemap_write_and_wait(inode->i_mapping);
	if (error)
		goto out_unlock;
	error = invalidate_inode_pages2(inode->i_mapping);
	if (WARN_ON_ONCE(error))
		return error;

	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	lock_flags = xfs_ilock_data_map_shared(ip);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
			       &imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, lock_flags);

	if (error)
		goto out_unlock;

	if (write) {
		enum xfs_prealloc_flags	flags = 0;

		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);

		if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * xfs_iomap_write_direct() expects to take ownership of
			 * the shared ilock.
			 */
			xfs_ilock(ip, XFS_ILOCK_SHARED);
			error = xfs_iomap_write_direct(ip, offset, length,
						       &imap, nimaps);
			if (error)
				goto out_unlock;

			/*
			 * Ensure the next transaction is committed
			 * synchronously so that the blocks allocated and
			 * handed out to the client are guaranteed to be
			 * present even after a server crash.
			 */
			flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
		}

		error = xfs_update_prealloc_flags(ip, flags);
		if (error)
			goto out_unlock;
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	xfs_bmbt_to_iomap(ip, iomap, &imap);
	*device_generation = mp->m_generation;
	return error;
out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
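
/*
 * Hedged sketch, not part of the original file: how a pNFS exporter such as
 * the nfsd block layout code might consume this hook. The function name and
 * error handling are illustrative assumptions; the real consumer calls it
 * through export_operations and encodes the returned iomap into a layout
 * extent for the client.
 */
static int example_layoutget(struct inode *inode, loff_t offset, u64 length,
		bool write)
{
	struct iomap		iomap = { 0 };
	u32			device_generation = 0;
	int			error;

	error = xfs_fs_map_blocks(inode, offset, length, &iomap, write,
			&device_generation);
	if (error)
		return error;

	/* ... encode iomap.offset and iomap.length into the layout reply ... */
	return 0;
}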
/*
 * Ensure the size update falls into a valid allocated block.
 */
static int
xfs_pnfs_validate_isize(
	struct xfs_inode	*ip,
	xfs_off_t		isize)
{
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	int			error = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
			       &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	if (imap.br_startblock == HOLESTARTBLOCK ||
	    imap.br_startblock == DELAYSTARTBLOCK ||
	    imap.br_state == XFS_EXT_UNWRITTEN)
		return -EIO;
	return 0;
}
/*
 * Make sure the blocks described by maps are stable on disk. This includes
 * converting any unwritten extents, flushing the disk cache and updating the
 * time stamps.
 *
 * Note that we rely on the caller to always send us a timestamp update so that
 * we always commit a transaction here. If that stops being true we will have
 * to manually flush the cache here similar to what the fsync code path does
 * for datasyncs on files that have no dirty metadata.
 */
int
xfs_fs_commit_blocks(
	struct inode		*inode,
	struct iomap		*maps,
	int			nr_maps,
	struct iattr		*iattr)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	bool			update_isize = false;
	int			error, i;
	loff_t			size;

	ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	size = i_size_read(inode);
	if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
		update_isize = true;
		size = iattr->ia_size;
	}

	for (i = 0; i < nr_maps; i++) {
		u64 start, length, end;

		start = maps[i].offset;
		if (start > size)
			continue;

		end = start + maps[i].length;
		if (end > size)
			end = size;

		length = end - start;
		if (!length)
			continue;

		/*
		 * Make sure reads through the pagecache see the new data.
		 */
		error = invalidate_inode_pages2_range(inode->i_mapping,
					start >> PAGE_SHIFT,
					(end - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(error);

		error = xfs_iomap_write_unwritten(ip, start, length);
		if (error)
			goto out_drop_iolock;
	}

	if (update_isize) {
		error = xfs_pnfs_validate_isize(ip, size);
		if (error)
			goto out_drop_iolock;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_drop_iolock;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	xfs_setattr_time(ip, iattr);
	if (update_isize) {
		i_size_write(inode, iattr->ia_size);
		ip->i_d.di_size = iattr->ia_size;
	}

	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);

out_drop_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
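
/*
 * Hedged sketch, not part of the original file: the comment above notes that
 * the caller always sends a timestamp update, so a LAYOUTCOMMIT-style caller
 * would look roughly like this. The helper name and the use of current_time()
 * are illustrative assumptions, not the real nfsd code.
 */
static int example_layoutcommit(struct inode *inode, struct iomap *maps,
		int nr_maps, loff_t new_size)
{
	struct iattr	iattr = { 0 };

	iattr.ia_valid = ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
	iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = current_time(inode);
	if (new_size > i_size_read(inode)) {
		iattr.ia_valid |= ATTR_SIZE;
		iattr.ia_size = new_size;
	}

	return xfs_fs_commit_blocks(inode, maps, nr_maps, &iattr);
}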