fs/nfsd/blocklayout.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014-2016 Christoph Hellwig.
 */
#include <linux/exportfs.h>
#include <linux/iomap.h>
#include <linux/slab.h>
#include <linux/pr.h>

#include <linux/nfsd/debug.h>

#include "blocklayoutxdr.h"
#include "pnfs.h"
#include "filecache.h"
#include "vfs.h"

#define NFSDDBG_FACILITY	NFSDDBG_PNFS

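/*
 * Build a pNFS block extent for a LAYOUTGET request: ask the exported
 * filesystem to map the requested range via ->map_blocks() and translate
 * the returned iomap into the matching pNFS block extent state.
 */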
static __be32
nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
		struct nfsd4_layoutget *args)
{
	struct nfsd4_layout_seg *seg = &args->lg_seg;
	struct super_block *sb = inode->i_sb;
	u32 block_size = i_blocksize(inode);
	struct pnfs_block_extent *bex;
	struct iomap iomap;
	u32 device_generation = 0;
	int error;

	if (seg->offset & (block_size - 1)) {
		dprintk("pnfsd: I/O misaligned\n");
		goto out_layoutunavailable;
	}

	/*
	 * Some clients barf on non-zero block numbers for NONE or INVALID
	 * layouts, so make sure to zero the whole structure.
	 */
	error = -ENOMEM;
	bex = kzalloc(sizeof(*bex), GFP_KERNEL);
	if (!bex)
		goto out_error;
	args->lg_content = bex;

	error = sb->s_export_op->map_blocks(inode, seg->offset, seg->length,
			&iomap, seg->iomode != IOMODE_READ,
			&device_generation);
	if (error) {
		if (error == -ENXIO)
			goto out_layoutunavailable;
		goto out_error;
	}

	if (iomap.length < args->lg_minlength) {
		dprintk("pnfsd: extent smaller than minlength\n");
		goto out_layoutunavailable;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (seg->iomode == IOMODE_READ)
			bex->es = PNFS_BLOCK_READ_DATA;
		else
			bex->es = PNFS_BLOCK_READWRITE_DATA;
		bex->soff = iomap.addr;
		break;
	case IOMAP_UNWRITTEN:
		if (seg->iomode & IOMODE_RW) {
			/*
			 * Crack monkey special case from section 2.3.1.
			 */
			if (args->lg_minlength == 0) {
				dprintk("pnfsd: no soup for you!\n");
				goto out_layoutunavailable;
			}

			bex->es = PNFS_BLOCK_INVALID_DATA;
			bex->soff = iomap.addr;
			break;
		}
		fallthrough;
	case IOMAP_HOLE:
		if (seg->iomode == IOMODE_READ) {
			bex->es = PNFS_BLOCK_NONE_DATA;
			break;
		}
		fallthrough;
	case IOMAP_DELALLOC:
	default:
		WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type);
		goto out_layoutunavailable;
	}

	error = nfsd4_set_deviceid(&bex->vol_id, fhp, device_generation);
	if (error)
		goto out_error;
	bex->foff = iomap.offset;
	bex->len = iomap.length;

	seg->offset = iomap.offset;
	seg->length = iomap.length;

	dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es);
	return 0;

out_error:
	seg->length = 0;
	return nfserrno(error);
out_layoutunavailable:
	seg->length = 0;
	return nfserr_layoutunavailable;
}

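/*
 * Apply a client's LAYOUTCOMMIT: use the client-supplied mtime unless it
 * is UTIME_NOW or older than the inode's current mtime (in which case the
 * current time is used instead), grow i_size if the last write extended
 * the file, and hand the now-written extents to the exported filesystem's
 * ->commit_blocks() method.
 */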
static __be32
nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
		struct iomap *iomaps, int nr_iomaps)
{
	struct timespec64 mtime = inode_get_mtime(inode);
	loff_t new_size = lcp->lc_last_wr + 1;
	struct iattr iattr = { .ia_valid = 0 };
	int error;

	if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
	    timespec64_compare(&lcp->lc_mtime, &mtime) < 0)
		lcp->lc_mtime = current_time(inode);
	iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
	iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;

	if (new_size > i_size_read(inode)) {
		iattr.ia_valid |= ATTR_SIZE;
		iattr.ia_size = new_size;
	}

	error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
			nr_iomaps, &iattr);
	kfree(iomaps);
	return nfserrno(error);
}

#ifdef CONFIG_NFSD_BLOCKLAYOUT
static int
nfsd4_block_get_device_info_simple(struct super_block *sb,
		struct nfsd4_getdeviceinfo *gdp)
{
	struct pnfs_block_deviceaddr *dev;
	struct pnfs_block_volume *b;

	dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	gdp->gd_device = dev;

	dev->nr_volumes = 1;
	b = &dev->volumes[0];

	b->type = PNFS_BLOCK_VOLUME_SIMPLE;
	b->simple.sig_len = PNFS_BLOCK_UUID_LEN;
	return sb->s_export_op->get_uuid(sb, b->simple.sig, &b->simple.sig_len,
			&b->simple.offset);
}

static __be32
nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
		struct svc_rqst *rqstp,
		struct nfs4_client *clp,
		struct nfsd4_getdeviceinfo *gdp)
{
	if (bdev_is_partition(sb->s_bdev))
		return nfserr_inval;
	return nfserrno(nfsd4_block_get_device_info_simple(sb, gdp));
}

static __be32
nfsd4_block_proc_layoutcommit(struct inode *inode,
		struct nfsd4_layoutcommit *lcp)
{
	struct iomap *iomaps;
	int nr_iomaps;

	nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
			lcp->lc_up_len, &iomaps, i_blocksize(inode));
	if (nr_iomaps < 0)
		return nfserrno(nr_iomaps);

	return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}

const struct nfsd4_layout_ops bl_layout_ops = {
	/*
	 * Pretend that we send notification to the client.  This is a blatant
	 * lie to force recent Linux clients to cache our device IDs.
	 * We rarely ever change the device ID, so the harm of leaking deviceids
	 * for a while isn't too bad.  Unfortunately RFC5661 is a complete mess
	 * in this regard, but I filed errata 4119 for this a while ago, and
	 * hopefully the Linux client will eventually start caching deviceids
	 * without this again.
	 */
	.notify_types		=
			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
	.proc_getdeviceinfo	= nfsd4_block_proc_getdeviceinfo,
	.encode_getdeviceinfo	= nfsd4_block_encode_getdeviceinfo,
	.proc_layoutget		= nfsd4_block_proc_layoutget,
	.encode_layoutget	= nfsd4_block_encode_layoutget,
	.proc_layoutcommit	= nfsd4_block_proc_layoutcommit,
};
#endif /* CONFIG_NFSD_BLOCKLAYOUT */

#ifdef CONFIG_NFSD_SCSILAYOUT
#define NFSD_MDS_PR_KEY		0x0100000000000000ULL

/*
 * We use the client ID as a unique key for the reservations.
 * This allows us to easily fence a client when recalls fail.
 */
static u64 nfsd4_scsi_pr_key(struct nfs4_client *clp)
{
	return ((u64)clp->cl_clientid.cl_boot << 32) | clp->cl_clientid.cl_id;
}

static const u8 designator_types[] = {
	PS_DESIGNATOR_EUI64,
	PS_DESIGNATOR_NAA,
};

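/*
 * Probe the disk for a stable unique identifier to use as the volume
 * designator, trying EUI-64 first and then NAA; the first designator the
 * driver reports is used, with a binary code set.
 */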
static int
nfsd4_block_get_unique_id(struct gendisk *disk, struct pnfs_block_volume *b)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(designator_types); i++) {
		u8 type = designator_types[i];

		ret = disk->fops->get_unique_id(disk, b->scsi.designator, type);
		if (ret > 0) {
			b->scsi.code_set = PS_CODE_SET_BINARY;
			b->scsi.designator_type = type;
			b->scsi.designator_len = ret;
			return 0;
		}
	}

	return -EINVAL;
}

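/*
 * Describe the SCSI volume to the client and set up persistent
 * reservations on it: register the MDS key and take an exclusive-access,
 * registrants-only reservation.  The per-client key handed out in the
 * device info is what nfsd4_scsi_fence_client() later preempts if the
 * client has to be fenced.
 */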
static int
nfsd4_block_get_device_info_scsi(struct super_block *sb,
		struct nfs4_client *clp,
		struct nfsd4_getdeviceinfo *gdp)
{
	struct pnfs_block_deviceaddr *dev;
	struct pnfs_block_volume *b;
	const struct pr_ops *ops;
	int ret;

	dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	gdp->gd_device = dev;

	dev->nr_volumes = 1;
	b = &dev->volumes[0];

	b->type = PNFS_BLOCK_VOLUME_SCSI;
	b->scsi.pr_key = nfsd4_scsi_pr_key(clp);

	ret = nfsd4_block_get_unique_id(sb->s_bdev->bd_disk, b);
	if (ret < 0)
		goto out_free_dev;

	ret = -EINVAL;
	ops = sb->s_bdev->bd_disk->fops->pr_ops;
	if (!ops) {
		pr_err("pNFS: device %s does not support PRs.\n",
			sb->s_id);
		goto out_free_dev;
	}

	ret = ops->pr_register(sb->s_bdev, 0, NFSD_MDS_PR_KEY, true);
	if (ret) {
		pr_err("pNFS: failed to register key for device %s.\n",
			sb->s_id);
		goto out_free_dev;
	}

	ret = ops->pr_reserve(sb->s_bdev, NFSD_MDS_PR_KEY,
			PR_EXCLUSIVE_ACCESS_REG_ONLY, 0);
	if (ret) {
		pr_err("pNFS: failed to reserve device %s.\n",
			sb->s_id);
		goto out_free_dev;
	}

	return 0;

out_free_dev:
	kfree(dev);
	gdp->gd_device = NULL;
	return ret;
}

static __be32
nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
		struct svc_rqst *rqstp,
		struct nfs4_client *clp,
		struct nfsd4_getdeviceinfo *gdp)
{
	if (bdev_is_partition(sb->s_bdev))
		return nfserr_inval;
	return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp));
}

static __be32
nfsd4_scsi_proc_layoutcommit(struct inode *inode,
		struct nfsd4_layoutcommit *lcp)
{
	struct iomap *iomaps;
	int nr_iomaps;

	nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout,
			lcp->lc_up_len, &iomaps, i_blocksize(inode));
	if (nr_iomaps < 0)
		return nfserrno(nr_iomaps);

	return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}

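/*
 * Fence a misbehaving client by preempting its persistent reservation
 * registration, using the key derived from its client ID.
 */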
static void
nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls, struct nfsd_file *file)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct block_device *bdev = file->nf_file->f_path.mnt->mnt_sb->s_bdev;

	bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
			nfsd4_scsi_pr_key(clp), 0, true);
}

const struct nfsd4_layout_ops scsi_layout_ops = {
	/*
	 * Pretend that we send notification to the client.  This is a blatant
	 * lie to force recent Linux clients to cache our device IDs.
	 * We rarely ever change the device ID, so the harm of leaking deviceids
	 * for a while isn't too bad.  Unfortunately RFC5661 is a complete mess
	 * in this regard, but I filed errata 4119 for this a while ago, and
	 * hopefully the Linux client will eventually start caching deviceids
	 * without this again.
	 */
	.notify_types		=
			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
	.proc_getdeviceinfo	= nfsd4_scsi_proc_getdeviceinfo,
	.encode_getdeviceinfo	= nfsd4_block_encode_getdeviceinfo,
	.proc_layoutget		= nfsd4_block_proc_layoutget,
	.encode_layoutget	= nfsd4_block_encode_layoutget,
	.proc_layoutcommit	= nfsd4_scsi_proc_layoutcommit,
	.fence_client		= nfsd4_scsi_fence_client,
};
#endif /* CONFIG_NFSD_SCSILAYOUT */