// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include <linux/fs.h>
#include "nvmet.h"

#define NVMET_MIN_MPOOL_OBJ		16

void nvmet_file_ns_revalidate(struct nvmet_ns *ns)
{
	ns->size = i_size_read(ns->file->f_mapping->host);
}

void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}
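
/*
 * Open and size the backing file for a file-backed namespace. Unless the
 * namespace is configured for buffered I/O the file is opened O_DIRECT,
 * and a small per-namespace bio_vec mempool is created as a fallback
 * allocator for the I/O path under memory pressure.
 */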
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	int ret = 0;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		ret = PTR_ERR(ns->file);
		pr_err("failed to open file %s: (%d)\n",
			ns->device_path, ret);
		ns->file = NULL;
		return ret;
	}

	nvmet_file_ns_revalidate(ns);

	/*
	 * i_blkbits can be greater than the universally accepted upper bound,
	 * so make sure we export a sane namespace lba_shift.
	 */
	ns->blksize_shift = min_t(u8,
			file_inode(ns->file)->i_blkbits, 12);

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, nvmet_bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	fput(ns->file);
	ns->file = NULL;
	ns->size = 0;
	ns->blksize_shift = 0;
	return ret;
}
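
/*
 * Build an iov_iter over req->f.bvec and submit it through the backing
 * file's ->read_iter or ->write_iter. Writes carrying the FUA bit are
 * submitted with IOCB_DSYNC so they are made durable before completion.
 */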
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = ITER_SOURCE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = ITER_DEST;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb->ki_filp->f_iocb_flags;

	return call_iter(iocb, &iter);
}

static void nvmet_file_io_done(struct kiocb *iocb, long ret)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->transfer_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}
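
/*
 * Map the request's scatterlist into req->f.bvec and submit the I/O. If
 * the bvec array came from the smaller mempool, large requests are split
 * into NVMET_MAX_MPOOL_BVEC-sized chunks and submitted synchronously;
 * otherwise a single submission is issued, completing asynchronously via
 * nvmet_file_io_done() unless IOCB_NOWAIT was requested. Returns false
 * when the caller should retry without IOCB_NOWAIT.
 */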
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = req->sg_cnt;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;
	int i;
	struct scatterlist *sg;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->transfer_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		bvec_set_page(&req->f.bvec[bv_cnt], sg_page(sg), sg->length,
			      sg->offset);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems returning error -EOPNOTSUPP, handle
		 * IOCB_NOWAIT error case separately and retry without
		 * IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret);
	return true;
}

static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}
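
/*
 * Read/Write handler: pick a bio_vec array (inline for small transfers,
 * kmalloc_array() otherwise, the namespace mempool as a last resort under
 * memory pressure). For buffered I/O namespaces a non-blocking IOCB_NOWAIT
 * submission is attempted first, falling back to the buffered_io workqueue;
 * direct I/O namespaces submit in the caller's context.
 */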
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = req->sg_cnt;

	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
		return;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
		    (req->ns->file->f_mode & FMODE_NOWAIT) &&
		    nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	queue_work(nvmet_wq, &req->f.work);
}
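
/*
 * DSM deallocate: punch a hole for each range in the command with
 * vfs_fallocate(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE). A filesystem
 * returning -EOPNOTSUPP is not treated as an error; ranges beyond the end
 * of the file fail the request and record the offending SLBA in error_slba.
 */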
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret && ret != -EOPNOTSUPP) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}

static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
		return;
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	queue_work(nvmet_wq, &req->f.work);
}
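
/*
 * Write Zeroes is implemented with vfs_fallocate(FALLOC_FL_ZERO_RANGE |
 * FALLOC_FL_KEEP_SIZE); the command's length field is 0's based, hence
 * the +1 when computing the byte length.
 */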
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	queue_work(nvmet_wq, &req->f.work);
}
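
/*
 * Install the ->execute handler for an I/O command directed at a
 * file-backed namespace; unsupported opcodes are reported as invalid.
 */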
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	switch (req->cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}