// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

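/*
 * Tear down a file-backed namespace: drain any buffered-io work still
 * queued, release the bvec mempool and slab cache, and drop the file
 * reference taken at enable time.
 */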
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

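/*
 * Open the backing file (O_DIRECT unless buffered I/O is configured),
 * derive the namespace size and LBA shift from its inode, and set up a
 * slab cache plus mempool so bvec allocations can still make forward
 * progress under memory pressure.
 */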
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	struct kstat stat;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		pr_err("failed to open file %s: (%ld)\n",
				ns->device_path, PTR_ERR(ns->file));
		return PTR_ERR(ns->file);
	}

	ret = vfs_getattr(&ns->file->f_path,
			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
	if (ret)
		goto err;

	ns->size = stat.size;
	/*
	 * i_blkbits can be greater than the universally accepted upper bound,
	 * so make sure we export a sane namespace lba_shift.
	 */
	ns->blksize_shift = min_t(u8,
			file_inode(ns->file)->i_blkbits, 12);

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);

	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
	bv->bv_page = sg_page(sg);
	bv->bv_offset = sg->offset;
	bv->bv_len = sg->length;
}

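/*
 * Build an iov_iter over the already-initialized bvecs and hand it to the
 * backing file's ->read_iter or ->write_iter. FUA writes are mapped to
 * IOCB_DSYNC so the data reaches stable storage before completion.
 */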
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	return call_iter(iocb, &iter);
}

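/*
 * Completion path shared by the sync and async cases: free the bvec array
 * (kfree or mempool depending on how it was allocated), translate a short
 * or failed transfer into an NVMe status code, and complete the request.
 */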
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->transfer_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}

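/*
 * Core read/write path. Normally all bvecs are submitted as one async
 * iocb; when the bvec array came from the bounded mempool and the request
 * needs more than NVMET_MAX_MPOOL_BVEC segments, the transfer is split
 * into synchronous chunks instead. Returns false only when the caller
 * should retry without IOCB_NOWAIT.
 */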
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = req->sg_cnt;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;
	int i;
	struct scatterlist *sg;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->transfer_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems returning error -EOPNOTSUPP, handle
		 * IOCB_NOWAIT error case separately and retry without
		 * IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret, 0);
	return true;
}

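/*
 * Buffered (page cache) I/O is executed from the buffered_io_wq workqueue
 * rather than inline in the submission path.
 */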
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

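/*
 * Entry point for read/write commands: pick a bvec array (inline, heap, or
 * mempool as a last resort), then execute the I/O either opportunistically
 * with IOCB_NOWAIT, via the buffered-io workqueue, or directly for
 * O_DIRECT namespaces.
 */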
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = req->sg_cnt;

	if (!nvmet_check_data_len(req, nvmet_rw_len(req)))
		return;

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
				nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

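/*
 * Deallocate (discard) is implemented by punching holes in the backing
 * file; ranges beyond the namespace size or fallocate() failures other
 * than -EOPNOTSUPP terminate the loop and report the offending SLBA.
 */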
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret && ret != -EOPNOTSUPP) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}

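/*
 * DSM handling: only the deallocate (AD) attribute is backed by a real
 * operation; the integral read/write hints are completed as no-ops.
 */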
static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, nvmet_dsm_len(req)))
		return;
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

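/*
 * Write Zeroes maps to fallocate(FALLOC_FL_ZERO_RANGE); the NVMe length
 * field is 0's based, hence the +1 when converting to bytes.
 */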
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	if (!nvmet_check_data_len(req, 0))
		return;
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

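/*
 * Set up the per-opcode ->execute handler for file-backed namespaces;
 * unsupported opcodes are rejected with Invalid Opcode (do not retry).
 */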
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}