// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC		16
#define NVMET_MIN_MPOOL_OBJ		16

void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
	if (ns->file) {
		if (ns->buffered_io)
			flush_workqueue(buffered_io_wq);
		mempool_destroy(ns->bvec_pool);
		ns->bvec_pool = NULL;
		kmem_cache_destroy(ns->bvec_cache);
		ns->bvec_cache = NULL;
		fput(ns->file);
		ns->file = NULL;
	}
}

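/*
 * Open the namespace backing file, record its size and block size shift,
 * and create the bio_vec slab cache plus the mempool used as a fallback
 * bvec allocation under memory pressure.
 */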
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
	int flags = O_RDWR | O_LARGEFILE;
	struct kstat stat;
	int ret;

	if (!ns->buffered_io)
		flags |= O_DIRECT;

	ns->file = filp_open(ns->device_path, flags, 0);
	if (IS_ERR(ns->file)) {
		pr_err("failed to open file %s: (%ld)\n",
				ns->device_path, PTR_ERR(ns->file));
		return PTR_ERR(ns->file);
	}

	ret = vfs_getattr(&ns->file->f_path,
			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
	if (ret)
		goto err;

	ns->size = stat.size;
	ns->blksize_shift = file_inode(ns->file)->i_blkbits;

	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ns->bvec_cache) {
		ret = -ENOMEM;
		goto err;
	}

	ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
			mempool_free_slab, ns->bvec_cache);
	if (!ns->bvec_pool) {
		ret = -ENOMEM;
		goto err;
	}

	return ret;
err:
	ns->size = 0;
	ns->blksize_shift = 0;
	nvmet_file_ns_disable(ns);
	return ret;
}

static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
{
	bv->bv_page = sg_page_iter_page(iter);
	bv->bv_offset = iter->sg->offset;
	bv->bv_len = PAGE_SIZE - iter->sg->offset;
}

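/*
 * Build an iov_iter over the request's bvec array and issue it through the
 * backing file's ->read_iter or ->write_iter. Writes carrying the FUA bit
 * are submitted with IOCB_DSYNC so the data is durable on completion.
 */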
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
		unsigned long nr_segs, size_t count, int ki_flags)
{
	struct kiocb *iocb = &req->f.iocb;
	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
	struct iov_iter iter;
	int rw;

	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
			ki_flags |= IOCB_DSYNC;
		call_iter = req->ns->file->f_op->write_iter;
		rw = WRITE;
	} else {
		call_iter = req->ns->file->f_op->read_iter;
		rw = READ;
	}

	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

	iocb->ki_pos = pos;
	iocb->ki_filp = req->ns->file;
	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

	return call_iter(iocb, &iter);
}

static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
	u16 status = NVME_SC_SUCCESS;

	if (req->f.bvec != req->inline_bvec) {
		if (likely(req->f.mpool_alloc == false))
			kfree(req->f.bvec);
		else
			mempool_free(req->f.bvec, req->ns->bvec_pool);
	}

	if (unlikely(ret != req->data_len))
		status = errno_to_nvme_status(req, ret);
	nvmet_req_complete(req, status);
}

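/*
 * Map the request's scatterlist into the bvec array and submit it. The
 * normal path issues a single asynchronous iocb completed by
 * nvmet_file_io_done(); when the bvec array came from the mempool and the
 * request needs more than NVMET_MAX_MPOOL_BVEC segments, the I/O is instead
 * split into synchronous chunks of at most NVMET_MAX_MPOOL_BVEC bvecs.
 * Returns false only when an IOCB_NOWAIT attempt should be retried from a
 * context that may block.
 */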
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
	struct sg_page_iter sg_pg_iter;
	unsigned long bv_cnt = 0;
	bool is_sync = false;
	size_t len = 0, total_len = 0;
	ssize_t ret = 0;
	loff_t pos;

	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
		is_sync = true;

	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
	if (unlikely(pos + req->data_len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return true;
	}

	memset(&req->f.iocb, 0, sizeof(struct kiocb));
	for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
		len += req->f.bvec[bv_cnt].bv_len;
		total_len += req->f.bvec[bv_cnt].bv_len;
		bv_cnt++;

		WARN_ON_ONCE((nr_bvec - 1) < 0);

		if (unlikely(is_sync) &&
		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
			if (ret < 0)
				goto complete;

			pos += len;
			bv_cnt = 0;
			len = 0;
		}
		nr_bvec--;
	}

	if (WARN_ON_ONCE(total_len != req->data_len)) {
		ret = -EIO;
		goto complete;
	}

	if (unlikely(is_sync)) {
		ret = total_len;
		goto complete;
	}

	/*
	 * A NULL ki_complete asks for synchronous execution, which we want
	 * for the IOCB_NOWAIT case.
	 */
	if (!(ki_flags & IOCB_NOWAIT))
		req->f.iocb.ki_complete = nvmet_file_io_done;

	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

	switch (ret) {
	case -EIOCBQUEUED:
		return true;
	case -EAGAIN:
		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
			goto complete;
		return false;
	case -EOPNOTSUPP:
		/*
		 * For file systems returning error -EOPNOTSUPP, handle
		 * IOCB_NOWAIT error case separately and retry without
		 * IOCB_NOWAIT.
		 */
		if ((ki_flags & IOCB_NOWAIT))
			return false;
		break;
	}

complete:
	nvmet_file_io_done(&req->f.iocb, ret, 0);
	return true;
}

static void nvmet_file_buffered_io_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
	queue_work(buffered_io_wq, &req->f.work);
}

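/*
 * Read/Write handler: pick the bvec array (the inline bvecs for small
 * transfers, a kmalloc'd array for larger ones, or the mempool when that
 * allocation fails), then either try a non-blocking IOCB_NOWAIT submission
 * and fall back to the buffered_io workqueue, or submit directly for
 * namespaces using direct I/O.
 */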
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);

	if (!req->sg_cnt || !nr_bvec) {
		nvmet_req_complete(req, 0);
		return;
	}

	if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
		req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				GFP_KERNEL);
	else
		req->f.bvec = req->inline_bvec;

	if (unlikely(!req->f.bvec)) {
		/* fallback under memory pressure */
		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
		req->f.mpool_alloc = true;
	} else
		req->f.mpool_alloc = false;

	if (req->ns->buffered_io) {
		if (likely(!req->f.mpool_alloc) &&
				nvmet_file_execute_io(req, IOCB_NOWAIT))
			return;
		nvmet_file_submit_buffered_io(req);
	} else
		nvmet_file_execute_io(req, 0);
}

u16 nvmet_file_flush(struct nvmet_req *req)
{
	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_flush_work);
	schedule_work(&req->f.work);
}

static void nvmet_file_execute_discard(struct nvmet_req *req)
{
	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
	struct nvme_dsm_range range;
	loff_t offset, len;
	u16 status = 0;
	int ret;
	int i;

	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
					sizeof(range));
		if (status)
			break;

		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
		len = le32_to_cpu(range.nlb);
		len <<= req->ns->blksize_shift;
		if (offset + len > req->ns->size) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, -ENOSPC);
			break;
		}

		ret = vfs_fallocate(req->ns->file, mode, offset, len);
		if (ret) {
			req->error_slba = le64_to_cpu(range.slba);
			status = errno_to_nvme_status(req, ret);
			break;
		}
	}

	nvmet_req_complete(req, status);
}

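/*
 * Dataset Management: only the deallocate attribute is implemented (as hole
 * punching via vfs_fallocate); the integral read/write hints complete
 * successfully without doing anything.
 */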
static void nvmet_file_dsm_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
	case NVME_DSMGMT_AD:
		nvmet_file_execute_discard(req);
		return;
	case NVME_DSMGMT_IDR:
	case NVME_DSMGMT_IDW:
	default:
		/* Not supported yet */
		nvmet_req_complete(req, 0);
		return;
	}
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
	schedule_work(&req->f.work);
}

static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
	int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
	loff_t offset;
	loff_t len;
	int ret;

	offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
	len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
			req->ns->blksize_shift);

	if (unlikely(offset + len > req->ns->size)) {
		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
		return;
	}

	ret = vfs_fallocate(req->ns->file, mode, offset, len);
	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
	schedule_work(&req->f.work);
}

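/*
 * Set up the execute handler and expected data length for I/O commands
 * targeting a file-backed namespace; unknown opcodes fail with
 * NVME_SC_INVALID_OPCODE.
 */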
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_read:
	case nvme_cmd_write:
		req->execute = nvmet_file_execute_rw;
		req->data_len = nvmet_rw_len(req);
		return 0;
	case nvme_cmd_flush:
		req->execute = nvmet_file_execute_flush;
		req->data_len = 0;
		return 0;
	case nvme_cmd_dsm:
		req->execute = nvmet_file_execute_dsm;
		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
			sizeof(struct nvme_dsm_range);
		return 0;
	case nvme_cmd_write_zeroes:
		req->execute = nvmet_file_execute_write_zeroes;
		req->data_len = 0;
		return 0;
	default:
		pr_err("unhandled cmd for file ns %d on qid %d\n",
				cmd->common.opcode, req->sq->qid);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}