/* fs/ntfs/aops.h */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/**
 * aops.h - Defines for NTFS kernel address space operations and page cache
 *	    handling.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2004 Anton Altaparmakov
 * Copyright (c) 2002 Richard Russon
 */
#ifndef _LINUX_NTFS_AOPS_H
#define _LINUX_NTFS_AOPS_H

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/fs.h>

#include "inode.h"
/**
 * ntfs_unmap_page - release a page that was mapped using ntfs_map_page()
 * @page:	the page to release
 *
 * Unpin, unmap and release a page that was obtained from ntfs_map_page().
 */
static inline void ntfs_unmap_page(struct page *page)
{
	/* Undo the kmap() performed by ntfs_map_page()... */
	kunmap(page);
	/* ...then drop the reference that read_mapping_page() took. */
	put_page(page);
}
32 /**
33 * ntfs_map_page - map a page into accessible memory, reading it if necessary
34 * @mapping: address space for which to obtain the page
35 * @index: index into the page cache for @mapping of the page to map
37 * Read a page from the page cache of the address space @mapping at position
38 * @index, where @index is in units of PAGE_SIZE, and not in bytes.
40 * If the page is not in memory it is loaded from disk first using the readpage
41 * method defined in the address space operations of @mapping and the page is
42 * added to the page cache of @mapping in the process.
44 * If the page belongs to an mst protected attribute and it is marked as such
45 * in its ntfs inode (NInoMstProtected()) the mst fixups are applied but no
46 * error checking is performed. This means the caller has to verify whether
47 * the ntfs record(s) contained in the page are valid or not using one of the
48 * ntfs_is_XXXX_record{,p}() macros, where XXXX is the record type you are
49 * expecting to see. (For details of the macros, see fs/ntfs/layout.h.)
51 * If the page is in high memory it is mapped into memory directly addressible
52 * by the kernel.
54 * Finally the page count is incremented, thus pinning the page into place.
56 * The above means that page_address(page) can be used on all pages obtained
57 * with ntfs_map_page() to get the kernel virtual address of the page.
59 * When finished with the page, the caller has to call ntfs_unmap_page() to
60 * unpin, unmap and release the page.
62 * Note this does not grant exclusive access. If such is desired, the caller
63 * must provide it independently of the ntfs_{un}map_page() calls by using
64 * a {rw_}semaphore or other means of serialization. A spin lock cannot be
65 * used as ntfs_map_page() can block.
67 * The unlocked and uptodate page is returned on success or an encoded error
68 * on failure. Caller has to test for error using the IS_ERR() macro on the
69 * return value. If that evaluates to 'true', the negative error code can be
70 * obtained using PTR_ERR() on the return value of ntfs_map_page().
72 static inline struct page *ntfs_map_page(struct address_space *mapping,
73 unsigned long index)
75 struct page *page = read_mapping_page(mapping, index, NULL);
77 if (!IS_ERR(page)) {
78 kmap(page);
79 if (!PageError(page))
80 return page;
81 ntfs_unmap_page(page);
82 return ERR_PTR(-EIO);
84 return page;
87 #ifdef NTFS_RW
89 extern void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs);
91 #endif /* NTFS_RW */
93 #endif /* _LINUX_NTFS_AOPS_H */