// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * mmap.c
 *
 * Code to deal with the mess that is clustered mmap.
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/signal.h>
#include <linux/rbtree.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "aops.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "mmap.h"
#include "super.h"
#include "ocfs2_trace.h"
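
/*
 * ->fault handler: block signals around filemap_fault() so the read
 * path, which may take cluster locks, is not interrupted part-way
 * through, then trace the fault against the inode's block number.
 */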
static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	sigset_t oldset;
	vm_fault_t ret;

	ocfs2_block_signals(&oldset);
	ret = filemap_fault(vmf);
	ocfs2_unblock_signals(&oldset);

	trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno,
			  vma, vmf->page, vmf->pgoff);
	return ret;
}
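
/*
 * Called from ocfs2_page_mkwrite() with the inode cluster lock and
 * ip_alloc_sem already held.  Reuses the buffered write path
 * (ocfs2_write_begin_nolock()/ocfs2_write_end_nolock()) to allocate
 * space behind the faulting page.  Returns VM_FAULT_LOCKED with the
 * page locked on success, VM_FAULT_NOPAGE to make the VM retry the
 * fault, or an error code from vmf_error().
 */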
static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
			struct buffer_head *di_bh, struct page *page)
{
	int err;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	loff_t pos = page_offset(page);
	unsigned int len = PAGE_SIZE;
	pgoff_t last_index;
	struct folio *locked_folio = NULL;
	void *fsdata;
	loff_t size = i_size_read(inode);

	last_index = (size - 1) >> PAGE_SHIFT;

	/*
	 * There are cases that lead to the page no longer belonging to
	 * the mapping:
	 * 1) pagecache truncates locally due to memory pressure.
	 * 2) pagecache truncates when another node takes an EX lock
	 *    against the inode lock. See ocfs2_data_convert_worker().
	 *
	 * The i_size check doesn't catch the case where nodes truncated
	 * and then re-extended the file. We'll re-check the page mapping
	 * after taking the page lock inside of ocfs2_write_begin_nolock().
	 *
	 * Let the VM retry in these cases.
	 */
	if ((page->mapping != inode->i_mapping) ||
	    (!PageUptodate(page)) ||
	    (page_offset(page) >= size))
		goto out;

	/*
	 * Call ocfs2_write_begin() and ocfs2_write_end() to take
	 * advantage of the allocation code there. We pass a write
	 * length of the whole page (chopped to i_size) to make sure
	 * the whole thing is allocated.
	 *
	 * Since we know the page is up to date, we don't have to
	 * worry about ocfs2_write_begin() skipping some buffer reads
	 * because the "write" would invalidate their data.
	 */
	if (page->index == last_index)
		len = ((size - 1) & ~PAGE_MASK) + 1;

	err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
				       &locked_folio, &fsdata, di_bh, page);
	if (err) {
		if (err != -ENOSPC)
			mlog_errno(err);
		ret = vmf_error(err);
		goto out;
	}

	if (!locked_folio) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	err = ocfs2_write_end_nolock(mapping, pos, len, len, fsdata);
	BUG_ON(err != len);
	ret = VM_FAULT_LOCKED;
out:
	return ret;
}
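
/*
 * ->page_mkwrite handler: enter pagefault freeze protection with
 * sb_start_pagefault(), block signals, take the inode cluster lock
 * EX so another node cannot truncate under us, then take
 * ip_alloc_sem before handing off to __ocfs2_page_mkwrite().
 * Everything is dropped again in reverse order on the way out.
 */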
static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct buffer_head *di_bh = NULL;
	sigset_t oldset;
	int err;
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	ocfs2_block_signals(&oldset);

	/*
	 * The cluster locks taken will block a truncate from another
	 * node. Taking the data lock will also ensure that we don't
	 * attempt page truncation as part of a downconvert.
	 */
	err = ocfs2_inode_lock(inode, &di_bh, 1);
	if (err < 0) {
		mlog_errno(err);
		ret = vmf_error(err);
		goto out;
	}

	/*
	 * The alloc sem should be enough to serialize with
	 * ocfs2_truncate_file() changing i_size as well as any thread
	 * modifying the inode btree.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

out:
	ocfs2_unblock_signals(&oldset);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct ocfs2_file_vm_ops = {
	.fault		= ocfs2_fault,
	.page_mkwrite	= ocfs2_page_mkwrite,
};
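
/*
 * ->mmap method for ocfs2 files; it is wired into ocfs2's
 * file_operations in file.c.  The inode lock taken here only serves
 * to update atime under the cluster lock; the mapping itself is
 * always set up, and faults are handled by ocfs2_file_vm_ops above.
 */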
int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret = 0, lock_level = 0;

	ret = ocfs2_inode_lock_atime(file_inode(file),
				     file->f_path.mnt, &lock_level, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}
	ocfs2_inode_unlock(file_inode(file), lock_level);
out:
	vma->vm_ops = &ocfs2_file_vm_ops;
	return 0;
}