/*
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *          David Howells <dhowells@redhat.com>
 */
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/circ_buf.h>
20 #include <linux/sched.h>
24 * Handle invalidation of an mmap'd file. We invalidate all the PTEs referring
25 * to the pages in this file's pagecache, forcing the kernel to go through
26 * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
29 void afs_invalidate_mmap_work(struct work_struct
*work
)
31 struct afs_vnode
*vnode
= container_of(work
, struct afs_vnode
, cb_work
);
33 unmap_mapping_pages(vnode
->netfs
.inode
.i_mapping
, 0, 0, false);
36 static void afs_volume_init_callback(struct afs_volume
*volume
)
38 struct afs_vnode
*vnode
;
40 down_read(&volume
->open_mmaps_lock
);
42 list_for_each_entry(vnode
, &volume
->open_mmaps
, cb_mmap_link
) {
43 if (vnode
->cb_v_check
!= atomic_read(&volume
->cb_v_break
)) {
44 atomic64_set(&vnode
->cb_expires_at
, AFS_NO_CB_PROMISE
);
45 queue_work(system_unbound_wq
, &vnode
->cb_work
);
49 up_read(&volume
->open_mmaps_lock
);
53 * Allow the fileserver to request callback state (re-)initialisation.
54 * Unfortunately, UUIDs are not guaranteed unique.
56 void afs_init_callback_state(struct afs_server
*server
)
58 struct afs_server_entry
*se
;
60 down_read(&server
->cell
->vs_lock
);
62 list_for_each_entry(se
, &server
->volumes
, slink
) {
63 se
->cb_expires_at
= AFS_NO_CB_PROMISE
;
64 se
->volume
->cb_expires_at
= AFS_NO_CB_PROMISE
;
65 trace_afs_cb_v_break(se
->volume
->vid
, atomic_read(&se
->volume
->cb_v_break
),
66 afs_cb_break_for_s_reinit
);
67 if (!list_empty(&se
->volume
->open_mmaps
))
68 afs_volume_init_callback(se
->volume
);
71 up_read(&server
->cell
->vs_lock
);
75 * actually break a callback
77 void __afs_break_callback(struct afs_vnode
*vnode
, enum afs_cb_break_reason reason
)
81 clear_bit(AFS_VNODE_NEW_CONTENT
, &vnode
->flags
);
82 if (atomic64_xchg(&vnode
->cb_expires_at
, AFS_NO_CB_PROMISE
) != AFS_NO_CB_PROMISE
) {
84 vnode
->cb_v_check
= atomic_read(&vnode
->volume
->cb_v_break
);
85 afs_clear_permits(vnode
);
87 if (vnode
->lock_state
== AFS_VNODE_LOCK_WAITING_FOR_CB
)
88 afs_lock_may_be_available(vnode
);
90 if (reason
!= afs_cb_break_for_deleted
&&
91 vnode
->status
.type
== AFS_FTYPE_FILE
&&
92 atomic_read(&vnode
->cb_nr_mmap
))
93 queue_work(system_unbound_wq
, &vnode
->cb_work
);
95 trace_afs_cb_break(&vnode
->fid
, vnode
->cb_break
, reason
, true);
97 trace_afs_cb_break(&vnode
->fid
, vnode
->cb_break
, reason
, false);
101 void afs_break_callback(struct afs_vnode
*vnode
, enum afs_cb_break_reason reason
)
103 write_seqlock(&vnode
->cb_lock
);
104 __afs_break_callback(vnode
, reason
);
105 write_sequnlock(&vnode
->cb_lock
);
109 * Look up a volume by volume ID under RCU conditions.
111 static struct afs_volume
*afs_lookup_volume_rcu(struct afs_cell
*cell
,
114 struct afs_volume
*volume
= NULL
;
119 /* Unfortunately, rbtree walking doesn't give reliable results
120 * under just the RCU read lock, so we have to check for
123 seq
++; /* 2 on the 1st/lockless path, otherwise odd */
124 read_seqbegin_or_lock(&cell
->volume_lock
, &seq
);
126 p
= rcu_dereference_raw(cell
->volumes
.rb_node
);
128 volume
= rb_entry(p
, struct afs_volume
, cell_node
);
130 if (volume
->vid
< vid
)
131 p
= rcu_dereference_raw(p
->rb_left
);
132 else if (volume
->vid
> vid
)
133 p
= rcu_dereference_raw(p
->rb_right
);
139 if (volume
&& afs_try_get_volume(volume
, afs_volume_trace_get_callback
))
141 if (!need_seqretry(&cell
->volume_lock
, seq
))
143 seq
|= 1; /* Want a lock next time */
146 done_seqretry(&cell
->volume_lock
, seq
);
151 * Allow the fileserver to break callbacks at the volume-level. This is
152 * typically done when, for example, a R/W volume is snapshotted to a R/O
153 * volume (the only way to change an R/O volume). It may also, however, happen
154 * when a volserver takes control of a volume (offlining it, moving it, etc.).
156 * Every file in that volume will need to be reevaluated.
158 static void afs_break_volume_callback(struct afs_server
*server
,
159 struct afs_volume
*volume
)
162 struct afs_server_list
*slist
= rcu_dereference(volume
->servers
);
163 unsigned int i
, cb_v_break
;
165 write_lock(&volume
->cb_v_break_lock
);
167 for (i
= 0; i
< slist
->nr_servers
; i
++)
168 if (slist
->servers
[i
].server
== server
)
169 slist
->servers
[i
].cb_expires_at
= AFS_NO_CB_PROMISE
;
170 volume
->cb_expires_at
= AFS_NO_CB_PROMISE
;
172 cb_v_break
= atomic_inc_return_release(&volume
->cb_v_break
);
173 trace_afs_cb_v_break(volume
->vid
, cb_v_break
, afs_cb_break_for_volume_callback
);
175 write_unlock(&volume
->cb_v_break_lock
);
178 if (!list_empty(&volume
->open_mmaps
))
179 afs_volume_init_callback(volume
);
183 * allow the fileserver to explicitly break one callback
185 * - the backing file is changed
186 * - a lock is released
188 static void afs_break_one_callback(struct afs_server
*server
,
189 struct afs_volume
*volume
,
192 struct super_block
*sb
;
193 struct afs_vnode
*vnode
;
196 /* See if we can find a matching inode - even an I_NEW inode needs to
197 * be marked as it can have its callback broken before we finish
198 * setting up the local inode.
200 sb
= rcu_dereference(volume
->sb
);
204 inode
= find_inode_rcu(sb
, fid
->vnode
, afs_ilookup5_test_by_fid
, fid
);
206 vnode
= AFS_FS_I(inode
);
207 afs_break_callback(vnode
, afs_cb_break_for_callback
);
209 trace_afs_cb_miss(fid
, afs_cb_break_for_callback
);
213 static void afs_break_some_callbacks(struct afs_server
*server
,
214 struct afs_callback_break
*cbb
,
217 struct afs_callback_break
*residue
= cbb
;
218 struct afs_volume
*volume
;
219 afs_volid_t vid
= cbb
->fid
.vid
;
223 volume
= afs_lookup_volume_rcu(server
->cell
, vid
);
224 if (cbb
->fid
.vnode
== 0 && cbb
->fid
.unique
== 0) {
225 afs_break_volume_callback(server
, volume
);
228 memmove(cbb
, cbb
+ 1, sizeof(*cbb
) * *_count
);
230 /* TODO: Find all matching volumes if we couldn't match the server and
234 for (i
= *_count
; i
> 0; cbb
++, i
--) {
235 if (cbb
->fid
.vid
== vid
) {
236 _debug("- Fid { vl=%08llx n=%llu u=%u }",
242 afs_break_one_callback(server
, volume
, &cbb
->fid
);
250 afs_put_volume(volume
, afs_volume_trace_put_callback
);
254 * allow the fileserver to break callback promises
256 void afs_break_callbacks(struct afs_server
*server
, size_t count
,
257 struct afs_callback_break
*callbacks
)
259 _enter("%p,%zu,", server
, count
);
261 ASSERT(server
!= NULL
);
264 afs_break_some_callbacks(server
, callbacks
, &count
);