/*
 * Ceph cache definitions.
 *
 *  Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 *  Written by Milosz Tanski (milosz@adfin.com)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */
#include "super.h"
#include "cache.h"

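/*
 * Auxiliary data stored alongside each inode cookie; fscache hands it
 * back through the ->check_aux() callback so we can tell whether the
 * cached copy still matches the inode.
 */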
struct ceph_aux_inode {
	u64		version;
	struct timespec	mtime;
};

struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};

static DEFINE_MUTEX(ceph_fscache_lock);
static LIST_HEAD(ceph_fscache_list);

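/*
 * One entry per registered filesystem cookie; the list lets us detect a
 * second mount trying to register the same fsid/uniquifier pair.
 */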
struct ceph_fscache_entry {
	struct list_head list;
	struct fscache_cookie *fscache;
	size_t uniq_len;
	/* The following members must be last */
	struct ceph_fsid fsid;
	char uniquifier[0];
};

static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
	.name		= "CEPH.fsid",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};

int __init ceph_fscache_register(void)
{
	return fscache_register_netfs(&ceph_cache_netfs);
}

void ceph_fscache_unregister(void)
{
	fscache_unregister_netfs(&ceph_cache_netfs);
}

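/*
 * Acquire the per-filesystem fscache cookie, keyed by the cluster fsid
 * plus any "fsc=" uniquifier from the mount options.  Registration is
 * refused if another mount has already registered the same key.
 */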
int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
{
	const struct ceph_fsid *fsid = &fsc->client->fsid;
	const char *fscache_uniq = fsc->mount_options->fscache_uniq;
	size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
	struct ceph_fscache_entry *ent;
	int err = 0;

	mutex_lock(&ceph_fscache_lock);
	list_for_each_entry(ent, &ceph_fscache_list, list) {
		if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
			continue;
		if (ent->uniq_len != uniq_len)
			continue;
		if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
			continue;

		pr_err("fscache cookie already registered for fsid %pU\n", fsid);
		pr_err("  use fsc=%%s mount option to specify a uniquifier\n");
		err = -EBUSY;
		goto out_unlock;
	}

	ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
	if (!ent) {
		err = -ENOMEM;
		goto out_unlock;
	}

	memcpy(&ent->fsid, fsid, sizeof(*fsid));
	if (uniq_len > 0) {
		memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
		ent->uniq_len = uniq_len;
	}

	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
					      &ceph_fscache_fsid_object_def,
					      &ent->fsid, sizeof(ent->fsid) + uniq_len,
					      NULL, 0,
					      fsc, 0, true);

	if (fsc->fscache) {
		ent->fscache = fsc->fscache;
		list_add_tail(&ent->list, &ceph_fscache_list);
	} else {
		kfree(ent);
		pr_err("unable to register fscache cookie for fsid %pU\n",
		       fsid);
		/* all other fs ignore this error */
	}
out_unlock:
	mutex_unlock(&ceph_fscache_lock);
	return err;
}

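/*
 * Compare the auxiliary data (version, mtime) and object size recorded
 * in the cache against the inode's current state to decide whether the
 * cached object is still usable.
 */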
static enum fscache_checkaux ceph_fscache_inode_check_aux(
	void *cookie_netfs_data, const void *data, uint16_t dlen,
	loff_t object_size)
{
	struct ceph_aux_inode aux;
	struct ceph_inode_info* ci = cookie_netfs_data;
	struct inode* inode = &ci->vfs_inode;

	if (dlen != sizeof(aux) ||
	    i_size_read(inode) != object_size)
		return FSCACHE_CHECKAUX_OBSOLETE;

	memset(&aux, 0, sizeof(aux));
	aux.version = ci->i_version;
	aux.mtime = inode->i_mtime;

	if (memcmp(data, &aux, sizeof(aux)) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	dout("ceph inode 0x%p cached okay\n", ci);
	return FSCACHE_CHECKAUX_OKAY;
}

static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
	.name		= "CEPH.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= ceph_fscache_inode_check_aux,
};

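/*
 * Acquire a data cookie for a regular file, recording the inode's
 * version and mtime as auxiliary data for later coherency checks.
 */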
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_aux_inode aux;

	/* No caching for filesystem */
	if (!fsc->fscache)
		return;

	/* Only cache for regular files that are read only */
	if (!S_ISREG(inode->i_mode))
		return;

	inode_lock_nested(inode, I_MUTEX_CHILD);
	if (!ci->fscache) {
		memset(&aux, 0, sizeof(aux));
		aux.version = ci->i_version;
		aux.mtime = inode->i_mtime;
		ci->fscache = fscache_acquire_cookie(fsc->fscache,
						     &ceph_fscache_inode_object_def,
						     &ci->i_vino, sizeof(ci->i_vino),
						     &aux, sizeof(aux),
						     ci, i_size_read(inode), false);
	}
	inode_unlock(inode);
}

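/*
 * Drop the inode's cookie: uncache any pages still associated with it,
 * then relinquish the cookie itself.
 */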
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
{
	struct fscache_cookie* cookie;

	if ((cookie = ci->fscache) == NULL)
		return;

	ci->fscache = NULL;

	fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
	fscache_relinquish_cookie(cookie, &ci->i_vino, false);
}

static bool ceph_fscache_can_enable(void *data)
{
	struct inode *inode = data;
	return !inode_is_open_for_write(inode);
}

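/*
 * Enable or disable the inode's cookie depending on how the inode is
 * being used: writers disable the cache and drop its pages, read-only
 * use may (re)enable it.
 */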
void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!fscache_cookie_valid(ci->fscache))
		return;

	if (inode_is_open_for_write(inode)) {
		dout("fscache_file_set_cookie %p %p disabling cache\n",
		     inode, filp);
		fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
		fscache_uncache_all_inode_pages(ci->fscache, inode);
	} else {
		fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
				      ceph_fscache_can_enable, inode);
		if (fscache_cookie_enabled(ci->fscache)) {
			dout("fscache_file_set_cookie %p %p enabling cache\n",
			     inode, filp);
		}
	}
}

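/*
 * Completion callback for fscache reads: mark the page uptodate on
 * success and unlock it either way.
 */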
static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

static inline bool cache_valid(struct ceph_inode_info *ci)
{
	return ci->i_fscache_gen == ci->i_rdcache_gen;
}

/* Attempt to read a page from the fscache.
 *
 * This function is called from the readpage_nounlock context. DO NOT attempt to
 * unlock the page here (or in the callback).
 */
int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(ci->fscache, page,
					 ceph_readpage_from_fscache_complete, NULL,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* Page found */
		dout("page read submitted\n");
		return 0;
	case -ENOBUFS: /* Pages were not found, and can't be */
	case -ENODATA: /* Pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}

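/*
 * Readahead counterpart of ceph_readpage_from_fscache(): ask fscache to
 * satisfy as many of the requested pages as it can.
 */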
int ceph_readpages_from_fscache(struct inode *inode,
				struct address_space *mapping,
				struct list_head *pages,
				unsigned *nr_pages)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
					  ceph_readpage_from_fscache_complete,
					  NULL, mapping_gfp_mask(mapping));

	switch (ret) {
	case 0: /* All pages found */
		dout("all-page read submitted\n");
		return 0;
	case -ENOBUFS: /* Some pages were not found, and can't be */
	case -ENODATA: /* some pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}

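/*
 * Store a freshly read page in the cache; if the write cannot be
 * queued, drop the page from the cache again.
 */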
void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!PageFsCache(page))
		return;

	if (!cache_valid(ci))
		return;

	ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
				 GFP_KERNEL);
	if (ret)
		fscache_uncache_page(ci->fscache, page);
}

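/*
 * A page is being invalidated: wait for any outstanding cache write to
 * it, then remove it from the cache.
 */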
void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!PageFsCache(page))
		return;

	fscache_wait_on_page_write(ci->fscache, page);
	fscache_uncache_page(ci->fscache, page);
}

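/*
 * Relinquish the filesystem cookie and remove the matching entry from
 * the global registration list.
 */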
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
	if (fscache_cookie_valid(fsc->fscache)) {
		struct ceph_fscache_entry *ent;
		bool found = false;

		mutex_lock(&ceph_fscache_lock);
		list_for_each_entry(ent, &ceph_fscache_list, list) {
			if (ent->fscache == fsc->fscache) {
				list_del(&ent->list);
				kfree(ent);
				found = true;
				break;
			}
		}
		WARN_ON_ONCE(!found);
		mutex_unlock(&ceph_fscache_lock);

		__fscache_relinquish_cookie(fsc->fscache, NULL, false);
	}
	fsc->fscache = NULL;
}

/*
 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
	if (cache_valid(ci))
		return;

	/* reuse i_truncate_mutex. There should be no pending
	 * truncate while the caller holds CEPH_CAP_FILE_RD */
	mutex_lock(&ci->i_truncate_mutex);
	if (!cache_valid(ci)) {
		if (fscache_check_consistency(ci->fscache, &ci->i_vino))
			fscache_invalidate(ci->fscache);
		spin_lock(&ci->i_ceph_lock);
		ci->i_fscache_gen = ci->i_rdcache_gen;
		spin_unlock(&ci->i_ceph_lock);
	}
	mutex_unlock(&ci->i_truncate_mutex);
}