/*
 *  Device operations for the pnfs client.
 *
 *  Copyright (c) 2002
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *  Garth Goodson   <Garth.Goodson@netapp.com>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
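
/*
 * Hash the raw device ID bytes down to one of the NFS4_DEVICE_ID_HASH_SIZE
 * cache buckets.
 */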
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}
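
/*
 * Look up a device ID in its hash bucket. Called under rcu_read_lock();
 * no reference is taken on the returned node.
 */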
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				break;
		}
	return NULL;
}
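
/*
 * Issue a GETDEVICEINFO for @dev_id and have the layout driver decode the
 * reply into a newly allocated nfs4_deviceid_node.
 */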
static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		const struct cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	if (server->pnfs_curr_ld->max_deviceinfo_size &&
	    server->pnfs_curr_ld->max_deviceinfo_size < max_resp_sz)
		max_resp_sz = server->pnfs_curr_ld->max_deviceinfo_size;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
						      gfp_flags);
	if (d && pdev->nocache)
		set_bit(NFS_DEVICEID_NOCACHE, &d->flags);

out_free_pages:
	for (i = 0; i < max_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @server nfs_server associated with deviceid
 * @id deviceid to look up
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}
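
/*
 * Find or create a deviceid node: try the cache first, fall back to a
 * GETDEVICEINFO call, then re-check the cache under nfs4_deviceid_lock
 * before inserting so that a concurrent lookup cannot add a duplicate.
 */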
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		goto found;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new) {
		trace_nfs4_find_deviceid(server, id, -ENOENT);
		return new;
	}

	spin_lock(&nfs4_deviceid_lock);
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
	} else {
		atomic_inc(&new->ref);
		hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
		spin_unlock(&nfs4_deviceid_lock);
		d = new;
	}
found:
	trace_nfs4_find_deviceid(server, id, 0);
	return d;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

/*
 * Remove a deviceid from cache
 *
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 *
 * The node is unhashed and the cache's reference to it is dropped; it is
 * freed once its reference count reaches zero.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
			 const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial ref set in nfs4_init_deviceid_node */
	nfs4_put_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
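
/*
 * Initialise a layout driver's freshly allocated deviceid node: hash list
 * linkage, owning layout driver and nfs_client, the device ID itself, and
 * an initial reference count of one.
 */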
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (test_bit(NFS_DEVICEID_NOCACHE, &d->flags)) {
		if (atomic_add_unless(&d->ref, -1, 2))
			return false;
		nfs4_delete_deviceid(d->ld, d->nfs_client, &d->deviceid);
	}
	if (!atomic_dec_and_test(&d->ref))
		return false;
	trace_nfs4_deviceid_free(d->nfs_client, &d->deviceid);
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
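
/*
 * Clear the NFS_DEVICEID_UNAVAILABLE flag so the deviceid may be used again.
 */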
void
nfs4_mark_deviceid_available(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_available);
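
/*
 * Record the time of failure and flag the deviceid as unavailable.
 */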
void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	smp_mb__before_atomic();
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	smp_mb__after_atomic();
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);
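
/*
 * Return true if the deviceid was marked unavailable within the last
 * PNFS_DEVICE_RETRY_TIMEOUT jiffies; once that window has expired the
 * flag is cleared and the device may be retried.
 */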
bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
		smp_mb__after_atomic();
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);
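
/*
 * Unhash every deviceid belonging to @clp in one cache bucket while holding
 * nfs4_deviceid_lock, collect the nodes on a temporary list, then drop the
 * cache's reference to each of them outside the lock.
 */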
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		nfs4_put_deviceid_node(d);
	}
}
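
/*
 * Purge every cached deviceid that belongs to @clp. This is a no-op unless
 * the client negotiated EXCHGID4_FLAG_USE_PNFS_MDS at EXCHANGE_ID time.
 */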
void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}