/*
 * Common NFS I/O operations for the pnfs file based
 * layout drivers.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tom Haynes <loghyr@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/addr.h>
#include <linux/module.h>

#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
void pnfs_generic_rw_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs_put_client(hdr->ds_clp);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
	struct nfs_page *first = nfs_list_entry(data->pages.next);

	data->task.tk_status = 0;
	memcpy(&data->verf.verifier, &first->wb_verf,
	       sizeof(data->verf.verifier));
	data->verf.verifier.data[0]++; /* ensure verifier mismatch */
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
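
/*
 * A minimal sketch of why the verifier tweak above forces a resend (assumed
 * commit completion logic, not code from this file): the completion path
 * compares each request's stored verifier with the commit verifier, roughly
 *
 *	if (memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf)))
 *		nfs_mark_request_dirty(req);
 *
 * so copying first->wb_verf and then incrementing one byte guarantees a
 * mismatch, and every request on the list is scheduled to be rewritten.
 */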
void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	/* Note this may cause RPC to be resent */
	wdata->mds_ops->rpc_call_done(task, data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);
void pnfs_generic_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	pnfs_put_lseg(data->lseg);
	nfs_put_client(data->ds_clp);
	nfs_commitdata_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 * Note this must be called holding the inode (/cinfo) lock
 */
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
				  struct nfs_commit_info *cinfo)
{
	struct pnfs_layout_segment *freeme = NULL;

	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
		goto out;
	cinfo->ds->nwritten--;
	if (list_is_singular(&req->wb_list)) {
		struct pnfs_commit_bucket *bucket;

		bucket = list_first_entry(&req->wb_list,
					  struct pnfs_commit_bucket,
					  written);
		freeme = bucket->wlseg;
		bucket->wlseg = NULL;
	}
out:
	nfs_request_remove_commit_list(req, cinfo);
	pnfs_put_lseg_locked(freeme);
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
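
/*
 * Hedged usage sketch (assumed caller, not code from this file): per the
 * locking note above, callers are expected to invoke this while holding the
 * cinfo (inode) lock, roughly
 *
 *	spin_lock(cinfo->lock);
 *	pnfs_generic_clear_request_commit(req, cinfo);
 *	spin_unlock(cinfo->lock);
 */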
static int
pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst,
				  struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		if (!nfs_lock_request(req))
			continue;
		kref_get(&req->wb_kref);
		if (cond_resched_lock(cinfo->lock))
			list_safe_reset_next(req, tmp, wb_list);
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
	}
	return ret;
}
static int
pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
				 struct nfs_commit_info *cinfo,
				 int max)
{
	struct list_head *src = &bucket->written;
	struct list_head *dst = &bucket->committing;
	int ret;

	lockdep_assert_held(cinfo->lock);
	ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max);
	if (ret) {
		cinfo->ds->nwritten -= ret;
		cinfo->ds->ncommitting += ret;
		bucket->clseg = bucket->wlseg;
		if (list_empty(src))
			bucket->wlseg = NULL;
		else
			pnfs_get_lseg(bucket->clseg);
	}
	return ret;
}

/* Move reqs from written to committing lists, returning count
 * of number moved.
 */
int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
				   int max)
{
	int i, rv = 0, cnt;

	lockdep_assert_held(cinfo->lock);
	for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
		cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
						       cinfo, max);
		max -= cnt;
		rv += cnt;
	}
	return rv;
}
EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);
/* Pull everything off the committing lists and dump into @dst. */
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
				      struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *b;
	struct pnfs_layout_segment *freeme;
	int i;

	lockdep_assert_held(cinfo->lock);
restart:
	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
		if (pnfs_generic_transfer_commit_list(&b->written, dst,
						      cinfo, 0)) {
			freeme = b->wlseg;
			b->wlseg = NULL;
			spin_unlock(cinfo->lock);
			pnfs_put_lseg(freeme);
			spin_lock(cinfo->lock);
			goto restart;
		}
	}
	cinfo->ds->nwritten = 0;
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_bucket *bucket;
	struct pnfs_layout_segment *freeme;
	int i;

	for (i = idx; i < fl_cinfo->nbuckets; i++) {
		bucket = &fl_cinfo->buckets[i];
		if (list_empty(&bucket->committing))
			continue;
		nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i);
		spin_lock(cinfo->lock);
		freeme = bucket->clseg;
		bucket->clseg = NULL;
		spin_unlock(cinfo->lock);
		pnfs_put_lseg(freeme);
	}
}
static unsigned int
pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
			      struct list_head *list)
{
	struct pnfs_ds_commit_info *fl_cinfo;
	struct pnfs_commit_bucket *bucket;
	struct nfs_commit_data *data;
	int i;
	unsigned int nreq = 0;

	fl_cinfo = cinfo->ds;
	bucket = fl_cinfo->buckets;
	for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
		if (list_empty(&bucket->committing))
			continue;
		data = nfs_commitdata_alloc();
		if (!data)
			break;
		data->ds_commit_index = i;
		spin_lock(cinfo->lock);
		data->lseg = bucket->clseg;
		bucket->clseg = NULL;
		spin_unlock(cinfo->lock);
		list_add(&data->pages, list);
		nreq++;
	}

	/* Clean up on error */
	pnfs_generic_retry_commit(cinfo, i);
	return nreq;
}
/* This follows nfs_commit_list pretty closely */
int
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			     int how, struct nfs_commit_info *cinfo,
			     int (*initiate_commit)(struct nfs_commit_data *data,
						    int how))
{
	struct nfs_commit_data *data, *tmp;
	LIST_HEAD(list);
	unsigned int nreq = 0;

	if (!list_empty(mds_pages)) {
		data = nfs_commitdata_alloc();
		if (data != NULL) {
			data->lseg = NULL;
			list_add(&data->pages, &list);
			nreq++;
		} else {
			nfs_retry_commit(mds_pages, NULL, cinfo, 0);
			pnfs_generic_retry_commit(cinfo, 0);
			cinfo->completion_ops->error_cleanup(NFS_I(inode));
			return -ENOMEM;
		}
	}

	nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);

	if (nreq == 0) {
		cinfo->completion_ops->error_cleanup(NFS_I(inode));
		goto out;
	}

	atomic_add(nreq, &cinfo->mds->rpcs_out);

	list_for_each_entry_safe(data, tmp, &list, pages) {
		list_del_init(&data->pages);
		if (!data->lseg) {
			nfs_init_commit(data, mds_pages, NULL, cinfo);
			nfs_initiate_commit(NFS_CLIENT(inode), data,
					    NFS_PROTO(data->inode),
					    data->mds_ops, how, 0);
		} else {
			struct pnfs_commit_bucket *buckets;

			buckets = cinfo->ds->buckets;
			nfs_init_commit(data,
					&buckets[data->ds_commit_index].committing,
					data->lseg,
					cinfo);
			initiate_commit(data, how);
		}
	}
out:
	cinfo->ds->ncommitting = 0;
	return PNFS_ATTEMPTED;
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);
/*
 * Data servers can be mapped to different device ids.
 * nfs4_pnfs_ds reference counting
 *   - set to 1 on allocation
 *   - incremented when a device id maps a data server already in the cache.
 *   - decremented when deviceid is removed from the cache.
 */
static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
static LIST_HEAD(nfs4_data_server_cache);

static void
print_ds(struct nfs4_pnfs_ds *ds)
{
	if (ds == NULL) {
		printk(KERN_WARNING "%s NULL device\n", __func__);
		return;
	}
	printk(KERN_WARNING "        ds %s\n"
		"        ref count %d\n"
		"        client %p\n"
		"        cl_exchange_flags %x\n",
		ds->ds_remotestr,
		atomic_read(&ds->ds_count), ds->ds_clp,
		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}
static bool
same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
{
	struct sockaddr_in *a, *b;
	struct sockaddr_in6 *a6, *b6;

	if (addr1->sa_family != addr2->sa_family)
		return false;

	switch (addr1->sa_family) {
	case AF_INET:
		a = (struct sockaddr_in *)addr1;
		b = (struct sockaddr_in *)addr2;

		if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
		    a->sin_port == b->sin_port)
			return true;
		break;

	case AF_INET6:
		a6 = (struct sockaddr_in6 *)addr1;
		b6 = (struct sockaddr_in6 *)addr2;

		/* LINKLOCAL addresses must have matching scope_id */
		if (ipv6_addr_src_scope(&a6->sin6_addr) ==
		    IPV6_ADDR_SCOPE_LINKLOCAL &&
		    a6->sin6_scope_id != b6->sin6_scope_id)
			return false;

		if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
		    a6->sin6_port == b6->sin6_port)
			return true;
		break;

	default:
		dprintk("%s: unhandled address family: %u\n",
			__func__, addr1->sa_family);
		return false;
	}

	return false;
}
static bool
_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
			       const struct list_head *dsaddrs2)
{
	struct nfs4_pnfs_ds_addr *da1, *da2;

	/* step through both lists, comparing as we go */
	for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
	     da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
	     da1 != NULL && da2 != NULL;
	     da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
	     da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
		if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
				   (struct sockaddr *)&da2->da_addr))
			return false;
	}
	if (da1 == NULL && da2 == NULL)
		return true;

	return false;
}
/*
 * Lookup DS by addresses.  nfs4_ds_cache_lock is held
 */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(const struct list_head *dsaddrs)
{
	struct nfs4_pnfs_ds *ds;

	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
		if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
			return ds;
	return NULL;
}
static void destroy_ds(struct nfs4_pnfs_ds *ds)
{
	struct nfs4_pnfs_ds_addr *da;

	dprintk("--> %s\n", __func__);
	ifdebug(FACILITY)
		print_ds(ds);

	nfs_put_client(ds->ds_clp);

	while (!list_empty(&ds->ds_addrs)) {
		da = list_first_entry(&ds->ds_addrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	kfree(ds->ds_remotestr);
	kfree(ds);
}
void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
	if (atomic_dec_and_lock(&ds->ds_count,
				&nfs4_ds_cache_lock)) {
		list_del_init(&ds->ds_node);
		spin_unlock(&nfs4_ds_cache_lock);
		destroy_ds(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);
/*
 * Create a string with a human readable address and port to avoid
 * complicated setup around many dprintks.
 */
static char *
nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da;
	char *remotestr;
	size_t len;
	char *p;

	len = 3;        /* '{', '}' and eol */
	list_for_each_entry(da, dsaddrs, da_node) {
		len += strlen(da->da_remotestr) + 1;    /* string plus comma */
	}

	remotestr = kzalloc(len, gfp_flags);
	if (!remotestr)
		return NULL;

	p = remotestr;
	*(p++) = '{';
	len--;
	list_for_each_entry(da, dsaddrs, da_node) {
		size_t ll = strlen(da->da_remotestr);

		if (ll > len)
			goto out_err;

		memcpy(p, da->da_remotestr, ll);
		p += ll;
		len -= ll;

		if (len < 1)
			goto out_err;
		(*p++) = ',';
		len--;
	}
	if (len < 2)
		goto out_err;
	*(p++) = '}';
	*p = '\0';
	return remotestr;

out_err:
	kfree(remotestr);
	return NULL;
}
/*
 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
 * uncached and return cached struct nfs4_pnfs_ds.
 */
struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
	char *remotestr;

	if (list_empty(dsaddrs)) {
		dprintk("%s: no addresses defined\n", __func__);
		goto out;
	}

	ds = kzalloc(sizeof(*ds), gfp_flags);
	if (!ds)
		goto out;

	/* this is only used for debugging, so it's ok if its NULL */
	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);

	spin_lock(&nfs4_ds_cache_lock);
	tmp_ds = _data_server_lookup_locked(dsaddrs);
	if (tmp_ds == NULL) {
		INIT_LIST_HEAD(&ds->ds_addrs);
		list_splice_init(dsaddrs, &ds->ds_addrs);
		ds->ds_remotestr = remotestr;
		atomic_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nfs4_data_server_cache);
		dprintk("%s add new data server %s\n", __func__,
			ds->ds_remotestr);
	} else {
		kfree(remotestr);
		kfree(ds);
		atomic_inc(&tmp_ds->ds_count);
		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_remotestr,
			atomic_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nfs4_ds_cache_lock);
out:
	return ds;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
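
/*
 * Hedged usage sketch (assumed caller, not code from this file): a layout
 * driver that has decoded a multipath address list pairs nfs4_pnfs_ds_add()
 * with nfs4_pnfs_ds_put(), matching the reference counting rules above:
 *
 *	ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
 *	...
 *	nfs4_pnfs_ds_put(ds);
 *
 * nfs4_pnfs_ds_add() either creates a cache entry with ds_count = 1 or bumps
 * ds_count on an existing entry; the final nfs4_pnfs_ds_put() frees it.
 */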
static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
{
	might_sleep();
	wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
			TASK_KILLABLE);
}

static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
	smp_mb__before_atomic();
	clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
	smp_mb__after_atomic();
	wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
}
static struct nfs_client *(*get_v3_ds_connect)(
			struct nfs_client *mds_clp,
			const struct sockaddr *ds_addr,
			int ds_addrlen,
			int ds_proto,
			unsigned int ds_timeo,
			unsigned int ds_retrans,
			rpc_authflavor_t au_flavor);

static bool load_v3_ds_connect(void)
{
	if (!get_v3_ds_connect) {
		get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
		WARN_ON_ONCE(!get_v3_ds_connect);
	}

	return(get_v3_ds_connect != NULL);
}

void __exit nfs4_pnfs_v3_ds_connect_unload(void)
{
	if (get_v3_ds_connect) {
		symbol_put(nfs3_set_ds_client);
		get_v3_ds_connect = NULL;
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_v3_ds_connect_unload);
static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
				 struct nfs4_pnfs_ds *ds,
				 unsigned int timeo,
				 unsigned int retrans,
				 rpc_authflavor_t au_flavor)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s au_flavor %d\n", __func__,
		ds->ds_remotestr, au_flavor);

	if (!load_v3_ds_connect())
		goto out;

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		clp = get_v3_ds_connect(mds_srv->nfs_client,
				(struct sockaddr *)&da->da_addr,
				da->da_addrlen, IPPROTO_TCP,
				timeo, retrans, au_flavor);
		if (!IS_ERR(clp))
			break;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	ds->ds_clp = clp;
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}
static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
				 struct nfs4_pnfs_ds *ds,
				 unsigned int timeo,
				 unsigned int retrans,
				 u32 minor_version,
				 rpc_authflavor_t au_flavor)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
		au_flavor);

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		clp = nfs4_set_ds_client(mds_srv->nfs_client,
					(struct sockaddr *)&da->da_addr,
					da->da_addrlen, IPPROTO_TCP,
					timeo, retrans, minor_version,
					au_flavor);
		if (!IS_ERR(clp))
			break;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
	if (status)
		goto out_put;

	smp_wmb();
	ds->ds_clp = clp;
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
out_put:
	nfs_put_client(clp);
	goto out;
}
/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only supports IPv4 and IPv6 addresses.
 * If connection fails, make devid unavailable.
 */
void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
			  struct nfs4_deviceid_node *devid, unsigned int timeo,
			  unsigned int retrans, u32 version,
			  u32 minor_version, rpc_authflavor_t au_flavor)
{
	if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
		int err = 0;

		if (version == 3) {
			err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
						       retrans, au_flavor);
		} else if (version == 4) {
			err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
						       retrans, minor_version,
						       au_flavor);
		} else {
			dprintk("%s: unsupported DS version %d\n", __func__,
				version);
			err = -EPROTONOSUPPORT;
		}

		if (err)
			nfs4_mark_deviceid_unavailable(devid);
		nfs4_clear_ds_conn_bit(ds);
	} else {
		nfs4_wait_ds_connect(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
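
/*
 * Hedged call-site sketch (assumed, not code from this file): a file layout
 * driver connecting a DS over NFSv4.1 would call the helper above roughly as
 *
 *	nfs4_pnfs_ds_connect(mds_srv, ds, devid, dataserver_timeo,
 *			     dataserver_retrans, 4, 1, RPC_AUTH_UNIX);
 *
 * where dataserver_timeo, dataserver_retrans and the auth flavor are
 * driver-specific assumptions; version and minor_version select the
 * _nfs4_pnfs_v3_ds_connect or _nfs4_pnfs_v4_ds_connect path above.
 */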
/*
 * Currently only supports ipv4, ipv6 and one multi-path address.
 */
struct nfs4_pnfs_ds_addr *
nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = NULL;
	char *buf, *portstr;
	__be16 port;
	int nlen, rlen;
	int tmp[2];
	__be32 *p;
	char *netid, *match_netid;
	size_t len, match_netid_len;
	char *startsep = "";
	char *endsep = "";

	/* r_netid */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_err;
	nlen = be32_to_cpup(p++);

	p = xdr_inline_decode(xdr, nlen);
	if (unlikely(!p))
		goto out_err;

	netid = kmalloc(nlen+1, gfp_flags);
	if (unlikely(!netid))
		goto out_err;

	netid[nlen] = '\0';
	memcpy(netid, p, nlen);

	/* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_free_netid;
	rlen = be32_to_cpup(p);

	p = xdr_inline_decode(xdr, rlen);
	if (unlikely(!p))
		goto out_free_netid;

	/* port is ".ABC.DEF", 8 chars max */
	if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
		dprintk("%s: Invalid address, length %d\n", __func__,
			rlen);
		goto out_free_netid;
	}

	buf = kmalloc(rlen + 1, gfp_flags);
	if (!buf) {
		dprintk("%s: Not enough memory\n", __func__);
		goto out_free_netid;
	}
	buf[rlen] = '\0';
	memcpy(buf, p, rlen);

	/* replace port '.' with '-' */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot in port\n",
			__func__);
		goto out_free_buf;
	}
	*portstr = '-';

	/* find '.' between address and port */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot between address and "
			"port\n", __func__);
		goto out_free_buf;
	}
	*portstr = '\0';

	da = kzalloc(sizeof(*da), gfp_flags);
	if (unlikely(!da))
		goto out_free_buf;

	INIT_LIST_HEAD(&da->da_node);

	if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
		      sizeof(da->da_addr))) {
		dprintk("%s: error parsing address %s\n", __func__, buf);
		goto out_free_da;
	}

	portstr++;
	sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
	port = htons((tmp[0] << 8) | (tmp[1]));

	switch (da->da_addr.ss_family) {
	case AF_INET:
		((struct sockaddr_in *)&da->da_addr)->sin_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in);
		match_netid = "tcp";
		match_netid_len = 3;
		break;

	case AF_INET6:
		((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in6);
		match_netid = "tcp6";
		match_netid_len = 4;
		startsep = "[";
		endsep = "]";
		break;

	default:
		dprintk("%s: unsupported address family: %u\n",
			__func__, da->da_addr.ss_family);
		goto out_free_da;
	}

	if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
		dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
			__func__, netid, match_netid);
		goto out_free_da;
	}

	/* save human readable address */
	len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
	da->da_remotestr = kzalloc(len, gfp_flags);

	/* NULL is ok, only used for dprintk */
	if (da->da_remotestr)
		snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
			 buf, endsep, ntohs(port));

	dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
	kfree(buf);
	kfree(netid);
	return da;

out_free_da:
	kfree(da);
out_free_buf:
	dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
	kfree(buf);
out_free_netid:
	kfree(netid);
out_err:
	return NULL;
}
EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
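
/*
 * Worked example of the r_addr format handled above (illustrative values,
 * not from this file): per RFC 5665, a "tcp" universal address appends the
 * port as two decimal octets, e.g.
 *
 *	r_netid = "tcp", r_addr = "192.168.1.50.8.1"
 *
 * The decoder splits off ".8.1" and computes port = (8 << 8) | 1 = 2049,
 * producing the human readable string "192.168.1.50:2049".
 */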
void
pnfs_layout_mark_request_commit(struct nfs_page *req,
				struct pnfs_layout_segment *lseg,
				struct nfs_commit_info *cinfo,
				u32 ds_commit_idx)
{
	struct list_head *list;
	struct pnfs_commit_bucket *buckets;

	spin_lock(cinfo->lock);
	buckets = cinfo->ds->buckets;
	list = &buckets[ds_commit_idx].written;
	if (list_empty(list)) {
		/* Non-empty buckets hold a reference on the lseg.  That ref
		 * is normally transferred to the COMMIT call and released
		 * there.  It could also be released if the last req is pulled
		 * off due to a rewrite, in which case it will be done in
		 * pnfs_common_clear_request_commit
		 */
		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
	}
	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
	cinfo->ds->nwritten++;
	spin_unlock(cinfo->lock);

	nfs_request_add_commit_list(req, list, cinfo);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);