/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */
#include "includes.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"
#include "util_tdb.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING
/* The open brlock.tdb database. */

static struct db_context *brlock_db;
struct byte_range_lock {
	struct files_struct *fsp;
	TALLOC_CTX *req_mem_ctx;
	const struct GUID *req_guid;
	unsigned int num_locks;
	bool modified;
	struct lock_struct *lock_data;
	struct db_record *record;
};
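/*
 * Editorial note with a usage sketch (not part of the original file):
 * a struct byte_range_lock is normally obtained via brl_get_locks(),
 * which leaves the underlying dbwrap record locked. Mutators set
 * "modified"; freeing the struct runs a destructor that flushes the
 * lock array back to brlock.tdb and releases the record. "fsp" below
 * is a hypothetical open file handle.
 */
#if 0
	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck != NULL) {
		/* ... brl_lock()/brl_unlock() against br_lck ... */
		TALLOC_FREE(br_lck);	/* flushes and unlocks the record */
	}
#endif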
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/
static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	struct server_id_buf tmp;

	DBG_DEBUG("[%u]: smblctx = %"PRIu64", tid = %"PRIu32", pid = %s, "
		  "start = %"PRIu64", size = %"PRIu64", fnum = %"PRIu64", "
		  "%s %s\n",
		  i,
		  pls->context.smblctx,
		  pls->context.tid,
		  server_id_str_buf(pls->context.pid, &tmp),
		  pls->start,
		  pls->size,
		  pls->fnum,
		  lock_type_name(pls->lock_type),
		  lock_flav_name(pls->lock_flav));
}
unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
	return brl->num_locks;
}
struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
	return brl->fsp;
}
TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl)
{
	if (brl->req_mem_ctx == NULL) {
		return talloc_get_type_abort(brl, struct byte_range_lock);
	}

	return brl->req_mem_ctx;
}
const struct GUID *brl_req_guid(const struct byte_range_lock *brl)
{
	if (brl->req_guid == NULL) {
		static const struct GUID brl_zero_req_guid;
		return &brl_zero_req_guid;
	}

	return brl->req_guid;
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/
static bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (server_id_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}
bool byte_range_valid(uint64_t ofs, uint64_t len)
{
	uint64_t max_len = UINT64_MAX - ofs;
	uint64_t effective_len;

	/*
	 * [MS-FSA] specifies this:
	 *
	 * If (((FileOffset + Length - 1) < FileOffset) && Length != 0) {
	 *   return STATUS_INVALID_LOCK_RANGE
	 * }
	 *
	 * We avoid integer wrapping and calculate
	 * max and effective len instead.
	 */

	if (len == 0) {
		return true;
	}

	effective_len = len - 1;
	if (effective_len <= max_len) {
		return true;
	}

	return false;
}
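/*
 * Worked example (editorial sketch, not from [MS-FSA] itself): with
 * ofs = UINT64_MAX - 1 we get max_len = 1. len = 2 ends exactly at
 * UINT64_MAX (effective_len = 1 <= max_len) and is valid; len = 3
 * would wrap past UINT64_MAX (effective_len = 2 > max_len) and is
 * rejected, which is what STATUS_INVALID_LOCK_RANGE describes above.
 */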
bool byte_range_overlap(uint64_t ofs1,
			uint64_t len1,
			uint64_t ofs2,
			uint64_t len2)
{
	uint64_t last1;
	uint64_t last2;
	bool valid;

	/*
	 * This is based on [MS-FSA] 2.1.4.10
	 * Algorithm for Determining If a Range Access
	 * Conflicts with Byte-Range Locks
	 */

	/*
	 * The {0, 0} range doesn't conflict with any byte-range lock
	 */
	if (ofs1 == 0 && len1 == 0) {
		return false;
	}
	if (ofs2 == 0 && len2 == 0) {
		return false;
	}

	/*
	 * The caller should have checked that the ranges are
	 * valid. But currently we gracefully handle
	 * the overflow of a read/write check.
	 */
	valid = byte_range_valid(ofs1, len1);
	if (valid) {
		last1 = ofs1 + len1 - 1;
	} else {
		last1 = UINT64_MAX;
	}
	valid = byte_range_valid(ofs2, len2);
	if (valid) {
		last2 = ofs2 + len2 - 1;
	} else {
		last2 = UINT64_MAX;
	}

	/*
	 * If one range starts after the last
	 * byte of the other range there's
	 * no conflict.
	 */
	if (ofs1 > last2) {
		return false;
	}
	if (ofs2 > last1) {
		return false;
	}

	return true;
}
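/*
 * Worked example (editorial sketch): ofs1=0, len1=10 covers bytes 0..9
 * (last1 = 9) and ofs2=10, len2=5 covers bytes 10..14 (last2 = 14).
 * ofs2 (10) > last1 (9), so the ranges are adjacent but do not
 * overlap and the function returns false; with ofs2=9 they would
 * overlap and it returns true.
 */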
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/
static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	return byte_range_overlap(lck1->start,
				  lck1->size,
				  lck2->start,
				  lck2->size);
}
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/
static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return false;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	   context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return false;
	}

	return brl_overlap(lck1, lck2);
}
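/*
 * Example (editorial sketch): a WRITE lock on bytes 0..9 and a READ
 * probe on the same range do not conflict when they share both context
 * and fnum - Windows semantics let a handle stack a READ on top of its
 * own WRITE. The same READ probe from another fnum or context falls
 * through to brl_overlap() and conflicts.
 */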
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know here.
****************************************************************************/
static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return false;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return false;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/
static bool brl_conflict_other(const struct lock_struct *lock,
			       const struct lock_struct *rw_probe)
{
	if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
		return false;
	}

	if (lock->lock_flav == POSIX_LOCK &&
	    rw_probe->lock_flav == POSIX_LOCK) {
		/*
		 * POSIX flavour locks never conflict here - this is only called
		 * in the read/write path.
		 */
		return false;
	}

	if (!brl_overlap(lock, rw_probe)) {
		/*
		 * I/O can only conflict when overlapping a lock, thus let it
		 * pass
		 */
		return false;
	}

	if (!brl_same_context(&lock->context, &rw_probe->context)) {
		/*
		 * Different process, conflict
		 */
		return true;
	}

	if (lock->fnum != rw_probe->fnum) {
		/*
		 * Different file handle, conflict
		 */
		return true;
	}

	if ((lock->lock_type == READ_LOCK) &&
	    (rw_probe->lock_type == WRITE_LOCK)) {
		/*
		 * Incoming WRITE locks conflict with existing READ locks even
		 * if the context is the same. JRA. See LOCKTEST7 in
		 * smbtorture.
		 */
		return true;
	}

	/*
	 * I/O request compatible with existing lock, let it pass without
	 * conflict
	 */

	return false;
}
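/*
 * Example (editorial sketch): a WRITE probe over bytes a handle
 * already holds under its own READ lock is refused (the LOCKTEST7
 * case above), while a READ probe over the handle's own WRITE lock
 * passes, because the final check only matches READ -> WRITE.
 */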
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/
void brl_init(bool read_only)
{
	int tdb_flags;
	char *db_path;

	if (brlock_db) {
		return;
	}

	tdb_flags = SMBD_VOLATILE_TDB_FLAGS | TDB_SEQNUM;

	db_path = lock_path(talloc_tos(), "brlock.tdb");
	if (db_path == NULL) {
		DEBUG(0, ("out of memory!\n"));
		return;
	}

	brlock_db = db_open(NULL, db_path,
			    SMBD_VOLATILE_TDB_HASH_SIZE, tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			 db_path));
		TALLOC_FREE(db_path);
		return;
	}
	TALLOC_FREE(db_path);
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/
NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;
	bool valid;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	valid = byte_range_valid(plock->start, plock->size);
	if (!valid) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(fsp->conn->params)) {
		int errno_ret;

		if (!set_posix_lock_windows_flavour(fsp,
						    plock->start,
						    plock->size,
						    plock->lock_type,
						    &plock->context,
						    locks,
						    br_lck->num_locks,
						    &errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return status;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/
static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					     struct lock_struct *ex,		/* existing lock. */
					     struct lock_struct *plock)		/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        | plock         |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                | plock         |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        | plock         |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        | plock         |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                | plock         |
                +---------------+
OR
        +-------------+
        |  ex         |
        +-------------+
                +---------------+
                | plock         |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                | plock   |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    | plock   | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
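/*
 * Worked example (editorial sketch): with an existing READ lock ex =
 * {start=0, size=100} and a proposed WRITE lock plock = {start=40,
 * size=10} from the same context, the "complete overlap" case splits
 * ex in two: lck_arr[0] = {0, 40} and lck_arr[1] = {50, 50}, and the
 * function returns 2. With equal lock types it would instead merge,
 * return 0, and leave plock covering {0, 100}.
 */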
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/
static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_LOCK_NOT_GRANTED;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_LOCK_NOT_GRANTED;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
						  plock->start,
						  plock->size,
						  plock->lock_type,
						  &plock->context,
						  &errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				TALLOC_FREE(tp);
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				TALLOC_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/
NTSTATUS brl_lock(
	struct byte_range_lock *br_lck,
	uint64_t smblctx,
	struct server_id pid,
	br_off start,
	br_off size,
	enum brl_type lock_type,
	enum brl_flavour lock_flav,
	struct server_id *blocker_pid,
	uint64_t *psmblctx)
{
	NTSTATUS ret;
	struct lock_struct lock;

	lock = (struct lock_struct) {
		.context.smblctx = smblctx,
		.context.pid = pid,
		.context.tid = br_lck->fsp->conn->cnum,
		.start = start,
		.size = size,
		.fnum = br_lck->fsp->fnum,
		.lock_type = lock_type,
		.lock_flav = lock_flav
	};

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*blocker_pid = lock.context.pid;
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
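/*
 * Usage sketch (editorial, illustrative only; br_lck, smblctx and
 * msg_ctx are hypothetical): request an exclusive Windows-flavour
 * lock on bytes 100..199 and learn who blocked us on failure.
 */
#if 0
	struct server_id blocker;
	uint64_t blocker_smblctx;
	NTSTATUS status = brl_lock(br_lck,
				   smblctx,
				   messaging_server_id(msg_ctx),
				   100,		/* start */
				   100,		/* size */
				   WRITE_LOCK,
				   WINDOWS_LOCK,
				   &blocker,
				   &blocker_smblctx);
#endif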
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/
bool brl_unlock_windows_default(struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

	ARRAY_DEL_ELEMENT(locks, i, br_lck->num_locks);
	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
						   plock->start,
						   plock->size,
						   deleted_lock_type,
						   &plock->context,
						   locks,
						   br_lck->num_locks);
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/
static bool brl_unlock_posix(struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				TALLOC_FREE(tp);
				return False;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		TALLOC_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						 plock->start,
						 plock->size,
						 &plock->context,
						 tp,
						 count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		TALLOC_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	br_lck->modified = True;

	return True;
}
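/*
 * Worked example (editorial sketch): unlocking {start=40, size=10} out
 * of an existing POSIX lock {start=0, size=100} on the same context
 * drives brlock_posix_split_merge() with an UNLOCK_LOCK type (so the
 * types always differ), splitting the region into {0, 40} and
 * {50, 50}; tmp_count == 2 marks overlap_found and both halves are
 * kept in the rebuilt array.
 */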
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/
bool brl_unlock(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		return brl_unlock_posix(br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/
bool brl_locktest(struct byte_range_lock *br_lck,
		  const struct lock_struct *rw_probe)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], rw_probe)) {
			if (br_lck->record == NULL) {
				/* readonly */
				return false;
			}

			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}

			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) &&
	   (rw_probe->lock_flav == WINDOWS_LOCK)) {
		/*
		 * Make copies -- is_posix_locked might modify the values
		 */

		br_off start = rw_probe->start;
		br_off size = rw_probe->size;
		enum brl_type lock_type = rw_probe->lock_type;

		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)start, (uintmax_t)size,
			   ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
/****************************************************************************
 Query for existing locks.
****************************************************************************/
NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		       uint64_t *psmblctx,
		       struct server_id pid,
		       br_off *pstart,
		       br_off *psize,
		       enum brl_type *plock_type,
		       enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)*pstart,
			   (uintmax_t)*psize, ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
/****************************************************************************
 Remove any locks associated with an open file.
****************************************************************************/
void brl_close_fnum(struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint32_t tid = fsp->conn->cnum;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)talloc_memdup(
			br_lck, locks,
			br_lck->num_locks * sizeof(struct lock_struct));
		if (locks_copy == NULL) {
			smb_panic("brl_close_fnum: talloc failed");
		}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		if (lock->context.tid == tid &&
		    server_id_equal(&lock->context.pid, &pid) &&
		    (lock->fnum == fnum)) {
			brl_unlock(
				br_lck,
				lock->context.smblctx,
				pid,
				lock->start,
				lock->size,
				lock->lock_flav);
		}
	}
}
bool brl_mark_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	if (fsp->current_lock_count == 0) {
		return true;
	}

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle, we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != tid) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_equal(&lock->context.pid, &self)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != fnum) {
			TALLOC_FREE(br_lck);
			return false;
		}

		server_id_set_disconnected(&lock->context.pid);
		lock->context.tid = TID_FIELD_INVALID;
		lock->fnum = FNUM_FIELD_INVALID;
	}

	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/
static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	num_locks = value.dsize/sizeof(*locks);

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/
int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;
	NTSTATUS status;
	int count = 0;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	} else {
		return count;
	}
}
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/
static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	unsigned i;
	struct lock_struct *locks = br_lck->lock_data;

	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	i = 0;

	while (i < br_lck->num_locks) {
		if (locks[i].context.pid.pid == 0) {
			/*
			 * Autocleanup, the process conflicted and does not
			 * exist anymore.
			 */
			locks[i] = locks[br_lck->num_locks-1];
			br_lck->num_locks -= 1;
		} else {
			i += 1;
		}
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data = {
			.dsize = br_lck->num_locks * sizeof(struct lock_struct),
			.dptr = (uint8_t *)br_lck->lock_data,
		};
		NTSTATUS status;

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

 done:
	br_lck->modified = false;
	TALLOC_FREE(br_lck->record);
}
static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
{
	size_t data_len;

	if (data.dsize == 0) {
		return true;
	}
	if (data.dsize % sizeof(struct lock_struct) != 0) {
		DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
		return false;
	}

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
	data_len = br_lck->num_locks * sizeof(struct lock_struct);

	br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
	if (br_lck->lock_data == NULL) {
		DEBUG(1, ("talloc_memdup failed\n"));
		return false;
	}
	return true;
}
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;

	key.dptr = (uint8_t *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	if (!brl_parse_data(br_lck, data)) {
		TALLOC_FREE(br_lck);
		return NULL;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct file_id_buf buf;
		struct lock_struct *locks = br_lck->lock_data;
		DBG_DEBUG("%u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_str_buf(fsp->file_id, &buf));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
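/*
 * Usage sketch (editorial, illustrative only; fsp is hypothetical):
 * this write path takes the record lock and flushes on free, while
 * brl_get_locks_readonly() further below serves unmodified reads from
 * the seqnum cache.
 */
#if 0
	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return NT_STATUS_NO_MEMORY;
	}
	/* ... grant or release locks ... */
	TALLOC_FREE(br_lck);	/* destructor stores the record */
#endif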
struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
						  files_struct *fsp,
						  TALLOC_CTX *req_mem_ctx,
						  const struct GUID *req_guid)
{
	struct byte_range_lock *br_lck = NULL;

	br_lck = brl_get_locks(mem_ctx, fsp);
	if (br_lck == NULL) {
		return NULL;
	}
	SMB_ASSERT(req_mem_ctx != NULL);
	br_lck->req_mem_ctx = req_mem_ctx;
	SMB_ASSERT(req_guid != NULL);
	br_lck->req_guid = req_guid;

	return br_lck;
}
struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};
static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
					  void *private_data)
{
	struct brl_get_locks_readonly_state *state =
		(struct brl_get_locks_readonly_state *)private_data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_pooled_object(
		state->mem_ctx, struct byte_range_lock, 1, data.dsize);
	if (br_lck == NULL) {
		*state->br_lock = NULL;
		return;
	}
	*br_lck = (struct byte_range_lock) { 0 };
	if (!brl_parse_data(br_lck, data)) {
		*state->br_lock = NULL;
		return;
	}
	*state->br_lock = br_lck;
}
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct brl_get_locks_readonly_state state;
	NTSTATUS status;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	/*
	 * Parse the record fresh from the database
	 */

	state.mem_ctx = fsp;
	state.br_lock = &br_lock;

	status = dbwrap_parse_record(
		brlock_db,
		make_tdb_data((uint8_t *)&fsp->file_id,
			      sizeof(fsp->file_id)),
		brl_get_locks_readonly_parser, &state);

	if (NT_STATUS_EQUAL(status,NT_STATUS_NOT_FOUND)) {
		/*
		 * No locks on this file. Return an empty br_lock.
		 */
		br_lock = talloc_zero(fsp, struct byte_range_lock);
		if (br_lock == NULL) {
			return NULL;
		}
	} else if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not parse byte range lock record: "
			  "%s\n", nt_errstr(status)));
		return NULL;
	}
	if (br_lock == NULL) {
		return NULL;
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	/*
	 * Cache the brlock struct, invalidated when the dbwrap_seqnum
	 * changes. See beginning of this routine.
	 */
	TALLOC_FREE(fsp->brlock_rec);
	fsp->brlock_rec = br_lock;
	fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

	return br_lock;
}
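/*
 * Note (editorial): the cache above relies on TDB_SEQNUM, which
 * brl_init() passes to db_open(). Any committed change to brlock.tdb
 * bumps the database sequence number, so a cached fsp->brlock_rec is
 * reused only while dbwrap_get_seqnum() still matches
 * fsp->brlock_seqnum.
 */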
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	struct file_id_buf buf;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DBG_INFO("failed to fetch record for file %s\n",
			 file_id_str_buf(fid, &buf));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct *)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DBG_DEBUG("no byte range locks for file %s\n",
			  file_id_str_buf(fid, &buf));
		ret = true;
		goto done;
	}

	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			struct server_id_buf tmp;
			DBG_INFO("byte range lock "
				 "%s used by server %s, do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 server_id_str_buf(ctx->pid, &tmp));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DBG_INFO("byte range lock %s expected smblctx %"PRIu64" "
				 "but found %"PRIu64", do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 open_persistent_id, ctx->smblctx);
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_INFO("failed to delete record "
			 "for file %s from %s, open %"PRIu64": %s\n",
			 file_id_str_buf(fid, &buf),
			 dbwrap_name(brlock_db),
			 open_persistent_id, nt_errstr(status));
		goto done;
	}

	DBG_DEBUG("file %s cleaned up %u entries from open %"PRIu64"\n",
		  file_id_str_buf(fid, &buf),
		  num, open_persistent_id);

	ret = true;
done:
	talloc_free(frame);
	return ret;
}