 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <sys/param.h>
#include <sys/pathconf.h>
#include <netconfig.h>
#include <sys/sockio.h>
#include <sys/resource.h>
#include <sys/unistat/spcs_s.h>
#include <sys/unistat/spcs_s_u.h>
#include <sys/unistat/spcs_errors.h>
#include <sys/nsctl/cfg.h>
#include <sys/nsctl/cfg_lockd.h>
#define	DPF(m)		if (debug) (void) fprintf m

#define	MAX_LOCKQ	1024
#define	MAX_DAEMONS	1024
#define	MAX_LOCAL	1024
#define	MAX_TIMEOUTS	3
#define	TIMEOUT_SECS	5
static char program[] = "dscfglockd";

static FILE *debugfile = NULL;

struct lock_req {
	cfglockd_t	type;		/* read or write */
	pid_t		pid;		/* pid of read locker or local writer */
	daemonaddr_t	remote;		/* remote machine requesting write lock */
	int		state;		/* for write locks */
	int32_t		order;		/* who gets priority? */
} lock_queue[MAX_LOCKQ];
struct unlock_s {
	pid_t		pid;		/* pid of locker */
	uint8_t		seq;		/* seq number of last lock request */
} unlock_buf[MAX_UNLOCK];

#define	lock_wanted	lock_queue[0]
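/*
 * The head of the queue doubles as the request currently being serviced:
 * take_lock() below replies to lock_wanted.remote and then removes
 * lock_queue[0] with delete_queue_entry().
 */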
#define	READ_LOCK	0x11
#define	WRITE_LOCK	0x12

	struct lockdaemon	*remote_daemon;
	pid_t			holding_pid[MAX_LOCAL];

daemonaddr_t	thishost;
daemonaddr_t	localhost;

#define	STATE_CLEAR	0
#define	STATE_ASKED	1
#define	STATE_OKAYED	2
#define	STATE_WANTS	3
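/*
 * Per-daemon handshake for the write lock: a peer we have asked moves
 * from STATE_ASKED to STATE_OKAYED when its GRANTED reply arrives (see
 * lock_granted() and check_for_write_lock()); STATE_WANTS presumably
 * marks a peer that wants the lock itself.
 */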
#define	lockdaemon_dead(ldp)	((ticker - (ldp)->timeout) > MAX_TIMEOUTS)
#define	CRIT_BEGIN()	(void) sighold(SIGALRM)
#define	CRIT_END()	(void) sigrelse(SIGALRM)

#define	NORMAL_UNLOCK	0
#define	FORCE_UNLOCK	1

} daemon_list[MAX_DAEMONS];

unsigned short	lock_port = CFG_SERVER_PORT;
int	pf_inet = PF_INET;

#define	dp_addr(p)	inet_ntoa(((struct sockaddr_in *)p)->sin_addr)
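/*
 * All lock traffic travels as struct lock_msg datagrams over the UDP
 * socket (lock_soc) bound in init().  dispatch() below sorts incoming
 * messages: lock and unlock requests from local clients, and ALIVE,
 * WRITE_LOCK, UNLOCK and GRANTED messages exchanged with peer daemons.
 */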
lockd_type(cfglockd_t type)
	case LOCK_NOTLOCKED:	return "NotLocked";
	case LOCK_READ:		return "Read";
	case LOCK_WRITE:	return "Write";
	case LOCK_LOCKED:	return "Locked";
	case LOCK_LOCKEDBY:	return "LockedBy";
	case LOCK_STAT:		return "Stat";
	case LOCK_ACK:		return "Ack";
	default:		return "*unknown*";

lockd_state(int state)
	case STATE_CLEAR:	return "Clear";
	case STATE_ASKED:	return "Asked";
	case STATE_OKAYED:	return "Okayed";
	case STATE_WANTS:	return "Wants";
	default:		return "*unknown*";

lockd_msg(int message)
	case ALIVE:		return "Alive";
	case READ_LOCK:		return "ReadLock";
	case WRITE_LOCK:	return "WriteLock";
	case UNLOCK:		return "Unlock";
	case GRANTED:		return "Granted";
	default:		return lockd_type((cfglockd_t)message);
 * The following is stolen from autod_nfs.c

getmyaddrs(struct ifconf *ifc)

	if ((sock = socket(family, SOCK_DGRAM, 0)) < 0) {
		perror("getmyaddrs(): socket");

	if (ioctl(sock, SIOCGIFNUM, (char *)&numifs) < 0) {
		perror("getmyaddrs(): SIOCGIFNUM");

	buf = (char *)malloc(numifs * sizeof (struct ifreq));
		(void) fprintf(stderr, "getmyaddrs(): malloc failed\n");

	ifc->ifc_len = numifs * sizeof (struct ifreq);

	if (ioctl(sock, SIOCGIFCONF, (char *)ifc) < 0) {
		perror("getmyaddrs(): SIOCGIFCONF");
cmp_addr(daemonaddr_t *a, daemonaddr_t *b)

	rc = memcmp(&(a->sin_addr), &(b->sin_addr), sizeof (a->sin_addr));
	DPF((stderr, "compare %s %hu with", dp_addr(a), a->sin_port));
	DPF((stderr, " %s %hu = %d\n", dp_addr(b), b->sin_port, rc));
addr_is_holder(int32_t order)

	return ((the_lock.nholders > 0) && the_lock.remote_daemon != NULL &&
	    (order == the_lock.remote_daemon->order));
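/*
 * A remote write-lock holder is identified by its order number rather
 * than by pid; remote_lock() clears the pid in remote requests because
 * it is meaningless on this node, so order is the only identity a peer
 * daemon's lock carries here.
 */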
islocalhost(daemonaddr_t *host)

	struct sockaddr_in *s1, *s2;

	n = ifc->ifc_len / sizeof (struct ifreq);

	for (; n > 0; n--, ifr++) {
		if (ifr->ifr_addr.sa_family != AF_INET)

		/* LINTED pointer alignment */
		s2 = (struct sockaddr_in *)&ifr->ifr_addr;

		if (memcmp((char *)&s2->sin_addr,
		    (char *)&s1->sin_addr, sizeof (s1->sin_addr)) == 0) {
send_lockmsg(int cmd, pid_t pid, daemonaddr_t *dp, uint8_t seq)

	struct lock_msg message_buf;

	if (msgtrace && debugfile) {
		(void) fprintf(debugfile, "%19.19s send %-9.9s to %s\n",
		    ctime(&t), lockd_msg(cmd), dp_addr(dp));

	DPF((stderr, "send %d to %s port %hu\n", cmd,
	    dp_addr(dp), dp->sin_port));
	message_buf.message = cmd;
	message_buf.pid = pid;
	message_buf.order = order;
	message_buf.seq = seq;

		rc = sendto(lock_soc, &message_buf, sizeof (message_buf), 0,
		    (struct sockaddr *)dp, sizeof (struct sockaddr));
	} while (rc == -1 && errno == EINTR);

		spcs_log("cfglockd", NULL, "sendto rc -1 errno %d", errno);
 * send an alive message to all configured daemons so that they can tell
 * us if they are holding a write lock.

	struct lockdaemon *ldp;

	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
		send_lockmsg(ALIVE, (pid_t)0, &(ldp->host), 0);
/* find the lock daemon structure for a given daemon address */

static struct lockdaemon *
find_lockdaemon(daemonaddr_t *d)

	struct lockdaemon *ldp;

	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
		if (cmp_addr(&(ldp->host), d) == 0)
 * a message has been received from a daemon; note this, and if the daemon
 * was previously dead and we have the write lock tell it that we do.

daemon_alive(daemonaddr_t *daemon, int32_t order)

	struct lockdaemon *ldp;

	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
		if (cmp_addr(&(ldp->host), daemon) == 0) {
			ldp->timeout = ticker;

				spcs_log("cfglockd", NULL,
				    "daemon restarted on %s\n",
				DPF((stderr, "daemon restarted on %s\n",

	/* new daemon has announced itself */
	if (i < MAX_DAEMONS) {
		DPF((stderr, "new daemon on %s\n", dp_addr(daemon)));
		spcs_log("cfglockd", NULL,
		    "new daemon on %s\n", dp_addr(daemon));
		ldp->timeout = ticker;

		/* problem, more daemons than expected */

	if (the_lock.type == LOCK_WRITE && the_lock.remote_daemon == NULL)
		send_lockmsg(WRITE_LOCK, (pid_t)0, daemon, 0);
delete_queue_entry(struct lock_req *req)

	for (i = (req - lock_queue); i++ < next_req; req++)

take_lock(int ackmessage)

	send_lockmsg(ackmessage, (pid_t)0, &lock_wanted.remote, 0);
	delete_queue_entry(lock_queue);
check_for_write_lock()

	struct lockdaemon *ldp;

	DPF((stderr, "check for lock\n"));
	if (lock_wanted.state != STATE_ASKED)

	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
		if (ldp->up && ldp->state != STATE_OKAYED) {

	if (wait == 0 && lock_wanted.type == LOCK_WRITE) {
		the_lock.type = LOCK_WRITE;
		the_lock.holding_pid[0] = lock_wanted.pid;
		the_lock.nholders = 1;
		the_lock.state = STATE_CLEAR;
		take_lock(LOCK_LOCKED);
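/*
 * The pending local write lock is only taken once every peer daemon
 * that is still up has reached STATE_OKAYED; an up daemon in any other
 * state presumably keeps wait non-zero and the request stays queued
 * until its GRANTED reply (or its death) is noticed.
 */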
lock_granted(daemonaddr_t *da)

	struct lockdaemon *ldp;

	if ((ldp = find_lockdaemon(da)) != NULL) {
		/* if we already own the lock, throw the msg away */
		if (the_lock.remote_daemon == NULL &&
		    the_lock.type == LOCK_WRITE) {

		 * If the current lock isn't a write lock and we're not
		 * The current lock is a write lock and it's not owned by us
		 * send back an unlocked message.

		if ((the_lock.type != LOCK_WRITE &&
		    the_lock.state != STATE_ASKED) ||
		    (the_lock.type == LOCK_WRITE &&
		    the_lock.remote_daemon != NULL)) {
			send_lockmsg(UNLOCK, (pid_t)0, &(ldp->host), 0);

		ldp->state = STATE_OKAYED;

	check_for_write_lock();
	struct lockdaemon *ldp;

	switch (the_lock.type) {
		if (lock_wanted.type == LOCK_READ) {
			i = the_lock.nholders++;
			the_lock.holding_pid[i] = lock_wanted.pid;
			the_lock.state = STATE_CLEAR;
			DPF((stderr, "increment read lockers to %d\n",
			take_lock(LOCK_LOCKED);

		/* write lock has to wait */

		/* lock has to wait until write lock is cleared */

		if (lock_wanted.type == LOCK_READ) {
			DPF((stderr, "local locker, 1 lock holder\n"));
			the_lock.holding_pid[0] = lock_wanted.pid;
			the_lock.nholders = 1;
			the_lock.type = LOCK_READ;
			the_lock.state = STATE_CLEAR;
			the_lock.remote_daemon = NULL;
			take_lock(LOCK_LOCKED);

		if (islocalhost(&lock_wanted.remote)) {
			DPF((stderr, "local locker, take write lock\n"));
			/* tell everyone I'm locking */
			if (lock_wanted.state != STATE_ASKED) {
				for (i = 0, ldp = daemon_list; i < MAX_DAEMONS;
					ldp->state = STATE_ASKED;
					send_lockmsg(WRITE_LOCK, (pid_t)0,

			lock_wanted.state = STATE_ASKED;
			check_for_write_lock();
			the_lock.remote_daemon = NULL;
			the_lock.state = STATE_ASKED;

			DPF((stderr, "remote locker, take write lock\n"));
			the_lock.type = LOCK_WRITE;
			the_lock.holder = lock_wanted.remote;
			the_lock.nholders = 1;
			the_lock.remote_daemon =
			    find_lockdaemon(&the_lock.holder);
			the_lock.state = STATE_CLEAR;

		DPF((stderr, "weird lock type held - %d\n", the_lock.type));
		the_lock.type = LOCK_NOTLOCKED;

		return;	/* no locks queued */
lock_sort(const void *a, const void *b)

	struct lock_req *left = (struct lock_req *)a;
	struct lock_req *right = (struct lock_req *)b;

	return (left->order - right->order);
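/*
 * Keeping the queue sorted by order means every daemon services
 * outstanding requests in the same priority sequence; queue_lock()
 * below re-sorts after each insertion and requeues our own pending
 * request when a higher-priority (lower order) request arrives.
 */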
queue_lock(cfglockd_t type, struct lock_msg *msg, daemonaddr_t *addr)

	struct lock_req *lrp;
	struct lockdaemon *ldp;

	/* first check if new lock matches current lock */
	if (the_lock.type == type && addr_is_holder(msg->order)) {
		/* remote daemon missed locked message */
		send_lockmsg(GRANTED, (pid_t)0, addr, msg->seq);

	/* next search queue to check for duplicate */
	for (i = 0, lrp = lock_queue; i++ < next_req; lrp++) {
		if (lrp->type == type && lrp->pid == msg->pid &&
		    cmp_addr(addr, &(lrp->remote)) == 0)

	 * It's a new lock request.  Are we in the middle of
	 * obtaining one for ourselves?

	if (the_lock.type == LOCK_NOTLOCKED && the_lock.state == STATE_ASKED) {
		/* did a higher priority request just come in? */
		if (msg->order < order) {
			/* requeue our request */
			the_lock.state = STATE_CLEAR;
			lock_wanted.state = STATE_CLEAR;

			/* let the other lockds know */
			for (i = 0, ldp = daemon_list; i < MAX_DAEMONS;
				if (ldp->up && ldp->state == STATE_OKAYED) {
					send_lockmsg(UNLOCK, (pid_t)0,

	lrp->state = STATE_CLEAR;
	lrp->order = msg->order;

	qsort(lock_queue, next_req, sizeof (lock_queue[0]), lock_sort);

	if (the_lock.type != LOCK_WRITE)
	char *lt = "Unknown";
	struct lockdaemon *ldp;

	switch (the_lock.type) {

	spcs_log("cfglockd", NULL, "Lock is %s (%d)", lt, the_lock.type);
	spcs_log("cfglockd", NULL, "There are %d holders of the lock",

	if (the_lock.nholders > 0) {
		for (i = 0; i < the_lock.nholders; i++)
			spcs_log("cfglockd", NULL, "holding_pid[%d] = %6d", i,
			    the_lock.holding_pid[i]);

	spcs_log("cfglockd", NULL, "holder daemon was %s port %hu, remote %x",
	    dp_addr(&the_lock.holder), the_lock.holder.sin_port,
	    the_lock.remote_daemon);
	spcs_log("cfglockd", NULL, "Lock queue, %d requests", next_req);
	for (i = 0; i < next_req; i++) {
		spcs_log("cfglockd", NULL, "request %d type %d order %d", i,
		    lock_queue[i].type, lock_queue[i].order);
		spcs_log("cfglockd", NULL, "	client %s port %hu, pid %d",
		    dp_addr(&lock_queue[i].remote),
		    lock_queue[i].remote.sin_port, lock_queue[i].pid);

	spcs_log("cfglockd", NULL, "Daemon list");

	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
		spcs_log("cfglockd", NULL, "daemon %d, %s port %hu", i,
		    dp_addr(&ldp->host), ldp->host.sin_port);
		spcs_log("cfglockd", NULL,
		    "	up %d timeout %ld missed %d state %d\n", ldp->up,
		    ldp->timeout, ticker - ldp->timeout, ldp->state);
is_duplicate(cfglockd_t type, pid_t pid, uint8_t seq)

	struct unlock_s *bufp;

	for (i = 0, bufp = unlock_buf; bufp->pid && i < MAX_UNLOCK;
		if (bufp->pid == pid && bufp->seq == seq) {
			/* throw message away */
			spcs_log("cfglockd", NULL,
			    "duplicate '%d' request received from %d",

	/* add it to the list */
	bcopy(unlock_buf, &unlock_buf[1],
	    sizeof (unlock_buf) - sizeof (struct unlock_s));
	(*unlock_buf).pid = pid;
	(*unlock_buf).seq = seq;
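/*
 * unlock_buf is a small most-recent-first list of (pid, seq) pairs: the
 * bcopy() above shifts the existing entries down one slot and the new
 * request is recorded in unlock_buf[0], so a repeat of the same request
 * (same pid and sequence number) is recognised and thrown away.
 */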
local_lock(cfglockd_t type, struct lock_msg *msg, daemonaddr_t *client)

	if (is_duplicate(type, msg->pid, msg->seq)) {
		if (the_lock.remote_daemon == NULL &&
		    (the_lock.type == LOCK_WRITE ||
		    the_lock.type == LOCK_READ) &&
		    the_lock.holding_pid[0] == msg->pid) {
			send_lockmsg(LOCK_LOCKED, (pid_t)0, client, msg->seq);

		queue_lock(type, msg, client);
remote_lock(struct sockaddr_in *remote, struct lock_msg *msg)

	/* make sure remote knows we are alive */
	send_lockmsg(ALIVE, (pid_t)0, remote, 0);

	/* clear out pid as it is meaningless on this node */

	queue_lock(LOCK_WRITE, msg, (daemonaddr_t *)remote);
unqueue_lock(daemonaddr_t *d, pid_t pid)

	struct lock_req *lrp, *xrp;

	/* search queue to delete ungranted locks */
	for (i = 0, xrp = lrp = lock_queue; i++ < next_req; lrp++) {
		if (pid != (pid_t)0 && lrp->pid != pid)
		if (d != NULL && cmp_addr(d, &(lrp->remote)) != 0)

	next_req = xrp - lock_queue;
	DPF((stderr, "** UNLOCK **\n"));
	the_lock.remote_daemon = NULL;
	the_lock.type = LOCK_NOTLOCKED;
	the_lock.nholders = 0;
	the_lock.state = STATE_CLEAR;
local_unlock(pid_t pid, uint8_t seq, int method)

	struct lockdaemon *ldp;

	if (method == NORMAL_UNLOCK && is_duplicate(LOCK_NOTLOCKED, pid, seq)) {

	if (the_lock.type == LOCK_READ) {
		/* delete reference to pid of reading process */
		for (i = 0; i < the_lock.nholders; i++) {
			if (the_lock.holding_pid[i] == pid) {
				DPF((stderr, "decrement lockers from %d\n",

		for (; i < the_lock.nholders; i++) {
			the_lock.holding_pid[i] = the_lock.holding_pid[i+1];

		if (the_lock.nholders > 0)

		if (pid != the_lock.holding_pid[0])

	the_lock.holding_pid[0] = (pid_t)0;
	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
		send_lockmsg(UNLOCK, (pid_t)0, &(ldp->host), 0);
remote_unlock(int32_t order, daemonaddr_t *d)

	struct lock_req *lrp;

	DPF((stderr, "remote unlock from %s ", dp_addr(d)));
	DPF((stderr, "when %s holds lock\n", dp_addr(&the_lock.holder)));

	/* search queue to check for ungranted lock */
	for (i = 0, lrp = lock_queue; i++ < next_req; lrp++) {
		if (lrp->type == LOCK_WRITE &&
		    cmp_addr(d, &(lrp->remote)) == 0) {
			delete_queue_entry(lrp);

	if (addr_is_holder(order)) {
lockedby(daemonaddr_t *d, uint8_t seq)

	DPF((stderr, "lockedby enquiry from %s ", dp_addr(d)));
	switch (the_lock.type) {
		send_lockmsg(LOCK_NOTLOCKED, (pid_t)0, d, seq);
		send_lockmsg(LOCK_READ, the_lock.holding_pid[0], d, seq);
		send_lockmsg(LOCK_WRITE, the_lock.holding_pid[0], d, seq);
	struct lock_req *locker;
	struct lockdaemon *ldp;

	DPF((stderr, "keepalive...\n"));

	 * tell any other daemon that has a lock request in our queue that
	 * this daemon is still alive.

	for (i = 0, locker = lock_queue; i < next_req; i++, locker++) {
		if (locker->pid == 0)	/* remote lock request */
			send_lockmsg(ALIVE, (pid_t)0, &(locker->remote), 0);

	 * if a remote daemon holds the lock, check that it is still alive
	 * and, if it is, send it a grant message in case the remote daemon
	 * missed our original grant.
	if (the_lock.remote_daemon) {
		if (lockdaemon_dead(the_lock.remote_daemon)) {
			DPF((stderr, "lock owner died\n"));
			the_lock.remote_daemon->up = 0;

			send_lockmsg(GRANTED, (pid_t)0, &the_lock.holder, 0);

	 * check for response from daemons preventing this daemon
	 * from taking a write lock by not sending a grant message.
	 * if the remote daemon is alive send another lock request,
	 * otherwise mark it as dead.
	 * send alive message to any live remote daemons if this
	 * daemon has the write lock.
	(void) printf("\nlock: %s\n", lockd_type(the_lock.type));
	(void) printf(" no. holders: %d\n", the_lock.nholders);
	(void) printf(" hold addr : %s\n", the_lock.remote_daemon ?
	    dp_addr(the_lock.remote_daemon) : "0.0.0.0");
	(void) printf(" holding pid:");
	for (i = 0; i < the_lock.nholders; i++) {
		(void) printf(" %ld", the_lock.holding_pid[i]);

	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
		(void) printf("%-15.15s ", dp_addr(&ldp->host));
		(void) printf("%-4.4s ", ldp->up ? "up" : "down");
		(void) printf("%5ld ", ldp->timeout);
		(void) printf("%-10.10s ", lockd_state(ldp->state));
		(void) printf("%6d\n", ldp->order);

		if (ldp->state == STATE_ASKED) {
			if (lockdaemon_dead(ldp)) {
				ldp->state = STATE_CLEAR;

			send_lockmsg(WRITE_LOCK, (pid_t)0, &(ldp->host), 0);

		if (the_lock.type == LOCK_WRITE &&
		    the_lock.remote_daemon == NULL)
			send_lockmsg(ALIVE, (pid_t)0, &(ldp->host), 0);
dispatch(struct lock_msg *mp, daemonaddr_t *host)

	int message = mp->message;

	localhost = islocalhost(host);
	if (msgtrace && debugfile) {
			(void) fprintf(debugfile,
			    "%19.19s recv %-9.9s from %s (%ld)\n", ctime(&t),
			    lockd_msg(message), dp_addr(host), mp->pid);

			(void) fprintf(debugfile,
			    "%19.19s recv %-9.9s from %s order %d (%ld)\n",
			    ctime(&t), lockd_msg(message), dp_addr(host),

	DPF((stderr, "received message %d\n", message));
	DPF((stderr, "from %s port %hu\n", dp_addr(host), host->sin_port));

		daemon_alive(host, mp->order);

		DPF((stderr, "received ALIVE %s\n", dp_addr(host)));
		/* do nothing, general "not localhost" code above does this */

		DPF((stderr, "received UNLOCK\n"));
		remote_unlock(mp->order, host);

		DPF((stderr, "received GRANTED\n"));

		DPF((stderr, "received WRITE_LOCK\n"));
		remote_lock(host, mp);

		DPF((stderr, "received READ_LOCK\n"));
		local_lock(LOCK_READ, mp, host);

		DPF((stderr, "received LOCK_WRITE\n"));
		local_lock(LOCK_WRITE, mp, host);

		DPF((stderr, "received LOCK_NOTLOCKED\n"));
		send_lockmsg(LOCK_ACK, (pid_t)0, host, mp->seq);
		if (the_lock.type != LOCK_NOTLOCKED) {
			local_unlock(mp->pid, mp->seq, NORMAL_UNLOCK);

		lockedby(host, mp->seq);

		/* throw message away -- this is an error to receive */
 * unqueue any locks asked for by pid and unlock any locks held by pid.

purge_pid(pid_t pid)

	DPF((stderr, "purge locks for %ld\n", pid));
	unqueue_lock(NULL, pid);
	if (the_lock.type != LOCK_NOTLOCKED)
		local_unlock(pid, 0, FORCE_UNLOCK);
 * Check for exit or exec of client processes.
 * The lock protecting the client process's pid in the lockfile will
 * be removed by the kernel when that client exits or execs.

	for (i = 0; (x = cfg_filelock(i, 0)) != CFG_LF_EOF; i++) {
		if (x == CFG_LF_AGAIN)
			continue;	/* can't take lock, must be still alive */
		cfg_readpid(i, &pid);
		cfg_writepid(i, (pid_t)0);
		(void) cfg_fileunlock(i);
		if (pid != (pid_t)0)
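/*
 * A slot whose file lock can now be taken belonged to a client that has
 * exited or exec'd; its recorded pid is cleared above and, when it was
 * non-zero, is presumably handed to purge_pid() so any locks it held or
 * requested are released.
 */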
build_daemon_list(char *cf_file, int exe)

	struct lockdaemon *ldp;

	if ((hp = gethostbyname("localhost")) == NULL) {
		(void) fprintf(stderr, "%s: Can't find hostent for %s\n",
		    program, "localhost");
		spcs_log("cfglockd", NULL, "couldn't find localhost");

	(void) memcpy(&(localhost.sin_addr.s_addr), *(hp->h_addr_list),
	    sizeof (localhost.sin_addr));
	if (cf_file == NULL) {
		(void) endhostent();

		if ((fp = popen(cf_file, "r")) == NULL) {
			(void) fprintf(stderr,
			    "%s: Can't open config program\n", program);
			spcs_log("cfglockd", NULL, "couldn't read config");

		if ((fp = fopen(cf_file, "r")) == NULL) {
			(void) fprintf(stderr, "%s: Can't open config file\n",
			spcs_log("cfglockd", NULL, "couldn't read config");

	while ((i = fscanf(fp, "%s %d\n", host, &port)) != EOF) {
		if (host[0] == '#')	/* lines starting with # are comments */

		if (strcmp(host, "localhost") == 0) {

		if ((hp = gethostbyname(host)) == NULL) {
			(void) fprintf(stderr,
			    "%s: Can't find hostent for %s\n", program, host);

		(void) memcpy(&(ldp->host.sin_addr.s_addr), *(hp->h_addr_list),
		    sizeof (ldp->host.sin_addr));
		DPF((stderr, "daemon: %s\t%s\n",
		    inet_ntoa(ldp->host.sin_addr), hp->h_name));
		if (islocalhost(&(ldp->host))) {
			DPF((stderr, "is an alias for this host, skipping\n"));

		ldp->host.sin_port = htons((short)port);
		ldp->host.sin_family = hp->h_addrtype;

	(void) endhostent();
	(void) fprintf(stderr,
	    gettext("usage: %s [-d] [-f file]|[-e program]\n"), program);

	spcs_log("cfglockd", NULL, "pid %d unexpected signal %d, ignoring",

	(void) unlink(CFG_PIDFILE);
	spcs_log("cfglockd", NULL, "pid %d terminate on signal %d", getpid(),
init(int argc, char *argv[])

#if defined(_SunOS_5_6) || defined(_SunOS_5_7) || defined(_SunOS_5_8)

	struct itimerval tv;
	socklen_t len = sizeof (thishost);

	lstate = (getenv("LOCKD_STATE") != NULL);
	msgtrace = (getenv("LOCKD_MSG") != NULL);

	 * Fork off a child that becomes the daemon.

#ifndef TTY_MESSAGES
	if ((rc = fork()) > 0)

		spcs_log("cfglockd", NULL, "can't fork %d", errno);
		(void) fprintf(stderr, gettext("dscfglockd: cannot fork: %s\n"),

	 * In child - become daemon.

#if !defined(_SunOS_5_6) && !defined(_SunOS_5_7) && !defined(_SunOS_5_8)
	/* use closefrom(3C) from PSARC/2000/193 when possible */
	closefrom(CLOSE_FD);

	(void) getrlimit(RLIMIT_NOFILE, &rl);
	for (i = CLOSE_FD; i < rl.rlim_max; i++)

#ifndef TTY_MESSAGES
	(void) open("/dev/console", O_WRONLY|O_APPEND);

	if (msgtrace || lstate) {
		debugfile = fopen("/var/tmp/dscfglockd.out", "a");
			setbuf(debugfile, (char *)0);
			(void) fprintf(debugfile, "%19.19s dscfglockd start\n",

	spcs_log("cfglockd", NULL, "new lock daemon, pid %d", getpid());

	 * Catch as unexpected all signals apart from SIGTERM.

	for (i = 1; i < _sys_nsig; i++)
		(void) sigset(i, unexpected);
	(void) sigset(SIGTERM, term);

	for (i = 0; (c = getopt(argc, argv, "df:e:")) != EOF; i++) {

	ifc = (struct ifconf *)malloc(sizeof (struct ifconf));
		perror(CFG_PIDFILE);
		DPF((stderr, "Can't open pid file\n"));

	(void) memset((char *)ifc, 0, sizeof (struct ifconf));

	 * if (lockdaemonalive()) {
	 *	(void) fprintf(stderr, "%s: %s\n", program,
	 *		gettext("There is already a live lockdaemon"));

	if ((fp = fopen(CFG_PIDFILE, "w")) == NULL) {
		perror(CFG_PIDFILE);
		DPF((stderr, "Can't open pid file\n"));

	(void) fprintf(fp, "%ld\n", getpid());

	/* order should be set to node number within cluster */
	order = cfg_iscluster();

		(void) gettimeofday(&tp, NULL);
		srand48(tp.tv_usec);

		(void) fprintf(debugfile, "WARNING: order number "
		    "is 0 -- changing randomly to %d\n", order);

	for (i = 0; (x = cfg_filelock(i, 0)) != CFG_LF_EOF; i++) {
		if (x == CFG_LF_AGAIN) {
			cfg_readpid(i, &pid);
			spcs_log("cfglockd", NULL,
			    "init .dscfg.lck slot %d pid %d locked",
			DPF((stderr, "client process %ld still alive\n", pid));
			continue;	/* can't take lock, must be still alive */

		(void) cfg_fileunlock(i);

	tv.it_interval.tv_sec = TIMEOUT_SECS;
	tv.it_interval.tv_usec = 0;
	tv.it_value = tv.it_interval;

	bzero(unlock_buf, sizeof (unlock_buf));

	build_daemon_list(cp, exe);
	if ((lock_soc = socket(pf_inet, SOCK_DGRAM, 0)) < 0) {
		(void) fprintf(stderr, "%s: %s\n", program,
		    gettext("failed to create socket"));
		spcs_log("cfglockd", NULL, "couldn't create socket");

	thishost.sin_family = AF_INET;
	thishost.sin_addr.s_addr = INADDR_ANY;
	thishost.sin_port = htons(lock_port);
	rc = bind(lock_soc, (struct sockaddr *)&thishost, sizeof (thishost));

		spcs_log("cfglockd", NULL, "couldn't bind");

	if (getsockname(lock_soc, (struct sockaddr *)&thishost, &len) < 0)
		perror("getsockname");

	(void) sigset(SIGALRM, keepalive);
	(void) setitimer(ITIMER_REAL, &tv, NULL);
	 * wait two timeouts before allowing a lock, to find out whether
	 * someone else currently holds the lock.
lintmain(int argc, char *argv[])

main(int argc, char *argv[])

	struct lock_msg message_buf;
	int x = 1;	/* kludge to stop warnings from compiler */

		addrlen = sizeof (from);
		DPF((stderr, "begin recvfrom\n"));
		rc = recvfrom(lock_soc, &message_buf, sizeof (message_buf),
		    0, (struct sockaddr *)&from, &addrlen);
		DPF((stderr, "end recvfrom rc = %d\n", rc));

		if (rc == sizeof (message_buf))
			dispatch(&message_buf, &from);

			check_for_write_lock();

		/* if we own the lock, check to see if the process died */
		if (the_lock.type != LOCK_NOTLOCKED &&
		    the_lock.remote_daemon == NULL)