 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <stdio.h>
#include <string.h>
#include <syslog.h>
#include <sys/types.h>
#include <sys/param.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <synch.h>
#include "automount.h"

static void build_dir_entry_list(struct autofs_rddir_cache *rdcp,
        struct dir_entry *list);
static int autofs_rddir_cache_enter(char *map, ulong_t bucket_size,
        struct autofs_rddir_cache **rdcpp);
int autofs_rddir_cache_lookup(char *map, struct autofs_rddir_cache **rdcpp);
static int autofs_rddir_cache_delete(struct autofs_rddir_cache *rdcp);
static int create_dirents(struct autofs_rddir_cache *rdcp, ulong_t offset,
        autofs_rddirres *res);
struct dir_entry *rddir_entry_lookup(char *name, struct dir_entry *list);
static void free_offset_tbl(struct off_tbl *head);
static void free_dir_list(struct dir_entry *head);

#define OFFSET_BUCKET_SIZE      100

rwlock_t autofs_rddir_cache_lock;               /* readdir cache lock */
struct autofs_rddir_cache *rddir_head;          /* readdir cache head */
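
/*
 * do_readdir() builds the readdir reply for the map named in
 * rda->rda_map: the map's keys are read (or reused from the readdir
 * cache above) and packed into dirent64 records in 'rd'.
 */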
int
do_readdir(autofs_rddirargs *rda, autofs_rddirres *rd)
{
        struct dir_entry *list = NULL, *l;
        struct autofs_rddir_cache *rdcp = NULL;
        int error;
        int cache_time = RDDIR_CACHE_TIME;

        if (automountd_nobrowse) {
                /*
                 * Browsability was disabled, return an empty list.
                 */
                rd->rd_status = AUTOFS_OK;
                rd->rd_rddir.rddir_size = 0;
                rd->rd_rddir.rddir_eof = 1;
                rd->rd_rddir.rddir_entries = NULL;

                return (0);
        }

        /*
         * Look for an existing cache entry under the readers lock;
         * only take the writers lock if the map has to be added.
         */
        rw_rdlock(&autofs_rddir_cache_lock);
        error = autofs_rddir_cache_lookup(rda->rda_map, &rdcp);
        if (error) {
                rw_unlock(&autofs_rddir_cache_lock);
                rw_wrlock(&autofs_rddir_cache_lock);
                error = autofs_rddir_cache_lookup(rda->rda_map, &rdcp);
                if (error) {
                        if (trace > 2)
                                trace_prt(1,
                                    "map %s not found, adding...\n",
                                    rda->rda_map);
                        /*
                         * entry doesn't exist, add it.
                         */
                        error = autofs_rddir_cache_enter(rda->rda_map,
                            OFFSET_BUCKET_SIZE, &rdcp);
                }
        }
        rw_unlock(&autofs_rddir_cache_lock);
        if (error)
                return (error);

        assert(rdcp != NULL);
        assert(rdcp->in_use);

        if (!rdcp->full) {
                rw_wrlock(&rdcp->rwlock);
                if (!rdcp->full) {
                        /*
                         * cache entry hasn't been filled up, do it now.
                         */
                        char *stack[STACKSIZ];
                        char **stkptr;

                        /*
                         * Initialize the stack of open files
                         * for this thread
                         */
                        stack_op(INIT, NULL, stack, &stkptr);
                        (void) getmapkeys(rda->rda_map, &list, &error,
                            &cache_time, stack, &stkptr, rda->uid);
                        if (!error)
                                build_dir_entry_list(rdcp, list);
                }
        } else
                rw_rdlock(&rdcp->rwlock);

        rd->rd_bufsize = rda->rda_count;
        error = create_dirents(rdcp, rda->rda_offset, rd);
        if (error) {
                if (rdcp->offtp) {
                        free_offset_tbl(rdcp->offtp);
                        rdcp->offtp = NULL;
                }
                if (rdcp->entp) {
                        free_dir_list(rdcp->entp);
                        rdcp->entp = NULL;
                }
                rdcp->full = 0;
                list = NULL;
        }

        if (trace > 2) {
                /*
                 * print this list only once
                 */
                for (l = list; l != NULL; l = l->next)
                        trace_prt(0, "%s\n", l->name);
                trace_prt(0, "\n");
        }

        if (!error) {
                rd->rd_status = AUTOFS_OK;
                if (cache_time) {
                        /*
                         * keep list of entries for up to
                         * 'cache_time' seconds
                         */
                        rdcp->ttl = time(NULL) + cache_time;
                } else {
                        /*
                         * the underlying name service indicated not
                         * to cache the directory contents
                         */
                        if (rdcp->offtp) {
                                free_offset_tbl(rdcp->offtp);
                                rdcp->offtp = NULL;
                        }
                        if (rdcp->entp) {
                                free_dir_list(rdcp->entp);
                                rdcp->entp = NULL;
                        }
                        rdcp->full = 0;
                }
        } else {
                /*
                 * return an empty list
                 */
                rd->rd_rddir.rddir_size = 0;
                rd->rd_rddir.rddir_eof = 1;
                rd->rd_rddir.rddir_entries = NULL;

                /*
                 * Invalidate cache and set error
                 */
                switch (error) {
                case ENOENT:
                        rd->rd_status = AUTOFS_NOENT;
                        break;
                case ENOMEM:
                        rd->rd_status = AUTOFS_NOMEM;
                        break;
                default:
                        rd->rd_status = AUTOFS_ECOMM;
                }
        }
        rw_unlock(&rdcp->rwlock);

        /*
         * Drop the reference taken on the cache entry above.
         */
        mutex_lock(&rdcp->lock);
        rdcp->in_use--;
        mutex_unlock(&rdcp->lock);

        assert(rdcp->in_use >= 0);

        return (0);
}
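
/*
 * The macros below size dirent64 records: DIRENT64_RECLEN(namelen) is
 * the fixed header up to d_name, plus the name and its terminating NUL,
 * rounded up to an 8-byte boundary.  roundtoint() rounds a byte count
 * up to a multiple of sizeof (int).
 */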
#define roundtoint(x)   (((x) + sizeof (int) - 1) & ~(sizeof (int) - 1))
#define DIRENT64_RECLEN(namelen)        \
        (((int)(((dirent64_t *)0)->d_name) + 1 + (namelen) + 7) & ~ 7)
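
/*
 * Fill 'res' with as many dirent64 records as fit in res->rd_bufsize,
 * starting with the first cached entry whose offset (cookie) is at
 * least 'offset'.  The per-map offset bucket table is searched first
 * so that only one bucket's worth of entries has to be walked.
 */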
static int
create_dirents(
        struct autofs_rddir_cache *rdcp,
        ulong_t offset,
        autofs_rddirres *res)
{
        uint_t total_bytes_wanted;
        int bufsize;
        ushort_t this_reclen;
        int outcount = 0;
        int namelen;
        struct dir_entry *list = NULL, *l, *nl;
        struct dirent64 *dp;
        char *outbuf;
        struct off_tbl *offtp, *next = NULL;
        int this_bucket = 0;
        int error = 0;
        int x = 0, y = 0;

        assert(RW_LOCK_HELD(&rdcp->rwlock));

        /*
         * Find the offset bucket that contains 'offset', then walk only
         * that bucket's portion of the entry list.
         */
        for (offtp = rdcp->offtp; offtp != NULL; offtp = next) {
                x++;
                next = offtp->next;
                this_bucket = (next == NULL);
                if (!this_bucket)
                        this_bucket = (offset < next->offset);
                if (this_bucket) {
                        /*
                         * has to be in this bucket
                         */
                        assert(offset >= offtp->offset);
                        list = offtp->first;
                        break;
                }
                /*
                 * loop to look in next bucket
                 */
        }

        for (l = list; l != NULL && l->offset < offset; l = l->next)
                y++;

        if (l == NULL) {
                /*
                 * reached end of directory
                 */
                error = 0;
                goto empty;
        }

        if (trace > 2)
                trace_prt(1, "%s: offset searches (%d, %d)\n", rdcp->map,
                    x, y);

        total_bytes_wanted = res->rd_bufsize;
        bufsize = total_bytes_wanted + sizeof (struct dirent64);
        outbuf = malloc(bufsize);
        if (outbuf == NULL) {
                syslog(LOG_ERR, "memory allocation error\n");
                error = ENOMEM;
                goto empty;
        }
        memset(outbuf, 0, bufsize);
        /* LINTED pointer alignment */
        dp = (struct dirent64 *)outbuf;

        while (l) {
                nl = l->next;
                namelen = strlen(l->name);
                this_reclen = DIRENT64_RECLEN(namelen);
                if (outcount + this_reclen > total_bytes_wanted) {
                        break;
                }
                dp->d_ino = (ino64_t)l->nodeid;
                if (nl) {
                        /*
                         * get the next element's offset
                         */
                        dp->d_off = (off64_t)nl->offset;
                } else {
                        /*
                         * This is the last element,
                         * make offset one plus the current.
                         */
                        dp->d_off = (off64_t)l->offset + 1;
                }
                (void) strcpy(dp->d_name, l->name);
                dp->d_reclen = (ushort_t)this_reclen;
                outcount += dp->d_reclen;
                /* LINTED pointer alignment */
                dp = (struct dirent64 *)((char *)dp + dp->d_reclen);
                assert(outcount <= total_bytes_wanted);
                l = nl;
        }

        res->rd_rddir.rddir_size = (long)outcount;
        if (outcount > 0) {
                /*
                 * have some entries
                 */
                res->rd_rddir.rddir_eof = (l == NULL);
                /* LINTED pointer alignment */
                res->rd_rddir.rddir_entries = (struct dirent64 *)outbuf;
                error = 0;
        } else {
                /*
                 * total_bytes_wanted is not large enough for one
                 * directory entry
                 */
                res->rd_rddir.rddir_eof = 0;
                res->rd_rddir.rddir_entries = NULL;
                free(outbuf);
                error = EIO;
        }
        return (error);

empty:
        res->rd_rddir.rddir_size = 0L;
        res->rd_rddir.rddir_eof = TRUE;
        res->rd_rddir.rddir_entries = NULL;
        return (error);
}

/*
 * add new entry to cache for 'map'
 */
static int
autofs_rddir_cache_enter(
        char *map,
        ulong_t bucket_size,
        struct autofs_rddir_cache **rdcpp)
{
        struct autofs_rddir_cache *p;

        assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));

        /*
         * Add to front of the list at this time
         */
        p = (struct autofs_rddir_cache *)malloc(sizeof (*p));
        if (p == NULL) {
                syslog(LOG_ERR,
                    "autofs_rddir_cache_enter: memory allocation failed\n");
                return (ENOMEM);
        }
        memset((char *)p, 0, sizeof (*p));

        p->map = malloc(strlen(map) + 1);
        if (p->map == NULL) {
                syslog(LOG_ERR,
                    "autofs_rddir_cache_enter: memory allocation failed\n");
                free(p);
                return (ENOMEM);
        }
        (void) strcpy(p->map, map);

        p->bucket_size = bucket_size;
        /*
         * no need to grab mutex lock since I haven't yet made the
         * node visible to the list
         */
        p->in_use = 1;
        (void) rwlock_init(&p->rwlock, USYNC_THREAD, NULL);
        (void) mutex_init(&p->lock, USYNC_THREAD, NULL);

        if (rddir_head == NULL)
                rddir_head = p;
        else {
                p->next = rddir_head;
                rddir_head = p;
        }
        *rdcpp = p;

        return (0);
}

/*
 * find 'map' in readdir cache
 */
int
autofs_rddir_cache_lookup(char *map, struct autofs_rddir_cache **rdcpp)
{
        struct autofs_rddir_cache *p;

        assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
        for (p = rddir_head; p != NULL; p = p->next) {
                if (strcmp(p->map, map) == 0) {
                        /*
                         * found matching entry
                         */
                        *rdcpp = p;
                        mutex_lock(&p->lock);
                        p->in_use++;
                        mutex_unlock(&p->lock);
                        return (0);
                }
        }
        /*
         * didn't find entry
         */
        return (ENOENT);
}
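
/*
 * Cache entries are reference counted through 'in_use': do_readdir()
 * holds a reference while it uses an entry, and the cleanup path leaves
 * entries alone while in_use is nonzero.
 */
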
/*
 * free the offset table
 */
static void
free_offset_tbl(struct off_tbl *head)
{
        struct off_tbl *p, *next = NULL;

        for (p = head; p != NULL; p = next) {
                next = p->next;
                free(p);
        }
}

/*
 * free the directory entries
 */
static void
free_dir_list(struct dir_entry *head)
{
        struct dir_entry *p, *next = NULL;

        for (p = head; p != NULL; p = next) {
                next = p->next;
                assert(p->name);
                free(p->name);
                free(p);
        }
}
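
/*
 * Release the memory held by a cache entry: its offset table, its
 * directory entry list, and the entry itself.  The caller must hold
 * autofs_rddir_cache_lock, and the entry must not be in use.
 */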
static void
autofs_rddir_cache_entry_free(struct autofs_rddir_cache *p)
{
        assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
        assert(!p->in_use);
        if (p->map)
                free(p->map);
        if (p->offtp)
                free_offset_tbl(p->offtp);
        if (p->entp)
                free_dir_list(p->entp);
        free(p);
}

/*
 * Remove entry from the rddircache;
 * the caller must own the autofs_rddir_cache_lock.
 */
static int
autofs_rddir_cache_delete(struct autofs_rddir_cache *rdcp)
{
        struct autofs_rddir_cache *p, *prev;

        assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
        /*
         * Search cache for entry
         */
        prev = NULL;
        for (p = rddir_head; p != NULL; p = p->next) {
                if (p == rdcp) {
                        /*
                         * entry found, remove from list if not in use
                         */
                        if (p->in_use)
                                return (EBUSY);
                        if (prev)
                                prev->next = p->next;
                        else
                                rddir_head = p->next;
                        autofs_rddir_cache_entry_free(p);
                        return (0);
                }
                prev = p;
        }
        syslog(LOG_ERR, "Couldn't find entry %x in cache\n", p);
        return (ENOENT);
}

/*
 * Return entry that matches name, NULL otherwise.
 * Assumes the reader's lock for this list has been grabbed.
 */
struct dir_entry *
rddir_entry_lookup(char *name, struct dir_entry *list)
{
        return (btree_lookup(list, name));
}
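
/*
 * Walk the dir_entry list handed back by getmapkeys() and assign each
 * entry an offset (cookie), starting at AUTOFS_DAEMONCOOKIE.  Every
 * 'bucket_size' offsets, a struct off_tbl node pointing into the list
 * is appended to rdcp->offtp so later lookups by offset only scan one
 * bucket.  Inode numbers are synthesized from an even-number sequence
 * (the odd numbers are presumably left to the kernel side).
 */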
static void
build_dir_entry_list(struct autofs_rddir_cache *rdcp, struct dir_entry *list)
{
        struct dir_entry *p;
        ulong_t offset = AUTOFS_DAEMONCOOKIE;
        ulong_t offset_list = AUTOFS_DAEMONCOOKIE;
        struct off_tbl *offtp, *last = NULL;
        ino64_t inonum = 4;

        assert(RW_LOCK_HELD(&rdcp->rwlock));
        assert(rdcp->entp == NULL);
        rdcp->entp = list;
        for (p = list; p != NULL; p = p->next) {
                p->nodeid = inonum;
                p->offset = offset;
                if (offset >= offset_list) {
                        /*
                         * add node to index table
                         */
                        offtp = (struct off_tbl *)
                            malloc(sizeof (struct off_tbl));
                        if (offtp != NULL) {
                                offtp->offset = offset;
                                offtp->first = p;
                                offtp->next = NULL;
                                offset_list += rdcp->bucket_size;
                        } else {
                                syslog(LOG_ERR,
                                    "WARNING: build_dir_entry_list: could not "
                                    "add offset to index table\n");
                                continue;
                        }
                        /*
                         * add to cache
                         */
                        if (rdcp->offtp == NULL)
                                rdcp->offtp = offtp;
                        else
                                last->next = offtp;
                        last = offtp;
                }
                offset++;
                inonum += 2;    /* use even numbers in daemon */
        }
        rdcp->full = 1;
}
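
/*
 * Synchronization for the cache cleanup thread: cleanup_start_cv is
 * signalled by threads that want the caches flushed right away, and
 * cleanup_done_cv is broadcast once a cleanup pass has finished (see
 * cache_cleanup() below).
 */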
mutex_t cleanup_lock;
cond_t cleanup_start_cv;
cond_t cleanup_done_cv;

/*
 * cache cleanup thread starting point
 */
void
cache_cleanup(void)
{
        timestruc_t reltime;
        struct autofs_rddir_cache *p, *next = NULL;
        int error;

        mutex_init(&cleanup_lock, USYNC_THREAD, NULL);
        cond_init(&cleanup_start_cv, USYNC_THREAD, NULL);
        cond_init(&cleanup_done_cv, USYNC_THREAD, NULL);

        mutex_lock(&cleanup_lock);
        for (;;) {
                reltime.tv_sec = RDDIR_CACHE_TIME/2;
                reltime.tv_nsec = 0;

                /*
                 * delay RDDIR_CACHE_TIME seconds, or until some other thread
                 * requests that I cleanup the caches
                 */
                if (error = cond_reltimedwait(
                    &cleanup_start_cv, &cleanup_lock, &reltime)) {
                        if (error != ETIME) {
                                if (trace > 1)
                                        trace_prt(1,
                                            "cleanup thread wakeup (%d)\n",
                                            error);
                                continue;
                        }
                }
                mutex_unlock(&cleanup_lock);

                /*
                 * Perform the cache cleanup
                 */
                rw_wrlock(&autofs_rddir_cache_lock);
                for (p = rddir_head; p != NULL; p = next) {
                        next = p->next;
                        if (p->in_use) {
                                /*
                                 * cache entry busy, skip it
                                 */
                                if (trace > 1) {
                                        trace_prt(1,
                                            "%s cache in use\n", p->map);
                                }
                                continue;
                        }
                        /*
                         * Cache entry is not in use, and nobody can grab a
                         * new reference since I'm holding the
                         * autofs_rddir_cache_lock
                         */

                        /*
                         * error will be zero if some thread signaled us asking
                         * that the caches be freed. In such case, free caches
                         * even if they're still valid and nobody is referencing
                         * them at this time. Otherwise, free caches only
                         * if their time to live (ttl) has expired.
                         */
                        if (error == ETIME && (p->ttl > time(NULL))) {
                                /*
                                 * Scheduled cache cleanup, if cache is still
                                 * valid don't free.
                                 */
                                if (trace > 1) {
                                        trace_prt(1,
                                            "%s cache still valid\n", p->map);
                                }
                                continue;
                        }
                        if (trace > 1)
                                trace_prt(1, "%s freeing cache\n", p->map);
                        error = autofs_rddir_cache_delete(p);
                }
                rw_unlock(&autofs_rddir_cache_lock);

                /*
                 * wakeup the thread/threads waiting for the
                 * cleanup to finish
                 */
                mutex_lock(&cleanup_lock);
                cond_broadcast(&cleanup_done_cv);