dmake: do not set MAKEFLAGS=k
[unleashed/tickless.git] / usr / src / cmd / fs.d / autofs / autod_readdir.c
blobb380488a7596826d86275a164cf7599e8e30ae77
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
27 * autod_readdir.c
#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <syslog.h>
#include <sys/types.h>
#include <sys/param.h>
#include <errno.h>
#include <pwd.h>
#include <locale.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <assert.h>
#include <fcntl.h>
#include "automount.h"
45 static void build_dir_entry_list(struct autofs_rddir_cache *rdcp,
46 struct dir_entry *list);
47 static int autofs_rddir_cache_enter(char *map, ulong_t bucket_size,
48 struct autofs_rddir_cache **rdcpp);
49 int autofs_rddir_cache_lookup(char *map, struct autofs_rddir_cache **rdcpp);
50 static int autofs_rddir_cache_delete(struct autofs_rddir_cache *rdcp);
51 static int create_dirents(struct autofs_rddir_cache *rdcp, ulong_t offset,
52 autofs_rddirres *res);
53 struct dir_entry *rddir_entry_lookup(char *name, struct dir_entry *list);
54 static void free_offset_tbl(struct off_tbl *head);
55 static void free_dir_list(struct dir_entry *head);
57 #define OFFSET_BUCKET_SIZE 100
59 rwlock_t autofs_rddir_cache_lock; /* readdir cache lock */
60 struct autofs_rddir_cache *rddir_head; /* readdir cache head */
62 int
63 do_readdir(autofs_rddirargs *rda, autofs_rddirres *rd)
65 struct dir_entry *list = NULL, *l;
66 struct autofs_rddir_cache *rdcp = NULL;
67 int error;
68 int cache_time = RDDIR_CACHE_TIME;
70 if (automountd_nobrowse) {
72 * Browsability was disabled return an empty list.
74 rd->rd_status = AUTOFS_OK;
75 rd->rd_rddir.rddir_size = 0;
76 rd->rd_rddir.rddir_eof = 1;
77 rd->rd_rddir.rddir_entries = NULL;
79 return (0);
82 rw_rdlock(&autofs_rddir_cache_lock);
83 error = autofs_rddir_cache_lookup(rda->rda_map, &rdcp);
84 if (error) {
85 rw_unlock(&autofs_rddir_cache_lock);
86 rw_wrlock(&autofs_rddir_cache_lock);
87 error = autofs_rddir_cache_lookup(rda->rda_map, &rdcp);
88 if (error) {
89 if (trace > 2)
90 trace_prt(1,
91 "map %s not found, adding...\n", rda->rda_map);
93 * entry doesn't exist, add it.
95 error = autofs_rddir_cache_enter(rda->rda_map,
96 OFFSET_BUCKET_SIZE, &rdcp);
99 rw_unlock(&autofs_rddir_cache_lock);
101 if (error)
102 return (error);
104 assert(rdcp != NULL);
105 assert(rdcp->in_use);
107 if (!rdcp->full) {
108 rw_wrlock(&rdcp->rwlock);
109 if (!rdcp->full) {
111 * cache entry hasn't been filled up, do it now.
113 char *stack[STACKSIZ];
114 char **stkptr;
117 * Initialize the stack of open files
118 * for this thread
120 stack_op(INIT, NULL, stack, &stkptr);
121 (void) getmapkeys(rda->rda_map, &list, &error,
122 &cache_time, stack, &stkptr, rda->uid);
123 if (!error)
124 build_dir_entry_list(rdcp, list);
125 else if (list) {
126 free_dir_list(list);
127 list = NULL;
130 } else
131 rw_rdlock(&rdcp->rwlock);
133 rd->rd_bufsize = rda->rda_count;
134 if (!error) {
135 error = create_dirents(rdcp, rda->rda_offset, rd);
136 if (error) {
137 if (rdcp->offtp) {
138 free_offset_tbl(rdcp->offtp);
139 rdcp->offtp = NULL;
141 if (rdcp->entp) {
142 free_dir_list(rdcp->entp);
143 rdcp->entp = NULL;
145 rdcp->full = 0;
146 list = NULL;
150 if (trace > 2) {
152 * print this list only once
154 for (l = list; l != NULL; l = l->next)
155 trace_prt(0, "%s\n", l->name);
156 trace_prt(0, "\n");
159 if (!error) {
160 rd->rd_status = AUTOFS_OK;
161 if (cache_time) {
163 * keep list of entries for up to
164 * 'cache_time' seconds
166 rdcp->ttl = time(NULL) + cache_time;
167 } else {
169 * the underlying name service indicated not
170 * to cache contents.
172 if (rdcp->offtp) {
173 free_offset_tbl(rdcp->offtp);
174 rdcp->offtp = NULL;
176 if (rdcp->entp) {
177 free_dir_list(rdcp->entp);
178 rdcp->entp = NULL;
180 rdcp->full = 0;
182 } else {
184 * return an empty list
186 rd->rd_rddir.rddir_size = 0;
187 rd->rd_rddir.rddir_eof = 1;
188 rd->rd_rddir.rddir_entries = NULL;
191 * Invalidate cache and set error
193 switch (error) {
194 case ENOENT:
195 rd->rd_status = AUTOFS_NOENT;
196 break;
197 case ENOMEM:
198 rd->rd_status = AUTOFS_NOMEM;
199 break;
200 default:
201 rd->rd_status = AUTOFS_ECOMM;
204 rw_unlock(&rdcp->rwlock);
206 mutex_lock(&rdcp->lock);
207 rdcp->in_use--;
208 mutex_unlock(&rdcp->lock);
210 assert(rdcp->in_use >= 0);
212 return (error);
215 #define roundtoint(x) (((x) + sizeof (int) - 1) & ~(sizeof (int) - 1))
216 #define DIRENT64_RECLEN(namelen) \
217 (((int)(((dirent64_t *)0)->d_name) + 1 + (namelen) + 7) & ~ 7)
219 static int
220 create_dirents(
221 struct autofs_rddir_cache *rdcp,
222 ulong_t offset,
223 autofs_rddirres *res)
225 uint_t total_bytes_wanted;
226 int bufsize;
227 ushort_t this_reclen;
228 int outcount = 0;
229 int namelen;
230 struct dir_entry *list = NULL, *l, *nl;
231 struct dirent64 *dp;
232 char *outbuf;
233 struct off_tbl *offtp, *next = NULL;
234 int this_bucket = 0;
235 int error = 0;
236 int x = 0, y = 0;
238 assert(RW_LOCK_HELD(&rdcp->rwlock));
239 for (offtp = rdcp->offtp; offtp != NULL; offtp = next) {
240 x++;
241 next = offtp->next;
242 this_bucket = (next == NULL);
243 if (!this_bucket)
244 this_bucket = (offset < next->offset);
245 if (this_bucket) {
247 * has to be in this bucket
249 assert(offset >= offtp->offset);
250 list = offtp->first;
251 break;
254 * loop to look in next bucket
258 for (l = list; l != NULL && l->offset < offset; l = l->next)
259 y++;
261 if (l == NULL) {
263 * reached end of directory
265 error = 0;
266 goto empty;
269 if (trace > 2)
270 trace_prt(1, "%s: offset searches (%d, %d)\n", rdcp->map, x, y);
272 total_bytes_wanted = res->rd_bufsize;
273 bufsize = total_bytes_wanted + sizeof (struct dirent64);
274 outbuf = malloc(bufsize);
275 if (outbuf == NULL) {
276 syslog(LOG_ERR, "memory allocation error\n");
277 error = ENOMEM;
278 goto empty;
280 memset(outbuf, 0, bufsize);
281 /* LINTED pointer alignment */
282 dp = (struct dirent64 *)outbuf;
284 while (l) {
285 nl = l->next;
286 namelen = strlen(l->name);
287 this_reclen = DIRENT64_RECLEN(namelen);
288 if (outcount + this_reclen > total_bytes_wanted) {
289 break;
291 dp->d_ino = (ino64_t)l->nodeid;
292 if (nl) {
294 * get the next elements offset
296 dp->d_off = (off64_t)nl->offset;
297 } else {
299 * This is the last element
300 * make offset one plus the current.
302 dp->d_off = (off64_t)l->offset + 1;
304 (void) strcpy(dp->d_name, l->name);
305 dp->d_reclen = (ushort_t)this_reclen;
306 outcount += dp->d_reclen;
307 dp = (struct dirent64 *)((int)dp + dp->d_reclen);
308 assert(outcount <= total_bytes_wanted);
309 l = l->next;
312 res->rd_rddir.rddir_size = (long)outcount;
313 if (outcount > 0) {
315 * have some entries
317 res->rd_rddir.rddir_eof = (l == NULL);
318 /* LINTED pointer alignment */
319 res->rd_rddir.rddir_entries = (struct dirent64 *)outbuf;
320 error = 0;
321 } else {
323 * total_bytes_wanted is not large enough for one
324 * directory entry
326 res->rd_rddir.rddir_eof = 0;
327 res->rd_rddir.rddir_entries = NULL;
328 free(outbuf);
329 error = EIO;
331 return (error);
333 empty:
334 res->rd_rddir.rddir_size = 0L;
335 res->rd_rddir.rddir_eof = TRUE;
336 res->rd_rddir.rddir_entries = NULL;
337 return (error);
342 * add new entry to cache for 'map'
344 static int
345 autofs_rddir_cache_enter(
346 char *map,
347 ulong_t bucket_size,
348 struct autofs_rddir_cache **rdcpp)
350 struct autofs_rddir_cache *p;
351 assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
354 * Add to front of the list at this time
356 p = (struct autofs_rddir_cache *)malloc(sizeof (*p));
357 if (p == NULL) {
358 syslog(LOG_ERR,
359 "autofs_rddir_cache_enter: memory allocation failed\n");
360 return (ENOMEM);
362 memset((char *)p, 0, sizeof (*p));
364 p->map = malloc(strlen(map) + 1);
365 if (p->map == NULL) {
366 syslog(LOG_ERR,
367 "autofs_rddir_cache_enter: memory allocation failed\n");
368 free(p);
369 return (ENOMEM);
371 strcpy(p->map, map);
373 p->bucket_size = bucket_size;
375 * no need to grab mutex lock since I haven't yet made the
376 * node visible to the list
378 p->in_use = 1;
379 (void) rwlock_init(&p->rwlock, USYNC_THREAD, NULL);
380 (void) mutex_init(&p->lock, USYNC_THREAD, NULL);
382 if (rddir_head == NULL)
383 rddir_head = p;
384 else {
385 p->next = rddir_head;
386 rddir_head = p;
388 *rdcpp = p;
390 return (0);
394 * find 'map' in readdir cache
397 autofs_rddir_cache_lookup(char *map, struct autofs_rddir_cache **rdcpp)
399 struct autofs_rddir_cache *p;
401 assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
402 for (p = rddir_head; p != NULL; p = p->next) {
403 if (strcmp(p->map, map) == 0) {
405 * found matching entry
407 *rdcpp = p;
408 mutex_lock(&p->lock);
409 p->in_use++;
410 mutex_unlock(&p->lock);
411 return (0);
415 * didn't find entry
417 return (ENOENT);
421 * free the offset table
423 static void
424 free_offset_tbl(struct off_tbl *head)
426 struct off_tbl *p, *next = NULL;
428 for (p = head; p != NULL; p = next) {
429 next = p->next;
430 free(p);
435 * free the directory entries
437 static void
438 free_dir_list(struct dir_entry *head)
440 struct dir_entry *p, *next = NULL;
442 for (p = head; p != NULL; p = next) {
443 next = p->next;
444 assert(p->name);
445 free(p->name);
446 free(p);
450 static void
451 autofs_rddir_cache_entry_free(struct autofs_rddir_cache *p)
453 assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
454 assert(!p->in_use);
455 free(p->map);
456 if (p->offtp)
457 free_offset_tbl(p->offtp);
458 if (p->entp)
459 free_dir_list(p->entp);
460 free(p);
464 * Remove entry from the rddircache
465 * the caller must own the autofs_rddir_cache_lock.
467 static int
468 autofs_rddir_cache_delete(struct autofs_rddir_cache *rdcp)
470 struct autofs_rddir_cache *p, *prev;
472 assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
474 * Search cache for entry
476 prev = NULL;
477 for (p = rddir_head; p != NULL; p = p->next) {
478 if (p == rdcp) {
480 * entry found, remove from list if not in use
482 if (p->in_use)
483 return (EBUSY);
484 if (prev)
485 prev->next = p->next;
486 else
487 rddir_head = p->next;
488 autofs_rddir_cache_entry_free(p);
489 return (0);
491 prev = p;
493 syslog(LOG_ERR, "Couldn't find entry %x in cache\n", p);
494 return (ENOENT);
/*
 * Return entry that matches name, NULL otherwise.
 * Assumes the readers lock for this list has been grabbed.
 */
struct dir_entry *
rddir_entry_lookup(char *name, struct dir_entry *list)
{
	return (btree_lookup(list, name));
}
507 static void
508 build_dir_entry_list(struct autofs_rddir_cache *rdcp, struct dir_entry *list)
510 struct dir_entry *p;
511 ulong_t offset = AUTOFS_DAEMONCOOKIE, offset_list = AUTOFS_DAEMONCOOKIE;
512 struct off_tbl *offtp, *last = NULL;
513 ino_t inonum = 4;
515 assert(RW_LOCK_HELD(&rdcp->rwlock));
516 assert(rdcp->entp == NULL);
517 rdcp->entp = list;
518 for (p = list; p != NULL; p = p->next) {
519 p->nodeid = inonum;
520 p->offset = offset;
521 if (offset >= offset_list) {
523 * add node to index table
525 offtp = (struct off_tbl *)
526 malloc(sizeof (struct off_tbl));
527 if (offtp != NULL) {
528 offtp->offset = offset;
529 offtp->first = p;
530 offtp->next = NULL;
531 offset_list += rdcp->bucket_size;
532 } else {
533 syslog(LOG_ERR,
534 "WARNING: build_dir_entry_list: could not add offset to index table\n");
535 continue;
538 * add to cache
540 if (rdcp->offtp == NULL)
541 rdcp->offtp = offtp;
542 else
543 last->next = offtp;
544 last = offtp;
546 offset++;
547 inonum += 2; /* use even numbers in daemon */
549 rdcp->full = 1;
552 mutex_t cleanup_lock;
553 cond_t cleanup_start_cv;
554 cond_t cleanup_done_cv;
557 * cache cleanup thread starting point
559 void
560 cache_cleanup(void)
562 timestruc_t reltime;
563 struct autofs_rddir_cache *p, *next = NULL;
564 int error;
566 mutex_init(&cleanup_lock, USYNC_THREAD, NULL);
567 cond_init(&cleanup_start_cv, USYNC_THREAD, NULL);
568 cond_init(&cleanup_done_cv, USYNC_THREAD, NULL);
570 mutex_lock(&cleanup_lock);
571 for (;;) {
572 reltime.tv_sec = RDDIR_CACHE_TIME/2;
573 reltime.tv_nsec = 0;
576 * delay RDDIR_CACHE_TIME seconds, or until some other thread
577 * requests that I cleanup the caches
579 if (error = cond_reltimedwait(
580 &cleanup_start_cv, &cleanup_lock, &reltime)) {
581 if (error != ETIME) {
582 if (trace > 1)
583 trace_prt(1,
584 "cleanup thread wakeup (%d)\n", error);
585 continue;
588 mutex_unlock(&cleanup_lock);
591 * Perform the cache cleanup
593 rw_wrlock(&autofs_rddir_cache_lock);
594 for (p = rddir_head; p != NULL; p = next) {
595 next = p->next;
596 if (p->in_use > 0) {
598 * cache entry busy, skip it
600 if (trace > 1) {
601 trace_prt(1,
602 "%s cache in use\n", p->map);
604 continue;
607 * Cache entry is not in use, and nobody can grab a
608 * new reference since I'm holding the
609 * autofs_rddir_cache_lock
613 * error will be zero if some thread signaled us asking
614 * that the caches be freed. In such case, free caches
615 * even if they're still valid and nobody is referencing
616 * them at this time. Otherwise, free caches only
617 * if their time to live (ttl) has expired.
619 if (error == ETIME && (p->ttl > time(NULL))) {
621 * Scheduled cache cleanup, if cache is still
622 * valid don't free.
624 if (trace > 1) {
625 trace_prt(1,
626 "%s cache still valid\n", p->map);
628 continue;
630 if (trace > 1)
631 trace_prt(1, "%s freeing cache\n", p->map);
632 assert(!p->in_use);
633 error = autofs_rddir_cache_delete(p);
634 assert(!error);
636 rw_unlock(&autofs_rddir_cache_lock);
639 * wakeup the thread/threads waiting for the
640 * cleanup to finish
642 mutex_lock(&cleanup_lock);
643 cond_broadcast(&cleanup_done_cv);
645 /* NOTREACHED */