/*
 *  linux/fs/ufs/ufs_dir.c
 *
 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
 * Laboratory for Computer Science Research Computing Facility
 * Rutgers, The State University of New Jersey
 *
 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
 *
 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
 *
 * Migration to usage of "page cache" on May 2006 by
 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
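/*
 * Directory contents are read and modified through the page cache.  Each
 * directory block ("chunk") of s_dirblksize bytes holds variable-length
 * struct ufs_dir_entry records; d_reclen links one record to the next and
 * also carries the slack space that is reused when new names are inserted.
 */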
/*
 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
 *
 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
 */
static inline int ufs_match(struct super_block *sb, int len,
		const unsigned char *name, struct ufs_dir_entry *de)
{
	if (len != ufs_get_de_namlen(sb, de))
		return 0;
	if (!de->d_ino)
		return 0;
	return !memcmp(name, de->d_name, len);
}
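/*
 * Finish a directory-chunk update started with ufs_prepare_chunk(): commit
 * the written range, grow i_size if the chunk extended the directory, and
 * either write the page out synchronously (IS_DIRSYNC directories) or just
 * unlock it.
 */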
static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;

	dir->i_version++;
	block_write_end(NULL, mapping, pos, len, len, page, NULL);
	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}
static inline void ufs_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}
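/*
 * Translate a name in the given directory into an inode number;
 * returns 0 if the name is not found.
 */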
ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
	ino_t res = 0;
	struct ufs_dir_entry *de;
	struct page *page;

	de = ufs_find_entry(dir, qstr, &page);
	if (de) {
		res = fs32_to_cpu(dir->i_sb, de->d_ino);
		ufs_put_page(page);
	}
	return res;
}
/* Releases the page */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
		  struct page *page, struct inode *inode,
		  bool update_times)
{
	loff_t pos = page_offset(page) +
			(char *) de - (char *) page_address(page);
	unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
	int err;

	lock_page(page);
	err = ufs_prepare_chunk(page, pos, len);
	BUG_ON(err);

	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
	ufs_set_de_type(dir->i_sb, de, inode->i_mode);

	err = ufs_commit_chunk(page, pos, len);
	ufs_put_page(page);
	if (update_times)
		dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}
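/*
 * Validate every entry in a freshly read directory page: each record must
 * be at least UFS_DIR_REC_LEN(1) bytes, aligned, large enough for its own
 * name, confined to a single chunk, and must reference an inode number
 * within this filesystem's inodes-per-group * number-of-groups limit.
 */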
static bool ufs_check_page(struct page *page)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	char *kaddr = page_address(page);
	unsigned offs, rec_len;
	unsigned limit = PAGE_SIZE;
	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
	struct ufs_dir_entry *p;
	char *error;

	if ((dir->i_size >> PAGE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_MASK;
		if (limit & chunk_mask)
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct ufs_dir_entry *)(kaddr + offs);
		rec_len = fs16_to_cpu(sb, p->d_reclen);

		if (rec_len < UFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
			goto Espan;
		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
						  UFS_SB(sb)->s_uspi->s_ncg))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	ufs_error(sb, "ufs_check_page",
		  "size of directory #%lu is not a multiple of chunk size",
		  dir->i_ino);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
		   "offset=%lu, rec_len=%d, name_len=%d",
		   dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
		   rec_len, ufs_get_de_namlen(sb, p));
	goto fail;
Eend:
	p = (struct ufs_dir_entry *)(kaddr + offs);
	ufs_error(sb, __func__,
		  "entry in directory #%lu spans the page boundary"
		  "offset=%lu",
		  dir->i_ino, (page->index<<PAGE_SHIFT)+offs);
fail:
	SetPageError(page);
	return false;
}
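/*
 * Read directory page 'n' through the page cache, map it and run
 * ufs_check_page() on first use.  On failure the page is released and
 * ERR_PTR(-EIO) is returned; callers drop successful pages with
 * ufs_put_page().
 */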
static struct page *ufs_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (unlikely(!PageChecked(page))) {
			if (PageError(page) || !ufs_check_page(page))
				goto fail;
		}
	}
	return page;

fail:
	ufs_put_page(page);
	return ERR_PTR(-EIO);
}
/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_SHIFT;
	if (last_byte > PAGE_SIZE)
		last_byte = PAGE_SIZE;
	return last_byte;
}
static inline struct ufs_dir_entry *
ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
{
	return (struct ufs_dir_entry *)((char *)p +
					fs16_to_cpu(sb, p->d_reclen));
}
struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
{
	struct page *page = ufs_get_page(dir, 0);
	struct ufs_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = ufs_next_entry(dir->i_sb,
				    (struct ufs_dir_entry *)page_address(page));
		*p = page;
	}
	return de;
}
/*
 *	ufs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_dir). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
				     struct page **res_page)
{
	struct super_block *sb = dir->i_sb;
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct ufs_inode_info *ui = UFS_I(dir);
	struct ufs_dir_entry *de;

	UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);

	if (npages == 0 || namelen > UFS_MAXNAMLEN)
		goto out;

	*res_page = NULL;

	start = ui->i_dir_start_lookup;

	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ufs_get_page(dir, n);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (struct ufs_dir_entry *) kaddr;
			kaddr += ufs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (ufs_match(sb, namelen, name, de))
					goto found;
				de = ufs_next_entry(sb, de);
			}
			ufs_put_page(page);
		}
		if (++n >= npages)
			n = 0;
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	ui->i_dir_start_lookup = n;
	return de;
}
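/*
 * Insert a new name into the directory: scan every chunk for either an
 * unused entry (d_ino == 0) whose record is big enough, or a live entry
 * whose d_reclen has enough slack after its own name to be split in two.
 * If nothing fits, the directory is extended by one chunk past i_size.
 */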
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block *sb = dir->i_sb;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct ufs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	loff_t pos;
	int err;

	UFSD("ENTER, name %s, namelen %u\n", name, namelen);

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = ufs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + ufs_last_byte(dir, n);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += PAGE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->d_reclen = cpu_to_fs16(sb, chunk_size);
				de->d_ino = 0;
				goto got_it;
			}
			if (de->d_reclen == 0) {
				ufs_error(dir->i_sb, __func__,
					  "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ufs_match(sb, namelen, name, de))
				goto out_unlock;
			name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
			rec_len = fs16_to_cpu(sb, de->d_reclen);
			if (!de->d_ino && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct ufs_dir_entry *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ufs_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) +
			(char*)de - (char*)page_address(page);
	err = ufs_prepare_chunk(page, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->d_ino) {
		struct ufs_dir_entry *de1 =
			(struct ufs_dir_entry *) ((char *) de + name_len);
		de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
		de->d_reclen = cpu_to_fs16(sb, name_len);

		de = de1;
	}

	ufs_set_de_namlen(sb, de, namelen);
	memcpy(de->d_name, name, namelen + 1);
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);

	err = ufs_commit_chunk(page, pos, rec_len);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;

	mark_inode_dirty(dir);
out_put:
	ufs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}
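/*
 * If the directory changed under a readdir in progress, the saved offset
 * may point into the middle of an entry.  Walk forward from the start of
 * the enclosing chunk to the first record boundary at or past the offset.
 */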
static inline unsigned
ufs_validate_entry(struct super_block *sb, char *base,
		   unsigned offset, unsigned mask)
{
	struct ufs_dir_entry *de = (struct ufs_dir_entry *)(base + offset);
	struct ufs_dir_entry *p = (struct ufs_dir_entry *)(base + (offset&mask));
	while ((char*)p < (char*)de)
		p = ufs_next_entry(sb, p);
	return (char *)p - base;
}
/*
 * This is blatantly stolen from ext2fs
 */
static int
ufs_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	int need_revalidate = file->f_version != inode->i_version;
	unsigned flags = UFS_SB(sb)->s_flags;

	UFSD("BEGIN\n");

	if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct ufs_dir_entry *de;

		struct page *page = ufs_get_page(inode, n);

		if (IS_ERR(page)) {
			ufs_error(sb, __func__,
				  "bad page in #%lu",
				  inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return -EIO;
		}
		kaddr = page_address(page);
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
				ctx->pos = (n<<PAGE_SHIFT) + offset;
			}
			file->f_version = inode->i_version;
			need_revalidate = 0;
		}
		de = (struct ufs_dir_entry *)(kaddr+offset);
		limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
			if (de->d_ino) {
				unsigned char d_type = DT_UNKNOWN;

				UFSD("filldir(%s,%u)\n", de->d_name,
				      fs32_to_cpu(sb, de->d_ino));
				UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));

				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
					d_type = de->d_u.d_44.d_type;

				if (!dir_emit(ctx, de->d_name,
					      ufs_get_de_namlen(sb, de),
					      fs32_to_cpu(sb, de->d_ino),
					      d_type)) {
					ufs_put_page(page);
					return 0;
				}
			}
			ctx->pos += fs16_to_cpu(sb, de->d_reclen);
		}
		ufs_put_page(page);
	}
	return 0;
}
/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry.
 */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		     struct page *page)
{
	struct super_block *sb = inode->i_sb;
	char *kaddr = page_address(page);
	unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
	loff_t pos;
	struct ufs_dir_entry *pde = NULL;
	struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
	int err;

	UFSD("ENTER\n");

	UFSD("ino %u, reclen %u, namlen %u, name %s\n",
	      fs32_to_cpu(sb, de->d_ino),
	      fs16_to_cpu(sb, de->d_reclen),
	      ufs_get_de_namlen(sb, de), de->d_name);

	while ((char*)de < (char*)dir) {
		if (de->d_reclen == 0) {
			ufs_error(inode->i_sb, __func__,
				  "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ufs_next_entry(sb, de);
	}
	if (pde)
		from = (char*)pde - (char*)page_address(page);

	pos = page_offset(page) + from;
	lock_page(page);
	err = ufs_prepare_chunk(page, pos, to - from);
	BUG_ON(err);
	if (pde)
		pde->d_reclen = cpu_to_fs16(sb, to - from);
	dir->d_ino = 0;
	err = ufs_commit_chunk(page, pos, to - from);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	ufs_put_page(page);
	UFSD("EXIT\n");
	return err;
}
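/*
 * Initialise a freshly created directory: write a single chunk containing
 * the "." entry for the new inode and a ".." entry pointing back at the
 * parent, with ".." absorbing the rest of the chunk via its d_reclen.
 */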
int ufs_make_empty(struct inode * inode, struct inode *dir)
{
	struct super_block * sb = dir->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	struct ufs_dir_entry * de;
	char *base;
	int err;

	if (!page)
		return -ENOMEM;

	err = ufs_prepare_chunk(page, 0, chunk_size);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kmap(page);
	base = (char*)page_address(page);
	memset(base, 0, PAGE_SIZE);

	de = (struct ufs_dir_entry *) base;

	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);
	ufs_set_de_namlen(sb, de, 1);
	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
	strcpy (de->d_name, ".");
	de = (struct ufs_dir_entry *)
		((char *)de + fs16_to_cpu(sb, de->d_reclen));
	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
	ufs_set_de_type(sb, de, dir->i_mode);
	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
	ufs_set_de_namlen(sb, de, 2);
	strcpy (de->d_name, "..");
	kunmap(page);

	err = ufs_commit_chunk(page, 0, chunk_size);
fail:
	put_page(page);
	return err;
}
/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ufs_empty_dir(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct ufs_dir_entry *de;
		page = ufs_get_page(inode, i);

		if (IS_ERR(page))
			continue;

		kaddr = page_address(page);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->d_reclen == 0) {
				ufs_error(inode->i_sb, __func__,
					"zero-length directory entry: "
					"kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->d_ino) {
				u16 namelen = ufs_get_de_namlen(sb, de);
				/* check for . and .. */
				if (de->d_name[0] != '.')
					goto not_empty;
				if (namelen > 2)
					goto not_empty;
				if (namelen < 2) {
					if (inode->i_ino !=
					    fs32_to_cpu(sb, de->d_ino))
						goto not_empty;
				} else if (de->d_name[1] != '.')
					goto not_empty;
			}
			de = ufs_next_entry(sb, de);
		}
		ufs_put_page(page);
	}
	return 1;

not_empty:
	ufs_put_page(page);
	return 0;
}
const struct file_operations ufs_dir_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= ufs_readdir,
	.fsync		= generic_file_fsync,
	.llseek		= generic_file_llseek,
};