/*
 * [scrape provenance — not part of the original source]
 * net: rose: restore old recvmsg behavior
 * [linux/fpc-iii.git] / fs / jfs / jfs_dtree.c
 * blob 9f7c758761de3ced6ce9828773a2a74cc86a9824
 */
1 /*
2 * Copyright (C) International Business Machines Corp., 2000-2004
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 * jfs_dtree.c: directory B+-tree manager
22 * B+-tree with variable length key directory:
24 * each directory page is structured as an array of 32-byte
25 * directory entry slots initialized as a freelist
26 * to avoid search/compaction of free space at insertion.
27 * when an entry is inserted, a number of slots are allocated
28 * from the freelist as required to store variable length data
29 * of the entry; when the entry is deleted, slots of the entry
30 * are returned to freelist.
32 * leaf entry stores full name as key and file serial number
33 * (aka inode number) as data.
34 * internal/router entry stores sufffix compressed name
35 * as key and simple extent descriptor as data.
37 * each directory page maintains a sorted entry index table
38 * which stores the start slot index of sorted entries
39 * to allow binary search on the table.
41 * directory starts as a root/leaf page in on-disk inode
42 * inline data area.
43 * when it becomes full, it starts a leaf of a external extent
44 * of length of 1 block. each time the first leaf becomes full,
45 * it is extended rather than split (its size is doubled),
46 * until its length becoms 4 KBytes, from then the extent is split
47 * with new 4 Kbyte extent when it becomes full
48 * to reduce external fragmentation of small directories.
50 * blah, blah, blah, for linear scan of directory in pieces by
51 * readdir().
54 * case-insensitive directory file system
56 * names are stored in case-sensitive way in leaf entry.
57 * but stored, searched and compared in case-insensitive (uppercase) order
58 * (i.e., both search key and entry key are folded for search/compare):
59 * (note that case-sensitive order is BROKEN in storage, e.g.,
60 * sensitive: Ad, aB, aC, aD -> insensitive: aB, aC, aD, Ad
62 * entries which folds to the same key makes up a equivalent class
63 * whose members are stored as contiguous cluster (may cross page boundary)
64 * but whose order is arbitrary and acts as duplicate, e.g.,
65 * abc, Abc, aBc, abC)
67 * once match is found at leaf, requires scan forward/backward
68 * either for, in case-insensitive search, duplicate
69 * or for, in case-sensitive search, for exact match
71 * router entry must be created/stored in case-insensitive way
72 * in internal entry:
73 * (right most key of left page and left most key of right page
74 * are folded, and its suffix compression is propagated as router
75 * key in parent)
76 * (e.g., if split occurs <abc> and <aBd>, <ABD> trather than <aB>
77 * should be made the router key for the split)
79 * case-insensitive search:
81 * fold search key;
83 * case-insensitive search of B-tree:
84 * for internal entry, router key is already folded;
85 * for leaf entry, fold the entry key before comparison.
87 * if (leaf entry case-insensitive match found)
88 * if (next entry satisfies case-insensitive match)
89 * return EDUPLICATE;
90 * if (prev entry satisfies case-insensitive match)
91 * return EDUPLICATE;
92 * return match;
93 * else
94 * return no match;
96 * serialization:
97 * target directory inode lock is being held on entry/exit
98 * of all main directory service routines.
100 * log based recovery:
103 #include <linux/fs.h>
104 #include <linux/quotaops.h>
105 #include <linux/slab.h>
106 #include "jfs_incore.h"
107 #include "jfs_superblock.h"
108 #include "jfs_filsys.h"
109 #include "jfs_metapage.h"
110 #include "jfs_dmap.h"
111 #include "jfs_unicode.h"
112 #include "jfs_debug.h"
114 /* dtree split parameter */
115 struct dtsplit {
116 struct metapage *mp;
117 s16 index;
118 s16 nslot;
119 struct component_name *key;
120 ddata_t *data;
121 struct pxdlist *pxdlist;
#define DT_PAGE(IP, MP) BT_PAGE(IP, MP, dtpage_t, i_dtroot)

/* get page buffer for specified block address */
/*
 * Pins the dtree page at block <BN> into <MP>/<P>.  After a successful
 * read the page header is sanity-checked; a corrupt page is unpinned,
 * the filesystem is marked in error, and RC is set to -EIO.
 */
#define DT_GETPAGE(IP, BN, MP, SIZE, P, RC)\
{\
    BT_GETPAGE(IP, BN, MP, dtpage_t, SIZE, P, RC, i_dtroot)\
    if (!(RC))\
    {\
        if (((P)->header.nextindex > (((BN)==0)?DTROOTMAXSLOT:(P)->header.maxslot)) ||\
            ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT)))\
        {\
            BT_PUTPAGE(MP);\
            jfs_error((IP)->i_sb, "DT_GETPAGE: dtree page corrupt");\
            MP = NULL;\
            RC = -EIO;\
        }\
    }\
}

/* for consistency */
#define DT_PUTPAGE(MP) BT_PUTPAGE(MP)

#define DT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
    BT_GETSEARCH(IP, LEAF, BN, MP, dtpage_t, P, INDEX, i_dtroot)
150 * forward references
152 static int dtSplitUp(tid_t tid, struct inode *ip,
153 struct dtsplit * split, struct btstack * btstack);
155 static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
156 struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rxdp);
158 static int dtExtendPage(tid_t tid, struct inode *ip,
159 struct dtsplit * split, struct btstack * btstack);
161 static int dtSplitRoot(tid_t tid, struct inode *ip,
162 struct dtsplit * split, struct metapage ** rmpp);
164 static int dtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
165 dtpage_t * fp, struct btstack * btstack);
167 static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p);
169 static int dtReadFirst(struct inode *ip, struct btstack * btstack);
171 static int dtReadNext(struct inode *ip,
172 loff_t * offset, struct btstack * btstack);
174 static int dtCompare(struct component_name * key, dtpage_t * p, int si);
176 static int ciCompare(struct component_name * key, dtpage_t * p, int si,
177 int flag);
179 static void dtGetKey(dtpage_t * p, int i, struct component_name * key,
180 int flag);
182 static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
183 int ri, struct component_name * key, int flag);
185 static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
186 ddata_t * data, struct dt_lock **);
188 static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
189 struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
190 int do_index);
192 static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock);
194 static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock);
196 static void dtLinelockFreelist(dtpage_t * p, int m, struct dt_lock ** dtlock);
198 #define ciToUpper(c) UniStrupr((c)->name)
201 * read_index_page()
203 * Reads a page of a directory's index table.
204 * Having metadata mapped into the directory inode's address space
205 * presents a multitude of problems. We avoid this by mapping to
206 * the absolute address space outside of the *_metapage routines
208 static struct metapage *read_index_page(struct inode *inode, s64 blkno)
210 int rc;
211 s64 xaddr;
212 int xflag;
213 s32 xlen;
215 rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
216 if (rc || (xaddr == 0))
217 return NULL;
219 return read_metapage(inode, xaddr, PSIZE, 1);
223 * get_index_page()
225 * Same as get_index_page(), but get's a new page without reading
227 static struct metapage *get_index_page(struct inode *inode, s64 blkno)
229 int rc;
230 s64 xaddr;
231 int xflag;
232 s32 xlen;
234 rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
235 if (rc || (xaddr == 0))
236 return NULL;
238 return get_metapage(inode, xaddr, PSIZE, 1);
242 * find_index()
244 * Returns dtree page containing directory table entry for specified
245 * index and pointer to its entry.
247 * mp must be released by caller.
249 static struct dir_table_slot *find_index(struct inode *ip, u32 index,
250 struct metapage ** mp, s64 *lblock)
252 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
253 s64 blkno;
254 s64 offset;
255 int page_offset;
256 struct dir_table_slot *slot;
257 static int maxWarnings = 10;
259 if (index < 2) {
260 if (maxWarnings) {
261 jfs_warn("find_entry called with index = %d", index);
262 maxWarnings--;
264 return NULL;
267 if (index >= jfs_ip->next_index) {
268 jfs_warn("find_entry called with index >= next_index");
269 return NULL;
272 if (jfs_dirtable_inline(ip)) {
274 * Inline directory table
276 *mp = NULL;
277 slot = &jfs_ip->i_dirtable[index - 2];
278 } else {
279 offset = (index - 2) * sizeof(struct dir_table_slot);
280 page_offset = offset & (PSIZE - 1);
281 blkno = ((offset + 1) >> L2PSIZE) <<
282 JFS_SBI(ip->i_sb)->l2nbperpage;
284 if (*mp && (*lblock != blkno)) {
285 release_metapage(*mp);
286 *mp = NULL;
288 if (!(*mp)) {
289 *lblock = blkno;
290 *mp = read_index_page(ip, blkno);
292 if (!(*mp)) {
293 jfs_err("free_index: error reading directory table");
294 return NULL;
297 slot =
298 (struct dir_table_slot *) ((char *) (*mp)->data +
299 page_offset);
301 return slot;
304 static inline void lock_index(tid_t tid, struct inode *ip, struct metapage * mp,
305 u32 index)
307 struct tlock *tlck;
308 struct linelock *llck;
309 struct lv *lv;
311 tlck = txLock(tid, ip, mp, tlckDATA);
312 llck = (struct linelock *) tlck->lock;
314 if (llck->index >= llck->maxcnt)
315 llck = txLinelock(llck);
316 lv = &llck->lv[llck->index];
319 * Linelock slot size is twice the size of directory table
320 * slot size. 512 entries per page.
322 lv->offset = ((index - 2) & 511) >> 1;
323 lv->length = 1;
324 llck->index++;
328 * add_index()
330 * Adds an entry to the directory index table. This is used to provide
331 * each directory entry with a persistent index in which to resume
332 * directory traversals
334 static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
336 struct super_block *sb = ip->i_sb;
337 struct jfs_sb_info *sbi = JFS_SBI(sb);
338 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
339 u64 blkno;
340 struct dir_table_slot *dirtab_slot;
341 u32 index;
342 struct linelock *llck;
343 struct lv *lv;
344 struct metapage *mp;
345 s64 offset;
346 uint page_offset;
347 struct tlock *tlck;
348 s64 xaddr;
350 ASSERT(DO_INDEX(ip));
352 if (jfs_ip->next_index < 2) {
353 jfs_warn("add_index: next_index = %d. Resetting!",
354 jfs_ip->next_index);
355 jfs_ip->next_index = 2;
358 index = jfs_ip->next_index++;
360 if (index <= MAX_INLINE_DIRTABLE_ENTRY) {
362 * i_size reflects size of index table, or 8 bytes per entry.
364 ip->i_size = (loff_t) (index - 1) << 3;
367 * dir table fits inline within inode
369 dirtab_slot = &jfs_ip->i_dirtable[index-2];
370 dirtab_slot->flag = DIR_INDEX_VALID;
371 dirtab_slot->slot = slot;
372 DTSaddress(dirtab_slot, bn);
374 set_cflag(COMMIT_Dirtable, ip);
376 return index;
378 if (index == (MAX_INLINE_DIRTABLE_ENTRY + 1)) {
379 struct dir_table_slot temp_table[12];
382 * It's time to move the inline table to an external
383 * page and begin to build the xtree
385 if (dquot_alloc_block(ip, sbi->nbperpage))
386 goto clean_up;
387 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
388 dquot_free_block(ip, sbi->nbperpage);
389 goto clean_up;
393 * Save the table, we're going to overwrite it with the
394 * xtree root
396 memcpy(temp_table, &jfs_ip->i_dirtable, sizeof(temp_table));
399 * Initialize empty x-tree
401 xtInitRoot(tid, ip);
404 * Add the first block to the xtree
406 if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
407 /* This really shouldn't fail */
408 jfs_warn("add_index: xtInsert failed!");
409 memcpy(&jfs_ip->i_dirtable, temp_table,
410 sizeof (temp_table));
411 dbFree(ip, xaddr, sbi->nbperpage);
412 dquot_free_block(ip, sbi->nbperpage);
413 goto clean_up;
415 ip->i_size = PSIZE;
417 mp = get_index_page(ip, 0);
418 if (!mp) {
419 jfs_err("add_index: get_metapage failed!");
420 xtTruncate(tid, ip, 0, COMMIT_PWMAP);
421 memcpy(&jfs_ip->i_dirtable, temp_table,
422 sizeof (temp_table));
423 goto clean_up;
425 tlck = txLock(tid, ip, mp, tlckDATA);
426 llck = (struct linelock *) & tlck->lock;
427 ASSERT(llck->index == 0);
428 lv = &llck->lv[0];
430 lv->offset = 0;
431 lv->length = 6; /* tlckDATA slot size is 16 bytes */
432 llck->index++;
434 memcpy(mp->data, temp_table, sizeof(temp_table));
436 mark_metapage_dirty(mp);
437 release_metapage(mp);
440 * Logging is now directed by xtree tlocks
442 clear_cflag(COMMIT_Dirtable, ip);
445 offset = (index - 2) * sizeof(struct dir_table_slot);
446 page_offset = offset & (PSIZE - 1);
447 blkno = ((offset + 1) >> L2PSIZE) << sbi->l2nbperpage;
448 if (page_offset == 0) {
450 * This will be the beginning of a new page
452 xaddr = 0;
453 if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) {
454 jfs_warn("add_index: xtInsert failed!");
455 goto clean_up;
457 ip->i_size += PSIZE;
459 if ((mp = get_index_page(ip, blkno)))
460 memset(mp->data, 0, PSIZE); /* Just looks better */
461 else
462 xtTruncate(tid, ip, offset, COMMIT_PWMAP);
463 } else
464 mp = read_index_page(ip, blkno);
466 if (!mp) {
467 jfs_err("add_index: get/read_metapage failed!");
468 goto clean_up;
471 lock_index(tid, ip, mp, index);
473 dirtab_slot =
474 (struct dir_table_slot *) ((char *) mp->data + page_offset);
475 dirtab_slot->flag = DIR_INDEX_VALID;
476 dirtab_slot->slot = slot;
477 DTSaddress(dirtab_slot, bn);
479 mark_metapage_dirty(mp);
480 release_metapage(mp);
482 return index;
484 clean_up:
486 jfs_ip->next_index--;
488 return 0;
492 * free_index()
494 * Marks an entry to the directory index table as free.
496 static void free_index(tid_t tid, struct inode *ip, u32 index, u32 next)
498 struct dir_table_slot *dirtab_slot;
499 s64 lblock;
500 struct metapage *mp = NULL;
502 dirtab_slot = find_index(ip, index, &mp, &lblock);
504 if (!dirtab_slot)
505 return;
507 dirtab_slot->flag = DIR_INDEX_FREE;
508 dirtab_slot->slot = dirtab_slot->addr1 = 0;
509 dirtab_slot->addr2 = cpu_to_le32(next);
511 if (mp) {
512 lock_index(tid, ip, mp, index);
513 mark_metapage_dirty(mp);
514 release_metapage(mp);
515 } else
516 set_cflag(COMMIT_Dirtable, ip);
520 * modify_index()
522 * Changes an entry in the directory index table
524 static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn,
525 int slot, struct metapage ** mp, s64 *lblock)
527 struct dir_table_slot *dirtab_slot;
529 dirtab_slot = find_index(ip, index, mp, lblock);
531 if (!dirtab_slot)
532 return;
534 DTSaddress(dirtab_slot, bn);
535 dirtab_slot->slot = slot;
537 if (*mp) {
538 lock_index(tid, ip, *mp, index);
539 mark_metapage_dirty(*mp);
540 } else
541 set_cflag(COMMIT_Dirtable, ip);
545 * read_index()
547 * reads a directory table slot
549 static int read_index(struct inode *ip, u32 index,
550 struct dir_table_slot * dirtab_slot)
552 s64 lblock;
553 struct metapage *mp = NULL;
554 struct dir_table_slot *slot;
556 slot = find_index(ip, index, &mp, &lblock);
557 if (!slot) {
558 return -EIO;
561 memcpy(dirtab_slot, slot, sizeof(struct dir_table_slot));
563 if (mp)
564 release_metapage(mp);
566 return 0;
570 * dtSearch()
572 * function:
573 * Search for the entry with specified key
575 * parameter:
577 * return: 0 - search result on stack, leaf page pinned;
578 * errno - I/O error
580 int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
581 struct btstack * btstack, int flag)
583 int rc = 0;
584 int cmp = 1; /* init for empty page */
585 s64 bn;
586 struct metapage *mp;
587 dtpage_t *p;
588 s8 *stbl;
589 int base, index, lim;
590 struct btframe *btsp;
591 pxd_t *pxd;
592 int psize = 288; /* initial in-line directory */
593 ino_t inumber;
594 struct component_name ciKey;
595 struct super_block *sb = ip->i_sb;
597 ciKey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t), GFP_NOFS);
598 if (!ciKey.name) {
599 rc = -ENOMEM;
600 goto dtSearch_Exit2;
604 /* uppercase search key for c-i directory */
605 UniStrcpy(ciKey.name, key->name);
606 ciKey.namlen = key->namlen;
608 /* only uppercase if case-insensitive support is on */
609 if ((JFS_SBI(sb)->mntflag & JFS_OS2) == JFS_OS2) {
610 ciToUpper(&ciKey);
612 BT_CLR(btstack); /* reset stack */
614 /* init level count for max pages to split */
615 btstack->nsplit = 1;
618 * search down tree from root:
620 * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
621 * internal page, child page Pi contains entry with k, Ki <= K < Kj.
623 * if entry with search key K is not found
624 * internal page search find the entry with largest key Ki
625 * less than K which point to the child page to search;
626 * leaf page search find the entry with smallest key Kj
627 * greater than K so that the returned index is the position of
628 * the entry to be shifted right for insertion of new entry.
629 * for empty tree, search key is greater than any key of the tree.
631 * by convention, root bn = 0.
633 for (bn = 0;;) {
634 /* get/pin the page to search */
635 DT_GETPAGE(ip, bn, mp, psize, p, rc);
636 if (rc)
637 goto dtSearch_Exit1;
639 /* get sorted entry table of the page */
640 stbl = DT_GETSTBL(p);
643 * binary search with search key K on the current page.
645 for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
646 index = base + (lim >> 1);
648 if (p->header.flag & BT_LEAF) {
649 /* uppercase leaf name to compare */
650 cmp =
651 ciCompare(&ciKey, p, stbl[index],
652 JFS_SBI(sb)->mntflag);
653 } else {
654 /* router key is in uppercase */
656 cmp = dtCompare(&ciKey, p, stbl[index]);
660 if (cmp == 0) {
662 * search hit
664 /* search hit - leaf page:
665 * return the entry found
667 if (p->header.flag & BT_LEAF) {
668 inumber = le32_to_cpu(
669 ((struct ldtentry *) & p->slot[stbl[index]])->inumber);
672 * search for JFS_LOOKUP
674 if (flag == JFS_LOOKUP) {
675 *data = inumber;
676 rc = 0;
677 goto out;
681 * search for JFS_CREATE
683 if (flag == JFS_CREATE) {
684 *data = inumber;
685 rc = -EEXIST;
686 goto out;
690 * search for JFS_REMOVE or JFS_RENAME
692 if ((flag == JFS_REMOVE ||
693 flag == JFS_RENAME) &&
694 *data != inumber) {
695 rc = -ESTALE;
696 goto out;
700 * JFS_REMOVE|JFS_FINDDIR|JFS_RENAME
702 /* save search result */
703 *data = inumber;
704 btsp = btstack->top;
705 btsp->bn = bn;
706 btsp->index = index;
707 btsp->mp = mp;
709 rc = 0;
710 goto dtSearch_Exit1;
713 /* search hit - internal page:
714 * descend/search its child page
716 goto getChild;
719 if (cmp > 0) {
720 base = index + 1;
721 --lim;
726 * search miss
728 * base is the smallest index with key (Kj) greater than
729 * search key (K) and may be zero or (maxindex + 1) index.
732 * search miss - leaf page
734 * return location of entry (base) where new entry with
735 * search key K is to be inserted.
737 if (p->header.flag & BT_LEAF) {
739 * search for JFS_LOOKUP, JFS_REMOVE, or JFS_RENAME
741 if (flag == JFS_LOOKUP || flag == JFS_REMOVE ||
742 flag == JFS_RENAME) {
743 rc = -ENOENT;
744 goto out;
748 * search for JFS_CREATE|JFS_FINDDIR:
750 * save search result
752 *data = 0;
753 btsp = btstack->top;
754 btsp->bn = bn;
755 btsp->index = base;
756 btsp->mp = mp;
758 rc = 0;
759 goto dtSearch_Exit1;
763 * search miss - internal page
765 * if base is non-zero, decrement base by one to get the parent
766 * entry of the child page to search.
768 index = base ? base - 1 : base;
771 * go down to child page
773 getChild:
774 /* update max. number of pages to split */
775 if (BT_STACK_FULL(btstack)) {
776 /* Something's corrupted, mark filesystem dirty so
777 * chkdsk will fix it.
779 jfs_error(sb, "stack overrun in dtSearch!");
780 BT_STACK_DUMP(btstack);
781 rc = -EIO;
782 goto out;
784 btstack->nsplit++;
786 /* push (bn, index) of the parent page/entry */
787 BT_PUSH(btstack, bn, index);
789 /* get the child page block number */
790 pxd = (pxd_t *) & p->slot[stbl[index]];
791 bn = addressPXD(pxd);
792 psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
794 /* unpin the parent page */
795 DT_PUTPAGE(mp);
798 out:
799 DT_PUTPAGE(mp);
801 dtSearch_Exit1:
803 kfree(ciKey.name);
805 dtSearch_Exit2:
807 return rc;
812 * dtInsert()
814 * function: insert an entry to directory tree
816 * parameter:
818 * return: 0 - success;
819 * errno - failure;
821 int dtInsert(tid_t tid, struct inode *ip,
822 struct component_name * name, ino_t * fsn, struct btstack * btstack)
824 int rc = 0;
825 struct metapage *mp; /* meta-page buffer */
826 dtpage_t *p; /* base B+-tree index page */
827 s64 bn;
828 int index;
829 struct dtsplit split; /* split information */
830 ddata_t data;
831 struct dt_lock *dtlck;
832 int n;
833 struct tlock *tlck;
834 struct lv *lv;
837 * retrieve search result
839 * dtSearch() returns (leaf page pinned, index at which to insert).
840 * n.b. dtSearch() may return index of (maxindex + 1) of
841 * the full page.
843 DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
846 * insert entry for new key
848 if (DO_INDEX(ip)) {
849 if (JFS_IP(ip)->next_index == DIREND) {
850 DT_PUTPAGE(mp);
851 return -EMLINK;
853 n = NDTLEAF(name->namlen);
854 data.leaf.tid = tid;
855 data.leaf.ip = ip;
856 } else {
857 n = NDTLEAF_LEGACY(name->namlen);
858 data.leaf.ip = NULL; /* signifies legacy directory format */
860 data.leaf.ino = *fsn;
863 * leaf page does not have enough room for new entry:
865 * extend/split the leaf page;
867 * dtSplitUp() will insert the entry and unpin the leaf page.
869 if (n > p->header.freecnt) {
870 split.mp = mp;
871 split.index = index;
872 split.nslot = n;
873 split.key = name;
874 split.data = &data;
875 rc = dtSplitUp(tid, ip, &split, btstack);
876 return rc;
880 * leaf page does have enough room for new entry:
882 * insert the new data entry into the leaf page;
884 BT_MARK_DIRTY(mp, ip);
886 * acquire a transaction lock on the leaf page
888 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
889 dtlck = (struct dt_lock *) & tlck->lock;
890 ASSERT(dtlck->index == 0);
891 lv = & dtlck->lv[0];
893 /* linelock header */
894 lv->offset = 0;
895 lv->length = 1;
896 dtlck->index++;
898 dtInsertEntry(p, index, name, &data, &dtlck);
900 /* linelock stbl of non-root leaf page */
901 if (!(p->header.flag & BT_ROOT)) {
902 if (dtlck->index >= dtlck->maxcnt)
903 dtlck = (struct dt_lock *) txLinelock(dtlck);
904 lv = & dtlck->lv[dtlck->index];
905 n = index >> L2DTSLOTSIZE;
906 lv->offset = p->header.stblindex + n;
907 lv->length =
908 ((p->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
909 dtlck->index++;
912 /* unpin the leaf page */
913 DT_PUTPAGE(mp);
915 return 0;
920 * dtSplitUp()
922 * function: propagate insertion bottom up;
924 * parameter:
926 * return: 0 - success;
927 * errno - failure;
928 * leaf page unpinned;
930 static int dtSplitUp(tid_t tid,
931 struct inode *ip, struct dtsplit * split, struct btstack * btstack)
933 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
934 int rc = 0;
935 struct metapage *smp;
936 dtpage_t *sp; /* split page */
937 struct metapage *rmp;
938 dtpage_t *rp; /* new right page split from sp */
939 pxd_t rpxd; /* new right page extent descriptor */
940 struct metapage *lmp;
941 dtpage_t *lp; /* left child page */
942 int skip; /* index of entry of insertion */
943 struct btframe *parent; /* parent page entry on traverse stack */
944 s64 xaddr, nxaddr;
945 int xlen, xsize;
946 struct pxdlist pxdlist;
947 pxd_t *pxd;
948 struct component_name key = { 0, NULL };
949 ddata_t *data = split->data;
950 int n;
951 struct dt_lock *dtlck;
952 struct tlock *tlck;
953 struct lv *lv;
954 int quota_allocation = 0;
956 /* get split page */
957 smp = split->mp;
958 sp = DT_PAGE(ip, smp);
960 key.name = kmalloc((JFS_NAME_MAX + 2) * sizeof(wchar_t), GFP_NOFS);
961 if (!key.name) {
962 DT_PUTPAGE(smp);
963 rc = -ENOMEM;
964 goto dtSplitUp_Exit;
968 * split leaf page
970 * The split routines insert the new entry, and
971 * acquire txLock as appropriate.
974 * split root leaf page:
976 if (sp->header.flag & BT_ROOT) {
978 * allocate a single extent child page
980 xlen = 1;
981 n = sbi->bsize >> L2DTSLOTSIZE;
982 n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */
983 n -= DTROOTMAXSLOT - sp->header.freecnt; /* header + entries */
984 if (n <= split->nslot)
985 xlen++;
986 if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr))) {
987 DT_PUTPAGE(smp);
988 goto freeKeyName;
991 pxdlist.maxnpxd = 1;
992 pxdlist.npxd = 0;
993 pxd = &pxdlist.pxd[0];
994 PXDaddress(pxd, xaddr);
995 PXDlength(pxd, xlen);
996 split->pxdlist = &pxdlist;
997 rc = dtSplitRoot(tid, ip, split, &rmp);
999 if (rc)
1000 dbFree(ip, xaddr, xlen);
1001 else
1002 DT_PUTPAGE(rmp);
1004 DT_PUTPAGE(smp);
1006 if (!DO_INDEX(ip))
1007 ip->i_size = xlen << sbi->l2bsize;
1009 goto freeKeyName;
1013 * extend first leaf page
1015 * extend the 1st extent if less than buffer page size
1016 * (dtExtendPage() reurns leaf page unpinned)
1018 pxd = &sp->header.self;
1019 xlen = lengthPXD(pxd);
1020 xsize = xlen << sbi->l2bsize;
1021 if (xsize < PSIZE) {
1022 xaddr = addressPXD(pxd);
1023 n = xsize >> L2DTSLOTSIZE;
1024 n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */
1025 if ((n + sp->header.freecnt) <= split->nslot)
1026 n = xlen + (xlen << 1);
1027 else
1028 n = xlen;
1030 /* Allocate blocks to quota. */
1031 rc = dquot_alloc_block(ip, n);
1032 if (rc)
1033 goto extendOut;
1034 quota_allocation += n;
1036 if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
1037 (s64) n, &nxaddr)))
1038 goto extendOut;
1040 pxdlist.maxnpxd = 1;
1041 pxdlist.npxd = 0;
1042 pxd = &pxdlist.pxd[0];
1043 PXDaddress(pxd, nxaddr)
1044 PXDlength(pxd, xlen + n);
1045 split->pxdlist = &pxdlist;
1046 if ((rc = dtExtendPage(tid, ip, split, btstack))) {
1047 nxaddr = addressPXD(pxd);
1048 if (xaddr != nxaddr) {
1049 /* free relocated extent */
1050 xlen = lengthPXD(pxd);
1051 dbFree(ip, nxaddr, (s64) xlen);
1052 } else {
1053 /* free extended delta */
1054 xlen = lengthPXD(pxd) - n;
1055 xaddr = addressPXD(pxd) + xlen;
1056 dbFree(ip, xaddr, (s64) n);
1058 } else if (!DO_INDEX(ip))
1059 ip->i_size = lengthPXD(pxd) << sbi->l2bsize;
1062 extendOut:
1063 DT_PUTPAGE(smp);
1064 goto freeKeyName;
1068 * split leaf page <sp> into <sp> and a new right page <rp>.
1070 * return <rp> pinned and its extent descriptor <rpxd>
1073 * allocate new directory page extent and
1074 * new index page(s) to cover page split(s)
1076 * allocation hint: ?
1078 n = btstack->nsplit;
1079 pxdlist.maxnpxd = pxdlist.npxd = 0;
1080 xlen = sbi->nbperpage;
1081 for (pxd = pxdlist.pxd; n > 0; n--, pxd++) {
1082 if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr)) == 0) {
1083 PXDaddress(pxd, xaddr);
1084 PXDlength(pxd, xlen);
1085 pxdlist.maxnpxd++;
1086 continue;
1089 DT_PUTPAGE(smp);
1091 /* undo allocation */
1092 goto splitOut;
1095 split->pxdlist = &pxdlist;
1096 if ((rc = dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd))) {
1097 DT_PUTPAGE(smp);
1099 /* undo allocation */
1100 goto splitOut;
1103 if (!DO_INDEX(ip))
1104 ip->i_size += PSIZE;
1107 * propagate up the router entry for the leaf page just split
1109 * insert a router entry for the new page into the parent page,
1110 * propagate the insert/split up the tree by walking back the stack
1111 * of (bn of parent page, index of child page entry in parent page)
1112 * that were traversed during the search for the page that split.
1114 * the propagation of insert/split up the tree stops if the root
1115 * splits or the page inserted into doesn't have to split to hold
1116 * the new entry.
1118 * the parent entry for the split page remains the same, and
1119 * a new entry is inserted at its right with the first key and
1120 * block number of the new right page.
1122 * There are a maximum of 4 pages pinned at any time:
1123 * two children, left parent and right parent (when the parent splits).
1124 * keep the child pages pinned while working on the parent.
1125 * make sure that all pins are released at exit.
1127 while ((parent = BT_POP(btstack)) != NULL) {
1128 /* parent page specified by stack frame <parent> */
1130 /* keep current child pages (<lp>, <rp>) pinned */
1131 lmp = smp;
1132 lp = sp;
1135 * insert router entry in parent for new right child page <rp>
1137 /* get the parent page <sp> */
1138 DT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
1139 if (rc) {
1140 DT_PUTPAGE(lmp);
1141 DT_PUTPAGE(rmp);
1142 goto splitOut;
1146 * The new key entry goes ONE AFTER the index of parent entry,
1147 * because the split was to the right.
1149 skip = parent->index + 1;
1152 * compute the key for the router entry
1154 * key suffix compression:
1155 * for internal pages that have leaf pages as children,
1156 * retain only what's needed to distinguish between
1157 * the new entry and the entry on the page to its left.
1158 * If the keys compare equal, retain the entire key.
1160 * note that compression is performed only at computing
1161 * router key at the lowest internal level.
1162 * further compression of the key between pairs of higher
1163 * level internal pages loses too much information and
1164 * the search may fail.
1165 * (e.g., two adjacent leaf pages of {a, ..., x} {xx, ...,}
1166 * results in two adjacent parent entries (a)(xx).
1167 * if split occurs between these two entries, and
1168 * if compression is applied, the router key of parent entry
1169 * of right page (x) will divert search for x into right
1170 * subtree and miss x in the left subtree.)
1172 * the entire key must be retained for the next-to-leftmost
1173 * internal key at any level of the tree, or search may fail
1174 * (e.g., ?)
1176 switch (rp->header.flag & BT_TYPE) {
1177 case BT_LEAF:
1179 * compute the length of prefix for suffix compression
1180 * between last entry of left page and first entry
1181 * of right page
1183 if ((sp->header.flag & BT_ROOT && skip > 1) ||
1184 sp->header.prev != 0 || skip > 1) {
1185 /* compute uppercase router prefix key */
1186 rc = ciGetLeafPrefixKey(lp,
1187 lp->header.nextindex-1,
1188 rp, 0, &key,
1189 sbi->mntflag);
1190 if (rc) {
1191 DT_PUTPAGE(lmp);
1192 DT_PUTPAGE(rmp);
1193 DT_PUTPAGE(smp);
1194 goto splitOut;
1196 } else {
1197 /* next to leftmost entry of
1198 lowest internal level */
1200 /* compute uppercase router key */
1201 dtGetKey(rp, 0, &key, sbi->mntflag);
1202 key.name[key.namlen] = 0;
1204 if ((sbi->mntflag & JFS_OS2) == JFS_OS2)
1205 ciToUpper(&key);
1208 n = NDTINTERNAL(key.namlen);
1209 break;
1211 case BT_INTERNAL:
1212 dtGetKey(rp, 0, &key, sbi->mntflag);
1213 n = NDTINTERNAL(key.namlen);
1214 break;
1216 default:
1217 jfs_err("dtSplitUp(): UFO!");
1218 break;
1221 /* unpin left child page */
1222 DT_PUTPAGE(lmp);
1225 * compute the data for the router entry
1227 data->xd = rpxd; /* child page xd */
1230 * parent page is full - split the parent page
1232 if (n > sp->header.freecnt) {
1233 /* init for parent page split */
1234 split->mp = smp;
1235 split->index = skip; /* index at insert */
1236 split->nslot = n;
1237 split->key = &key;
1238 /* split->data = data; */
1240 /* unpin right child page */
1241 DT_PUTPAGE(rmp);
1243 /* The split routines insert the new entry,
1244 * acquire txLock as appropriate.
1245 * return <rp> pinned and its block number <rbn>.
1247 rc = (sp->header.flag & BT_ROOT) ?
1248 dtSplitRoot(tid, ip, split, &rmp) :
1249 dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd);
1250 if (rc) {
1251 DT_PUTPAGE(smp);
1252 goto splitOut;
1255 /* smp and rmp are pinned */
1258 * parent page is not full - insert router entry in parent page
1260 else {
1261 BT_MARK_DIRTY(smp, ip);
1263 * acquire a transaction lock on the parent page
1265 tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
1266 dtlck = (struct dt_lock *) & tlck->lock;
1267 ASSERT(dtlck->index == 0);
1268 lv = & dtlck->lv[0];
1270 /* linelock header */
1271 lv->offset = 0;
1272 lv->length = 1;
1273 dtlck->index++;
1275 /* linelock stbl of non-root parent page */
1276 if (!(sp->header.flag & BT_ROOT)) {
1277 lv++;
1278 n = skip >> L2DTSLOTSIZE;
1279 lv->offset = sp->header.stblindex + n;
1280 lv->length =
1281 ((sp->header.nextindex -
1282 1) >> L2DTSLOTSIZE) - n + 1;
1283 dtlck->index++;
1286 dtInsertEntry(sp, skip, &key, data, &dtlck);
1288 /* exit propagate up */
1289 break;
1293 /* unpin current split and its right page */
1294 DT_PUTPAGE(smp);
1295 DT_PUTPAGE(rmp);
1298 * free remaining extents allocated for split
1300 splitOut:
1301 n = pxdlist.npxd;
1302 pxd = &pxdlist.pxd[n];
1303 for (; n < pxdlist.maxnpxd; n++, pxd++)
1304 dbFree(ip, addressPXD(pxd), (s64) lengthPXD(pxd));
1306 freeKeyName:
1307 kfree(key.name);
1309 /* Rollback quota allocation */
1310 if (rc && quota_allocation)
1311 dquot_free_block(ip, quota_allocation);
1313 dtSplitUp_Exit:
1315 return rc;
1320 * dtSplitPage()
1322 * function: Split a non-root page of a btree.
1324 * parameter:
1326 * return: 0 - success;
1327 * errno - failure;
1328 * return split and new page pinned;
/*
 * dtSplitPage(): split a full non-root dtpage into the original (left)
 * page and a newly allocated right sibling page, then insert the new
 * entry into whichever side the insertion index <split->index> falls on.
 *
 * The right page's extent comes from the caller-preallocated pxd list
 * in <split->pxdlist>; quota is charged for it here and the caller is
 * expected to roll back on failure (see dtSplitUp's splitOut path).
 *
 * Fast path: when appending at the tail of the rightmost page
 * (nextbn == 0 and index == nextindex) the new entry simply becomes the
 * first entry of the fresh right page — no data is moved.
 *
 * return: 0 on success with split page and new right page pinned;
 *         *rmpp/*rpp/*rpxdp describe the new right page.
 */
1330 static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
1331 struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rpxdp)
1333 int rc = 0;
1334 struct metapage *smp;
1335 dtpage_t *sp;
1336 struct metapage *rmp;
1337 dtpage_t *rp; /* new right page allocated */
1338 s64 rbn; /* new right page block number */
1339 struct metapage *mp;
1340 dtpage_t *p;
1341 s64 nextbn;
1342 struct pxdlist *pxdlist;
1343 pxd_t *pxd;
1344 int skip, nextindex, half, left, nxt, off, si;
1345 struct ldtentry *ldtentry;
1346 struct idtentry *idtentry;
1347 u8 *stbl;
1348 struct dtslot *f;
1349 int fsi, stblsize;
1350 int n;
1351 struct dt_lock *sdtlck, *rdtlck;
1352 struct tlock *tlck;
1353 struct dt_lock *dtlck;
1354 struct lv *slv, *rlv, *lv;
1356 /* get split page */
1357 smp = split->mp;
1358 sp = DT_PAGE(ip, smp);
1361 * allocate the new right page for the split
1363 pxdlist = split->pxdlist;
1364 pxd = &pxdlist->pxd[pxdlist->npxd];
1365 pxdlist->npxd++;
1366 rbn = addressPXD(pxd);
1367 rmp = get_metapage(ip, rbn, PSIZE, 1);
1368 if (rmp == NULL)
1369 return -EIO;
1371 /* Allocate blocks to quota. */
1372 rc = dquot_alloc_block(ip, lengthPXD(pxd));
1373 if (rc) {
1374 release_metapage(rmp);
1375 return rc;
1378 jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
1380 BT_MARK_DIRTY(rmp, ip);
1382 * acquire a transaction lock on the new right page
1384 tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
1385 rdtlck = (struct dt_lock *) & tlck->lock;
1387 rp = (dtpage_t *) rmp->data;
1388 *rpp = rp;
1389 rp->header.self = *pxd;
1391 BT_MARK_DIRTY(smp, ip);
1393 * acquire a transaction lock on the split page
1395 * action:
1397 tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
1398 sdtlck = (struct dt_lock *) & tlck->lock;
1400 /* linelock header of split page */
1401 ASSERT(sdtlck->index == 0);
1402 slv = & sdtlck->lv[0];
1403 slv->offset = 0;
1404 slv->length = 1;
1405 sdtlck->index++;
1408 * initialize/update sibling pointers between sp and rp
1410 nextbn = le64_to_cpu(sp->header.next);
1411 rp->header.next = cpu_to_le64(nextbn);
1412 rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
1413 sp->header.next = cpu_to_le64(rbn);
1416 * initialize new right page
1418 rp->header.flag = sp->header.flag;
1420 /* compute sorted entry table at start of extent data area */
1421 rp->header.nextindex = 0;
1422 rp->header.stblindex = 1;
1424 n = PSIZE >> L2DTSLOTSIZE;
1425 rp->header.maxslot = n;
1426 stblsize = (n + 31) >> L2DTSLOTSIZE; /* in unit of slot */
1428 /* init freelist */
1429 fsi = rp->header.stblindex + stblsize;
1430 rp->header.freelist = fsi;
1431 rp->header.freecnt = rp->header.maxslot - fsi;
1434 * sequential append at tail: append without split
1436 * If splitting the last page on a level because of appending
1437 * a entry to it (skip is maxentry), it's likely that the access is
1438 * sequential. Adding an empty page on the side of the level is less
1439 * work and can push the fill factor much higher than normal.
1440 * If we're wrong it's no big deal, we'll just do the split the right
1441 * way next time.
1442 * (It may look like it's equally easy to do a similar hack for
1443 * reverse sorted data, that is, split the tree left,
1444 * but it's not. Be my guest.)
1446 if (nextbn == 0 && split->index == sp->header.nextindex) {
1447 /* linelock header + stbl (first slot) of new page */
1448 rlv = & rdtlck->lv[rdtlck->index];
1449 rlv->offset = 0;
1450 rlv->length = 2;
1451 rdtlck->index++;
1454 * initialize freelist of new right page
1456 f = &rp->slot[fsi];
1457 for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
1458 f->next = fsi;
1459 f->next = -1;
1461 /* insert entry at the first entry of the new right page */
1462 dtInsertEntry(rp, 0, split->key, split->data, &rdtlck);
1464 goto out;
1468 * non-sequential insert (at possibly middle page)
1472 * update prev pointer of previous right sibling page;
1474 if (nextbn != 0) {
1475 DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
1476 if (rc) {
1477 discard_metapage(rmp);
1478 return rc;
1481 BT_MARK_DIRTY(mp, ip);
1483 * acquire a transaction lock on the next page
1485 tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
1486 jfs_info("dtSplitPage: tlck = 0x%p, ip = 0x%p, mp=0x%p",
1487 tlck, ip, mp);
1488 dtlck = (struct dt_lock *) & tlck->lock;
1490 /* linelock header of previous right sibling page */
1491 lv = & dtlck->lv[dtlck->index];
1492 lv->offset = 0;
1493 lv->length = 1;
1494 dtlck->index++;
1496 p->header.prev = cpu_to_le64(rbn);
1498 DT_PUTPAGE(mp);
1502 * split the data between the split and right pages.
1504 skip = split->index;
1505 half = (PSIZE >> L2DTSLOTSIZE) >> 1; /* swag */
1506 left = 0;
1509 * compute fill factor for split pages
1511 * <nxt> traces the next entry to move to rp
1512 * <off> traces the next entry to stay in sp
1514 stbl = (u8 *) & sp->slot[sp->header.stblindex];
1515 nextindex = sp->header.nextindex;
1516 for (nxt = off = 0; nxt < nextindex; ++off) {
1517 if (off == skip)
1518 /* check for fill factor with new entry size */
1519 n = split->nslot;
1520 else {
1521 si = stbl[nxt];
1522 switch (sp->header.flag & BT_TYPE) {
1523 case BT_LEAF:
1524 ldtentry = (struct ldtentry *) & sp->slot[si];
1525 if (DO_INDEX(ip))
1526 n = NDTLEAF(ldtentry->namlen);
1527 else
1528 n = NDTLEAF_LEGACY(ldtentry->
1529 namlen);
1530 break;
1532 case BT_INTERNAL:
1533 idtentry = (struct idtentry *) & sp->slot[si];
1534 n = NDTINTERNAL(idtentry->namlen);
1535 break;
1537 default:
1538 break;
1541 ++nxt; /* advance to next entry to move in sp */
1544 left += n;
1545 if (left >= half)
1546 break;
1549 /* <nxt> points to the 1st entry to move */
1552 * move entries to right page
1554 * dtMoveEntry() initializes rp and reserves entry for insertion
1556 * split page moved out entries are linelocked;
1557 * new/right page moved in entries are linelocked;
1559 /* linelock header + stbl of new right page */
1560 rlv = & rdtlck->lv[rdtlck->index];
1561 rlv->offset = 0;
1562 rlv->length = 5;
1563 rdtlck->index++;
1565 dtMoveEntry(sp, nxt, rp, &sdtlck, &rdtlck, DO_INDEX(ip));
1567 sp->header.nextindex = nxt;
1570 * finalize freelist of new right page
1572 fsi = rp->header.freelist;
1573 f = &rp->slot[fsi];
1574 for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
1575 f->next = fsi;
1576 f->next = -1;
1579 * Update directory index table for entries now in right page
1581 if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
1582 s64 lblock;
1584 mp = NULL;
1585 stbl = DT_GETSTBL(rp);
1586 for (n = 0; n < rp->header.nextindex; n++) {
1587 ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
1588 modify_index(tid, ip, le32_to_cpu(ldtentry->index),
1589 rbn, n, &mp, &lblock);
1591 if (mp)
1592 release_metapage(mp);
1596 * the skipped index was on the left page,
1598 if (skip <= off) {
1599 /* insert the new entry in the split page */
1600 dtInsertEntry(sp, skip, split->key, split->data, &sdtlck);
1602 /* linelock stbl of split page */
1603 if (sdtlck->index >= sdtlck->maxcnt)
1604 sdtlck = (struct dt_lock *) txLinelock(sdtlck);
1605 slv = & sdtlck->lv[sdtlck->index];
1606 n = skip >> L2DTSLOTSIZE;
1607 slv->offset = sp->header.stblindex + n;
1608 slv->length =
1609 ((sp->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
1610 sdtlck->index++;
1613 * the skipped index was on the right page,
1615 else {
1616 /* adjust the skip index to reflect the new position */
1617 skip -= nxt;
1619 /* insert the new entry in the right page */
1620 dtInsertEntry(rp, skip, split->key, split->data, &rdtlck);
1623 out:
1624 *rmpp = rmp;
1625 *rpxdp = *pxd;
1627 return rc;
1632 * dtExtendPage()
1634 * function: extend 1st/only directory leaf page
1636 * parameter:
1638 * return: 0 - success;
1639 * errno - failure;
1640 * return extended page pinned;
/*
 * dtExtendPage(): grow the first/only directory leaf page instead of
 * splitting it — either in place (extent address unchanged, tlckEXTEND)
 * or by relocating the page to a new, larger extent (tlckNEW, with the
 * old extent queued for free via a maplock).  The sorted-entry table is
 * copied to the start of the newly added area, the freelist is rebuilt
 * to cover both the old stbl region and the new free region, the new
 * entry is inserted, and the parent/root's pxd for this child is
 * updated to describe the extended extent.
 *
 * return: 0 on success with the extended page pinned; errno on failure
 *         to read the parent page.
 */
1642 static int dtExtendPage(tid_t tid,
1643 struct inode *ip, struct dtsplit * split, struct btstack * btstack)
1645 struct super_block *sb = ip->i_sb;
1646 int rc;
1647 struct metapage *smp, *pmp, *mp;
1648 dtpage_t *sp, *pp;
1649 struct pxdlist *pxdlist;
1650 pxd_t *pxd, *tpxd;
1651 int xlen, xsize;
1652 int newstblindex, newstblsize;
1653 int oldstblindex, oldstblsize;
1654 int fsi, last;
1655 struct dtslot *f;
1656 struct btframe *parent;
1657 int n;
1658 struct dt_lock *dtlck;
1659 s64 xaddr, txaddr;
1660 struct tlock *tlck;
1661 struct pxd_lock *pxdlock;
1662 struct lv *lv;
1663 uint type;
1664 struct ldtentry *ldtentry;
1665 u8 *stbl;
1667 /* get page to extend */
1668 smp = split->mp;
1669 sp = DT_PAGE(ip, smp);
1671 /* get parent/root page */
1672 parent = BT_POP(btstack);
1673 DT_GETPAGE(ip, parent->bn, pmp, PSIZE, pp, rc);
1674 if (rc)
1675 return (rc);
1678 * extend the extent
1680 pxdlist = split->pxdlist;
1681 pxd = &pxdlist->pxd[pxdlist->npxd];
1682 pxdlist->npxd++;
1684 xaddr = addressPXD(pxd);
1685 tpxd = &sp->header.self;
1686 txaddr = addressPXD(tpxd);
1687 /* in-place extension */
1688 if (xaddr == txaddr) {
1689 type = tlckEXTEND;
1691 /* relocation */
1692 else {
1693 type = tlckNEW;
1695 /* save moved extent descriptor for later free */
1696 tlck = txMaplock(tid, ip, tlckDTREE | tlckRELOCATE);
1697 pxdlock = (struct pxd_lock *) & tlck->lock;
1698 pxdlock->flag = mlckFREEPXD;
1699 pxdlock->pxd = sp->header.self;
1700 pxdlock->index = 1;
1703 * Update directory index table to reflect new page address
1705 if (DO_INDEX(ip)) {
1706 s64 lblock;
1708 mp = NULL;
1709 stbl = DT_GETSTBL(sp);
1710 for (n = 0; n < sp->header.nextindex; n++) {
1711 ldtentry =
1712 (struct ldtentry *) & sp->slot[stbl[n]];
1713 modify_index(tid, ip,
1714 le32_to_cpu(ldtentry->index),
1715 xaddr, n, &mp, &lblock);
1717 if (mp)
1718 release_metapage(mp);
1723 * extend the page
1725 sp->header.self = *pxd;
1727 jfs_info("dtExtendPage: ip:0x%p smp:0x%p sp:0x%p", ip, smp, sp);
1729 BT_MARK_DIRTY(smp, ip);
1731 * acquire a transaction lock on the extended/leaf page
1733 tlck = txLock(tid, ip, smp, tlckDTREE | type);
1734 dtlck = (struct dt_lock *) & tlck->lock;
1735 lv = & dtlck->lv[0];
1737 /* update buffer extent descriptor of extended page */
1738 xlen = lengthPXD(pxd);
1739 xsize = xlen << JFS_SBI(sb)->l2bsize;
1742 * copy old stbl to new stbl at start of extended area
1744 oldstblindex = sp->header.stblindex;
1745 oldstblsize = (sp->header.maxslot + 31) >> L2DTSLOTSIZE;
1746 newstblindex = sp->header.maxslot;
1747 n = xsize >> L2DTSLOTSIZE;
1748 newstblsize = (n + 31) >> L2DTSLOTSIZE;
1749 memcpy(&sp->slot[newstblindex], &sp->slot[oldstblindex],
1750 sp->header.nextindex);
1753 * in-line extension: linelock old area of extended page
1755 if (type == tlckEXTEND) {
1756 /* linelock header */
1757 lv->offset = 0;
1758 lv->length = 1;
1759 dtlck->index++;
1760 lv++;
1762 /* linelock new stbl of extended page */
1763 lv->offset = newstblindex;
1764 lv->length = newstblsize;
1767 * relocation: linelock whole relocated area
1769 else {
1770 lv->offset = 0;
1771 lv->length = sp->header.maxslot + newstblsize;
1774 dtlck->index++;
1776 sp->header.maxslot = n;
1777 sp->header.stblindex = newstblindex;
1778 /* sp->header.nextindex remains the same */
1781 * add old stbl region at head of freelist
1783 fsi = oldstblindex;
1784 f = &sp->slot[fsi];
1785 last = sp->header.freelist;
1786 for (n = 0; n < oldstblsize; n++, fsi++, f++) {
1787 f->next = last;
1788 last = fsi;
1790 sp->header.freelist = last;
1791 sp->header.freecnt += oldstblsize;
1794 * append free region of newly extended area at tail of freelist
1796 /* init free region of newly extended area */
1797 fsi = n = newstblindex + newstblsize;
1798 f = &sp->slot[fsi];
1799 for (fsi++; fsi < sp->header.maxslot; f++, fsi++)
1800 f->next = fsi;
1801 f->next = -1;
1803 /* append new free region at tail of old freelist */
1804 fsi = sp->header.freelist;
1805 if (fsi == -1)
1806 sp->header.freelist = n;
1807 else {
1808 do {
1809 f = &sp->slot[fsi];
1810 fsi = f->next;
1811 } while (fsi != -1);
1813 f->next = n;
1816 sp->header.freecnt += sp->header.maxslot - n;
1819 * insert the new entry
1821 dtInsertEntry(sp, split->index, split->key, split->data, &dtlck);
1823 BT_MARK_DIRTY(pmp, ip);
1825 * linelock any freeslots residing in old extent
1827 if (type == tlckEXTEND) {
1828 n = sp->header.maxslot >> 2;
1829 if (sp->header.freelist < n)
1830 dtLinelockFreelist(sp, n, &dtlck);
1834 * update parent entry on the parent/root page
1837 * acquire a transaction lock on the parent/root page
1839 tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
1840 dtlck = (struct dt_lock *) & tlck->lock;
1841 lv = & dtlck->lv[dtlck->index];
1843 /* linelock parent entry - 1st slot */
1844 lv->offset = 1;
1845 lv->length = 1;
1846 dtlck->index++;
1848 /* update the parent pxd for page extension */
1849 tpxd = (pxd_t *) & pp->slot[1];
1850 *tpxd = *pxd;
1852 DT_PUTPAGE(pmp);
1853 return 0;
1858 * dtSplitRoot()
1860 * function:
1861 * split the full root page into
1862 * original/root/split page and new right page
1863 * i.e., root remains fixed in tree anchor (inode) and
1864 * the root is copied to a single new right child page
1865 * since root page << non-root page, and
1866 * the split root page contains a single entry for the
1867 * new right child page.
1869 * parameter:
1871 * return: 0 - success;
1872 * errno - failure;
1873 * return new page pinned;
/*
 * dtSplitRoot(): split the full in-inode root page by copying its
 * entire contents (stbl + data area) into a newly allocated right
 * child page, inserting the new entry there, and then resetting the
 * root to hold a single router entry whose pxd points at that child.
 * The root thus stays anchored in the inode while the tree grows one
 * level.  The first split may allocate only one or two blocks for the
 * child; later splits allocate a full page (see N.B. below).
 *
 * return: 0 on success with the new right child page pinned in *rmpp;
 *         -EIO / quota errno on allocation failure.
 */
1875 static int dtSplitRoot(tid_t tid,
1876 struct inode *ip, struct dtsplit * split, struct metapage ** rmpp)
1878 struct super_block *sb = ip->i_sb;
1879 struct metapage *smp;
1880 dtroot_t *sp;
1881 struct metapage *rmp;
1882 dtpage_t *rp;
1883 s64 rbn;
1884 int xlen;
1885 int xsize;
1886 struct dtslot *f;
1887 s8 *stbl;
1888 int fsi, stblsize, n;
1889 struct idtentry *s;
1890 pxd_t *ppxd;
1891 struct pxdlist *pxdlist;
1892 pxd_t *pxd;
1893 struct dt_lock *dtlck;
1894 struct tlock *tlck;
1895 struct lv *lv;
1896 int rc;
1898 /* get split root page */
1899 smp = split->mp;
1900 sp = &JFS_IP(ip)->i_dtroot;
1903 * allocate/initialize a single (right) child page
1905 * N.B. at first split, a one (or two) block to fit new entry
1906 * is allocated; at subsequent split, a full page is allocated;
1908 pxdlist = split->pxdlist;
1909 pxd = &pxdlist->pxd[pxdlist->npxd];
1910 pxdlist->npxd++;
1911 rbn = addressPXD(pxd);
1912 xlen = lengthPXD(pxd);
1913 xsize = xlen << JFS_SBI(sb)->l2bsize;
1914 rmp = get_metapage(ip, rbn, xsize, 1);
1915 if (!rmp)
1916 return -EIO;
1918 rp = rmp->data;
1920 /* Allocate blocks to quota. */
1921 rc = dquot_alloc_block(ip, lengthPXD(pxd));
1922 if (rc) {
1923 release_metapage(rmp);
1924 return rc;
1927 BT_MARK_DIRTY(rmp, ip);
1929 * acquire a transaction lock on the new right page
1931 tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
1932 dtlck = (struct dt_lock *) & tlck->lock;
1934 rp->header.flag =
1935 (sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
1936 rp->header.self = *pxd;
1938 /* initialize sibling pointers */
1939 rp->header.next = 0;
1940 rp->header.prev = 0;
1943 * move in-line root page into new right page extent
1945 /* linelock header + copied entries + new stbl (1st slot) in new page */
1946 ASSERT(dtlck->index == 0);
1947 lv = & dtlck->lv[0];
1948 lv->offset = 0;
1949 lv->length = 10; /* 1 + 8 + 1 */
1950 dtlck->index++;
1952 n = xsize >> L2DTSLOTSIZE;
1953 rp->header.maxslot = n;
1954 stblsize = (n + 31) >> L2DTSLOTSIZE;
1956 /* copy old stbl to new stbl at start of extended area */
1957 rp->header.stblindex = DTROOTMAXSLOT;
1958 stbl = (s8 *) & rp->slot[DTROOTMAXSLOT];
1959 memcpy(stbl, sp->header.stbl, sp->header.nextindex);
1960 rp->header.nextindex = sp->header.nextindex;
1962 /* copy old data area to start of new data area */
1963 memcpy(&rp->slot[1], &sp->slot[1], IDATASIZE);
1966 * append free region of newly extended area at tail of freelist
1968 /* init free region of newly extended area */
1969 fsi = n = DTROOTMAXSLOT + stblsize;
1970 f = &rp->slot[fsi];
1971 for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
1972 f->next = fsi;
1973 f->next = -1;
1975 /* append new free region at tail of old freelist */
1976 fsi = sp->header.freelist;
1977 if (fsi == -1)
1978 rp->header.freelist = n;
1979 else {
1980 rp->header.freelist = fsi;
1982 do {
1983 f = &rp->slot[fsi];
1984 fsi = f->next;
1985 } while (fsi != -1);
1987 f->next = n;
1990 rp->header.freecnt = sp->header.freecnt + rp->header.maxslot - n;
1993 * Update directory index table for entries now in right page
1995 if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
1996 s64 lblock;
1997 struct metapage *mp = NULL;
1998 struct ldtentry *ldtentry;
2000 stbl = DT_GETSTBL(rp);
2001 for (n = 0; n < rp->header.nextindex; n++) {
2002 ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
2003 modify_index(tid, ip, le32_to_cpu(ldtentry->index),
2004 rbn, n, &mp, &lblock);
2006 if (mp)
2007 release_metapage(mp);
2010 * insert the new entry into the new right/child page
2011 * (skip index in the new right page will not change)
2013 dtInsertEntry(rp, split->index, split->key, split->data, &dtlck);
2016 * reset parent/root page
2018 * set the 1st entry offset to 0, which force the left-most key
2019 * at any level of the tree to be less than any search key.
2021 * The btree comparison code guarantees that the left-most key on any
2022 * level of the tree is never used, so it doesn't need to be filled in.
2024 BT_MARK_DIRTY(smp, ip);
2026 * acquire a transaction lock on the root page (in-memory inode)
2028 tlck = txLock(tid, ip, smp, tlckDTREE | tlckNEW | tlckBTROOT);
2029 dtlck = (struct dt_lock *) & tlck->lock;
2031 /* linelock root */
2032 ASSERT(dtlck->index == 0);
2033 lv = & dtlck->lv[0];
2034 lv->offset = 0;
2035 lv->length = DTROOTMAXSLOT;
2036 dtlck->index++;
2038 /* update page header of root */
2039 if (sp->header.flag & BT_LEAF) {
2040 sp->header.flag &= ~BT_LEAF;
2041 sp->header.flag |= BT_INTERNAL;
2044 /* init the first entry */
2045 s = (struct idtentry *) & sp->slot[DTENTRYSTART];
2046 ppxd = (pxd_t *) s;
2047 *ppxd = *pxd;
2048 s->next = -1;
2049 s->namlen = 0;
2051 stbl = sp->header.stbl;
2052 stbl[0] = DTENTRYSTART;
2053 sp->header.nextindex = 1;
2055 /* init freelist */
2056 fsi = DTENTRYSTART + 1;
2057 f = &sp->slot[fsi];
2059 /* init free region of remaining area */
2060 for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
2061 f->next = fsi;
2062 f->next = -1;
2064 sp->header.freelist = DTENTRYSTART + 1;
2065 sp->header.freecnt = DTROOTMAXSLOT - (DTENTRYSTART + 1);
2067 *rmpp = rmp;
2069 return 0;
2074 * dtDelete()
2076 * function: delete the entry(s) referenced by a key.
2078 * parameter:
2080 * return:
/*
 * dtDelete(): look up <key> via dtSearch() and remove its entry from
 * the leaf page.  For indexed directories (DO_INDEX), the directory
 * index table is first updated so a readdir in progress can resume at
 * the entry following the deleted one.  If the leaf holds only this
 * entry, the whole page is freed and the deletion is propagated up the
 * tree via dtDeleteUp(); otherwise the entry is removed in place under
 * a transaction line lock and the index table is fixed up for entries
 * shifted within the stbl.
 *
 * return: 0 on success; errno from dtSearch()/dtDeleteUp() on failure.
 */
2082 int dtDelete(tid_t tid,
2083 struct inode *ip, struct component_name * key, ino_t * ino, int flag)
2085 int rc = 0;
2086 s64 bn;
2087 struct metapage *mp, *imp;
2088 dtpage_t *p;
2089 int index;
2090 struct btstack btstack;
2091 struct dt_lock *dtlck;
2092 struct tlock *tlck;
2093 struct lv *lv;
2094 int i;
2095 struct ldtentry *ldtentry;
2096 u8 *stbl;
2097 u32 table_index, next_index;
2098 struct metapage *nmp;
2099 dtpage_t *np;
2102 * search for the entry to delete:
2104 * dtSearch() returns (leaf page pinned, index at which to delete).
2106 if ((rc = dtSearch(ip, key, ino, &btstack, flag)))
2107 return rc;
2109 /* retrieve search result */
2110 DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
2113 * We need to find out the index of the next entry into the
2114 * directory index table in order to resume a readdir from this
2115 * entry.
2117 if (DO_INDEX(ip)) {
2118 stbl = DT_GETSTBL(p);
2119 ldtentry = (struct ldtentry *) & p->slot[stbl[index]];
2120 table_index = le32_to_cpu(ldtentry->index);
2121 if (index == (p->header.nextindex - 1)) {
2123 * Last entry in this leaf page
2125 if ((p->header.flag & BT_ROOT)
2126 || (p->header.next == 0))
2127 next_index = -1;
2128 else {
2129 /* Read next leaf page */
2130 DT_GETPAGE(ip, le64_to_cpu(p->header.next),
2131 nmp, PSIZE, np, rc);
/* NOTE(review): a read error here silently degrades to "no next
 * entry" (-1) rather than failing the delete — appears intentional
 * best-effort for the readdir-resume hint; confirm before changing. */
2132 if (rc)
2133 next_index = -1;
2134 else {
2135 stbl = DT_GETSTBL(np);
2136 ldtentry =
2137 (struct ldtentry *) & np->
2138 slot[stbl[0]];
2139 next_index =
2140 le32_to_cpu(ldtentry->index);
2141 DT_PUTPAGE(nmp);
2144 } else {
2145 ldtentry =
2146 (struct ldtentry *) & p->slot[stbl[index + 1]];
2147 next_index = le32_to_cpu(ldtentry->index);
2149 free_index(tid, ip, table_index, next_index);
2152 * the leaf page becomes empty, delete the page
2154 if (p->header.nextindex == 1) {
2155 /* delete empty page */
2156 rc = dtDeleteUp(tid, ip, mp, p, &btstack);
2159 * the leaf page has other entries remaining:
2161 * delete the entry from the leaf page.
2163 else {
2164 BT_MARK_DIRTY(mp, ip);
2166 * acquire a transaction lock on the leaf page
2168 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
2169 dtlck = (struct dt_lock *) & tlck->lock;
2172 * Do not assume that dtlck->index will be zero. During a
2173 * rename within a directory, this transaction may have
2174 * modified this page already when adding the new entry.
2177 /* linelock header */
2178 if (dtlck->index >= dtlck->maxcnt)
2179 dtlck = (struct dt_lock *) txLinelock(dtlck);
2180 lv = & dtlck->lv[dtlck->index];
2181 lv->offset = 0;
2182 lv->length = 1;
2183 dtlck->index++;
2185 /* linelock stbl of non-root leaf page */
2186 if (!(p->header.flag & BT_ROOT)) {
2187 if (dtlck->index >= dtlck->maxcnt)
2188 dtlck = (struct dt_lock *) txLinelock(dtlck);
2189 lv = & dtlck->lv[dtlck->index];
2190 i = index >> L2DTSLOTSIZE;
2191 lv->offset = p->header.stblindex + i;
2192 lv->length =
2193 ((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
2194 i + 1;
2195 dtlck->index++;
2198 /* free the leaf entry */
2199 dtDeleteEntry(p, index, &dtlck);
2202 * Update directory index table for entries moved in stbl
2204 if (DO_INDEX(ip) && index < p->header.nextindex) {
2205 s64 lblock;
2207 imp = NULL;
2208 stbl = DT_GETSTBL(p);
2209 for (i = index; i < p->header.nextindex; i++) {
2210 ldtentry =
2211 (struct ldtentry *) & p->slot[stbl[i]];
2212 modify_index(tid, ip,
2213 le32_to_cpu(ldtentry->index),
2214 bn, i, &imp, &lblock);
2216 if (imp)
2217 release_metapage(imp);
2220 DT_PUTPAGE(mp);
2223 return rc;
2228 * dtDeleteUp()
2230 * function:
2231 * free empty pages as propagating deletion up the tree
2233 * parameter:
2235 * return:
/*
 * dtDeleteUp(): free the now-empty leaf page <fmp>/<fp> and propagate
 * the deletion up the tree via the search stack.  An empty root (leaf
 * or internal) is never freed — it is reinitialized with dtInitRoot()
 * instead.  For each freed non-root page: a FREEXTENT|NOREDOPAGE
 * maplock is taken, sibling pointers are relinked, quota is released,
 * and the buffer page is discarded.  Propagation stops at the first
 * parent that still has other entries; its router entry is deleted
 * under a line lock (and the leftmost key truncated for consistency
 * when entry 0 of a leftmost page is removed).
 *
 * return: 0 on success; errno from dtRelink()/DT_GETPAGE on failure.
 */
2237 static int dtDeleteUp(tid_t tid, struct inode *ip,
2238 struct metapage * fmp, dtpage_t * fp, struct btstack * btstack)
2240 int rc = 0;
2241 struct metapage *mp;
2242 dtpage_t *p;
2243 int index, nextindex;
2244 int xlen;
2245 struct btframe *parent;
2246 struct dt_lock *dtlck;
2247 struct tlock *tlck;
2248 struct lv *lv;
2249 struct pxd_lock *pxdlock;
2250 int i;
2253 * keep the root leaf page which has become empty
2255 if (BT_IS_ROOT(fmp)) {
2257 * reset the root
2259 * dtInitRoot() acquires txlock on the root
2261 dtInitRoot(tid, ip, PARENT(ip));
2263 DT_PUTPAGE(fmp);
2265 return 0;
2269 * free the non-root leaf page
2272 * acquire a transaction lock on the page
2274 * write FREEXTENT|NOREDOPAGE log record
2275 * N.B. linelock is overlaid as freed extent descriptor, and
2276 * the buffer page is freed;
2278 tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
2279 pxdlock = (struct pxd_lock *) & tlck->lock;
2280 pxdlock->flag = mlckFREEPXD;
2281 pxdlock->pxd = fp->header.self;
2282 pxdlock->index = 1;
2284 /* update sibling pointers */
2285 if ((rc = dtRelink(tid, ip, fp))) {
2286 BT_PUTPAGE(fmp);
2287 return rc;
2290 xlen = lengthPXD(&fp->header.self);
2292 /* Free quota allocation. */
2293 dquot_free_block(ip, xlen);
2295 /* free/invalidate its buffer page */
2296 discard_metapage(fmp);
2299 * propagate page deletion up the directory tree
2301 * If the delete from the parent page makes it empty,
2302 * continue all the way up the tree.
2303 * stop if the root page is reached (which is never deleted) or
2304 * if the entry deletion does not empty the page.
2306 while ((parent = BT_POP(btstack)) != NULL) {
2307 /* pin the parent page <sp> */
2308 DT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
2309 if (rc)
2310 return rc;
2313 * free the extent of the child page deleted
2315 index = parent->index;
2318 * delete the entry for the child page from parent
2320 nextindex = p->header.nextindex;
2323 * the parent has the single entry being deleted:
2325 * free the parent page which has become empty.
2327 if (nextindex == 1) {
2329 * keep the root internal page which has become empty
2331 if (p->header.flag & BT_ROOT) {
2333 * reset the root
2335 * dtInitRoot() acquires txlock on the root
2337 dtInitRoot(tid, ip, PARENT(ip));
2339 DT_PUTPAGE(mp);
2341 return 0;
2344 * free the parent page
2346 else {
2348 * acquire a transaction lock on the page
2350 * write FREEXTENT|NOREDOPAGE log record
2352 tlck =
2353 txMaplock(tid, ip,
2354 tlckDTREE | tlckFREE);
2355 pxdlock = (struct pxd_lock *) & tlck->lock;
2356 pxdlock->flag = mlckFREEPXD;
2357 pxdlock->pxd = p->header.self;
2358 pxdlock->index = 1;
2360 /* update sibling pointers */
2361 if ((rc = dtRelink(tid, ip, p))) {
2362 DT_PUTPAGE(mp);
2363 return rc;
2366 xlen = lengthPXD(&p->header.self);
2368 /* Free quota allocation */
2369 dquot_free_block(ip, xlen);
2371 /* free/invalidate its buffer page */
2372 discard_metapage(mp);
2374 /* propagate up */
2375 continue;
2380 * the parent has other entries remaining:
2382 * delete the router entry from the parent page.
2384 BT_MARK_DIRTY(mp, ip);
2386 * acquire a transaction lock on the page
2388 * action: router entry deletion
2390 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
2391 dtlck = (struct dt_lock *) & tlck->lock;
2393 /* linelock header */
2394 if (dtlck->index >= dtlck->maxcnt)
2395 dtlck = (struct dt_lock *) txLinelock(dtlck);
2396 lv = & dtlck->lv[dtlck->index];
2397 lv->offset = 0;
2398 lv->length = 1;
2399 dtlck->index++;
2401 /* linelock stbl of non-root leaf page */
2402 if (!(p->header.flag & BT_ROOT)) {
2403 if (dtlck->index < dtlck->maxcnt)
2404 lv++;
2405 else {
2406 dtlck = (struct dt_lock *) txLinelock(dtlck);
2407 lv = & dtlck->lv[0];
2409 i = index >> L2DTSLOTSIZE;
2410 lv->offset = p->header.stblindex + i;
2411 lv->length =
2412 ((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
2413 i + 1;
2414 dtlck->index++;
2417 /* free the router entry */
2418 dtDeleteEntry(p, index, &dtlck);
2420 /* reset key of new leftmost entry of level (for consistency) */
2421 if (index == 0 &&
2422 ((p->header.flag & BT_ROOT) || p->header.prev == 0))
2423 dtTruncateEntry(p, 0, &dtlck);
2425 /* unpin the parent page */
2426 DT_PUTPAGE(mp);
2428 /* exit propagation up */
2429 break;
2432 if (!DO_INDEX(ip))
2433 ip->i_size -= PSIZE;
2435 return 0;
2438 #ifdef _NOTYET
2440 * NAME: dtRelocate()
2442 * FUNCTION: relocate dtpage (internal or leaf) of directory;
2443 * This function is mainly used by defragfs utility.
/*
 * dtRelocate(): move a directory dtpage from its old extent <opxd> to
 * the new address <nxaddr> (defragfs support; compiled under _NOTYET):
 *   1. locate the parent internal page holding the router entry for
 *      the target page (dtSearchNode);
 *   2. relink the left/right siblings' prev/next pointers to <nxaddr>,
 *      rewrite the target page's self-pxd, and log the whole page as
 *      LOG_REDOPAGE/LOG_NEW at the destination;
 *   3. take a FREEPXD maplock for the source extent;
 *   4. update the parent router entry's pxd to the new address.
 */
2445 int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
2446 s64 nxaddr)
2448 int rc = 0;
2449 struct metapage *mp, *pmp, *lmp, *rmp;
2450 dtpage_t *p, *pp, *rp = 0, *lp= 0;
2451 s64 bn;
2452 int index;
2453 struct btstack btstack;
2454 pxd_t *pxd;
2455 s64 oxaddr, nextbn, prevbn;
2456 int xlen, xsize;
2457 struct tlock *tlck;
2458 struct dt_lock *dtlck;
2459 struct pxd_lock *pxdlock;
2460 s8 *stbl;
2461 struct lv *lv;
2463 oxaddr = addressPXD(opxd);
2464 xlen = lengthPXD(opxd);
2466 jfs_info("dtRelocate: lmxaddr:%Ld xaddr:%Ld:%Ld xlen:%d",
2467 (long long)lmxaddr, (long long)oxaddr, (long long)nxaddr,
2468 xlen);
2471 * 1. get the internal parent dtpage covering
2472 * router entry for the target page to be relocated;
2474 rc = dtSearchNode(ip, lmxaddr, opxd, &btstack);
2475 if (rc)
2476 return rc;
2478 /* retrieve search result */
2479 DT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
2480 jfs_info("dtRelocate: parent router entry validated.");
2483 * 2. relocate the target dtpage
2485 /* read in the target page from src extent */
2486 DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
2487 if (rc) {
2488 /* release the pinned parent page */
2489 DT_PUTPAGE(pmp);
2490 return rc;
2494 * read in sibling pages if any to update sibling pointers;
2496 rmp = NULL;
2497 if (p->header.next) {
2498 nextbn = le64_to_cpu(p->header.next);
2499 DT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
2500 if (rc) {
2501 DT_PUTPAGE(mp);
2502 DT_PUTPAGE(pmp);
2503 return (rc);
2507 lmp = NULL;
2508 if (p->header.prev) {
2509 prevbn = le64_to_cpu(p->header.prev);
2510 DT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
2511 if (rc) {
2512 DT_PUTPAGE(mp);
2513 DT_PUTPAGE(pmp);
2514 if (rmp)
2515 DT_PUTPAGE(rmp);
2516 return (rc);
2520 /* at this point, all xtpages to be updated are in memory */
2523 * update sibling pointers of sibling dtpages if any;
2525 if (lmp) {
2526 tlck = txLock(tid, ip, lmp, tlckDTREE | tlckRELINK);
2527 dtlck = (struct dt_lock *) & tlck->lock;
2528 /* linelock header */
2529 ASSERT(dtlck->index == 0);
2530 lv = & dtlck->lv[0];
2531 lv->offset = 0;
2532 lv->length = 1;
2533 dtlck->index++;
2535 lp->header.next = cpu_to_le64(nxaddr);
2536 DT_PUTPAGE(lmp);
2539 if (rmp) {
2540 tlck = txLock(tid, ip, rmp, tlckDTREE | tlckRELINK);
2541 dtlck = (struct dt_lock *) & tlck->lock;
2542 /* linelock header */
2543 ASSERT(dtlck->index == 0);
2544 lv = & dtlck->lv[0];
2545 lv->offset = 0;
2546 lv->length = 1;
2547 dtlck->index++;
2549 rp->header.prev = cpu_to_le64(nxaddr);
2550 DT_PUTPAGE(rmp);
2554 * update the target dtpage to be relocated
2556 * write LOG_REDOPAGE of LOG_NEW type for dst page
2557 * for the whole target page (logredo() will apply
2558 * after image and update bmap for allocation of the
2559 * dst extent), and update bmap for allocation of
2560 * the dst extent;
2562 tlck = txLock(tid, ip, mp, tlckDTREE | tlckNEW);
2563 dtlck = (struct dt_lock *) & tlck->lock;
2564 /* linelock header */
2565 ASSERT(dtlck->index == 0);
2566 lv = & dtlck->lv[0];
2568 /* update the self address in the dtpage header */
2569 pxd = &p->header.self;
2570 PXDaddress(pxd, nxaddr);
2572 /* the dst page is the same as the src page, i.e.,
2573 * linelock for afterimage of the whole page;
2575 lv->offset = 0;
2576 lv->length = p->header.maxslot;
2577 dtlck->index++;
2579 /* update the buffer extent descriptor of the dtpage */
2580 xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
2582 /* unpin the relocated page */
2583 DT_PUTPAGE(mp);
2584 jfs_info("dtRelocate: target dtpage relocated.");
2586 /* the moved extent is dtpage, then a LOG_NOREDOPAGE log rec
2587 * needs to be written (in logredo(), the LOG_NOREDOPAGE log rec
2588 * will also force a bmap update ).
2592 * 3. acquire maplock for the source extent to be freed;
2594 /* for dtpage relocation, write a LOG_NOREDOPAGE record
2595 * for the source dtpage (logredo() will init NoRedoPage
2596 * filter and will also update bmap for free of the source
2597 * dtpage), and update bmap for free of the source dtpage;
2599 tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
2600 pxdlock = (struct pxd_lock *) & tlck->lock;
2601 pxdlock->flag = mlckFREEPXD;
2602 PXDaddress(&pxdlock->pxd, oxaddr);
2603 PXDlength(&pxdlock->pxd, xlen);
2604 pxdlock->index = 1;
2607 * 4. update the parent router entry for relocation;
2609 * acquire tlck for the parent entry covering the target dtpage;
2610 * write LOG_REDOPAGE to apply after image only;
2612 jfs_info("dtRelocate: update parent router entry.");
2613 tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
2614 dtlck = (struct dt_lock *) & tlck->lock;
2615 lv = & dtlck->lv[dtlck->index];
2617 /* update the PXD with the new address */
2618 stbl = DT_GETSTBL(pp);
2619 pxd = (pxd_t *) & pp->slot[stbl[index]];
2620 PXDaddress(pxd, nxaddr);
2621 lv->offset = stbl[index];
2622 lv->length = 1;
2623 dtlck->index++;
2625 /* unpin the parent dtpage */
2626 DT_PUTPAGE(pmp);
2628 return rc;
/*
 * NAME: dtSearchNode()
 *
 * FUNCTION: Search for a dtpage containing a specified address
 *	This function is mainly used by the defragfs utility.
 *
 * NOTE: Search result on stack, the found page is pinned at exit.
 *	The result page must be an internal dtpage.
 *	lmxaddr gives the address of the left most page of the
 *	dtree level, in which the required dtpage resides.
 *
 * return: 0 on success with btstack->top holding (bn, index) and the
 *	found page pinned; -ESTALE when the level/extent cannot be
 *	located; otherwise an error from DT_GETPAGE.
 */
static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
			struct btstack * btstack)
{
	int rc = 0;
	s64 bn;
	struct metapage *mp;
	dtpage_t *p;
	int psize = 288;	/* initial in-line directory */
	s8 *stbl;
	int i;
	pxd_t *pxd;
	struct btframe *btsp;

	BT_CLR(btstack);	/* reset stack */

	/*
	 *	descend tree to the level with specified leftmost page
	 *
	 *  by convention, root bn = 0.
	 */
	for (bn = 0;;) {
		/* get/pin the page to search */
		DT_GETPAGE(ip, bn, mp, psize, p, rc);
		if (rc)
			return rc;

		/* does the xaddr of leftmost page of the level
		 * match the level search key ?
		 */
		if (p->header.flag & BT_ROOT) {
			if (lmxaddr == 0)
				break;
		} else if (addressPXD(&p->header.self) == lmxaddr)
			break;

		/*
		 * descend down to leftmost child page
		 */
		if (p->header.flag & BT_LEAF) {
			/* hit the bottom without matching lmxaddr */
			DT_PUTPAGE(mp);
			return -ESTALE;
		}

		/* get the leftmost entry */
		stbl = DT_GETSTBL(p);
		pxd = (pxd_t *) & p->slot[stbl[0]];

		/* get the child page block address */
		bn = addressPXD(pxd);
		psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
		/* unpin the parent page */
		DT_PUTPAGE(mp);
	}

	/*
	 *	search each page at the current level
	 */
      loop:
	stbl = DT_GETSTBL(p);
	for (i = 0; i < p->header.nextindex; i++) {
		pxd = (pxd_t *) & p->slot[stbl[i]];

		/* found the specified router entry */
		if (addressPXD(pxd) == addressPXD(kpxd) &&
		    lengthPXD(pxd) == lengthPXD(kpxd)) {
			/* return result; page stays pinned for the caller */
			btsp = btstack->top;
			btsp->bn = bn;
			btsp->index = i;
			btsp->mp = mp;

			return 0;
		}
	}

	/* get the right sibling page if any */
	if (p->header.next)
		bn = le64_to_cpu(p->header.next);
	else {
		/* level exhausted without finding the extent */
		DT_PUTPAGE(mp);
		return -ESTALE;
	}

	/* unpin current page */
	DT_PUTPAGE(mp);

	/* get the right sibling page */
	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	goto loop;
}
#endif /* _NOTYET */
/*
 *	dtRelink()
 *
 * function:
 *	link around a freed page: point the freed page's siblings at
 *	each other, logging each sibling header update under tid.
 *
 * parameter:
 *	tid:	transaction id
 *	ip:	directory inode
 *	p:	page to be freed; only its prev/next fields are read
 *		(DT_GETPAGE below reuses p to map each sibling page)
 *
 * return: 0 for success; otherwise the error from DT_GETPAGE
 */
static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p)
{
	int rc;
	struct metapage *mp;
	s64 nextbn, prevbn;
	struct tlock *tlck;
	struct dt_lock *dtlck;
	struct lv *lv;

	/* capture the freed page's neighbors before p is remapped */
	nextbn = le64_to_cpu(p->header.next);
	prevbn = le64_to_cpu(p->header.prev);

	/* update prev pointer of the next page */
	if (nextbn != 0) {
		DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		BT_MARK_DIRTY(mp, ip);
		/*
		 * acquire a transaction lock on the next page
		 *
		 * action: update prev pointer;
		 */
		tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
		jfs_info("dtRelink nextbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
			tlck, ip, mp);
		dtlck = (struct dt_lock *) & tlck->lock;

		/* linelock header */
		if (dtlck->index >= dtlck->maxcnt)
			dtlck = (struct dt_lock *) txLinelock(dtlck);
		lv = & dtlck->lv[dtlck->index];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		p->header.prev = cpu_to_le64(prevbn);
		DT_PUTPAGE(mp);
	}

	/* update next pointer of the previous page */
	if (prevbn != 0) {
		DT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		BT_MARK_DIRTY(mp, ip);
		/*
		 * acquire a transaction lock on the prev page
		 *
		 * action: update next pointer;
		 */
		tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
		jfs_info("dtRelink prevbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
			tlck, ip, mp);
		dtlck = (struct dt_lock *) & tlck->lock;

		/* linelock header */
		if (dtlck->index >= dtlck->maxcnt)
			dtlck = (struct dt_lock *) txLinelock(dtlck);
		lv = & dtlck->lv[dtlck->index];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		p->header.next = cpu_to_le64(nextbn);
		DT_PUTPAGE(mp);
	}

	return 0;
}
/*
 *	dtInitRoot()
 *
 * function: initialize directory root (inline in inode) as an empty
 *	leaf, resetting the persistent-index bookkeeping when the
 *	directory uses an on-disk index table.
 */
void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	dtroot_t *p;
	int fsi;
	struct dtslot *f;
	struct tlock *tlck;
	struct dt_lock *dtlck;
	struct lv *lv;
	u16 xflag_save;

	/*
	 * If this was previously a non-empty directory, we need to remove
	 * the old directory table.
	 */
	if (DO_INDEX(ip)) {
		if (!jfs_dirtable_inline(ip)) {
			struct tblock *tblk = tid_to_tblock(tid);
			/*
			 * We're playing games with the tid's xflag.  If
			 * we're removing a regular file, the file's xtree
			 * is committed with COMMIT_PMAP, but we always
			 * commit the directories xtree with COMMIT_PWMAP.
			 */
			xflag_save = tblk->xflag;
			tblk->xflag = 0;
			/*
			 * xtTruncate isn't guaranteed to fully truncate
			 * the xtree.  The caller needs to check i_size
			 * after committing the transaction to see if
			 * additional truncation is needed.  The
			 * COMMIT_Stale flag tells caller that we
			 * initiated the truncation.
			 */
			xtTruncate(tid, ip, 0, COMMIT_PWMAP);
			set_cflag(COMMIT_Stale, ip);

			tblk->xflag = xflag_save;
		} else
			ip->i_size = 1;

		/* persistent indices restart at 2 (0 = ".", 1 = "..") */
		jfs_ip->next_index = 2;
	} else
		ip->i_size = IDATASIZE;

	/*
	 *	acquire a transaction lock on the root
	 *
	 * action: directory initialization;
	 */
	tlck = txLock(tid, ip, (struct metapage *) & jfs_ip->bxflag,
		      tlckDTREE | tlckENTRY | tlckBTROOT);
	dtlck = (struct dt_lock *) & tlck->lock;

	/* linelock root */
	ASSERT(dtlck->index == 0);
	lv = & dtlck->lv[0];
	lv->offset = 0;
	lv->length = DTROOTMAXSLOT;
	dtlck->index++;

	p = &jfs_ip->i_dtroot;

	p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF;

	p->header.nextindex = 0;

	/* init freelist: slot 0 holds the header, slots 1.. are free */
	fsi = 1;
	f = &p->slot[fsi];

	/* init data area of root: chain each slot to the next */
	for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
		f->next = fsi;
	f->next = -1;		/* -1 terminates the free list */

	p->header.freelist = 1;
	p->header.freecnt = 8;	/* DTROOTMAXSLOT - 1 free slots -- TODO confirm */

	/* init '..' entry */
	p->header.idotdot = cpu_to_le32(idotdot);

	return;
}
/*
 *	add_missing_indices()
 *
 * function: Fix dtree page in which one or more entries has an invalid index.
 *	fsck.jfs should really fix this, but it currently does not.
 *	Called from jfs_readdir when bad index is detected.
 *
 *	Runs its own transaction: every leaf entry whose persistent index
 *	is out of range gets a fresh index from add_index(), and the
 *	modified slot is linelocked so the repair is logged.
 */
static void add_missing_indices(struct inode *inode, s64 bn)
{
	struct ldtentry *d;
	struct dt_lock *dtlck;
	int i;
	uint index;
	struct lv *lv;
	struct metapage *mp;
	dtpage_t *p;
	int rc;
	s8 *stbl;
	tid_t tid;
	struct tlock *tlck;

	tid = txBegin(inode->i_sb, 0);

	DT_GETPAGE(inode, bn, mp, PSIZE, p, rc);

	if (rc) {
		printk(KERN_ERR "DT_GETPAGE failed!\n");
		goto end;
	}
	BT_MARK_DIRTY(mp, inode);

	ASSERT(p->header.flag & BT_LEAF);

	tlck = txLock(tid, inode, mp, tlckDTREE | tlckENTRY);
	if (BT_IS_ROOT(mp))
		tlck->type |= tlckBTROOT;

	dtlck = (struct dt_lock *) &tlck->lock;

	stbl = DT_GETSTBL(p);
	for (i = 0; i < p->header.nextindex; i++) {
		d = (struct ldtentry *) &p->slot[stbl[i]];
		index = le32_to_cpu(d->index);
		/* valid persistent indices lie in [2, next_index) */
		if ((index < 2) || (index >= JFS_IP(inode)->next_index)) {
			d->index = cpu_to_le32(add_index(tid, inode, bn, i));
			if (dtlck->index >= dtlck->maxcnt)
				dtlck = (struct dt_lock *) txLinelock(dtlck);
			lv = &dtlck->lv[dtlck->index];
			lv->offset = stbl[i];
			lv->length = 1;
			dtlck->index++;
		}
	}

	DT_PUTPAGE(mp);
	(void) txCommit(tid, 1, &inode, 0);
      end:
	txEnd(tid);
}
2972 * Buffer to hold directory entry info while traversing a dtree page
2973 * before being fed to the filldir function
2975 struct jfs_dirent {
2976 loff_t position;
2977 int ino;
2978 u16 name_len;
2979 char name[0];
2983 * function to determine next variable-sized jfs_dirent in buffer
2985 static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
2987 return (struct jfs_dirent *)
2988 ((char *)dirent +
2989 ((sizeof (struct jfs_dirent) + dirent->name_len + 1 +
2990 sizeof (loff_t) - 1) &
2991 ~(sizeof (loff_t) - 1)));
/*
 *	jfs_readdir()
 *
 * function: read directory entries sequentially
 *	from the specified entry offset
 *
 * parameter:
 *	filp: open directory file (f_pos encodes the resume point)
 *	dirent/filldir: VFS callback context for emitting entries
 *
 * return: offset = (pn, index) of start entry
 *	of next jfs_readdir()/dtRead()
 *
 * Entries are first batched into a scratch page (jfs_dirent records)
 * while the leaf page is pinned, then fed to filldir after the page
 * is released, so filldir never runs with a metapage held.
 */
int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *ip = filp->f_path.dentry->d_inode;
	struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
	int rc = 0;
	loff_t dtpos;	/* legacy OS/2 style position */
	struct dtoffset {
		s16 pn;
		s16 index;
		s32 unused;
	} *dtoffset = (struct dtoffset *) &dtpos;
	s64 bn;
	struct metapage *mp;
	dtpage_t *p;
	int index;
	s8 *stbl;
	struct btstack btstack;
	int i, next;
	struct ldtentry *d;
	struct dtslot *t;
	int d_namleft, len, outlen;
	unsigned long dirent_buf;
	char *name_ptr;
	u32 dir_index;
	int do_index = 0;
	uint loop_count = 0;
	struct jfs_dirent *jfs_dirent;
	int jfs_dirents;
	int overflow, fix_page, page_fixed = 0;
	static int unique_pos = 2;	/* If we can't fix broken index */

	if (filp->f_pos == DIREND)
		return 0;

	if (DO_INDEX(ip)) {
		/*
		 * persistent index is stored in directory entries.
		 * Special cases:	 0 = .
		 *			 1 = ..
		 *			-1 = End of directory
		 */
		do_index = 1;

		dir_index = (u32) filp->f_pos;

		/*
		 * NFSv4 reserves cookies 1 and 2 for . and .. so the value
		 * we return to the vfs is one greater than the one we use
		 * internally.
		 */
		if (dir_index)
			dir_index--;

		if (dir_index > 1) {
			struct dir_table_slot dirtab_slot;

			if (dtEmpty(ip) ||
			    (dir_index >= JFS_IP(ip)->next_index)) {
				/* Stale position.  Directory has shrunk */
				filp->f_pos = DIREND;
				return 0;
			}
		      repeat:
			rc = read_index(ip, dir_index, &dirtab_slot);
			if (rc) {
				filp->f_pos = DIREND;
				return rc;
			}
			if (dirtab_slot.flag == DIR_INDEX_FREE) {
				/* chase the free-slot chain, bounded to
				 * avoid spinning on a corrupt table */
				if (loop_count++ > JFS_IP(ip)->next_index) {
					jfs_err("jfs_readdir detected "
						"infinite loop!");
					filp->f_pos = DIREND;
					return 0;
				}
				dir_index = le32_to_cpu(dirtab_slot.addr2);
				if (dir_index == -1) {
					filp->f_pos = DIREND;
					return 0;
				}
				goto repeat;
			}
			bn = addressDTS(&dirtab_slot);
			index = dirtab_slot.slot;
			DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
			if (rc) {
				filp->f_pos = DIREND;
				return 0;
			}
			if (p->header.flag & BT_INTERNAL) {
				jfs_err("jfs_readdir: bad index table");
				DT_PUTPAGE(mp);
				filp->f_pos = DIREND;
				return 0;
			}
		} else {
			if (dir_index == 0) {
				/*
				 * self "."
				 */
				filp->f_pos = 1;
				if (filldir(dirent, ".", 1, 0, ip->i_ino,
					    DT_DIR))
					return 0;
			}
			/*
			 * parent ".."
			 */
			filp->f_pos = 2;
			if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
				return 0;

			/*
			 * Find first entry of left-most leaf
			 */
			if (dtEmpty(ip)) {
				filp->f_pos = DIREND;
				return 0;
			}

			if ((rc = dtReadFirst(ip, &btstack)))
				return rc;

			DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
		}
	} else {
		/*
		 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
		 *
		 * pn = 0; index = 1:	First entry "."
		 * pn = 0; index = 2:	Second entry ".."
		 * pn > 0:		Real entries, pn=1 -> leftmost page
		 * pn = index = -1:	No more entries
		 */
		dtpos = filp->f_pos;
		if (dtpos < 2) {
			/* build "." entry */

			filp->f_pos = 1;
			if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
				    DT_DIR))
				return 0;
			dtoffset->index = 2;
			filp->f_pos = dtpos;
		}

		if (dtoffset->pn == 0) {
			if (dtoffset->index == 2) {
				/* build ".." entry */

				if (filldir(dirent, "..", 2, filp->f_pos,
					    PARENT(ip), DT_DIR))
					return 0;
			} else {
				jfs_err("jfs_readdir called with "
					"invalid offset!");
			}
			dtoffset->pn = 1;
			dtoffset->index = 0;
			filp->f_pos = dtpos;
		}

		if (dtEmpty(ip)) {
			filp->f_pos = DIREND;
			return 0;
		}

		if ((rc = dtReadNext(ip, &filp->f_pos, &btstack))) {
			jfs_err("jfs_readdir: unexpected rc = %d "
				"from dtReadNext", rc);
			filp->f_pos = DIREND;
			return 0;
		}
		/* get start leaf page and index */
		DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);

		/* offset beyond directory eof ? */
		if (bn < 0) {
			filp->f_pos = DIREND;
			return 0;
		}
	}

	dirent_buf = __get_free_page(GFP_KERNEL);
	if (dirent_buf == 0) {
		DT_PUTPAGE(mp);
		jfs_warn("jfs_readdir: __get_free_page failed!");
		filp->f_pos = DIREND;
		return -ENOMEM;
	}

	while (1) {
		jfs_dirent = (struct jfs_dirent *) dirent_buf;
		jfs_dirents = 0;
		overflow = fix_page = 0;

		stbl = DT_GETSTBL(p);

		/* batch this leaf page's entries into dirent_buf */
		for (i = index; i < p->header.nextindex; i++) {
			d = (struct ldtentry *) & p->slot[stbl[i]];

			if (((long) jfs_dirent + d->namlen + 1) >
			    (dirent_buf + PAGE_SIZE)) {
				/* DBCS codepages could overrun dirent_buf */
				index = i;
				overflow = 1;
				break;
			}

			d_namleft = d->namlen;
			name_ptr = jfs_dirent->name;
			jfs_dirent->ino = le32_to_cpu(d->inumber);

			if (do_index) {
				len = min(d_namleft, DTLHDRDATALEN);
				jfs_dirent->position = le32_to_cpu(d->index);
				/*
				 * d->index should always be valid, but it
				 * isn't.  fsck.jfs doesn't create the
				 * directory index for the lost+found
				 * directory.  Rather than let it go,
				 * we can try to fix it.
				 */
				if ((jfs_dirent->position < 2) ||
				    (jfs_dirent->position >=
				     JFS_IP(ip)->next_index)) {
					if (!page_fixed && !isReadOnly(ip)) {
						fix_page = 1;
						/*
						 * setting overflow and setting
						 * index to i will cause the
						 * same page to be processed
						 * again starting here
						 */
						overflow = 1;
						index = i;
						break;
					}
					/* read-only fs: hand out a unique
					 * but non-persistent position */
					jfs_dirent->position = unique_pos++;
				}
				/*
				 * We add 1 to the index because we may
				 * use a value of 2 internally, and NFSv4
				 * doesn't like that.
				 */
				jfs_dirent->position++;
			} else {
				jfs_dirent->position = dtpos;
				len = min(d_namleft, DTLHDRDATALEN_LEGACY);
			}

			/* copy the name of head/only segment */
			outlen = jfs_strfromUCS_le(name_ptr, d->name, len,
						   codepage);
			jfs_dirent->name_len = outlen;

			/* copy name in the additional segment(s) */
			next = d->next;
			while (next >= 0) {
				t = (struct dtslot *) & p->slot[next];
				name_ptr += outlen;
				d_namleft -= len;
				/* Sanity Check */
				if (d_namleft == 0) {
					jfs_error(ip->i_sb,
						  "JFS:Dtree error: ino = "
						  "%ld, bn=%Ld, index = %d",
						  (long) ip->i_ino,
						  (long long) bn,
						  i);
					goto skip_one;
				}
				len = min(d_namleft, DTSLOTDATALEN);
				outlen = jfs_strfromUCS_le(name_ptr, t->name,
							   len, codepage);
				jfs_dirent->name_len += outlen;

				next = t->next;
			}

			jfs_dirents++;
			jfs_dirent = next_jfs_dirent(jfs_dirent);
		      skip_one:
			if (!do_index)
				dtoffset->index++;
		}

		if (!overflow) {
			/* Point to next leaf page */
			if (p->header.flag & BT_ROOT)
				bn = 0;
			else {
				bn = le64_to_cpu(p->header.next);
				index = 0;
				/* update offset (pn:index) for new page */
				if (!do_index) {
					dtoffset->pn++;
					dtoffset->index = 0;
				}
			}
			page_fixed = 0;
		}

		/* unpin previous leaf page */
		DT_PUTPAGE(mp);

		/* drain the batch into filldir with no page pinned */
		jfs_dirent = (struct jfs_dirent *) dirent_buf;
		while (jfs_dirents--) {
			filp->f_pos = jfs_dirent->position;
			if (filldir(dirent, jfs_dirent->name,
				    jfs_dirent->name_len, filp->f_pos,
				    jfs_dirent->ino, DT_UNKNOWN))
				goto out;
			jfs_dirent = next_jfs_dirent(jfs_dirent);
		}

		if (fix_page) {
			add_missing_indices(ip, bn);
			page_fixed = 1;
		}

		if (!overflow && (bn == 0)) {
			filp->f_pos = DIREND;
			break;
		}

		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc) {
			free_page(dirent_buf);
			return rc;
		}
	}

      out:
	free_page(dirent_buf);

	return rc;
}
/*
 *	dtReadFirst()
 *
 * function: get the leftmost page of the directory
 *
 * return: 0 with btstack->top describing the leftmost leaf (page left
 *	pinned), or an error from DT_GETPAGE / -EIO on stack overrun.
 */
static int dtReadFirst(struct inode *ip, struct btstack * btstack)
{
	int rc = 0;
	s64 bn;
	int psize = 288;	/* initial in-line directory */
	struct metapage *mp;
	dtpage_t *p;
	s8 *stbl;
	struct btframe *btsp;
	pxd_t *xd;

	BT_CLR(btstack);	/* reset stack */

	/*
	 *	descend leftmost path of the tree
	 *
	 * by convention, root bn = 0.
	 */
	for (bn = 0;;) {
		DT_GETPAGE(ip, bn, mp, psize, p, rc);
		if (rc)
			return rc;

		/*
		 * leftmost leaf page
		 */
		if (p->header.flag & BT_LEAF) {
			/* return leftmost entry; page stays pinned */
			btsp = btstack->top;
			btsp->bn = bn;
			btsp->index = 0;
			btsp->mp = mp;

			return 0;
		}

		/*
		 * descend down to leftmost child page
		 */
		if (BT_STACK_FULL(btstack)) {
			DT_PUTPAGE(mp);
			jfs_error(ip->i_sb, "dtReadFirst: btstack overrun");
			BT_STACK_DUMP(btstack);
			return -EIO;
		}
		/* push (bn, index) of the parent page/entry */
		BT_PUSH(btstack, bn, 0);

		/* get the leftmost entry */
		stbl = DT_GETSTBL(p);
		xd = (pxd_t *) & p->slot[stbl[0]];

		/* get the child page block address */
		bn = addressPXD(xd);
		psize = lengthPXD(xd) << JFS_SBI(ip->i_sb)->l2bsize;

		/* unpin the parent page */
		DT_PUTPAGE(mp);
	}
}
/*
 *	dtReadNext()
 *
 * function: get the page of the specified offset (pn:index)
 *
 * return: if (offset > eof), bn = -1;
 *	otherwise 0 with btstack->top holding the target leaf pinned,
 *	or an error from dtReadFirst()/DT_GETPAGE.
 *
 * note: if index > nextindex of the target leaf page,
 * start with 1st entry of next leaf page;
 */
static int dtReadNext(struct inode *ip, loff_t * offset,
		      struct btstack * btstack)
{
	int rc = 0;
	struct dtoffset {
		s16 pn;
		s16 index;
		s32 unused;
	} *dtoffset = (struct dtoffset *) offset;
	s64 bn;
	struct metapage *mp;
	dtpage_t *p;
	int index;
	int pn;
	s8 *stbl;
	struct btframe *btsp, *parent;
	pxd_t *xd;

	/*
	 * get leftmost leaf page pinned
	 */
	if ((rc = dtReadFirst(ip, btstack)))
		return rc;

	/* get leaf page */
	DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);

	/* get the start offset (pn:index) */
	pn = dtoffset->pn - 1;	/* Now pn = 0 represents leftmost leaf */
	index = dtoffset->index;

	/* start at leftmost page ? */
	if (pn == 0) {
		/* offset beyond eof ? */
		if (index < p->header.nextindex)
			goto out;

		if (p->header.flag & BT_ROOT) {
			bn = -1;
			goto out;
		}

		/* start with 1st entry of next leaf page */
		dtoffset->pn++;
		dtoffset->index = index = 0;
		goto a;
	}

	/* start at non-leftmost page: scan parent pages for large pn */
	if (p->header.flag & BT_ROOT) {
		bn = -1;
		goto out;
	}

	/* start after next leaf page ? */
	if (pn > 1)
		goto b;

	/* get leaf page pn = 1 */
      a:
	bn = le64_to_cpu(p->header.next);

	/* unpin leaf page */
	DT_PUTPAGE(mp);

	/* offset beyond eof ? */
	if (bn == 0) {
		bn = -1;
		goto out;
	}

	goto c;

	/*
	 * scan last internal page level to get target leaf page
	 */
      b:
	/* unpin leftmost leaf page */
	DT_PUTPAGE(mp);

	/* get left most parent page */
	btsp = btstack->top;
	parent = btsp - 1;
	bn = parent->bn;
	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	/* scan parent pages at last internal page level,
	 * consuming pn router entries per page until the
	 * entry addressing leaf page pn is reached
	 */
	while (pn >= p->header.nextindex) {
		pn -= p->header.nextindex;

		/* get next parent page address */
		bn = le64_to_cpu(p->header.next);

		/* unpin current parent page */
		DT_PUTPAGE(mp);

		/* offset beyond eof ? */
		if (bn == 0) {
			bn = -1;
			goto out;
		}

		/* get next parent page */
		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		/* update parent page stack frame */
		parent->bn = bn;
	}

	/* get leaf page address */
	stbl = DT_GETSTBL(p);
	xd = (pxd_t *) & p->slot[stbl[pn]];
	bn = addressPXD(xd);

	/* unpin parent page */
	DT_PUTPAGE(mp);

	/*
	 * get target leaf page
	 */
      c:
	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	/*
	 * leaf page has been completed:
	 * start with 1st entry of next leaf page
	 */
	if (index >= p->header.nextindex) {
		bn = le64_to_cpu(p->header.next);

		/* unpin leaf page */
		DT_PUTPAGE(mp);

		/* offset beyond eof ? */
		if (bn == 0) {
			bn = -1;
			goto out;
		}

		/* get next leaf page */
		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		/* start with 1st entry of next leaf page */
		dtoffset->pn++;
		dtoffset->index = 0;
	}

      out:
	/* return target leaf page pinned */
	btsp = btstack->top;
	btsp->bn = bn;
	btsp->index = dtoffset->index;
	btsp->mp = mp;

	return 0;
}
/*
 *	dtCompare()
 *
 * function: compare search key with an internal entry
 *	(entry names may be chained across multiple slots;
 *	comparison walks the slot chain segment by segment)
 *
 * return:
 *	< 0 if k is < record
 *	= 0 if k is = record
 *	> 0 if k is > record
 */
static int dtCompare(struct component_name * key,	/* search key */
		     dtpage_t * p,	/* directory page */
		     int si)
{				/* entry slot index */
	wchar_t *kname;
	__le16 *name;
	int klen, namlen, len, rc;
	struct idtentry *ih;
	struct dtslot *t;

	/*
	 * force the left-most key on internal pages, at any level of
	 * the tree, to be less than any search key.
	 * this obviates having to update the leftmost key on an internal
	 * page when the user inserts a new key in the tree smaller than
	 * anything that has been stored.
	 *
	 * (? if/when dtSearch() narrows down to 1st entry (index = 0),
	 * at any internal page at any level of the tree,
	 * it descends to child of the entry anyway -
	 * ? make the entry as min size dummy entry)
	 *
	 * if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
	 * return (1);
	 */

	kname = key->name;
	klen = key->namlen;

	/* head segment lives in the idtentry itself */
	ih = (struct idtentry *) & p->slot[si];
	si = ih->next;
	name = ih->name;
	namlen = ih->namlen;
	len = min(namlen, DTIHDRDATALEN);

	/* compare with head/only segment */
	len = min(klen, len);
	if ((rc = UniStrncmp_le(kname, name, len)))
		return rc;

	klen -= len;
	namlen -= len;

	/* compare with additional segment(s) */
	kname += len;
	while (klen > 0 && namlen > 0) {
		/* compare with next name segment */
		t = (struct dtslot *) & p->slot[si];
		len = min(namlen, DTSLOTDATALEN);
		len = min(klen, len);
		name = t->name;
		if ((rc = UniStrncmp_le(kname, name, len)))
			return rc;

		klen -= len;
		namlen -= len;
		kname += len;
		si = t->next;
	}

	/* common prefix matched: shorter string sorts first */
	return (klen - namlen);
}
/*
 *	ciCompare()
 *
 * function: compare search key with an (leaf/internal) entry,
 *	folding the stored name to upper case when the volume has
 *	OS/2 case-insensitive semantics (flag & JFS_OS2)
 *
 * return:
 *	< 0 if k is < record
 *	= 0 if k is = record
 *	> 0 if k is > record
 */
static int ciCompare(struct component_name * key,	/* search key */
		     dtpage_t * p,	/* directory page */
		     int si,	/* entry slot index */
		     int flag)
{
	wchar_t *kname, x;
	__le16 *name;
	int klen, namlen, len, rc;
	struct ldtentry *lh;
	struct idtentry *ih;
	struct dtslot *t;
	int i;

	/*
	 * force the left-most key on internal pages, at any level of
	 * the tree, to be less than any search key.
	 * this obviates having to update the leftmost key on an internal
	 * page when the user inserts a new key in the tree smaller than
	 * anything that has been stored.
	 *
	 * (? if/when dtSearch() narrows down to 1st entry (index = 0),
	 * at any internal page at any level of the tree,
	 * it descends to child of the entry anyway -
	 * ? make the entry as min size dummy entry)
	 *
	 * if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
	 * return (1);
	 */

	kname = key->name;
	klen = key->namlen;

	/*
	 * leaf page entry
	 */
	if (p->header.flag & BT_LEAF) {
		lh = (struct ldtentry *) & p->slot[si];
		si = lh->next;
		name = lh->name;
		namlen = lh->namlen;
		/* leaf head capacity depends on on-disk index support */
		if (flag & JFS_DIR_INDEX)
			len = min(namlen, DTLHDRDATALEN);
		else
			len = min(namlen, DTLHDRDATALEN_LEGACY);
	}
	/*
	 * internal page entry
	 */
	else {
		ih = (struct idtentry *) & p->slot[si];
		si = ih->next;
		name = ih->name;
		namlen = ih->namlen;
		len = min(namlen, DTIHDRDATALEN);
	}

	/* compare with head/only segment */
	len = min(klen, len);
	for (i = 0; i < len; i++, kname++, name++) {
		/* only uppercase if case-insensitive support is on */
		if ((flag & JFS_OS2) == JFS_OS2)
			x = UniToupper(le16_to_cpu(*name));
		else
			x = le16_to_cpu(*name);
		if ((rc = *kname - x))
			return rc;
	}

	klen -= len;
	namlen -= len;

	/* compare with additional segment(s) */
	while (klen > 0 && namlen > 0) {
		/* compare with next name segment */
		t = (struct dtslot *) & p->slot[si];
		len = min(namlen, DTSLOTDATALEN);
		len = min(klen, len);
		name = t->name;
		for (i = 0; i < len; i++, kname++, name++) {
			/* only uppercase if case-insensitive support is on */
			if ((flag & JFS_OS2) == JFS_OS2)
				x = UniToupper(le16_to_cpu(*name));
			else
				x = le16_to_cpu(*name);

			if ((rc = *kname - x))
				return rc;
		}

		klen -= len;
		namlen -= len;
		si = t->next;
	}

	/* common prefix matched: shorter string sorts first */
	return (klen - namlen);
}
/*
 *	ciGetLeafPrefixKey()
 *
 * function: compute prefix of suffix compression
 *	from two adjacent leaf entries
 *	across page boundary
 *
 *	The result (the shortest case-folded key separating the left
 *	entry from the right one) is written into *key.
 *
 * return: non-zero on error (-ENOMEM when a temporary key buffer
 *	cannot be allocated)
 */
static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
			      int ri, struct component_name * key, int flag)
{
	int klen, namlen;
	wchar_t *pl, *pr, *kname;
	struct component_name lkey;
	struct component_name rkey;

	lkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
			    GFP_KERNEL);
	if (lkey.name == NULL)
		return -ENOMEM;

	rkey.name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
			    GFP_KERNEL);
	if (rkey.name == NULL) {
		kfree(lkey.name);
		return -ENOMEM;
	}

	/* get left and right key */
	dtGetKey(lp, li, &lkey, flag);
	lkey.name[lkey.namlen] = 0;

	if ((flag & JFS_OS2) == JFS_OS2)
		ciToUpper(&lkey);

	dtGetKey(rp, ri, &rkey, flag);
	rkey.name[rkey.namlen] = 0;

	if ((flag & JFS_OS2) == JFS_OS2)
		ciToUpper(&rkey);

	/* compute prefix: copy the right key up to and including the
	 * first character that differs from the left key
	 */
	klen = 0;
	kname = key->name;
	namlen = min(lkey.namlen, rkey.namlen);
	for (pl = lkey.name, pr = rkey.name;
	     namlen; pl++, pr++, namlen--, klen++, kname++) {
		*kname = *pr;
		if (*pl != *pr) {
			key->namlen = klen + 1;
			goto free_names;
		}
	}

	/* l->namlen <= r->namlen since l <= r */
	if (lkey.namlen < rkey.namlen) {
		*kname = *pr;
		key->namlen = klen + 1;
	} else			/* l->namelen == r->namelen */
		key->namlen = klen;

      free_names:
	kfree(lkey.name);
	kfree(rkey.name);
	return 0;
}
3845 * dtGetKey()
3847 * function: get key of the entry
3849 static void dtGetKey(dtpage_t * p, int i, /* entry index */
3850 struct component_name * key, int flag)
3852 int si;
3853 s8 *stbl;
3854 struct ldtentry *lh;
3855 struct idtentry *ih;
3856 struct dtslot *t;
3857 int namlen, len;
3858 wchar_t *kname;
3859 __le16 *name;
3861 /* get entry */
3862 stbl = DT_GETSTBL(p);
3863 si = stbl[i];
3864 if (p->header.flag & BT_LEAF) {
3865 lh = (struct ldtentry *) & p->slot[si];
3866 si = lh->next;
3867 namlen = lh->namlen;
3868 name = lh->name;
3869 if (flag & JFS_DIR_INDEX)
3870 len = min(namlen, DTLHDRDATALEN);
3871 else
3872 len = min(namlen, DTLHDRDATALEN_LEGACY);
3873 } else {
3874 ih = (struct idtentry *) & p->slot[si];
3875 si = ih->next;
3876 namlen = ih->namlen;
3877 name = ih->name;
3878 len = min(namlen, DTIHDRDATALEN);
3881 key->namlen = namlen;
3882 kname = key->name;
3885 * move head/only segment
3887 UniStrncpy_from_le(kname, name, len);
3890 * move additional segment(s)
3892 while (si >= 0) {
3893 /* get next segment */
3894 t = &p->slot[si];
3895 kname += len;
3896 namlen -= len;
3897 len = min(namlen, DTSLOTDATALEN);
3898 UniStrncpy_from_le(kname, t->name, len);
3900 si = t->next;
/*
 *	dtInsertEntry()
 *
 * function: allocate free slot(s) and
 *	write a leaf/internal entry
 *
 *	Slots are taken from the page freelist; linelocks are opened
 *	for each run of contiguous slots used.  *dtlock is updated in
 *	case txLinelock() had to allocate a new lock structure.
 *
 * return: entry slot index
 */
static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
			  ddata_t * data, struct dt_lock ** dtlock)
{
	struct dtslot *h, *t;
	struct ldtentry *lh = NULL;
	struct idtentry *ih = NULL;
	int hsi, fsi, klen, len, nextindex;
	wchar_t *kname;
	__le16 *name;
	s8 *stbl;
	pxd_t *xd;
	struct dt_lock *dtlck = *dtlock;
	struct lv *lv;
	int xsi, n;
	s64 bn = 0;
	struct metapage *mp = NULL;

	klen = key->namlen;
	kname = key->name;

	/* allocate a free slot */
	hsi = fsi = p->header.freelist;
	h = &p->slot[fsi];
	p->header.freelist = h->next;
	--p->header.freecnt;

	/* open new linelock */
	if (dtlck->index >= dtlck->maxcnt)
		dtlck = (struct dt_lock *) txLinelock(dtlck);

	lv = & dtlck->lv[dtlck->index];
	lv->offset = hsi;

	/* write head/only segment */
	if (p->header.flag & BT_LEAF) {
		lh = (struct ldtentry *) h;
		lh->next = h->next;
		lh->inumber = cpu_to_le32(data->leaf.ino);
		lh->namlen = klen;
		name = lh->name;
		if (data->leaf.ip) {
			/* on-disk index table in use: register the entry */
			len = min(klen, DTLHDRDATALEN);
			if (!(p->header.flag & BT_ROOT))
				bn = addressPXD(&p->header.self);
			lh->index = cpu_to_le32(add_index(data->leaf.tid,
							  data->leaf.ip,
							  bn, index));
		} else
			len = min(klen, DTLHDRDATALEN_LEGACY);
	} else {
		ih = (struct idtentry *) h;
		ih->next = h->next;
		xd = (pxd_t *) ih;
		*xd = data->xd;
		ih->namlen = klen;
		name = ih->name;
		len = min(klen, DTIHDRDATALEN);
	}

	UniStrncpy_to_le(name, kname, len);

	n = 1;
	xsi = hsi;

	/* write additional segment(s) */
	t = h;
	klen -= len;
	while (klen) {
		/* get free slot */
		fsi = p->header.freelist;
		t = &p->slot[fsi];
		p->header.freelist = t->next;
		--p->header.freecnt;

		/* is next slot contiguous ? */
		if (fsi != xsi + 1) {
			/* close current linelock */
			lv->length = n;
			dtlck->index++;

			/* open new linelock */
			if (dtlck->index < dtlck->maxcnt)
				lv++;
			else {
				dtlck = (struct dt_lock *) txLinelock(dtlck);
				lv = & dtlck->lv[0];
			}

			lv->offset = fsi;
			n = 0;
		}

		kname += len;
		len = min(klen, DTSLOTDATALEN);
		UniStrncpy_to_le(t->name, kname, len);

		n++;
		xsi = fsi;
		klen -= len;
	}

	/* close current linelock */
	lv->length = n;
	dtlck->index++;

	/* hand any newly allocated lock structure back to the caller */
	*dtlock = dtlck;

	/* terminate last/only segment */
	if (h == t) {
		/* single segment entry */
		if (p->header.flag & BT_LEAF)
			lh->next = -1;
		else
			ih->next = -1;
	} else
		/* multi-segment entry */
		t->next = -1;

	/* if insert into middle, shift right succeeding entries in stbl */
	stbl = DT_GETSTBL(p);
	nextindex = p->header.nextindex;
	if (index < nextindex) {
		memmove(stbl + index + 1, stbl + index, nextindex - index);

		if ((p->header.flag & BT_LEAF) && data->leaf.ip) {
			s64 lblock;

			/*
			 * Need to update slot number for entries that moved
			 * in the stbl
			 */
			mp = NULL;
			for (n = index + 1; n <= nextindex; n++) {
				lh = (struct ldtentry *) & (p->slot[stbl[n]]);
				modify_index(data->leaf.tid, data->leaf.ip,
					     le32_to_cpu(lh->index), bn, n,
					     &mp, &lblock);
			}
			if (mp)
				release_metapage(mp);
		}
	}

	stbl[index] = hsi;

	/* advance next available entry index of stbl */
	++p->header.nextindex;
}
/*
 *	dtMoveEntry()
 *
 * function: move entries from split/left page to new/right page
 *
 *	entries [si .. sp->header.nextindex) of the source page are
 *	copied to the destination page starting at its first free slot;
 *	the vacated source slots are returned to the source freelist.
 *
 *	nextindex of dst page and freelist/freecnt of both pages
 *	are updated.
 *
 * parameters:
 *	sp	- source (split/left) page
 *	si	- index (in source stbl) of first entry to move
 *	dp	- destination (new/right) page
 *	sdtlock	- in/out: current source-page linelock (may be extended
 *		  via txLinelock() and handed back to the caller)
 *	ddtlock	- in/out: current destination-page linelock
 *	do_index - nonzero if leaf entries carry a persistent index field
 *		  (struct ldtentry with index) that must be copied
 */
static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
			struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
			int do_index)
{
	int ssi, next;		/* src slot index */
	int di;			/* dst entry index */
	int dsi;		/* dst slot index */
	s8 *sstbl, *dstbl;	/* sorted entry table */
	int snamlen, len;
	struct ldtentry *slh, *dlh = NULL;
	struct idtentry *sih, *dih = NULL;
	struct dtslot *h, *s, *d;
	struct dt_lock *sdtlck = *sdtlock, *ddtlck = *ddtlock;
	struct lv *slv, *dlv;
	int xssi, ns, nd;	/* xssi: previous src slot, to detect contiguity;
				 * ns/nd: slots accumulated in the open
				 * src/dst linelock vector */
	int sfsi;		/* running head of the source freelist */

	sstbl = (s8 *) & sp->slot[sp->header.stblindex];
	dstbl = (s8 *) & dp->slot[dp->header.stblindex];

	dsi = dp->header.freelist;	/* first (whole page) free slot */
	sfsi = sp->header.freelist;

	/* linelock destination entry slot
	 * (dst slots are consumed contiguously from a fresh page, so a
	 * single linelock vector covers all of them)
	 */
	dlv = & ddtlck->lv[ddtlck->index];
	dlv->offset = dsi;

	/* linelock source entry slot */
	slv = & sdtlck->lv[sdtlck->index];
	slv->offset = sstbl[si];
	xssi = slv->offset - 1;	/* prime so the first slot looks contiguous */

	/*
	 * move entries
	 */
	ns = nd = 0;
	for (di = 0; si < sp->header.nextindex; si++, di++) {
		ssi = sstbl[si];
		dstbl[di] = dsi;

		/* is next slot contiguous ?
		 * source slots are scattered; a gap forces the current
		 * linelock vector closed and a new one opened
		 */
		if (ssi != xssi + 1) {
			/* close current linelock */
			slv->length = ns;
			sdtlck->index++;

			/* open new linelock */
			if (sdtlck->index < sdtlck->maxcnt)
				slv++;
			else {
				sdtlck = (struct dt_lock *) txLinelock(sdtlck);
				slv = & sdtlck->lv[0];
			}

			slv->offset = ssi;
			ns = 0;
		}

		/*
		 * move head/only segment of an entry
		 */
		/* get dst slot */
		h = d = &dp->slot[dsi];

		/* get src slot and move */
		s = &sp->slot[ssi];
		if (sp->header.flag & BT_LEAF) {
			/* get source entry */
			slh = (struct ldtentry *) s;
			dlh = (struct ldtentry *) h;
			snamlen = slh->namlen;

			if (do_index) {
				len = min(snamlen, DTLHDRDATALEN);
				dlh->index = slh->index; /* little-endian */
			} else
				len = min(snamlen, DTLHDRDATALEN_LEGACY);

			/* copy fixed entry header plus len 2-byte name
			 * characters (6 appears to be the leaf header size
			 * ahead of the name — confirm against struct
			 * ldtentry layout)
			 */
			memcpy(dlh, slh, 6 + len * 2);

			next = slh->next;

			/* update dst head/only segment next field */
			dsi++;
			dlh->next = dsi;
		} else {
			sih = (struct idtentry *) s;
			snamlen = sih->namlen;

			len = min(snamlen, DTIHDRDATALEN);
			dih = (struct idtentry *) h;
			/* internal entry: larger fixed header (pxd + next +
			 * namlen) ahead of the name — confirm against
			 * struct idtentry layout
			 */
			memcpy(dih, sih, 10 + len * 2);
			next = sih->next;

			dsi++;
			dih->next = dsi;
		}

		/* free src head/only segment: push onto local freelist */
		s->next = sfsi;
		s->cnt = 1;
		sfsi = ssi;

		ns++;
		nd++;
		xssi = ssi;

		/*
		 * move additional segment(s) of the entry
		 */
		snamlen -= len;
		while ((ssi = next) >= 0) {
			/* is next slot contiguous ? */
			if (ssi != xssi + 1) {
				/* close current linelock */
				slv->length = ns;
				sdtlck->index++;

				/* open new linelock */
				if (sdtlck->index < sdtlck->maxcnt)
					slv++;
				else {
					sdtlck =
					    (struct dt_lock *)
					    txLinelock(sdtlck);
					slv = & sdtlck->lv[0];
				}

				slv->offset = ssi;
				ns = 0;
			}

			/* get next source segment */
			s = &sp->slot[ssi];

			/* get next destination free slot */
			d++;

			len = min(snamlen, DTSLOTDATALEN);
			UniStrncpy_le(d->name, s->name, len);

			ns++;
			nd++;
			xssi = ssi;

			dsi++;
			d->next = dsi;

			/* free source segment */
			next = s->next;
			s->next = sfsi;
			s->cnt = 1;
			sfsi = ssi;

			snamlen -= len;
		}		/* end while */

		/* terminate dst last/only segment */
		if (h == d) {
			/* single segment entry */
			if (dp->header.flag & BT_LEAF)
				dlh->next = -1;
			else
				dih->next = -1;
		} else
			/* multi-segment entry */
			d->next = -1;
	}			/* end for */

	/* close current linelock */
	slv->length = ns;
	sdtlck->index++;
	*sdtlock = sdtlck;

	dlv->length = nd;
	ddtlck->index++;
	*ddtlock = ddtlck;

	/* update source header: splice freed slots onto its freelist */
	sp->header.freelist = sfsi;
	sp->header.freecnt += nd;

	/* update destination header */
	dp->header.nextindex = di;

	dp->header.freelist = dsi;
	dp->header.freecnt -= nd;
}
/*
 *	dtDeleteEntry()
 *
 * function: free a (leaf/internal) entry
 *
 *	all segment slots of the entry at stbl index fi are returned to
 *	the page freelist, and succeeding stbl entries are shifted left.
 *
 *	log freelist header, stbl, and each segment slot of entry
 *	(even though last/only segment next field is modified,
 *	physical image logging requires all segment slots of
 *	the entry logged to avoid applying previous updates
 *	to the same slots)
 *
 * parameters:
 *	p	- directory page
 *	fi	- stbl index of the entry to free
 *	dtlock	- in/out: current linelock (may be extended via
 *		  txLinelock() and handed back to the caller)
 */
static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock)
{
	int fsi;		/* free entry slot index */
	s8 *stbl;
	struct dtslot *t;
	int si, freecnt;
	struct dt_lock *dtlck = *dtlock;
	struct lv *lv;
	int xsi, n;		/* xsi: previous slot, to detect contiguity;
				 * n: slots accumulated in the open linelock */

	/* get free entry slot index */
	stbl = DT_GETSTBL(p);
	fsi = stbl[fi];

	/* open new linelock */
	if (dtlck->index >= dtlck->maxcnt)
		dtlck = (struct dt_lock *) txLinelock(dtlck);
	lv = & dtlck->lv[dtlck->index];

	lv->offset = fsi;

	/* get the head/only segment; the next-segment field lives at a
	 * different offset in leaf vs internal entries
	 */
	t = &p->slot[fsi];
	if (p->header.flag & BT_LEAF)
		si = ((struct ldtentry *) t)->next;
	else
		si = ((struct idtentry *) t)->next;
	/* rewrite the head as a generic free slot chained to its tail */
	t->next = si;
	t->cnt = 1;

	n = freecnt = 1;
	xsi = fsi;

	/* find the last/only segment, marking each slot free (cnt = 1)
	 * and linelocking every slot of the entry
	 */
	while (si >= 0) {
		/* is next slot contiguous ? */
		if (si != xsi + 1) {
			/* close current linelock */
			lv->length = n;
			dtlck->index++;

			/* open new linelock */
			if (dtlck->index < dtlck->maxcnt)
				lv++;
			else {
				dtlck = (struct dt_lock *) txLinelock(dtlck);
				lv = & dtlck->lv[0];
			}

			lv->offset = si;
			n = 0;
		}

		n++;
		xsi = si;
		freecnt++;

		t = &p->slot[si];
		t->cnt = 1;
		si = t->next;
	}

	/* close current linelock */
	lv->length = n;
	dtlck->index++;

	*dtlock = dtlck;

	/* update freelist: splice the freed chain onto the page freelist
	 * (t still points at the last segment of the entry)
	 */
	t->next = p->header.freelist;
	p->header.freelist = fsi;
	p->header.freecnt += freecnt;

	/* if delete from middle,
	 * shift left the succeeding entries in the stbl
	 */
	si = p->header.nextindex;
	if (fi < si - 1)
		memmove(&stbl[fi], &stbl[fi + 1], si - fi - 1);

	p->header.nextindex--;
}
/*
 *	dtTruncateEntry()
 *
 * function: truncate a (leaf/internal) entry
 *
 *	the entry's name is cut down to its head segment (namlen set to 0,
 *	next set to -1) and all continuation segments are returned to the
 *	page freelist; the head slot itself remains allocated.
 *	only called on internal pages (ASSERT below).
 *
 *	log freelist header, stbl, and each segment slot of entry
 *	(even though last/only segment next field is modified,
 *	physical image logging requires all segment slots of
 *	the entry logged to avoid applying previous updates
 *	to the same slots)
 *
 * parameters:
 *	p	- directory page
 *	ti	- stbl index of the entry to truncate
 *	dtlock	- in/out: current linelock (may be extended via
 *		  txLinelock() and handed back to the caller)
 */
static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock)
{
	int tsi;		/* truncate entry slot index */
	s8 *stbl;
	struct dtslot *t;
	int si, freecnt;
	struct dt_lock *dtlck = *dtlock;
	struct lv *lv;
	int fsi, xsi, n;	/* fsi: head of freed chain; xsi: previous
				 * slot; n: slots in the open linelock */

	/* get free entry slot index */
	stbl = DT_GETSTBL(p);
	tsi = stbl[ti];

	/* open new linelock */
	if (dtlck->index >= dtlck->maxcnt)
		dtlck = (struct dt_lock *) txLinelock(dtlck);
	lv = & dtlck->lv[dtlck->index];

	lv->offset = tsi;

	/* get the head/only segment: detach its continuation chain */
	t = &p->slot[tsi];
	ASSERT(p->header.flag & BT_INTERNAL);
	((struct idtentry *) t)->namlen = 0;
	si = ((struct idtentry *) t)->next;
	((struct idtentry *) t)->next = -1;

	n = 1;
	freecnt = 0;		/* head slot stays allocated, so start at 0 */
	fsi = si;		/* first continuation segment (may be -1) */
	xsi = tsi;

	/* find the last/only segment, marking each continuation slot
	 * free (cnt = 1) and linelocking it
	 */
	while (si >= 0) {
		/* is next slot contiguous ? */
		if (si != xsi + 1) {
			/* close current linelock */
			lv->length = n;
			dtlck->index++;

			/* open new linelock */
			if (dtlck->index < dtlck->maxcnt)
				lv++;
			else {
				dtlck = (struct dt_lock *) txLinelock(dtlck);
				lv = & dtlck->lv[0];
			}

			lv->offset = si;
			n = 0;
		}

		n++;
		xsi = si;
		freecnt++;

		t = &p->slot[si];
		t->cnt = 1;
		si = t->next;
	}

	/* close current linelock */
	lv->length = n;
	dtlck->index++;

	*dtlock = dtlck;

	/* update freelist: nothing to splice if the entry had no
	 * continuation segments
	 */
	if (freecnt == 0)
		return;
	t->next = p->header.freelist;
	p->header.freelist = fsi;
	p->header.freecnt += freecnt;
}
/*
 *	dtLinelockFreelist()
 *
 * function: walk the page freelist and linelock every free slot whose
 *	index is below m, so the freelist chain is captured in the
 *	transaction log. page contents are not modified.
 *
 * parameters:
 *	p	- directory page
 *	m	- max slot index (slots >= m are not locked)
 *	dtlock	- in/out: current linelock (may be extended via
 *		  txLinelock() and handed back to the caller)
 */
static void dtLinelockFreelist(dtpage_t * p,	/* directory page */
			       int m,	/* max slot index */
			       struct dt_lock ** dtlock)
{
	int fsi;		/* free entry slot index */
	struct dtslot *t;
	int si;
	struct dt_lock *dtlck = *dtlock;
	struct lv *lv;
	int xsi, n;		/* xsi: previous slot, to detect contiguity;
				 * n: slots accumulated in the open linelock */

	/* get free entry slot index */
	fsi = p->header.freelist;

	/* open new linelock */
	if (dtlck->index >= dtlck->maxcnt)
		dtlck = (struct dt_lock *) txLinelock(dtlck);
	lv = & dtlck->lv[dtlck->index];

	lv->offset = fsi;

	n = 1;
	xsi = fsi;

	t = &p->slot[fsi];
	si = t->next;

	/* find the last/only segment: follow the freelist chain,
	 * stopping at the end (-1) or at the first slot >= m
	 */
	while (si < m && si >= 0) {
		/* is next slot contiguous ? */
		if (si != xsi + 1) {
			/* close current linelock */
			lv->length = n;
			dtlck->index++;

			/* open new linelock */
			if (dtlck->index < dtlck->maxcnt)
				lv++;
			else {
				dtlck = (struct dt_lock *) txLinelock(dtlck);
				lv = & dtlck->lv[0];
			}

			lv->offset = si;
			n = 0;
		}

		n++;
		xsi = si;

		t = &p->slot[si];
		si = t->next;
	}

	/* close current linelock */
	lv->length = n;
	dtlck->index++;

	*dtlock = dtlck;
}
4510 * NAME: dtModify
4512 * FUNCTION: Modify the inode number part of a directory entry
4514 * PARAMETERS:
4515 * tid - Transaction id
4516 * ip - Inode of parent directory
4517 * key - Name of entry to be modified
4518 * orig_ino - Original inode number expected in entry
4519 * new_ino - New inode number to put into entry
4520 * flag - JFS_RENAME
4522 * RETURNS:
4523 * -ESTALE - If entry found does not match orig_ino passed in
4524 * -ENOENT - If no entry can be found to match key
4525 * 0 - If successfully modified entry
4527 int dtModify(tid_t tid, struct inode *ip,
4528 struct component_name * key, ino_t * orig_ino, ino_t new_ino, int flag)
4530 int rc;
4531 s64 bn;
4532 struct metapage *mp;
4533 dtpage_t *p;
4534 int index;
4535 struct btstack btstack;
4536 struct tlock *tlck;
4537 struct dt_lock *dtlck;
4538 struct lv *lv;
4539 s8 *stbl;
4540 int entry_si; /* entry slot index */
4541 struct ldtentry *entry;
4544 * search for the entry to modify:
4546 * dtSearch() returns (leaf page pinned, index at which to modify).
4548 if ((rc = dtSearch(ip, key, orig_ino, &btstack, flag)))
4549 return rc;
4551 /* retrieve search result */
4552 DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
4554 BT_MARK_DIRTY(mp, ip);
4556 * acquire a transaction lock on the leaf page of named entry
4558 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
4559 dtlck = (struct dt_lock *) & tlck->lock;
4561 /* get slot index of the entry */
4562 stbl = DT_GETSTBL(p);
4563 entry_si = stbl[index];
4565 /* linelock entry */
4566 ASSERT(dtlck->index == 0);
4567 lv = & dtlck->lv[0];
4568 lv->offset = entry_si;
4569 lv->length = 1;
4570 dtlck->index++;
4572 /* get the head/only segment */
4573 entry = (struct ldtentry *) & p->slot[entry_si];
4575 /* substitute the inode number of the entry */
4576 entry->inumber = cpu_to_le32(new_ino);
4578 /* unpin the leaf page */
4579 DT_PUTPAGE(mp);
4581 return 0;