/*
 * linux/fs/ufs/util.h
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include "swab.h"

/*
 * some useful macros
 */
#define in_range(b,first,len)	((b)>=(first)&&(b)<(first)+(len))
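
/*
 * e.g. in_range(fragno, cgstart, cgsize) tests cgstart <= fragno < cgstart + cgsize.
 * Note that "b" and "first" are each evaluated twice.
 */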

/*
 * functions used for retyping
 */
static inline struct ufs_buffer_head *UCPI_UBH(struct ufs_cg_private_info *cpi)
{
	return &cpi->c_ubh;
}

static inline struct ufs_buffer_head *USPI_UBH(struct ufs_sb_private_info *spi)
{
	return &spi->s_ubh;
}

/*
 * macros used for accessing structures
 */
static inline s32
ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT)
			return fs32_to_cpu(sb, usb1->fs_u0.fs_sun.fs_state);
		/* Fall Through to UFS_ST_SUN */
	case UFS_ST_SUN:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state);
	case UFS_ST_SUNx86:
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
	case UFS_ST_44BSD:
	default:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_state);
	}
}

static inline void
ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3, s32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT) {
			usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value);
			break;
		}
		/* Fall Through to UFS_ST_SUN */
	case UFS_ST_SUN:
		usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_SUNx86:
		usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_44BSD:
		usb3->fs_un2.fs_44.fs_state = cpu_to_fs32(sb, value);
		break;
	}
}

static inline u32
ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
		  struct ufs_super_block_third *usb3)
{
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sunx86.fs_npsect);
	else
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
}

static inline u64
ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qbmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qbmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qbmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u64
ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qfmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qfmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qfmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u16
ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		return fs16_to_cpu(sb, de->d_u.d_namlen);
	else
		return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
}

static inline void
ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		de->d_u.d_namlen = cpu_to_fs16(sb, value);
	else
		de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
}
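
/*
 * Old-style directory entries store the name length as a 16-bit on-disk
 * quantity, hence the fs16 conversion; the 44BSD layout splits that word
 * into a byte of d_type and a byte of d_namlen, so it is read and written
 * directly.  The "XXX" remarks above flag that this asymmetry has long
 * looked suspicious.
 */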

static inline void
ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
		return;

	/*
	 * TODO turn this into a table lookup
	 */
	switch (mode & S_IFMT) {
	case S_IFSOCK:
		de->d_u.d_44.d_type = DT_SOCK;
		break;
	case S_IFLNK:
		de->d_u.d_44.d_type = DT_LNK;
		break;
	case S_IFREG:
		de->d_u.d_44.d_type = DT_REG;
		break;
	case S_IFBLK:
		de->d_u.d_44.d_type = DT_BLK;
		break;
	case S_IFDIR:
		de->d_u.d_44.d_type = DT_DIR;
		break;
	case S_IFCHR:
		de->d_u.d_44.d_type = DT_CHR;
		break;
	case S_IFIFO:
		de->d_u.d_44.d_type = DT_FIFO;
		break;
	default:
		de->d_u.d_44.d_type = DT_UNKNOWN;
	}
}
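
/*
 * One way the TODO above could be resolved (a sketch only, not part of
 * this header): index a small table by the S_IFMT bits, much as ext2 does,
 * with S_SHIFT standing for the 12-bit shift of the mode's type field:
 *
 *	static const unsigned char ufs_type_by_mode[S_IFMT >> S_SHIFT] = {
 *		[S_IFREG >> S_SHIFT]	= DT_REG,
 *		[S_IFDIR >> S_SHIFT]	= DT_DIR,
 *		[S_IFCHR >> S_SHIFT]	= DT_CHR,
 *		[S_IFBLK >> S_SHIFT]	= DT_BLK,
 *		[S_IFIFO >> S_SHIFT]	= DT_FIFO,
 *		[S_IFSOCK >> S_SHIFT]	= DT_SOCK,
 *		[S_IFLNK >> S_SHIFT]	= DT_LNK,
 *	};
 *	de->d_u.d_44.d_type = ufs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 *
 * DT_UNKNOWN is 0, so unlisted mode types fall out correctly.
 */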

static inline u32
ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_uid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
		/* Fall through */
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
	}
}

static inline void
ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_uid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		/* Fall through */
	default:
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	}
}

static inline u32
ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
		/* Fall through */
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
	}
}

static inline void
ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_gid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		/* Fall through */
	default:
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	}
}
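
/*
 * With UFS_UID_EFT the full 32-bit uid/gid lives in ui_u3.ui_sun; the
 * legacy 16-bit ui_suid/ui_sgid fields are kept in sync but saturate at
 * 0xFFFF, which the getters above take as the cue to read the 32-bit
 * field instead.
 */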

extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
extern int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len);

/*
 * These functions manipulate ufs buffers
 */
#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)
extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, u64 , u64);
extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern void ubh_brelse (struct ufs_buffer_head *);
extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
extern void ubh_sync_block(struct ufs_buffer_head *);
extern void ubh_bforget (struct ufs_buffer_head *);
extern int ubh_buffer_dirty (struct ufs_buffer_head *);
#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
extern void _ubh_ubhcpymem_(struct ufs_sb_private_info *, unsigned char *, struct ufs_buffer_head *, unsigned);
#define ubh_memcpyubh(ubh,mem,size) _ubh_memcpyubh_(uspi,ubh,mem,size)
extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);

/* These functions work with cache pages */
extern struct page *ufs_get_locked_page(struct address_space *mapping,
					pgoff_t index);
static inline void ufs_put_locked_page(struct page *page)
{
	unlock_page(page);
	page_cache_release(page);
}

/*
 * macros and inline function to get important structures from ufs_sb_private_info
 */
static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
				   unsigned int offset)
{
	unsigned int index;

	index = offset >> uspi->s_fshift;
	offset &= ~uspi->s_fmask;
	return uspi->s_ubh.bh[index]->b_data + offset;
}
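
/*
 * The superblock is read into one buffer_head per fragment, so a byte
 * offset into it is split into a buffer index (offset >> s_fshift) and
 * the remaining offset inside that fragment.
 */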

#define ubh_get_usb_first(uspi) \
	((struct ufs_super_block_first *)get_usb_offset((uspi), 0))

#define ubh_get_usb_second(uspi) \
	((struct ufs_super_block_second *)get_usb_offset((uspi), UFS_SECTOR_SIZE))

#define ubh_get_usb_third(uspi) \
	((struct ufs_super_block_third *)get_usb_offset((uspi), 2*UFS_SECTOR_SIZE))

#define ubh_get_ucg(ubh) \
	((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))

/*
 * Extract byte from ufs_buffer_head
 * Extract the bits for a block from a map inside ufs_buffer_head
 */
#define ubh_get_addr8(ubh,begin) \
	((u8*)(ubh)->bh[(begin) >> uspi->s_fshift]->b_data + \
	((begin) & ~uspi->s_fmask))

#define ubh_get_addr16(ubh,begin) \
	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>1) - 1)))

#define ubh_get_addr32(ubh,begin) \
	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>2) - 1)))

#define ubh_get_addr64(ubh,begin) \
	(((__fs64*)((ubh)->bh[(begin) >> (uspi->s_fshift-3)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>3) - 1)))

#define ubh_get_addr ubh_get_addr8
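
/*
 * The ubh_get_addr*() macros turn an index into a pointer inside the right
 * buffer of a ufs_buffer_head: the high bits select the buffer, the low
 * bits the element within it, scaled to the element width.  They all rely
 * on a variable named "uspi" being visible at the call site.
 */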

static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
				     struct ufs_buffer_head *ubh,
				     u64 blk)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return ubh_get_addr64(ubh, blk);
	else
		return ubh_get_addr32(ubh, blk);
}

#define ubh_blkmap(ubh,begin,bit) \
	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))

/*
 * Determine the number of available frags given a
 * percentage to hold in reserve.
 */
static inline u64
ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved)
{
	return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
		uspi->cs_total.cs_nffree -
		(uspi->s_dsize * (percentreserved) / 100);
}
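
/*
 * e.g. with percentreserved = 10, one tenth of s_dsize is withheld, so the
 * value counts only fragments that may actually be handed out.
 */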

/*
 * Macros to access cylinder group array structures
 */
#define ubh_cg_blktot(ucpi,cylno) \
	(*((__fs32*)ubh_get_addr(UCPI_UBH(ucpi), (ucpi)->c_btotoff + ((cylno) << 2))))

#define ubh_cg_blks(ucpi,cylno,rpos) \
	(*((__fs16*)ubh_get_addr(UCPI_UBH(ucpi), \
	(ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))

/*
 * Bitmap operations
 * These functions work like classical bitmap operations.
 * The difference is that we don't have the whole bitmap
 * in one contiguous chunk of memory, but in several buffers.
 * The parameters of each function are super_block, ufs_buffer_head and
 * position of the beginning of the bitmap.
 */
#define ubh_setbit(ubh,begin,bit) \
	(*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) |= (1 << ((bit) & 7)))

#define ubh_clrbit(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) &= ~(1 << ((bit) & 7)))

#define ubh_isset(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) & (1 << ((bit) & 7)))

#define ubh_isclr(ubh,begin,bit) (!ubh_isset(ubh,begin,bit))

#define ubh_find_first_zero_bit(ubh,begin,size) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,0)

#define ubh_find_next_zero_bit(ubh,begin,size,offset) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_next_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned size, unsigned offset)
{
	unsigned base, count, pos;

	size -= offset;
	begin <<= 3;
	offset += begin;
	base = offset >> uspi->s_bpfshift;
	offset &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int, size + offset, uspi->s_bpf);
		size -= count - offset;
		pos = find_next_zero_bit_le(ubh->bh[base]->b_data, count, offset);
		if (pos < count || !size)
			break;
		base++;
		offset = 0;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}
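
/*
 * Like find_next_zero_bit_le(), but for a bitmap scattered over several
 * buffers: "begin" is the byte offset of the bitmap inside the
 * ufs_buffer_head and the search advances one s_bpf-bit buffer at a time.
 */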

static inline unsigned find_last_zero_bit (unsigned char * bitmap,
	unsigned size, unsigned offset)
{
	unsigned bit, i;
	unsigned char * mapp;
	unsigned char map;

	mapp = bitmap + (size >> 3);
	map = *mapp--;
	bit = 1 << (size & 7);
	for (i = size; i > offset; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & 7) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << 7;
		}
	}
	return i;
}

#define ubh_find_last_zero_bit(ubh,begin,size,offset) _ubh_find_last_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_last_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned start, unsigned end)
{
	unsigned base, count, pos, size;

	size = start - end;
	begin <<= 3;
	start += begin;
	base = start >> uspi->s_bpfshift;
	start &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int,
			      size + (uspi->s_bpf - start), uspi->s_bpf)
			- (uspi->s_bpf - start);
		size -= count;
		pos = find_last_zero_bit (ubh->bh[base]->b_data,
					  start, start - count);
		if (pos > start - count || !size)
			break;
		base--;
		start = uspi->s_bpf;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}

#define ubh_isblockclear(ubh,begin,block) (!_ubh_isblockset_(uspi,ubh,begin,block))

#define ubh_isblockset(ubh,begin,block) _ubh_isblockset_(uspi,ubh,begin,block)
static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		return (*ubh_get_addr (ubh, begin + block) == 0xff);
	case 4:
		return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
	case 2:
		return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
	case 1:
		return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
	}
	return 0;
}
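
/*
 * A block is considered set when every fragment bit belonging to it is
 * set: a whole 0xff byte with 8 fragments per block, a nibble with 4, a
 * bit pair with 2 and a single bit with 1.
 */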

#define ubh_clrblock(ubh,begin,block) _ubh_clrblock_(uspi,ubh,begin,block)
static inline void _ubh_clrblock_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		*ubh_get_addr (ubh, begin + block) = 0x00;
		return;
	case 4:
		*ubh_get_addr (ubh, begin + (block >> 1)) &= ~(0x0f << ((block & 0x01) << 2));
		return;
	case 2:
		*ubh_get_addr (ubh, begin + (block >> 2)) &= ~(0x03 << ((block & 0x03) << 1));
		return;
	case 1:
		*ubh_get_addr (ubh, begin + (block >> 3)) &= ~(0x01 << ((block & 0x07)));
		return;
	}
}

#define ubh_setblock(ubh,begin,block) _ubh_setblock_(uspi,ubh,begin,block)
static inline void _ubh_setblock_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		*ubh_get_addr(ubh, begin + block) = 0xff;
		return;
	case 4:
		*ubh_get_addr(ubh, begin + (block >> 1)) |= (0x0f << ((block & 0x01) << 2));
		return;
	case 2:
		*ubh_get_addr(ubh, begin + (block >> 2)) |= (0x03 << ((block & 0x03) << 1));
		return;
	case 1:
		*ubh_get_addr(ubh, begin + (block >> 3)) |= (0x01 << ((block & 0x07)));
		return;
	}
}

static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
	__fs32 * fraglist, int cnt)
{
	struct ufs_sb_private_info * uspi;
	unsigned fragsize, pos;

	uspi = UFS_SB(sb)->s_uspi;

	fragsize = 0;
	for (pos = 0; pos < uspi->s_fpb; pos++) {
		if (blockmap & (1 << pos)) {
			fragsize++;
		}
		else if (fragsize > 0) {
			fs32_add(sb, &fraglist[fragsize], cnt);
			fragsize = 0;
		}
	}
	if (fragsize > 0 && fragsize < uspi->s_fpb)
		fs32_add(sb, &fraglist[fragsize], cnt);
}
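
/*
 * ufs_fragacct() walks a block's fragment map and accounts each maximal
 * run of set bits: fraglist[run length] is adjusted by cnt for every such
 * run, except a run covering the whole block.
 */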

static inline void *ufs_get_direct_data_ptr(struct ufs_sb_private_info *uspi,
					    struct ufs_inode_info *ufsi,
					    unsigned blk)
{
	BUG_ON(blk > UFS_TIND_BLOCK);
	return uspi->fs_magic == UFS2_MAGIC ?
		(void *)&ufsi->i_u1.u2_i_data[blk] :
		(void *)&ufsi->i_u1.i_data[blk];
}

static inline u64 ufs_data_ptr_to_cpu(struct super_block *sb, void *p)
{
	return UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC ?
		fs64_to_cpu(sb, *(__fs64 *)p) :
		fs32_to_cpu(sb, *(__fs32 *)p);
}

static inline void ufs_cpu_to_data_ptr(struct super_block *sb, void *p, u64 val)
{
	if (UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = cpu_to_fs64(sb, val);
	else
		*(__fs32 *)p = cpu_to_fs32(sb, val);
}

static inline void ufs_data_ptr_clear(struct ufs_sb_private_info *uspi,
				      void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = 0;
	else
		*(__fs32 *)p = 0;
}

static inline int ufs_is_data_ptr_zero(struct ufs_sb_private_info *uspi,
					void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return *(__fs64 *)p == 0;
	else
		return *(__fs32 *)p == 0;
}
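
/*
 * UFS2 keeps block pointers as 64-bit on-disk values while UFS1 uses
 * 32-bit ones; the helpers above hide that difference behind a void *
 * pointing at the on-disk slot.
 */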