gro: Allow tunnel stacking in the case of FOU/GUE
[linux/fpc-iii.git] / fs / stat.c
blobcccc1aab9a8b75231f056a18b0b9fc3ecaaf1561
1 /*
2 * linux/fs/stat.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
7 #include <linux/export.h>
8 #include <linux/mm.h>
9 #include <linux/errno.h>
10 #include <linux/file.h>
11 #include <linux/highuid.h>
12 #include <linux/fs.h>
13 #include <linux/namei.h>
14 #include <linux/security.h>
15 #include <linux/syscalls.h>
16 #include <linux/pagemap.h>
18 #include <asm/uaccess.h>
19 #include <asm/unistd.h>
21 void generic_fillattr(struct inode *inode, struct kstat *stat)
23 stat->dev = inode->i_sb->s_dev;
24 stat->ino = inode->i_ino;
25 stat->mode = inode->i_mode;
26 stat->nlink = inode->i_nlink;
27 stat->uid = inode->i_uid;
28 stat->gid = inode->i_gid;
29 stat->rdev = inode->i_rdev;
30 stat->size = i_size_read(inode);
31 stat->atime = inode->i_atime;
32 stat->mtime = inode->i_mtime;
33 stat->ctime = inode->i_ctime;
34 stat->blksize = (1 << inode->i_blkbits);
35 stat->blocks = inode->i_blocks;
38 EXPORT_SYMBOL(generic_fillattr);
40 /**
41 * vfs_getattr_nosec - getattr without security checks
42 * @path: file to get attributes from
43 * @stat: structure to return attributes in
45 * Get attributes without calling security_inode_getattr.
47 * Currently the only caller other than vfs_getattr is internal to the
48 * filehandle lookup code, which uses only the inode number and returns
49 * no attributes to any user. Any other code probably wants
50 * vfs_getattr.
52 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
54 struct inode *inode = d_backing_inode(path->dentry);
56 if (inode->i_op->getattr)
57 return inode->i_op->getattr(path->mnt, path->dentry, stat);
59 generic_fillattr(inode, stat);
60 return 0;
63 EXPORT_SYMBOL(vfs_getattr_nosec);
/*
 * vfs_getattr - get file attributes, with the security check.
 *
 * Asks the security module for permission first; only when it grants
 * access does the real work in vfs_getattr_nosec() happen.
 */
int vfs_getattr(struct path *path, struct kstat *stat)
{
	int error = security_inode_getattr(path);

	if (error)
		return error;

	return vfs_getattr_nosec(path, stat);
}
EXPORT_SYMBOL(vfs_getattr);
77 int vfs_fstat(unsigned int fd, struct kstat *stat)
79 struct fd f = fdget_raw(fd);
80 int error = -EBADF;
82 if (f.file) {
83 error = vfs_getattr(&f.file->f_path, stat);
84 fdput(f);
86 return error;
88 EXPORT_SYMBOL(vfs_fstat);
90 int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
91 int flag)
93 struct path path;
94 int error = -EINVAL;
95 unsigned int lookup_flags = 0;
97 if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
98 AT_EMPTY_PATH)) != 0)
99 goto out;
101 if (!(flag & AT_SYMLINK_NOFOLLOW))
102 lookup_flags |= LOOKUP_FOLLOW;
103 if (flag & AT_EMPTY_PATH)
104 lookup_flags |= LOOKUP_EMPTY;
105 retry:
106 error = user_path_at(dfd, filename, lookup_flags, &path);
107 if (error)
108 goto out;
110 error = vfs_getattr(&path, stat);
111 path_put(&path);
112 if (retry_estale(error, lookup_flags)) {
113 lookup_flags |= LOOKUP_REVAL;
114 goto retry;
116 out:
117 return error;
119 EXPORT_SYMBOL(vfs_fstatat);
/*
 * vfs_stat - stat() semantics: look @name up relative to CWD, following
 * terminal symlinks (no AT_* flags set).
 */
int vfs_stat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
EXPORT_SYMBOL(vfs_stat);
/*
 * vfs_lstat - lstat() semantics: as vfs_stat(), but a terminal symlink is
 * stat()ed itself rather than followed (AT_SYMLINK_NOFOLLOW).
 */
int vfs_lstat(const char __user *name, struct kstat *stat)
{
	return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
EXPORT_SYMBOL(vfs_lstat);
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
/*
 * cp_old_stat - convert a kstat into the obsolete __old_kernel_stat
 * layout and copy it out to @statbuf.
 *
 * Returns 0 on success, -EOVERFLOW when a value does not fit the old
 * ABI, or -EFAULT if the copy to userspace fails.
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	/* Nag the first few users of the obsolete ABI, then go quiet. */
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Refuse to silently truncate a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* Same overflow rule for the (narrow) link count. */
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	/* Translate k[ug]ids into the caller's user namespace. */
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	/* This ABI cannot represent files larger than MAX_NON_LFS. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
177 SYSCALL_DEFINE2(stat, const char __user *, filename,
178 struct __old_kernel_stat __user *, statbuf)
180 struct kstat stat;
181 int error;
183 error = vfs_stat(filename, &stat);
184 if (error)
185 return error;
187 return cp_old_stat(&stat, statbuf);
190 SYSCALL_DEFINE2(lstat, const char __user *, filename,
191 struct __old_kernel_stat __user *, statbuf)
193 struct kstat stat;
194 int error;
196 error = vfs_lstat(filename, &stat);
197 if (error)
198 return error;
200 return cp_old_stat(&stat, statbuf);
203 SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
205 struct kstat stat;
206 int error = vfs_fstat(fd, &stat);
208 if (!error)
209 error = cp_old_stat(&stat, statbuf);
211 return error;
214 #endif /* __ARCH_WANT_OLD_STAT */
/* Pick the 32-bit or 64-bit flavour of an identifier at compile time. */
#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

/* Device-number validation/encoding matching the native word size. */
#define valid_dev(x)  choose_32_64(old_valid_dev,new_valid_dev)(x)
#define encode_dev(x)  choose_32_64(old_encode_dev,new_encode_dev)(x)

/*
 * Architectures may override this to clear only the explicit padding
 * fields of struct stat instead of zeroing the whole structure.
 */
#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
/*
 * cp_new_stat - convert a kstat into the "new" (pre-LFS) userspace
 * struct stat and copy it out to @statbuf.
 *
 * Returns 0 on success, -EOVERFLOW when a value cannot be represented in
 * the userspace structure, or -EFAULT if the copy to userspace fails.
 */
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	/* This ABI cannot represent files larger than MAX_NON_LFS. */
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	/* Don't leak uninitialized kernel stack through structure padding. */
	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	/* Refuse to silently truncate a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* Same overflow rule for the link count. */
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	/* Translate k[ug]ids into the caller's user namespace. */
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
266 SYSCALL_DEFINE2(newstat, const char __user *, filename,
267 struct stat __user *, statbuf)
269 struct kstat stat;
270 int error = vfs_stat(filename, &stat);
272 if (error)
273 return error;
274 return cp_new_stat(&stat, statbuf);
277 SYSCALL_DEFINE2(newlstat, const char __user *, filename,
278 struct stat __user *, statbuf)
280 struct kstat stat;
281 int error;
283 error = vfs_lstat(filename, &stat);
284 if (error)
285 return error;
287 return cp_new_stat(&stat, statbuf);
290 #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
291 SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
292 struct stat __user *, statbuf, int, flag)
294 struct kstat stat;
295 int error;
297 error = vfs_fstatat(dfd, filename, &stat, flag);
298 if (error)
299 return error;
300 return cp_new_stat(&stat, statbuf);
302 #endif
304 SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
306 struct kstat stat;
307 int error = vfs_fstat(fd, &stat);
309 if (!error)
310 error = cp_new_stat(&stat, statbuf);
312 return error;
/*
 * readlinkat(2): copy a symlink's target (not NUL-terminated) into @buf.
 * LOOKUP_EMPTY is always set so that an empty pathname reaches us and can
 * be mapped to -ENOENT below instead of failing inside the lookup.
 */
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		/* Empty path: ENOENT; object with no ->readlink: EINVAL. */
		error = empty ? -ENOENT : -EINVAL;
		if (inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				/* Reading a link counts as an access. */
				touch_atime(&path);
				error = inode->i_op->readlink(path.dentry,
							      buf, bufsiz);
			}
		}
		path_put(&path);
		/* Possibly-stale result: retry with forced revalidation. */
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}
/*
 * readlink(2) is readlinkat(2) with the lookup anchored at the current
 * working directory.
 */
SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

/*
 * Architectures may override this to clear only the explicit padding
 * fields of struct stat64 instead of zeroing the whole structure.
 */
#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif
/*
 * cp_new_stat64 - convert a kstat into the LFS struct stat64 and copy it
 * out to @statbuf.
 *
 * Returns 0 on success, -EOVERFLOW when a value cannot be represented,
 * or -EFAULT if the copy to userspace fails.
 */
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	/* Don't leak uninitialized kernel stack through structure padding. */
	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
		return -EOVERFLOW;
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	/* Refuse to silently truncate a wide inode number. */
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	/* Translate k[ug]ids into the caller's user namespace. */
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
400 SYSCALL_DEFINE2(stat64, const char __user *, filename,
401 struct stat64 __user *, statbuf)
403 struct kstat stat;
404 int error = vfs_stat(filename, &stat);
406 if (!error)
407 error = cp_new_stat64(&stat, statbuf);
409 return error;
412 SYSCALL_DEFINE2(lstat64, const char __user *, filename,
413 struct stat64 __user *, statbuf)
415 struct kstat stat;
416 int error = vfs_lstat(filename, &stat);
418 if (!error)
419 error = cp_new_stat64(&stat, statbuf);
421 return error;
424 SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
426 struct kstat stat;
427 int error = vfs_fstat(fd, &stat);
429 if (!error)
430 error = cp_new_stat64(&stat, statbuf);
432 return error;
435 SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
436 struct stat64 __user *, statbuf, int, flag)
438 struct kstat stat;
439 int error;
441 error = vfs_fstatat(dfd, filename, &stat, flag);
442 if (error)
443 return error;
444 return cp_new_stat64(&stat, statbuf);
446 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
448 /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
449 void __inode_add_bytes(struct inode *inode, loff_t bytes)
451 inode->i_blocks += bytes >> 9;
452 bytes &= 511;
453 inode->i_bytes += bytes;
454 if (inode->i_bytes >= 512) {
455 inode->i_blocks++;
456 inode->i_bytes -= 512;
/* Locked wrapper: take i_lock around __inode_add_bytes(). */
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_bytes);
/*
 * Subtract @bytes from the inode's i_blocks/i_bytes usage counters.
 * As with __inode_add_bytes(), the caller is responsible for sufficient
 * locking (ie. inode->i_lock).
 */
void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	/* Remove the whole 512-byte sectors first... */
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	/* ...then borrow a sector from i_blocks if the remainder underflows. */
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}
EXPORT_SYMBOL(__inode_sub_bytes);
/* Locked wrapper: take i_lock around __inode_sub_bytes(). */
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_bytes);
491 loff_t inode_get_bytes(struct inode *inode)
493 loff_t ret;
495 spin_lock(&inode->i_lock);
496 ret = (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
497 spin_unlock(&inode->i_lock);
498 return ret;
501 EXPORT_SYMBOL(inode_get_bytes);
/*
 * Set the inode's byte usage, splitting @bytes into whole 512-byte
 * blocks (i_blocks) and a sub-block remainder (i_bytes).
 */
void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}
EXPORT_SYMBOL(inode_set_bytes);