stat.c (22267B)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"
#include "mount.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @mnt_userns: user namespace of the mount the inode was found from
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the user namespace of
 * the vfsmount must be passed through @mnt_userns. This function will then
 * take care to map the inode according to @mnt_userns before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass init_user_ns.
 */
void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
		      struct kstat *stat)
{
	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = i_uid_into_mnt(mnt_userns, inode);
	stat->gid = i_gid_into_mnt(mnt_userns, inode);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode->i_ctime;
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;
}
EXPORT_SYMBOL(generic_fillattr);

/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);
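/*
 * Illustrative sketch (not part of this file): a filesystem that is happy
 * with the VFS defaults but also wants to report a birth time would
 * typically implement ->getattr() along these lines; "foofs", FOOFS_I()
 * and i_crtime are hypothetical names:
 *
 *	static int foofs_getattr(struct user_namespace *mnt_userns,
 *				 const struct path *path, struct kstat *stat,
 *				 u32 request_mask, unsigned int query_flags)
 *	{
 *		struct inode *inode = d_inode(path->dentry);
 *
 *		generic_fillattr(mnt_userns, inode, stat);
 *		stat->btime = FOOFS_I(inode)->i_crtime;
 *		stat->result_mask |= STATX_BTIME;
 *		return 0;
 *	}
 */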
/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct user_namespace *mnt_userns;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	mnt_userns = mnt_user_ns(path->mnt);
	if (inode->i_op->getattr)
		return inode->i_op->getattr(mnt_userns, path, stat,
					    request_mask, query_flags);

	generic_fillattr(mnt_userns, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/*
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must indicate in
 * request_mask and query_flags what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieving.  Any such attribute not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
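/*
 * Illustrative caller sketch (not taken from this file): in-kernel code that
 * only needs the basic attributes and can tolerate a slightly stale answer
 * from a remote filesystem might do:
 *
 *	struct kstat stat;
 *	int err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
 *			      AT_STATX_DONT_SYNC);
 */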
/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	struct fd f;
	int error;

	f = fdget_raw(fd);
	if (!f.file)
		return -EBADF;
	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
	fdput(f);
	return error;
}

int getname_statx_lookup_flags(int flags)
{
	int lookup_flags = 0;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	return lookup_flags;
}
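/*
 * For example (restating the mapping above): flags == 0 yields
 * LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT; adding AT_SYMLINK_NOFOLLOW drops
 * LOOKUP_FOLLOW; adding AT_NO_AUTOMOUNT drops LOOKUP_AUTOMOUNT; and
 * AT_EMPTY_PATH additionally sets LOOKUP_EMPTY so that an empty filename
 * refers to the start point (dfd) itself.
 */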
/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
		     struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);
	stat->mnt_id = real_mount(path.mnt)->mnt_id;
	stat->result_mask |= STATX_MNT_ID;
	if (path.mnt->mnt_root == path.dentry)
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name;

	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}
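/*
 * For reference (these helpers are not defined in this file): the familiar
 * vfs_stat() and vfs_lstat() used by the syscalls below are thin inline
 * wrappers around vfs_fstatat(), roughly:
 *
 *	vfs_stat(name, stat)  -> vfs_fstatat(AT_FDCWD, name, stat, 0)
 *	vfs_lstat(name, stat) -> vfs_fstatat(AT_FDCWD, name, stat,
 *					     AT_SYMLINK_NOFOLLOW)
 */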
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
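/*
 * Userspace note (illustrative, not part of this file): readlink(2) and
 * readlinkat(2) return the number of bytes placed in the buffer and do not
 * NUL-terminate it, so callers conventionally do something like:
 *
 *	char buf[PATH_MAX];
 *	ssize_t len = readlinkat(AT_FDCWD, "/proc/self/exe",
 *				 buf, sizeof(buf) - 1);
 *	if (len >= 0)
 *		buf[len] = '\0';
 */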

/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.stx_mask = stat->result_mask;
	tmp.stx_blksize = stat->blksize;
	tmp.stx_attributes = stat->attributes;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	struct filename *name;

	name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}
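/*
 * Userspace sketch of the fstat() emulation mentioned above (illustrative
 * only; uses the statx(2) wrapper from <sys/stat.h> and AT_EMPTY_PATH from
 * <fcntl.h>):
 *
 *	struct statx stx;
 *	int ret = statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx);
 */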
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);
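/*
 * Worked example of the accounting above (illustrative): starting from
 * i_blocks == 0 and i_bytes == 0, __inode_add_bytes(inode, 1000) adds
 * 1000 >> 9 == 1 to i_blocks and leaves 1000 & 511 == 488 in i_bytes,
 * i.e. 1 * 512 + 488 == 1000 bytes in total; the carry branch only fires
 * once i_bytes reaches 512.  __inode_sub_bytes() mirrors this with a
 * borrow: subtracting 100 bytes when i_bytes == 50 decrements i_blocks
 * and leaves i_bytes == 462.
 */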
loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);
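/*
 * The i_blocks/i_bytes pair encodes a single byte count as
 * i_blocks * 512 + i_bytes with 0 <= i_bytes < 512, as the shift and mask
 * in inode_set_bytes() show.  The locked wrappers above exist so that the
 * two fields are always read and updated as a consistent pair under
 * inode->i_lock.
 */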