 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_dfrag.h"
#include "xfs_fsops.h"
#include "xfs_vnodeops.h"
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
 * xfs_find_handle maps from a userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns an fs handle for a mount point or a path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns a full handle for an FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns a full handle for a path
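 *
 * A minimal userspace sketch of the first form above (illustrative only;
 * it assumes the usual userspace XFS headers for xfs_handle_t,
 * xfs_fsop_handlereq_t and the ioctl numbers, and it omits error handling):
 *
 *      xfs_handle_t            handle;
 *      __s32                   hlen;
 *      int                     fd = open("/mnt/xfs", O_RDONLY);
 *      xfs_fsop_handlereq_t    hreq = {
 *              .path           = "/mnt/xfs",
 *              .ohandle        = &handle,
 *              .ohandlen       = &hlen,
 *      };
 *
 *      ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq);
 *      (on success, handle holds the fs handle and hlen its length in bytes)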
        xfs_fsop_handlereq_t    hreq;
        if (copy_from_user(&hreq, arg, sizeof(hreq)))
                return -XFS_ERROR(EFAULT);
        memset((char *)&handle, 0, sizeof(handle));
        case XFS_IOC_PATH_TO_FSHANDLE:
        case XFS_IOC_PATH_TO_HANDLE: {
                error = user_path_walk_link((const char __user *)hreq.path, &nd);
                ASSERT(nd.path.dentry);
                ASSERT(nd.path.dentry->d_inode);
                inode = igrab(nd.path.dentry->d_inode);
        case XFS_IOC_FD_TO_HANDLE: {
                file = fget(hreq.fd);
                ASSERT(file->f_path.dentry);
                ASSERT(file->f_path.dentry->d_inode);
                inode = igrab(file->f_path.dentry->d_inode);
                return -XFS_ERROR(EINVAL);
        if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
                /* we're not in XFS anymore, Toto */
                return -XFS_ERROR(EINVAL);
        switch (inode->i_mode & S_IFMT) {
                return -XFS_ERROR(EBADF);
        /* now we can grab the fsid */
        memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid,
        hsize = sizeof(xfs_fsid_t);
        if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
                xfs_inode_t     *ip = XFS_I(inode);
                /* need to get access to the xfs_inode to read the generation */
                lock_mode = xfs_ilock_map_shared(ip);
                /* fill in fid section of handle from inode */
                handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
                                        sizeof(handle.ha_fid.fid_len);
                handle.ha_fid.fid_pad = 0;
                handle.ha_fid.fid_gen = ip->i_d.di_gen;
                handle.ha_fid.fid_ino = ip->i_ino;
                xfs_iunlock_map_shared(ip, lock_mode);
                hsize = XFS_HSIZE(handle);
        /* now copy our handle into the user buffer & write out the size */
        if (copy_to_user(hreq.ohandle, &handle, hsize) ||
            copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
                return -XFS_ERROR(EFAULT);
 * Convert userspace handle data into an inode.
 *
 * We use the fact that all the fsop_handlereq ioctl calls have a data
 * structure argument whose first component is always an xfs_fsop_handlereq_t,
 * so we can pass that substructure into this handy, shared routine.
 *
 * If no error, the caller must always iput the returned inode.
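 *
 * For example, the attr-list and set-dm requests handled further down,
 * xfs_fsop_attrlist_handlereq_t and xfs_fsop_setdm_handlereq_t, both begin
 * with an embedded xfs_fsop_handlereq_t member named "hreq", which is why
 * their callers can pass &al_hreq.hreq or &dmhreq.hreq straight into this
 * routine.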
xfs_vget_fsop_handlereq(
        struct inode            *parinode,      /* parent inode pointer */
        xfs_fsop_handlereq_t    *hreq,
        struct inode            **inode)
        xfs_handle_t            *handlep;
         * Only allow handle opens under a directory.
        if (!S_ISDIR(parinode->i_mode))
                return XFS_ERROR(ENOTDIR);
        hanp = hreq->ihandle;
        hlen = hreq->ihandlen;
        if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
                return XFS_ERROR(EINVAL);
        if (copy_from_user(handlep, hanp, hlen))
                return XFS_ERROR(EFAULT);
        if (hlen < sizeof(*handlep))
                memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
        if (hlen > sizeof(handlep->ha_fsid)) {
                if (handlep->ha_fid.fid_len !=
                    (hlen - sizeof(handlep->ha_fsid) -
                     sizeof(handlep->ha_fid.fid_len)) ||
                    handlep->ha_fid.fid_pad)
                        return XFS_ERROR(EINVAL);
         * Crack the handle, obtain the inode # & generation #
        xfid = (struct xfs_fid *)&handlep->ha_fid;
        if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
                igen = xfid->fid_gen;
                return XFS_ERROR(EINVAL);
         * Get the XFS inode, building a Linux inode to go with it.
        error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
                return XFS_ERROR(EIO);
        if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
                xfs_iput_new(ip, XFS_ILOCK_SHARED);
                return XFS_ERROR(ENOENT);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        *inode = XFS_ITOV(ip);
        struct file             *parfilp,
        struct inode            *parinode)
        struct dentry           *dentry;
        xfs_fsop_handlereq_t    hreq;
        if (!capable(CAP_SYS_ADMIN))
                return -XFS_ERROR(EPERM);
        if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
                return -XFS_ERROR(EFAULT);
        error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode);
        /* Restrict xfs_open_by_handle to directories & regular files. */
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
                return -XFS_ERROR(EINVAL);
#if BITS_PER_LONG != 32
        hreq.oflags |= O_LARGEFILE;
        /* Put open permission in namei format. */
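        /*
         * The "+1" in the test below relies on the standard
         * O_RDONLY/O_WRONLY/O_RDWR encoding (0/1/2): adding one yields
         * FMODE_READ/FMODE_WRITE style permission bits, which is why the
         * checks that follow can test permflag & FMODE_WRITE directly.
         */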
        permflag = hreq.oflags;
        if ((permflag+1) & O_ACCMODE)
        if (permflag & O_TRUNC)
        if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
            (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
                return -XFS_ERROR(EPERM);
        if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
                return -XFS_ERROR(EACCES);
        /* Can't write directories. */
        if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
                return -XFS_ERROR(EISDIR);
        if ((new_fd = get_unused_fd()) < 0) {
        dentry = d_alloc_anon(inode);
        if (dentry == NULL) {
                put_unused_fd(new_fd);
                return -XFS_ERROR(ENOMEM);
        /* Ensure umount returns EBUSY on umounts while this file is open. */
        mntget(parfilp->f_path.mnt);
        /* Create file pointer. */
        filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags);
                put_unused_fd(new_fd);
                return -XFS_ERROR(-PTR_ERR(filp));
        if (inode->i_mode & S_IFREG) {
                /* invisible operation should not change atime */
                filp->f_flags |= O_NOATIME;
                filp->f_op = &xfs_invis_file_operations;
        fd_install(new_fd, filp);
 * This is a copy of fs/namei.c:vfs_readlink(), with its unused first
 * argument removed.
        if (len > (unsigned) buflen)
        if (copy_to_user(buffer, link, len))
xfs_readlink_by_handle(
        struct inode            *parinode)
        xfs_fsop_handlereq_t    hreq;
        if (!capable(CAP_SYS_ADMIN))
                return -XFS_ERROR(EPERM);
        if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
                return -XFS_ERROR(EFAULT);
        error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode);
        /* Restrict this handle operation to symlinks only. */
        if (!S_ISLNK(inode->i_mode)) {
                error = -XFS_ERROR(EINVAL);
        if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
                error = -XFS_ERROR(EFAULT);
        link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
        error = -xfs_readlink(XFS_I(inode), link);
        error = do_readlink(hreq.ohandle, olen, link);
xfs_fssetdm_by_handle(
        struct inode            *parinode)
        struct fsdmidata        fsd;
        xfs_fsop_setdm_handlereq_t dmhreq;
        if (!capable(CAP_MKNOD))
                return -XFS_ERROR(EPERM);
        if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
                return -XFS_ERROR(EFAULT);
        error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode);
        if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
                error = -XFS_ERROR(EPERM);
        if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
                error = -XFS_ERROR(EFAULT);
        error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask,
xfs_attrlist_by_handle(
        struct inode            *parinode)
        attrlist_cursor_kern_t  *cursor;
        xfs_fsop_attrlist_handlereq_t al_hreq;
        if (!capable(CAP_SYS_ADMIN))
                return -XFS_ERROR(EPERM);
        if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
                return -XFS_ERROR(EFAULT);
        if (al_hreq.buflen > XATTR_LIST_MAX)
                return -XFS_ERROR(EINVAL);
        error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode);
        kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
        cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
        error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
                              al_hreq.flags, cursor);
        if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
xfs_attrmulti_attr_get(
        if (*len > XATTR_SIZE_MAX)
        kbuf = kmalloc(*len, GFP_KERNEL);
        error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags, NULL);
        if (copy_to_user(ubuf, kbuf, *len))
xfs_attrmulti_attr_set(
        const char __user *ubuf,
        if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
        if (len > XATTR_SIZE_MAX)
        kbuf = kmalloc(len, GFP_KERNEL);
        if (copy_from_user(kbuf, ubuf, len))
        error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
xfs_attrmulti_attr_remove(
        if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
        return xfs_attr_remove(XFS_I(inode), name, flags);
xfs_attrmulti_by_handle(
        struct file             *parfilp,
        struct inode            *parinode)
        xfs_attr_multiop_t      *ops;
        xfs_fsop_attrmulti_handlereq_t am_hreq;
        unsigned int            i, size;
        if (!capable(CAP_SYS_ADMIN))
                return -XFS_ERROR(EPERM);
        if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
                return -XFS_ERROR(EFAULT);
        error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode);
        size = am_hreq.opcount * sizeof(attr_multiop_t);
        if (!size || size > 16 * PAGE_SIZE)
        ops = kmalloc(size, GFP_KERNEL);
        if (copy_from_user(ops, am_hreq.ops, size))
        attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
        for (i = 0; i < am_hreq.opcount; i++) {
                ops[i].am_error = strncpy_from_user(attr_name,
                                ops[i].am_attrname, MAXNAMELEN);
                if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
                if (ops[i].am_error < 0)
                switch (ops[i].am_opcode) {
                        ops[i].am_error = xfs_attrmulti_attr_get(inode,
                                        attr_name, ops[i].am_attrvalue,
                                        &ops[i].am_length, ops[i].am_flags);
                        ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
                        ops[i].am_error = xfs_attrmulti_attr_set(inode,
                                        attr_name, ops[i].am_attrvalue,
                                        ops[i].am_length, ops[i].am_flags);
                        mnt_drop_write(parfilp->f_path.mnt);
                        ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
                        ops[i].am_error = xfs_attrmulti_attr_remove(inode,
                                        attr_name, ops[i].am_flags);
                        mnt_drop_write(parfilp->f_path.mnt);
                        ops[i].am_error = EINVAL;
        if (copy_to_user(am_hreq.ops, ops, size))
                error = XFS_ERROR(EFAULT);
        struct xfs_inode        *ip,
        if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
                return -XFS_ERROR(EPERM);
        if (!(filp->f_mode & FMODE_WRITE))
                return -XFS_ERROR(EBADF);
        if (!S_ISREG(inode->i_mode))
                return -XFS_ERROR(EINVAL);
        if (copy_from_user(&bf, arg, sizeof(bf)))
                return -XFS_ERROR(EFAULT);
        if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
                attr_flags |= ATTR_NONBLOCK;
        if (ioflags & IO_INVIS)
                attr_flags |= ATTR_DMI;
        error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos,
        xfs_fsop_bulkreq_t      bulkreq;
        int                     count;  /* # of records returned */
        xfs_ino_t               inlast; /* last inode number */
        /* done = 1 if there are more stats to get and if bulkstat */
        /* should be called again (unused here, but used in dmapi) */
        if (!capable(CAP_SYS_ADMIN))
        if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);
        if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t)))
                return -XFS_ERROR(EFAULT);
        if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
                return -XFS_ERROR(EFAULT);
        if ((count = bulkreq.icount) <= 0)
                return -XFS_ERROR(EINVAL);
        if (bulkreq.ubuffer == NULL)
                return -XFS_ERROR(EINVAL);
        if (cmd == XFS_IOC_FSINUMBERS)
                error = xfs_inumbers(mp, &inlast, &count,
                                     bulkreq.ubuffer, xfs_inumbers_fmt);
        else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
                error = xfs_bulkstat_single(mp, &inlast,
                                            bulkreq.ubuffer, &done);
        else    /* XFS_IOC_FSBULKSTAT */
                error = xfs_bulkstat(mp, &inlast, &count,
                        (bulkstat_one_pf)xfs_bulkstat_one, NULL,
                        sizeof(xfs_bstat_t), bulkreq.ubuffer,
                        BULKSTAT_FG_QUICK, &done);
        if (bulkreq.ocount != NULL) {
                if (copy_to_user(bulkreq.lastip, &inlast,
                        return -XFS_ERROR(EFAULT);
                if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
                        return -XFS_ERROR(EFAULT);
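/*
 * A hedged userspace sketch of how XFS_IOC_FSBULKSTAT is typically driven,
 * matching the "called again" note above (illustrative only; the field names
 * follow the xfs_fsop_bulkreq_t usage in this file, the buffer size is
 * arbitrary, fd is any descriptor on the filesystem, and error handling is
 * omitted):
 *
 *      xfs_bstat_t             buf[64];
 *      __u64                   lastip = 0;
 *      __s32                   count;
 *      xfs_fsop_bulkreq_t      req = {
 *              .lastip         = &lastip,
 *              .icount         = 64,
 *              .ubuffer        = buf,
 *              .ocount         = &count,
 *      };
 *
 *      while (ioctl(fd, XFS_IOC_FSBULKSTAT, &req) == 0 && count > 0)
 *              process_bstats(buf, count);     (process_bstats is hypothetical)
 */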
xfs_ioc_fsgeometry_v1(
        xfs_fsop_geom_v1_t      fsgeo;
        error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
        if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
                return -XFS_ERROR(EFAULT);
        xfs_fsop_geom_t         fsgeo;
        error = xfs_fs_geometry(mp, &fsgeo, 4);
        if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
                return -XFS_ERROR(EFAULT);
 * Linux extended inode flags interface.
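 *
 * A hedged sketch of the userspace side of the GETXFLAGS path below
 * (illustrative only; FS_IMMUTABLE_FL and friends are the standard
 * <linux/fs.h> per-inode flags, and error handling is omitted):
 *
 *      unsigned int    flags;
 *
 *      if (ioctl(fd, XFS_IOC_GETXFLAGS, &flags) == 0 &&
 *          (flags & FS_IMMUTABLE_FL))
 *              handle_immutable(fd);           (handle_immutable is hypothetical)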
xfs_merge_ioc_xflags(
        unsigned int    xflags = start;
        if (flags & FS_IMMUTABLE_FL)
                xflags |= XFS_XFLAG_IMMUTABLE;
                xflags &= ~XFS_XFLAG_IMMUTABLE;
        if (flags & FS_APPEND_FL)
                xflags |= XFS_XFLAG_APPEND;
                xflags &= ~XFS_XFLAG_APPEND;
        if (flags & FS_SYNC_FL)
                xflags |= XFS_XFLAG_SYNC;
                xflags &= ~XFS_XFLAG_SYNC;
        if (flags & FS_NOATIME_FL)
                xflags |= XFS_XFLAG_NOATIME;
                xflags &= ~XFS_XFLAG_NOATIME;
        if (flags & FS_NODUMP_FL)
                xflags |= XFS_XFLAG_NODUMP;
                xflags &= ~XFS_XFLAG_NODUMP;
        unsigned int    flags = 0;
        if (di_flags & XFS_DIFLAG_IMMUTABLE)
                flags |= FS_IMMUTABLE_FL;
        if (di_flags & XFS_DIFLAG_APPEND)
                flags |= FS_APPEND_FL;
        if (di_flags & XFS_DIFLAG_SYNC)
        if (di_flags & XFS_DIFLAG_NOATIME)
                flags |= FS_NOATIME_FL;
        if (di_flags & XFS_DIFLAG_NODUMP)
                flags |= FS_NODUMP_FL;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        fa.fsx_xflags = xfs_ip2xflags(ip);
        fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
        fa.fsx_projid = ip->i_d.di_projid;
                if (ip->i_afp->if_flags & XFS_IFEXTENTS)
                        fa.fsx_nextents = ip->i_afp->if_bytes /
                                                sizeof(xfs_bmbt_rec_t);
                        fa.fsx_nextents = ip->i_d.di_anextents;
                if (ip->i_df.if_flags & XFS_IFEXTENTS)
                        fa.fsx_nextents = ip->i_df.if_bytes /
                                                sizeof(xfs_bmbt_rec_t);
                        fa.fsx_nextents = ip->i_d.di_nextents;
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        if (copy_to_user(arg, &fa, sizeof(fa)))
        struct bhv_vattr        *vattr;
        if (copy_from_user(&fa, arg, sizeof(fa)))
        vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
        if (unlikely(!vattr))
        if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
                attr_flags |= ATTR_NONBLOCK;
        vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
        vattr->va_xflags = fa.fsx_xflags;
        vattr->va_extsize = fa.fsx_extsize;
        vattr->va_projid = fa.fsx_projid;
        error = -xfs_setattr(ip, vattr, attr_flags, NULL);
                vn_revalidate(XFS_ITOV(ip));    /* update flags */
        flags = xfs_di2lxflags(ip->i_d.di_flags);
        if (copy_to_user(arg, &flags, sizeof(flags)))
        struct bhv_vattr        *vattr;
        if (copy_from_user(&flags, arg, sizeof(flags)))
        if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
                      FS_NOATIME_FL | FS_NODUMP_FL | \
        vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
        if (unlikely(!vattr))
        if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
                attr_flags |= ATTR_NONBLOCK;
        vattr->va_mask = XFS_AT_XFLAGS;
        vattr->va_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
        error = -xfs_setattr(ip, vattr, attr_flags, NULL);
                vn_revalidate(XFS_ITOV(ip));    /* update flags */
        struct xfs_inode        *ip,
        if (copy_from_user(&bm, arg, sizeof(bm)))
                return -XFS_ERROR(EFAULT);
        if (bm.bmv_count < 2)
                return -XFS_ERROR(EINVAL);
        iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
        if (ioflags & IO_INVIS)
                iflags |= BMV_IF_NO_DMAPI_READ;
        error = xfs_getbmap(ip, &bm, (struct getbmap __user *)arg+1, iflags);
        if (copy_to_user(arg, &bm, sizeof(bm)))
                return -XFS_ERROR(EFAULT);
        struct xfs_inode        *ip,
        if (copy_from_user(&bmx, arg, sizeof(bmx)))
                return -XFS_ERROR(EFAULT);
        if (bmx.bmv_count < 2)
                return -XFS_ERROR(EINVAL);
         * Map input getbmapx structure to a getbmap
         * structure for xfs_getbmap.
        GETBMAP_CONVERT(bmx, bm);
        iflags = bmx.bmv_iflags;
        if (iflags & (~BMV_IF_VALID))
                return -XFS_ERROR(EINVAL);
        iflags |= BMV_IF_EXTENDED;
        error = xfs_getbmap(ip, &bm, (struct getbmapx __user *)arg+1, iflags);
        GETBMAP_CONVERT(bm, bmx);
        if (copy_to_user(arg, &bmx, sizeof(bmx)))
                return -XFS_ERROR(EFAULT);
        struct inode            *inode = filp->f_path.dentry->d_inode;
        xfs_mount_t             *mp = ip->i_mount;
        xfs_itrace_entry(XFS_I(inode));
        case XFS_IOC_ALLOCSP:
        case XFS_IOC_FREESP:
        case XFS_IOC_RESVSP:
        case XFS_IOC_UNRESVSP:
        case XFS_IOC_ALLOCSP64:
        case XFS_IOC_FREESP64:
        case XFS_IOC_RESVSP64:
        case XFS_IOC_UNRESVSP64:
                 * Only allow the sys admin to reserve space unless
                 * unwritten extents are enabled.
                if (!xfs_sb_version_hasextflgbit(&mp->m_sb) &&
                    !capable(CAP_SYS_ADMIN))
                return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg);
        case XFS_IOC_DIOINFO: {
                xfs_buftarg_t           *target =
                        XFS_IS_REALTIME_INODE(ip) ?
                        mp->m_rtdev_targp : mp->m_ddev_targp;
                da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
                da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
                if (copy_to_user(arg, &da, sizeof(da)))
                        return -XFS_ERROR(EFAULT);
        case XFS_IOC_FSBULKSTAT_SINGLE:
        case XFS_IOC_FSBULKSTAT:
        case XFS_IOC_FSINUMBERS:
                return xfs_ioc_bulkstat(mp, cmd, arg);
        case XFS_IOC_FSGEOMETRY_V1:
                return xfs_ioc_fsgeometry_v1(mp, arg);
        case XFS_IOC_FSGEOMETRY:
                return xfs_ioc_fsgeometry(mp, arg);
        case XFS_IOC_GETVERSION:
                return put_user(inode->i_generation, (int __user *)arg);
        case XFS_IOC_FSGETXATTR:
                return xfs_ioc_fsgetxattr(ip, 0, arg);
        case XFS_IOC_FSGETXATTRA:
                return xfs_ioc_fsgetxattr(ip, 1, arg);
        case XFS_IOC_FSSETXATTR:
                return xfs_ioc_fssetxattr(ip, filp, arg);
        case XFS_IOC_GETXFLAGS:
                return xfs_ioc_getxflags(ip, arg);
        case XFS_IOC_SETXFLAGS:
                return xfs_ioc_setxflags(ip, filp, arg);
        case XFS_IOC_FSSETDM: {
                struct fsdmidata        dmi;
                if (copy_from_user(&dmi, arg, sizeof(dmi)))
                        return -XFS_ERROR(EFAULT);
                error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
        case XFS_IOC_GETBMAP:
        case XFS_IOC_GETBMAPA:
                return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
        case XFS_IOC_GETBMAPX:
                return xfs_ioc_getbmapx(ip, arg);
        case XFS_IOC_FD_TO_HANDLE:
        case XFS_IOC_PATH_TO_HANDLE:
        case XFS_IOC_PATH_TO_FSHANDLE:
                return xfs_find_handle(cmd, arg);
        case XFS_IOC_OPEN_BY_HANDLE:
                return xfs_open_by_handle(mp, arg, filp, inode);
        case XFS_IOC_FSSETDM_BY_HANDLE:
                return xfs_fssetdm_by_handle(mp, arg, inode);
        case XFS_IOC_READLINK_BY_HANDLE:
                return xfs_readlink_by_handle(mp, arg, inode);
        case XFS_IOC_ATTRLIST_BY_HANDLE:
                return xfs_attrlist_by_handle(mp, arg, inode);
        case XFS_IOC_ATTRMULTI_BY_HANDLE:
                return xfs_attrmulti_by_handle(mp, arg, filp, inode);
        case XFS_IOC_SWAPEXT: {
                error = xfs_swapext((struct xfs_swapext __user *)arg);
        case XFS_IOC_FSCOUNTS: {
                xfs_fsop_counts_t out;
                error = xfs_fs_counts(mp, &out);
                if (copy_to_user(arg, &out, sizeof(out)))
                        return -XFS_ERROR(EFAULT);
        case XFS_IOC_SET_RESBLKS: {
                xfs_fsop_resblks_t inout;
                if (!capable(CAP_SYS_ADMIN))
                if (copy_from_user(&inout, arg, sizeof(inout)))
                        return -XFS_ERROR(EFAULT);
                /* input parameter is passed in resblks field of structure */
                error = xfs_reserve_blocks(mp, &in, &inout);
                if (copy_to_user(arg, &inout, sizeof(inout)))
                        return -XFS_ERROR(EFAULT);
        case XFS_IOC_GET_RESBLKS: {
                xfs_fsop_resblks_t out;
                if (!capable(CAP_SYS_ADMIN))
                error = xfs_reserve_blocks(mp, NULL, &out);
                if (copy_to_user(arg, &out, sizeof(out)))
                        return -XFS_ERROR(EFAULT);
        case XFS_IOC_FSGROWFSDATA: {
                xfs_growfs_data_t in;
                if (!capable(CAP_SYS_ADMIN))
                if (copy_from_user(&in, arg, sizeof(in)))
                        return -XFS_ERROR(EFAULT);
                error = xfs_growfs_data(mp, &in);
        case XFS_IOC_FSGROWFSLOG: {
                xfs_growfs_log_t in;
                if (!capable(CAP_SYS_ADMIN))
                if (copy_from_user(&in, arg, sizeof(in)))
                        return -XFS_ERROR(EFAULT);
                error = xfs_growfs_log(mp, &in);
        case XFS_IOC_FSGROWFSRT: {
                if (!capable(CAP_SYS_ADMIN))
                if (copy_from_user(&in, arg, sizeof(in)))
                        return -XFS_ERROR(EFAULT);
                error = xfs_growfs_rt(mp, &in);
        case XFS_IOC_FREEZE:
                if (!capable(CAP_SYS_ADMIN))
                if (inode->i_sb->s_frozen == SB_UNFROZEN)
                        freeze_bdev(inode->i_sb->s_bdev);
                if (!capable(CAP_SYS_ADMIN))
                if (inode->i_sb->s_frozen != SB_UNFROZEN)
                        thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
        case XFS_IOC_GOINGDOWN: {
                if (!capable(CAP_SYS_ADMIN))
                if (get_user(in, (__uint32_t __user *)arg))
                        return -XFS_ERROR(EFAULT);
                error = xfs_fs_goingdown(mp, in);
        case XFS_IOC_ERROR_INJECTION: {
                xfs_error_injection_t in;
                if (!capable(CAP_SYS_ADMIN))
                if (copy_from_user(&in, arg, sizeof(in)))
                        return -XFS_ERROR(EFAULT);
                error = xfs_errortag_add(in.errtag, mp);
        case XFS_IOC_ERROR_CLEARALL:
                if (!capable(CAP_SYS_ADMIN))
                error = xfs_errortag_clearall(mp, 1);