fs/xfs/linux-2.6/xfs_super.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_version.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>

STATIC struct quotactl_ops xfs_quotactl_operations;
STATIC struct super_operations xfs_super_operations;
STATIC kmem_zone_t *xfs_vnode_zone;
STATIC kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
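
/*
 * Allocate a mount-args structure and seed it with defaults plus the
 * pieces of the generic superblock that matter at mount time: the
 * device name and the already-parsed mount(2) flags.
 */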
STATIC struct xfs_mount_args *
xfs_args_allocate(
        struct super_block      *sb,
        int                     silent)
{
        struct xfs_mount_args   *args;

        args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
        args->logbufs = args->logbufsize = -1;
        strncpy(args->fsname, sb->s_id, MAXNAMELEN);

        /* Copy the already-parsed mount(2) flags we're interested in */
        if (sb->s_flags & MS_DIRSYNC)
                args->flags |= XFSMNT_DIRSYNC;
        if (sb->s_flags & MS_SYNCHRONOUS)
                args->flags |= XFSMNT_WSYNC;
        if (silent)
                args->flags |= XFSMNT_QUIET;
        args->flags |= XFSMNT_32BITINODES;

        return args;
}

__uint64_t
xfs_max_file_offset(
        unsigned int            blockshift)
{
        unsigned int            pagefactor = 1;
        unsigned int            bitshift = BITS_PER_LONG - 1;

        /* Figure out the maximum file size; on Linux this can depend
         * on the filesystem blocksize (on 32 bit platforms).
         * __block_prepare_write does this in an [unsigned] long...
         *      page->index << (PAGE_CACHE_SHIFT - bbits)
         * So, for page sized blocks (4K on 32 bit platforms),
         * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
         *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
         * but for smaller blocksizes it is less (bbits = log2 bsize).
         * Note1: get_block_t takes a long (implicit cast from above)
         * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
         * can optionally convert the [unsigned] long from above into
         * an [unsigned] long long.
         */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
        ASSERT(sizeof(sector_t) == 8);
        pagefactor = PAGE_CACHE_SIZE;
        bitshift = BITS_PER_LONG;
# else
        pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif
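
        /*
         * Worked example: on a 32 bit kernel without CONFIG_LBD, with
         * 4k blocks on 4k pages, pagefactor stays PAGE_CACHE_SIZE
         * (4096) and bitshift stays 31, so the result is
         * (4096 << 31) - 1 = 2^43 - 1, i.e. just under 8Tb.  On a
         * 64 bit kernel, pagefactor = 1 and bitshift = 63 give 2^63 - 1.
         */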
        return (((__uint64_t)pagefactor) << bitshift) - 1;
}
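
/*
 * Pick the Linux inode, file and address space operation vectors for
 * a new inode based on its type.  Regular files get the full set,
 * directories do without an address space, symlinks only get one when
 * the target is stored out of line, and everything else is handed to
 * init_special_inode.
 */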
STATIC __inline__ void
xfs_set_inodeops(
        struct inode            *inode)
{
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                inode->i_op = &xfs_inode_operations;
                inode->i_fop = &xfs_file_operations;
                inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        case S_IFDIR:
                inode->i_op = &xfs_dir_inode_operations;
                inode->i_fop = &xfs_dir_file_operations;
                break;
        case S_IFLNK:
                inode->i_op = &xfs_symlink_inode_operations;
                if (inode->i_blocks)
                        inode->i_mapping->a_ops = &xfs_address_space_operations;
                break;
        default:
                inode->i_op = &xfs_inode_operations;
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                break;
        }
}
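
/*
 * Bring the Linux inode up to date with the XFS inode core: mode,
 * link count, ownership, size, timestamps and the per-inode flags
 * that have Linux equivalents (immutable/append/sync/noatime).
 */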
STATIC __inline__ void
xfs_revalidate_inode(
        xfs_mount_t             *mp,
        vnode_t                 *vp,
        xfs_inode_t             *ip)
{
        struct inode            *inode = vn_to_inode(vp);

        inode->i_mode   = ip->i_d.di_mode;
        inode->i_nlink  = ip->i_d.di_nlink;
        inode->i_uid    = ip->i_d.di_uid;
        inode->i_gid    = ip->i_d.di_gid;

        switch (inode->i_mode & S_IFMT) {
        case S_IFBLK:
        case S_IFCHR:
                inode->i_rdev =
                        MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
                              sysv_minor(ip->i_df.if_u2.if_rdev));
                break;
        default:
                inode->i_rdev = 0;
                break;
        }

        inode->i_blksize = xfs_preferred_iosize(mp);
        inode->i_generation = ip->i_d.di_gen;
        i_size_write(inode, ip->i_d.di_size);
        inode->i_blocks =
                XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
        inode->i_atime.tv_sec   = ip->i_d.di_atime.t_sec;
        inode->i_atime.tv_nsec  = ip->i_d.di_atime.t_nsec;
        inode->i_mtime.tv_sec   = ip->i_d.di_mtime.t_sec;
        inode->i_mtime.tv_nsec  = ip->i_d.di_mtime.t_nsec;
        inode->i_ctime.tv_sec   = ip->i_d.di_ctime.t_sec;
        inode->i_ctime.tv_nsec  = ip->i_d.di_ctime.t_nsec;
        if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
        if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
                inode->i_flags |= S_SYNC;
        else
                inode->i_flags &= ~S_SYNC;
        if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
                inode->i_flags |= S_NOATIME;
        else
                inode->i_flags &= ~S_NOATIME;
        vp->v_flag &= ~VMODIFIED;
}
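
/*
 * Glue an XFS inode into its vnode's behaviour chain the first time
 * we see it, and, once the on-disk fields are usable, fill in the
 * Linux inode and unlock it.  See the comment below for why this can
 * be called twice for the same inode.
 */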
void
xfs_initialize_vnode(
        bhv_desc_t              *bdp,
        vnode_t                 *vp,
        bhv_desc_t              *inode_bhv,
        int                     unlock)
{
        xfs_inode_t             *ip = XFS_BHVTOI(inode_bhv);
        struct inode            *inode = vn_to_inode(vp);

        if (!inode_bhv->bd_vobj) {
                vp->v_vfsp = bhvtovfs(bdp);
                bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
                bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
        }

        /*
         * We need to set the ops vectors, and unlock the inode, but if
         * we have been called during the new inode create process, it is
         * too early to fill in the Linux inode.  We will get called a
         * second time once the inode is properly set up, and then we can
         * finish our work.
         */
        if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
                xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
                xfs_set_inodeops(inode);

                ip->i_flags &= ~XFS_INEW;
                barrier();

                unlock_new_inode(inode);
        }
}
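
/*
 * Open a block device for exclusive use by this mount.  Note the
 * sign flip on return: Linux uses negative errnos, while XFS uses
 * positive errnos internally.
 */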
int
xfs_blkdev_get(
        xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
{
        int                     error = 0;

        *bdevp = open_bdev_excl(name, 0, mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
                printk("XFS: Invalid device [%s], error=%d\n", name, error);
        }

        return -error;
}

void
xfs_blkdev_put(
        struct block_device     *bdev)
{
        if (bdev)
                close_bdev_excl(bdev);
}

/*
 * Try to write out the superblock using barriers.
 */
STATIC int
xfs_barrier_test(
        xfs_mount_t     *mp)
{
        xfs_buf_t       *sbp = xfs_getsb(mp, 0);
        int             error;

        XFS_BUF_UNDONE(sbp);
        XFS_BUF_UNREAD(sbp);
        XFS_BUF_UNDELAYWRITE(sbp);
        XFS_BUF_WRITE(sbp);
        XFS_BUF_UNASYNC(sbp);
        XFS_BUF_ORDERED(sbp);

        xfsbdstrat(mp, sbp);
        error = xfs_iowait(sbp);

        /*
         * Clear all the flags we set and possible error state in the
         * buffer.  We only did the write to try out whether barriers
         * worked and shouldn't leave any traces in the superblock
         * buffer.
         */
        XFS_BUF_DONE(sbp);
        XFS_BUF_ERROR(sbp, 0);
        XFS_BUF_UNORDERED(sbp);

        xfs_buf_relse(sbp);
        return error;
}
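
/*
 * Decide whether write barriers are usable on this mount.  They are
 * disabled for external log devices, for queues that do not support
 * ordered writes, and when a trial barrier write fails.
 */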
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
        int error;

        if (mp->m_logdev_targp != mp->m_ddev_targp) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported with external log device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
                                        QUEUE_ORDERED_NONE) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported by the underlying device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }

        error = xfs_barrier_test(mp);
        if (error) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, trial barrier write failed");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
                return;
        }
}

void
xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
{
        blkdev_issue_flush(buftarg->bt_bdev, NULL);
}
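
/*
 * Linux inode allocation and freeing.  The vnode (with the Linux
 * inode embedded in it) comes from the xfs_vnode_zone cache set up
 * in xfs_init_zones() below.
 */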
STATIC struct inode *
xfs_fs_alloc_inode(
        struct super_block      *sb)
{
        vnode_t                 *vp;

        vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
        if (unlikely(!vp))
                return NULL;
        return vn_to_inode(vp);
}

STATIC void
xfs_fs_destroy_inode(
        struct inode            *inode)
{
        kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}

STATIC void
xfs_fs_inode_init_once(
        void                    *vnode,
        kmem_zone_t             *zonep,
        unsigned long           flags)
{
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                      SLAB_CTOR_CONSTRUCTOR)
                inode_init_once(vn_to_inode((vnode_t *)vnode));
}
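
/*
 * Set up the slab caches and the ioend mempool, unwinding whatever
 * has been built so far if a later step fails.
 */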
STATIC int
xfs_init_zones(void)
{
        xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t",
                                        KM_ZONE_HWALIGN | KM_ZONE_RECLAIM |
                                        KM_ZONE_SPREAD,
                                        xfs_fs_inode_init_once);
        if (!xfs_vnode_zone)
                goto out;

        xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
        if (!xfs_ioend_zone)
                goto out_destroy_vnode_zone;

        xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
                                                  xfs_ioend_zone);
        if (!xfs_ioend_pool)
                goto out_free_ioend_zone;
        return 0;

 out_free_ioend_zone:
        kmem_zone_destroy(xfs_ioend_zone);
 out_destroy_vnode_zone:
        kmem_zone_destroy(xfs_vnode_zone);
 out:
        return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
        mempool_destroy(xfs_ioend_pool);
        kmem_zone_destroy(xfs_vnode_zone);
        kmem_zone_destroy(xfs_ioend_zone);
}

/*
 * Attempt to flush the inode.  This will actually fail if the inode
 * is pinned, but we dirty the inode again at the point when it is
 * unpinned after a log write, since this is when the inode itself
 * becomes flushable.
 */
STATIC int
xfs_fs_write_inode(
        struct inode            *inode,
        int                     sync)
{
        vnode_t                 *vp = vn_from_inode(inode);
        int                     error = 0, flags = FLUSH_INODE;

        if (vp) {
                vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
                if (sync)
                        flags |= FLUSH_SYNC;
                VOP_IFLUSH(vp, flags, error);
                if (error == EAGAIN) {
                        if (sync)
                                VOP_IFLUSH(vp, flags | FLUSH_LOG, error);
                        else
                                error = 0;
                }
        }

        return -error;
}

STATIC void
xfs_fs_clear_inode(
        struct inode            *inode)
{
        vnode_t                 *vp = vn_from_inode(inode);
        int                     error, cache;

        vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);

        XFS_STATS_INC(vn_rele);
        XFS_STATS_INC(vn_remove);
        XFS_STATS_INC(vn_reclaim);
        XFS_STATS_DEC(vn_active);

        /*
         * This can happen because xfs_iget_core calls xfs_idestroy if we
         * find an inode with di_mode == 0 but without IGET_CREATE set.
         */
        if (vp->v_fbhv)
                VOP_INACTIVE(vp, NULL, cache);

        VN_LOCK(vp);
        vp->v_flag &= ~VMODIFIED;
        VN_UNLOCK(vp, 0);

        if (vp->v_fbhv) {
                VOP_RECLAIM(vp, error);
                if (error)
                        panic("vn_purge: cannot reclaim");
        }

        ASSERT(vp->v_fbhv == NULL);

#ifdef XFS_VNODE_TRACE
        ktrace_free(vp->v_trace);
#endif
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct bhv_vfs  *vfs,
        void            *data,
        void            (*syncer)(bhv_vfs_t *, void *))
{
        struct bhv_vfs_sync_work *work;

        work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_vfs = vfs;
        spin_lock(&vfs->vfs_sync_lock);
        list_add_tail(&work->w_list, &vfs->vfs_sync_list);
        spin_unlock(&vfs->vfs_sync_lock);
        wake_up_process(vfs->vfs_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
        bhv_vfs_t       *vfs,
        void            *inode)
{
        filemap_flush(((struct inode *)inode)->i_mapping);
        iput((struct inode *)inode);
}
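
/*
 * Take an extra reference on the inode, hand the flush off to the
 * xfssyncd thread, and give it half a second to make progress before
 * returning to the (presumably ENOSPC-handling) caller.
 */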
void
xfs_flush_inode(
        xfs_inode_t     *ip)
{
        struct inode    *inode = vn_to_inode(XFS_ITOV(ip));
        struct bhv_vfs  *vfs = XFS_MTOVFS(ip->i_mount);

        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
        delay(msecs_to_jiffies(500));
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
        bhv_vfs_t       *vfs,
        void            *inode)
{
        sync_blockdev(vfs->vfs_super->s_bdev);
        iput((struct inode *)inode);
}

void
xfs_flush_device(
        xfs_inode_t     *ip)
{
        struct inode    *inode = vn_to_inode(XFS_ITOV(ip));
        struct bhv_vfs  *vfs = XFS_MTOVFS(ip->i_mount);

        igrab(inode);
        xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
        delay(msecs_to_jiffies(500));
        xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
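
/*
 * The periodic work item run by xfssyncd: sync data and metadata on
 * read-write mounts, then advance vfs_sync_seq so that waiters such
 * as the laptop-mode path in xfs_fs_sync_super can tell a full sync
 * iteration has completed.
 */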
STATIC void
vfs_sync_worker(
        bhv_vfs_t       *vfsp,
        void            *unused)
{
        int             error;

        if (!(vfsp->vfs_flag & VFS_RDONLY))
                error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH |
                                        SYNC_ATTR | SYNC_REFCACHE, NULL);
        vfsp->vfs_sync_seq++;
        wmb();
        wake_up(&vfsp->vfs_wait_single_sync_task);
}
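
/*
 * Per-mount sync daemon.  Wakes every xfs_syncd_centisecs to requeue
 * the periodic vfs_sync_work, and otherwise runs whatever work items
 * were queued through xfs_syncd_queue_work, freeing each one-shot
 * item when done.
 */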
STATIC int
xfssyncd(
        void                    *arg)
{
        long                    timeleft;
        bhv_vfs_t               *vfsp = (bhv_vfs_t *) arg;
        bhv_vfs_sync_work_t     *work, *n;
        LIST_HEAD               (tmp);

        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
                        break;

                spin_lock(&vfsp->vfs_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
                        list_add_tail(&vfsp->vfs_sync_work.w_list,
                                        &vfsp->vfs_sync_list);
                }
                list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
                        list_move(&work->w_list, &tmp);
                spin_unlock(&vfsp->vfs_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(vfsp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &vfsp->vfs_sync_work)
                                continue;
                        kmem_free(work, sizeof(struct bhv_vfs_sync_work));
                }
        }

        return 0;
}

STATIC int
xfs_fs_start_syncd(
        bhv_vfs_t               *vfsp)
{
        vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
        vfsp->vfs_sync_work.w_vfs = vfsp;
        vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
        if (IS_ERR(vfsp->vfs_sync_task))
                return -PTR_ERR(vfsp->vfs_sync_task);
        return 0;
}

STATIC void
xfs_fs_stop_syncd(
        bhv_vfs_t               *vfsp)
{
        kthread_stop(vfsp->vfs_sync_task);
}

STATIC void
xfs_fs_put_super(
        struct super_block      *sb)
{
        bhv_vfs_t               *vfsp = vfs_from_sb(sb);
        int                     error;

        xfs_fs_stop_syncd(vfsp);
        bhv_vfs_sync(vfsp, SYNC_ATTR | SYNC_DELWRI, NULL);
        error = bhv_vfs_unmount(vfsp, 0, NULL);
        if (error) {
                printk("XFS: unmount got error=%d\n", error);
                printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
        } else {
                vfs_deallocate(vfsp);
        }
}

STATIC void
xfs_fs_write_super(
        struct super_block      *sb)
{
        if (!(sb->s_flags & MS_RDONLY))
                bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA, NULL);
        sb->s_dirt = 0;
}

STATIC int
xfs_fs_sync_super(
        struct super_block      *sb,
        int                     wait)
{
        bhv_vfs_t               *vfsp = vfs_from_sb(sb);
        int                     error;
        int                     flags;

        if (unlikely(sb->s_frozen == SB_FREEZE_WRITE))
                flags = SYNC_QUIESCE;
        else
                flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);

        error = bhv_vfs_sync(vfsp, flags, NULL);
        sb->s_dirt = 0;

        if (unlikely(laptop_mode)) {
                int     prev_sync_seq = vfsp->vfs_sync_seq;

                /*
                 * The disk must be active because we're syncing.
                 * We schedule xfssyncd now (now that the disk is
                 * active) instead of later (when it might not be).
                 */
                wake_up_process(vfsp->vfs_sync_task);
                /*
                 * We have to wait for the sync iteration to complete.
                 * If we don't, the disk activity caused by the sync
                 * will come after the sync is completed, and that
                 * triggers another sync from laptop mode.
                 */
                wait_event(vfsp->vfs_wait_single_sync_task,
                                vfsp->vfs_sync_seq != prev_sync_seq);
        }

        return -error;
}

STATIC int
xfs_fs_statfs(
        struct super_block      *sb,
        struct kstatfs          *statp)
{
        return -bhv_vfs_statvfs(vfs_from_sb(sb), statp, NULL);
}

STATIC int
xfs_fs_remount(
        struct super_block      *sb,
        int                     *flags,
        char                    *options)
{
        bhv_vfs_t               *vfsp = vfs_from_sb(sb);
        struct xfs_mount_args   *args = xfs_args_allocate(sb, 0);
        int                     error;

        error = bhv_vfs_parseargs(vfsp, options, args, 1);
        if (!error)
                error = bhv_vfs_mntupdate(vfsp, flags, args);
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC void
xfs_fs_lockfs(
        struct super_block      *sb)
{
        bhv_vfs_freeze(vfs_from_sb(sb));
}

STATIC int
xfs_fs_show_options(
        struct seq_file         *m,
        struct vfsmount         *mnt)
{
        return -bhv_vfs_showargs(vfs_from_sb(mnt->mnt_sb), m);
}

STATIC int
xfs_fs_quotasync(
        struct super_block      *sb,
        int                     type)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XQUOTASYNC, 0, NULL);
}

STATIC int
xfs_fs_getxstate(
        struct super_block      *sb,
        struct fs_quota_stat    *fqs)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}

STATIC int
xfs_fs_setxstate(
        struct super_block      *sb,
        unsigned int            flags,
        int                     op)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb), op, 0, (caddr_t)&flags);
}

STATIC int
xfs_fs_getxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb),
                                 (type == USRQUOTA) ? Q_XGETQUOTA :
                                  ((type == GRPQUOTA) ? Q_XGETGQUOTA :
                                   Q_XGETPQUOTA), id, (caddr_t)fdq);
}

STATIC int
xfs_fs_setxquota(
        struct super_block      *sb,
        int                     type,
        qid_t                   id,
        struct fs_disk_quota    *fdq)
{
        return -bhv_vfs_quotactl(vfs_from_sb(sb),
                                 (type == USRQUOTA) ? Q_XSETQLIM :
                                  ((type == GRPQUOTA) ? Q_XSETGQLIM :
                                   Q_XSETPQLIM), id, (caddr_t)fdq);
}
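
/*
 * Fill in the superblock at mount time: parse the option string,
 * mount the filesystem, copy the geometry reported by statvfs into
 * the generic superblock, find the root inode, and start xfssyncd.
 */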
STATIC int
xfs_fs_fill_super(
        struct super_block      *sb,
        void                    *data,
        int                     silent)
{
        vnode_t                 *rootvp;
        struct bhv_vfs          *vfsp = vfs_allocate(sb);
        struct xfs_mount_args   *args = xfs_args_allocate(sb, silent);
        struct kstatfs          statvfs;
        int                     error;

        bhv_insert_all_vfsops(vfsp);

        error = bhv_vfs_parseargs(vfsp, (char *)data, args, 0);
        if (error) {
                bhv_remove_all_vfsops(vfsp, 1);
                goto fail_vfsop;
        }

        sb_min_blocksize(sb, BBSIZE);
#ifdef CONFIG_XFS_EXPORT
        sb->s_export_op = &xfs_export_operations;
#endif
        sb->s_qcop = &xfs_quotactl_operations;
        sb->s_op = &xfs_super_operations;

        error = bhv_vfs_mount(vfsp, args, NULL);
        if (error) {
                bhv_remove_all_vfsops(vfsp, 1);
                goto fail_vfsop;
        }

        error = bhv_vfs_statvfs(vfsp, &statvfs, NULL);
        if (error)
                goto fail_unmount;

        sb->s_dirt = 1;
        sb->s_magic = statvfs.f_type;
        sb->s_blocksize = statvfs.f_bsize;
        sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
        sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
        sb->s_time_gran = 1;
        set_posix_acl_flag(sb);

        error = bhv_vfs_root(vfsp, &rootvp);
        if (error)
                goto fail_unmount;

        sb->s_root = d_alloc_root(vn_to_inode(rootvp));
        if (!sb->s_root) {
                error = ENOMEM;
                goto fail_vnrele;
        }
        if (is_bad_inode(sb->s_root->d_inode)) {
                error = EINVAL;
                goto fail_vnrele;
        }
        if ((error = xfs_fs_start_syncd(vfsp)))
                goto fail_vnrele;
        vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

        kmem_free(args, sizeof(*args));
        return 0;

fail_vnrele:
        if (sb->s_root) {
                dput(sb->s_root);
                sb->s_root = NULL;
        } else {
                VN_RELE(rootvp);
        }

fail_unmount:
        bhv_vfs_unmount(vfsp, 0, NULL);

fail_vfsop:
        vfs_deallocate(vfsp);
        kmem_free(args, sizeof(*args));
        return -error;
}

STATIC struct super_block *
xfs_fs_get_sb(
        struct file_system_type *fs_type,
        int                     flags,
        const char              *dev_name,
        void                    *data)
{
        return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}

STATIC struct super_operations xfs_super_operations = {
        .alloc_inode            = xfs_fs_alloc_inode,
        .destroy_inode          = xfs_fs_destroy_inode,
        .write_inode            = xfs_fs_write_inode,
        .clear_inode            = xfs_fs_clear_inode,
        .put_super              = xfs_fs_put_super,
        .write_super            = xfs_fs_write_super,
        .sync_fs                = xfs_fs_sync_super,
        .write_super_lockfs     = xfs_fs_lockfs,
        .statfs                 = xfs_fs_statfs,
        .remount_fs             = xfs_fs_remount,
        .show_options           = xfs_fs_show_options,
};

STATIC struct quotactl_ops xfs_quotactl_operations = {
        .quota_sync             = xfs_fs_quotasync,
        .get_xstate             = xfs_fs_getxstate,
        .set_xstate             = xfs_fs_setxstate,
        .get_xquota             = xfs_fs_getxquota,
        .set_xquota             = xfs_fs_setxquota,
};

STATIC struct file_system_type xfs_fs_type = {
        .owner                  = THIS_MODULE,
        .name                   = "xfs",
        .get_sb                 = xfs_fs_get_sb,
        .kill_sb                = kill_block_super,
        .fs_flags               = FS_REQUIRES_DEV,
};
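
/*
 * Module initialisation: print the version banner, initialise the
 * internal zones, caches and subsystems, then register the
 * filesystem type, unwinding on failure.
 */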
STATIC int __init
init_xfs_fs(void)
{
        int                     error;
        struct sysinfo          si;
        static char             message[] __initdata = KERN_INFO
                XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

        printk(message);

        si_meminfo(&si);
        xfs_physmem = si.totalram;

        ktrace_init(64);

        error = xfs_init_zones();
        if (error < 0)
                goto undo_zones;

        error = xfs_buf_init();
        if (error < 0)
                goto undo_buffers;

        vn_init();
        xfs_init();
        uuid_init();
        vfs_initquota();

        error = register_filesystem(&xfs_fs_type);
        if (error)
                goto undo_register;
        return 0;

undo_register:
        xfs_buf_terminate();

undo_buffers:
        xfs_destroy_zones();

undo_zones:
        return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
        vfs_exitquota();
        unregister_filesystem(&xfs_fs_type);
        xfs_cleanup();
        xfs_buf_terminate();
        xfs_destroy_zones();
        ktrace_uninit();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");