From b877e3d37dda0154868a3c78f02f38a1ec14ce79 Mon Sep 17 00:00:00 2001
From: Lachlan McIlroy
Date: Fri, 27 Jun 2008 13:33:03 +1000
Subject: [PATCH] [XFS] Restore the lowspace extent allocator algorithm

When free space is running low the extent allocator may choose to
allocate an extent from an AG without leaving sufficient space for a
btree split when inserting the new extent (see where xfs_bmap_btalloc()
sets minleft to 0). In this case the allocator will enable the lowspace
algorithm which is supposed to allow further allocations (such as btree
splits and newroots) to allocate from sequential AGs. This algorithm
has been broken for a long time and this patch restores its behaviour.

SGI-PV: 983338
SGI-Modid: xfs-linux-melb:xfs-kern:31358a

Signed-off-by: Lachlan McIlroy
Signed-off-by: David Chinner
---
 fs/xfs/xfs_bmap.h       | 13 ++++++++++++-
 fs/xfs/xfs_bmap_btree.c |  8 ++++++--
 2 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 6ff70cda45..9f3e3a836d 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -54,12 +54,23 @@ typedef struct xfs_bmap_free_item
 /*
  * Header for free extent list.
+ *
+ * xbf_low is used by the allocator to activate the lowspace algorithm -
+ * when free space is running low the extent allocator may choose to
+ * allocate an extent from an AG without leaving sufficient space for
+ * a btree split when inserting the new extent. In this case the allocator
+ * will enable the lowspace algorithm which is supposed to allow further
+ * allocations (such as btree splits and newroots) to allocate from
+ * sequential AGs. In order to avoid locking AGs out of order the lowspace
+ * algorithm will start searching for free space from AG 0. If the correct
+ * transaction reservations have been made then this algorithm will eventually
+ * find all the space it needs.
  */
 typedef struct xfs_bmap_free
 {
 	xfs_bmap_free_item_t	*xbf_first;	/* list of to-be-free extents */
 	int			xbf_count;	/* count of items on list */
-	int			xbf_low;	/* kludge: alloc in low mode */
+	int			xbf_low;	/* alloc in low mode */
 } xfs_bmap_free_t;

 #define	XFS_BMAP_MAX_NMAP	4

diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 3fc09cd8d5..1140cef4ba 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -1509,7 +1509,9 @@ xfs_bmbt_split(
 		 * block allocation here and corrupt the filesystem.
 		 */
 		args.minleft = xfs_trans_get_block_res(args.tp);
-	} else
+	} else if (cur->bc_private.b.flist->xbf_low)
+		args.type = XFS_ALLOCTYPE_START_BNO;
+	else
 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
 	args.mod = args.alignment = args.total = args.isfl =
 		args.userdata = args.minalignslop = 0;
@@ -2237,7 +2239,9 @@ xfs_bmbt_newroot(
 #endif
 		args.fsbno = be64_to_cpu(*pp);
 		args.type = XFS_ALLOCTYPE_START_BNO;
-	} else
+	} else if (cur->bc_private.b.flist->xbf_low)
+		args.type = XFS_ALLOCTYPE_START_BNO;
+	else
 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
 	if ((error = xfs_alloc_vextent(&args))) {
 		XFS_BMBT_TRACE_CURSOR(cur, ERROR);
--
2.39.5
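
For readers outside the XFS tree, the decision the patch restores reduces to a
single flag check: when the free-extent list is marked "low", later allocations
(btree splits and new roots) start their search from AG 0 so AGs are always
locked in ascending order, instead of allocating near the block being split.
The following is a minimal standalone sketch of that selection logic only; the
enum values, struct field, and function name are simplified stand-ins for
illustration, not the kernel's XFS definitions.

/*
 * Simplified sketch of the allocation-type selection restored by this
 * patch.  All names below are illustrative, not the kernel's own.
 */
#include <stdio.h>

enum alloc_type {
	ALLOCTYPE_START_BNO,	/* search forward from a given block (AG 0 here) */
	ALLOCTYPE_NEAR_BNO	/* allocate near an existing block */
};

struct free_list {
	int	low;		/* lowspace algorithm active */
};

static enum alloc_type pick_alloc_type(const struct free_list *flist)
{
	/*
	 * Lowspace mode: start from AG 0 so AGs are locked in order.
	 * Otherwise: prefer an allocation near the existing block.
	 */
	return flist->low ? ALLOCTYPE_START_BNO : ALLOCTYPE_NEAR_BNO;
}

int main(void)
{
	struct free_list flist = { .low = 1 };

	printf("%s\n", pick_alloc_type(&flist) == ALLOCTYPE_START_BNO
			? "START_BNO" : "NEAR_BNO");
	return 0;
}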