X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=fs%2Fxfs%2Fxfs_log.c;h=31f2b04f2c97b21c0cbe96a4757d5a26e9ddfa32;hb=1018cf9b0f27646c37a52a079989dc0552b2451c;hp=301a6b8c2034589a9c3a67dd81fc14042bbdff97;hpb=a8272ce0c1d49aa3bec57682678f0bdfe28ed4ca;p=linux-2.6

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 301a6b8c20..31f2b04f2c 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -498,11 +498,14 @@ xfs_log_reserve(xfs_mount_t *mp,
  * Return error or zero.
  */
 int
-xfs_log_mount(xfs_mount_t	*mp,
-	      xfs_buftarg_t	*log_target,
-	      xfs_daddr_t	blk_offset,
-	      int		num_bblks)
+xfs_log_mount(
+	xfs_mount_t	*mp,
+	xfs_buftarg_t	*log_target,
+	xfs_daddr_t	blk_offset,
+	int		num_bblks)
 {
+	int		error;
+
 	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
 		cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
 	else {
@@ -514,12 +517,22 @@ xfs_log_mount(xfs_mount_t *mp,
 
 	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
 
+	/*
+	 * Initialize the AIL now we have a log.
+	 */
+	spin_lock_init(&mp->m_ail_lock);
+	error = xfs_trans_ail_init(mp);
+	if (error) {
+		cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error);
+		goto error;
+	}
+
 	/*
 	 * skip log recovery on a norecovery mount.  pretend it all
 	 * just worked.
 	 */
 	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
-		int	error, readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
+		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
 
 		if (readonly)
 			mp->m_flags &= ~XFS_MOUNT_RDONLY;
@@ -530,8 +543,7 @@ xfs_log_mount(xfs_mount_t *mp,
 			mp->m_flags |= XFS_MOUNT_RDONLY;
 		if (error) {
 			cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
-			xlog_dealloc_log(mp->m_log);
-			return error;
+			goto error;
 		}
 	}
 
@@ -540,6 +552,9 @@ xfs_log_mount(xfs_mount_t *mp,
 
 	/* End mounting message in xfs_log_mount_finish */
 	return 0;
+error:
+	xfs_log_unmount_dealloc(mp);
+	return error;
 }	/* xfs_log_mount */
 
 /*
@@ -722,10 +737,14 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 
 /*
  * Deallocate log structures for unmount/relocation.
+ *
+ * We need to stop the aild from running before we destroy
+ * and deallocate the log as the aild references the log.
  */
 void
 xfs_log_unmount_dealloc(xfs_mount_t *mp)
 {
+	xfs_trans_ail_destroy(mp);
 	xlog_dealloc_log(mp->m_log);
 }
 
@@ -764,7 +783,6 @@ xfs_log_move_tail(xfs_mount_t	*mp,
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
-	ASSERT(!XFS_FORCED_SHUTDOWN(mp));
 
 	if (tail_lsn == 0) {
 		/* needed since sync_lsn is 64 bits */
@@ -1072,7 +1090,7 @@ xlog_get_iclog_buffer_size(xfs_mount_t	*mp,
 			size >>= 1;
 		}
 
-		if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
+		if (xfs_sb_version_haslogv2(&mp->m_sb)) {
 			/* # headers = size / 32K
 			 * one header holds cycles from 32K of data
 			 */
@@ -1168,13 +1186,13 @@ xlog_alloc_log(xfs_mount_t	*mp,
 	log->l_grant_reserve_cycle = 1;
 	log->l_grant_write_cycle = 1;
 
-	if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb)) {
+	if (xfs_sb_version_hassector(&mp->m_sb)) {
 		log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
 		ASSERT(log->l_sectbb_log <= mp->m_sectbb_log);
 		/* for larger sector sizes, must have v2 or external log */
 		ASSERT(log->l_sectbb_log == 0 ||
 			log->l_logBBstart == 0 ||
-			XFS_SB_VERSION_HASLOGV2(&mp->m_sb));
+			xfs_sb_version_haslogv2(&mp->m_sb));
 		ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT);
 	}
 	log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;
@@ -1229,7 +1247,7 @@ xlog_alloc_log(xfs_mount_t	*mp,
 		memset(head, 0, sizeof(xlog_rec_header_t));
 		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
 		head->h_version = cpu_to_be32(
-			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
+			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
 		head->h_size = cpu_to_be32(log->l_iclog_size);
 		/* new fields */
 		head->h_fmt = cpu_to_be32(XLOG_FMT);
@@ -1384,7 +1402,7 @@ xlog_sync(xlog_t		*log,
 	int		roundoff;       /* roundoff to BB or stripe */
 	int		split = 0;	/* split write into two regions */
 	int		error;
-	int		v2 = XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb);
+	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
 
 	XFS_STATS_INC(xs_log_writes);
 	ASSERT(iclog->ic_refcnt == 0);
@@ -1491,9 +1509,9 @@ xlog_sync(xlog_t		*log,
 		 * case, though.
 		 */
 		for (i = 0; i < split; i += BBSIZE) {
-			be32_add((__be32 *)dptr, 1);
+			be32_add_cpu((__be32 *)dptr, 1);
 			if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
-				be32_add((__be32 *)dptr, 1);
+				be32_add_cpu((__be32 *)dptr, 1);
 			dptr += BBSIZE;
 		}
 
@@ -1553,7 +1571,7 @@ xlog_dealloc_log(xlog_t *log)
 		tic = log->l_unmount_free;
 		while (tic) {
 			next_tic = tic->t_next;
-			kmem_free(tic, NBPP);
+			kmem_free(tic, PAGE_SIZE);
 			tic = next_tic;
 		}
 	}
@@ -1582,7 +1600,7 @@ xlog_state_finish_copy(xlog_t		*log,
 {
 	spin_lock(&log->l_icloglock);
 
-	be32_add(&iclog->ic_header.h_num_logops, record_cnt);
+	be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
 	iclog->ic_offset += copy_bytes;
 
 	spin_unlock(&log->l_icloglock);
@@ -2863,7 +2881,7 @@ xlog_state_switch_iclogs(xlog_t		*log,
 	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
 
 	/* Round up to next log-sunit */
-	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
+	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
 	    log->l_mp->m_sb.sb_logsunit > 1) {
 		__uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
 		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
@@ -3162,13 +3180,13 @@ xlog_state_ticket_alloc(xlog_t *log)
 	xlog_ticket_t	*t_list;
 	xlog_ticket_t	*next;
 	xfs_caddr_t	buf;
-	uint		i = (NBPP / sizeof(xlog_ticket_t)) - 2;
+	uint		i = (PAGE_SIZE / sizeof(xlog_ticket_t)) - 2;
 
 	/*
 	 * The kmem_zalloc may sleep, so we shouldn't be holding the
 	 * global lock.  XXXmiken: may want to use zone allocator.
 	 */
-	buf = (xfs_caddr_t) kmem_zalloc(NBPP, KM_SLEEP);
+	buf = (xfs_caddr_t) kmem_zalloc(PAGE_SIZE, KM_SLEEP);
 
 	spin_lock(&log->l_icloglock);
@@ -3316,7 +3334,7 @@ xlog_ticket_get(xlog_t		*log,
 	unit_bytes += sizeof(xlog_op_header_t) * num_headers;
 
 	/* for roundoff padding for transaction data and one for commit record */
-	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
+	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
 	    log->l_mp->m_sb.sb_logsunit > 1) {
 		/* log su roundoff */
 		unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
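
Note on the be32_add() -> be32_add_cpu() conversions in the hunks above: the
renamed helper makes explicit that the addition is performed in CPU byte order
on a big-endian on-disk field. As a rough sketch for context (this mirrors the
generic byteorder helper in kernels of this era; it is not part of this patch):

	/*
	 * Load the big-endian field, add a CPU-order value, and store it
	 * back big-endian.  This is what lets xlog_sync() bump the cycle
	 * number embedded in each basic block without caring about host
	 * endianness.
	 */
	static inline void be32_add_cpu(__be32 *var, u32 val)
	{
		*var = cpu_to_be32(be32_to_cpu(*var) + val);
	}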