* filled.
*/
unsigned c_new;
+ unsigned c_unwritten;
};
+static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
+{
+ return d->c_new || d->c_unwritten;
+}
+
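
An unwritten extent has disk space allocated but no data written to it, so
reads from it must return zeros; when a write lands in one, the parts of the
cluster outside the written range have to be zeroed explicitly, just as for a
freshly allocated cluster. A minimal userspace sketch (not part of the patch;
the struct and helper here are invented stand-ins) of how the two flags drive
that decision:

#include <stdio.h>

/* Invented stand-in for struct ocfs2_write_cluster_desc above. */
struct desc {
	unsigned c_new;		/* cluster was just allocated */
	unsigned c_unwritten;	/* cluster allocated earlier, never written */
};

static int should_zero(const struct desc *d)
{
	/* Both cases hold stale on-disk bytes unless we zero them. */
	return d->c_new || d->c_unwritten;
}

int main(void)
{
	struct desc hole      = { .c_new = 1 };
	struct desc unwritten = { .c_unwritten = 1 };
	struct desc written   = { 0 };

	/* Prints: 1 1 0 */
	printf("%d %d %d\n", should_zero(&hole), should_zero(&unwritten),
	       should_zero(&written));
	return 0;
}
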
struct ocfs2_write_ctxt {
/* Logical cluster position / len of write */
u32 w_cpos;
handle_t *w_handle;
struct buffer_head *w_di_bh;
+
+ struct ocfs2_cached_dealloc_ctxt w_dealloc;
};
static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
else
wc->w_large_pages = 0;
+ ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
+
*wcp = wc;
return 0;
* Prepare a single cluster for writing into the file.
*/
static int ocfs2_write_cluster(struct address_space *mapping,
- u32 phys, struct ocfs2_alloc_context *data_ac,
+ u32 phys, unsigned int unwritten,
+ struct ocfs2_alloc_context *data_ac,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_write_ctxt *wc, u32 cpos,
loff_t user_pos, unsigned user_len)
{
- int ret, i, new;
+ int ret, i, new, should_zero = 0;
u64 v_blkno, p_blkno;
struct inode *inode = mapping->host;
new = phys == 0 ? 1 : 0;
+ if (new || unwritten)
+ should_zero = 1;
if (new) {
u32 tmp_pos;
mlog_errno(ret);
goto out;
}
+ } else if (unwritten) {
+ ret = ocfs2_mark_extent_written(inode, wc->w_di_bh,
+ wc->w_handle, cpos, 1, phys,
+ meta_ac, &wc->w_dealloc);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+ if (should_zero)
v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
- } else {
+ else
v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;
- }
/*
* The only reason this should fail is due to an inability to
tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
wc->w_pages[i], cpos,
- user_pos, user_len, new);
+ user_pos, user_len,
+ should_zero);
if (tmpret) {
mlog_errno(tmpret);
if (ret == 0)
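
To recap the control flow in ocfs2_write_cluster() after this change: a hole
gets a fresh allocation, an unwritten cluster is marked written via
ocfs2_mark_extent_written(), and a plain overwrite leaves the extent alone;
should_zero then decides whether page preparation starts from the first block
of the cluster or only from the user's position. A hedged, self-contained
sketch of that last decision (the enum and helper are invented for
illustration):

#include <stdint.h>
#include <stdio.h>

enum cluster_state { CL_HOLE, CL_UNWRITTEN, CL_WRITTEN };

/*
 * New and unwritten clusters are zeroed from the start of the
 * cluster, so page preparation begins at the cluster's first block;
 * a plain overwrite starts at the block containing the user's offset.
 */
static uint64_t first_block_to_prepare(enum cluster_state st,
				       uint64_t cluster_start_blk,
				       uint64_t user_pos_blk)
{
	int should_zero = (st == CL_HOLE || st == CL_UNWRITTEN);

	return should_zero ? cluster_start_blk : user_pos_blk;
}

int main(void)
{
	/* Overwriting block 10 of a written cluster that starts at block 8. */
	printf("%llu\n", (unsigned long long)
	       first_block_to_prepare(CL_WRITTEN, 8, 10));	/* 10 */

	/* The same write into an unwritten cluster zeros from block 8. */
	printf("%llu\n", (unsigned long long)
	       first_block_to_prepare(CL_UNWRITTEN, 8, 10));	/* 8 */
	return 0;
}
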
for (i = 0; i < wc->w_clen; i++) {
desc = &wc->w_desc[i];
- ret = ocfs2_write_cluster(mapping, desc->c_phys, data_ac,
- meta_ac, wc, desc->c_cpos, pos, len);
+ ret = ocfs2_write_cluster(mapping, desc->c_phys,
+ desc->c_unwritten, data_ac, meta_ac,
+ wc, desc->c_cpos, pos, len);
if (ret) {
mlog_errno(ret);
goto out;
if (wc->w_large_pages) {
/*
* We only care about the 1st and last cluster within
- * our range and whether they are holes or not. Either
+ * our range and whether they should be zeroed or not. Either
* value may be extended out to the start/end of a
* newly allocated cluster.
*/
desc = &wc->w_desc[0];
- if (desc->c_new)
+ if (ocfs2_should_zero_cluster(desc))
ocfs2_figure_cluster_boundaries(osb,
desc->c_cpos,
&wc->w_target_from,
NULL);
desc = &wc->w_desc[wc->w_clen - 1];
- if (desc->c_new)
+ if (ocfs2_should_zero_cluster(desc))
ocfs2_figure_cluster_boundaries(osb,
desc->c_cpos,
NULL,
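
When pages are larger than clusters (the w_large_pages case), only the byte
range of the target cluster within its page should be zeroed, which is what
the calls to ocfs2_figure_cluster_boundaries() compute. A sketch of that
boundary arithmetic, assuming both sizes are powers of two so a page holds a
whole number of clusters (the helper name is invented):

#include <stdio.h>

/*
 * Compute the byte range [*from, *to) that logical cluster 'cpos'
 * occupies within its page. Illustrative only; the kernel does the
 * equivalent with shift counts.
 */
static void cluster_boundaries_in_page(unsigned int page_size,
				       unsigned int cluster_size,
				       unsigned int cpos,
				       unsigned int *from,
				       unsigned int *to)
{
	unsigned int clusters_per_page = page_size / cluster_size;

	*from = (cpos % clusters_per_page) * cluster_size;
	*to = *from + cluster_size;
}

int main(void)
{
	unsigned int from, to;

	/* 64K pages, 4K clusters: cluster 18 is the third in its page. */
	cluster_boundaries_in_page(65536, 4096, 18, &from, &to);
	printf("[%u, %u)\n", from, to);	/* [8192, 12288) */
	return 0;
}
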
/*
* Populate each single-cluster write descriptor in the write context
* with information about the i/o to be done.
+ *
+ * Returns the number of clusters that will have to be allocated, as
+ * well as a worst case estimate of the number of extent records that
+ * would have to be created during a write to an unwritten region.
*/
static int ocfs2_populate_write_desc(struct inode *inode,
struct ocfs2_write_ctxt *wc,
- unsigned int *clusters_to_alloc)
+ unsigned int *clusters_to_alloc,
+ unsigned int *extents_to_split)
{
int ret;
struct ocfs2_write_cluster_desc *desc;
unsigned int num_clusters = 0;
+ unsigned int ext_flags = 0;
u32 phys = 0;
int i;
+ *clusters_to_alloc = 0;
+ *extents_to_split = 0;
+
for (i = 0; i < wc->w_clen; i++) {
desc = &wc->w_desc[i];
desc->c_cpos = wc->w_cpos + i;
if (num_clusters == 0) {
+ /*
+ * Need to look up the next extent record.
+ */
ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
- &num_clusters, NULL);
+ &num_clusters, &ext_flags);
if (ret) {
mlog_errno(ret);
goto out;
}
+
+ /*
+ * Assume worst case - that we're writing in
+ * the middle of the extent.
+ *
+ * We can assume that the write proceeds from
+ * left to right, in which case the extent
+ * insert code is smart enough to coalesce
+ * later splits with the records created earlier.
+ */
+ if (ext_flags & OCFS2_EXT_UNWRITTEN)
+ *extents_to_split = *extents_to_split + 2;
} else if (phys) {
/*
* Only increment phys if it doesn't describe
desc->c_new = 1;
*clusters_to_alloc = *clusters_to_alloc + 1;
}
+ if (ext_flags & OCFS2_EXT_UNWRITTEN)
+ desc->c_unwritten = 1;
num_clusters--;
}
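
The worst-case arithmetic above can be made concrete. Writing into the middle
of one unwritten extent record can leave three records where there was one
(an unwritten head, a written middle, and an unwritten tail), i.e. at most
two records beyond the one that already existed, which is why each unwritten
region encountered adds 2 to *extents_to_split. A small illustrative sketch:

#include <stdio.h>

/*
 * Before: [__________ unwritten __________]
 * After:  [ unwritten ][ written ][ unwritten ]
 *
 * One record becomes at most three, so each unwritten extent a write
 * touches is budgeted at two additional records.
 */
static unsigned int worst_case_new_records(unsigned int unwritten_extents)
{
	return 2 * unwritten_extents;
}

int main(void)
{
	/* A write that crosses two unwritten extent records. */
	printf("%u\n", worst_case_new_records(2));	/* 4 */
	return 0;
}
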
struct buffer_head *di_bh, struct page *mmap_page)
{
int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
- unsigned int clusters_to_alloc = 0;
+ unsigned int clusters_to_alloc, extents_to_split;
struct ocfs2_write_ctxt *wc;
struct inode *inode = mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
return ret;
}
- ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc);
+ ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
+ &extents_to_split);
if (ret) {
mlog_errno(ret);
goto out;
* write out. An allocation requires that we write the entire
* cluster range.
*/
- if (clusters_to_alloc > 0) {
+ if (clusters_to_alloc || extents_to_split) {
/*
* XXX: We are stretching the limits of
- * ocfs2_lock_allocators(). It greately over-estimates
+ * ocfs2_lock_allocators(). It greatly over-estimates
* the work to be done.
*/
ret = ocfs2_lock_allocators(inode, di, clusters_to_alloc,
- &data_ac, &meta_ac);
+ extents_to_split, &data_ac, &meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
- ocfs2_set_target_boundaries(osb, wc, pos, len, clusters_to_alloc);
+ ocfs2_set_target_boundaries(osb, wc, pos, len,
+ clusters_to_alloc + extents_to_split);
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
* extent.
*/
ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
- clusters_to_alloc, mmap_page);
+ clusters_to_alloc + extents_to_split,
+ mmap_page);
if (ret) {
mlog_errno(ret);
goto out_commit;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
-
ocfs2_journal_dirty(handle, wc->w_di_bh);
ocfs2_commit_trans(osb, handle);
+ ocfs2_run_deallocs(osb, &wc->w_dealloc);
+
ocfs2_free_write_ctxt(wc);
return copied;
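
The w_dealloc context added above follows a deferred-free pattern:
ocfs2_mark_extent_written() only queues block ranges onto the cached context
while the transaction is open, and ocfs2_run_deallocs() performs the real
frees after ocfs2_commit_trans(), presumably so that allocator locking stays
out of the running transaction. A generic, hedged sketch of the pattern (all
names here are invented, not the ocfs2 API):

#include <stdio.h>
#include <stdlib.h>

/* A block range queued for later freeing. */
struct dealloc_entry {
	unsigned long long blkno;
	unsigned int count;
	struct dealloc_entry *next;
};

struct dealloc_ctxt {
	struct dealloc_entry *head;
};

static void dealloc_init(struct dealloc_ctxt *c)
{
	c->head = NULL;
}

/* Called while a transaction is running: just record the range. */
static int dealloc_queue(struct dealloc_ctxt *c,
			 unsigned long long blkno, unsigned int count)
{
	struct dealloc_entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	e->blkno = blkno;
	e->count = count;
	e->next = c->head;
	c->head = e;
	return 0;
}

/* Called after the transaction commits: do the real frees. */
static void dealloc_run(struct dealloc_ctxt *c)
{
	while (c->head) {
		struct dealloc_entry *e = c->head;

		c->head = e->next;
		printf("freeing %u blocks at %llu\n", e->count, e->blkno);
		free(e);
	}
}

int main(void)
{
	struct dealloc_ctxt c;

	dealloc_init(&c);
	dealloc_queue(&c, 4096, 8);
	/* ... transaction would commit here ... */
	dealloc_run(&c);
	return 0;
}
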
* understand sparse inodes.
*/
int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
- u32 clusters_to_add,
+ u32 clusters_to_add, u32 extents_to_split,
struct ocfs2_alloc_context **data_ac,
struct ocfs2_alloc_context **meta_ac)
{
int ret, num_free_extents;
+ unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
*meta_ac = NULL;
*data_ac = NULL;
mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
- "clusters_to_add = %u\n",
+ "clusters_to_add = %u, extents_to_split = %u\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
- le32_to_cpu(di->i_clusters), clusters_to_add);
+ le32_to_cpu(di->i_clusters), clusters_to_add, extents_to_split);
num_free_extents = ocfs2_num_free_extents(osb, inode, di);
if (num_free_extents < 0) {
*
* Most of the time we'll only be seeing this one cluster
* at a time anyway.
+ *
+ * Always lock for any unwritten extents - we might want to
+ * add blocks during a split.
*/
if (!num_free_extents ||
- (ocfs2_sparse_alloc(osb) && num_free_extents < clusters_to_add)) {
+ (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
ret = ocfs2_reserve_new_metadata(osb, di, meta_ac);
if (ret < 0) {
if (ret != -ENOSPC)
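
The reservation check above can be worked through with numbers: each cluster
allocated may need its own extent record, and each split is budgeted at two
extra records, so max_recs_needed = clusters_to_add + 2 * extents_to_split.
A hedged sketch of the check (the function name is invented):

#include <stdio.h>

static int must_reserve_metadata(unsigned int num_free_extents,
				 unsigned int clusters_to_add,
				 unsigned int extents_to_split,
				 int sparse_alloc)
{
	unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;

	return !num_free_extents ||
	       (sparse_alloc && num_free_extents < max_recs_needed);
}

int main(void)
{
	/*
	 * Writing one cluster's worth of data into the middle of an
	 * unwritten extent: clusters_to_add = 0, extents_to_split = 2,
	 * so up to 4 extent records are budgeted for.
	 */
	printf("%d\n", must_reserve_metadata(3, 0, 2, 1));	/* 1 */
	printf("%d\n", must_reserve_metadata(4, 0, 2, 1));	/* 0 */
	return 0;
}
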
down_write(&OCFS2_I(inode)->ip_alloc_sem);
drop_alloc_sem = 1;
- status = ocfs2_lock_allocators(inode, fe, clusters_to_add, &data_ac,
+ status = ocfs2_lock_allocators(inode, fe, clusters_to_add, 0, &data_ac,
&meta_ac);
if (status) {
mlog_errno(status);