/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
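/*
 * Each type of log element (glock, metadata buffer, revoke, resource
 * group, data buffer) provides a gfs2_log_operations vtable, defined at
 * the bottom of this file.  The log code invokes these hooks as elements
 * are added to a transaction (lo_add), folded into the on-disk log
 * (lo_before_commit/lo_after_commit), and replayed from a journal during
 * recovery (lo_before_scan/lo_scan_elements/lo_after_scan).
 */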
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}
static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}
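/*
 * buf_lo_add pins a metadata buffer into the current transaction: the
 * buffer is linked onto both the transaction's buffer list and the
 * superblock's per-log list, and gfs2_pin() keeps it in memory until
 * the log copy reaches disk.
 */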
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	if (!list_empty(&bd->bd_list_tr))
		return;

	tr = get_transaction;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);
	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);
}
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}
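/*
 * A metadata log descriptor block holds one __be64 block number per
 * journaled buffer, starting at the first 8-byte boundary past the
 * descriptor header.  Working backwards from the "limit = 503" comment
 * below: (4096 - offset) / 8 = 503 gives offset = 72, which implies
 * sizeof(struct gfs2_log_descriptor) is 72 bytes here.
 */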
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num, n;
	__be64 *ptr;

	offset += (sizeof(__be64) - 1);
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (num > limit)
			num = limit;

		bh = gfs2_log_get_buf(sdp);
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		/* First pass: fill the descriptor with block numbers */
		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		/* Second pass: write the buffers themselves */
		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}
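/*
 * Once the log writes have been issued, buf_lo_after_commit unpins each
 * buffer, handing it to the struct gfs2_ail passed in (the active items
 * list) so it can later be written back in place.
 */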
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}
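/*
 * Journal replay makes two passes over the log: pass 0 collects revoke
 * tags, pass 1 rewrites journaled blocks into place.  The scan hooks
 * below check the pass number so that each element type only acts on
 * its own pass.
 */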
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}
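/*
 * A revoke records that a block no longer belongs to the metadata it
 * was journaled under (e.g. it has been freed and may since have been
 * reused).  During replay, gfs2_revoke_check() uses these tags to keep
 * stale log copies of revoked blocks from being written back in place.
 */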
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = get_transaction;
	tr->tr_touched = 1;

	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}
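/*
 * On-disk layout written below: the first block carries a
 * GFS2_LOG_DESC_REVOKE descriptor followed by packed __be64 block
 * numbers; each continuation block starts with a gfs2_meta_header of
 * type GFS2_METATYPE_LB, again followed by __be64s out to the end of
 * the block.
 */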
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be16(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be16(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(uint64_t)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del_init(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(uint64_t) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be16(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be16(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(uint64_t);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}
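/*
 * The reader below mirrors the writer above: it walks ld_length blocks,
 * skipping each block's header, and feeds every __be64 tag to
 * gfs2_revoke_add() until ld_data1 revokes have been consumed.
 */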
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	uint64_t blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(uint64_t) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(uint64_t);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}
static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}
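/*
 * Resource group elements don't journal any blocks themselves; the two
 * hooks below just hold the rgrp's buffers across the transaction and
 * refresh the clone state (gfs2_rgrp_repolish_clones) after commit.
 */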
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;

	get_transaction->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);
	gfs2_rgrp_bh_hold(rgd);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}
static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}
/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per meta data)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
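/*
 * Arithmetic behind the "251 or so", assuming the 72-byte descriptor
 * implied by the metadata case: a jdata tag is two __be64s (16 bytes)
 * and the header offset is rounded up to a 16-byte boundary (72 -> 80),
 * so a 4k log block holds (4096 - 80) / 8 = 502 __be64 slots, i.e.
 * 251 tags.
 */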
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = get_transaction;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = get_v2ip(mapping->host);

	tr->tr_touched = 1;
	/*
	 * Only proceed if the buffer isn't already queued: list_add() on
	 * an already-linked entry would corrupt the lists.
	 */
	if (list_empty(&bd->bd_list_tr) &&
	    (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
		tr->tr_num_buf++;
		gfs2_trans_add_gl(bd->bd_gl);
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		gfs2_pin(sdp, bd->bd_bh);
	}
	gfs2_log_lock(sdp);
	if (list_empty(&le->le_list)) {
		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
			sdp->sd_log_num_jdata++;
		sdp->sd_log_num_databuf++;
		list_add(&le->le_list, &sdp->sd_log_le_databuf);
	}
	gfs2_log_unlock(sdp);
}
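/*
 * A data block that happens to begin with GFS2_MAGIC would be
 * indistinguishable from metadata while it sits in the log, so such
 * blocks are "escaped": the writer zeroes the magic in the log copy and
 * sets the tag's flag word, and replay (databuf_lo_scan_elements)
 * restores the magic.
 */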
static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	/* kunmap_atomic takes the mapped address, not the page */
	kunmap_atomic(kaddr, KM_USER0);

	return rv;
}
/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	offset += (2*sizeof(__be64) - 1);
	offset &= ~(2*sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);

	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while (total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;

		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* An ordered write buffer */
			if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2)
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				total_dbuf--;
				get_bh(bd1->bd_bh);
				if (buffer_dirty(bd1->bd_bh)) {
					gfs2_log_unlock(sdp);
					wait_on_buffer(bd1->bd_bh);
					ll_rw_block(WRITE, 1,
						    &bd1->bd_bh);
					gfs2_log_lock(sdp);
				}
				brelse(bd1->bd_bh);
				continue;
			} else if (bd1->bd_bh) { /* A journaled buffer */
				int magic;
				gfs2_log_unlock(sdp);
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					ld = (struct gfs2_log_descriptor *)
					     bh->b_data;
					ptr = (__be64 *)(bh->b_data + offset);
					ld->ld_header.mh_magic =
						cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type =
						cpu_to_be16(GFS2_METATYPE_LD);
					ld->ld_header.mh_format =
						cpu_to_be16(GFS2_FORMAT_LD);
					ld->ld_type =
						cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0,
					       sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bd1->bd_bh);
				if (unlikely(magic != 0))
					set_buffer_escaped(bd1->bd_bh);
				gfs2_log_lock(sdp);
				if (++n >= num)
					break;
			}
		}
		gfs2_log_unlock(sdp);
		if (bh) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}

		n = 0;
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			if (!bd2->bd_bh)
				continue;
			/* copy buffer if it needs escaping */
			gfs2_log_unlock(sdp);
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;
				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data,
				       kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(kaddr, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else {
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			}
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;

		bh = bd1->bd_bh;
		if (bh) {
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
		} else
			gfs2_log_unlock(sdp);

		kfree(bd1);
	}

	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;
	struct gfs2_glock *gl = get_v2ip(jd->jd_inode)->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	uint64_t blkno;
	uint64_t esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}
/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = get_v2ip(jd->jd_inode)->i_sbd;

	if (error) {
		gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl,
			       DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(get_v2ip(jd->jd_inode)->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}
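/*
 * The per-element-type operation tables.  Hooks that an element type
 * doesn't need are left NULL and are presumably skipped by the log code.
 */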
struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
	.lo_name = "glock",
};

struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg",
};

struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_glock_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	&gfs2_rg_lops,
	&gfs2_databuf_lops,
	NULL,
};