static int sd_check_left(struct virtual_item *vi, int free,
int start_skip, int end_skip)
{
- if (start_skip || end_skip)
- BUG();
+ BUG_ON(start_skip || end_skip);
return -1;
}
static int sd_part_size(struct virtual_item *vi, int first, int count)
{
- if (count)
- BUG();
+ BUG_ON(count);
return 0;
}
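Aside: every hunk in this patch is the same mechanical transformation: an open-coded "if (cond) BUG();" becomes "BUG_ON(cond);", with no change in behaviour. The self-contained sketch below shows the equivalence; BUG() and unlikely() are stubbed out so it compiles in userspace, and the BUG_ON() definition is only an approximation of the generic one in include/asm-generic/bug.h (architectures may override it).

#include <stdio.h>
#include <stdlib.h>

/* userspace stand-ins for the kernel's unlikely() and BUG() */
#define unlikely(x)  __builtin_expect(!!(x), 0)
#define BUG() \
        do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)

/* roughly the generic kernel definition of BUG_ON() */
#define BUG_ON(condition) \
        do { if (unlikely(condition)) BUG(); } while (0)

int main(void)
{
        int start_skip = 0, end_skip = 0;

        /* old style */
        if (start_skip || end_skip)
                BUG();

        /* new style: same effect, plus the unlikely() branch hint */
        BUG_ON(start_skip || end_skip);

        return 0;
}

The one-liner is shorter, harder to get wrong, and (in the generic definition) picks up the unlikely() annotation for free.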
vi->vi_index = TYPE_DIRENTRY;
- if (!(vi->vi_ih) || !vi->vi_item)
- BUG();
+ BUG_ON(!(vi->vi_ih) || !vi->vi_item);
dir_u->flags = 0;
if (le_ih_k_offset(vi->vi_ih) == DOT_OFFSET)
free -= dir_u->entry_sizes[i];
entries++;
}
- if (entries == dir_u->entry_count)
- BUG();
+ BUG_ON(entries == dir_u->entry_count);
/* "." and ".." can not be separated from each other */
if ((dir_u->flags & DIRENTRY_VI_FIRST_DIRENTRY_ITEM)
spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
int ret = 0;
- if (chunk->nr >= CHUNK_SIZE)
- BUG();
+ BUG_ON(chunk->nr >= CHUNK_SIZE);
chunk->bh[chunk->nr++] = bh;
if (chunk->nr >= CHUNK_SIZE) {
ret = 1;
/* buffer must be locked for __add_jh, should be able to have
* two adds at the same time
*/
- if (bh->b_private)
- BUG();
+ BUG_ON(bh->b_private);
jh->bh = bh;
bh->b_private = jh;
}
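Aside: the chunk hunk above sits in the journal's batched-writeback helper (add_to_chunk() in fs/reiserfs/journal.c). It appends a buffer_head to a fixed-size array and, once the array is full, hands the whole batch to the caller-supplied fn callback, returning 1 so the caller knows a flush happened. A minimal userspace sketch of that batching pattern, using simplified types and a made-up callback, is:

#include <stdio.h>

#define CHUNK_SIZE 4    /* illustrative only; not the kernel's value */

struct chunk {
        int nr;
        int items[CHUNK_SIZE];
};

/*
 * Append one item; when the chunk fills, flush it through the callback.
 * Returns 1 if a flush happened, 0 otherwise.
 */
static int add_to_chunk(struct chunk *c, int item, void (*fn)(struct chunk *))
{
        if (c->nr >= CHUNK_SIZE)        /* the kernel asserts this with BUG_ON() */
                return -1;
        c->items[c->nr++] = item;
        if (c->nr >= CHUNK_SIZE) {
                fn(c);                  /* e.g. submit the batch for I/O */
                c->nr = 0;
                return 1;
        }
        return 0;
}

static void flush_chunk(struct chunk *c)
{
        printf("flushing %d items\n", c->nr);
}

int main(void)
{
        struct chunk c = { .nr = 0 };

        for (int i = 0; i < 10; i++)
                add_to_chunk(&c, i, flush_chunk);
        return 0;
}

The kernel version also takes a spinlock_t *lock so the flush callback can run with the lock temporarily dropped; that detail is omitted here.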
int retval;
reiserfs_check_lock_depth(p_s_sb, "journal_begin");
- if (nblocks > journal->j_trans_max)
- BUG();
+ BUG_ON(nblocks > journal->j_trans_max);
PROC_INFO_INC(p_s_sb, journal.journal_being);
/* set here for journal_join */
if (reiserfs_transaction_running(s)) {
th = current->journal_info;
th->t_refcount++;
- if (th->t_refcount < 2) {
- BUG();
- }
+ BUG_ON(th->t_refcount < 2);
+
return th;
}
th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
** pointer
*/
th->t_handle_save = cur_th;
- if (cur_th && cur_th->t_refcount > 1) {
- BUG();
- }
+ BUG_ON(cur_th && cur_th->t_refcount > 1);
return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
}
** pointer
*/
th->t_handle_save = cur_th;
- if (cur_th && cur_th->t_refcount > 1) {
- BUG();
- }
+ BUG_ON(cur_th && cur_th->t_refcount > 1);
return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
}
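Aside: the journal_begin/journal_join hunks above all enforce the same nesting rule: if the current task already has a transaction open (current->journal_info), that handle is reused and its refcount bumped rather than starting a second transaction, so after the increment the count must be at least 2; the join paths additionally refuse to stash away a handle that is itself already shared. A very rough sketch of that reuse pattern, with hypothetical names and none of the real locking or per-superblock state, is:

#include <stdlib.h>

struct handle {
        int t_refcount;
        /* ... journal state elided ... */
};

/* stand-in for current->journal_info */
static struct handle *journal_info;

static struct handle *txn_begin(void)
{
        struct handle *th = journal_info;

        if (th) {                       /* nested call: reuse the open handle */
                th->t_refcount++;       /* now >= 2, hence the BUG_ON above */
                return th;
        }
        th = malloc(sizeof(*th));       /* outermost call: fresh handle */
        if (!th)
                return NULL;
        th->t_refcount = 1;
        journal_info = th;
        return th;
}

static void txn_end(struct handle *th)
{
        if (--th->t_refcount == 0) {    /* only the outermost end really commits */
                journal_info = NULL;
                free(th);
        }
}

int main(void)
{
        struct handle *outer = txn_begin();
        struct handle *inner = txn_begin();     /* same handle, refcount now 2 */

        txn_end(inner);
        txn_end(outer);                         /* last user frees the handle */
        return 0;
}

This is only meant to make the refcount assertions readable; the real code also juggles t_handle_save and the join/abort variants shown above.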
current->journal_info = th;
}
ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
- if (current->journal_info != th)
- BUG();
+ BUG_ON(current->journal_info != th);
/* I guess this boils down to being the reciprocal of clm-2100 above.
* If do_journal_begin_r fails, we need to put it back, since journal_end
/* we aren't allowed to close a nested transaction on a different
** filesystem from the one in the task struct
*/
- if (cur_th->t_super != th->t_super)
- BUG();
+ BUG_ON(cur_th->t_super != th->t_super);
if (th != cur_th) {
memcpy(current->journal_info, th, sizeof(*th));
BUG_ON(!th->t_trans_id);
/* you can sync while nested, very, very bad */
- if (th->t_refcount > 1) {
- BUG();
- }
+ BUG_ON(th->t_refcount > 1);
if (journal->j_len == 0) {
reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
1);
** will be dealt with by next transaction that actually writes something, but should be taken
** care of in this trans
*/
- if (journal->j_len == 0) {
- BUG();
- }
+ BUG_ON(journal->j_len == 0);
+
/* if wcount > 0, and we are called to with flush or commit_now,
** we wait on j_join_wait. We will wake up when the last writer has
** finished the transaction, and started it on its way to the disk.
unlock_journal(p_s_sb);
}
}
- if (journal->j_trans_id == trans_id) {
- BUG();
- }
+ BUG_ON(journal->j_trans_id == trans_id);
+
if (commit_now
&& journal_list_still_alive(p_s_sb, trans_id)
&& wait_on_commit) {
set_commit_trans_len(commit, journal->j_len);
/* special check in case all buffers in the journal were marked for not logging */
- if (journal->j_len == 0) {
- BUG();
- }
+ BUG_ON(journal->j_len == 0);
/* we're about to dirty all the log blocks, mark the description block
* dirty now too. Don't mark the commit block dirty until all the
journal, jl, &jl->j_tail_bh_list);
lock_kernel();
}
- if (!list_empty(&jl->j_tail_bh_list))
- BUG();
+ BUG_ON(!list_empty(&jl->j_tail_bh_list));
up(&jl->j_commit_lock);
/* honor the flush wishes from the caller, simple commits can
{
struct reiserfs_de_head *deh = de->de_deh + de->de_entry_num;
- if (de->de_entry_num >= ih_entry_count(de->de_ih))
- BUG();
+ BUG_ON(de->de_entry_num >= ih_entry_count(de->de_ih));
de->de_entrylen = entry_length(de->de_bh, de->de_ih, de->de_entry_num);
de->de_namelen = de->de_entrylen - (de_with_sd(deh) ? SD_SIZE : 0);
// what entry points to
static inline void set_de_object_key(struct reiserfs_dir_entry *de)
{
- if (de->de_entry_num >= ih_entry_count(de->de_ih))
- BUG();
+ BUG_ON(de->de_entry_num >= ih_entry_count(de->de_ih));
de->de_dir_id = deh_dir_id(&(de->de_deh[de->de_entry_num]));
de->de_objectid = deh_objectid(&(de->de_deh[de->de_entry_num]));
}
{
struct reiserfs_de_head *deh = de->de_deh + de->de_entry_num;
- if (de->de_entry_num >= ih_entry_count(de->de_ih))
- BUG();
+ BUG_ON(de->de_entry_num >= ih_entry_count(de->de_ih));
/* store key of the found entry */
de->de_entry_key.version = KEY_FORMAT_3_5;