sb = get_super(bdev);
if (sb && !(sb->s_flags & MS_RDONLY)) {
sb->s_frozen = SB_FREEZE_WRITE;
- wmb();
+ smp_wmb();
sync_inodes_sb(sb, 0);
DQUOT_SYNC(sb);
sync_inodes_sb(sb, 1);
sb->s_frozen = SB_FREEZE_TRANS;
- wmb();
+ smp_wmb();
sync_blockdev(sb->s_bdev);
if (sb->s_op->unlockfs)
sb->s_op->unlockfs(sb);
sb->s_frozen = SB_UNFROZEN;
- wmb();
+ smp_wmb();
wake_up(&sb->s_wait_unfrozen);
drop_super(sb);
}
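
Going by the s_frozen transitions, these first hunks are the freeze_bdev()/thaw_bdev() paths. s_frozen is tested locklessly by writers, so each transition must be published in order; smp_wmb() is the right primitive because the ordering only matters between CPUs, and on UP kernels it collapses to a compiler barrier where the old wmb() always emitted a hardware barrier for no benefit. The same argument applies to every conversion below.
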
list_del(&receiver->list);
receiver->state = STATE_PENDING;
wake_up_process(receiver->task);
- wmb();
+ smp_wmb(); /* all prior writes must be visible before STATE_READY */
receiver->state = STATE_READY;
}
list_del(&sender->list);
sender->state = STATE_PENDING;
wake_up_process(sender->task);
- wmb();
+ smp_wmb(); /* all prior writes must be visible before STATE_READY */
sender->state = STATE_READY;
}
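
These two hunks (the mqueue pipelined send/receive paths, by the STATE_PENDING/STATE_READY transitions) are the classic publish pattern: finish all writes to the shared object, issue a write barrier, then set the flag the other side polls; the peer must load the flag, issue a read barrier, and only then read the data. A userspace C11 analogue of the pattern, as a sketch with invented names rather than the kernel code:

    /*
     * Sketch only: the release fence stands in for smp_wmb(), the
     * acquire fence for the read-side ordering the kernel peer
     * relies on.  Build with: cc -pthread publish.c
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    enum { STATE_NONE, STATE_READY };

    static _Atomic int state = STATE_NONE;
    static int payload;                             /* the "message" */

    static void *reader(void *arg)
    {
            /* poll the flag, like the sleeper rechecking ->state */
            while (atomic_load_explicit(&state,
                                        memory_order_relaxed) != STATE_READY)
                    ;
            atomic_thread_fence(memory_order_acquire); /* smp_rmb() analogue */
            printf("payload = %d\n", payload);         /* must print 42 */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, reader, NULL);
            payload = 42;                              /* write the data... */
            atomic_thread_fence(memory_order_release); /* smp_wmb() analogue */
            atomic_store_explicit(&state, STATE_READY, /* ...then the flag */
                                  memory_order_relaxed);
            pthread_join(t, NULL);
            return 0;
    }

A release fence before a relaxed store is the closest portable analogue of smp_wmb(); the acquire fence after the polling load mirrors the read-side ordering.
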
/* Must init completion *before* thread sees kthread_stop_info.k */
init_completion(&kthread_stop_info.done);
- wmb();
+ smp_wmb();
/* Now set kthread_should_stop() to true, and wake it up. */
kthread_stop_info.k = k;
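
kthread_stop() is the same publish pattern with a completion as the payload: kthread_stop_info.done must be fully initialised before kthread_stop_info.k becomes non-NULL, because the target thread observes kthread_should_stop() and then completes ->done. The smp_wmb() orders init_completion() against the store of k, exactly as the comment above it demands.
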
return 0;
out_cleanup:
prof_on = 0;
- mb();
+ smp_mb();
on_each_cpu(profile_nop, NULL, 0, 1);
for_each_online_cpu(cpu) {
struct page *page;
(current->gid != task->sgid) ||
(current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
goto bad;
- rmb();
+ smp_rmb();
if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
goto bad;
/* the same process cannot be attached many times */
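
The smp_rmb() in ptrace_attach() is the read side of the smp_wmb()s in the setuid/setgid family further down: the tracer checks the target's ids and only then, after the barrier, checks mm->dumpable, while every credential-changing path clears dumpable before installing the new id. With both orderings in place the tracer can never observe the new credentials together with a stale dumpable flag.
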
set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
/* Ack: we are alive */
- mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
+ smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
atomic_inc(&stopmachine_thread_ack);
/* Simple state machine */
local_irq_disable();
irqs_disabled = 1;
/* Ack: irqs disabled. */
- mb(); /* Must read state first. */
+ smp_mb(); /* Must read state first. */
atomic_inc(&stopmachine_thread_ack);
} else if (stopmachine_state == STOPMACHINE_PREPARE
&& !prepared) {
/* Everyone is in place, hold CPU. */
preempt_disable();
prepared = 1;
- mb(); /* Must read state first. */
+ smp_mb(); /* Must read state first. */
atomic_inc(&stopmachine_thread_ack);
}
/* Yield in first stage: migration threads need to
}
/* Ack: we are exiting. */
- mb(); /* Must read state first. */
+ smp_mb(); /* Must read state first. */
atomic_inc(&stopmachine_thread_ack);
if (irqs_disabled)
static void stopmachine_set_state(enum stopmachine_state state)
{
atomic_set(&stopmachine_thread_ack, 0);
- wmb();
+ smp_wmb();
stopmachine_state = state;
while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
cpu_relax();
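
The stop_machine handshake works in both directions: stopmachine_set_state() zeroes the ack counter and its smp_wmb() makes the reset visible before the new state is, while each stopped thread's smp_mb() forces its read of stopmachine_state to complete before it increments stopmachine_thread_ack ("Must read state first"). Together they ensure the master never counts a leftover ack from the previous state against the new one.
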
if (new_egid != old_egid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && egid != old_rgid))
if(old_egid != gid)
{
current->mm->dumpable=0;
- wmb();
+ smp_wmb();
}
current->gid = current->egid = current->sgid = current->fsgid = gid;
}
if(old_egid != gid)
{
current->mm->dumpable=0;
- wmb();
+ smp_wmb();
}
current->egid = current->fsgid = gid;
}
if(dumpclear)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->uid = new_ruid;
return 0;
if (new_euid != old_euid)
{
current->mm->dumpable=0;
- wmb();
+ smp_wmb();
}
current->fsuid = current->euid = new_euid;
if (ruid != (uid_t) -1 ||
if (old_euid != uid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->fsuid = current->euid = uid;
current->suid = new_suid;
if (euid != current->euid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->euid = euid;
}
if (egid != current->egid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->egid = egid;
}
if (uid != old_fsuid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->fsuid = uid;
}
if (gid != old_fsgid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->fsgid = gid;
key_fsgid_changed(current);
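
The ten hunks above are all the same idiom: every path that changes a credential clears mm->dumpable and issues smp_wmb() before the new id is stored. This is the write side of the ptrace_attach() pairing described earlier.
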
* Make sure we read the pid before re-reading the
* parent pointer:
*/
- rmb();
+ smp_rmb();
parent = me->group_leader->real_parent;
if (old != parent)
continue;
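
This is the lock-free getppid() retry loop: the pid is read from the parent, the smp_rmb() keeps that read ordered before the re-read of real_parent, and the loop retries if the parent changed in between, so a concurrent reparenting is detected rather than returning a torn result.
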
list_del(&waiter->list);
tsk = waiter->task;
/* Don't touch waiter after ->task has been NULLed */
- mb();
+ smp_mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
list_del(&waiter->list);
tsk = waiter->task;
- mb();
+ smp_mb(); /* Don't touch waiter after ->task has been NULLed */
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
list_del(&waiter->list);
tsk = waiter->task;
- mb();
+ smp_mb(); /* Don't touch waiter after ->task has been NULLed */
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
*/
list_del(&waiter->list);
tsk = waiter->task;
- mb();
+ smp_mb(); /* Don't touch waiter after ->task has been NULLed */
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
waiter = list_entry(next, struct rwsem_waiter, list);
next = waiter->list.next;
tsk = waiter->task;
- mb();
+ smp_mb(); /* Don't touch waiter after ->task has been NULLed */
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
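
The rwsem wake-up hunks all protect the same delicate handoff: struct rwsem_waiter lives on the sleeping task's stack, and the sleeper's loop is essentially "for (;;) { if (!waiter.task) break; schedule(); }", so the moment waiter->task is NULLed the stack frame can be reused. The smp_mb() guarantees the waker's list_del() and its read of waiter->task complete before the NULL store. A userspace analogue, as a sketch with invented names rather than the kernel code:

    /*
     * Sketch only: the waiter record lives on the sleeper's stack, so
     * the waker must be completely done with it before the store that
     * lets the sleeper return and reuse that stack.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct waiter {
            _Atomic int task;   /* nonzero while the sleeper still waits */
            int granted;        /* written by the waker */
    };

    static _Atomic(struct waiter *) wait_list;  /* one-slot "wait list" */

    static void *sleeper(void *arg)
    {
            struct waiter w = { 1, 0 };          /* lives on this stack */

            atomic_store(&wait_list, &w);
            /* like "for (;;) { if (!waiter.task) break; schedule(); }" */
            while (atomic_load_explicit(&w.task, memory_order_acquire))
                    ;
            printf("granted = %d\n", w.granted); /* waker is done with w */
            return NULL;                         /* w's storage dies here */
    }

    int main(void)
    {
            pthread_t t;
            struct waiter *w;

            pthread_create(&t, NULL, sleeper, NULL);
            while (!(w = atomic_load(&wait_list)))
                    ;
            w->granted = 1;                      /* last touch of *w... */
            /* smp_mb() analogue: order it before the releasing store */
            atomic_store_explicit(&w->task, 0, memory_order_release);
            /* *w must not be touched past this point */
            pthread_join(t, NULL);
            return 0;
    }

The release store on task plus the sleeper's acquire load stand in for the kernel's smp_mb(); anything weaker would let the waker's accesses to the record leak past the store that frees it.
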
/* Now start performing page reclaim */
gfp_temp = gfp_mask;
prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
- mb();
+ smp_mb();
if (!pool->curr_nr)
io_schedule();
finish_wait(&pool->wait, &wait);
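
In mempool_alloc() the smp_mb() after prepare_to_wait() keeps the pool->curr_nr test from being reordered before the wait-queue registration, closing the window against a concurrent mempool_free(); the mempool_free() hunk below appears to mirror it, ordering the element's release ahead of its own curr_nr test before deciding whether anyone needs waking.
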
{
unsigned long flags;
- mb();
+ smp_mb();
if (pool->curr_nr < pool->min_nr) {
spin_lock_irqsave(&pool->lock, flags);
if (pool->curr_nr < pool->min_nr) {