[POWERPC] spufs: add memory barriers after set_bit
author    Arnd Bergmann <arnd.bergmann@de.ibm.com>
          Mon, 23 Apr 2007 19:08:10 +0000 (21:08 +0200)
committer Arnd Bergmann <arnd@klappe.arndb.de>
          Mon, 23 Apr 2007 19:18:54 +0000 (21:18 +0200)
set_bit does not guarantee ordering on powerpc, so using it
for communication between threads requires explicit
mb() calls.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
arch/powerpc/platforms/cell/spufs/sched.c

index 405a0555d75cbaf4fd7551107a48d7d0d481b61d..1582d7645237de6f8a130ee6b7081333dbf5ed40 100644
@@ -76,6 +76,7 @@ void spu_start_tick(struct spu_context *ctx)
                 * Make sure the exiting bit is cleared.
                 */
                clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+               mb();
                queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
        }
 }
@@ -88,6 +89,7 @@ void spu_stop_tick(struct spu_context *ctx)
                 * makes sure it does not rearm itself anymore.
                 */
                set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+               mb();
                cancel_delayed_work(&ctx->sched_work);
        }
 }
@@ -239,6 +241,7 @@ static void spu_add_to_rq(struct spu_context *ctx)
        spin_lock(&spu_prio->runq_lock);
        list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
        set_bit(ctx->prio, spu_prio->bitmap);
+       mb();
        spin_unlock(&spu_prio->runq_lock);
 }
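
For illustration only, the ordering problem the patch addresses can be sketched outside the kernel with C11 atomics: a relaxed atomic read-modify-write plays the role of powerpc's set_bit() (atomic, but with no ordering guarantee), and an explicit full fence plays the role of mb(). The flag name and the stop/rearm helpers below are hypothetical, not code from this patch.

/* Userspace analogue of the set_bit()/mb() pattern in the patch.
 * atomic_fetch_or_explicit(..., memory_order_relaxed) mirrors set_bit(),
 * which is atomic but unordered on powerpc; atomic_thread_fence() mirrors
 * mb().  EXITING_BIT, stop_tick() and may_rearm_tick() are made-up names.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define EXITING_BIT (1u << 0)

static atomic_uint sched_flags;

/* "spu_stop_tick" side: mark the context as exiting, then cancel work. */
static void stop_tick(void)
{
	atomic_fetch_or_explicit(&sched_flags, EXITING_BIT,
				 memory_order_relaxed);
	/* Without this fence, the cancellation below could be observed
	 * before the flag store, letting the tick rearm itself anyway. */
	atomic_thread_fence(memory_order_seq_cst);
	/* cancel_delayed_work(...) equivalent would go here */
}

/* Tick handler side: only rearm if nobody has asked us to exit. */
static bool may_rearm_tick(void)
{
	atomic_thread_fence(memory_order_seq_cst);
	return !(atomic_load_explicit(&sched_flags, memory_order_relaxed)
		 & EXITING_BIT);
}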