X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Fpowerpc%2Fplatforms%2Fcell%2Fspufs%2Fswitch.c;h=3df9a36eb2f58efe6bcda41f7d27cf5861ac4e5b;hb=2a5f2e3e6cd1ce9fb3f8b186b6bc9aa1f1497a92;hp=6f5886c7b1f9a8e5f16c1220df0a53c9c60cf241;hpb=f8303dd3db57bd7ab2062985ad7a9e898a8ac423;p=linux-2.6

diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 6f5886c7b1..3df9a36eb2 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
	 *     Write INT_MASK_class1 with value of 0.
	 *     Save INT_Mask_class2 in CSA.
	 *     Write INT_MASK_class2 with value of 0.
+	 *     Synchronize all three interrupts to be sure
+	 *     we no longer execute a handler on another CPU.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
@@ -129,6 +132,17 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);
+
+	/*
+	 * This flag needs to be set before calling synchronize_irq so
+	 * that the update will be visible to the relevant handlers
+	 * via a simple load.
+	 */
+	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
+	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
+	synchronize_irq(spu->irqs[0]);
+	synchronize_irq(spu->irqs[1]);
+	synchronize_irq(spu->irqs[2]);
 }
 
 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
@@ -160,9 +174,8 @@ static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
	/* Save, Step 7:
	 * Restore, Step 5:
	 *     Set a software context switch pending flag.
+	 *     Done above in Step 3 - disable_interrupts().
	 */
-	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
-	mb();
 }
 
 static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
@@ -180,20 +193,21 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
			 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
-		if (csa) {
+		if (csa)
			csa->priv2.mfc_control_RW =
-				MFC_CNTL_SUSPEND_MASK |
+				in_be64(&priv2->mfc_control_RW) |
				MFC_CNTL_SUSPEND_DMA_QUEUE;
-		}
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
-		if (csa) {
-			csa->priv2.mfc_control_RW = 0;
-		}
+		if (csa)
+			csa->priv2.mfc_control_RW =
+				in_be64(&priv2->mfc_control_RW) &
+				~MFC_CNTL_SUSPEND_DMA_QUEUE &
+				~MFC_CNTL_SUSPEND_MASK;
		break;
	}
 }
@@ -243,16 +257,21 @@ static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
	}
 }
 
-static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
+static inline void save_mfc_stopped_status(struct spu_state *csa,
+		struct spu *spu)
 {
	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
+			MFC_CNTL_DMA_QUEUES_EMPTY;
 
	/* Save, Step 12:
	 *     Read MFC_CNTL[Ds]. Update saved copy of
	 *     CSA.MFC_CNTL[Ds].
+	 *
+	 * update: do the same with MFC_CNTL[Q].
	 */
-	csa->priv2.mfc_control_RW |=
-		in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING;
+	csa->priv2.mfc_control_RW &= ~mask;
+	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
 }
 
 static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
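
The set_bit()/synchronize_irq() sequence added above is the core of the change: the pending flag is raised first, then synchronize_irq() waits out any handler still running on another CPU, so every handler that starts afterwards sees the flag with a plain load. A minimal, self-contained sketch of that pattern follows; the demo_* names are illustrative stand-ins, not spufs code.

#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>

#define DEMO_SWITCH_PENDING	0	/* bit number in dev->flags */

struct demo_dev {
	unsigned long flags;
	unsigned int irq;
};

static irqreturn_t demo_handler(int irq, void *data)
{
	struct demo_dev *dev = data;

	/* A plain load suffices here: the switch code sets the bit and
	 * then calls synchronize_irq(), so any handler entered after
	 * that point is guaranteed to observe it.
	 */
	if (test_bit(DEMO_SWITCH_PENDING, &dev->flags))
		return IRQ_HANDLED;	/* defer the work to the switch code */

	/* ... normal interrupt service ... */
	return IRQ_HANDLED;
}

static void demo_begin_switch(struct demo_dev *dev)
{
	set_bit(DEMO_SWITCH_PENDING, &dev->flags);

	/* Wait for any handler instance already running elsewhere to
	 * finish; afterwards none can still act on the old flag value.
	 */
	synchronize_irq(dev->irq);
}
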
@@ -456,7 +475,9 @@ static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
	 * Restore, Step 14.
	 *     Write MFC_CNTL[Pc]=1 (purge queue).
	 */
-	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
+	out_be64(&priv2->mfc_control_RW,
+			MFC_CNTL_PURGE_DMA_REQUEST |
+			MFC_CNTL_SUSPEND_MASK);
	eieio();
 }
 
@@ -719,10 +740,14 @@ static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
	/* Save, Step 48:
	 * Restore, Step 23.
	 *     Change the software context switch pending flag
-	 *     to context switch active.
+	 *     to context switch active. This implementation does
+	 *     not use a switch active flag.
	 *
-	 * This implementation does not uses a switch active flag.
+	 * Now that we have saved the mfc in the csa, we can add in the
+	 * restart command if an exception occurred.
	 */
+	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
+		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
 }
@@ -1684,6 +1709,13 @@ static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
	eieio();
 }
 
+static inline void set_int_route(struct spu_state *csa, struct spu *spu)
+{
+	struct spu_context *ctx = spu->ctx;
+
+	spu_cpu_affinity_set(spu, ctx->last_ran);
+}
+
 static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
 {
@@ -1715,15 +1747,15 @@ static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();
+
	/*
-	 * FIXME: this is to restart a DMA that we were processing
-	 * before the save. better remember the fault information
-	 * in the csa instead.
+	 * The queue is put back into the same state that was evident prior to
+	 * the context switch. The suspend flag is added to the saved state in
+	 * the csa, if the operational state was suspending or suspended. In
+	 * this case, the code that suspended the mfc is responsible for
+	 * continuing it. Note that SPE faults do not change the operational
+	 * state of the spu.
	 */
-	if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
-		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
-		eieio();
-	}
 }
 
 static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
@@ -1782,7 +1814,7 @@ static int quiece_spu(struct spu_state *prev, struct spu *spu)
	save_spu_runcntl(prev, spu);	        /* Step 9. */
	save_mfc_sr1(prev, spu);	        /* Step 10. */
	save_spu_status(prev, spu);	        /* Step 11. */
-	save_mfc_decr(prev, spu);	        /* Step 12. */
+	save_mfc_stopped_status(prev, spu);	/* Step 12. */
	halt_mfc_decr(prev, spu);	        /* Step 13. */
	save_timebase(prev, spu);		/* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
@@ -1809,6 +1841,7 @@ static void save_csa(struct spu_state *prev, struct spu *spu)
	save_mfc_csr_ato(prev, spu);	        /* Step 24. */
	save_mfc_tclass_id(prev, spu);	        /* Step 25. */
	set_mfc_tclass_id(prev, spu);	        /* Step 26. */
+	save_mfc_cmd(prev, spu);		/* Step 26a - moved from 44. */
	purge_mfc_queue(prev, spu);	        /* Step 27. */
	wait_purge_complete(prev, spu);	        /* Step 28. */
	setup_mfc_sr1(prev, spu);	        /* Step 30. */
@@ -1825,7 +1858,6 @@
	save_ppuint_mb(prev, spu);	        /* Step 41. */
	save_ch_part1(prev, spu);	        /* Step 42. */
	save_spu_mb(prev, spu);			/* Step 43. */
-	save_mfc_cmd(prev, spu);	        /* Step 44. */
	reset_ch(prev, spu);			/* Step 45. */
 }
 
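
Taken together, the hunks above move all fault bookkeeping into the saved state: save_mfc_stopped_status() refreshes only the masked status bits, set_switch_active() ORs in the restart command when a fault is pending, and restore_mfc_cntl() replays the whole word with a single register write. A compact sketch of that round trip, using stand-in DEMO_* bit values rather than the real MFC_CNTL_* masks:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_DECR_RUNNING	(1ull << 0)	/* stand-in bit values */
#define DEMO_QUEUES_EMPTY	(1ull << 1)
#define DEMO_RESTART_DMA	(1ull << 2)

struct demo_csa {
	uint64_t mfc_control;	/* saved copy of the control register */
};

/* Save side: refresh only the status bits of interest, leaving the
 * rest of the saved word (e.g. the suspend flags) untouched.
 */
static void demo_save_stopped_status(struct demo_csa *csa, uint64_t live)
{
	const uint64_t mask = DEMO_DECR_RUNNING | DEMO_QUEUES_EMPTY;

	csa->mfc_control &= ~mask;
	csa->mfc_control |= live & mask;
}

/* Save side, last step: if a fault arrived while interrupts were
 * masked, record the restart command in the saved word instead of
 * touching the hardware now.
 */
static void demo_finish_save(struct demo_csa *csa, bool fault_pending)
{
	if (fault_pending)
		csa->mfc_control |= DEMO_RESTART_DMA;
}

/* Restore side: one write puts the queue back in its prior state and,
 * if recorded above, restarts the faulted DMA on the new SPU.
 */
static void demo_restore(volatile uint64_t *reg, const struct demo_csa *csa)
{
	*reg = csa->mfc_control;
}
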
@@ -1994,6 +2026,7 @@ static void restore_csa(struct spu_state *next, struct spu *spu)
	check_ppuint_mb_stat(next, spu);	/* Step 67. */
	spu_invalidate_slbs(spu);		/* Modified Step 68. */
	restore_mfc_sr1(next, spu);	        /* Step 69. */
+	set_int_route(next, spu);		/* NEW */
	restore_other_spu_access(next, spu);	/* Step 70. */
	restore_spu_runcntl(next, spu);	        /* Step 71. */
	restore_mfc_cntl(next, spu);	        /* Step 72. */
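
The new set_int_route() step re-points the SPU's interrupts at the CPU recorded in ctx->last_ran, so handlers for the restored context run where the context last executed. A sketch of the idea with hypothetical demo_* types; the real code simply calls spu_cpu_affinity_set(spu, ctx->last_ran) at this point in the restore sequence.

struct demo_ctx {
	int last_ran;		/* CPU id noted when the context was unloaded */
};

struct demo_spu {
	struct demo_ctx *ctx;
	int irq_route;		/* stand-in for the hardware routing target */
};

/* Called between restoring MFC_SR1 (Step 69) and the remaining
 * restore steps: steer the interrupt route back to the CPU that
 * last ran this context.
 */
static void demo_set_int_route(struct demo_spu *spu)
{
	spu->irq_route = spu->ctx->last_ran;
}
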