err.no Git - linux-2.6/commitdiff
Merge branch 'for-2.6.26' of master.kernel.org:/pub/scm/linux/kernel/git/jwboyer...
author     Paul Mackerras <paulus@samba.org>
           Fri, 9 May 2008 10:12:06 +0000 (20:12 +1000)
committer  Paul Mackerras <paulus@samba.org>
           Fri, 9 May 2008 10:12:06 +0000 (20:12 +1000)
12 files changed:
arch/powerpc/platforms/cell/interrupt.c
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/cell/spu_priv1_mmio.c
arch/powerpc/platforms/cell/spufs/fault.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/cell/spufs/run.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/powerpc/platforms/cell/spufs/spufs.h
arch/powerpc/platforms/cell/spufs/switch.c
arch/powerpc/xmon/xmon.c
include/asm-powerpc/spu.h
include/asm-powerpc/spu_csa.h

diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 04f74f9f9ab67dcae27a706a7c3de449bab22ac6..5bf7df14602288b1d4daafb79e1116ac92066864 100644
@@ -35,6 +35,7 @@
 #include <linux/percpu.h>
 #include <linux/types.h>
 #include <linux/ioport.h>
+#include <linux/kernel_stat.h>
 
 #include <asm/io.h>
 #include <asm/pgtable.h>
@@ -231,6 +232,54 @@ static int iic_host_match(struct irq_host *h, struct device_node *node)
                                    "IBM,CBEA-Internal-Interrupt-Controller");
 }
 
+extern int noirqdebug;
+
+static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
+{
+       const unsigned int cpu = smp_processor_id();
+
+       spin_lock(&desc->lock);
+
+       desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+
+       /*
+        * If we're currently running this IRQ, or it's disabled,
+        * we shouldn't process the IRQ.  Mark it pending, handle
+        * the necessary masking and go out.
+        */
+       if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
+                   !desc->action)) {
+               desc->status |= IRQ_PENDING;
+               goto out_eoi;
+       }
+
+       kstat_cpu(cpu).irqs[irq]++;
+
+       /* Mark the IRQ currently in progress. */
+       desc->status |= IRQ_INPROGRESS;
+
+       do {
+               struct irqaction *action = desc->action;
+               irqreturn_t action_ret;
+
+               if (unlikely(!action))
+                       goto out_eoi;
+
+               desc->status &= ~IRQ_PENDING;
+               spin_unlock(&desc->lock);
+               action_ret = handle_IRQ_event(irq, action);
+               if (!noirqdebug)
+                       note_interrupt(irq, desc, action_ret);
+               spin_lock(&desc->lock);
+
+       } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+
+       desc->status &= ~IRQ_INPROGRESS;
+out_eoi:
+       desc->chip->eoi(irq);
+       spin_unlock(&desc->lock);
+}
+
 static int iic_host_map(struct irq_host *h, unsigned int virq,
                        irq_hw_number_t hw)
 {
@@ -240,10 +289,10 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
                break;
        case IIC_IRQ_TYPE_IOEXC:
                set_irq_chip_and_handler(virq, &iic_ioexc_chip,
-                                        handle_fasteoi_irq);
+                                        handle_iic_irq);
                break;
        default:
-               set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq);
+               set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq);
        }
        return 0;
 }
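
The flow-handler swap is the point of this file's change: handle_fasteoi_irq services the action at most once and relies on masking to replay a pending interrupt later, while the new handle_iic_irq loops on IRQ_PENDING (much like the kernel's edge handler) and issues the EOI on every exit path, including the early-out cases that jump to out_eoi. A minimal user-space model of that replay loop (hypothetical names, locking omitted):

    #include <stdio.h>

    #define PENDING    (1u << 0)
    #define INPROGRESS (1u << 1)

    static unsigned int status;
    static int events_left = 3;    /* pretend 3 events arrive back to back */

    static void run_action(void)
    {
            events_left--;
            printf("serviced one event, %d still queued\n", events_left);
            if (events_left > 0)
                    status |= PENDING;    /* new event arrived mid-handler */
    }

    static void handle_irq_model(void)
    {
            if (status & INPROGRESS) {
                    status |= PENDING;    /* the running instance replays it */
                    goto out_eoi;
            }

            status |= INPROGRESS;
            do {
                    status &= ~PENDING;
                    run_action();
            } while (status & PENDING);
            status &= ~INPROGRESS;
    out_eoi:
            printf("EOI issued exactly once\n");
    }

    int main(void)
    {
            handle_irq_model();
            return 0;
    }
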
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 6bab44b7716b2d525bff0720c0c6a0f436a55acb..70c660121ec4d0d2da4a36071d851a1acd3e7870 100644
@@ -141,6 +141,10 @@ static void spu_restart_dma(struct spu *spu)
 
        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+       else {
+               set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
+               mb();
+       }
 }
 
 static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
@@ -226,11 +230,13 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
                return 0;
        }
 
-       spu->class_0_pending = 0;
-       spu->dar = ea;
-       spu->dsisr = dsisr;
+       spu->class_1_dar = ea;
+       spu->class_1_dsisr = dsisr;
+
+       spu->stop_callback(spu, 1);
 
-       spu->stop_callback(spu);
+       spu->class_1_dar = 0;
+       spu->class_1_dsisr = 0;
 
        return 0;
 }
@@ -318,11 +324,15 @@ spu_irq_class_0(int irq, void *data)
        stat = spu_int_stat_get(spu, 0) & mask;
 
        spu->class_0_pending |= stat;
-       spu->dsisr = spu_mfc_dsisr_get(spu);
-       spu->dar = spu_mfc_dar_get(spu);
+       spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
+       spu->class_0_dar = spu_mfc_dar_get(spu);
        spin_unlock(&spu->register_lock);
 
-       spu->stop_callback(spu);
+       spu->stop_callback(spu, 0);
+
+       spu->class_0_pending = 0;
+       spu->class_0_dsisr = 0;
+       spu->class_0_dar = 0;
 
        spu_int_stat_clear(spu, 0, stat);
 
@@ -363,6 +373,9 @@ spu_irq_class_1(int irq, void *data)
        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                ;
 
+       spu->class_1_dsisr = 0;
+       spu->class_1_dar = 0;
+
        return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -396,10 +409,10 @@ spu_irq_class_2(int irq, void *data)
                spu->ibox_callback(spu);
 
        if (stat & CLASS2_SPU_STOP_INTR)
-               spu->stop_callback(spu);
+               spu->stop_callback(spu, 2);
 
        if (stat & CLASS2_SPU_HALT_INTR)
-               spu->stop_callback(spu);
+               spu->stop_callback(spu, 2);
 
        if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
                spu->mfc_callback(spu);
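
Two things change in spu_base.c: faults are now recorded per interrupt class (class_0_dar/class_0_dsisr versus class_1_dar/class_1_dsisr, cleared by the interrupt path itself once the callback returns), and stop_callback() carries the class number so the spufs side knows which pair to copy. A toy illustration of why the split matters (user-space sketch, hypothetical types):

    #include <stdint.h>
    #include <stdio.h>

    struct fault_record {
            uint64_t dar;      /* faulting effective address */
            uint64_t dsisr;    /* fault reason bits */
    };

    /* one record per interrupt class, as in the patched struct spu */
    static struct fault_record fault[2];

    static void record_fault(int class, uint64_t dar, uint64_t dsisr)
    {
            fault[class].dar = dar;
            fault[class].dsisr = dsisr;
    }

    int main(void)
    {
            record_fault(1, 0x1000, 0x40000000); /* page fault in flight */
            record_fault(0, 0, 0x2);             /* error interrupt fires */

            /* with the old single dar/dsisr pair the error would have
             * clobbered the page-fault address; per-class it survives */
            printf("class 1 dar still %#llx\n",
                   (unsigned long long)fault[1].dar);
            return 0;
    }
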
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 67fa7247b80a4e0dac21d6702c2fcc0c1d6b1867..906a0a2a9fe18fc4273be7916f18df118c824dc1 100644
@@ -28,6 +28,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/device.h>
+#include <linux/sched.h>
 
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
@@ -75,8 +76,19 @@ static u64 int_stat_get(struct spu *spu, int class)
 
 static void cpu_affinity_set(struct spu *spu, int cpu)
 {
-       u64 target = iic_get_target_id(cpu);
-       u64 route = target << 48 | target << 32 | target << 16;
+       u64 target;
+       u64 route;
+
+       if (nr_cpus_node(spu->node)) {
+               cpumask_t spumask = node_to_cpumask(spu->node);
+               cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
+
+               if (!cpus_intersects(spumask, cpumask))
+                       return;
+       }
+
+       target = iic_get_target_id(cpu);
+       route = target << 48 | target << 32 | target << 16;
        out_be64(&spu->priv1->int_route_RW, route);
 }
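
cpu_affinity_set() now refuses to move the interrupt route to a CPU on a different node than the SPU: if the SPU's node has any CPUs at all and the target CPU's node mask does not intersect it, the write to int_route_RW is simply skipped. A user-space model of the guard with a made-up node layout:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* pretend each node owns 4 consecutive cpus */
    static uint64_t node_cpu_mask(int node)
    {
            return 0xfull << (node * 4);
    }

    static bool may_route(int spu_node, int target_cpu)
    {
            uint64_t spumask = node_cpu_mask(spu_node);
            uint64_t cpumask = (uint64_t)1 << target_cpu;

            return (spumask & cpumask) != 0;  /* cpus_intersects() analogue */
    }

    int main(void)
    {
            printf("cpu 2 for node-0 spu: %d\n", may_route(0, 2)); /* 1: route */
            printf("cpu 6 for node-0 spu: %d\n", may_route(0, 6)); /* 0: skip  */
            return 0;
    }
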
 
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index e46d300e21a5e18cf3b612b214a11af5f9562805..f093a581ac7410792a9126b2b0bbc87f665e9fdc 100644
@@ -83,13 +83,18 @@ int spufs_handle_class0(struct spu_context *ctx)
                return 0;
 
        if (stat & CLASS0_DMA_ALIGNMENT_INTR)
-               spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+               spufs_handle_event(ctx, ctx->csa.class_0_dar,
+                       SPE_EVENT_DMA_ALIGNMENT);
 
        if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
-               spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+               spufs_handle_event(ctx, ctx->csa.class_0_dar,
+                       SPE_EVENT_INVALID_DMA);
 
        if (stat & CLASS0_SPU_ERROR_INTR)
-               spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+               spufs_handle_event(ctx, ctx->csa.class_0_dar,
+                       SPE_EVENT_SPE_ERROR);
+
+       ctx->csa.class_0_pending = 0;
 
        return -EIO;
 }
@@ -119,8 +124,8 @@ int spufs_handle_class1(struct spu_context *ctx)
         * in time, we can still expect to get the same fault
         * immediately after the context restore.
         */
-       ea = ctx->csa.dar;
-       dsisr = ctx->csa.dsisr;
+       ea = ctx->csa.class_1_dar;
+       dsisr = ctx->csa.class_1_dsisr;
 
        if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
                return 0;
@@ -158,7 +163,7 @@ int spufs_handle_class1(struct spu_context *ctx)
         * time slicing will not preempt the context while the page fault
         * handler is running. Context switch code removes mappings.
         */
-       ctx->csa.dar = ctx->csa.dsisr = 0;
+       ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;
 
        /*
         * If we handled the fault successfully and are in runnable
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 0c32a05ab068a1487485d8871ce26a847d50a784..f407b24718554f474a153dc9a66e93d88518739f 100644
@@ -23,6 +23,7 @@
 
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/fsnotify.h>
 #include <linux/backing-dev.h>
 #include <linux/init.h>
 #include <linux/ioctl.h>
@@ -223,7 +224,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
        parent = dir->d_parent->d_inode;
        ctx = SPUFS_I(dir->d_inode)->i_ctx;
 
-       mutex_lock(&parent->i_mutex);
+       mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
        ret = spufs_rmdir(parent, dir);
        mutex_unlock(&parent->i_mutex);
        WARN_ON(ret);
@@ -618,12 +619,15 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
        mode &= ~current->fs->umask;
 
        if (flags & SPU_CREATE_GANG)
-               return spufs_create_gang(nd->path.dentry->d_inode,
+               ret = spufs_create_gang(nd->path.dentry->d_inode,
                                         dentry, nd->path.mnt, mode);
        else
-               return spufs_create_context(nd->path.dentry->d_inode,
+               ret = spufs_create_context(nd->path.dentry->d_inode,
                                            dentry, nd->path.mnt, flags, mode,
                                            filp);
+       if (ret >= 0)
+               fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
+       return ret;
 
 out_dput:
        dput(dentry);
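
The new fsnotify_mkdir() call means directories made through spu_create(2) are finally reported to inotify/dnotify watchers; spufs creates them internally rather than via vfs_mkdir(), so they were invisible before. A minimal watcher, assuming spufs is mounted at /spu (the mount point is an assumption):

    #include <stdio.h>
    #include <sys/inotify.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t len;
            int fd = inotify_init();

            if (fd < 0 || inotify_add_watch(fd, "/spu", IN_CREATE) < 0) {
                    perror("inotify");
                    return 1;
            }
            while ((len = read(fd, buf, sizeof(buf))) > 0) {
                    /* only the first event per read() is decoded here */
                    struct inotify_event *ev = (struct inotify_event *)buf;

                    printf("created: %s\n", ev->len ? ev->name : "?");
            }
            return 0;
    }
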
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index a9c35b7b719fda3bd3850dc1dfdaa74d2e5acc6b..b7493b86581228d0edf8b0c0b691582e706bac58 100644
@@ -11,7 +11,7 @@
 #include "spufs.h"
 
 /* interrupt-level stop callback function. */
-void spufs_stop_callback(struct spu *spu)
+void spufs_stop_callback(struct spu *spu, int irq)
 {
        struct spu_context *ctx = spu->ctx;
 
@@ -24,9 +24,19 @@ void spufs_stop_callback(struct spu *spu)
         */
        if (ctx) {
                /* Copy exception arguments into module specific structure */
-               ctx->csa.class_0_pending = spu->class_0_pending;
-               ctx->csa.dsisr = spu->dsisr;
-               ctx->csa.dar = spu->dar;
+               switch (irq) {
+               case 0:
+                       ctx->csa.class_0_pending = spu->class_0_pending;
+                       ctx->csa.class_0_dsisr = spu->class_0_dsisr;
+                       ctx->csa.class_0_dar = spu->class_0_dar;
+                       break;
+               case 1:
+                       ctx->csa.class_1_dsisr = spu->class_1_dsisr;
+                       ctx->csa.class_1_dar = spu->class_1_dar;
+                       break;
+               case 2:
+                       break;
+               }
 
                /* ensure that the exception status has hit memory before a
                 * thread waiting on the context's stop queue is woken */
@@ -34,11 +44,6 @@ void spufs_stop_callback(struct spu *spu)
 
                wake_up_all(&ctx->stop_wq);
        }
-
-       /* Clear callback arguments from spu structure */
-       spu->class_0_pending = 0;
-       spu->dsisr = 0;
-       spu->dar = 0;
 }
 
 int spu_stopped(struct spu_context *ctx, u32 *stat)
@@ -56,7 +61,11 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
        if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
                return 1;
 
-       dsisr = ctx->csa.dsisr;
+       dsisr = ctx->csa.class_0_dsisr;
+       if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+               return 1;
+
+       dsisr = ctx->csa.class_1_dsisr;
        if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
                return 1;
 
@@ -294,7 +303,7 @@ static int spu_process_callback(struct spu_context *ctx)
        u32 ls_pointer, npc;
        void __iomem *ls;
        long spu_ret;
-       int ret, ret2;
+       int ret;
 
        /* get syscall block from local store */
        npc = ctx->ops->npc_read(ctx) & ~3;
@@ -316,11 +325,9 @@ static int spu_process_callback(struct spu_context *ctx)
                if (spu_ret <= -ERESTARTSYS) {
                        ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
                }
-               ret2 = spu_acquire(ctx);
+               mutex_lock(&ctx->state_mutex);
                if (ret == -ERESTARTSYS)
                        return ret;
-               if (ret2)
-                       return -EINTR;
        }
 
        /* need to re-get the ls, as it may have changed when we released the
@@ -343,13 +350,14 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
        if (mutex_lock_interruptible(&ctx->run_mutex))
                return -ERESTARTSYS;
 
-       spu_enable_spu(ctx);
        ctx->event_return = 0;
 
        ret = spu_acquire(ctx);
        if (ret)
                goto out_unlock;
 
+       spu_enable_spu(ctx);
+
        spu_update_sched_info(ctx);
 
        ret = spu_run_init(ctx, npc);
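
spufs_stop_callback() keeps the publish-then-wake discipline its comment describes: store the per-class exception state into the csa, make sure it has hit memory, then wake the threads sleeping on ctx->stop_wq. A rough user-space analogue, in which the mutex/condvar pair supplies the ordering the kernel gets from mb() (names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t stop_wq = PTHREAD_COND_INITIALIZER;
    static unsigned long class_1_dsisr;    /* published exception status */
    static int stopped;

    static void *stop_callback(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            class_1_dsisr = 0x40000000;    /* status must be visible ...  */
            stopped = 1;                   /* ... before the waiter wakes */
            pthread_cond_signal(&stop_wq);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_mutex_lock(&lock);
            pthread_create(&t, NULL, stop_callback, NULL);
            while (!stopped)
                    pthread_cond_wait(&stop_wq, &lock);
            printf("dsisr=%#lx\n", class_1_dsisr); /* store is visible */
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            return 0;
    }

Note also that spu_stopped() now tests both class_0_dsisr and class_1_dsisr, since either class can leave a fault for the controlling thread to service.
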
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 7298e7db2c8365cf83f8519350f917a01a35c8c3..2e411f23462b47417e50f965905a62f947c73f8f 100644
@@ -140,6 +140,9 @@ void __spu_update_sched_info(struct spu_context *ctx)
         * if it is timesliced or preempted.
         */
        ctx->cpus_allowed = current->cpus_allowed;
+
+       /* Save the current cpu id for spu interrupt routing. */
+       ctx->last_ran = raw_smp_processor_id();
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -243,7 +246,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
        spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
-       spu_cpu_affinity_set(spu, raw_smp_processor_id());
        spu_switch_notify(spu, ctx);
        ctx->state = SPU_STATE_RUNNABLE;
 
@@ -657,7 +659,8 @@ static struct spu *find_victim(struct spu_context *ctx)
 
                        victim->stats.invol_ctx_switch++;
                        spu->stats.invol_ctx_switch++;
-                       spu_add_to_rq(victim);
+                       if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
+                               spu_add_to_rq(victim);
 
                        mutex_unlock(&victim->state_mutex);
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 7312745b7540d4586fd72c7e4b57e9b5a9abd641..454c277c1457deaf4034b084cf95e438241c295c 100644
@@ -121,6 +121,7 @@ struct spu_context {
        cpumask_t cpus_allowed;
        int policy;
        int prio;
+       int last_ran;
 
        /* statistics */
        struct {
@@ -331,7 +332,7 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
 /* irq callback funcs. */
 void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
-void spufs_stop_callback(struct spu *spu);
+void spufs_stop_callback(struct spu *spu, int irq);
 void spufs_mfc_callback(struct spu *spu);
 void spufs_dma_callback(struct spu *spu, int type);
 
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index d2a1249d36dd2a7bc018727e09c3b7c57b88cf1c..3df9a36eb2f58efe6bcda41f7d27cf5861ac4e5b 100644
@@ -132,6 +132,14 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
        spu_int_mask_set(spu, 2, 0ul);
        eieio();
        spin_unlock_irq(&spu->register_lock);
+
+       /*
+        * This flag needs to be set before calling synchronize_irq so
+        * that the update will be visible to the relevant handlers
+        * via a simple load.
+        */
+       set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
+       clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
        synchronize_irq(spu->irqs[0]);
        synchronize_irq(spu->irqs[1]);
        synchronize_irq(spu->irqs[2]);
@@ -166,9 +174,8 @@ static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
        /* Save, Step 7:
         * Restore, Step 5:
         *     Set a software context switch pending flag.
+        *     Done above in Step 3 - disable_interrupts().
         */
-       set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
-       mb();
 }
 
 static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
@@ -186,20 +193,21 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
                                 MFC_CNTL_SUSPEND_COMPLETE);
                /* fall through */
        case MFC_CNTL_SUSPEND_COMPLETE:
-               if (csa) {
+               if (csa)
                        csa->priv2.mfc_control_RW =
-                               MFC_CNTL_SUSPEND_MASK |
+                               in_be64(&priv2->mfc_control_RW) |
                                MFC_CNTL_SUSPEND_DMA_QUEUE;
-               }
                break;
        case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
                POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                                  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
                                 MFC_CNTL_SUSPEND_COMPLETE);
-               if (csa) {
-                       csa->priv2.mfc_control_RW = 0;
-               }
+               if (csa)
+                       csa->priv2.mfc_control_RW =
+                               in_be64(&priv2->mfc_control_RW) &
+                               ~MFC_CNTL_SUSPEND_DMA_QUEUE &
+                               ~MFC_CNTL_SUSPEND_MASK;
                break;
        }
 }
@@ -249,16 +257,21 @@ static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
        }
 }
 
-static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
+static inline void save_mfc_stopped_status(struct spu_state *csa,
+               struct spu *spu)
 {
        struct spu_priv2 __iomem *priv2 = spu->priv2;
+       const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
+                       MFC_CNTL_DMA_QUEUES_EMPTY;
 
        /* Save, Step 12:
         *     Read MFC_CNTL[Ds].  Update saved copy of
         *     CSA.MFC_CNTL[Ds].
+        *
+        * update: do the same with MFC_CNTL[Q].
         */
-       csa->priv2.mfc_control_RW |=
-               in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING;
+       csa->priv2.mfc_control_RW &= ~mask;
+       csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
 }
 
 static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
@@ -462,7 +475,9 @@ static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
         * Restore, Step 14.
         *     Write MFC_CNTL[Pc]=1 (purge queue).
         */
-       out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
+       out_be64(&priv2->mfc_control_RW,
+                       MFC_CNTL_PURGE_DMA_REQUEST |
+                       MFC_CNTL_SUSPEND_MASK);
        eieio();
 }
 
@@ -725,10 +740,14 @@ static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
        /* Save, Step 48:
         * Restore, Step 23.
         *     Change the software context switch pending flag
-        *     to context switch active.
+        *     to context switch active.  This implementation does
+        *     not use a switch active flag.
         *
-        *     This implementation does not uses a switch active flag.
+        * Now that we have saved the mfc in the csa, we can add in the
+        * restart command if an exception occurred.
         */
+       if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
+               csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
        clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
        mb();
 }
@@ -1690,6 +1709,13 @@ static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
        eieio();
 }
 
+static inline void set_int_route(struct spu_state *csa, struct spu *spu)
+{
+       struct spu_context *ctx = spu->ctx;
+
+       spu_cpu_affinity_set(spu, ctx->last_ran);
+}
+
 static inline void restore_other_spu_access(struct spu_state *csa,
                                            struct spu *spu)
 {
@@ -1721,15 +1747,15 @@ static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
         */
        out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
        eieio();
+
        /*
-        * FIXME: this is to restart a DMA that we were processing
-        *        before the save. better remember the fault information
-        *        in the csa instead.
+        * The queue is put back into the same state it was in prior to the
+        * context switch. The suspend flag is added to the saved state in
+        * the csa, if the operational state was suspending or suspended. In
+        * this case, the code that suspended the mfc is responsible for
+        * continuing it. Note that SPE faults do not change the operational
+        * state of the spu.
         */
-       if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
-               out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
-               eieio();
-       }
 }
 
 static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
@@ -1788,7 +1814,7 @@ static int quiece_spu(struct spu_state *prev, struct spu *spu)
        save_spu_runcntl(prev, spu);            /* Step 9. */
        save_mfc_sr1(prev, spu);                /* Step 10. */
        save_spu_status(prev, spu);             /* Step 11. */
-       save_mfc_decr(prev, spu);               /* Step 12. */
+       save_mfc_stopped_status(prev, spu);     /* Step 12. */
        halt_mfc_decr(prev, spu);               /* Step 13. */
        save_timebase(prev, spu);               /* Step 14. */
        remove_other_spu_access(prev, spu);     /* Step 15. */
@@ -2000,6 +2026,7 @@ static void restore_csa(struct spu_state *next, struct spu *spu)
        check_ppuint_mb_stat(next, spu);        /* Step 67. */
        spu_invalidate_slbs(spu);               /* Modified Step 68. */
        restore_mfc_sr1(next, spu);             /* Step 69. */
+       set_int_route(next, spu);               /* NEW      */
        restore_other_spu_access(next, spu);    /* Step 70. */
        restore_spu_runcntl(next, spu);         /* Step 71. */
        restore_mfc_cntl(next, spu);            /* Step 72. */
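
The switch.c pieces fit together as one handshake. disable_interrupts() sets SPU_CONTEXT_SWITCH_PENDING before synchronize_irq(), so by the time those calls return every handler has either finished or seen the flag; spu_restart_dma() (spu_base.c above) then records SPU_CONTEXT_FAULT_PENDING instead of touching live hardware; and set_switch_active() folds the deferred restart into the saved mfc_control_RW so it fires on restore. set_int_route() likewise consumes the ctx->last_ran value recorded in sched.c, re-routing interrupts at restore time rather than at bind time. A condensed, single-threaded user-space model of the deferred-restart part (hypothetical names):

    #include <stdint.h>
    #include <stdio.h>

    #define SWITCH_PENDING (1u << 0)
    #define FAULT_PENDING  (1u << 1)
    #define RESTART_DMA    (1ull << 0) /* stand-in for MFC_CNTL_RESTART_DMA_COMMAND */

    static unsigned int flags;
    static uint64_t hw_mfc_control;     /* the "hardware" register */
    static uint64_t csa_mfc_control;    /* the saved copy in the csa */

    static void spu_restart_dma_model(void)
    {
            if (!(flags & SWITCH_PENDING))
                    hw_mfc_control |= RESTART_DMA;  /* no switch in flight */
            else
                    flags |= FAULT_PENDING;         /* defer to the save path */
    }

    static void context_switch_model(void)
    {
            flags |= SWITCH_PENDING;    /* set before synchronize_irq() */
            /* ... registers are saved into the csa here ... */
            if (flags & FAULT_PENDING)
                    csa_mfc_control |= RESTART_DMA; /* replay on restore */
            flags &= ~SWITCH_PENDING;
    }

    int main(void)
    {
            flags |= SWITCH_PENDING;    /* a switch is already in flight */
            spu_restart_dma_model();    /* fault handler runs mid-switch */
            context_switch_model();
            printf("restart deferred into csa: %s\n",
                   (csa_mfc_control & RESTART_DMA) ? "yes" : "no");
            return 0;
    }
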
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 52c74780f403c420c47bbcf13640173b6992852e..1702de9395eeee96b02d686e20550dbc98b8d63d 100644
@@ -2842,9 +2842,11 @@ static void dump_spu_fields(struct spu *spu)
        DUMP_FIELD(spu, "0x%lx", ls_size);
        DUMP_FIELD(spu, "0x%x", node);
        DUMP_FIELD(spu, "0x%lx", flags);
-       DUMP_FIELD(spu, "0x%lx", dar);
-       DUMP_FIELD(spu, "0x%lx", dsisr);
        DUMP_FIELD(spu, "%d", class_0_pending);
+       DUMP_FIELD(spu, "0x%lx", class_0_dar);
+       DUMP_FIELD(spu, "0x%lx", class_0_dsisr);
+       DUMP_FIELD(spu, "0x%lx", class_1_dar);
+       DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
        DUMP_FIELD(spu, "0x%lx", irqs[0]);
        DUMP_FIELD(spu, "0x%lx", irqs[1]);
        DUMP_FIELD(spu, "0x%lx", irqs[2]);
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index e3c845b0f76438af611c8450c36e4660b076207b..6abead6e681aacd732efc40c5fd0431824af8f37 100644
 
 /* Flag indicating progress during context switch. */
 #define SPU_CONTEXT_SWITCH_PENDING     0UL
+#define SPU_CONTEXT_FAULT_PENDING      1UL
 
 struct spu_context;
 struct spu_runqueue;
@@ -128,9 +129,11 @@ struct spu {
        unsigned int irqs[3];
        u32 node;
        u64 flags;
-       u64 dar;
-       u64 dsisr;
        u64 class_0_pending;
+       u64 class_0_dar;
+       u64 class_0_dsisr;
+       u64 class_1_dar;
+       u64 class_1_dsisr;
        size_t ls_size;
        unsigned int slb_replace;
        struct mm_struct *mm;
@@ -143,7 +146,7 @@ struct spu {
 
        void (* wbox_callback)(struct spu *spu);
        void (* ibox_callback)(struct spu *spu);
-       void (* stop_callback)(struct spu *spu);
+       void (* stop_callback)(struct spu *spu, int irq);
        void (* mfc_callback)(struct spu *spu);
 
        char irq_c0[8];
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 0ab6bff86078e955dd10c3da1c282fe29773f33f..129ec148d4512b1a63ac6dbcbf3321ac07712ba9 100644
@@ -254,7 +254,8 @@ struct spu_state {
        u64 spu_chnldata_RW[32];
        u32 spu_mailbox_data[4];
        u32 pu_mailbox_data[1];
-       u64 dar, dsisr, class_0_pending;
+       u64 class_0_dar, class_0_dsisr, class_0_pending;
+       u64 class_1_dar, class_1_dsisr;
        unsigned long suspend_time;
        spinlock_t register_lock;
 };