[POWERPC] spu_base: fix initialisation on systems with no SPEs

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index bd7bffc3ddd08be19caf0a15572b5e727c6f0cd2..8086eb1ed60d95e85ab47d5e17aca84854855e7d 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
 #include <asm/xmon.h>
 
 const struct spu_management_ops *spu_management_ops;
+EXPORT_SYMBOL_GPL(spu_management_ops);
+
 const struct spu_priv1_ops *spu_priv1_ops;
 
+static struct list_head spu_list[MAX_NUMNODES];
+static LIST_HEAD(spu_full_list);
+static DEFINE_MUTEX(spu_mutex);
+static spinlock_t spu_list_lock = SPIN_LOCK_UNLOCKED;
+
 EXPORT_SYMBOL_GPL(spu_priv1_ops);
 
+void spu_invalidate_slbs(struct spu *spu)
+{
+       struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+       if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
+               out_be64(&priv2->slb_invalidate_all_W, 0UL);
+}
+EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
+
+/* This is called by the MM core when a segment size is changed, to
+ * request a flush of all the SPEs using a given mm
+ */
+void spu_flush_all_slbs(struct mm_struct *mm)
+{
+       struct spu *spu;
+       unsigned long flags;
+
+       spin_lock_irqsave(&spu_list_lock, flags);
+       list_for_each_entry(spu, &spu_full_list, full_list) {
+               if (spu->mm == mm)
+                       spu_invalidate_slbs(spu);
+       }
+       spin_unlock_irqrestore(&spu_list_lock, flags);
+}
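
A sketch of the call site that comment describes: update_context_psize() is
a made-up stand-in for whatever MM-core code changes a context's segment
size (illustration only, not part of this patch):

	/* Hypothetical caller.  Once the segment page size changes, any
	 * SPE still caching SLB entries for this mm carries stale
	 * page-size bits, so its SLBs must be invalidated before the
	 * new mappings are touched.
	 */
	static void update_context_psize(struct mm_struct *mm, int psize)
	{
		mm->context.user_psize = psize;	/* new user segment size */
		spu_flush_all_slbs(mm);		/* drop stale SPE SLBs   */
	}
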
+
+/* The hack below stinks... try to do something better one of
+ * these days... Does it even work properly with NR_CPUS == 1 ?
+ */
+static inline void mm_needs_global_tlbie(struct mm_struct *mm)
+{
+       int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
+
+       /* Global TLBIE broadcast required with SPEs. */
+       __cpus_setall(&mm->cpu_vm_mask, nr);
+}
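
Why forcing every bit on achieves a broadcast: the hash flush path only uses
the CPU-local tlbiel when the mm has run on the current CPU alone. A sketch,
simplified from that era's flush logic (flush_one_hpte() is a made-up
wrapper; the upstream predicate also considers mm_users):

	/* With cpu_vm_mask forced to all-ones by mm_needs_global_tlbie(),
	 * cpus_equal() can never match a single-CPU mask, so the flush
	 * falls back to the broadcast tlbie, which the SPE MMUs snoop.
	 */
	static void flush_one_hpte(struct mm_struct *mm, unsigned long va,
				   real_pte_t pte, int psize)
	{
		int local = 0;

		if (cpus_equal(mm->cpu_vm_mask,
			       cpumask_of_cpu(smp_processor_id())))
			local = 1;		/* tlbiel: this CPU only */
		flush_hash_page(va, pte, psize, local);
	}
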
+
+void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&spu_list_lock, flags);
+       spu->mm = mm;
+       spin_unlock_irqrestore(&spu_list_lock, flags);
+       if (mm)
+               mm_needs_global_tlbie(mm);
+}
+EXPORT_SYMBOL_GPL(spu_associate_mm);
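
Usage sketch for the new export: the spufs scheduler is the intended caller,
pairing a bind on context load with an unbind on save (the ctx->owner
expression follows spufs conventions; illustrative only):

	spu_associate_mm(spu, ctx->owner);	/* context loaded onto spu  */
	/* ... context runs ... */
	spu_associate_mm(spu, NULL);		/* context saved, mm detached */

Setting spu->mm under spu_list_lock means spu_flush_all_slbs(), which walks
spu_full_list under the same lock, always sees a consistent spu/mm pairing.
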
+
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
        pr_debug("%s\n", __FUNCTION__);
@@ -74,6 +129,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        struct mm_struct *mm = spu->mm;
        u64 esid, vsid, llp;
+       int psize;
 
        pr_debug("%s\n", __FUNCTION__);
 
@@ -90,22 +146,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
        case USER_REGION_ID:
 #ifdef CONFIG_HUGETLB_PAGE
                if (in_hugepage_area(mm->context, ea))
-                       llp = mmu_psize_defs[mmu_huge_psize].sllp;
+                       psize = mmu_huge_psize;
                else
 #endif
-                       llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+                       psize = mm->context.user_psize;
                vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
-                               SLB_VSID_USER | llp;
+                               SLB_VSID_USER;
                break;
        case VMALLOC_REGION_ID:
-               llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+               if (ea < VMALLOC_END)
+                       psize = mmu_vmalloc_psize;
+               else
+                       psize = mmu_io_psize;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
-                       SLB_VSID_KERNEL | llp;
+                       SLB_VSID_KERNEL;
                break;
        case KERNEL_REGION_ID:
-               llp = mmu_psize_defs[mmu_linear_psize].sllp;
+               psize = mmu_linear_psize;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
-                       SLB_VSID_KERNEL | llp;
+                       SLB_VSID_KERNEL;
                break;
        default:
                /* Future: support kernel segments so that drivers
@@ -114,9 +173,10 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }
+       llp = mmu_psize_defs[psize].sllp;
 
        out_be64(&priv2->slb_index_W, spu->slb_replace);
-       out_be64(&priv2->slb_vsid_RW, vsid);
+       out_be64(&priv2->slb_vsid_RW, vsid | llp);
        out_be64(&priv2->slb_esid_RW, esid);
 
        spu->slb_replace++;
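
The hunk above defers the page-size lookup so the LP bits are resolved once,
after psize is chosen per region. The resulting MMIO sequence, pulled out as
a sketch (later kernels grew a helper much like this; hypothetical here):

	/* Load one SPE SLB entry: select the slot, write the VSID with
	 * the page-size bits already OR-ed in, then the ESID, whose
	 * valid bit arms the entry.
	 */
	static void spu_load_slb(struct spu *spu, int slot, u64 esid, u64 vsid)
	{
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		out_be64(&priv2->slb_index_W, slot);
		out_be64(&priv2->slb_vsid_RW, vsid);
		out_be64(&priv2->slb_esid_RW, esid);
	}
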
@@ -170,9 +230,11 @@ int
 spu_irq_class_0_bottom(struct spu *spu)
 {
        unsigned long stat, mask;
+       unsigned long flags;
 
        spu->class_0_pending = 0;
 
+       spin_lock_irqsave(&spu->register_lock, flags);
        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0);
 
@@ -188,6 +250,7 @@ spu_irq_class_0_bottom(struct spu *spu)
                __spu_trap_error(spu);
 
        spu_int_stat_clear(spu, 0, stat);
+       spin_unlock_irqrestore(&spu->register_lock, flags);
 
        return (stat & 0x7) ? -EIO : 0;
 }
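
The locking added above, distilled: reading and clearing a shared interrupt
status register must be atomic with respect to the hard IRQ handler that
latches new bits, or an event raised between the read and the clear is
acknowledged without being handled. The protected shape (stat, mask and
flags as in the function above):

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);
	/* ... dispatch handlers on stat ... */
	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);
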
@@ -229,7 +292,6 @@ spu_irq_class_1(int irq, void *data)
 
        return stat ? IRQ_HANDLED : IRQ_NONE;
 }
-EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
 
 static irqreturn_t
 spu_irq_class_2(int irq, void *data)
@@ -327,10 +389,6 @@ static void spu_free_irqs(struct spu *spu)
                free_irq(spu->irqs[2], spu);
 }
 
-static struct list_head spu_list[MAX_NUMNODES];
-static LIST_HEAD(spu_full_list);
-static DEFINE_MUTEX(spu_mutex);
-
 static void spu_init_channels(struct spu *spu)
 {
        static const struct {
@@ -374,10 +432,11 @@ struct spu *spu_alloc_node(int node)
                spu = list_entry(spu_list[node].next, struct spu, list);
                list_del_init(&spu->list);
                pr_debug("Got SPU %d %d\n", spu->number, spu->node);
-               spu_init_channels(spu);
        }
        mutex_unlock(&spu_mutex);
 
+       if (spu)
+               spu_init_channels(spu);
        return spu;
 }
 EXPORT_SYMBOL_GPL(spu_alloc_node);
@@ -404,108 +463,6 @@ void spu_free(struct spu *spu)
 }
 EXPORT_SYMBOL_GPL(spu_free);
 
-static int spu_handle_mm_fault(struct spu *spu)
-{
-       struct mm_struct *mm = spu->mm;
-       struct vm_area_struct *vma;
-       u64 ea, dsisr, is_write;
-       int ret;
-
-       ea = spu->dar;
-       dsisr = spu->dsisr;
-#if 0
-       if (!IS_VALID_EA(ea)) {
-               return -EFAULT;
-       }
-#endif /* XXX */
-       if (mm == NULL) {
-               return -EFAULT;
-       }
-       if (mm->pgd == NULL) {
-               return -EFAULT;
-       }
-
-       down_read(&mm->mmap_sem);
-       vma = find_vma(mm, ea);
-       if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= ea)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-#if 0
-       if (expand_stack(vma, ea))
-               goto bad_area;
-#endif /* XXX */
-good_area:
-       is_write = dsisr & MFC_DSISR_ACCESS_PUT;
-       if (is_write) {
-               if (!(vma->vm_flags & VM_WRITE))
-                       goto bad_area;
-       } else {
-               if (dsisr & MFC_DSISR_ACCESS_DENIED)
-                       goto bad_area;
-               if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-                       goto bad_area;
-       }
-       ret = 0;
-       switch (handle_mm_fault(mm, vma, ea, is_write)) {
-       case VM_FAULT_MINOR:
-               current->min_flt++;
-               break;
-       case VM_FAULT_MAJOR:
-               current->maj_flt++;
-               break;
-       case VM_FAULT_SIGBUS:
-               ret = -EFAULT;
-               goto bad_area;
-       case VM_FAULT_OOM:
-               ret = -ENOMEM;
-               goto bad_area;
-       default:
-               BUG();
-       }
-       up_read(&mm->mmap_sem);
-       return ret;
-
-bad_area:
-       up_read(&mm->mmap_sem);
-       return -EFAULT;
-}
-
-int spu_irq_class_1_bottom(struct spu *spu)
-{
-       u64 ea, dsisr, access, error = 0UL;
-       int ret = 0;
-
-       ea = spu->dar;
-       dsisr = spu->dsisr;
-       if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
-               u64 flags;
-
-               access = (_PAGE_PRESENT | _PAGE_USER);
-               access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
-               local_irq_save(flags);
-               if (hash_page(ea, access, 0x300) != 0)
-                       error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-               local_irq_restore(flags);
-       }
-       if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
-               if ((ret = spu_handle_mm_fault(spu)) != 0)
-                       error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-               else
-                       error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
-       }
-       spu->dar = 0UL;
-       spu->dsisr = 0UL;
-       if (!error) {
-               spu_restart_dma(spu);
-       } else {
-               spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
-       }
-       return ret;
-}
-
 struct sysdev_class spu_sysdev_class = {
        set_kset_name("spu")
 };
@@ -579,17 +536,12 @@ static int spu_create_sysdev(struct spu *spu)
        return 0;
 }
 
-static void spu_destroy_sysdev(struct spu *spu)
-{
-       sysfs_remove_device_from_node(&spu->sysdev, spu->node);
-       sysdev_unregister(&spu->sysdev);
-}
-
 static int __init create_spu(void *data)
 {
        struct spu *spu;
        int ret;
        static int number;
+       unsigned long flags;
 
        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
@@ -617,8 +569,10 @@ static int __init create_spu(void *data)
                goto out_free_irqs;
 
        mutex_lock(&spu_mutex);
+       spin_lock_irqsave(&spu_list_lock, flags);
        list_add(&spu->list, &spu_list[spu->node]);
        list_add(&spu->full_list, &spu_full_list);
+       spin_unlock_irqrestore(&spu_list_lock, flags);
        mutex_unlock(&spu_mutex);
 
        goto out;
@@ -633,58 +587,37 @@ out:
        return ret;
 }
 
-static void destroy_spu(struct spu *spu)
-{
-       list_del_init(&spu->list);
-       list_del_init(&spu->full_list);
-
-       spu_destroy_sysdev(spu);
-       spu_free_irqs(spu);
-       spu_destroy_spu(spu);
-       kfree(spu);
-}
-
-static void cleanup_spu_base(void)
-{
-       struct spu *spu, *tmp;
-       int node;
-
-       mutex_lock(&spu_mutex);
-       for (node = 0; node < MAX_NUMNODES; node++) {
-               list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
-                       destroy_spu(spu);
-       }
-       mutex_unlock(&spu_mutex);
-       sysdev_class_unregister(&spu_sysdev_class);
-}
-module_exit(cleanup_spu_base);
-
 static int __init init_spu_base(void)
 {
-       int i, ret;
+       int i, ret = 0;
+
+       for (i = 0; i < MAX_NUMNODES; i++)
+               INIT_LIST_HEAD(&spu_list[i]);
 
        if (!spu_management_ops)
-               return 0;
+               goto out;
 
        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
-               return ret;
-
-       for (i = 0; i < MAX_NUMNODES; i++)
-               INIT_LIST_HEAD(&spu_list[i]);
+               goto out;
 
        ret = spu_enumerate_spus(create_spu);
 
        if (ret) {
                printk(KERN_WARNING "%s: Error initializing spus\n",
                        __FUNCTION__);
-               cleanup_spu_base();
-               return ret;
+               goto out_unregister_sysdev_class;
        }
 
        xmon_register_spus(&spu_full_list);
 
+       return 0;
+
+ out_unregister_sysdev_class:
+       sysdev_class_unregister(&spu_sysdev_class);
+ out:
+
        return ret;
 }
 module_init(init_spu_base);
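
Why the INIT_LIST_HEAD() loop now runs before the spu_management_ops check
(the headline fix): on a machine with no SPEs, init_spu_base() bails out
early, yet spu_alloc_node() can still be called and will test
spu_list[node]. A static list_head left in zeroed BSS has next == NULL, so
list_empty() wrongly reports it non-empty and list_entry() then chases a
NULL pointer. A self-contained illustration (demo function name is made up):

	static void __init demo_zeroed_list_head(void)
	{
		static struct list_head head;	/* BSS: head.next == NULL */

		/* Before INIT_LIST_HEAD(): list_empty() tests
		 * head.next == &head, and NULL != &head, so the head
		 * looks non-empty even though nothing was ever added.
		 */
		INIT_LIST_HEAD(&head);		/* head points at itself */
		BUG_ON(!list_empty(&head));	/* now genuinely empty   */
	}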