err.no Git - linux-2.6/commitdiff

Fix race conditions for read_c0_entryhi. Remove broken ASID masks in
tlb-sb1.c. Make tlb-r4k.c and tlb-sb1.c more similar and more efficient.

author     Thiemo Seufer <ths@networkno.de>
           Sat, 2 Apr 2005 10:21:56 +0000 (10:21 +0000)
committer  Ralf Baechle <ralf@linux-mips.org>
           Sat, 29 Oct 2005 18:31:01 +0000 (19:31 +0100)

Signed-off-by: Thiemo Seufer <ths@networkno.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlb-sb1.c
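
The race in question: several of these routines read read_c0_entryhi() (whose
low bits hold the current ASID) and write the value back at the end, but the
read used to happen before local_irq_save(), and local_flush_tlb_mm() called
smp_processor_id() without preemption disabled. An interrupt or a migration to
another CPU in that window leaves a stale value to be restored. The hunks below
move the reads inside the critical sections. A minimal sketch of the resulting
ordering, using only the CP0 accessors that appear in these files;
flush_one_sketch() is a hypothetical name, not a kernel function:

static void flush_one_sketch(unsigned long page)
{
        unsigned long flags;
        int oldpid;

        local_irq_save(flags);          /* close the race window first ...     */
        oldpid = read_c0_entryhi();     /* ... then sample the live EntryHi    */
        page &= (PAGE_MASK << 1);       /* one VPN2 covers an even/odd page pair */
        write_c0_entryhi(page);
        tlb_probe();
        /* ... invalidate the matching entry as in local_flush_tlb_one() ...   */
        write_c0_entryhi(oldpid);       /* restore exactly what was read above */
        local_irq_restore(flags);
}

In the same spirit, local_flush_tlb_mm() now brackets smp_processor_id() with
preempt_disable()/preempt_enable(), since the CPU number is only stable while
preemption is off.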

index 08702202758d4a20ef3de5645aba6e91afa96d1e..316c8a3d6b53705b2a40413449af7b8a2989e541 100644 (file)
 
 extern void build_tlb_refill_handler(void);
 
+/*
+ * Make sure all entries differ.  If they're not different
+ * MIPS32 will take revenge ...
+ */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
 /* CP0 hazard avoidance. */
 #define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
                                     "nop; nop; nop; nop; nop; nop;\n\t" \
@@ -42,11 +48,8 @@ void local_flush_tlb_all(void)
 
        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
-               /*
-                * Make sure all entries differ.  If they're not different
-                * MIPS32 will take revenge ...
-                */
-               write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
@@ -57,12 +60,21 @@ void local_flush_tlb_all(void)
        local_irq_restore(flags);
 }
 
+/* All entries common to a mm share an asid.  To effectively flush
+   these entries, we just bump the asid. */
 void local_flush_tlb_mm(struct mm_struct *mm)
 {
-       int cpu = smp_processor_id();
+       int cpu;
+
+       preempt_disable();
 
-       if (cpu_context(cpu, mm) != 0)
-               drop_mmu_context(mm,cpu);
+       cpu = smp_processor_id();
+
+       if (cpu_context(cpu, mm) != 0) {
+               drop_mmu_context(mm, cpu);
+       }
+
+       preempt_enable();
 }
 
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -75,9 +87,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long flags;
                int size;
 
-               local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;
+               local_irq_save(flags);
                if (size <= current_cpu_data.tlbsize/2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);
@@ -99,8 +111,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
-                               write_c0_entryhi(CKSEG0 +
-                                                (idx << (PAGE_SHIFT + 1)));
+                               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
@@ -118,9 +129,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
        unsigned long flags;
        int size;
 
-       local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
+       local_irq_save(flags);
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();
 
@@ -142,7 +153,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
-                       write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+                       write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
@@ -176,7 +187,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
-               write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
@@ -197,8 +208,8 @@ void local_flush_tlb_one(unsigned long page)
        int oldpid, idx;
 
        local_irq_save(flags);
-       page &= (PAGE_MASK << 1);
        oldpid = read_c0_entryhi();
+       page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
@@ -208,7 +219,7 @@ void local_flush_tlb_one(unsigned long page)
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
-               write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
@@ -238,9 +249,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = read_c0_entryhi() & ASID_MASK;
-
        local_irq_save(flags);
+
+       pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
@@ -260,14 +271,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
 #endif
-       write_c0_entryhi(address | pid);
        mtc0_tlbw_hazard();
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
-       write_c0_entryhi(pid);
        local_irq_restore(flags);
 }
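
A note on UNIQUE_ENTRYHI(idx): an indexed write that invalidates an entry must
leave behind a VPN2 that cannot match any other entry, because duplicate
EntryHi values make MIPS32 cores misbehave ("take revenge"). The macro parks
invalidated entries at distinct addresses in CKSEG0, the unmapped kernel
segment. Illustrative values only, assuming a 4 KiB page size (PAGE_SHIFT ==
12) and the 32-bit CKSEG0 base of 0x80000000; each TLB entry maps an even/odd
page pair, hence the PAGE_SHIFT + 1 stride:

        /* UNIQUE_ENTRYHI(0) == 0x80000000
         * UNIQUE_ENTRYHI(1) == 0x80002000   (one 8 KiB page pair further)
         * UNIQUE_ENTRYHI(2) == 0x80004000
         */

Flushing a whole mm never walks the TLB at all: as the new comment above
local_flush_tlb_mm() says, all entries of an mm share an ASID, so
drop_mmu_context() just gives the mm a fresh ASID and the old entries become
unreachable. The loop reordering in tlb-sb1.c below, which writes
UNIQUE_ENTRYHI(idx) only after the "if (idx < 0) continue;" test, also avoids a
pointless EntryHi write when the probe misses.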
 
index 6256cafcf3a25863859caabb448c859beff3c753..bba7130e75476e3c913bda1139659cd515872b42 100644 (file)
@@ -94,7 +94,7 @@ void local_flush_tlb_all(void)
 
        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
-       old_ctx = read_c0_entryhi() & ASID_MASK;
+       old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
 
@@ -144,17 +144,17 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
 {
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long flags;
-       int cpu;
+       int cpu = smp_processor_id();
 
-       local_irq_save(flags);
-       cpu = smp_processor_id();
        if (cpu_context(cpu, mm) != 0) {
+               unsigned long flags;
                int size;
+
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;
+               local_irq_save(flags);
                if (size <= (current_cpu_data.tlbsize/2)) {
-                       int oldpid = read_c0_entryhi() & ASID_MASK;
+                       int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);
 
                        start &= (PAGE_MASK << 1);
@@ -169,17 +169,17 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
-                               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                if (idx < 0)
                                        continue;
+                               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                tlb_write_indexed();
                        }
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
+               local_irq_restore(flags);
        }
-       local_irq_restore(flags);
 }
 
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -189,7 +189,6 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
-
        local_irq_save(flags);
        if (size <= (current_cpu_data.tlbsize/2)) {
                int pid = read_c0_entryhi();
@@ -207,9 +206,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
-                       write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        if (idx < 0)
                                continue;
+                       write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        tlb_write_indexed();
                }
                write_c0_entryhi(pid);
@@ -221,15 +220,16 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-       unsigned long flags;
        int cpu = smp_processor_id();
 
-       local_irq_save(flags);
        if (cpu_context(cpu, vma->vm_mm) != 0) {
+               unsigned long flags;
                int oldpid, newpid, idx;
+
                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
-               oldpid = read_c0_entryhi() & ASID_MASK;
+               local_irq_save(flags);
+               oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                tlb_probe();
                idx = read_c0_index();
@@ -240,10 +240,11 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                tlb_write_indexed();
+
        finish:
                write_c0_entryhi(oldpid);
+               local_irq_restore(flags);
        }
-       local_irq_restore(flags);
 }
 
 /*
@@ -255,18 +256,17 @@ void local_flush_tlb_one(unsigned long page)
        unsigned long flags;
        int oldpid, idx;
 
-       page &= (PAGE_MASK << 1);
-       oldpid = read_c0_entryhi() & ASID_MASK;
-
        local_irq_save(flags);
+       oldpid = read_c0_entryhi();
+       page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        tlb_probe();
        idx = read_c0_index();
+       write_c0_entrylo0(0);
+       write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
-               write_c0_entrylo0(0);
-               write_c0_entrylo1(0);
                tlb_write_indexed();
        }
 
@@ -297,6 +297,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
        unsigned long flags;
        pgd_t *pgdp;
+       pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;
@@ -311,19 +312,26 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 
        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
-       write_c0_entryhi(address | (pid));
+       write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        tlb_probe();
-       pmdp = pmd_offset(pgdp, address);
+       pudp = pud_offset(pgdp, address);
+       pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
+
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+       write_c0_entrylo0(ptep->pte_high);
+       ptep++;
+       write_c0_entrylo1(ptep->pte_high);
+#else
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
-       if (idx < 0) {
+#endif
+       if (idx < 0)
                tlb_write_random();
-       } else {
+       else
                tlb_write_indexed();
-       }
        local_irq_restore(flags);
 }
 
@@ -336,7 +344,8 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long old_ctx;
 
        local_irq_save(flags);
-       old_ctx = read_c0_entryhi() & 0xff;
+       /* Save old context and create impossible VPN2 value */
+       old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
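
On the removed ASID masks: tlb-sb1.c used to save only the ASID field of
EntryHi (read_c0_entryhi() & ASID_MASK, or a hard-coded & 0xff in
add_wired_entry()) before overwriting the register, then restored that
truncated value. Saving and restoring the whole register is cheaper, matches
tlb-r4k.c, and drops the hard-coded 0xff that merely duplicated (and risked
diverging from) ASID_MASK. A minimal sketch of the resulting save/restore
idiom; with_saved_entryhi_sketch() is a hypothetical helper, not kernel code:

static void with_saved_entryhi_sketch(unsigned long new_hi)
{
        unsigned long flags, old_ctx;

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();    /* full register, no ASID_MASK / 0xff */
        write_c0_entryhi(new_hi);
        /* ... program EntryLo0/EntryLo1 and PageMask, write the TLB ...      */
        write_c0_entryhi(old_ctx);      /* put back exactly what was there    */
        local_irq_restore(flags);
}

The __update_tlb() hunk above also brings tlb-sb1.c in line with tlb-r4k.c by
adding the pud_offset() step of the four-level page-table walk and the
pte_high handling for CONFIG_64BIT_PHYS_ADDR on 32-bit CPUs.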