[POWERPC] ppc405 Fix arithmetic rollover bug when memory size under 16M
author Grant Likely <grant.likely@secretlab.ca>
Wed, 31 Oct 2007 06:41:20 +0000 (17:41 +1100)
committer Josh Boyer <jwboyer@linux.vnet.ibm.com>
Thu, 1 Nov 2007 12:15:59 +0000 (07:15 -0500)
mmu_mapin_ram() loops over total_lowmem to set up page tables.  However, if
total_lowmem is less than 16M, the subtraction in the loop condition
(total_lowmem - LARGE_PAGE_SIZE_16M) wraps around and results in a number
just under 4G (because total_lowmem is an unsigned value).
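
As a rough userspace sketch of the wraparound (not kernel code; uint32_t
stands in for ppc32's 32-bit unsigned long, and the 8M value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define LARGE_PAGE_SIZE_16M ((uint32_t)16 << 20)

int main(void)
{
	uint32_t total_lowmem = (uint32_t)8 << 20;	/* pretend the board has 8M */

	/* Unsigned subtraction wraps: 8M - 16M comes out just under 4G. */
	uint32_t limit = total_lowmem - LARGE_PAGE_SIZE_16M;

	printf("loop limit = 0x%08x\n", (unsigned)limit);	/* prints 0xff800000 */
	return 0;
}

With a limit like that, the old "s <= total_lowmem - LARGE_PAGE_SIZE_16M"
test is effectively always true, so pages get mapped well past real RAM.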

This patch reworks the loop from counting up to counting down, which
eliminates the bug.
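
For the same illustrative 8M case, the old and new loop conditions already
differ on the very first pass (again a hypothetical userspace sketch, not
kernel code):

#include <stdint.h>
#include <stdio.h>

#define LARGE_PAGE_SIZE_16M ((uint32_t)16 << 20)

int main(void)
{
	uint32_t total_lowmem = (uint32_t)8 << 20;	/* illustrative value only */

	/* Old count-up loop starts at s = 0; the wrapped limit makes the
	 * condition true, so 16M pages would be mapped past real RAM. */
	uint32_t s_old = 0;
	printf("old condition: %s\n",
	       s_old <= total_lowmem - LARGE_PAGE_SIZE_16M ? "true (bug)" : "false");

	/* New count-down loop starts at s = total_lowmem; 8M >= 16M is false,
	 * so no 16M pages are mapped and the 4M loop is tried next. */
	uint32_t s_new = total_lowmem;
	printf("new condition: %s\n",
	       s_new >= LARGE_PAGE_SIZE_16M ? "true" : "false");
	return 0;
}

Counting down from total_lowmem also means the amount actually mapped is
simply total_lowmem - s at the end, which is what the new return statement
reports.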

Special thanks to Magnus Hjorth, who wrote the original patch to fix this
bug.  This patch improves on it by simplifying the loop code (which also
eliminates the possibility of another rollover at the high end) and by
applying the same change to arch/powerpc.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
arch/powerpc/mm/40x_mmu.c
arch/ppc/mm/4xx_mmu.c

diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index e067df836be20954b319b7c2efe7aa6731f4b762..3899ea97fbdff935b23869fbe8c8db9c6de78283 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -98,13 +98,12 @@ unsigned long __init mmu_mapin_ram(void)
 
        v = KERNELBASE;
        p = PPC_MEMSTART;
-       s = 0;
+       s = total_lowmem;
 
-       if (__map_without_ltlbs) {
-               return s;
-       }
+       if (__map_without_ltlbs)
+               return 0;
 
-       while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+       while (s >= LARGE_PAGE_SIZE_16M) {
                pmd_t *pmdp;
                unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -116,10 +115,10 @@ unsigned long __init mmu_mapin_ram(void)
 
                v += LARGE_PAGE_SIZE_16M;
                p += LARGE_PAGE_SIZE_16M;
-               s += LARGE_PAGE_SIZE_16M;
+               s -= LARGE_PAGE_SIZE_16M;
        }
 
-       while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+       while (s >= LARGE_PAGE_SIZE_4M) {
                pmd_t *pmdp;
                unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -128,8 +127,8 @@ unsigned long __init mmu_mapin_ram(void)
 
                v += LARGE_PAGE_SIZE_4M;
                p += LARGE_PAGE_SIZE_4M;
-               s += LARGE_PAGE_SIZE_4M;
+               s -= LARGE_PAGE_SIZE_4M;
        }
 
-       return s;
+       return total_lowmem - s;
 }
diff --git a/arch/ppc/mm/4xx_mmu.c b/arch/ppc/mm/4xx_mmu.c
index 838e09db71d97f56ca3721917dd2fd0ada843042..ea785dbaac7cec8441925edd6e0be9688406759d 100644
--- a/arch/ppc/mm/4xx_mmu.c
+++ b/arch/ppc/mm/4xx_mmu.c
@@ -99,13 +99,12 @@ unsigned long __init mmu_mapin_ram(void)
 
        v = KERNELBASE;
        p = PPC_MEMSTART;
-       s = 0;
+       s = total_lowmem;
 
-       if (__map_without_ltlbs) {
-               return s;
-       }
+       if (__map_without_ltlbs)
+               return 0;
 
-       while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+       while (s >= LARGE_PAGE_SIZE_16M) {
                pmd_t *pmdp;
                unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -117,10 +116,10 @@ unsigned long __init mmu_mapin_ram(void)
 
                v += LARGE_PAGE_SIZE_16M;
                p += LARGE_PAGE_SIZE_16M;
-               s += LARGE_PAGE_SIZE_16M;
+               s -= LARGE_PAGE_SIZE_16M;
        }
 
-       while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+       while (s >= LARGE_PAGE_SIZE_4M) {
                pmd_t *pmdp;
                unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -129,8 +128,8 @@ unsigned long __init mmu_mapin_ram(void)
 
                v += LARGE_PAGE_SIZE_4M;
                p += LARGE_PAGE_SIZE_4M;
-               s += LARGE_PAGE_SIZE_4M;
+               s -= LARGE_PAGE_SIZE_4M;
        }
 
-       return s;
+       return total_lowmem - s;
 }