X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Fpowerpc%2Fmm%2Fpgtable_64.c;h=365e61ae5dbcf54f7f4789d2cfed5bb7072201e3;hb=88bef5a4074e0568cf54df410f41065c06694d8a;hp=ad6e135bf212ac1b77c87ac1ed8cd8b704b93e28;hpb=cab8e5c4444cb7d9b8035de5d81fbfd5284a02fa;p=linux-2.6

diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ad6e135bf2..365e61ae5d 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -7,7 +7,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -34,41 +33,27 @@
 #include
 #include
 #include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
 #include
 #include
-#include
-#include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-#include
 #include
-#include
 #include
 #include
 #include
-#include
 #include
-#include
 #include
 #include
 
 #include "mmu_decl.h"
 
-unsigned long ioremap_bot = IMALLOC_BASE;
-static unsigned long phbs_io_bot = PHBS_IO_BASE;
+unsigned long ioremap_bot = IOREMAP_BASE;
 
 /*
  * map_io_page currently only called by __ioremap
@@ -103,7 +88,7 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
	 *
	 */
	if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
-			      mmu_io_psize)) {
+			      mmu_io_psize, mmu_kernel_ssize)) {
		printk(KERN_ERR "Failed to do bolted mapping IO "
		       "memory at %016lx !\n", pa);
		return -ENOMEM;
@@ -113,26 +98,56 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 }
 
 
-static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
-				    unsigned long ea, unsigned long size,
+/**
+ * __ioremap_at - Low level function to establish the page tables
+ *                for an IO mapping
+ */
+void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
 {
	unsigned long i;
 
+	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);
 
+	/* Non-cacheable page cannot be coherent */
+	if (flags & _PAGE_NO_CACHE)
+		flags &= ~_PAGE_COHERENT;
+
+	/* We don't support the 4K PFN hack with ioremap */
+	if (flags & _PAGE_4K_PFN)
+		return NULL;
+
+	WARN_ON(pa & ~PAGE_MASK);
+	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+	WARN_ON(size & ~PAGE_MASK);
+
	for (i = 0; i < size; i += PAGE_SIZE)
-		if (map_io_page(ea+i, pa+i, flags))
+		if (map_io_page((unsigned long)ea+i, pa+i, flags))
			return NULL;
 
-	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+	return (void __iomem *)ea;
+}
+
+/**
+ * __iounmap_at - Low level function to tear down the page tables
+ *                for an IO mapping. This is used for mappings that
+ *                are manipulated manually, like partial unmapping of
+ *                PCI IOs or ISA space.
+ */
+void __iounmap_at(void *ea, unsigned long size)
+{
+	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+	WARN_ON(size & ~PAGE_MASK);
+
+	unmap_kernel_range((unsigned long)ea, size);
 }
 
 void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
 {
-	unsigned long pa, ea;
+	phys_addr_t paligned;
	void __iomem *ret;
 
	/*
@@ -144,27 +159,30 @@ void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
	 * IMALLOC_END
	 *
	 */
-	pa = addr & PAGE_MASK;
-	size = PAGE_ALIGN(addr + size) - pa;
+	paligned = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - paligned;
 
-	if ((size == 0) || (pa == 0))
+	if ((size == 0) || (paligned == 0))
		return NULL;
 
	if (mem_init_done) {
		struct vm_struct *area;
-		area = im_get_free_area(size);
+
+		area = __get_vm_area(size, VM_IOREMAP,
+				     ioremap_bot, IOREMAP_END);
		if (area == NULL)
			return NULL;
-		ea = (unsigned long)(area->addr);
-		ret = __ioremap_com(addr, pa, ea, size, flags);
+		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
-			im_free(area->addr);
+			vunmap(area->addr);
	} else {
-		ea = ioremap_bot;
-		ret = __ioremap_com(addr, pa, ea, size, flags);
+		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}
+
+	if (ret)
+		ret += addr & ~PAGE_MASK;
	return ret;
 }
 
@@ -181,68 +199,22 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size)
 void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
 {
+	/* writeable implies dirty for kernel addresses */
+	if (flags & _PAGE_RW)
+		flags |= _PAGE_DIRTY;
+
+	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
+	flags &= ~(_PAGE_USER | _PAGE_EXEC);
+
	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags);
	return __ioremap(addr, size, flags);
 }
 
-#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
-
-int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
-		       unsigned long size, unsigned long flags)
-{
-	struct vm_struct *area;
-	void __iomem *ret;
-
-	/* For now, require page-aligned values for pa, ea, and size */
-	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
-	    !IS_PAGE_ALIGNED(size)) {
-		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
-		return 1;
-	}
-
-	if (!mem_init_done) {
-		/* Two things to consider in this case:
-		 * 1) No records will be kept (imalloc, etc) that the region
-		 *    has been remapped
-		 * 2) It won't be easy to iounmap() the region later (because
-		 *    of 1)
-		 */
-		;
-	} else {
-		area = im_get_area(ea, size,
-			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
-		if (area == NULL) {
-			/* Expected when PHB-dlpar is in play */
-			return 1;
-		}
-		if (ea != (unsigned long) area->addr) {
-			printk(KERN_ERR "unexpected addr return from "
-			       "im_get_area\n");
-			return 1;
-		}
-	}
-
-	ret = __ioremap_com(pa, pa, ea, size, flags);
-	if (ret == NULL) {
-		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
-		return 1;
-	}
-	if (ret != (void *) ea) {
-		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
-		return 1;
-	}
-
-	return 0;
-}
-
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
- *
- * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
 void __iounmap(volatile void __iomem *token)
 {
@@ -251,9 +223,14 @@ void __iounmap(volatile void __iomem *token)
	void *addr;
 
	if (!mem_init_done)
		return;
 
-	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
-
-	im_free(addr);
+	addr = (void *) ((unsigned long __force)
+			 PCI_FIX_ADDR(token) & PAGE_MASK);
+	if ((unsigned long)addr < ioremap_bot) {
+		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
+		       " at 0x%p\n", addr);
+		return;
+	}
+	vunmap(addr);
 }
 
@@ -264,77 +241,10 @@ void iounmap(volatile void __iomem *token)
	__iounmap(token);
 }
 
-static int iounmap_subset_regions(unsigned long addr, unsigned long size)
-{
-	struct vm_struct *area;
-
-	/* Check whether subsets of this region exist */
-	area = im_get_area(addr, size, IM_REGION_SUPERSET);
-	if (area == NULL)
-		return 1;
-
-	while (area) {
-		iounmap((void __iomem *) area->addr);
-		area = im_get_area(addr, size,
-				IM_REGION_SUPERSET);
-	}
-
-	return 0;
-}
-
-int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
-{
-	struct vm_struct *area;
-	unsigned long addr;
-	int rc;
-
-	addr = (unsigned long __force) start & PAGE_MASK;
-
-	/* Verify that the region either exists or is a subset of an existing
-	 * region. In the latter case, split the parent region to create
-	 * the exact region
-	 */
-	area = im_get_area(addr, size,
-			   IM_REGION_EXISTS | IM_REGION_SUBSET);
-	if (area == NULL) {
-		/* Determine whether subset regions exist. If so, unmap */
-		rc = iounmap_subset_regions(addr, size);
-		if (rc) {
-			printk(KERN_ERR
-			       "%s() cannot unmap nonexistent range 0x%lx\n",
-			       __FUNCTION__, addr);
-			return 1;
-		}
-	} else {
-		iounmap((void __iomem *) area->addr);
-	}
-	/*
-	 * FIXME! This can't be right:
-	iounmap(area->addr);
-	 * Maybe it should be "iounmap(area);"
-	 */
-	return 0;
-}
-
 EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_at);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
-
-static DEFINE_SPINLOCK(phb_io_lock);
-
-void __iomem * reserve_phb_iospace(unsigned long size)
-{
-	void __iomem *virt_addr;
-
-	if (phbs_io_bot >= IMALLOC_BASE)
-		panic("reserve_phb_iospace(): phb io space overflow\n");
-
-	spin_lock(&phb_io_lock);
-	virt_addr = (void __iomem *) phbs_io_bot;
-	phbs_io_bot += size;
-	spin_unlock(&phb_io_lock);
-
-	return virt_addr;
-}
+EXPORT_SYMBOL(__iounmap_at);
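
The net effect of the patch is to retire the imalloc allocator in favour of the generic vmalloc infrastructure: __ioremap() now carves its virtual range out of [ioremap_bot, IOREMAP_END] with __get_vm_area(), and the newly exported __ioremap_at()/__iounmap_at() pair lets platform code establish and tear down page-aligned IO mappings at a caller-chosen kernel virtual address (the partial unmapping of PCI IO or ISA space mentioned in the kernel-doc above). As a rough usage sketch only, not code from the patch, something like the following could drive the new pair; the EXAMPLE_* physical address, virtual address and size are hypothetical placeholders:

/* Hedged sketch: map a 64K IO window with the new low-level API,
 * then unmap just its last page.  All EXAMPLE_* values are made up. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#define EXAMPLE_IO_PHYS	0xf4000000ul			/* hypothetical bus address */
#define EXAMPLE_IO_VIRT	((void *)0xd000080080000000ul)	/* hypothetical VA */
#define EXAMPLE_IO_SIZE	0x10000ul			/* 64K, page aligned */

static int example_map_io_window(void)
{
	void __iomem *va;

	/* __ioremap_at() WARNs if pa/ea/size are not page aligned and,
	 * per the hunk above, returns the ea it was handed on success. */
	va = __ioremap_at(EXAMPLE_IO_PHYS, EXAMPLE_IO_VIRT, EXAMPLE_IO_SIZE,
			  _PAGE_NO_CACHE | _PAGE_GUARDED);
	if (va == NULL)
		return -ENOMEM;

	/* ... device accesses through readb()/writeb() on va ... */

	/* Partial teardown is the point of the low-level API: unmap the
	 * last page of the window while the rest stays mapped. */
	__iounmap_at(EXAMPLE_IO_VIRT + EXAMPLE_IO_SIZE - PAGE_SIZE,
		     PAGE_SIZE);
	return 0;
}

This mirrors what __ioremap() itself now does: pick a virtual range (via __get_vm_area() once mem_init_done, or by bumping ioremap_bot before that), delegate to __ioremap_at(), and release the range with vunmap() on failure.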