#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H
-#include <asm-generic/4level-fixup.h>
+#include <asm-generic/pgtable-nopmd.h>
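/*
 * pgtable-nopmd.h folds the pud and pmd levels into the pgd, so the
 * trivial two-level helpers below no longer need to be open-coded in
 * this file.  As a rough sketch (illustrative; see the generic header
 * for the real definitions), it provides the equivalent of:
 *
 *	static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 *	{
 *		return (pmd_t *)pud;
 *	}
 */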
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
-#include <asm/processor.h> /* For TASK_SIZE */
-#include <asm/mmu.h>
-#include <asm/page.h>
#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
-struct mm_struct;
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
/*
 * For >32-bit physical address platforms, an 8KB pgdir can be used,
 * with the MS 13 bits indexing the second level: the combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
* -Matt
*/
-/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
-#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT)
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
/* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT PMD_SHIFT
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
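/*
 * Worked example (illustrative, not part of the original file): with
 * 4KB pages and 32-bit PTEs, PTE_SHIFT is 10, so PGDIR_SHIFT is 22 and
 * each top-level entry maps a 4MB region.  An address decomposes as:
 *
 *	unsigned long addr = 0x12345678;
 *	unsigned long dir = addr >> PGDIR_SHIFT;	pgd index (0x48)
 *	unsigned long idx = (addr >> PAGE_SHIFT)
 *				& (PTRS_PER_PTE - 1);	pte index (0x345)
 *	unsigned long off = addr & (PAGE_SIZE - 1);	byte offset (0x678)
 */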
#define pte_ERROR(e) \
printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
(unsigned long long)pte_val(e))
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
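/*
 * Usage sketch (illustrative, names are generic mm ones): a PTE for a
 * struct page is typically built with mk_pte() and installed with
 * set_pte_at():
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * pfn_pte() shifts the pfn by PFN_SHIFT_OFFSET rather than PAGE_SHIFT
 * directly, so sub-arches with >32-bit physical addresses can place the
 * page number correctly within a 64-bit pte_basic_t.
 */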
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[1024];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
#endif /* __ASSEMBLY__ */
#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
#ifndef __ASSEMBLY__
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-#define pgd_clear(xp) do { } while (0)
-
-#define pgd_page_vaddr(pgd) \
- ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
-
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-static inline pte_t pte_rdprotect(pte_t pte) {
- pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_exprotect(pte_t pte) {
- pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkread(pte_t pte) {
- pte_val(pte) |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte) {
- pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
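/*
 * These helpers take and return the pte by value, so they compose
 * freely; e.g. (sketch only) a writable, dirty entry can be built as:
 *
 *	pte_t pte = pte_mkdirty(pte_mkwrite(mk_pte(page, prot)));
 */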
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
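/*
 * Caller-side sketch (illustrative, not from this patch): page aging in
 * generic mm code uses the returned value as a reference hint:
 *
 *	if (ptep_test_and_clear_young(vma, address, ptep))
 *		referenced++;
 *
 * The MMU context id is passed down so that, on hash-table CPUs, the
 * hashed copy of a _PAGE_HASHPTE pte can be flushed as well.
 */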
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
-}
-
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}
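/*
 * Caller-side sketch (illustrative): zap paths fetch and clear an entry
 * in one atomic step; everything except _PAGE_HASHPTE is cleared, so a
 * stale hash-table entry can still be found and flushed later:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);
 *	if (pte_dirty(old))
 *		set_page_dirty(page);
 */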
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } while(0)
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } \
+ __changed; \
+})
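/*
 * Returning whether the entry actually changed lets generic code skip
 * the MMU update on spurious faults.  A caller would look roughly like
 * this (sketch, not part of this file):
 *
 *	entry = pte_mkyoung(orig_pte);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_access))
 *		update_mmu_cache(vma, address, entry);
 */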
/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

/* To find an entry in a page-table-directory */
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-/* Find an entry in the second-level page table.. */
-static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
-{
- return (pmd_t *) dir;
-}
-
/* Find an entry in the third-level page table.. */
#define pte_index(address) \
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
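/*
 * Putting the levels together, a full walk to a user PTE looks roughly
 * like this (sketch; the pud and pmd steps are no-ops with the folded
 * pgtable-nopmd.h layout):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */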
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-extern void paging_init(void);
-
/*
* Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
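/*
 * Round-trip sketch (illustrative): both encodings survive conversion
 * through a PTE:
 *
 *	pte_t pte = __swp_entry_to_pte(__swp_entry(type, offset));
 *	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != (type));
 *
 *	pte = pgoff_to_pte(pgoff);
 *	BUG_ON(!pte_file(pte) || pte_to_pgoff(pte) != (pgoff));
 */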
-#ifdef CONFIG_APUS
-/* For virtual address to physical address conversion */
-extern void cache_clear(__u32 addr, int length);
-extern void cache_push(__u32 addr, int length);
-extern int mm_end_of_chunk (unsigned long addr, int len);
-extern unsigned long iopa(unsigned long addr);
-extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
-
-/* Values for nocacheflag and cmode */
-/* These are not used by the APUS kernel_map, but prevents
- compilation errors. */
-#define KERNELMAP_FULL_CACHING 0
-#define KERNELMAP_NOCACHE_SER 1
-#define KERNELMAP_NOCACHE_NONSER 2
-#define KERNELMAP_NO_COPYBACK 3
-
-/*
- * Map some physical address range into the kernel address space.
- */
-extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
- int nocacheflag, unsigned long *memavailp );
-
-/*
- * Set cache mode of (kernel space) address range.
- */
-extern void kernel_set_cachemode (unsigned long address, unsigned long size,
-				 unsigned int cmode);
-#endif /* CONFIG_APUS */
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-#define kern_addr_valid(addr) (1)
-
-#ifdef CONFIG_PHYS_64BIT
-extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long paddr, unsigned long size, pgprot_t prot);
-
-static inline int io_remap_pfn_range(struct vm_area_struct *vma,
- unsigned long vaddr,
- unsigned long pfn,
- unsigned long size,
- pgprot_t prot)
-{
- phys_addr_t paddr64 = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
- return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
-}
-#else
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-#endif
-
/*
* No page table caches to initialise
*/