X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-s390%2Fpgalloc.h;h=f5b2bf3d7c1d4b7a103e3a1089360be7669cb848;hb=df1efe6f871e2d3f83e6ad7b7a1d2b728b478fc2;hp=e45d3c9a4b7ee65a99f684a4e24ddbd9d0e17432;hpb=6db602d447fb6c3aeb020c5dff5219de317f8bb4;p=linux-2.6
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index e45d3c9a4b..f5b2bf3d7c 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -19,219 +19,156 @@
 #define check_pgt_cache()	do {} while (0)
 
-/*
- * Page allocation orders.
- */
-#ifndef __s390x__
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	0
-# define PGD_ALLOC_ORDER	1
-#else /* __s390x__ */
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	2
-# define PGD_ALLOC_ORDER	2
-#endif /* __s390x__ */
+unsigned long *crst_table_alloc(struct mm_struct *, int);
+void crst_table_free(struct mm_struct *, unsigned long *);
 
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
+unsigned long *page_table_alloc(struct mm_struct *);
+void page_table_free(struct mm_struct *, unsigned long *);
+void disable_noexec(struct mm_struct *, struct task_struct *);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-	int i;
-
-	if (!pgd)
-		return NULL;
-	if (s390_noexec) {
-		pgd_t *shadow_pgd = (pgd_t *)
-			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pgd);
-
-		if (!shadow_pgd) {
-			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pgd;
-	}
-	for (i = 0; i < PTRS_PER_PGD; i++)
-#ifndef __s390x__
-		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
+	*s = val;
+	n = (n / 256) - 1;
+	asm volatile(
+#ifdef CONFIG_64BIT
+		"	mvc	8(248,%0),0(%0)\n"
 #else
-		pgd_clear(pgd + i);
+		"	mvc	4(252,%0),0(%0)\n"
 #endif
-	return pgd;
+		"0:	mvc	256(256,%0),0(%0)\n"
+		"	la	%0,256(%0)\n"
+		"	brct	%1,0b\n"
+		: "+a" (s), "+d" (n));
 }
 
-static inline void pgd_free(pgd_t *pgd)
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-
-	if (shadow_pgd)
-		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
-	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+	clear_table(crst, entry, sizeof(unsigned long)*2048);
+	crst = get_shadow_table(crst);
+	if (crst)
+		clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
-/*
- * page middle directory allocation/free routines.
- * We use pmd cache only on s390x, so these are dummy routines. This
- * code never triggers because the pgd will always be present.
- */
-#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x)			do { } while (0)
-#define __pmd_free_tlb(tlb,x)		do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
-#define pgd_populate_kernel(mm, pmd, pte)	BUG()
-#else /* __s390x__ */
-static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-	int i;
-
-	if (!pmd)
-		return NULL;
-	if (s390_noexec) {
-		pmd_t *shadow_pmd = (pmd_t *)
-			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pmd);
-
-		if (!shadow_pmd) {
-			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pmd;
-	}
-	for (i=0; i < PTRS_PER_PMD; i++)
-		pmd_clear(pmd + i);
-	return pmd;
+	return _SEGMENT_ENTRY_EMPTY;
 }
 
-static inline void pmd_free (pmd_t *pmd)
-{
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
+#define pud_free(mm, x)				do { } while (0)
 
-	if (shadow_pmd)
-		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
-	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-}
+#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
+#define pmd_free(mm, x)				do { } while (0)
 
-#define __pmd_free_tlb(tlb,pmd)			\
-	do {					\
-		tlb_flush_mmu(tlb, 0, 0);	\
-		pmd_free(pmd);			\
-	} while (0)
+#define pgd_populate(mm, pgd, pud)		BUG()
+#define pgd_populate_kernel(mm, pgd, pud)	BUG()
 
-static inline void
-pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
-{
-	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
-}
+#define pud_populate(mm, pud, pmd)		BUG()
+#define pud_populate_kernel(mm, pud, pmd)	BUG()
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
-{
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+#else /* __s390x__ */
 
-	if (shadow_pgd && shadow_pmd)
-		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
-	pgd_populate_kernel(mm, pgd, pmd);
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+{
+	if (mm->context.asce_limit <= (1UL << 31))
+		return _SEGMENT_ENTRY_EMPTY;
+	if (mm->context.asce_limit <= (1UL << 42))
+		return _REGION3_ENTRY_EMPTY;
+	return _REGION2_ENTRY_EMPTY;
 }
-#endif /* __s390x__ */
+int crst_table_upgrade(struct mm_struct *, unsigned long limit);
+void crst_table_downgrade(struct mm_struct *, unsigned long limit);
 
-static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-#ifndef __s390x__
-	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
-	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
-	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
-	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
-#else /* __s390x__ */
-	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
-	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
-#endif /* __s390x__ */
+	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	if (table)
+		crst_table_init(table, _REGION3_ENTRY_EMPTY);
+	return (pud_t *) table;
 }
+#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
 
-static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	pte_t *pte = (pte_t *)page_to_phys(page);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
-	pte_t *shadow_pte = get_shadow_pte(pte);
+	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	if (table)
+		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+	return (pmd_t *) table;
+}
+#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
 
-	pmd_populate_kernel(mm, pmd, pte);
-	if (shadow_pmd && shadow_pte)
-		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
+static inline void pgd_populate_kernel(struct mm_struct *mm,
+				       pgd_t *pgd, pud_t *pud)
+{
+	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
 }
 
-/*
- * page table entry allocation/free routines.
- */
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
-	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	int i;
-
-	if (!pte)
-		return NULL;
-	if (s390_noexec) {
-		pte_t *shadow_pte = (pte_t *)
-			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		struct page *page = virt_to_page(pte);
-
-		if (!shadow_pte) {
-			free_page((unsigned long) pte);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pte;
-	}
-	for (i=0; i < PTRS_PER_PTE; i++) {
-		pte_clear(mm, vmaddr, pte + i);
-		vmaddr += PAGE_SIZE;
+	pgd_populate_kernel(mm, pgd, pud);
+	if (mm->context.noexec) {
+		pgd = get_shadow_table(pgd);
+		pud = get_shadow_table(pud);
+		pgd_populate_kernel(mm, pgd, pud);
 	}
-	return pte;
 }
 
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+static inline void pud_populate_kernel(struct mm_struct *mm,
+				       pud_t *pud, pmd_t *pmd)
 {
-	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
-	if (pte)
-		return virt_to_page(pte);
-	return NULL;
+	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
 }
 
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	pte_t *shadow_pte = get_shadow_pte(pte);
+	pud_populate_kernel(mm, pud, pmd);
+	if (mm->context.noexec) {
+		pud = get_shadow_table(pud);
+		pmd = get_shadow_table(pmd);
+		pud_populate_kernel(mm, pud, pmd);
+	}
+}
+
+#endif /* __s390x__ */
 
-	if (shadow_pte)
-		free_page((unsigned long) shadow_pte);
-	free_page((unsigned long) pte);
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	INIT_LIST_HEAD(&mm->context.crst_list);
+	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
 }
+#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
-static inline void pte_free(struct page *pte)
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+				       pmd_t *pmd, pte_t *pte)
 {
-	struct page *shadow_page = get_shadow_page(pte);
+	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+}
 
-	if (shadow_page)
-		__free_page(shadow_page);
-	__free_page(pte);
+static inline void pmd_populate(struct mm_struct *mm,
+				pmd_t *pmd, pgtable_t pte)
+{
+	pmd_populate_kernel(mm, pmd, pte);
+	if (mm->context.noexec) {
+		pmd = get_shadow_table(pmd);
+		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
+	}
 }
 
-#define __pte_free_tlb(tlb, pte) \
-({ \
-	struct mmu_gather *__tlb = (tlb); \
-	struct page *__pte = (pte); \
-	struct page *shadow_page = get_shadow_page(__pte); \
-	if (shadow_page) \
-		tlb_remove_page(__tlb, shadow_page); \
-	tlb_remove_page(__tlb, __pte); \
-})
+#define pmd_pgtable(pmd) \
+	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
+
+/*
+ * page table entry allocation/free routines.
+ */ +#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) +#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) + +#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) +#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) #endif /* _S390_PGALLOC_H */