/* NOTE(review): removed gitweb scrape residue (blob-view banner lines) that
 * was not part of the original source file.
 */
1 /* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
2 #ifndef _SPARC64_PGALLOC_H
3 #define _SPARC64_PGALLOC_H
4
5 #include <linux/config.h>
6 #include <linux/kernel.h>
7 #include <linux/sched.h>
8 #include <linux/mm.h>
9
10 #include <asm/spitfire.h>
11 #include <asm/cpudata.h>
12 #include <asm/cacheflush.h>
13 #include <asm/page.h>
14
/* Page table allocation/freeing.
 *
 * Freed page-table pages are kept on singly linked "quicklists": the
 * first word of each free page stores the address of the next free
 * page, so pages can be reused without a round trip through the page
 * allocator.  On SMP the lists live in the per-cpu cpudata (hence
 * preempt_disable() around every list access below); on UP a single
 * global pgtable_cache_struct is used instead.
 */
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists  local_cpu_data()
#else
extern struct pgtable_cache_struct {
        unsigned long *pgd_cache;       /* head of pgd-page freelist */
        unsigned long *pte_cache;       /* head of pte/pmd-page freelist */
        unsigned int pgcache_size;      /* pages cached on both lists */
} pgt_quicklists;
#endif
#define pgd_quicklist           (pgt_quicklists.pgd_cache)
#define pte_quicklist           (pgt_quicklists.pte_cache)
#define pgtable_cache_size      (pgt_quicklists.pgcache_size)
29
30 static inline void free_pgd_fast(pgd_t *pgd)
31 {
32         preempt_disable();
33         *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
34         pgd_quicklist = (unsigned long *) pgd;
35         pgtable_cache_size++;
36         preempt_enable();
37 }
38
39 static inline pgd_t *get_pgd_fast(void)
40 {
41         unsigned long *ret;
42
43         preempt_disable();
44         if((ret = pgd_quicklist) != NULL) {
45                 pgd_quicklist = (unsigned long *)(*ret);
46                 ret[0] = 0;
47                 pgtable_cache_size--;
48                 preempt_enable();
49         } else {
50                 preempt_enable();
51                 ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
52                 if(ret)
53                         memset(ret, 0, PAGE_SIZE);
54         }
55         return (pgd_t *)ret;
56 }
57
58 static inline void free_pgd_slow(pgd_t *pgd)
59 {
60         free_page((unsigned long)pgd);
61 }
62
/* Install a pmd table into a pud entry; just wraps pud_set(). */
#define pud_populate(MM, PUD, PMD)      pud_set(PUD, PMD)
64
65 static inline pmd_t *pmd_alloc_one_fast(void)
66 {
67         unsigned long *ret;
68
69         preempt_disable();
70         ret = (unsigned long *) pte_quicklist;
71         if (likely(ret)) {
72                 pte_quicklist = (unsigned long *)(*ret);
73                 ret[0] = 0;
74                 pgtable_cache_size--;
75         }
76         preempt_enable();
77
78         return (pmd_t *) ret;
79 }
80
81 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
82 {
83         pmd_t *pmd;
84
85         pmd = pmd_alloc_one_fast();
86         if (unlikely(!pmd)) {
87                 pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
88                 if (pmd)
89                         memset(pmd, 0, PAGE_SIZE);
90         }
91         return pmd;
92 }
93
94 static inline void free_pmd_fast(pmd_t *pmd)
95 {
96         preempt_disable();
97         *(unsigned long *)pmd = (unsigned long) pte_quicklist;
98         pte_quicklist = (unsigned long *) pmd;
99         pgtable_cache_size++;
100         preempt_enable();
101 }
102
103 static inline void free_pmd_slow(pmd_t *pmd)
104 {
105         free_page((unsigned long)pmd);
106 }
107
/* Hook a pte table into a pmd entry.  The kernel variant takes the
 * pte table's virtual address directly; the user variant takes its
 * struct page and converts with page_address().
 */
#define pmd_populate_kernel(MM, PMD, PTE)       pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE)           \
        pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
111
112 static inline pte_t *pte_alloc_one_fast(void)
113 {
114         unsigned long *ret;
115
116         preempt_disable();
117         ret = (unsigned long *) pte_quicklist;
118         if (likely(ret)) {
119                 pte_quicklist = (unsigned long *)(*ret);
120                 ret[0] = 0;
121                 pgtable_cache_size--;
122         }
123         preempt_enable();
124
125         return (pte_t *) ret;
126 }
127
128 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
129 {
130         pte_t *ptep = pte_alloc_one_fast();
131
132         if (likely(ptep))
133                 return ptep;
134
135         return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
136 }
137
138 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
139 {
140         pte_t *pte = pte_alloc_one_fast();
141
142         if (likely(pte))
143                 return virt_to_page(pte);
144
145         return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
146 }
147
148 static inline void free_pte_fast(pte_t *pte)
149 {
150         preempt_disable();
151         *(unsigned long *)pte = (unsigned long) pte_quicklist;
152         pte_quicklist = (unsigned long *) pte;
153         pgtable_cache_size++;
154         preempt_enable();
155 }
156
157 static inline void free_pte_slow(pte_t *pte)
158 {
159         free_page((unsigned long) pte);
160 }
161
/* Free a kernel pte table: always recycled via the quicklist rather
 * than handed back to the page allocator.
 */
static inline void pte_free_kernel(pte_t *pte)
{
        free_pte_fast(pte);
}
166
167 static inline void pte_free(struct page *ptepage)
168 {
169         free_pte_fast(page_address(ptepage));
170 }
171
/* Entry points used by the generic mm code: pgd and pmd pages are
 * always recycled through the quicklist fast paths.
 */
#define pmd_free(pmd)           free_pmd_fast(pmd)
#define pgd_free(pgd)           free_pgd_fast(pgd)
#define pgd_alloc(mm)           get_pgd_fast()
175
176 #endif /* _SPARC64_PGALLOC_H */