/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *              Modified RID allocation for SMP
 *          Goutham Rao <goutham.rao@intel.com>
 *              IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 * Copyright (C) 2007 Intel Corp
 *      Fenghua Yu <fenghua.yu@intel.com>
 *      Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlb.h>

static struct {
        unsigned long mask;     /* mask of supported purge page-sizes */
        unsigned long max_bits; /* log2 of largest supported purge page-size */
} purge;

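/*
 * Context (RID) allocator state. Allocation starts at .next = 1:
 * context number 0 is never handed out, and this file uses
 * mm->context == 0 to mean "no context allocated yet" (see the UP path
 * in flush_tlb_range() below).
 */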
struct ia64_ctx ia64_ctx = {
        .lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
        .next = 1,
        .max_ctx = ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* number of TR slots on the current CPU */
DEFINE_PER_CPU(u8, ia64_tr_used); /* highest TR slot number used by the kernel */

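/*
 * Per-CPU shadow of every translation register the kernel has inserted,
 * indexed as [cpu][0 = instruction TRs, 1 = data TRs][slot]. The 0/1
 * split matches the target_mask bits used by ia64_itr_entry() and
 * ia64_ptr_entry() below.
 */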
struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on the
 * maximum RID that is supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
        /* one bit per context: (max_ctx+1)>>3 is the bitmap size in bytes */
        ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
        ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
}

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
        int i, cpu;
        unsigned long flush_bit;

        /*
         * Fold the flushmap into the allocation bitmap: any context that
         * was freed (flushed) since the last wrap becomes available again.
         */
        for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
                flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
                ia64_ctx.bitmap[i] ^= flush_bit;
        }

        /* use offset at 300 to skip daemons */
        ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
                                ia64_ctx.max_ctx, 300);
        ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
                                ia64_ctx.max_ctx, ia64_ctx.next);

        /*
         * can't call flush_tlb_all() here because of race condition
         * with O(1) scheduler [EF]
         */
        cpu = get_cpu(); /* prevent preemption/migration */
        for_each_online_cpu(i)
                if (i != cpu)
                        per_cpu(ia64_need_tlb_flush, i) = 1;
        put_cpu();
        local_flush_tlb_all();
}

/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
        atomic_t        cur;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
        atomic_set(&ss->cur, val);
}

static inline void down_spin(struct spinaphore *ss)
{
        /*
         * Take a unit if one is available (atomic_add_unless() refuses
         * to take cur below zero); otherwise spin with plain reads until
         * a unit is released, so waiters don't hammer the cache line
         * with failing atomic operations.
         */
        while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
                while (atomic_read(&ss->cur) == 0)
                        cpu_relax();
}

static inline void up_spin(struct spinaphore *ss)
{
        atomic_add(1, &ss->cur);
}
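/*
 * Typical use, as in ia64_global_tlb_purge() below: initialize the
 * spinaphore to the number of CPUs allowed into the critical section,
 * then bracket that section with down_spin()/up_spin():
 *
 *	down_spin(&ptcg_sem);
 *	... issue ptc.ga ...
 *	up_spin(&ptcg_sem);
 */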

static struct spinaphore ptcg_sem;
static u16 nptcg = 1;                  /* max CPUs that may issue ptc.g at once */
static int need_ptcg_sem = 1;          /* must ptcg_sem be taken for global purges? */
static int toolatetochangeptcgsem = 0; /* set once the first global purge has run */

/*
 * The kernel parameter "nptcg=" overrides the maximum number of concurrent
 * global TLB purges, which is otherwise reported by either PAL or the SAL
 * PALO table.
 *
 * The nptcg value is not sanity-checked; it is the user's responsibility
 * to supply a value that is valid for the platform. Otherwise, the kernel
 * may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
        int value = 0;

        get_option(&str, &value);
        setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

        return 1;
}

__setup("nptcg=", set_nptcg);
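/*
 * Example (the value is purely illustrative): booting with "nptcg=2" on
 * the kernel command line forces nptcg to 2, so at most two CPUs issue
 * ptc.g/ptc.ga concurrently regardless of what PAL or PALO reported.
 */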

/*
 * The maximum number of simultaneous ptc.g purges in the system can
 * be defined by PAL_VM_SUMMARY (in which case we should take
 * the smallest value for any cpu in the system) or by the PAL
 * override table (in which case we should ignore the value from
 * PAL_VM_SUMMARY).
 *
 * The kernel parameter "nptcg=" overrides the maximum number of
 * simultaneous ptc.g purges defined by either PAL_VM_SUMMARY or the PAL
 * override table, in which case both of those values are ignored.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully set up until we start bringing cpus online.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
        static int kp_override;
        static int palo_override;
        static int firstcpu = 1;

        if (toolatetochangeptcgsem) {
                BUG_ON(max_purges < nptcg);
                return;
        }

        if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
                kp_override = 1;
                nptcg = max_purges;
                goto resetsema;
        }
        if (kp_override) {
                need_ptcg_sem = num_possible_cpus() > nptcg;
                return;
        }

        if (nptcg_from == NPTCG_FROM_PALO) {
                palo_override = 1;

                /* In PALO, max_purges == 0 really does mean zero! */
                if (max_purges == 0)
                        panic("Whoa! Platform does not support global TLB purges.\n");
                nptcg = max_purges;
                if (nptcg == PALO_MAX_TLB_PURGES) {
                        need_ptcg_sem = 0;
                        return;
                }
                goto resetsema;
        }
        if (palo_override) {
                if (nptcg != PALO_MAX_TLB_PURGES)
                        need_ptcg_sem = (num_possible_cpus() > nptcg);
                return;
        }

        /* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
        if (max_purges == 0)
                max_purges = 1;

        if (firstcpu) {
                nptcg = max_purges;
                firstcpu = 0;
        }
        if (max_purges < nptcg)
                nptcg = max_purges;
        if (nptcg == PAL_MAX_PURGES) {
                need_ptcg_sem = 0;
                return;
        } else
                need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
        spinaphore_init(&ptcg_sem, max_purges);
}
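/*
 * To summarize the precedence implemented above: the "nptcg=" kernel
 * parameter beats the PALO override table, which in turn beats
 * PAL_VM_SUMMARY (for which the smallest value reported by any CPU is
 * kept). In each case the spinaphore is only armed (need_ptcg_sem) when
 * more CPUs are possible than concurrent purges are allowed, and it is
 * bypassed entirely when the platform reports its "unlimited" sentinel
 * (PALO_MAX_TLB_PURGES or PAL_MAX_PURGES).
 */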

void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
                       unsigned long end, unsigned long nbits)
{
        struct mm_struct *active_mm = current->active_mm;

        toolatetochangeptcgsem = 1;

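        /*
         * ptc.ga purges by region ID, so the RIDs of the mm being flushed
         * must be live in the region registers while we loop. If mm is not
         * the current active_mm, temporarily install its context (it is
         * restored near the end of this function); if either mm is gone,
         * punt to a full flush.
         */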
        if (mm != active_mm) {
                /* Restore region IDs for mm */
                if (mm && active_mm) {
                        activate_context(mm);
                } else {
                        flush_tlb_all();
                        return;
                }
        }

        if (need_ptcg_sem)
                down_spin(&ptcg_sem);

        do {
                /*
                 * Flush ALAT entries also.
                 */
                ia64_ptcga(start, (nbits << 2));
                ia64_srlz_i();
                start += (1UL << nbits);
        } while (start < end);

        if (need_ptcg_sem)
                up_spin(&ptcg_sem);

        if (mm != active_mm) {
                activate_context(active_mm);
        }
}

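/*
 * Purge the local TLB using ptc.e. The ptc.e parameters, cached into
 * local_cpu_data by ia64_tlb_init() below, prescribe the loop shape:
 * count[0] x count[1] ptc.e operations, starting at ptce_base and
 * stepping by the two strides.
 */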
void
local_flush_tlb_all (void)
{
        unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

        addr    = local_cpu_data->ptce_base;
        count0  = local_cpu_data->ptce_count[0];
        count1  = local_cpu_data->ptce_count[1];
        stride0 = local_cpu_data->ptce_stride[0];
        stride1 = local_cpu_data->ptce_stride[1];

        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
                        ia64_ptce(addr);
                        addr += stride1;
                }
                addr += stride0;
        }
        local_irq_restore(flags);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}

void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
                 unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long size = end - start;
        unsigned long nbits;

#ifndef CONFIG_SMP
        if (mm != current->active_mm) {
                mm->context = 0;
                return;
        }
#endif

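        /*
         * Pick a purge page size: take the log2 of the range (rounded up
         * to a 4 KB granule), widen it to the next page size the hardware
         * can actually purge (purge.mask), and cap it at the largest
         * supported size. start is then aligned down to that size so the
         * ptc loop below covers the whole [start, end) range.
         */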
        nbits = ia64_fls(size + 0xfff);
        while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
                        (nbits < purge.max_bits))
                ++nbits;
        if (nbits > purge.max_bits)
                nbits = purge.max_bits;
        start &= ~((1UL << nbits) - 1);

        preempt_disable();
#ifdef CONFIG_SMP
        if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
                platform_global_tlb_purge(mm, start, end, nbits);
                preempt_enable();
                return;
        }
#endif
        do {
                ia64_ptcl(start, (nbits<<2));
                start += (1UL << nbits);
        } while (start < end);
        preempt_enable();
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);

void __devinit
ia64_tlb_init (void)
{
        ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
        unsigned long tr_pgbits;
        long status;
        pal_vm_info_1_u_t vm_info_1;
        pal_vm_info_2_u_t vm_info_2;
        int cpu = smp_processor_id();

        if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
                printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
                       "defaulting to architected purge page-sizes.\n", status);
                purge.mask = 0x115557000UL;
        }
        purge.max_bits = ia64_fls(purge.mask);

        ia64_get_ptce(&ptce_info);
        local_cpu_data->ptce_base = ptce_info.base;
        local_cpu_data->ptce_count[0] = ptce_info.count[0];
        local_cpu_data->ptce_count[1] = ptce_info.count[1];
        local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
        local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

        local_flush_tlb_all();  /* nuke leftovers from bootstrapping... */
        status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

        if (status) {
                printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
                per_cpu(ia64_tr_num, cpu) = 8;
                return;
        }
        per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
        if (per_cpu(ia64_tr_num, cpu) >
                                (vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
                per_cpu(ia64_tr_num, cpu) =
                                vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
        if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
                per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
                printk(KERN_DEBUG "TR register number exceeds "
                        "IA64_TR_ALLOC_MAX! IA64_TR_ALLOC_MAX should be "
                        "extended\n");
        }
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
        u64 tr_log_size;
        u64 tr_end;
        u64 va_rr = ia64_get_rr(va);
        u64 va_rid = RR_TO_RID(va_rr);
        u64 va_end = va + (1UL << log_size) - 1;

        /* Different region IDs can never conflict. */
        if (va_rid != RR_TO_RID(p->rr))
                return 0;
        tr_log_size = (p->itir & 0xff) >> 2;
        tr_end = p->ifa + (1UL << tr_log_size) - 1;

        /* Two ranges overlap unless one ends before the other begins. */
        if (va > tr_end || p->ifa > va_end)
                return 0;
        return 1;
}
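/*
 * Worked example (hypothetical numbers): an existing 16 MB TR at
 * p->ifa = 0xa000000000000000 gives tr_end = 0xa000000000ffffff. A new
 * request for a 4 MB range at va = 0xa000000000800000 falls inside that
 * window, and (both addresses being in the same region, hence the same
 * RID) is reported as an overlap.
 */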

/*
 * ia64_itr_entry: allocate a TR slot and insert a translation register
 * in virtual mode.
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va   : virtual address.
 * pte  : pte entry to be inserted.
 * log_size: range to be covered.
 *
 * Return value:  < 0 :  error number.
 *
 *                >= 0 : slot number allocated for the TR.
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
        int i, r;
        unsigned long psr;
        struct ia64_tr_entry *p;
        int cpu = smp_processor_id();

        r = -EINVAL;
        /* Check for overlap with existing TR entries. */
        if (target_mask & 0x1) {
                p = &__per_cpu_idtrs[cpu][0][0];
                for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                i++, p++) {
                        if (p->pte & 0x1)
                                if (is_tr_overlap(p, va, log_size)) {
                                        printk(KERN_DEBUG "Overlapped entry "
                                                "inserted for TR register!\n");
                                        goto out;
                                }
                }
        }
        if (target_mask & 0x2) {
                p = &__per_cpu_idtrs[cpu][1][0];
                for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                i++, p++) {
                        if (p->pte & 0x1)
                                if (is_tr_overlap(p, va, log_size)) {
                                        printk(KERN_DEBUG "Overlapped entry "
                                                "inserted for TR register!\n");
                                        goto out;
                                }
                }
        }

        /* Find a slot that is free in every requested TR file. */
        for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
                switch (target_mask & 0x3) {
                case 1:
                        if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
                                goto found;
                        continue;
                case 2:
                        if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
                                goto found;
                        continue;
                case 3:
                        if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
                                !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
                                goto found;
                        continue;
                default:
                        r = -EINVAL;
                        goto out;
                }
        }
found:
        if (i >= per_cpu(ia64_tr_num, cpu))
                return -EBUSY;

        /* Record TR info for use by the MCA handler! */
        if (i > per_cpu(ia64_tr_used, cpu))
                per_cpu(ia64_tr_used, cpu) = i;

        psr = ia64_clear_ic();
        if (target_mask & 0x1) {
                ia64_itr(0x1, i, va, pte, log_size);
                ia64_srlz_i();
                p = &__per_cpu_idtrs[cpu][0][i];
                p->ifa = va;
                p->pte = pte;
                p->itir = log_size << 2;
                p->rr = ia64_get_rr(va);
        }
        if (target_mask & 0x2) {
                ia64_itr(0x2, i, va, pte, log_size);
                ia64_srlz_i();
                p = &__per_cpu_idtrs[cpu][1][i];
                p->ifa = va;
                p->pte = pte;
                p->itir = log_size << 2;
                p->rr = ia64_get_rr(va);
        }
        ia64_set_psr(psr);
        r = i;
out:
        return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);
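/*
 * Sketch of a caller (identifiers and values are illustrative, not from
 * this file): pin a 16 MB kernel mapping into a data TR, then free it.
 *
 *	preempt_disable();
 *	slot = ia64_itr_entry(0x2, my_va, my_pte, 24);	// log_size 24 = 16 MB
 *	...
 *	if (slot >= 0)
 *		ia64_ptr_entry(0x2, slot);
 *	preempt_enable();
 */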

/*
 * ia64_ptr_entry: purge a translation register and free its slot.
 *
 * target_mask: 0x1 : purge itr, 0x2 : purge dtr, 0x3 : purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
        int cpu = smp_processor_id();
        int i;
        struct ia64_tr_entry *p;

        if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
                return;

        if (target_mask & 0x1) {
                p = &__per_cpu_idtrs[cpu][0][slot];
                if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
                        p->pte = 0;
                        ia64_ptr(0x1, p->ifa, p->itir>>2);
                        ia64_srlz_i();
                }
        }

        if (target_mask & 0x2) {
                p = &__per_cpu_idtrs[cpu][1][slot];
                if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
                        p->pte = 0;
                        ia64_ptr(0x2, p->ifa, p->itir>>2);
                        ia64_srlz_i();
                }
        }

        /* Shrink ia64_tr_used back to the highest slot still in use. */
        for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
                if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
                                (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
                        break;
        }
        per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);