/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007  David S. Miller  (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

/* UPA nodes send an interrupt packet to the UltraSparc, with the low
 * 5 (7 on Starfire) bits of the first data register holding the IRQ
 * identifier being delivered.  We must translate this into a
 * non-vector IRQ so we can set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 *
 * If you make changes to ino_bucket, please update the hand-coded
 * assembler of the vectored interrupt trap handler(s) in entry.S
 * and sun4v_ivec.S.
 */
struct ino_bucket {
/*0x00*/unsigned long __irq_chain_pa;

        /* Virtual interrupt number assigned to this INO.  */
/*0x08*/unsigned int __virt_irq;
/*0x0c*/unsigned int __pad;
};
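
/* entry.S links pending buckets through __irq_chain_pa into the
 * per-cpu worklist (trap_block[cpu].irq_worklist_pa), which
 * handler_irq() below snapshots and walks using physical addresses
 * only.
 */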

#define NUM_IVECS       (IMAP_INR + 1)
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa      [%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
        __asm__ __volatile__("stxa      %%g0, [%0] %1"
                             : /* no outputs */
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
        unsigned int ret;

        __asm__ __volatile__("lduwa     [%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __virt_irq)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
                                unsigned int virt_irq)
{
        __asm__ __volatile__("stwa      %0, [%1] %2"
                             : /* no outputs */
                             : "r" (virt_irq),
                               "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __virt_irq)),
                               "i" (ASI_PHYS_USE_EC));
}

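/* The "real irq" stored in virt_to_real_irq_table[] below is simply
 * the kernel virtual address of an ino_bucket; __bucket() and __irq()
 * convert between the pointer and its opaque unsigned long form.
 */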
#define __bucket(irq) ((struct ino_bucket *)(irq))
#define __irq(bucket) ((unsigned long)(bucket))

#define irq_work_pa(__cpu)      &(trap_block[(__cpu)].irq_worklist_pa)

static struct {
        unsigned long irq;
        unsigned int dev_handle;
        unsigned int dev_ino;
} virt_to_real_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

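/* Entry 0 of virt_to_real_irq_table[] is never handed out: the scan
 * in virt_irq_alloc() starts at 1, and a return value of 0 means
 * "out of virtual IRQs".  Returning unsigned char is safe because
 * the BUILD_BUG_ON() below guarantees NR_IRQS < 256 at compile time.
 */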
unsigned char virt_irq_alloc(unsigned long real_irq,
                             unsigned int dev_handle,
                             unsigned int dev_ino)
{
        unsigned long flags;
        unsigned char ent;

        BUILD_BUG_ON(NR_IRQS >= 256);

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        for (ent = 1; ent < NR_IRQS; ent++) {
                if (!virt_to_real_irq_table[ent].irq)
                        break;
        }
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                ent = 0;
        } else {
                virt_to_real_irq_table[ent].irq = real_irq;
                virt_to_real_irq_table[ent].dev_handle = dev_handle;
                virt_to_real_irq_table[ent].dev_ino = dev_ino;
        }

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

        return ent;
}

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
        unsigned long flags;

        if (virt_irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        virt_to_real_irq_table[virt_irq].irq = 0;

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

static unsigned long virt_to_real_irq(unsigned char virt_irq)
{
        return virt_to_real_irq_table[virt_irq].irq;
}

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ",j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, "  %s", action->name);

                for (action=action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
        return 0;
}

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}

struct irq_handler_data {
        unsigned long   iclr;
        unsigned long   imap;

        void            (*pre_handler)(unsigned int, void *, void *);
        void            *pre_handler_arg1;
        void            *pre_handler_arg2;
};

static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
        unsigned long real_irq = virt_to_real_irq(virt_irq);
        struct ino_bucket *bucket = NULL;

        if (likely(real_irq))
                bucket = __bucket(real_irq);

        return bucket;
}

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
        cpumask_t mask = irq_desc[virt_irq].affinity;
        int cpuid;

        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
        do_round_robin:
                spin_lock_irqsave(&irq_rover_lock, flags);

                while (!cpu_online(irq_rover)) {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                }
                cpuid = irq_rover;
                do {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                } while (!cpu_online(irq_rover));

                spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpumask_t tmp;

                cpus_and(tmp, cpu_online_map, mask);

                if (cpus_empty(tmp))
                        goto do_round_robin;

                cpuid = first_cpu(tmp);
        }

        return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
        return real_hard_smp_processor_id();
}
#endif

static void sun4u_irq_enable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
        }
}

static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        sun4u_irq_enable(virt_irq);
}

static void sun4u_irq_disable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long imap = data->imap;
                unsigned long tmp = upa_readq(imap);

                tmp &= ~IMAP_VALID;
                upa_writeq(tmp, imap);
        }
}

static void sun4u_irq_end(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(data))
                upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                err = sun4v_intr_settarget(ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                               "err(%d)\n", ino, cpuid, err);
                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setstate(%x): "
                               "err(%d)\n", ino, err);
                err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
                               ino, err);
        }
}

static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                err = sun4v_intr_settarget(ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                               "err(%d)\n", ino, cpuid, err);
        }
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setenabled(%x): "
                               "err(%d)\n", ino, err);
        }
}

static void sun4v_irq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setstate(%x): "
                               "err(%d)\n", ino, err);
        }
}

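/* The sun4v_virq_* handlers below differ from the sun4v_irq_* ones
 * above in that they operate on cookie-based virtual interrupts:
 * the hypervisor calls take the (dev_handle, dev_ino) pair recorded
 * in virt_to_real_irq_table[] instead of a sysino.
 */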
static void sun4v_virq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

        if (likely(bucket)) {
                unsigned long cpuid, dev_handle, dev_ino;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                               "err(%d)\n",
                               dev_handle, dev_ino, cpuid, err);
                err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                            HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                                "HV_INTR_STATE_IDLE): err(%d)\n",
                               dev_handle, dev_ino, err);
                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                            HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_ENABLED): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

        if (likely(bucket)) {
                unsigned long cpuid, dev_handle, dev_ino;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                               "err(%d)\n",
                               dev_handle, dev_ino, cpuid, err);
        }
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

        if (likely(bucket)) {
                unsigned long dev_handle, dev_ino;
                int err;

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                            HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_DISABLED): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void sun4v_virq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(bucket)) {
                unsigned long dev_handle, dev_ino;
                int err;

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                            HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                                "HV_INTR_STATE_IDLE): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

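/* run_pre_handler() is wired up as the ->ack hook of sun4u_irq_ack
 * below; irq_install_pre_handler() switches an interrupt over to
 * that chip so the pre-handler runs before the action handlers on
 * each delivery.
 */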
static void run_pre_handler(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        unsigned int ino;

        ino = virt_to_real_irq_table[virt_irq].dev_ino;
        if (likely(data->pre_handler)) {
                data->pre_handler(ino,
                                  data->pre_handler_arg1,
                                  data->pre_handler_arg2);
        }
}

static struct irq_chip sun4u_irq = {
        .typename       = "sun4u",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .end            = sun4u_irq_end,
        .set_affinity   = sun4u_set_affinity,
};

static struct irq_chip sun4u_irq_ack = {
        .typename       = "sun4u+ack",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .ack            = run_pre_handler,
        .end            = sun4u_irq_end,
        .set_affinity   = sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
        .typename       = "sun4v",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .end            = sun4v_irq_end,
        .set_affinity   = sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
        .typename       = "vsun4v",
        .enable         = sun4v_virq_enable,
        .disable        = sun4v_virq_disable,
        .end            = sun4v_virq_end,
        .set_affinity   = sun4v_virt_set_affinity,
};

void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_chip *chip = get_irq_chip(virt_irq);

        if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) {
                printk(KERN_ERR "IRQ: Trying to install pre-handler on "
                       "sun4v irq %u\n", virt_irq);
                return;
        }

        data->pre_handler = func;
        data->pre_handler_arg1 = arg1;
        data->pre_handler_arg2 = arg2;

        if (chip == &sun4u_irq_ack)
                return;

        set_irq_chip(virt_irq, &sun4u_irq_ack);
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned int virt_irq;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        virt_irq = bucket_get_virt_irq(__pa(bucket));
        if (!virt_irq) {
                virt_irq = virt_irq_alloc(__irq(bucket), 0, ino);
                bucket_set_virt_irq(__pa(bucket), virt_irq);
                set_irq_chip(virt_irq, &sun4u_irq);
        }

        data = get_irq_chip_data(virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(virt_irq, data);

        data->imap  = imap;
        data->iclr  = iclr;

out:
        return virt_irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
                                       struct irq_chip *chip)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned int virt_irq;

        BUG_ON(tlb_type != hypervisor);

        bucket = &ivector_table[sysino];
        virt_irq = bucket_get_virt_irq(__pa(bucket));
        if (!virt_irq) {
                virt_irq = virt_irq_alloc(__irq(bucket), 0, sysino);
                bucket_set_virt_irq(__pa(bucket), virt_irq);
                set_irq_chip(virt_irq, chip);
        }

        data = get_irq_chip_data(virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

out:
        return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

        return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
        struct irq_handler_data *data;
        struct ino_bucket *bucket;
        unsigned long hv_err, cookie;
        unsigned int virt_irq;

        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;
        __flush_dcache_range((unsigned long) bucket,
                             ((unsigned long) bucket +
                              sizeof(struct ino_bucket)));

        virt_irq = virt_irq_alloc(__irq(bucket), devhandle, devino);
        bucket_set_virt_irq(__pa(bucket), virt_irq);
        set_irq_chip(virt_irq, &sun4v_virq);

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data))
                return 0;

        set_irq_chip_data(virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

        cookie = ~__pa(bucket);
        hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
        if (hv_err) {
                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
                            "err=%lu\n", devhandle, devino, hv_err);
                prom_halt();
        }

        return virt_irq;
}
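
/* A sketch of typical use, not code from this file: a sun4v bus
 * driver obtains a virtual IRQ for its (devhandle, devino) pair and
 * registers a handler against it.  mydev_interrupt and mydev are
 * illustrative placeholders.
 *
 *      unsigned int virt_irq = sun4v_build_virq(devhandle, devino);
 *
 *      if (!virt_irq ||
 *          request_irq(virt_irq, mydev_interrupt, 0, "mydev", mydev))
 *              return -ENODEV;
 *
 * The cookie programmed above is the complement of the bucket's
 * physical address, presumably so the vectored trap code in
 * sun4v_ivec.S can recover the bucket when the interrupt arrives.
 */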

void ack_bad_irq(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = 0xdeadbeef;

        if (bucket)
                ino = bucket - &ivector_table[0];

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
               ino, virt_irq);
}

void handler_irq(int irq, struct pt_regs *regs)
{
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;

        clear_softint(1 << irq);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Grab an atomic snapshot of the pending IVECs.  */
        __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
                             "wrpr      %0, %3, %%pstate\n\t"
                             "ldx       [%2], %1\n\t"
                             "stx       %%g0, [%2]\n\t"
                             "wrpr      %0, 0x0, %%pstate\n\t"
                             : "=&r" (pstate), "=&r" (bucket_pa)
                             : "r" (irq_work_pa(smp_processor_id())),
                               "i" (PSTATE_IE)
                             : "memory");

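        /* Interrupts were disabled (PSTATE_IE cleared) around the
         * load-and-zero above so the snapshot cannot race with the
         * vector trap handler appending new buckets to the worklist.
         * Now walk the snapshotted list by physical address, clearing
         * each bucket's chain slot so it can be queued again.
         */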
        while (bucket_pa) {
                unsigned long next_pa;
                unsigned int virt_irq;

                next_pa = bucket_get_chain_pa(bucket_pa);
                virt_irq = bucket_get_virt_irq(bucket_pa);
                bucket_clear_chain_pa(bucket_pa);

                __do_IRQ(virt_irq);

                bucket_pa = next_pa;
        }

        irq_exit();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                unsigned long flags;

                spin_lock_irqsave(&irq_desc[irq].lock, flags);
                if (irq_desc[irq].action &&
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
                                        irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
}
#endif

struct sun5_timer {
        u64     count0;
        u64     limit0;
        u64     count1;
        u64     limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
        struct device_node *dp;
        const unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume that if the node is not present, the PROM uses a
         * different tick mechanism, which we need not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If the PROM is really using this, it must have mapped it. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as on sun4c/sun4m, the PROM uses a timer which ticks
         * at IRQ 14.  We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"       mov     0x40, %%g2\n"
"       ldxa    [%%g0] %0, %%g1\n"
"       ldxa    [%%g2] %1, %%g1\n"
"       stxa    %%g0, [%%g0] %0\n"
"       membar  #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}

void init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf(),
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}

void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
                           tb->cpu_mondo_qmask);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
                           tb->dev_mondo_qmask);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
                           tb->resum_qmask);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
                           tb->nonresum_qmask);
}

static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem_low(size, size, 0);
        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem_low(size, size, 0);

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

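/* Lay out the per-cpu send-mondo state in a single low-memory page:
 * the first 64 bytes hold the mondo data block, and the cpu list
 * starts at offset 64.  The BUILD_BUG_ON() checks that NR_CPUS u16
 * cpu list entries still fit in the rest of the page.
 */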
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
        void *page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        page = alloc_bootmem_low_pages(PAGE_SIZE);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
                alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
                alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
                alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
                alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
                               tb->nonresum_qmask);

                init_cpu_send_mondo_info(tb);
        }

        /* Load up the boot cpu's entries.  */
        sun4v_register_mondo_queues(hard_smp_processor_id());
}

static struct irqaction timer_irq_action = {
        .name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        unsigned long size;

        map_prom_timers();
        kill_prom_timer();

        size = sizeof(struct ino_bucket) * NUM_IVECS;
        ivector_table = alloc_bootmem_low(size);
        if (!ivector_table) {
                prom_printf("Fatal error, cannot allocate ivector_table\n");
                prom_halt();
        }
        __flush_dcache_range((unsigned long) ivector_table,
                             ((unsigned long) ivector_table) + size);

        ivector_table_pa = __pa(ivector_table);

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();

        /* We need to clear any IRQs pending in the soft interrupt
         * registers, since a spurious one could have been left around
         * from the PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that the ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr      %%pstate, %%g1\n\t"
                             "or        %%g1, %0, %%g1\n\t"
                             "wrpr      %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_desc[0].action = &timer_irq_action;
}