[PATCH] ppc64: Make two ItLpQueue related functions static
/*
 * ItLpQueue.c
 * Copyright (C) 2001 Mike Corrigan  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>

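/*
 * Names for each LpEvent type.  The embedded tabs pad the shorter names
 * so that the per-type counts printed by proc_lpevents_show() below
 * line up in a single column.
 */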
static char *event_types[9] = {
        "Hypervisor\t\t",
        "Machine Facilities\t",
        "Session Manager\t",
        "SPD I/O\t\t",
        "Virtual Bus\t\t",
        "PCI I/O\t\t",
        "RIO I/O\t\t",
        "Virtual Lan\t\t",
        "Virtual I/O\t\t"
};

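/*
 * Atomically test-and-set xInUseWord with a lwarx/stwcx. loop.  Returns
 * 1 if the word was 0 and has now been set to 1 (we own the queue), or
 * 0 if it was already set (another path is processing events).  The
 * trailing eieio orders the accesses that follow the acquisition.
 */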
static __inline__ int set_inUse(void)
{
        int t;
        u32 * inUseP = &xItLpQueue.xInUseWord;

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%2         \n\
        cmpwi   0,%0,0          \n\
        li      %0,0            \n\
        bne-    2f              \n\
        addi    %0,%0,1         \n\
        stwcx.  %0,0,%2         \n\
        bne-    1b              \n\
2:      eieio"
        : "=&r" (t), "=m" (xItLpQueue.xInUseWord)
        : "r" (inUseP), "m" (xItLpQueue.xInUseWord)
        : "cc");

        return t;
}

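/*
 * clear_inUse() is a plain store: the mb() issued at the end of
 * ItLpQueue_process() makes all event processing visible before the
 * in-use word is released.
 */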
static __inline__ void clear_inUse(void)
{
        xItLpQueue.xInUseWord = 0;
}

/* Array of LpEvent handler functions */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
unsigned long ItLpQueueInProcess = 0;

static struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
{
        struct HvLpEvent * nextLpEvent =
                (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
        if ( nextLpEvent->xFlags.xValid ) {
                /* rmb() needed only for weakly consistent machines (regatta) */
                rmb();
                /* Set pointer to next potential event */
                xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
                                      LpEventAlign ) /
                                      LpEventAlign ) *
                                      LpEventAlign;
                /* Wrap to beginning if no room at end */
                if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
                        xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
        }
        else
                nextLpEvent = NULL;

        return nextLpEvent;
}

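/*
 * Number of processors that take part in LpEvent handling.  The default
 * (NR_CPUS) lets every CPU poll the queue; it can be lowered at boot
 * time via the spread_lpevents= parameter handled below.
 */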
static unsigned long spread_lpevents = NR_CPUS;

int ItLpQueue_isLpIntPending(void)
{
        struct HvLpEvent *next_event;

        if (smp_processor_id() >= spread_lpevents)
                return 0;

        next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
        return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
}

static void ItLpQueue_clearValid( struct HvLpEvent * event )
{
        /* Clear the valid bit of the event
         * Also clear bits within this event that might
         * look like valid bits (on 64-byte boundaries)
         */
        unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
                                                 LpEventAlign ) - 1;
        switch ( extra ) {
          case 3:
           ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
           /* fall through */
          case 2:
           ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
           /* fall through */
          case 1:
           ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
           /* fall through */
          case 0:
           ;
        }
        mb();
        event->xFlags.xValid = 0;
}

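/*
 * Drain the event queue, dispatching each valid event to its registered
 * handler.  set_inUse() turns re-entry from another interrupt into a
 * no-op, and the ItLpQueueInProcess check BUG()s if that guard is ever
 * bypassed.  Returns the number of events handled.
 */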
unsigned ItLpQueue_process(struct pt_regs *regs)
{
        unsigned numIntsProcessed = 0;
        struct HvLpEvent * nextLpEvent;

        /* If we have recursed, just return */
        if ( !set_inUse() )
                return 0;

        if (ItLpQueueInProcess == 0)
                ItLpQueueInProcess = 1;
        else
                BUG();

        for (;;) {
                nextLpEvent = ItLpQueue_getNextLpEvent();
                if ( nextLpEvent ) {
                        /* Count events to return to caller
                         * and count processed events in xItLpQueue
                         */
                        ++numIntsProcessed;
                        xItLpQueue.xLpIntCount++;
                        /* Call appropriate handler here, passing
                         * a pointer to the LpEvent.  The handler
                         * must make a copy of the LpEvent if it
                         * needs it in a bottom half. (perhaps for
                         * an ACK)
                         *
                         *  Handlers are responsible for ACK processing
                         *
                         * The Hypervisor guarantees that LpEvents will
                         * only be delivered with types that we have
                         * registered for, so no type check is necessary
                         * here!
                         */
                        if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
                                xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
                        if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                             lpEventHandler[nextLpEvent->xType] )
                                lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
                        else
                                printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );

                        ItLpQueue_clearValid( nextLpEvent );
                } else if ( xItLpQueue.xPlicOverflowIntPending )
                        /*
                         * No more valid events. If overflow events are
                         * pending process them
                         */
                        HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
                else
                        break;
        }

        ItLpQueueInProcess = 0;
        mb();
        clear_inUse();

        get_paca()->lpevent_count += numIntsProcessed;

        return numIntsProcessed;
}

static int set_spread_lpevents(char *str)
{
        unsigned long val = simple_strtoul(str, NULL, 0);

        /*
         * The parameter is the number of processors to share in processing
         * lp events.
         */
        if ((val > 0) && (val <= NR_CPUS)) {
                spread_lpevents = val;
                printk("lpevent processing spread over %ld processors\n", val);
        } else {
                printk("invalid spread_lpevents %ld\n", val);
        }

        return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);
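/*
 * Example: booting with "spread_lpevents=2" limits LpEvent processing
 * to CPUs 0 and 1 (see the smp_processor_id() check in
 * ItLpQueue_isLpIntPending() above).
 */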

void setup_hvlpevent_queue(void)
{
        void *eventStack;

        /*
         * Allocate a page for the Event Stack.  The Hypervisor needs the
         * absolute real address of the stack; HvCallEvent_setLpEventStack()
         * takes care of translating the virtual address we pass in.
         */
        eventStack = alloc_bootmem_pages(LpEventStackSize);
        memset(eventStack, 0, LpEventStackSize);

        /* Invoke the hypervisor to initialize the event stack */
        HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

        xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
        xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
        xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
                                        (LpEventStackSize - LpEventMaxSize);
        xItLpQueue.xIndex = 0;
}

static int proc_lpevents_show(struct seq_file *m, void *v)
{
        unsigned int i;

        seq_printf(m, "LpEventQueue 0\n");
        seq_printf(m, "  events processed:\t%lu\n",
                   (unsigned long)xItLpQueue.xLpIntCount);

        for (i = 0; i < 9; ++i)
                seq_printf(m, "    %s %10lu\n", event_types[i],
                           (unsigned long)xItLpQueue.xLpIntCountByType[i]);

        seq_printf(m, "\n  events processed by processor:\n");

        for_each_online_cpu(i)
                seq_printf(m, "    CPU%02d  %10u\n", i, paca[i].lpevent_count);

        return 0;
}
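/*
 * Rough shape of the resulting /proc/iSeries/lpevents output (counts
 * are illustrative):
 *
 *   LpEventQueue 0
 *     events processed:  1234
 *       Hypervisor            2
 *       ...
 *     events processed by processor:
 *       CPU00         617
 */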

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
        .open           = proc_lpevents_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init proc_lpevents_init(void)
{
        struct proc_dir_entry *e;

        e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
        if (e)
                e->proc_fops = &proc_lpevents_operations;

        return 0;
}
__initcall(proc_lpevents_init);