3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
11 #include <linux/stddef.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/bootmem.h>
15 #include <linux/seq_file.h>
16 #include <linux/proc_fs.h>
17 #include <asm/system.h>
19 #include <asm/iSeries/ItLpQueue.h>
20 #include <asm/iSeries/HvLpEvent.h>
21 #include <asm/iSeries/HvCallEvent.h>
/* Human-readable labels for the per-type event counters printed by
 * /proc/iSeries/lpevents (indexed by HvLpEvent type, 9 entries).
 * NOTE(review): only the first initializer is visible in this extract;
 * the remaining eight entries appear to have been elided. */
23 static char *event_types[9] = {
25 "Machine Facilities\t",
/* Try to mark the LP event queue as "in use" by atomically updating
 * xItLpQueue.xInUseWord; the inline asm operates on that word via
 * inUseP.  NOTE(review): the asm instruction sequence and the return
 * statement are missing from this extract, so the exact atomic
 * primitive used (presumably lwarx/stwcx.) and the success/failure
 * return convention cannot be confirmed here. */
35 static __inline__ int set_inUse(void)
38 u32 * inUseP = &xItLpQueue.xInUseWord;
40 __asm__ __volatile__("\n\
49 : "=&r" (t), "=m" (xItLpQueue.xInUseWord)
50 : "r" (inUseP), "m" (xItLpQueue.xInUseWord)
/* Release the queue: plain (non-atomic) store of 0 to the in-use word.
 * Pairs with set_inUse() above. */
56 static __inline__ void clear_inUse(void)
58 xItLpQueue.xInUseWord = 0;
61 /* Array of LpEvent handler functions */
62 extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
/* Nonzero while ItLpQueue_process() is running; used as a recursion
 * guard (see the "If we have recursed" comment in that function). */
63 unsigned long ItLpQueueInProcess = 0;
/* Fetch the event at xSlicCurEventPtr.  If its valid bit is set,
 * advance the cursor past the event (size is derived from
 * xSizeMinus1), wrapping back to the start of the event stack when
 * the cursor moves past the last slot that can hold a max-size event.
 * NOTE(review): the rmb(), the size-rounding arithmetic, the
 * else/return paths and the closing braces are missing from this
 * extract — the function presumably returns the event pointer on
 * valid and NULL otherwise, but that cannot be confirmed here. */
65 static struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
67 struct HvLpEvent * nextLpEvent =
68 (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
69 if ( nextLpEvent->xFlags.xValid ) {
70 /* rmb() needed only for weakly consistent machines (regatta) */
72 /* Set pointer to next potential event */
73 xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
77 /* Wrap to beginning if no room at end */
78 if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
79 xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
/* Number of CPUs allowed to process LP events; defaults to all CPUs
 * and may be lowered via the "spread_lpevents=" boot parameter. */
87 static unsigned long spread_lpevents = NR_CPUS;
/* Return nonzero if work is pending for this CPU: either the current
 * queue slot holds a valid event or the hypervisor has flagged an
 * overflow.  CPUs at or above spread_lpevents opt out (the early
 * return value on that path is elided from this extract). */
89 int ItLpQueue_isLpIntPending(void)
91 struct HvLpEvent *next_event;
93 if (smp_processor_id() >= spread_lpevents)
96 next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
97 return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
/* Clear the valid bit of an event, plus the valid bits at each
 * subsequent LpEventAlign boundary the event spans, so stale data in
 * those slots cannot be mistaken for new valid events.  "extra" is
 * the number of additional aligned chunks beyond the first.
 * NOTE(review): the switch/case skeleton dispatching on "extra" is
 * elided here — the visible clears look like intentional fallthrough
 * cases (3, 2, 1 extra chunks).  The final clear of the event's own
 * valid bit comes last, presumably after a barrier, so the event only
 * becomes reusable once the trailing bits are already clear. */
100 static void ItLpQueue_clearValid( struct HvLpEvent * event )
102 /* Clear the valid bit of the event
103 * Also clear bits within this event that might
104 * look like valid bits (on 64-byte boundaries)
106 unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
110 ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
112 ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
114 ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
119 event->xFlags.xValid = 0;
/* Drain the LP event queue: repeatedly fetch the next valid event,
 * bump the global and per-type counters, dispatch it to the
 * registered handler (or log an unexpected type), then clear its
 * valid bits.  When no valid event remains but the hypervisor flags
 * an overflow, pull the overflow events in.  Returns the number of
 * events processed; the per-CPU paca lpevent_count is also updated.
 * NOTE(review): the recursion-guard logic around ItLpQueueInProcess,
 * the loop construct, the in-use locking (set_inUse/clear_inUse) and
 * several braces are elided from this extract, so the exact control
 * flow cannot be confirmed here. */
122 unsigned ItLpQueue_process(struct pt_regs *regs)
124 unsigned numIntsProcessed = 0;
125 struct HvLpEvent * nextLpEvent;
127 /* If we have recursed, just return */
131 if (ItLpQueueInProcess == 0)
132 ItLpQueueInProcess = 1;
137 nextLpEvent = ItLpQueue_getNextLpEvent();
139 /* Count events to return to caller
140 * and count processed events in xItLpQueue
143 xItLpQueue.xLpIntCount++;
144 /* Call appropriate handler here, passing
145 * a pointer to the LpEvent. The handler
146 * must make a copy of the LpEvent if it
147 * needs it in a bottom half. (perhaps for
150 * Handlers are responsible for ACK processing
152 * The Hypervisor guarantees that LpEvents will
153 * only be delivered with types that we have
154 * registered for, so no type check is necessary
157 if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
158 xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
159 if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
160 lpEventHandler[nextLpEvent->xType] )
161 lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
163 printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
165 ItLpQueue_clearValid( nextLpEvent );
166 } else if ( xItLpQueue.xPlicOverflowIntPending )
168 * No more valid events. If overflow events are
169 * pending process them
171 HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
176 ItLpQueueInProcess = 0;
180 get_paca()->lpevent_count += numIntsProcessed;
182 return numIntsProcessed;
/* Parse the "spread_lpevents=" boot parameter: the number of CPUs
 * (1..NR_CPUS) that share LP event processing.  Out-of-range values
 * are rejected with a message and leave the default in place.
 * NOTE(review): the return statement required by __setup handlers is
 * elided from this extract. */
185 static int set_spread_lpevents(char *str)
187 unsigned long val = simple_strtoul(str, NULL, 0);
190 * The parameter is the number of processors to share in processing
193 if (( val > 0) && (val <= NR_CPUS)) {
194 spread_lpevents = val;
195 printk("lpevent processing spread over %ld processors\n", val);
197 printk("invalid spread_lpevents %ld\n", val);
202 __setup("spread_lpevents=", set_spread_lpevents);
/* One-time queue setup: allocate and zero a bootmem region for the
 * event stack, register it with the hypervisor, and initialize the
 * queue cursors.  xSlicLastValidEventPtr is set so that a max-size
 * event starting there still fits inside the stack.
 * NOTE(review): the declaration of eventStack and any address
 * translation mentioned in the comment (KERNELBASE adjustment) are
 * elided from this extract. */
204 void setup_hvlpevent_queue(void)
209 * Allocate a page for the Event Stack. The Hypervisor needs the
210 * absolute real address, so we subtract out the KERNELBASE and add
211 * in the absolute real address of the kernel load area.
213 eventStack = alloc_bootmem_pages(LpEventStackSize);
214 memset(eventStack, 0, LpEventStackSize);
216 /* Invoke the hypervisor to initialize the event stack */
217 HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
219 xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
220 xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
221 xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
222 (LpEventStackSize - LpEventMaxSize);
223 xItLpQueue.xIndex = 0;
/* seq_file show callback for /proc/iSeries/lpevents: prints the total
 * event count, the nine per-type counts (labelled by event_types[]),
 * and the per-online-CPU counts kept in each paca.
 * NOTE(review): the declaration of loop variable i and the return
 * statement are elided from this extract. */
226 static int proc_lpevents_show(struct seq_file *m, void *v)
230 seq_printf(m, "LpEventQueue 0\n");
231 seq_printf(m, " events processed:\t%lu\n",
232 (unsigned long)xItLpQueue.xLpIntCount);
234 for (i = 0; i < 9; ++i)
235 seq_printf(m, " %s %10lu\n", event_types[i],
236 (unsigned long)xItLpQueue.xLpIntCountByType[i]);
238 seq_printf(m, "\n events processed by processor:\n");
240 for_each_online_cpu(i)
241 seq_printf(m, " CPU%02d %10u\n", i, paca[i].lpevent_count);
/* open callback: bind the seq_file single-show helper to
 * proc_lpevents_show (no private data). */
246 static int proc_lpevents_open(struct inode *inode, struct file *file)
248 return single_open(file, proc_lpevents_show, NULL);
/* File operations for /proc/iSeries/lpevents, using the seq_file
 * single_* helpers.  NOTE(review): the .read and .llseek members
 * customarily paired with single_open are elided from this extract. */
251 static struct file_operations proc_lpevents_operations = {
252 .open = proc_lpevents_open,
255 .release = single_release,
/* Initcall: create the read-only /proc/iSeries/lpevents entry and
 * attach its file operations.  NOTE(review): the NULL check on the
 * create_proc_entry() result and the return statement are elided
 * from this extract — as written, a failed allocation would be
 * dereferenced at line 264. */
258 static int __init proc_lpevents_init(void)
260 struct proc_dir_entry *e;
262 e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
264 e->proc_fops = &proc_lpevents_operations;
268 __initcall(proc_lpevents_init);