/*
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>
#include <asm/iSeries/ItLpNaca.h>

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition. This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));

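/* Per-cpu counts of LpEvents processed, indexed by event type; these
 * feed the /proc/iSeries/lpevents statistics below. */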
DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

static char *event_types[HvLpEvent_Type_NumTypes] = {
	"Hypervisor",
	"Machine Facilities",
	"Session Manager",
	"SPD I/O",
	"Virtual Bus",
	"PCI I/O",
	"RIO I/O",
	"Virtual Lan",
	"Virtual I/O"
};

/* Array of LpEvent handler functions */
static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
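/* Count of open event paths per type; see HvLpEvent_openPath/closePath */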
static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];

static struct HvLpEvent * get_next_hvlpevent(void)
{
	struct HvLpEvent * event;
	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

	if (event->xFlags.xValid) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
				LpEventAlign) / LpEventAlign) * LpEventAlign;

		/* Wrap to beginning if no room at end */
		if (hvlpevent_queue.xSlicCurEventPtr >
				hvlpevent_queue.xSlicLastValidEventPtr) {
			hvlpevent_queue.xSlicCurEventPtr =
				hvlpevent_queue.xSlicEventStackPtr;
		}
	} else {
		event = NULL;
	}

	return event;
}

static unsigned long spread_lpevents = NR_CPUS;

int hvlpevent_is_pending(void)
{
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

	return next_event->xFlags.xValid |
		hvlpevent_queue.xPlicOverflowIntPending;
}

static void hvlpevent_clear_valid(struct HvLpEvent * event)
{
	/* Tell the hypervisor that we're done with this event.
	 * Also clear bits within this event that might look like valid
	 * bits, i.e. on 64-byte boundaries.
	 */
	struct HvLpEvent *tmp;
	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
						 LpEventAlign) - 1;

	/* Deliberate fall-through: clear from the last block downwards */
	switch (extra) {
	case 3:
		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	case 2:
		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	case 1:
		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	}

	mb();

	event->xFlags.xValid = 0;
}

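/*
 * Drain the queue, dispatching each valid event to its registered handler.
 * The callers (interrupt and idle paths elsewhere in the platform code)
 * are expected to check hvlpevent_is_pending() first; this is a hedged
 * note, as the call sites are outside this file.
 */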
void process_hvlpevents(struct pt_regs *regs)
{
	struct HvLpEvent * event;

	/* If we have recursed, just return */
	if (!spin_trylock(&hvlpevent_queue.lock))
		return;

	for (;;) {
		event = get_next_hvlpevent();
		if (event) {
			/* Call appropriate handler here, passing
			 * a pointer to the LpEvent. The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half (perhaps for
			 * an ACK).
			 *
			 * Handlers are responsible for ACK processing.
			 *
			 * The hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here!
			 */
			if (event->xType < HvLpEvent_Type_NumTypes)
				__get_cpu_var(hvlpevent_counts)[event->xType]++;
			if (event->xType < HvLpEvent_Type_NumTypes &&
					lpEventHandler[event->xType])
				lpEventHandler[event->xType](event, regs);
			else
				printk(KERN_INFO "Unexpected Lp Event type=%d\n",
						event->xType);

			hvlpevent_clear_valid(event);
		} else if (hvlpevent_queue.xPlicOverflowIntPending)
			/*
			 * No more valid events. If overflow events are
			 * pending process them
			 */
			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
		else
			break;
	}

	spin_unlock(&hvlpevent_queue.lock);
}

static int set_spread_lpevents(char *str)
{
	unsigned long val = simple_strtoul(str, NULL, 0);

	/*
	 * The parameter is the number of processors to share in processing
	 * lp events.
	 */
	if ((val > 0) && (val <= NR_CPUS)) {
		spread_lpevents = val;
		printk("lpevent processing spread over %ld processors\n", val);
	} else
		printk("invalid spread_lpevents %ld\n", val);

	return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);

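/*
 * Illustrative example for the boot option above: booting with
 * "spread_lpevents=2" confines LpEvent processing to CPUs 0 and 1,
 * because hvlpevent_is_pending() returns 0 on higher-numbered CPUs.
 */
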
void setup_hvlpevent_queue(void)
{
	void *eventStack;

	/*
	 * Allocate a page for the Event Stack. The hypervisor needs the
	 * absolute real address, so we subtract out the KERNELBASE and add
	 * in the absolute real address of the kernel load area.
	 */
	eventStack = alloc_bootmem_pages(LpEventStackSize);
	memset(eventStack, 0, LpEventStackSize);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
					(LpEventStackSize - LpEventMaxSize);
	hvlpevent_queue.xIndex = 0;
}

/* Register a handler for an LpEvent type */
int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
{
	if (eventType < HvLpEvent_Type_NumTypes) {
		lpEventHandler[eventType] = handler;
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_registerHandler);

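/*
 * Typical consumer usage, as a hedged sketch (my_handler is hypothetical;
 * the registration and path calls are the ones defined in this file):
 *
 *	static void my_handler(struct HvLpEvent *event, struct pt_regs *regs)
 *	{
 *		handle the event, copying anything needed later, then ACK
 *	}
 *
 *	HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo, &my_handler);
 *	HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, 0);
 */
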
int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
	might_sleep();

	if (eventType < HvLpEvent_Type_NumTypes) {
		if (!lpEventHandlerPaths[eventType]) {
			lpEventHandler[eventType] = NULL;
			/*
			 * We now sleep until all other CPUs have scheduled.
			 * This ensures that the deletion is seen by all
			 * other CPUs, and that the deleted handler isn't
			 * still running on another CPU when we return.
			 */
			synchronize_rcu();
			return 0;
		}
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_unregisterHandler);

/*
 * lpIndex is the partition index of the target partition. It is needed
 * only for VirtualIo, VirtualLan and SessionMgr; zero indicates to use
 * our own partition index (for the other types).
 */
int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_openLpEventPath(lpIndex, eventType);
		++lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}

int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType] &&
			lpEventHandlerPaths[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_closeLpEventPath(lpIndex, eventType);
		--lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}

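/*
 * Note on the two path functions above: open and close calls must balance
 * per event type, since HvLpEvent_unregisterHandler() refuses to remove a
 * handler while lpEventHandlerPaths[] for its type is non-zero.
 */
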
static int proc_lpevents_show(struct seq_file *m, void *v)
{
	int cpu, i;
	unsigned long sum;
	static unsigned long cpu_totals[NR_CPUS];

	/* FIXME: do we care that there's no locking here? */
	sum = 0;
	for_each_online_cpu(cpu) {
		cpu_totals[cpu] = 0;
		for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
		}
		sum += cpu_totals[cpu];
	}

283 seq_printf(m, "LpEventQueue 0\n");
284 seq_printf(m, " events processed:\t%lu\n", sum);
	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
		sum = 0;
		for_each_online_cpu(cpu) {
			sum += per_cpu(hvlpevent_counts, cpu)[i];
		}

		seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
	}

	seq_printf(m, "\n  events processed by processor:\n");

	for_each_online_cpu(cpu) {
		seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
	}

	return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
	.open		= proc_lpevents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_lpevents_init(void)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
	if (e)
		e->proc_fops = &proc_lpevents_operations;

	return 0;
}
__initcall(proc_lpevents_init);

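/*
 * Shape of the resulting /proc/iSeries/lpevents output, derived from the
 * seq_printf format strings above (values are placeholders):
 *
 *	LpEventQueue 0
 *	  events processed:	<total>
 *	    <event type>       <count per type>
 *	    ...
 *
 *	  events processed by processor:
 *	    CPU00  <count>
 *	    ...
 */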