/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);

	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);
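
/*
 * The three __spu_trap_* helpers below just forward an error event
 * (invalid DMA, DMA alignment, SPU error) to the owner of the
 * context through the registered dma_callback; they are dispatched
 * from spu_irq_class_0_bottom() further down.
 */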

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
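
/*
 * SLB miss handling: the SPE's MFC has a small software-loaded SLB
 * (eight entries, see the wrap of spu->slb_replace below).  On a
 * segment fault we build the ESID/VSID pair for the faulting address
 * and drop it into the next replacement slot, round-robin.
 */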
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}
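
/*
 * The SPE raises three classes of interrupts: class 0 for errors
 * (DMA alignment, invalid DMA, SPU error), class 1 for translation
 * faults (segment and mapping) and class 2 for application events
 * (mailboxes, stop-and-signal, halt, DMA tag completion).  The
 * handlers below decode the per-class status bits and dispatch to
 * the helpers above or to the owner's callbacks.
 */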
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;
	unsigned long flags;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
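
/*
 * Class 0 handling is split: the hard irq handler above only flags
 * class_0_pending and kicks the owner via stop_callback; the
 * exported bottom half is then run later from the context owner
 * (spufs, presumably, as the only in-tree user) to decode and clear
 * the status bits.
 */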
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;

	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED, spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED, spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED, spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}
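
/*
 * SPU channel state is accessed indirectly through the priv2 area:
 * writing a channel index to spu_chnlcntptr_RW selects the channel,
 * after which spu_chnldata_RW and spu_chnlcnt_RW read or write that
 * channel's data and count.  spu_init_channels() below uses this to
 * bring all channels to a sane initial state.
 */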
void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu"),
	.shutdown = spu_shutdown,
};
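
/*
 * The helpers below add or remove a sysdev attribute (or attribute
 * group) on every SPU currently registered, so a new attribute shows
 * up uniformly across all of them.
 */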
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
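
/*
 * Example usage (hypothetical attribute, for illustration only):
 *
 *	static ssize_t foo_show(struct sys_device *sysdev, char *buf);
 *	static SYSDEV_ATTR(foo, 0444, foo_show, NULL);
 *	...
 *	spu_add_sysdev_attr(&attr_foo);
 */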

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}
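
/*
 * create_spu() is invoked once per SPE found during enumeration.  It
 * allocates and numbers the spu, lets the platform backend create it,
 * programs SDR/SR1, wires up the three interrupt classes, registers
 * the sysdev and finally links the spu onto the per-node and global
 * lists.
 */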
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
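
/*
 * The "stat" attribute reports one line per SPU: the current
 * utilization state, the four per-state times in milliseconds, then
 * the voluntary/involuntary context switch, SLB fault, hash fault,
 * minor/major fault, class 2 interrupt and libassist counters.
 */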
static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because of section type conflicts
		 * between const and __initdata with different compiler
		 * versions
		 */
		extern const struct linux_logo logo_spe_clut224;

		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");