From: Jeff Garzik
Date: Sun, 18 Jun 2006 05:15:45 +0000 (-0400)
Subject: Merge branch 'master' into upstream
X-Git-Tag: v2.6.18-rc1~1079^2~19
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ea6e1e94f2cb9ae54bd1428e1ef3e84a749ceed8;hp=db9ca5803566078aafe63cf364ef98b5097e4194;p=linux-2.6

Merge branch 'master' into upstream
---

diff --git a/Makefile b/Makefile
index a3a7baad85..1700d3f6ea 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 17
-EXTRAVERSION =-rc6
+EXTRAVERSION =
 NAME=Crazed Snow-Weasel
 
 # *DOCUMENTATION*
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 33654d1b1b..994856e55b 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -52,7 +52,7 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
-		va |= (0x7f >> (8 - penc)) << 12;
+		va |= penc << 12;
 		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
 		break;
 	}
@@ -74,7 +74,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
-		va |= (0x7f >> (8 - penc)) << 12;
+		va |= penc << 12;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
 		break;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a46d030e09..052b174876 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1323,17 +1323,12 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 
 	if (cic) {
-		RB_CLEAR(&cic->rb_node);
-		cic->key = NULL;
-		cic->cfqq[ASYNC] = NULL;
-		cic->cfqq[SYNC] = NULL;
+		memset(cic, 0, sizeof(*cic));
+		RB_CLEAR_COLOR(&cic->rb_node);
 		cic->last_end_request = jiffies;
-		cic->ttime_total = 0;
-		cic->ttime_samples = 0;
-		cic->ttime_mean = 0;
+		INIT_LIST_HEAD(&cic->queue_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		INIT_LIST_HEAD(&cic->queue_list);
 		atomic_inc(&ioc_count);
 	}
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a59876a0bf..3170eaa250 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1009,9 +1009,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
 		if (fp->f_mode & FMODE_WRITE) {
 			ret = -EROFS;
 			if (cdrom_open_write(cdi))
-				goto err;
+				goto err_release;
 			if (!CDROM_CAN(CDC_RAM))
-				goto err;
+				goto err_release;
 			ret = 0;
 			cdi->media_written = 0;
 		}
@@ -1026,6 +1026,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
 	   not be mounting, but opening with O_NONBLOCK */
 	check_disk_change(ip->i_bdev);
 	return 0;
+err_release:
+	cdi->ops->release(cdi);
 err:
 	cdi->use_count--;
 	return ret;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 97fe95666f..fba1e4d4d8 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2255,8 +2255,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
 static void sky2_netpoll(struct net_device *dev)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
+	struct net_device *dev0 = sky2->hw->dev[0];
 
-	sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL);
+	if (netif_running(dev) && __netif_rx_schedule_prep(dev0))
+		__netif_rx_schedule(dev0);
 }
 #endif
 
@@ -3446,6 +3448,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
 
 			sky2_down(dev);
 			netif_device_detach(dev);
+			netif_poll_disable(dev);
 		}
 	}
 
@@ -3474,6 +3477,8 @@ static int sky2_resume(struct pci_dev *pdev)
 		struct net_device *dev = hw->dev[i];
 		if (dev && netif_running(dev)) {
 			netif_device_attach(dev);
+			netif_poll_enable(dev);
+
 			err = sky2_up(dev);
 			if (err) {
 				printk(KERN_ERR PFX "%s: could not up: %d\n",
diff --git a/fs/bio.c b/fs/bio.c
index 098c12b2d6..6a0b9ad8f8 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -654,9 +654,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 					write_to_vm, 0, &pages[cur_page], NULL);
 		up_read(&current->mm->mmap_sem);
 
-		if (ret < local_nr_pages)
+		if (ret < local_nr_pages) {
+			ret = -EFAULT;
 			goto out_unmap;
-
+		}
 		offset = uaddr & ~PAGE_MASK;
 
 		for (j = cur_page; j < page_limit; j++) {
diff --git a/fs/locks.c b/fs/locks.c
index 6f99c0a6f8..ab61a8b548 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -755,6 +755,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	if (request->fl_type == F_UNLCK)
 		goto out;
 
+	error = -ENOMEM;
 	new_fl = locks_alloc_lock();
 	if (new_fl == NULL)
 		goto out;
@@ -781,6 +782,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	locks_copy_lock(new_fl, request);
 	locks_insert_lock(&inode->i_flock, new_fl);
 	new_fl = NULL;
+	error = 0;
 
 out:
 	unlock_kernel();
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 9fcf0162d8..f6265c2a0d 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -329,7 +329,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
-	    CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO)
+	    CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
 #endif
diff --git a/kernel/exit.c b/kernel/exit.c
index e95b932822..e06d0c10a2 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -881,14 +881,6 @@ fastcall NORET_TYPE void do_exit(long code)
 
 	tsk->flags |= PF_EXITING;
 
-	/*
-	 * Make sure we don't try to process any timer firings
-	 * while we are already exiting.
-	 */
-	tsk->it_virt_expires = cputime_zero;
-	tsk->it_prof_expires = cputime_zero;
-	tsk->it_sched_expires = 0;
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 520f6c5994..d38d9ec327 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -555,9 +555,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	struct cpu_timer_list *next;
 	unsigned long i;
 
-	if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
-		return;
-
 	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
 		p->cpu_timers : p->signal->cpu_timers);
 	head += CPUCLOCK_WHICH(timer->it_clock);
@@ -1173,6 +1170,9 @@ static void check_process_timers(struct task_struct *tsk,
 		}
 		t = tsk;
 		do {
+			if (unlikely(t->flags & PF_EXITING))
+				continue;
+
 			ticks = cputime_add(cputime_add(t->utime, t->stime),
 					    prof_left);
 			if (!cputime_eq(prof_expires, cputime_zero) &&
@@ -1193,11 +1193,7 @@ static void check_process_timers(struct task_struct *tsk,
 			    t->it_sched_expires > sched)) {
 				t->it_sched_expires = sched;
 			}
-
-			do {
-				t = next_thread(t);
-			} while (unlikely(t->flags & PF_EXITING));
-		} while (t != tsk);
+		} while ((t = next_thread(t)) != tsk);
 	}
 }
 
@@ -1289,30 +1285,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 #undef UNEXPIRED
 
-	BUG_ON(tsk->exit_state);
-
 	/*
 	 * Double-check with locks held.
 	 */
 	read_lock(&tasklist_lock);
-	spin_lock(&tsk->sighand->siglock);
+	if (likely(tsk->signal != NULL)) {
+		spin_lock(&tsk->sighand->siglock);
 
-	/*
-	 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-	 * all the timers that are firing, and put them on the firing list.
-	 */
-	check_thread_timers(tsk, &firing);
-	check_process_timers(tsk, &firing);
+		/*
+		 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
+		 * all the timers that are firing, and put them on the firing list.
+		 */
+		check_thread_timers(tsk, &firing);
+		check_process_timers(tsk, &firing);
 
-	/*
-	 * We must release these locks before taking any timer's lock.
-	 * There is a potential race with timer deletion here, as the
-	 * siglock now protects our private firing list. We have set
-	 * the firing flag in each timer, so that a deletion attempt
-	 * that gets the timer lock before we do will give it up and
-	 * spin until we've taken care of that timer below.
-	 */
-	spin_unlock(&tsk->sighand->siglock);
+		/*
+		 * We must release these locks before taking any timer's lock.
+		 * There is a potential race with timer deletion here, as the
+		 * siglock now protects our private firing list. We have set
+		 * the firing flag in each timer, so that a deletion attempt
+		 * that gets the timer lock before we do will give it up and
+		 * spin until we've taken care of that timer below.
+		 */
+		spin_unlock(&tsk->sighand->siglock);
+	}
 	read_unlock(&tasklist_lock);
 
 	/*