X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Frcupdate.h;h=fe17d7d750c2be05f545d51e5254f928d613ccca;hb=644b55ce889edd37d6406df26e2d96d7a7390749;hp=6312758393b61a3e3e741825f78d49956024ea26;hpb=d83015b8f62ee3fcd338f6f009051ed57f77a531;p=linux-2.6

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 6312758393..fe17d7d750 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -19,7 +19,7 @@
  *
  * Author: Dipankar Sarma
  *
- * Based on the original work by Paul McKenney
+ * Based on the original work by Paul McKenney
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  * Papers:
  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
@@ -66,6 +66,8 @@ struct rcu_ctrlblk {
 	long	completed;	/* Number of the last completed batch */
 	int	next_pending;	/* Is the next batch already waiting? */
 
+	int	signaled;
+
 	spinlock_t	lock	____cacheline_internodealigned_in_smp;
 	cpumask_t	cpumask;	/* CPUs that need to switch in order */
 					/* for current batch to proceed.     */
@@ -106,9 +108,6 @@ struct rcu_data {
 	long		blimit;		/* Upper limit on a processed batch */
 	int		cpu;
 	struct rcu_head	barrier;
-#ifdef CONFIG_SMP
-	long		last_rs_qlen;	/* qlen during the last resched */
-#endif
 };
 
 DECLARE_PER_CPU(struct rcu_data, rcu_data);
@@ -163,14 +162,22 @@ extern int rcu_needs_cpu(int cpu);
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock()		preempt_disable()
+#define rcu_read_lock() \
+	do { \
+		preempt_disable(); \
+		__acquire(RCU); \
+	} while(0)
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
-#define rcu_read_unlock()	preempt_enable()
+#define rcu_read_unlock() \
+	do { \
+		__release(RCU); \
+		preempt_enable(); \
+	} while(0)
 
 /*
  * So where is rcu_write_lock()? It does not exist, as there is no
@@ -193,14 +200,22 @@ extern int rcu_needs_cpu(int cpu);
  * can use just rcu_read_lock().
  *
  */
-#define rcu_read_lock_bh()	local_bh_disable()
+#define rcu_read_lock_bh() \
+	do { \
+		local_bh_disable(); \
+		__acquire(RCU_BH); \
+	} while(0)
 
 /*
  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
-#define rcu_read_unlock_bh()	local_bh_enable()
+#define rcu_read_unlock_bh() \
+	do { \
+		__release(RCU_BH); \
+		local_bh_enable(); \
+	} while(0)
 
 /**
  * rcu_dereference - fetch an RCU-protected pointer in an
@@ -258,6 +273,7 @@ extern void rcu_init(void);
 extern void rcu_check_callbacks(int cpu, int user);
 extern void rcu_restart_cpu(int cpu);
 extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
 
 /* Exported interfaces */
 extern void FASTCALL(call_rcu(struct rcu_head *head,
@@ -265,7 +281,6 @@ extern void FASTCALL(call_rcu(struct rcu_head *head,
 extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
 				void (*func)(struct rcu_head *head)));
 extern void synchronize_rcu(void);
-void synchronize_idle(void);
 extern void rcu_barrier(void);
 
 #endif /* __KERNEL__ */
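
The hunks above turn rcu_read_lock()/rcu_read_unlock() and their _bh variants into do/while blocks that also emit the sparse annotations __acquire(RCU)/__release(RCU) around the existing preempt/softirq handling, and they add the rcu_batches_completed_bh() declaration. For context, here is a minimal usage sketch of how these read-side primitives are normally paired with rcu_dereference(), rcu_assign_pointer() and call_rcu(). It is not part of the patch; struct foo, gbl_foo, foo_lock and the foo_* functions are hypothetical names invented for the example, only the RCU calls come from rcupdate.h.

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

struct foo {
	int value;
	struct rcu_head rcu;		/* used for deferred freeing via call_rcu() */
};

static struct foo *gbl_foo;		/* hypothetical RCU-protected pointer */
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters only */

/* Reader: runs concurrently with updaters, must not block. */
static int foo_get_value(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();		/* preempt_disable() + __acquire(RCU) */
	p = rcu_dereference(gbl_foo);
	if (p)
		val = p->value;
	rcu_read_unlock();		/* __release(RCU) + preempt_enable() */
	return val;
}

/* call_rcu() callback: invoked once a grace period has elapsed. */
static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Updater: publish a new version, defer freeing of the old one. */
static int foo_set_value(int value)
{
	struct foo *new, *old;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->value = value;

	spin_lock(&foo_lock);
	old = gbl_foo;
	rcu_assign_pointer(gbl_foo, new);	/* ordered publish for readers */
	spin_unlock(&foo_lock);

	if (old)
		call_rcu(&old->rcu, foo_reclaim);
	return 0;
}

With the annotated macros in this patch, sparse can flag a reader that leaves a function while still inside the rcu_read_lock()/rcu_read_unlock() pair, since the critical section is now bracketed by __acquire(RCU)/__release(RCU); the generated code is unchanged because __acquire()/__release() compile away outside of sparse checking.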