X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fsched.h;h=995eb407c234f927df0ee733cc932bb487419394;hb=20b8a59f2461e1be911dce2cfafefab9d22e4eee;hp=97c0c7da58efe896e519ef3a5a338da716c2b052;hpb=5884c40668a928bba017eaf54e2eb3c01c8a98e6;p=linux-2.6 diff --git a/include/linux/sched.h b/include/linux/sched.h index 97c0c7da58..995eb407c2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -34,6 +34,8 @@ #define SCHED_FIFO 1 #define SCHED_RR 2 #define SCHED_BATCH 3 +/* SCHED_ISO: reserved but not implemented yet */ +#define SCHED_IDLE 5 #ifdef __KERNEL__ @@ -88,6 +90,7 @@ struct sched_param { struct exec_domain; struct futex_pi_state; +struct bio; /* * List of flags we want to share for kernel threads, @@ -192,6 +195,7 @@ struct task_struct; extern void sched_init(void); extern void sched_init_smp(void); extern void init_idle(struct task_struct *idle, int cpu); +extern void init_idle_bootup_task(struct task_struct *idle); extern cpumask_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) @@ -638,18 +642,24 @@ static inline int sched_info_on(void) #endif } -enum idle_type -{ - SCHED_IDLE, - NOT_IDLE, - NEWLY_IDLE, - MAX_IDLE_TYPES +enum cpu_idle_type { + CPU_IDLE, + CPU_NOT_IDLE, + CPU_NEWLY_IDLE, + CPU_MAX_IDLE_TYPES }; /* * sched-domains (multiprocessor balancing) declarations: */ -#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */ + +/* + * Increase resolution of nice-level calculations: + */ +#define SCHED_LOAD_SHIFT 10 +#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) + +#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 5) #ifdef CONFIG_SMP #define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ @@ -718,14 +728,14 @@ struct sched_domain { #ifdef CONFIG_SCHEDSTATS /* load_balance() stats */ - unsigned long lb_cnt[MAX_IDLE_TYPES]; - unsigned long lb_failed[MAX_IDLE_TYPES]; - unsigned long lb_balanced[MAX_IDLE_TYPES]; - unsigned long lb_imbalance[MAX_IDLE_TYPES]; - unsigned long lb_gained[MAX_IDLE_TYPES]; - unsigned long lb_hot_gained[MAX_IDLE_TYPES]; - unsigned long lb_nobusyg[MAX_IDLE_TYPES]; - unsigned long lb_nobusyq[MAX_IDLE_TYPES]; + unsigned long lb_cnt[CPU_MAX_IDLE_TYPES]; + unsigned long lb_failed[CPU_MAX_IDLE_TYPES]; + unsigned long lb_balanced[CPU_MAX_IDLE_TYPES]; + unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES]; + unsigned long lb_gained[CPU_MAX_IDLE_TYPES]; + unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES]; + unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES]; + unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES]; /* Active load balancing */ unsigned long alb_cnt; @@ -752,12 +762,6 @@ struct sched_domain { extern int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2); -/* - * Maximum cache size the migration-costs auto-tuning code will - * search from: - */ -extern unsigned int max_cache_size; - #endif /* CONFIG_SMP */ @@ -816,6 +820,86 @@ enum sleep_type { }; struct prio_array; +struct rq; +struct sched_domain; + +struct sched_class { + struct sched_class *next; + + void (*enqueue_task) (struct rq *rq, struct task_struct *p, + int wakeup, u64 now); + void (*dequeue_task) (struct rq *rq, struct task_struct *p, + int sleep, u64 now); + void (*yield_task) (struct rq *rq, struct task_struct *p); + + void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); + + struct task_struct * (*pick_next_task) (struct rq *rq, u64 now); + void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now); + + int (*load_balance) (struct rq *this_rq, int this_cpu, + struct rq *busiest, + unsigned 
long max_nr_move, unsigned long max_load_move, + struct sched_domain *sd, enum cpu_idle_type idle, + int *all_pinned, unsigned long *total_load_moved); + + void (*set_curr_task) (struct rq *rq); + void (*task_tick) (struct rq *rq, struct task_struct *p); + void (*task_new) (struct rq *rq, struct task_struct *p); +}; + +struct load_weight { + unsigned long weight, inv_weight; +}; + +/* + * CFS stats for a schedulable entity (task, task-group etc) + * + * Current field usage histogram: + * + * 4 se->block_start + * 4 se->run_node + * 4 se->sleep_start + * 4 se->sleep_start_fair + * 6 se->load.weight + * 7 se->delta_fair + * 15 se->wait_runtime + */ +struct sched_entity { + long wait_runtime; + unsigned long delta_fair_run; + unsigned long delta_fair_sleep; + unsigned long delta_exec; + s64 fair_key; + struct load_weight load; /* for load-balancing */ + struct rb_node run_node; + unsigned int on_rq; + + u64 wait_start_fair; + u64 wait_start; + u64 exec_start; + u64 sleep_start; + u64 sleep_start_fair; + u64 block_start; + u64 sleep_max; + u64 block_max; + u64 exec_max; + u64 wait_max; + u64 last_ran; + + u64 sum_exec_runtime; + s64 sum_wait_runtime; + s64 sum_sleep_runtime; + unsigned long wait_runtime_overruns; + unsigned long wait_runtime_underruns; +#ifdef CONFIG_FAIR_GROUP_SCHED + struct sched_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct cfs_rq *cfs_rq; + /* rq "owned" by this entity/group: */ + struct cfs_rq *my_q; +#endif +}; struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ @@ -835,6 +919,8 @@ struct task_struct { int prio, static_prio, normal_prio; struct list_head run_list; struct prio_array *array; + struct sched_class *sched_class; + struct sched_entity se; unsigned short ioprio; #ifdef CONFIG_BLK_DEV_IO_TRACE @@ -1016,6 +1102,9 @@ struct task_struct { /* journalling filesystem info */ void *journal_info; +/* stacked block device info */ + struct bio *bio_list, **bio_tail; + /* VM state */ struct reclaim_state *reclaim_state; @@ -1158,6 +1247,7 @@ static inline void put_task_struct(struct task_struct *t) /* Not implemented yet, only for 486*/ #define PF_STARTING 0x00000002 /* being created */ #define PF_EXITING 0x00000004 /* getting shut down */ +#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ #define PF_DUMPCORE 0x00000200 /* dumped core */ @@ -1178,6 +1268,7 @@ static inline void put_task_struct(struct task_struct *t) #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ +#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ /* * Only the _current_ task can read/write to tsk->flags, but other @@ -1234,6 +1325,14 @@ static inline void idle_task_exit(void) {} extern void sched_idle_next(void); +extern unsigned int sysctl_sched_granularity; +extern unsigned int sysctl_sched_wakeup_granularity; +extern unsigned int sysctl_sched_batch_wakeup_granularity; +extern unsigned int sysctl_sched_stat_granularity; +extern unsigned int sysctl_sched_runtime_limit; +extern unsigned int sysctl_sched_child_runs_first; +extern unsigned int sysctl_sched_features; + #ifdef CONFIG_RT_MUTEXES extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); 
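
Note on the struct sched_class hunk above: the patch turns the scheduler's policy code into an ops table that the core calls through (enqueue_task, dequeue_task, pick_next_task, put_prev_task, load_balance, ...), so a new policy such as CFS can be plugged in without touching the core dispatch paths. The stand-alone userspace sketch below illustrates only that ops-table pattern; the mini_rq / mini_task / fifo_class names are invented for the example and are not part of the kernel.

/*
 * Minimal ops-table sketch: the "core" only ever calls through the hooks,
 * never into the policy implementation directly.
 */
#include <stdio.h>

struct mini_rq;

struct mini_task {
	const char *comm;
	struct mini_task *next;		/* simple FIFO linkage for the example */
};

/* Cut-down analogue of struct sched_class: just two of the hooks. */
struct mini_sched_class {
	void (*enqueue_task)(struct mini_rq *rq, struct mini_task *p);
	struct mini_task *(*pick_next_task)(struct mini_rq *rq);
};

struct mini_rq {
	struct mini_task *head, **tail;
	const struct mini_sched_class *class;
};

static void fifo_enqueue(struct mini_rq *rq, struct mini_task *p)
{
	p->next = NULL;
	*rq->tail = p;
	rq->tail = &p->next;
}

static struct mini_task *fifo_pick_next(struct mini_rq *rq)
{
	struct mini_task *p = rq->head;

	if (p) {
		rq->head = p->next;
		if (!rq->head)
			rq->tail = &rq->head;
	}
	return p;
}

static const struct mini_sched_class fifo_class = {
	.enqueue_task	= fifo_enqueue,
	.pick_next_task	= fifo_pick_next,
};

int main(void)
{
	struct mini_rq rq = { .head = NULL, .tail = &rq.head, .class = &fifo_class };
	struct mini_task a = { .comm = "a" }, b = { .comm = "b" };
	struct mini_task *p;

	/* The "core scheduler" stays policy-agnostic: only hook calls. */
	rq.class->enqueue_task(&rq, &a);
	rq.class->enqueue_task(&rq, &b);
	while ((p = rq.class->pick_next_task(&rq)))
		printf("running %s\n", p->comm);
	return 0;
}

In the real interface the hooks additionally take the runqueue clock (the u64 now parameters) and include the load-balancing and tick callbacks shown in the hunk.
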
@@ -1611,11 +1710,13 @@ static inline int lock_need_resched(spinlock_t *lock) return 0; } -/* Reevaluate whether the task has signals pending delivery. - This is required every time the blocked sigset_t changes. - callers must hold sighand->siglock. */ - -extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t)); +/* + * Reevaluate whether the task has signals pending delivery. + * Wake the task if so. + * This is required every time the blocked sigset_t changes. + * callers must hold sighand->siglock. + */ +extern void recalc_sigpending_and_wake(struct task_struct *t); extern void recalc_sigpending(void); extern void signal_wake_up(struct task_struct *t, int resume_stopped); @@ -1630,10 +1731,7 @@ static inline unsigned int task_cpu(const struct task_struct *p) return task_thread_info(p)->cpu; } -static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) -{ - task_thread_info(p)->cpu = cpu; -} +extern void set_task_cpu(struct task_struct *p, unsigned int cpu); #else
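
Closing note on the SCHED_LOAD_SHIFT / SCHED_LOAD_SCALE and struct load_weight hunks earlier in the patch: load is kept in fixed point with a unit of 1 << 10 = 1024, and each weight carries a precomputed reciprocal (inv_weight) so hot paths can scale runtime deltas by a weight ratio with a multiply and a shift instead of a division. The sketch below is a stand-alone illustration under assumed constants; the 16-bit reciprocal shift and the demo_* names are chosen for the example and are not the kernel's.

#include <stdio.h>
#include <stdint.h>

#define DEMO_LOAD_SHIFT		10
#define DEMO_LOAD_SCALE		(1UL << DEMO_LOAD_SHIFT)	/* unit weight: 1024 */
#define DEMO_WMULT_SHIFT	16				/* reciprocal precision (illustrative) */

struct demo_load_weight {
	unsigned long weight;		/* fixed-point load contribution */
	unsigned long inv_weight;	/* ~ (1 << DEMO_WMULT_SHIFT) / weight */
};

static void demo_set_weight(struct demo_load_weight *lw, unsigned long weight)
{
	lw->weight = weight;
	lw->inv_weight = (1UL << DEMO_WMULT_SHIFT) / weight;
}

/* Scale a raw runtime delta inversely to the weight, without dividing. */
static uint64_t demo_calc_delta(uint64_t delta, const struct demo_load_weight *lw)
{
	return (delta * DEMO_LOAD_SCALE * lw->inv_weight) >> DEMO_WMULT_SHIFT;
}

int main(void)
{
	struct demo_load_weight normal, heavy;

	demo_set_weight(&normal, DEMO_LOAD_SCALE);	/* unit weight */
	demo_set_weight(&heavy, 2 * DEMO_LOAD_SCALE);	/* twice the weight */

	printf("normal: %llu\n", (unsigned long long)demo_calc_delta(1000000, &normal));
	printf("heavy : %llu\n", (unsigned long long)demo_calc_delta(1000000, &heavy));
	return 0;
}

With these numbers a task at twice the unit weight accrues scaled runtime at half the rate (500000 vs 1000000 for the same raw delta), which is the effect the precomputed reciprocal buys without a division in the hot path.
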