From 4530d7ab0fb8d5056b68c376949e2d5c4db7817e Mon Sep 17 00:00:00 2001
From: Dmitry Adamushko
Date: Mon, 15 Oct 2007 17:00:08 +0200
Subject: [PATCH] sched: simplify sched_class::yield_task()

The 'p' (task_struct) parameter in sched_class::yield_task() is
redundant, as the caller is always 'current'. Get rid of it.

    text    data     bss     dec     hex filename
   24341    2734      20   27095    69d7 sched.o.before
   24330    2734      20   27084    69cc sched.o.after

Signed-off-by: Dmitry Adamushko
Signed-off-by: Ingo Molnar
Signed-off-by: Peter Zijlstra
Reviewed-by: Thomas Gleixner
---
 include/linux/sched.h |  2 +-
 kernel/sched.c        |  2 +-
 kernel/sched_fair.c   | 10 +++++-----
 kernel/sched_rt.c     |  4 ++--
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f776a30b40..66169005f0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -858,7 +858,7 @@ struct sched_class {
 
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
-	void (*yield_task) (struct rq *rq, struct task_struct *p);
+	void (*yield_task) (struct rq *rq);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 3b104635a8..e1f784f4b4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4537,7 +4537,7 @@ asmlinkage long sys_sched_yield(void)
 	struct rq *rq = this_rq_lock();
 
 	schedstat_inc(rq, yld_cnt);
-	current->sched_class->yield_task(rq, current);
+	current->sched_class->yield_task(rq);
 
 	/*
 	 * Since we are going to call schedule() anyway, there's
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 827a063652..4dd256d468 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -722,11 +722,11 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  *
  * If compat_yield is turned on then we requeue to the end of the tree.
  */
-static void yield_task_fair(struct rq *rq, struct task_struct *p)
+static void yield_task_fair(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
-	struct sched_entity *rightmost, *se = &p->se;
+	struct sched_entity *rightmost, *se = &rq->curr->se;
 	struct rb_node *parent;
 
 	/*
@@ -741,8 +741,8 @@ static void yield_task_fair(struct rq *rq, struct task_struct *p)
 		 * Dequeue and enqueue the task to update its
 		 * position within the tree:
 		 */
-		dequeue_entity(cfs_rq, &p->se, 0);
-		enqueue_entity(cfs_rq, &p->se, 0);
+		dequeue_entity(cfs_rq, se, 0);
+		enqueue_entity(cfs_rq, se, 0);
 
 		return;
 	}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 45b339f56a..b86944c20f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -59,9 +59,9 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p)
 }
 
 static void
-yield_task_rt(struct rq *rq, struct task_struct *p)
+yield_task_rt(struct rq *rq)
 {
-	requeue_task_rt(rq, p);
+	requeue_task_rt(rq, rq->curr);
 }
 
 /*
-- 
2.39.5
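
A minimal user-space sketch of the convention the patch relies on: the only caller of ->yield_task() is sys_sched_yield(), which always acts on the current task, so the callback can read the yielding task from rq->curr instead of taking it as a parameter. Everything below is a simplified stand-in for illustration, not kernel code; struct task_struct, struct rq, struct sched_class, yield_task_mock() and mock_sched_class are assumptions made for this sketch.

/*
 * Stand-alone mock (assumptions only, not the kernel's definitions):
 * the yield callback needs no task_struct argument because the yielding
 * task is always the one running on the runqueue, i.e. rq->curr.
 */
#include <stdio.h>

struct task_struct {
	const char *comm;               /* task name */
};

struct rq {
	struct task_struct *curr;       /* task currently running on this runqueue */
};

struct sched_class {
	void (*yield_task)(struct rq *rq);  /* post-patch signature */
};

/* hypothetical mock; the real implementations are yield_task_fair()/yield_task_rt() */
static void yield_task_mock(struct rq *rq)
{
	printf("%s yields the CPU\n", rq->curr->comm);
}

static const struct sched_class mock_sched_class = {
	.yield_task = yield_task_mock,
};

int main(void)
{
	struct task_struct me = { .comm = "demo" };
	struct rq rq = { .curr = &me };

	/* mirrors sys_sched_yield(): current->sched_class->yield_task(rq) */
	mock_sched_class.yield_task(&rq);
	return 0;
}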