Comments, suggestions etc highly welcome.
This patch implements an idea from Linus, to automatically create task groups
per tty, to improve desktop interactivity under hefty load such as kbuild. The
feature is enabled at boot by default. The default setting can be changed via
the boot option ttysched=0, and it can be turned on or off on the fly via
echo [01] > /proc/sys/kernel/sched_tty_sched_enabled.
A 100% hog overhead measurement proggy pinned to the same CPU as a make -j10
pert/s: 229 >5484.43us: 41 min: 0.15 max:12069.42 avg:2193.81 sum/s:502382us overhead:50.24%
pert/s: 222 >5652.28us: 43 min: 0.46 max:12077.31 avg:2248.56 sum/s:499181us overhead:49.92%
pert/s: 211 >5809.38us: 43 min: 0.16 max:12064.78 avg:2381.70 sum/s:502538us overhead:50.25%
pert/s: 223 >6147.92us: 43 min: 0.15 max:16107.46 avg:2282.17 sum/s:508925us overhead:50.49%
pert/s: 218 >6252.64us: 43 min: 0.16 max:12066.13 avg:2324.11 sum/s:506656us overhead:50.27%
Signed-off-by: Mike Galbraith <efa...@gmx.de>
---
drivers/char/tty_io.c | 2
include/linux/sched.h | 14 +++++
include/linux/tty.h | 3 +
init/Kconfig | 13 +++++
kernel/sched.c | 9 +++
kernel/sched_tty.c | 128 ++++++++++++++++++++++++++++++++++++++++++++++++++
kernel/sched_tty.h | 7 ++
kernel/sysctl.c | 11 ++++
8 files changed, 186 insertions(+), 1 deletion(-)
Index: linux-2.6.36.git/include/linux/sched.h
===================================================================
--- linux-2.6.36.git.orig/include/linux/sched.h
+++ linux-2.6.36.git/include/linux/sched.h
@@ -1900,6 +1900,20 @@ int sched_rt_handler(struct ctl_table *t
extern unsigned int sysctl_sched_compat_yield;
+#ifdef CONFIG_SCHED_DESKTOP
+int sched_tty_sched_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+extern unsigned int sysctl_sched_tty_sched_enabled;
+
+void tty_sched_create_group(struct tty_struct *tty);
+void tty_sched_destroy_group(struct tty_struct *tty);
+#else
+static inline void tty_sched_create_group(struct tty_struct *tty) { }
+static inline void tty_sched_destroy_group(struct tty_struct *tty) { }
+#endif
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
Index: linux-2.6.36.git/include/linux/tty.h
===================================================================
--- linux-2.6.36.git.orig/include/linux/tty.h
+++ linux-2.6.36.git/include/linux/tty.h
@@ -327,6 +327,9 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
+#ifdef CONFIG_SCHED_DESKTOP
+ struct task_group *tg;
+#endif
};
/* Each of a tty's open files has private_data pointing to tty_file_private */
Index: linux-2.6.36.git/kernel/sched.c
===================================================================
--- linux-2.6.36.git.orig/kernel/sched.c
+++ linux-2.6.36.git/kernel/sched.c
@@ -78,6 +78,7 @@
#include "sched_cpupri.h"
#include "workqueue_sched.h"
+#include "sched_tty.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -612,11 +613,16 @@ static inline int cpu_of(struct rq *rq)
*/
static inline struct task_group *task_group(struct task_struct *p)
{
+ struct task_group *tg;
struct cgroup_subsys_state *css;
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
lockdep_is_held(&task_rq(p)->lock));
- return container_of(css, struct task_group, css);
+ tg = container_of(css, struct task_group, css);
+
+ tty_sched_check_attach(p, &tg);
+
+ return tg;
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -1920,6 +1926,7 @@ static void deactivate_task(struct rq *r
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
+#include "sched_tty.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
Index: linux-2.6.36.git/drivers/char/tty_io.c
===================================================================
--- linux-2.6.36.git.orig/drivers/char/tty_io.c
+++ linux-2.6.36.git/drivers/char/tty_io.c
@@ -185,6 +185,7 @@ void free_tty_struct(struct tty_struct *
{
kfree(tty->write_buf);
tty_buffer_free_all(tty);
+ tty_sched_destroy_group(tty);
kfree(tty);
}
@@ -2823,6 +2824,7 @@ void initialize_tty_struct(struct tty_st
tty->ops = driver->ops;
tty->index = idx;
tty_line_name(driver, idx, tty->name);
+ tty_sched_create_group(tty);
}
/**
Index: linux-2.6.36.git/kernel/sched_tty.h
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_tty.h
@@ -0,0 +1,7 @@
+#ifdef CONFIG_SCHED_DESKTOP
+static inline void
+tty_sched_check_attach(struct task_struct *p, struct task_group **tg);
+#else
+static inline void
+tty_sched_check_attach(struct task_struct *p, struct task_group **tg) { }
+#endif
Index: linux-2.6.36.git/kernel/sched_tty.c
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_tty.c
@@ -0,0 +1,128 @@
+#ifdef CONFIG_SCHED_DESKTOP
+#include <linux/tty.h>
+
+unsigned int __read_mostly sysctl_sched_tty_sched_enabled = 1;
+
+void tty_sched_create_group(struct tty_struct *tty)
+{
+ tty->tg = sched_create_group(&init_task_group);
+ if (IS_ERR(tty->tg)) {
+ tty->tg = &init_task_group;
+ WARN_ON(1);
+ }
+}
+EXPORT_SYMBOL(tty_sched_create_group);
+
+void tty_sched_destroy_group(struct tty_struct *tty)
+{
+ if (tty->tg && tty->tg != &init_task_group)
+ sched_destroy_group(tty->tg);
+}
+EXPORT_SYMBOL(tty_sched_destroy_group);
+
+static inline void
+tty_sched_check_attach(struct task_struct *p, struct task_group **tg)
+{
+ struct tty_struct *tty;
+ int attach = 0, enabled = sysctl_sched_tty_sched_enabled;
+
+ rcu_read_lock();
+ tty = p->signal->tty;
+ if (!tty)
+ goto out_unlock;
+
+ if (enabled && *tg == &root_task_group) {
+ *tg = p->signal->tty->tg;
+ attach = 1;
+ } else if (!enabled && *tg == tty->tg) {
+ *tg = &root_task_group;
+ attach = 1;
+ }
+
+ if (attach && !p->se.on_rq) {
+ p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+ p->se.vruntime += (*tg)->cfs_rq[task_cpu(p)]->min_vruntime;
+ }
+
+out_unlock:
+ rcu_read_unlock();
+}
+
+void tty_sched_move_task(struct task_struct *p, struct task_group *tg)
+{
+ struct sched_entity *se = &p->se;
+ struct rq *rq;
+ unsigned long flags;
+ int on_rq, running, cpu;
+
+ rq = task_rq_lock(p, &flags);
+
+ running = task_current(rq, p);
+ on_rq = se->on_rq;
+ cpu = rq->cpu;
+
+ if (on_rq)
+ dequeue_task(rq, p, 0);
+ if (unlikely(running))
+ p->sched_class->put_prev_task(rq, p);
+
+ if (!on_rq)
+ se->vruntime -= cfs_rq_of(se)->min_vruntime;
+
+ se->cfs_rq = tg->cfs_rq[cpu];
+ se->parent = tg->se[cpu];
+
+ p->rt.rt_rq = tg->rt_rq[cpu];
+ p->rt.parent = tg->rt_se[cpu];
+
+ if (!on_rq)
+ se->vruntime += cfs_rq_of(se)->min_vruntime;
+
+ if (unlikely(running))
+ p->sched_class->set_curr_task(rq);
+ if (on_rq)
+ enqueue_task(rq, p, 0);
+
+ task_rq_unlock(rq, &flags);
+}
+
+int sched_tty_sched_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct task_struct *p, *t;
+ struct task_group *tg;
+ unsigned long flags;
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+
+ read_lock_irqsave(&tasklist_lock, flags);
+
+ rcu_read_lock();
+ for_each_process(p) {
+ tg = task_group(p);
+ tty_sched_move_task(p, tg);
+ list_for_each_entry_rcu(t, &p->thread_group, thread_group) {
+ tty_sched_move_task(t, tg);
+ }
+ }
+ rcu_read_unlock();
+
+ read_unlock_irqrestore(&tasklist_lock, flags);
+
+ return 0;
+}
+
+static int __init setup_tty_sched(char *str)
+{
+ unsigned long val;
+
+ val = simple_strtoul(str, NULL, 0);
+ sysctl_sched_tty_sched_enabled = val ? 1 : 0;
+
+ return 1;
+}
+__setup("ttysched=", setup_tty_sched);
+#endif
Index: linux-2.6.36.git/kernel/sysctl.c
===================================================================
--- linux-2.6.36.git.orig/kernel/sysctl.c
+++ linux-2.6.36.git/kernel/sysctl.c
@@ -384,6 +384,17 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef CONFIG_SCHED_DESKTOP
+ {
+ .procname = "sched_tty_sched_enabled",
+ .data = &sysctl_sched_tty_sched_enabled,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_tty_sched_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
Index: linux-2.6.36.git/init/Kconfig
===================================================================
--- linux-2.6.36.git.orig/init/Kconfig
+++ linux-2.6.36.git/init/Kconfig
@@ -652,6 +652,19 @@ config DEBUG_BLK_CGROUP
endif # CGROUPS
+config SCHED_DESKTOP
+ bool "Desktop centric group scheduling"
+ depends on EXPERIMENTAL
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+ select RT_GROUP_SCHED
+ select BLK_CGROUP
+ help
+ This option optimizes the group scheduler for common desktop workloads,
+ by creating separate per tty groups. This separation of workloads isolates
+ aggressive CPU burners (like build jobs) from desktop applications.
+
config MM_OWNER
bool
--
You might be wanting to exclude RT tasks from the tty groups since there
is no interface to grant them any runtime and such :-)
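For illustration, the kind of filter being suggested could live in the attach
path along these lines (a sketch only, not part of the posted patch; it reuses
the tty_sched_check_attach() name from above):

	static inline void
	tty_sched_check_attach(struct task_struct *p, struct task_group **tg)
	{
		/*
		 * Leave RT (and any other non-CFS) tasks in their original
		 * group; the per-tty groups have no RT runtime allocated.
		 */
		if (p->sched_class != &fair_sched_class)
			return;

		/* ... existing tty lookup and *tg reassignment ... */
	}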
Also, I think tty_sched_move_task and sched_move_task() should be
sharing lots more code -- I recently proposed a fix for
sched_move_task() because the Android people complained, but they
haven't replied back yet..
I don't think you need to disable IRQs for the tasklist lock; in fact I
don't think you need the lock at all.
If you enable tty groups and then scan all the existing tasks, you've
covered them all; new tasks will already be placed right, and dying tasks
we don't care about anyway.
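Concretely, the handler walk without the irqsave variant might look like the
sketch below (same names as the patch above; it assumes, as argued, that the
scan only needs to cover tasks that already exist):

	int sched_tty_sched_handler(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos)
	{
		struct task_struct *p, *t;
		struct task_group *tg;
		int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

		if (ret || !write)
			return ret;

		/* plain read_lock: nothing in this path runs from IRQ context */
		read_lock(&tasklist_lock);
		rcu_read_lock();
		for_each_process(p) {
			tg = task_group(p);
			tty_sched_move_task(p, tg);
			list_for_each_entry_rcu(t, &p->thread_group, thread_group)
				tty_sched_move_task(t, tg);
		}
		rcu_read_unlock();
		read_unlock(&tasklist_lock);

		return 0;
	}

Whether the tasklist_lock is needed at all is a separate question, per the
reasoning above.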
OK, thanks. (No such thing as too paranoid;)
-Mike
Yeah. I wanted to get the RFC out the door, so took a shortcut.
-Mike
Ah, first feedback I've got on that patch, but surely since you created
one that does work we can fix my patch and use the normal path? :-)
:)
> Also, I think tty_sched_move_task and sched_move_task() should be
> sharing lots more code -- I recently proposed a fix for
> sched_move_task() because the Android people complained, but they
> haven't replied back yet..
Yeah. I should be able to just do sched_move_task(), but it doesn't
currently work even with your patch; turning tty_sched on/off can lead
to incredible delays before you get the box back. With virgin source, it's
size infinity for all intents and purposes.
-Mike
On Tue, 2010-10-19 at 11:16 +0200, Mike Galbraith wrote:
> A 100% hog overhead measurement proggy pinned to the same CPU as a make -j10
>
> pert/s: 229 >5484.43us: 41 min: 0.15 max:12069.42 avg:2193.81 sum/s:502382us overhead:50.24%
> pert/s: 222 >5652.28us: 43 min: 0.46 max:12077.31 avg:2248.56 sum/s:499181us overhead:49.92%
> pert/s: 211 >5809.38us: 43 min: 0.16 max:12064.78 avg:2381.70 sum/s:502538us overhead:50.25%
> pert/s: 223 >6147.92us: 43 min: 0.15 max:16107.46 avg:2282.17 sum/s:508925us overhead:50.49%
> pert/s: 218 >6252.64us: 43 min: 0.16 max:12066.13 avg:2324.11 sum/s:506656us overhead:50.27%
The same load without per tty task groups.
pert/s: 31 >40475.37us: 3 min: 0.37 max:48103.60 avg:29573.74 sum/s:916786us overhead:90.24%
pert/s: 23 >41237.70us: 12 min: 0.36 max:56010.39 avg:40187.01 sum/s:924301us overhead:91.99%
pert/s: 24 >42150.22us: 12 min: 8.86 max:61265.91 avg:39459.91 sum/s:947038us overhead:92.20%
pert/s: 26 >42344.91us: 11 min: 3.83 max:52029.60 avg:36164.70 sum/s:940282us overhead:91.12%
pert/s: 24 >44262.90us: 14 min: 5.05 max:82735.15 avg:40314.33 sum/s:967544us overhead:92.22%
(max and avg above are in usecs; sum/s and overhead are what the competition got)
Average service latency is an order of magnitude better with tty_sched.
(Imagine that pert is Xorg or whatnot instead)
Using Mathieu Desnoyers' wakeup-latency testcase (attached):
With taskset -c 3 make -j 10 running..
taskset -c 3 ./wakeup-latency& sleep 30;killall wakeup-latency
without:
maximum latency: 42963.2 µs
average latency: 9077.0 µs
missed timer events: 0
with:
maximum latency: 4160.7 µs
average latency: 149.4 µs
missed timer events: 0
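(The wakeup-latency testcase itself was attached to the mail and isn't
reproduced in this thread; the idea is roughly the sketch below, where the
10ms period, sample count and "missed" criterion are assumptions: arm a
periodic absolute timer and record how late each wakeup actually is.)

	#include <stdio.h>
	#include <time.h>

	#define PERIOD_NS	10000000	/* 10ms period, assumed */
	#define SAMPLES		3000

	int main(void)
	{
		struct timespec next, now;
		double lat_us, max_us = 0.0, sum_us = 0.0;
		long missed = 0;
		int i;

		clock_gettime(CLOCK_MONOTONIC, &next);
		for (i = 0; i < SAMPLES; i++) {
			next.tv_nsec += PERIOD_NS;
			while (next.tv_nsec >= 1000000000L) {
				next.tv_nsec -= 1000000000L;
				next.tv_sec++;
			}
			clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL);
			clock_gettime(CLOCK_MONOTONIC, &now);

			/* how far past the intended wakeup time did we run? */
			lat_us = (now.tv_sec - next.tv_sec) * 1e6 +
				 (now.tv_nsec - next.tv_nsec) / 1e3;
			sum_us += lat_us;
			if (lat_us > max_us)
				max_us = lat_us;
			if (lat_us * 1000.0 > PERIOD_NS)
				missed++;
		}
		printf("maximum latency: %.1f us\n", max_us);
		printf("average latency: %.1f us\n", sum_us / SAMPLES);
		printf("missed timer events: %ld\n", missed);
		return 0;
	}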
Patch makes a big difference in desktop feel under hefty load here.
-Mike
> Using Mathieu Desnoyers' wakeup-latency testcase (attached):
>
> With taskset -c 3 make -j 10 running..
>
> taskset -c 3 ./wakeup-latency& sleep 30;killall wakeup-latency
>
> without:
> maximum latency: 42963.2 µs
> average latency: 9077.0 µs
> missed timer events: 0
>
> with:
> maximum latency: 4160.7 µs
> average latency: 149.4 µs
> missed timer events: 0
>
> Patch makes a big difference in desktop feel under hefty load here.
That's really nice!
Could this feature realistically do block IO isolation as well? It's always annoying
when some big IO job is making the desktop jerky. Especially as your patch is
selecting the block cgroup feature already:
+ select BLK_CGROUP
Thanks,
Ingo
> Could this feature realistically do block IO isolation as well? It's always annoying
> when some big IO job is making the desktop jerky. Especially as your patch is
> selecting the block cgroup feature already:
>
> + select BLK_CGROUP
I know my cgroup pgid config helps a bunch with rummaging in my email
while other IO is going on. I've been attributing that to BLK_CGROUP,
but have no proof.
-Mike
Yes, I was going to complain that the numbers in the commit message
made no sense without something to compare the numbers to.
> The same load without per tty task groups.
Very impressive. This definitely looks like something people will notice.
That said, I do think we should think carefully about calling this a
"tty" feature. I think we might want to leave the door open to other
heuristics than _just_ the tty group. I think the tty group approach
is wonderful for traditional Unix loads in a desktop environment, but
I suspect we might hit issues with IDEs etc too. I don't know if we
can notice things like that automatically, but I think it's worth
thinking about.
So I think the patch looks pretty good, and the numbers seem to look
just stunningly so, but I'd like to name the feature more along the
lines of "automatic process group scheduling" rather than about tty's
per se.
And you actually did that for the Kconfig option, which makes me quite happy.
The one other thing I do wonder about is how noticeable the group
scheduling overhead is. If people compare with a non-CGROUP_SCHED
kernel, will a desktop-optimized kernel suddenly have horrible pipe
latency due to much higher scheduling cost? Right now that whole
feature is hidden by EXPERIMENTAL, I don't know how much it hurts, and
I never timed it when I tried it out long ago..
Linus
> So I think the patch looks pretty good, and the numbers seem to look
> just stunningly so, but I'd like to name the feature more along the
> lines of "automatic process group scheduling" rather than about tty's
> per se.
Oh, absolutely, that's what it's all about really. What I'd _like_ is
to get per process group scheduling working on the cheap..ish. Your
idea of tty cgroups looked much simpler though, so I figured that would
be a great place to start. It turned out to be much simpler than I
thought it would be, which is encouraging, and it works well in testing
(so far that is).
> And you actually did that for the Kconfig option, which makes me quite happy.
(Ingo's input.. spot on)
> The one other thing I do wonder about is how noticeable the group
> scheduling overhead is.
Very noticeable, cgroups is far from free. It would make no sense for a
performance freak to even think about it. I don't run cgroup enabled
kernels usually, and generally strip to the bone because I favor
throughput very very heavily, but when I look at the desktop under load,
the cost/performance trade-off ~seems to work out.
> If people compare with a non-CGROUP_SCHED
> kernel, will a desktop-optimized kernel suddenly have horrible pipe
> latency due to much higher scheduling cost? Right now that whole
> feature is hidden by EXPERIMENTAL, I don't know how much it hurts, and
> I never timed it when I tried it out long ago..
The scheduling cost is quite high. But realistically, the cost of a
distro kernel with full featured network stack is (much) higher. I
seriously doubt the cost of cgroups would be noticed by the typical
_desktop_ user. Overall latencies for any switchy microbenchmark will
certainly be considerably higher with the feature enabled.
-Mike
Q/D test of kernels w/wo, with same .config using pipe-test (pure sched)
gives on my box ~590khz with tty_sched active, 620khz without cgroups
active in the same kernel/config without the patch. Last time I measured a
stripped-down config (not long ago, but not yesterday either) it gave a max
ctx rate ~690khz on this box.
(note: very Q, very D numbers, no variance testing, ballpark)
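(pipe-test itself isn't reproduced here; it's essentially two tasks bouncing
a byte back and forth over a pair of pipes, so the reported rate is almost
pure scheduler cost. A rough sketch of the idea, with the iteration count an
arbitrary assumption:)

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/time.h>

	#define LOOPS	1000000L

	int main(void)
	{
		int ping[2], pong[2];
		struct timeval t0, t1;
		double secs;
		char c = 0;
		long i;

		if (pipe(ping) || pipe(pong))
			return 1;

		if (fork() == 0) {
			/* child: echo every byte straight back */
			close(ping[1]);
			close(pong[0]);
			while (read(ping[0], &c, 1) == 1)
				if (write(pong[1], &c, 1) != 1)
					break;
			exit(0);
		}
		close(ping[0]);
		close(pong[1]);

		gettimeofday(&t0, NULL);
		for (i = 0; i < LOOPS; i++) {
			if (write(ping[1], &c, 1) != 1 ||
			    read(pong[0], &c, 1) != 1)
				return 1;
		}
		gettimeofday(&t1, NULL);

		secs = (t1.tv_sec - t0.tv_sec) + (t1.tv_usec - t0.tv_usec) / 1e6;
		/* two switches per round trip when both tasks share one CPU */
		printf("%.0f KHz ctx rate\n", 2.0 * LOOPS / secs / 1000.0);
		return 0;
	}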
> > On Tue, 2010-10-19 at 08:28 -0700, Linus Torvalds wrote:
> > >
> > > If people compare with a non-CGROUP_SCHED
> > > kernel, will a desktop-optimized kernel suddenly have horrible pipe
> > > latency due to much higher scheduling cost? Right now that whole
> > > feature is hidden by EXPERIMENTAL, I don't know how much it hurts, and
> > > I never timed it when I tried it out long ago..
>
> Q/D test of kernels w/wo, with same .config using pipe-test (pure sched) gives on
> my box ~590khz with tty_sched active, 620khz without cgroups active in the same
> kernel/config without the patch. Last time I measured a stripped-down config (not long
> ago, but not yesterday either) it gave a max ctx rate ~690khz on this box.
>
> (note: very Q, very D numbers, no variance testing, ballpark)
That's 5% overhead in context switches. Definitely not in the 'horrible' category.
This would be a rather tempting item for 2.6.37 ... especially as it really mainly
reuses existing group scheduling functionality, in a clever way.
Mind doing more of the tty->desktop renames/generalizations as Linus suggested, and
resend the patch?
I'd also suggest moving it out of EXPERIMENTAL - we don't really do that for core
kernel features as most distros enable CONFIG_EXPERIMENTAL so it's a rather
meaningless distinction. Since the feature is default-n, people will get the old
scheduler by default but can also choose this desktop-centric scheduling mode.
I'd even argue to make it default-y, because this patch clearly cures a form of
kbuild cancer.
Thanks,
Ingo
> Comments, suggestions etc highly welcome.
I've tested your patch and it runs smoothly on my machine.
However I had several NULL pointer dereference BUGs that happened when I
left X or rebooted my system. I think this is caused by your patch.
There is nothing in the logs unfortunately, but I scribbled down the
following by hand (not the whole trace, I'm too lazy):
BUG: unable to handle NULL pointer dereference at 0..038
IP: pick_next_task_fair 0xa7/0x1a0
...
Call Trace: schedule
...
Hm. Not much I can do without the trace, but thanks for testing and
reporting anyway, guess I need to do some heavy stress testing. I'm
re-writing it as I write this anyway.
thanks,
-Mike
Actually, your patch works just peachy; my fiddling with sleeper
vruntime at attach time was a dainbramaged thing to do.
-Mike
> Mind doing more of the tty->desktop renames/generalizations as Linus suggested, and
> resend the patch?
Here she comes. Better/Worse?
Changes:
- tty->autogroup.
- only autogroup fair class tasks.
- removed dainbramaged sleeper vruntime twiddling.
- removed paranoid locking.
- removed noop detach code.
> I'd also suggest moving it out of EXPERIMENTAL - we don't really do that for core
> kernel features as most distros enable CONFIG_EXPERIMENTAL so it's a rather
> meaningless distinction. Since the feature is default-n, people will get the old
> scheduler by default but can also choose this desktop-centric scheduling mode.
>
> I'd even argue to make it default-y, because this patch clearly cures a form of
> kbuild cancer.
You top dogs can make the default call.. if it's accepted that is ;-)
Marcus: care to try the below? Works fine for me (but so did first
cut). It depends on the attached patch, and applied to virgin shiny new
2.6.36.
A recurring complaint from CFS users is that parallel kbuild has a negative
impact on desktop interactivity. This patch implements an idea from Linus,
to automatically create task groups. This patch only implements Linus' per
tty task group suggestion, and only for fair class tasks, but leaves the way
open for enhancement.
How it works: at tty alloc/dealloc time, a task group is created/destroyed,
so there is always a task group active per tty. When we select a runqueue,
if the task has a tty association, and no task group, attach it to a
per tty autogroup on the fly.
The feature is enabled at boot by default if CONFIG_SCHED_AUTOGROUP is
selected, but can be disabled via the boot option noautogroup, and can
also be turned on/off on the fly via..
echo [01] > /proc/sys/kernel/sched_autogroup_enabled.
..which will automatically move tasks to/from the root task group.
Some numbers.
A 100% hog overhead measurement proggy pinned to the same CPU as a make -j10
About measurement proggy:
pert/sec = perturbations/sec
min/max/avg = scheduler service latencies in usecs
sum/s = time accrued by the competition per sample period (1 sec here)
overhead = %CPU received by the competition per sample period
pert/s: 31 >40475.37us: 3 min: 0.37 max:48103.60 avg:29573.74 sum/s:916786us overhead:90.24%
pert/s: 23 >41237.70us: 12 min: 0.36 max:56010.39 avg:40187.01 sum/s:924301us overhead:91.99%
pert/s: 24 >42150.22us: 12 min: 8.86 max:61265.91 avg:39459.91 sum/s:947038us overhead:92.20%
pert/s: 26 >42344.91us: 11 min: 3.83 max:52029.60 avg:36164.70 sum/s:940282us overhead:91.12%
pert/s: 24 >44262.90us: 14 min: 5.05 max:82735.15 avg:40314.33 sum/s:967544us overhead:92.22%
Same load with this patch applied.
pert/s: 229 >5484.43us: 41 min: 0.15 max:12069.42 avg:2193.81 sum/s:502382us overhead:50.24%
pert/s: 222 >5652.28us: 43 min: 0.46 max:12077.31 avg:2248.56 sum/s:499181us overhead:49.92%
pert/s: 211 >5809.38us: 43 min: 0.16 max:12064.78 avg:2381.70 sum/s:502538us overhead:50.25%
pert/s: 223 >6147.92us: 43 min: 0.15 max:16107.46 avg:2282.17 sum/s:508925us overhead:50.49%
pert/s: 218 >6252.64us: 43 min: 0.16 max:12066.13 avg:2324.11 sum/s:506656us overhead:50.27%
Average service latency is an order of magnitude better with autogroup.
(Imagine that pert were Xorg or whatnot instead)
Using Mathieu Desnoyers' wakeup-latency testcase:
With taskset -c 3 make -j 10 running..
taskset -c 3 ./wakeup-latency& sleep 30;killall wakeup-latency
without:
maximum latency: 42963.2 µs
average latency: 9077.0 µs
missed timer events: 0
with:
maximum latency: 4160.7 µs
average latency: 149.4 µs
missed timer events: 0
Signed-off-by: Mike Galbraith <efa...@gmx.de>
---
drivers/char/tty_io.c | 2 +
include/linux/sched.h | 14 ++++++++
include/linux/tty.h | 3 +
init/Kconfig | 13 ++++++++
kernel/sched.c | 9 ++++-
kernel/sched_autogroup.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++
kernel/sched_autogroup.h | 7 ++++
kernel/sysctl.c | 11 ++++++
8 files changed, 134 insertions(+), 1 deletion(-)
Index: linux-2.6.36.git/include/linux/sched.h
===================================================================
--- linux-2.6.36.git.orig/include/linux/sched.h
+++ linux-2.6.36.git/include/linux/sched.h
@@ -1900,6 +1900,20 @@ int sched_rt_handler(struct ctl_table *t
extern unsigned int sysctl_sched_compat_yield;
+#ifdef CONFIG_SCHED_AUTOGROUP
+int sched_autogroup_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+extern unsigned int sysctl_sched_autogroup_enabled;
+
+void sched_autogroup_create_tty(struct tty_struct *tty);
+void sched_autogroup_destroy_tty(struct tty_struct *tty);
+#else
+static inline void sched_autogroup_create_tty(struct tty_struct *tty) { }
+static inline void sched_autogroup_destroy_tty(struct tty_struct *tty) { }
+#endif
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
Index: linux-2.6.36.git/include/linux/tty.h
===================================================================
--- linux-2.6.36.git.orig/include/linux/tty.h
+++ linux-2.6.36.git/include/linux/tty.h
@@ -327,6 +327,9 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
+#ifdef CONFIG_SCHED_AUTOGROUP
+ struct task_group *tg;
+#endif
};
/* Each of a tty's open files has private_data pointing to tty_file_private */
Index: linux-2.6.36.git/kernel/sched.c
===================================================================
--- linux-2.6.36.git.orig/kernel/sched.c
+++ linux-2.6.36.git/kernel/sched.c
@@ -78,6 +78,7 @@
#include "sched_cpupri.h"
#include "workqueue_sched.h"
+#include "sched_autogroup.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -612,11 +613,16 @@ static inline int cpu_of(struct rq *rq)
*/
static inline struct task_group *task_group(struct task_struct *p)
{
+ struct task_group *tg;
struct cgroup_subsys_state *css;
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
lockdep_is_held(&task_rq(p)->lock));
- return container_of(css, struct task_group, css);
+ tg = container_of(css, struct task_group, css);
+
+ autogroup_check_attach(p, &tg);
+
+ return tg;
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -1920,6 +1926,7 @@ static void deactivate_task(struct rq *r
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
+#include "sched_autogroup.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
Index: linux-2.6.36.git/drivers/char/tty_io.c
===================================================================
--- linux-2.6.36.git.orig/drivers/char/tty_io.c
+++ linux-2.6.36.git/drivers/char/tty_io.c
@@ -185,6 +185,7 @@ void free_tty_struct(struct tty_struct *
{
kfree(tty->write_buf);
tty_buffer_free_all(tty);
+ sched_autogroup_destroy_tty(tty);
kfree(tty);
}
@@ -2823,6 +2824,7 @@ void initialize_tty_struct(struct tty_st
tty->ops = driver->ops;
tty->index = idx;
tty_line_name(driver, idx, tty->name);
+ sched_autogroup_create_tty(tty);
}
/**
Index: linux-2.6.36.git/kernel/sched_autogroup.h
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_autogroup.h
@@ -0,0 +1,7 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+static inline void
+autogroup_check_attach(struct task_struct *p, struct task_group **tg);
+#else
+static inline void
+autogroup_check_attach(struct task_struct *p, struct task_group **tg) { }
+#endif
Index: linux-2.6.36.git/kernel/sysctl.c
===================================================================
--- linux-2.6.36.git.orig/kernel/sysctl.c
+++ linux-2.6.36.git/kernel/sysctl.c
@@ -384,6 +384,17 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef CONFIG_SCHED_AUTOGROUP
+ {
+ .procname = "sched_autogroup_enabled",
+ .data = &sysctl_sched_autogroup_enabled,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_autogroup_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
Index: linux-2.6.36.git/init/Kconfig
===================================================================
--- linux-2.6.36.git.orig/init/Kconfig
+++ linux-2.6.36.git/init/Kconfig
@@ -652,6 +652,19 @@ config DEBUG_BLK_CGROUP
endif # CGROUPS
+config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+ select BLK_CGROUP
+ help
+ This option optimizes the scheduler for common desktop workloads by
+ automatically creating and populating task groups. This separation
+ of workloads isolates aggressive CPU burners (like build jobs) from
+ desktop applications. Task group autogeneration is currently based
+ upon task tty association.
+
config MM_OWNER
bool
Index: linux-2.6.36.git/kernel/sched_autogroup.c
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_autogroup.c
@@ -0,0 +1,76 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+#include <linux/tty.h>
+
+unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+
+void sched_autogroup_create_tty(struct tty_struct *tty)
+{
+ tty->tg = sched_create_group(&init_task_group);
+ if (IS_ERR(tty->tg)) {
+ tty->tg = &init_task_group;
+ WARN_ON(1);
+ }
+}
+EXPORT_SYMBOL(sched_autogroup_create_tty);
+
+void sched_autogroup_destroy_tty(struct tty_struct *tty)
+{
+ if (tty->tg && tty->tg != &init_task_group)
+ sched_destroy_group(tty->tg);
+}
+EXPORT_SYMBOL(sched_autogroup_destroy_tty);
+
+static void
+autogroup_attach_tty(struct task_struct *p, struct task_group **tg)
+{
+ struct tty_struct *tty = p->signal->tty;
+
+ if (!tty)
+ return;
+
+ *tg = p->signal->tty->tg;
+}
+
+static inline void
+autogroup_check_attach(struct task_struct *p, struct task_group **tg)
+{
+ if (!sysctl_sched_autogroup_enabled || *tg != &root_task_group ||
+ p->sched_class != &fair_sched_class)
+ return;
+
+ rcu_read_lock();
+
+ autogroup_attach_tty(p, tg);
+
+ rcu_read_unlock();
+}
+
+int sched_autogroup_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct task_struct *p, *t;
+ struct task_group *tg;
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+
+ for_each_process(p) {
+ tg = task_group(p);
+ sched_move_task(p);
+ list_for_each_entry_rcu(t, &p->thread_group, thread_group) {
+ sched_move_task(t);
+ }
+ }
+
+ return 0;
+}
+
+static int __init setup_autogroup(char *str)
+{
+ sysctl_sched_autogroup_enabled = 0;
+
+ return 1;
+}
+__setup("noautogroup", setup_autogroup);
+#endif
> On Wed, 2010-10-20 at 04:56 +0200, Ingo Molnar wrote:
>
> > Mind doing more of the tty->desktop renames/generalizations as Linus suggested, and
> > resend the patch?
>
> Here she comes. Better/Worse?
>
> Changes:
> - tty->autogroup.
> - only autogroup fair class tasks.
> - removed dainbramaged sleeper vruntime twiddling.
> - removed paranoid locking.
> - removed noop detach code.
I really like the new 'autogroup scheduler' name - as we really don't want to turn
this into anything but an intelligent grouping thing. Via the naming we can resist
heuristics for example.
Btw., how does Xorg fare with this? Can we remove sleeper fairness for example and
simplify other bits of the CFS logic as a side-effect?
Works fine interactively, but I still get the same kernel BUG at reboot
time as before. Would a photo of the trace help you?
--
Markus
> Btw., how does Xorg fare with this? Can we remove sleeper fairness for example and
> simplify other bits of the CFS logic as a side-effect?
Works a treat for me. As I write this in evolution, I have amarok
playing with a visualization and a make -j100 running. Song switch is
instant, visualization is nice and smooth despite unaccelerated Xorg.
We can't whack fair sleepers though, not without inventing a new
preemption model to take its place.
-Mike
Odd. Yeah, please send me the photo and your .config.
-Mike
Can this issue be solved by a userspace daemon (think of libpam-systemd,
libpam-cgroup)? Perhaps tty grouping in kernel space is more
efficient.
Am I missing something?
Marco
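(For reference, a userspace grouper in the libpam-cgroup style boils down to
moving session members into a cgroup from outside the kernel, roughly as
sketched below; the /cgroup/cpu mount point and the "session-42" group name
are assumptions. The in-kernel approach avoids any such daemon/pam plumbing
and catches every task automatically.)

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/types.h>

	/* move one task into an existing cgroup by writing its pid to the
	 * group's tasks file */
	static int move_pid_to_group(const char *group, pid_t pid)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "/cgroup/cpu/%s/tasks", group);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%d\n", (int)pid);
		return fclose(f);
	}

	int main(void)
	{
		/* e.g. put the calling session's shell into a per-session group */
		return move_pid_to_group("session-42", getpid());
	}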
OK, I'll queue it with a Tested-by from you. Thanks!
Hi Mike,
This per-tty task grouping approach looks very promising. I'll give it a spin
when I find the time. Meanwhile, a little question about locking here: how is
the read lock supposed to protect against p->signal (and p->signal->tty)
modifications? What's the locking scheme here? Maybe simple
rcu_dereference calls are missing, or maybe tsk->sighand->siglock might be
required. In all cases, I feel something is missing there.
Thanks!
Mathieu
--
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com
> Meanwhile, a little question about locking here: how is
> the read lock supposed to protect against p->signal (and p->signal->tty)
> modifications? What's the locking scheme here? Maybe simple
> rcu_dereference calls are missing, or maybe tsk->sighand->siglock might be
> required. In all cases, I feel something is missing there.
Oleg, could you comment?
My assumption is that no additional locking is needed. The tty is
refcounted, dropped in release_task()->__exit_signal(), at which point
the task is unhashed, is history. The tty can't go away until the last
task referencing it goes away.
-Mike