Google Groups no longer supports new Usenet posts or subscriptions. Historical content remains viewable.
Dismiss

[RFC/RFT PATCH] sched: automated per tty task groups

85 views
Skip to first unread message

Mike Galbraith

unread,
Oct 19, 2010, 5:20:02 AM10/19/10
to
Greetings,

Comments, suggestions etc highly welcome.

This patch implements an idea from Linus, to automatically create task groups
per tty, to improve desktop interactivity under hefty load such as kbuild. The
feature is enabled from boot by default. The default setting can be changed via
the boot option ttysched=0, and the feature can be turned on or off on the fly via
echo [01] > /proc/sys/kernel/sched_tty_sched_enabled.

A 100% hog overhead measurement proggy pinned to the same CPU as a make -j10

pert/s: 229 >5484.43us: 41 min: 0.15 max:12069.42 avg:2193.81 sum/s:502382us overhead:50.24%
pert/s: 222 >5652.28us: 43 min: 0.46 max:12077.31 avg:2248.56 sum/s:499181us overhead:49.92%
pert/s: 211 >5809.38us: 43 min: 0.16 max:12064.78 avg:2381.70 sum/s:502538us overhead:50.25%
pert/s: 223 >6147.92us: 43 min: 0.15 max:16107.46 avg:2282.17 sum/s:508925us overhead:50.49%
pert/s: 218 >6252.64us: 43 min: 0.16 max:12066.13 avg:2324.11 sum/s:506656us overhead:50.27%

Signed-off-by: Mike Galbraith <efa...@gmx.de>

---
drivers/char/tty_io.c | 2
include/linux/sched.h | 14 +++++
include/linux/tty.h | 3 +
init/Kconfig | 13 +++++
kernel/sched.c | 9 +++
kernel/sched_tty.c | 128 ++++++++++++++++++++++++++++++++++++++++++++++++++
kernel/sched_tty.h | 7 ++
kernel/sysctl.c | 11 ++++
8 files changed, 186 insertions(+), 1 deletion(-)

Index: linux-2.6.36.git/include/linux/sched.h
===================================================================
--- linux-2.6.36.git.orig/include/linux/sched.h
+++ linux-2.6.36.git/include/linux/sched.h
@@ -1900,6 +1900,20 @@ int sched_rt_handler(struct ctl_table *t

extern unsigned int sysctl_sched_compat_yield;

+#ifdef CONFIG_SCHED_DESKTOP
+int sched_tty_sched_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+extern unsigned int sysctl_sched_tty_sched_enabled;
+
+void tty_sched_create_group(struct tty_struct *tty);
+void tty_sched_destroy_group(struct tty_struct *tty);
+#else
+static inline void tty_sched_create_group(struct tty_struct *tty) { }
+static inline void tty_sched_destroy_group(struct tty_struct *tty) { }
+#endif
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
Index: linux-2.6.36.git/include/linux/tty.h
===================================================================
--- linux-2.6.36.git.orig/include/linux/tty.h
+++ linux-2.6.36.git/include/linux/tty.h
@@ -327,6 +327,9 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
+#ifdef CONFIG_SCHED_DESKTOP
+ struct task_group *tg;
+#endif
};

/* Each of a tty's open files has private_data pointing to tty_file_private */
Index: linux-2.6.36.git/kernel/sched.c
===================================================================
--- linux-2.6.36.git.orig/kernel/sched.c
+++ linux-2.6.36.git/kernel/sched.c
@@ -78,6 +78,7 @@

#include "sched_cpupri.h"
#include "workqueue_sched.h"
+#include "sched_tty.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -612,11 +613,16 @@ static inline int cpu_of(struct rq *rq)
*/
static inline struct task_group *task_group(struct task_struct *p)
{
+ struct task_group *tg;
struct cgroup_subsys_state *css;

css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
lockdep_is_held(&task_rq(p)->lock));
- return container_of(css, struct task_group, css);
+ tg = container_of(css, struct task_group, css);
+
+ tty_sched_check_attach(p, &tg);
+
+ return tg;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -1920,6 +1926,7 @@ static void deactivate_task(struct rq *r
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
+#include "sched_tty.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
Index: linux-2.6.36.git/drivers/char/tty_io.c
===================================================================
--- linux-2.6.36.git.orig/drivers/char/tty_io.c
+++ linux-2.6.36.git/drivers/char/tty_io.c
@@ -185,6 +185,7 @@ void free_tty_struct(struct tty_struct *
{
kfree(tty->write_buf);
tty_buffer_free_all(tty);
+ tty_sched_destroy_group(tty);
kfree(tty);
}

@@ -2823,6 +2824,7 @@ void initialize_tty_struct(struct tty_st
tty->ops = driver->ops;
tty->index = idx;
tty_line_name(driver, idx, tty->name);
+ tty_sched_create_group(tty);
}

/**
Index: linux-2.6.36.git/kernel/sched_tty.h
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_tty.h
@@ -0,0 +1,7 @@
+#ifdef CONFIG_SCHED_DESKTOP
+static inline void
+tty_sched_check_attach(struct task_struct *p, struct task_group **tg);
+#else
+static inline void
+tty_sched_check_attach(struct task_struct *p, struct task_group **tg) { }
+#endif
Index: linux-2.6.36.git/kernel/sched_tty.c
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_tty.c
@@ -0,0 +1,128 @@
+#ifdef CONFIG_SCHED_DESKTOP
+#include <linux/tty.h>
+
+unsigned int __read_mostly sysctl_sched_tty_sched_enabled = 1;
+
+void tty_sched_create_group(struct tty_struct *tty)
+{
+ tty->tg = sched_create_group(&init_task_group);
+ if (IS_ERR(tty->tg)) {
+ tty->tg = &init_task_group;
+ WARN_ON(1);
+ }
+}
+EXPORT_SYMBOL(tty_sched_create_group);
+
+void tty_sched_destroy_group(struct tty_struct *tty)
+{
+ if (tty->tg && tty->tg != &init_task_group)
+ sched_destroy_group(tty->tg);
+}
+EXPORT_SYMBOL(tty_sched_destroy_group);
+
+static inline void
+tty_sched_check_attach(struct task_struct *p, struct task_group **tg)
+{
+ struct tty_struct *tty;
+ int attach = 0, enabled = sysctl_sched_tty_sched_enabled;
+
+ rcu_read_lock();
+ tty = p->signal->tty;
+ if (!tty)
+ goto out_unlock;
+
+ if (enabled && *tg == &root_task_group) {
+ *tg = p->signal->tty->tg;
+ attach = 1;
+ } else if (!enabled && *tg == tty->tg) {
+ *tg = &root_task_group;
+ attach = 1;
+ }
+
+ if (attach && !p->se.on_rq) {
+ p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+ p->se.vruntime += (*tg)->cfs_rq[task_cpu(p)]->min_vruntime;
+ }
+
+out_unlock:
+ rcu_read_unlock();
+}
+
+void tty_sched_move_task(struct task_struct *p, struct task_group *tg)
+{
+ struct sched_entity *se = &p->se;
+ struct rq *rq;
+ unsigned long flags;
+ int on_rq, running, cpu;
+
+ rq = task_rq_lock(p, &flags);
+
+ running = task_current(rq, p);
+ on_rq = se->on_rq;
+ cpu = rq->cpu;
+
+ if (on_rq)
+ dequeue_task(rq, p, 0);
+ if (unlikely(running))
+ p->sched_class->put_prev_task(rq, p);
+
+ if (!on_rq)
+ se->vruntime -= cfs_rq_of(se)->min_vruntime;
+
+ se->cfs_rq = tg->cfs_rq[cpu];
+ se->parent = tg->se[cpu];
+
+ p->rt.rt_rq = tg->rt_rq[cpu];
+ p->rt.parent = tg->rt_se[cpu];
+
+ if (!on_rq)
+ se->vruntime += cfs_rq_of(se)->min_vruntime;
+
+ if (unlikely(running))
+ p->sched_class->set_curr_task(rq);
+ if (on_rq)
+ enqueue_task(rq, p, 0);
+
+ task_rq_unlock(rq, &flags);
+}
+
+int sched_tty_sched_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct task_struct *p, *t;
+ struct task_group *tg;
+ unsigned long flags;
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+
+ read_lock_irqsave(&tasklist_lock, flags);
+
+ rcu_read_lock();
+ for_each_process(p) {
+ tg = task_group(p);
+ tty_sched_move_task(p, tg);
+ list_for_each_entry_rcu(t, &p->thread_group, thread_group) {
+ tty_sched_move_task(t, tg);
+ }
+ }
+ rcu_read_unlock();
+
+ read_unlock_irqrestore(&tasklist_lock, flags);
+
+ return 0;
+}
+
+static int __init setup_tty_sched(char *str)
+{
+ unsigned long val;
+
+ val = simple_strtoul(str, NULL, 0);
+ sysctl_sched_tty_sched_enabled = val ? 1 : 0;
+
+ return 1;
+}
+__setup("ttysched=", setup_tty_sched);
+#endif
Index: linux-2.6.36.git/kernel/sysctl.c
===================================================================
--- linux-2.6.36.git.orig/kernel/sysctl.c
+++ linux-2.6.36.git/kernel/sysctl.c
@@ -384,6 +384,17 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef CONFIG_SCHED_DESKTOP
+ {
+ .procname = "sched_tty_sched_enabled",
+ .data = &sysctl_sched_tty_sched_enabled,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_tty_sched_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
Index: linux-2.6.36.git/init/Kconfig
===================================================================
--- linux-2.6.36.git.orig/init/Kconfig
+++ linux-2.6.36.git/init/Kconfig
@@ -652,6 +652,19 @@ config DEBUG_BLK_CGROUP

endif # CGROUPS

+config SCHED_DESKTOP
+ bool "Desktop centric group scheduling"
+ depends on EXPERIMENTAL
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+ select RT_GROUP_SCHED
+ select BLK_CGROUP
+ help
+ This option optimizes the group scheduler for common desktop workloads,
+ by creating separate per tty groups. This separation of workloads isolates
+ aggressive CPU burners (like build jobs) from desktop applications.
+
config MM_OWNER
bool


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majo...@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/

Peter Zijlstra

unread,
Oct 19, 2010, 5:30:01 AM10/19/10
to
On Tue, 2010-10-19 at 11:16 +0200, Mike Galbraith wrote:
> Greetings,
>
> Comments, suggestions etc highly welcome.

You might be wanting to exclude RT tasks from the tty groups since there
is no interface to grant them any runtime and such :-)

Also, I think tty_sched_move_task and sched_move_task() should be
sharing lots more code -- I recently proposed a fix for
sched_move_task() because the Android people complained, but they
haven't replied back yet..

Peter Zijlstra

unread,
Oct 19, 2010, 5:40:03 AM10/19/10
to
On Tue, 2010-10-19 at 11:16 +0200, Mike Galbraith wrote:
> + read_lock_irqsave(&tasklist_lock, flags);
> +
> + rcu_read_lock();
> + for_each_process(p) {
> + tg = task_group(p);
> + tty_sched_move_task(p, tg);
> + list_for_each_entry_rcu(t, &p->thread_group, thread_group) {
> + tty_sched_move_task(t, tg);
> + }
> + }
> + rcu_read_unlock();
> +
> + read_unlock_irqrestore(&tasklist_lock, flags);

I don't think you need to disable IRQs for tasklist lock, nor do I think
you actually need it.

If you enable tty groups and then scan all the existing tasks you've
covered them all, new tasks will already be placed right, dying tasks we
don't care about anyway.

Mike Galbraith

unread,
Oct 19, 2010, 5:50:01 AM10/19/10
to
On Tue, 2010-10-19 at 11:29 +0200, Peter Zijlstra wrote:
> On Tue, 2010-10-19 at 11:16 +0200, Mike Galbraith wrote:
> > + read_lock_irqsave(&tasklist_lock, flags);
> > +
> > + rcu_read_lock();
> > + for_each_process(p) {
> > + tg = task_group(p);
> > + tty_sched_move_task(p, tg);
> > + list_for_each_entry_rcu(t, &p->thread_group, thread_group) {
> > + tty_sched_move_task(t, tg);
> > + }
> > + }
> > + rcu_read_unlock();
> > +
> > + read_unlock_irqrestore(&tasklist_lock, flags);
>
> I don't think you need to disable IRQs for tasklist lock, nor do I think
> you actually need it.

OK, thanks. (No such thing as too paranoid;)

-Mike

Mike Galbraith

unread,
Oct 19, 2010, 5:50:02 AM10/19/10
to
On Tue, 2010-10-19 at 11:43 +0200, Peter Zijlstra wrote:
> On Tue, 2010-10-19 at 11:39 +0200, Mike Galbraith wrote:
> >
> > Yeah. I should be able to just do sched_move_task(), but it doesn't
> > currently work even with your patch, turning tty_sched on/off can lead
> > to incredible delays before you get box back. With virgin source, it's
> > size infinity for all intents and purposes.
>
> Ah, first feedback I've got on that patch,. but surely since you created
> one that does work we can fix my patch and use the normal path? :-)

Yeah. I wanted to get the RFC out the door, so took a shortcut.

-Mike

Peter Zijlstra

unread,
Oct 19, 2010, 5:50:01 AM10/19/10
to
On Tue, 2010-10-19 at 11:39 +0200, Mike Galbraith wrote:
>
> Yeah. I should be able to just do sched_move_task(), but it doesn't
> currently work even with your patch, turning tty_sched on/off can lead
> to incredible delays before you get box back. With virgin source, it's
> size infinity for all intents and purposes.

Ah, first feedback I've got on that patch, but surely since you created
one that does work we can fix my patch and use the normal path? :-)

Mike Galbraith

unread,
Oct 19, 2010, 5:50:02 AM10/19/10
to
On Tue, 2010-10-19 at 11:26 +0200, Peter Zijlstra wrote:
> On Tue, 2010-10-19 at 11:16 +0200, Mike Galbraith wrote:
> > Greetings,
> >
> > Comments, suggestions etc highly welcome.
>
> You might be wanting to exclude RT tasks from the tty groups since there
> is no interface to grant them any runtime and such :-)

:)

> Also, I think tty_sched_move_task and sched_move_task() should be
> sharing lots more code -- I recently proposed a fix for
> sched_move_task() because the Android people complained, but they
> haven't replied back yet..

Yeah. I should be able to just do sched_move_task(), but it doesn't


currently work even with your patch, turning tty_sched on/off can lead
to incredible delays before you get box back. With virgin source, it's
size infinity for all intents and purposes.

-Mike

Mike Galbraith

unread,
Oct 19, 2010, 7:30:02 AM10/19/10
to
It was suggested that I show a bit more info.

On Tue, 2010-10-19 at 11:16 +0200, Mike Galbraith wrote:

> A 100% hog overhead measurement proggy pinned to the same CPU as a make -j10
>
> pert/s: 229 >5484.43us: 41 min: 0.15 max:12069.42 avg:2193.81 sum/s:502382us overhead:50.24%
> pert/s: 222 >5652.28us: 43 min: 0.46 max:12077.31 avg:2248.56 sum/s:499181us overhead:49.92%
> pert/s: 211 >5809.38us: 43 min: 0.16 max:12064.78 avg:2381.70 sum/s:502538us overhead:50.25%
> pert/s: 223 >6147.92us: 43 min: 0.15 max:16107.46 avg:2282.17 sum/s:508925us overhead:50.49%
> pert/s: 218 >6252.64us: 43 min: 0.16 max:12066.13 avg:2324.11 sum/s:506656us overhead:50.27%

The same load without per tty task groups.

pert/s: 31 >40475.37us: 3 min: 0.37 max:48103.60 avg:29573.74 sum/s:916786us overhead:90.24%
pert/s: 23 >41237.70us: 12 min: 0.36 max:56010.39 avg:40187.01 sum/s:924301us overhead:91.99%
pert/s: 24 >42150.22us: 12 min: 8.86 max:61265.91 avg:39459.91 sum/s:947038us overhead:92.20%
pert/s: 26 >42344.91us: 11 min: 3.83 max:52029.60 avg:36164.70 sum/s:940282us overhead:91.12%
pert/s: 24 >44262.90us: 14 min: 5.05 max:82735.15 avg:40314.33 sum/s:967544us overhead:92.22%
^^^^^usecs ^^^^^usecs ^^the competition got

Average service latency is an order of magnitude better with tty_sched.
(Imagine that pert is Xorg or whatnot instead)

Using Mathieu Desnoyers' wakeup-latency testcase (attached):

With taskset -c 3 make -j 10 running..

taskset -c 3 ./wakeup-latency& sleep 30;killall wakeup-latency

without:
maximum latency: 42963.2 µs
average latency: 9077.0 µs
missed timer events: 0

with:
maximum latency: 4160.7 µs
average latency: 149.4 µs
missed timer events: 0

Patch makes a big difference in desktop feel under hefty load here.

-Mike

wakeup-latency.c

Ingo Molnar

unread,
Oct 19, 2010, 8:00:02 AM10/19/10
to

* Mike Galbraith <efa...@gmx.de> wrote:

> Using Mathieu Desnoyers' wakeup-latency testcase (attached):
>
> With taskset -c 3 make -j 10 running..
>
> taskset -c 3 ./wakeup-latency& sleep 30;killall wakeup-latency
>
> without:

> maximum latency: 42963.2 µs
> average latency: 9077.0 µs


> missed timer events: 0
>
> with:

> maximum latency: 4160.7 µs
> average latency: 149.4 µs


> missed timer events: 0
>
> Patch makes a big difference in desktop feel under hefty load here.

That's really nice!

Could this feature realistically do block IO isolation as well? It's always annoying
when some big IO job is making the desktop jerky. Especially as your patch is
selecting the block cgroup feature already:

+ select BLK_CGROUP

Thanks,

Ingo

Mike Galbraith

unread,
Oct 19, 2010, 9:20:02 AM10/19/10
to
On Tue, 2010-10-19 at 13:56 +0200, Ingo Molnar wrote:

> Could this feature realistically do block IO isolation as well? It's always annoying
> when some big IO job is making the desktop jerky. Especially as your patch is
> selecting the block cgroup feature already:
>
> + select BLK_CGROUP

I know my cgroup pgid config helps a bunch with rummaging in my email
while other IO is going on. I've been attributing that to BLK_CGROUP,
but have no proof.

-Mike

Linus Torvalds

unread,
Oct 19, 2010, 11:30:01 AM10/19/10
to
On Tue, Oct 19, 2010 at 4:29 AM, Mike Galbraith <efa...@gmx.de> wrote:
> It was suggested that I show a bit more info.

Yes, I was going to complain that the numbers in the commit message
made no sense without something to compare the numbers to.

> The same load without per tty task groups.

Very impressive. This definitely looks like something people will notice.

That said, I do think we should think carefully about calling this a
"tty" feature. I think we might want to leave the door open to other
heuristics than _just_ the tty group. I think the tty group approach
is wonderful for traditional Unix loads in a desktop environment, but
I suspect we might hit issues with IDE's etc too. I don't know if we
can notice things like that automatically, but I think it's worth
thinking about.

So I think the patch looks pretty good, and the numbers seem to look
just stunningly so, but I'd like to name the feature more along the
lines of "automatic process group scheduling" rather than about tty's
per se.

And you actually did that for the Kconfig option, which makes me quite happy.

The one other thing I do wonder about is how noticeable the group
scheduling overhead is. If people compare with a non-CGROUP_SCHED
kernel, will a desktop-optimized kernel suddenly have horrible pipe
latency due to much higher scheduling cost? Right now that whole
feature is hidden by EXPERIMENTAL, I don't know how much it hurts, and
I never timed it when I tried it out long ago..

Linus

Mike Galbraith

unread,
Oct 19, 2010, 2:20:02 PM10/19/10
to
On Tue, 2010-10-19 at 08:28 -0700, Linus Torvalds wrote:
> On Tue, Oct 19, 2010 at 4:29 AM, Mike Galbraith <efa...@gmx.de> wrote:

> So I think the patch looks pretty good, and the numbers seem to look
> just stunningly so, but I'd like to name the feature more along the
> lines of "automatic process group scheduling" rather than about tty's
> per se.

Oh, absolutely, that's what it's all about really. What I'd _like_ is
to get per process group scheduling working on the cheap..ish. Your
idea of tty cgoups looked much simpler though, so I figured that would
be a great place to start. It turned out to be much simpler than I
thought it would be, which is encouraging, and it works well in testing
(so far that is).

> And you actually did that for the Kconfig option, which makes me quite happy.

(Ingo's input.. spot on)

> The one other thing I do wonder about is how noticeable the group
> scheduling overhead is.

Very noticeable, cgroups is far from free. It would make no sense for a
performance freak to even think about it. I don't run cgroup enabled
kernels usually, and generally strip to the bone because I favor
throughput very very heavily, but when I look at the desktop under load,
the cost/performance trade-off ~seems to work out.

> If people compare with a non-CGROUP_SCHED
> kernel, will a desktop-optimized kernel suddenly have horrible pipe
> latency due to much higher scheduling cost? Right now that whole
> feature is hidden by EXPERIMENTAL, I don't know how much it hurts, and
> I never timed it when I tried it out long ago..

The scheduling cost is quite high. But realistically, the cost of a
distro kernel with full featured network stack is (much) higher. I
seriously doubt the cost of cgroups would be noticed by the typical
_desktop_ user. Overall latencies for any switchy microbenchmark will
certainly be considerably higher with the feature enabled.

-Mike

Mike Galbraith

unread,
Oct 19, 2010, 3:00:02 PM10/19/10
to
> On Tue, 2010-10-19 at 08:28 -0700, Linus Torvalds wrote:
> >
> > If people compare with a non-CGROUP_SCHED
> > kernel, will a desktop-optimized kernel suddenly have horrible pipe
> > latency due to much higher scheduling cost? Right now that whole
> > feature is hidden by EXPERIMENTAL, I don't know how much it hurts, and
> > I never timed it when I tried it out long ago..

Q/D test of kernels w/wo, with same .config using pipe-test (pure sched)
gives on my box ~590khz with tty_sched active, 620khz without cgroups
active in same kernel/config without patch. Last time I measured
stripped down config (not long ago, but not yesterday either) gave max
ctx rate ~690khz on this box.

(note: very Q, very D numbers, no variance testing, ballpark)

Ingo Molnar

unread,
Oct 19, 2010, 11:00:02 PM10/19/10
to

* Mike Galbraith <efa...@gmx.de> wrote:

> > On Tue, 2010-10-19 at 08:28 -0700, Linus Torvalds wrote:
> > >
> > > If people compare with a non-CGROUP_SCHED
> > > kernel, will a desktop-optimized kernel suddenly have horrible pipe
> > > latency due to much higher scheduling cost? Right now that whole
> > > feature is hidden by EXPERIMENTAL, I don't know how much it hurts, and
> > > I never timed it when I tried it out long ago..
>
> Q/D test of kernels w/wo, with same .config using pipe-test (pure sched) gives on
> my box ~590khz with tty_sched active, 620khz without cgroups active in same
> kernel/config without patch. last time I measured stripped down config (not long
> ago, but not yesterday either) gave max ctx rate ~690khz on this box.
>
> (note: very Q, very D numbers, no variance testing, ballpark)

That's 5% overhead in context switches. Definitely not in the 'horrible' category.

This would be a rather tempting item for 2.6.37 ... especially as it really mainly
reuses existing group scheduling functionality, in a clever way.

Mind doing more of the tty->desktop renames/generalizations as Linus suggested, and
resend the patch?

I'd also suggest to move it out of EXPERIMENTAL - we don't really do that for core
kernel features as most distros enable CONFIG_EXPERIMENTAL so it's a rather
meaningless distinction. Since the feature is default-n, people will get the old
scheduler by default but can also choose this desktop-centric scheduling mode.

I'd even argue to make it default-y, because this patch clearly cures a form of
kbuild cancer.

Thanks,

Ingo

Markus Trippelsdorf

unread,
Oct 20, 2010, 10:10:02 AM10/20/10
to
Mike Galbraith wrote:

> Comments, suggestions etc highly welcome.

I've tested your patch and it runs smoothly on my machine.
However I had several NULL pointer dereference BUGs that happened when I
left X or rebooted my system. I think this is caused by your patch.
There is nothing in the logs unfortunately, but I scribbled down the
following by hand (not the whole trace, I'm too lazy):

BUG: unable to handle NULL pointer dereference at 0..038
IP: pick_next_task_fair+0xa7/0x1a0
...
Call Trace: schedule
...

Mike Galbraith

unread,
Oct 20, 2010, 10:50:01 AM10/20/10
to
On Wed, 2010-10-20 at 15:55 +0200, Markus Trippelsdorf wrote:
> Mike Galbraith wrote:
>
> > Comments, suggestions etc highly welcome.
>
> I've tested your patch and it runs smoothly on my machine.
> However I had several NULL pointer dereference BUGs that happened when I
> left X or rebooted my system. I think this is caused by your patch.
> There is nothing in the logs unfortunately, but I scribbled down the
> following by hand (not the whole trace, I'm too lazy):
>
> BUG: unable to handle NULL pointer dereference at 0..038
> IP: pick_next_task_fair 0xa7/0x1a0
> ...
> Call Trace: schedule
> ...

Hm. Not much I can do without the trace, but thanks for testing and
reporting anyway, guess I need to do some heavy stress testing. I'm
re-writing it as I write this anyway.

thanks,

-Mike

Mike Galbraith

unread,
Oct 21, 2010, 4:00:02 AM10/21/10
to
On Tue, 2010-10-19 at 11:43 +0200, Peter Zijlstra wrote:
> On Tue, 2010-10-19 at 11:39 +0200, Mike Galbraith wrote:
> >
> > Yeah. I should be able to just do sched_move_task(), but it doesn't
> > currently work even with your patch, turning tty_sched on/off can lead
> > to incredible delays before you get box back. With virgin source, it's
> > size infinity for all intents and purposes.
>
> Ah, first feedback I've got on that patch, but surely since you created
> one that does work we can fix my patch and use the normal path? :-)

Actually, your patch works just peachy, my fiddling with sleepers
vruntime at attach time was a dainbramaged thing to do.

-Mike

Mike Galbraith

unread,
Oct 21, 2010, 4:20:02 AM10/21/10
to
On Wed, 2010-10-20 at 04:56 +0200, Ingo Molnar wrote:

> Mind doing more of the tty->desktop renames/generalizations as Linus suggested, and
> resend the patch?

Here she comes. Better/Worse?

Changes:
- tty->autogroup.
- only autogroup fair class tasks.
- removed dainbramaged sleeper vruntime twiddling.
- removed paranoid locking.
- removed noop detach code.

> I'd also suggest to move it out of EXPERIMENTAL - we don't really do that for core
> kernel features as most distros enable CONFIG_EXPERIMENTAL so it's a rather
> meaningless distinction. Since the feature is default-n, people will get the old
> scheduler by default but can also choose this desktop-centric scheduling mode.
>
> I'd even argue to make it default-y, because this patch clearly cures a form of
> kbuild cancer.

You top dogs can make the default call.. it it's accepted that is ;-)

Markus: care to try the below? Works fine for me (but so did first
cut). It depends on the attached patch, and applied to virgin shiny new
2.6.36.

A recurring complaint from CFS users is that parallel kbuild has a negative
impact on desktop interactivity. This patch implements an idea from Linus,
to automatically create task groups. This patch only implements Linus' per
tty task group suggestion, and only for fair class tasks, but leaves the way
open for enhancement.

How it works: at tty alloc/dealloc time, a task group is created/destroyed,
so there is always a task group active per tty. When we select a runqueue,
if the task has a has a tty association, and no task group, attach it to a
per tty autogroup on the fly.

The feature is enabled from boot by default if CONFIG_SCHED_AUTOGROUP is
selected, but can be disabled via the boot option noautogroup, and can be
also be turned on/off on the fly via..
echo [01] > /proc/sys/kernel/sched_autogroup_enabled.
..which will automatically move tasks to/from the root task group.

Some numbers.

A 100% hog overhead measurement proggy pinned to the same CPU as a make -j10

About measurement proggy:
pert/sec = perturbations/sec
min/max/avg = scheduler service latencies in usecs
sum/s = time accrued by the competition per sample period (1 sec here)
overhead = %CPU received by the competition per sample period

pert/s: 31 >40475.37us: 3 min: 0.37 max:48103.60 avg:29573.74 sum/s:916786us overhead:90.24%
pert/s: 23 >41237.70us: 12 min: 0.36 max:56010.39 avg:40187.01 sum/s:924301us overhead:91.99%
pert/s: 24 >42150.22us: 12 min: 8.86 max:61265.91 avg:39459.91 sum/s:947038us overhead:92.20%
pert/s: 26 >42344.91us: 11 min: 3.83 max:52029.60 avg:36164.70 sum/s:940282us overhead:91.12%
pert/s: 24 >44262.90us: 14 min: 5.05 max:82735.15 avg:40314.33 sum/s:967544us overhead:92.22%

Same load with this patch applied.

pert/s: 229 >5484.43us: 41 min: 0.15 max:12069.42 avg:2193.81 sum/s:502382us overhead:50.24%
pert/s: 222 >5652.28us: 43 min: 0.46 max:12077.31 avg:2248.56 sum/s:499181us overhead:49.92%
pert/s: 211 >5809.38us: 43 min: 0.16 max:12064.78 avg:2381.70 sum/s:502538us overhead:50.25%
pert/s: 223 >6147.92us: 43 min: 0.15 max:16107.46 avg:2282.17 sum/s:508925us overhead:50.49%
pert/s: 218 >6252.64us: 43 min: 0.16 max:12066.13 avg:2324.11 sum/s:506656us overhead:50.27%

Average service latency is an order of magnitude better with autogroup.
(Imagine that pert were Xorg or whatnot instead)

Using Mathieu Desnoyers' wakeup-latency testcase:

With taskset -c 3 make -j 10 running..

taskset -c 3 ./wakeup-latency& sleep 30;killall wakeup-latency

without:
maximum latency: 42963.2 µs
average latency: 9077.0 µs
missed timer events: 0

with:
maximum latency: 4160.7 µs
average latency: 149.4 µs
missed timer events: 0

Signed-off-by: Mike Galbraith <efa...@gmx.de>
---
drivers/char/tty_io.c | 2 +
include/linux/sched.h | 14 ++++++++
include/linux/tty.h | 3 +
init/Kconfig | 13 ++++++++
kernel/sched.c | 9 ++++-
kernel/sched_autogroup.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++
kernel/sched_autogroup.h | 7 ++++
kernel/sysctl.c | 11 ++++++
8 files changed, 134 insertions(+), 1 deletion(-)

Index: linux-2.6.36.git/include/linux/sched.h
===================================================================
--- linux-2.6.36.git.orig/include/linux/sched.h
+++ linux-2.6.36.git/include/linux/sched.h
@@ -1900,6 +1900,20 @@ int sched_rt_handler(struct ctl_table *t

extern unsigned int sysctl_sched_compat_yield;

+#ifdef CONFIG_SCHED_AUTOGROUP
+int sched_autogroup_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+extern unsigned int sysctl_sched_autogroup_enabled;
+
+void sched_autogroup_create_tty(struct tty_struct *tty);
+void sched_autogroup_destroy_tty(struct tty_struct *tty);
+#else
+static inline void sched_autogroup_create_tty(struct tty_struct *tty) { }
+static inline void sched_autogroup_destroy_tty(struct tty_struct *tty) { }


+#endif
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
Index: linux-2.6.36.git/include/linux/tty.h
===================================================================
--- linux-2.6.36.git.orig/include/linux/tty.h
+++ linux-2.6.36.git/include/linux/tty.h
@@ -327,6 +327,9 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;

+#ifdef CONFIG_SCHED_AUTOGROUP
+ struct task_group *tg;
+#endif
};

/* Each of a tty's open files has private_data pointing to tty_file_private */
Index: linux-2.6.36.git/kernel/sched.c
===================================================================
--- linux-2.6.36.git.orig/kernel/sched.c
+++ linux-2.6.36.git/kernel/sched.c
@@ -78,6 +78,7 @@

#include "sched_cpupri.h"
#include "workqueue_sched.h"

+#include "sched_autogroup.h"



#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -612,11 +613,16 @@ static inline int cpu_of(struct rq *rq)
*/
static inline struct task_group *task_group(struct task_struct *p)
{
+ struct task_group *tg;
struct cgroup_subsys_state *css;

css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
lockdep_is_held(&task_rq(p)->lock));
- return container_of(css, struct task_group, css);
+ tg = container_of(css, struct task_group, css);
+
+ autogroup_check_attach(p, &tg);
+
+ return tg;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -1920,6 +1926,7 @@ static void deactivate_task(struct rq *r
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"

+#include "sched_autogroup.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
Index: linux-2.6.36.git/drivers/char/tty_io.c
===================================================================
--- linux-2.6.36.git.orig/drivers/char/tty_io.c
+++ linux-2.6.36.git/drivers/char/tty_io.c
@@ -185,6 +185,7 @@ void free_tty_struct(struct tty_struct *
{
kfree(tty->write_buf);
tty_buffer_free_all(tty);

+ sched_autogroup_destroy_tty(tty);


kfree(tty);
}

@@ -2823,6 +2824,7 @@ void initialize_tty_struct(struct tty_st
tty->ops = driver->ops;
tty->index = idx;
tty_line_name(driver, idx, tty->name);

+ sched_autogroup_create_tty(tty);
}

/**
Index: linux-2.6.36.git/kernel/sched_autogroup.h
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_autogroup.h
@@ -0,0 +1,7 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+static inline void
+autogroup_check_attach(struct task_struct *p, struct task_group **tg);
+#else
+static inline void
+autogroup_check_attach(struct task_struct *p, struct task_group **tg) { }
+#endif


Index: linux-2.6.36.git/kernel/sysctl.c
===================================================================
--- linux-2.6.36.git.orig/kernel/sysctl.c
+++ linux-2.6.36.git/kernel/sysctl.c
@@ -384,6 +384,17 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},

+#ifdef CONFIG_SCHED_AUTOGROUP
+ {
+ .procname = "sched_autogroup_enabled",
+ .data = &sysctl_sched_autogroup_enabled,


+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,

+ .proc_handler = sched_autogroup_handler,


+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
Index: linux-2.6.36.git/init/Kconfig
===================================================================
--- linux-2.6.36.git.orig/init/Kconfig
+++ linux-2.6.36.git/init/Kconfig
@@ -652,6 +652,19 @@ config DEBUG_BLK_CGROUP

endif # CGROUPS

+config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"


+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED

+ select BLK_CGROUP
+ help

+ This option optimizes the scheduler for common desktop workloads by
+ automatically creating and populating task groups. This separation
+ of workloads isolates aggressive CPU burners (like build jobs) from
+ desktop applications. Task group autogeneration is currently based
+ upon task tty association.
+
config MM_OWNER
bool

Index: linux-2.6.36.git/kernel/sched_autogroup.c
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_autogroup.c
@@ -0,0 +1,76 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+#include <linux/tty.h>
+
+unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+
+void sched_autogroup_create_tty(struct tty_struct *tty)


+{
+ tty->tg = sched_create_group(&init_task_group);
+ if (IS_ERR(tty->tg)) {
+ tty->tg = &init_task_group;
+ WARN_ON(1);
+ }
+}

+EXPORT_SYMBOL(sched_autogroup_create_tty);
+
+void sched_autogroup_destroy_tty(struct tty_struct *tty)


+{
+ if (tty->tg && tty->tg != &init_task_group)
+ sched_destroy_group(tty->tg);
+}

+EXPORT_SYMBOL(sched_autogroup_destroy_tty);
+
+static void
+autogroup_attach_tty(struct task_struct *p, struct task_group **tg)
+{
+ struct tty_struct *tty = p->signal->tty;
+
+ if (!tty)
+ return;
+


+ *tg = p->signal->tty->tg;
+}

+
+static inline void
+autogroup_check_attach(struct task_struct *p, struct task_group **tg)
+{
+ if (!sysctl_sched_autogroup_enabled || *tg != &root_task_group ||
+ p->sched_class != &fair_sched_class)
+ return;
+
+ rcu_read_lock();
+
+ autogroup_attach_tty(p, tg);


+
+ rcu_read_unlock();
+}
+

+int sched_autogroup_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)


+{
+ struct task_struct *p, *t;
+ struct task_group *tg;

+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+

+ for_each_process(p) {
+ tg = task_group(p);

+ sched_move_task(p);


+ list_for_each_entry_rcu(t, &p->thread_group, thread_group) {

+ sched_move_task(t);
+ }
+ }


+
+ return 0;
+}
+

+static int __init setup_autogroup(char *str)
+{
+ sysctl_sched_autogroup_enabled = 0;


+
+ return 1;
+}

+__setup("noautogroup", setup_autogroup);
+#endif

cgroup_fixup_broken_cgroup_movement.diff

Ingo Molnar

unread,
Oct 21, 2010, 4:40:01 AM10/21/10
to

* Mike Galbraith <efa...@gmx.de> wrote:

> On Wed, 2010-10-20 at 04:56 +0200, Ingo Molnar wrote:
>
> > Mind doing more of the tty->desktop renames/generalizations as Linus suggested, and
> > resend the patch?
>
> Here she comes. Better/Worse?
>
> Changes:
> - tty->autogroup.
> - only autogroup fair class tasks.
> - removed dainbramaged sleeper vruntime twiddling.
> - removed paranoid locking.
> - removed noop detatch code.

I really like the new 'autogroup scheduler' name - as we really dont want to turn
this into anything but an intelligent grouping thing. Via the naming we can resist
heuristics for example.

Btw., how does Xorg fare with this? Can we remove sleeper fairness for example and
simplify other bits of the CFS logic as a side-effect?

Markus Trippelsdorf

unread,
Oct 21, 2010, 4:50:01 AM10/21/10
to
On Thu, Oct 21, 2010 at 10:11:55AM +0200, Mike Galbraith wrote:
> On Wed, 2010-10-20 at 04:56 +0200, Ingo Molnar wrote:
>
> > Mind doing more of the tty->desktop renames/generalizations as Linus suggested, and
> > resend the patch?
>
> Here she comes. Better/Worse?
>
> Changes:
> - tty->autogroup.
> - only autogroup fair class tasks.
> - removed dainbramaged sleeper vruntime twiddling.
> - removed paranoid locking.
> - removed noop detatch code.
>
> > I'd also suggest to move it out of EXPERIMENTAL - we dont really do that for core
> > kernel features as most distros enable CONFIG_EXPERIMENTAL so it's a rather
> > meaningless distinction. Since the feature is default-n, people will get the old
> > scheduler by default but can also choose this desktop-centric scheduling mode.
> >
> > I'd even argue to make it default-y, because this patch clearly cures a form of
> > kbuild cancer.
>
> You top dogs can make the default call.. it it's accepted that is ;-)
>
> Marcus: care to try the below? Works fine for me (but so did first
> cut). It depends on the attached patch, and applied to virgin shiny new
> 2.6.36.

Works fine interactively, but I still get the same kernel BUG on reboot
time as before. Would a photo of the trace help you?

--
Markus

Mike Galbraith

unread,
Oct 21, 2010, 4:50:02 AM10/21/10
to
On Thu, 2010-10-21 at 10:31 +0200, Ingo Molnar wrote:

> Btw., how does Xorg fare with this? Can we remove sleeper fairness for example and
> simplify other bits of the CFS logic as a side-effect?

Works a treat for me. As I write this in evolution, I have amarok
playing with a visualization and a make -j100 running. Song switch is
instant, visualization is nice and smooth despite unaccelerated Xorg.

We can't whack fair sleepers though, not without inventing a new
preemption model to take it's place.

-Mike

Mike Galbraith

unread,
Oct 21, 2010, 5:00:03 AM10/21/10
to
On Thu, 2010-10-21 at 10:48 +0200, Markus Trippelsdorf wrote:
> On Thu, Oct 21, 2010 at 10:11:55AM +0200, Mike Galbraith wrote:
> > On Wed, 2010-10-20 at 04:56 +0200, Ingo Molnar wrote:
> >
> > > Mind doing more of the tty->desktop renames/generalizations as Linus suggested, and
> > > resend the patch?
> >
> > Here she comes. Better/Worse?
> >
> > Changes:
> > - tty->autogroup.
> > - only autogroup fair class tasks.
> > - removed dainbramaged sleeper vruntime twiddling.
> > - removed paranoid locking.
> > - removed noop detatch code.
> >
> > > I'd also suggest to move it out of EXPERIMENTAL - we dont really do that for core
> > > kernel features as most distros enable CONFIG_EXPERIMENTAL so it's a rather
> > > meaningless distinction. Since the feature is default-n, people will get the old
> > > scheduler by default but can also choose this desktop-centric scheduling mode.
> > >
> > > I'd even argue to make it default-y, because this patch clearly cures a form of
> > > kbuild cancer.
> >
> > You top dogs can make the default call.. it it's accepted that is ;-)
> >
> > Marcus: care to try the below? Works fine for me (but so did first
> > cut). It depends on the attached patch, and applied to virgin shiny new
> > 2.6.36.
>
> Works fine interactively, but I still get the same kernel BUG on reboot
> time as before. Would a photo of the trace help you?

Odd. Yeah, please send me the photo and your .config.

-Mike

Marco

unread,
Oct 21, 2010, 5:20:02 AM10/21/10
to
Hello, i'm a little confused about tty grouping.
The benefit in terms of latency is only due to the grouping of task
under the appropriate cgroup hierarchy?

Can this issue be solved by userspace daemon (think of libpam-systemd,
libpam-cgroup)? Perhaps tty grouping in kernel space is more
efficient.

I'm missing something?

Marco

Peter Zijlstra

unread,
Oct 21, 2010, 6:30:01 AM10/21/10
to
On Thu, 2010-10-21 at 09:55 +0200, Mike Galbraith wrote:
> Actually, your patch works just peachy, my fiddling with sleepers
> vruntime at attach time was a dainbramaged thing to do.

OK, I'll queue it with a Tested-by from you. Thanks!

Mathieu Desnoyers

unread,
Oct 21, 2010, 7:00:03 AM10/21/10
to
* Mike Galbraith (efa...@gmx.de) wrote:
[...]

Hi Mike,

This per-tty task grouping approach looks very promising. I'll give it a spin
when I find the time. Meanwhile, a little question about locking here: how is
the read lock supposed to protect from p->signal (and p->signal->tty)
modifications ? What's the locking scheme here ? So maybe just simple
rcu_dereference are missing, or maybe the tsk->sighand->siglock might be
required. In all cases, I feel something is missing there.

Thanks!

Mathieu

--
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com

Peter Zijlstra

unread,
Oct 21, 2010, 7:30:03 AM10/21/10
to

> Meanwhile, a little question about locking here: how is


> the read lock supposed to protect from p->signal (and p->signal->tty)
> modifications ? What's the locking scheme here ? So maybe just simple
> rcu_dereference are missing, or maybe the tsk->sighand->siglock might be
> required. In all cases, I feel something is missing there.

Oleg, could you comment?

Mike Galbraith

unread,
Oct 21, 2010, 7:30:03 AM10/21/10
to
On Thu, 2010-10-21 at 06:51 -0400, Mathieu Desnoyers wrote:

My assumption is that no additional locking is needed. The tty is
refcounted, dropped in release_task()->__exit_signal(), at which point
the task is unhashed, is history. The tty can't go away until the last
task referencing it goes away.

-Mike

Mathieu Desnoyers

unread,
Oct 21, 2010, 12:30:01 PM10/21/10
to
* Markus Trippelsdorf (mar...@trippelsdorf.de) wrote:
> OK here you go. Both are attached.

In the backtrace, the scheduler is called from:

do_group_exit()
__dequeue_signal()
do_exit()

given that task_group() is called from many spots in the scheduler, I wonder if
some checks making sure that tg != NULL in task_group() would be appropriate ?
Also checking that p->signal is non-NULL in autogroup_attach_tty() might help.

Thanks,

Mathieu


--
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com

Oleg Nesterov

unread,
Oct 21, 2010, 1:40:01 PM10/21/10
to
On 10/21, Peter Zijlstra wrote:
>
> On Thu, 2010-10-21 at 06:51 -0400, Mathieu Desnoyers wrote:
> > * Mike Galbraith (efa...@gmx.de) wrote:
> > [...]
> > > +static void
> > > +autogroup_attach_tty(struct task_struct *p, struct task_group **tg)
> > > +{
> > > + struct tty_struct *tty = p->signal->tty;
> > > +
> > > + if (!tty)
> > > + return;
> > > +
> > > + *tg = p->signal->tty->tg;
> > > +}

minor nit, I think in theory this needs barrier(), or

struct tty_struct *tty = ACCESS_ONCE(p->signal->tty);

if (tty)
*tg = tty->tg;

> > > +static inline void
> > > +autogroup_check_attach(struct task_struct *p, struct task_group **tg)
> > > +{
> > > + if (!sysctl_sched_autogroup_enabled || *tg != &root_task_group ||
> > > + p->sched_class != &fair_sched_class)
> > > + return;
> > > +
> > > + rcu_read_lock();
> > > +
> > > + autogroup_attach_tty(p, tg);
> > > +
> > > + rcu_read_unlock();
> > > +}
> > > +
>
> > Meanwhile, a little question about locking here: how is
> > the read lock supposed to protect from p->signal (and p->signal->tty)
> > modifications ? What's the locking scheme here ? So maybe just simple
> > rcu_dereference are missing, or maybe the tsk->sighand->siglock might be
> > required. In all cases, I feel something is missing there.
>
> Oleg, could you comment?

No, I don't understand this ;) But I know nothig about task groups,
most probably this is OK.

It is not clear to me why do we need rcu_read_lock() and how it can help.
The tty can go away right after dereferencing signal->tty.

Even if the task doesn't exit, it (or its sub-thread) can do sys_setsid()
at any moment and free this tty. If any thread was moved to tty->sg, doesn't
this mean that, say, ->cfs_rq will point to the already freed tg->cfs_rq?

From http://marc.info/?l=linux-kernel&m=128764874422614

+int sched_autogroup_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct task_struct *p, *t;
+ struct task_group *tg;
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+
+ for_each_process(p) {

Hmm. This needs rcu lock at least?

+ tg = task_group(p);

Why?

+ sched_move_task(p);
+ list_for_each_entry_rcu(t, &p->thread_group, thread_group) {
+ sched_move_task(t);
+ }
+ }

Looks like, you can just do

do_each_thread(p, t) {
sched_move_task(t);
} while_each_thread(p, t);

With the same effect.

Oleg.

Mike Galbraith

unread,
Oct 21, 2010, 3:20:02 PM10/21/10
to
On Thu, 2010-10-21 at 18:29 +0200, Oleg Nesterov wrote:
> On 10/21, Peter Zijlstra wrote:
> >
> > On Thu, 2010-10-21 at 06:51 -0400, Mathieu Desnoyers wrote:
> > > * Mike Galbraith (efa...@gmx.de) wrote:
> > > [...]
> > > > +static void
> > > > +autogroup_attach_tty(struct task_struct *p, struct task_group **tg)
> > > > +{
> > > > + struct tty_struct *tty = p->signal->tty;
> > > > +
> > > > + if (!tty)
> > > > + return;
> > > > +
> > > > + *tg = p->signal->tty->tg;
> > > > +}
>
> minor nit, I think in theory this needs barrier(), or
>
> struct tty_struct *tty = ACCESS_ONCE(p->signal->tty);
>
> if (tty)
> *tg = tty->tg;

Thanks.

> > > > +static inline void
> > > > +autogroup_check_attach(struct task_struct *p, struct task_group **tg)
> > > > +{
> > > > + if (!sysctl_sched_autogroup_enabled || *tg != &root_task_group ||
> > > > + p->sched_class != &fair_sched_class)
> > > > + return;
> > > > +
> > > > + rcu_read_lock();
> > > > +
> > > > + autogroup_attach_tty(p, tg);
> > > > +
> > > > + rcu_read_unlock();
> > > > +}
> > > > +
> >
> > > Meanwhile, a little question about locking here: how is
> > > the read lock supposed to protect from p->signal (and p->signal->tty)
> > > modifications ? What's the locking scheme here ? So maybe just simple
> > > rcu_dereference are missing, or maybe the tsk->sighand->siglock might be
> > > required. In all cases, I feel something is missing there.
> >
> > Oleg, could you comment?
>
> No, I don't understand this ;) But I know nothig about task groups,
> most probably this is OK.
>
> It is not clear to me why do we need rcu_read_lock() and how it can help.
> The tty can go away right after dereferencing signal->tty.

It was inherited.

> Even if the task doesn't exit, it (or its sub-thread) can do sys_setsid()
> at any moment and free this tty. If any thread was moved to tty->sg, doesn't
> this mean that, say, ->cfs_rq will point to the already freed tg->cfs_rq?

Ah, so isn't as safe as it looked. Thanks!

> From http://marc.info/?l=linux-kernel&m=128764874422614
>
> +int sched_autogroup_handler(struct ctl_table *table, int write,
> + void __user *buffer, size_t *lenp, loff_t *ppos)
> +{
> + struct task_struct *p, *t;
> + struct task_group *tg;
> + int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
> +
> + if (ret || !write)
> + return ret;
> +
> + for_each_process(p) {
>
> Hmm. This needs rcu lock at least?

(used to be paranoid locking there.. vs required locking)

> + tg = task_group(p);
>
> Why?

A cleanup leftover.

>
> + sched_move_task(p);
> + list_for_each_entry_rcu(t, &p->thread_group, thread_group) {
> + sched_move_task(t);
> + }
> + }
>
> Looks like, you can just do
>
> do_each_thread(p, t) {
> sched_move_task(t);
> } while_each_thread(p, t);
>
> With the same effect.

Yeah.

So in theory, the tty can go away on me. I knew this was too easy.

-Mike

Marco

unread,
Oct 21, 2010, 6:20:01 PM10/21/10
to
I've made some tests to give a try to tty grouping patches.

Kernel 2.6.36+CONFIG_SCHED_AUTOGROUP+Improve_tick_preemption.patch

These are the result of :
taskset -c 0 ./wakeup-latency& sleep 30;killall wakeup-latency (owner marco)
vs
taskset -c 0 make -j 10 bzImage


Vanilla 2.6.36
maximum latency: 52879.7 µs
average latency: 4217.5 µs
missed timer events: 0

Kernel 2.6.36+autogroup enabled
maximum latency: 2840.8 µs
average latency: 15.5 µs
missed timer events: 0

Kernel 2.6.36+libpam-systemd enabled
maximum latency: 26361.6 µs
average latency: 4548.6 µs
missed timer events: 0

Kernel 2.6.36+START-NICE sched feature(v3)
maximum latency: 76182.8 µs
average latency: 4930.1 µs
missed timer events: 0

Quite impressive.....
Now, i can play supertuxkart while make'ing -j 10 and stellarium shows
the highest fps i've seen so far.
Very good job, guys.

Marco

Mike Galbraith

unread,
Oct 26, 2010, 3:10:01 AM10/26/10
to
On Thu, 2010-10-21 at 18:29 +0200, Oleg Nesterov wrote:

> It is not clear to me why do we need rcu_read_lock() and how it can help.
> The tty can go away right after dereferencing signal->tty.

Which was Marcus' crash. Didn't happen here only because I didn't have
CONFIG_PREEMPT set.

Changes since v2:
- drop

Mike Galbraith

unread,
Oct 26, 2010, 3:30:02 AM10/26/10
to
On Tue, 2010-10-26 at 09:07 +0200, Mike Galbraith wrote:
> On Thu, 2010-10-21 at 18:29 +0200, Oleg Nesterov wrote:
>
> > It is not clear to me why do we need rcu_read_lock() and how it can help.
> > The tty can go away right after dereferencing signal->tty.
>
> Which was Marcus' crash. Didn't happen here only because I didn't have
> CONFIG_PREEMPT set.
>
> Changes since v2:
> - drop

Bumped mouse, message escaped.

Doesn't matter though, damn thing just blew up during enable/disable
plus hackbench stress test, despite holding a reference to the tty at
every place tty changes (under sighand lock), and moving the task with
that reference held.

CONFIG_PREEMPT is being a little S.O.B.

-Mike

Linus Torvalds

unread,
Oct 26, 2010, 11:50:02 AM10/26/10
to
On Tue, Oct 26, 2010 at 12:29 AM, Mike Galbraith <mgalb...@suse.de> wrote:
> On Tue, 2010-10-26 at 09:07 +0200, Mike Galbraith wrote:
>> On Thu, 2010-10-21 at 18:29 +0200, Oleg Nesterov wrote:
>>
>> > It is not clear to me why do we need rcu_read_lock() and how it can help.
>> > The tty can go away right after dereferencing signal->tty.
>>
>> Which was Marcus' crash. Didn't happen here only because I didn't have
>> CONFIG_PREEMPT set.
>>
>> Changes since v2:
>>   - drop
>
> Bumped mouse, message escaped.
>
> Doesn't matter though, damn thing just blew up during enable/disable
> plus hackbench stress test, despite holding a reference to the tty at
> every place tty changes (under sighand lock), and moving the task with
> that reference held.

So I have a suggestion that may not be popular with you, because it
does end up changing the approach of your patch a lot.

And I have to say, I like how your last patch looked. It was
surprisingly small, simple, and clean. So I hate saying "I think it
should perhaps do things a bit differently". That said, I would
suggest:

- don't depend on "tsk->signal->tty" at all.

- INSTEAD, introduce a "tsk->signal->sched_group" pointer that points
to whatever the current auto-task_group is. Remember, long-term, we'd
want to maybe have other heuristics than just the tty groups, so we'd
want this separate from the tty logic _anyway_

- at fork time, just copy the task_group pointer in copy_signal() if
it is non-NULL, and increment the refcount (I don't think struct
task_group is refcounted now, but this would require it).

- at free_signal_struct(), just do a
"put_task_group(sig->task_group);" before freeing it.

- make the scheduler use the "tsk->signal->sched_group" as the
default group if nothing else exists.

Now, all the basic logic is _entirely_ unaware of any tty logic, and
it's generic. And none of it has any races with some odd tty release
logic or anything like that.

Now, after this, the only thing you'd need to do is hook into
__proc_set_tty(), which already holds the sighand lock, and _there_
you would attach the task_group to the process. Notice how it would
never be attached to a tty at all, so tty_release etc would never be
involved in any taskgroup thing - it's not really the tty that owns
the taskgroup, it's simply the act of becoming a tty task group leader
that attaches the task to a new scheduling group.

It also means, for example, that if a process loses its tty (and
doesn't get a new one - think hangup), it still remains in whatever
scheduling group it started out with. The tty really is immaterial.

And the nice thing about this is that it should be trivial to make
other things than tty's trigger this same thing, if we find a pattern
(or create some new interface to let people ask for it) for something
that should create a new group (like perhaps spawning a graphical
application from the window manager rather than from a tty).

Comments?

Linus

Mike Galbraith

unread,
Oct 26, 2010, 10:00:01 PM10/26/10
to
On Tue, 2010-10-26 at 08:47 -0700, Linus Torvalds wrote:
> On Tue, Oct 26, 2010 at 12:29 AM, Mike Galbraith <mgalb...@suse.de> wrote:
> > On Tue, 2010-10-26 at 09:07 +0200, Mike Galbraith wrote:
> >> On Thu, 2010-10-21 at 18:29 +0200, Oleg Nesterov wrote:
> >>
> >> > It is not clear to me why do we need rcu_read_lock() and how it can help.
> >> > The tty can go away right after dereferencing signal->tty.
> >>
> >> Which was Marcus' crash. Didn't happen here only because I didn't have
> >> CONFIG_PREEMPT set.
> >>
> >> Changes since v2:
> >> - drop
> >
> > Bumped mouse, message escaped.
> >
> > Doesn't matter though, damn thing just blew up during enable/disable
> > plus hackbench stress test, despite holding a reference to the tty at
> > every place tty changes (under sighand lock), and moving the task with
> > that reference held.
>
> So I have a suggestion that may not be popular with you, because it
> does end up changing the approach of your patch a lot.

Suggestions highly welcome. The raciness is driving me nuts. I can't
afford additional locking, and barriers ain't working.

Much more tasteful than what I was about to do as a last resort funky
race killer, namely make my on/off switch a machine wide atomic bomb :)

Thanks!

-Mike

Mike Galbraith

unread,
Nov 11, 2010, 10:30:01 AM11/11/10
to
Greetings from sunny Arizona!

On Tue, 2010-10-26 at 08:47 -0700, Linus Torvalds wrote:

I _finally_ got back to this yesterday, and implemented your suggestion,
though with a couple minor variations. Putting the autogroup pointer in
the signal struct didn't look right to me, so I plugged it into the task
struct instead. I also didn't refcount taskgroups, wanted the patchlet
to be as self-contained as possible, so refcounted the autogroup struct
instead. I also left group movement on tty disassociation in place, but
may nuke it.

The below has withstood an all night thrashing in my laptop with a
PREEMPT_RT kernel, and looks kinda presentable to me, so...

A recurring complaint from CFS users is that parallel kbuild has a negative
impact on desktop interactivity. This patch implements an idea from Linus,
to automatically create task groups. This patch only implements Linus' per
tty task group suggestion, and only for fair class tasks, but leaves the way
open for enhancement.

Implementation: each task struct contains an inherited pointer to a refcounted
autogroup struct containing a task group pointer, the default for all tasks
pointing to the init_task_group. When a task calls __proc_set_tty(), the
task's reference to the default group is dropped, a new task group is created,
and the task is moved out of the old group and into the new. Children thereafter
inherit this task group, and increase it's refcount. Calls to __tty_hangup()
and proc_clear_tty() move the caller back to the init_task_group, and possibly
destroy the task group. On exit, reference to the current task group is dropped,
and the task group is potentially destroyed. At runqueue selection time, iff
a task has no cgroup assignment, it's current autogroup is used.

Some numbers.

Documentation/kernel-parameters.txt | 2
drivers/char/tty_io.c | 4
include/linux/sched.h | 20 ++++
init/Kconfig | 12 ++
kernel/exit.c | 1
kernel/sched.c | 28 ++++--
kernel/sched_autogroup.c | 161 ++++++++++++++++++++++++++++++++++++
kernel/sched_autogroup.h | 10 ++
kernel/sysctl.c | 11 ++
9 files changed, 241 insertions(+), 8 deletions(-)

Index: linux-2.6.36.git/include/linux/sched.h
===================================================================
--- linux-2.6.36.git.orig/include/linux/sched.h
+++ linux-2.6.36.git/include/linux/sched.h

@@ -1159,6 +1159,7 @@ struct sched_rt_entity {
};

struct rcu_node;
+struct autogroup;

struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -1181,6 +1182,10 @@ struct task_struct {
struct sched_entity se;
struct sched_rt_entity rt;

+#ifdef CONFIG_SCHED_AUTOGROUP
+ struct autogroup *autogroup;
+#endif
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
@@ -1900,6 +1905,21 @@ int sched_rt_handler(struct ctl_table *t



extern unsigned int sysctl_sched_compat_yield;

+#ifdef CONFIG_SCHED_AUTOGROUP

+extern unsigned int sysctl_sched_autogroup_enabled;
+

+int sched_autogroup_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+

+extern void sched_autogroup_create_attach(struct task_struct *p);
+extern void sched_autogroup_detatch(struct task_struct *p);
+extern void sched_autogroup_exit(struct task_struct *p);
+#else
+static inline void sched_autogroup_create_attach(struct task_struct *p) { }
+static inline void sched_autogroup_detatch(struct task_struct *p) { }
+static inline void sched_autogroup_exit(struct task_struct *p) { }


+#endif
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);

Index: linux-2.6.36.git/kernel/sched.c
===================================================================
--- linux-2.6.36.git.orig/kernel/sched.c
+++ linux-2.6.36.git/kernel/sched.c
@@ -78,6 +78,7 @@

#include "sched_cpupri.h"
#include "workqueue_sched.h"
+#include "sched_autogroup.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -612,11 +613,16 @@ static inline int cpu_of(struct rq *rq)
*/
static inline struct task_group *task_group(struct task_struct *p)
{
+ struct task_group *tg;
struct cgroup_subsys_state *css;

css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
lockdep_is_held(&task_rq(p)->lock));
- return container_of(css, struct task_group, css);
+ tg = container_of(css, struct task_group, css);
+

+ autogroup_task_group(p, &tg);


+
+ return tg;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -1920,6 +1926,7 @@ static void deactivate_task(struct rq *r
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
+#include "sched_autogroup.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif

@@ -2569,6 +2576,7 @@ void sched_fork(struct task_struct *p, i
* Silence PROVE_RCU.
*/
rcu_read_lock();
+ autogroup_fork(p);
set_task_cpu(p, cpu);
rcu_read_unlock();

@@ -7749,7 +7757,7 @@ void __init sched_init(void)
#ifdef CONFIG_CGROUP_SCHED
list_add(&init_task_group.list, &task_groups);
INIT_LIST_HEAD(&init_task_group.children);
-
+ autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */

#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
@@ -8279,15 +8287,11 @@ void sched_destroy_group(struct task_gro
/* change task's runqueue when it moves between groups.
* The caller of this function should have put the task in its new group
* by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
- * reflect its new group.
+ * reflect its new group. Called with the runqueue lock held.
*/
-void sched_move_task(struct task_struct *tsk)
+void __sched_move_task(struct task_struct *tsk, struct rq *rq)
{
int on_rq, running;
- unsigned long flags;
- struct rq *rq;
-
- rq = task_rq_lock(tsk, &flags);

running = task_current(rq, tsk);
on_rq = tsk->se.on_rq;
@@ -8308,7 +8312,15 @@ void sched_move_task(struct task_struct
tsk->sched_class->set_curr_task(rq);
if (on_rq)
enqueue_task(rq, tsk, 0);
+}
+
+void sched_move_task(struct task_struct *tsk)
+{
+ struct rq *rq;
+ unsigned long flags;

+ rq = task_rq_lock(tsk, &flags);
+ __sched_move_task(tsk, rq);
task_rq_unlock(rq, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */


Index: linux-2.6.36.git/drivers/char/tty_io.c
===================================================================
--- linux-2.6.36.git.orig/drivers/char/tty_io.c
+++ linux-2.6.36.git/drivers/char/tty_io.c

@@ -580,6 +580,7 @@ void __tty_hangup(struct tty_struct *tty
spin_lock_irq(&p->sighand->siglock);
if (p->signal->tty == tty) {
p->signal->tty = NULL;
+ sched_autogroup_detatch(p);
/* We defer the dereferences outside fo
the tasklist lock */
refs++;
@@ -3070,6 +3071,7 @@ void proc_clear_tty(struct task_struct *
spin_lock_irqsave(&p->sighand->siglock, flags);
tty = p->signal->tty;
p->signal->tty = NULL;
+ sched_autogroup_detatch(p);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
tty_kref_put(tty);
}
@@ -3089,12 +3091,14 @@ static void __proc_set_tty(struct task_s
tty->session = get_pid(task_session(tsk));
if (tsk->signal->tty) {
printk(KERN_DEBUG "tty not NULL!!\n");
+ sched_autogroup_detatch(tsk);
tty_kref_put(tsk->signal->tty);
}
}
put_pid(tsk->signal->tty_old_pgrp);
tsk->signal->tty = tty_kref_get(tty);
tsk->signal->tty_old_pgrp = NULL;
+ sched_autogroup_create_attach(tsk);
}

static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
Index: linux-2.6.36.git/kernel/exit.c
===================================================================
--- linux-2.6.36.git.orig/kernel/exit.c
+++ linux-2.6.36.git/kernel/exit.c
@@ -174,6 +174,7 @@ repeat:
write_lock_irq(&tasklist_lock);
tracehook_finish_release_task(p);
__exit_signal(p);
+ sched_autogroup_exit(p);

/*
* If we are the last non-leader member of the thread


Index: linux-2.6.36.git/kernel/sched_autogroup.h
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_autogroup.h

@@ -0,0 +1,10 @@


+#ifdef CONFIG_SCHED_AUTOGROUP
+static inline void

+autogroup_task_group(struct task_struct *p, struct task_group **tg);
+static void __sched_move_task(struct task_struct *tsk, struct rq *rq);
+#else /* !CONFIG_SCHED_AUTOGROUP */
+static inline void autogroup_init(struct task_struct *init_task) { }
+static inline void autogroup_fork(struct task_struct *p) { }
+static inline void
+autogroup_task_group(struct task_struct *p, struct task_group **tg) { }
+#endif /* CONFIG_SCHED_AUTOGROUP */


Index: linux-2.6.36.git/kernel/sched_autogroup.c
===================================================================
--- /dev/null
+++ linux-2.6.36.git/kernel/sched_autogroup.c

@@ -0,0 +1,161 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+
+unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+
+struct autogroup {
+ struct kref kref;
+ struct task_group *tg;
+};
+
+static struct autogroup autogroup_default;
+
+static void autogroup_init(struct task_struct *init_task)
+{
+ autogroup_default.tg = &init_task_group;
+ kref_init(&autogroup_default.kref);
+ init_task->autogroup = &autogroup_default;
+}
+
+static inline void autogroup_destroy(struct kref *kref)
+{
+ struct autogroup *ag = container_of(kref, struct autogroup, kref);
+
+ sched_destroy_group(ag->tg);
+ kfree(ag);
+}
+
+static inline void autogroup_kref_put(struct autogroup *ag)
+{
+ kref_put(&ag->kref, autogroup_destroy);
+}
+
+static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
+{
+ kref_get(&ag->kref);
+ return ag;
+}
+
+static inline struct autogroup *autogroup_create(void)
+{
+ struct autogroup *ag = kmalloc(sizeof(*ag), GFP_KERNEL);
+
+ if (!ag)
+ goto out_fail;
+
+ ag->tg = sched_create_group(&init_task_group);
+ kref_init(&ag->kref);
+
+ if (!(IS_ERR(ag->tg)))
+ return ag;
+
+out_fail:
+ if (ag) {
+ kfree(ag);
+ WARN_ON(1);
+ } else
+ WARN_ON(1);
+
+ return autogroup_kref_get(&autogroup_default);
+}
+
+static void autogroup_fork(struct task_struct *p)
+{
+ p->autogroup = autogroup_kref_get(current->autogroup);
+}
+
+static inline void
+autogroup_task_group(struct task_struct *p, struct task_group **tg)
+{
+ int enabled = sysctl_sched_autogroup_enabled;
+
+ enabled &= (*tg == &root_task_group);
+ enabled &= (p->sched_class == &fair_sched_class);
+ enabled &= (!(p->flags & PF_EXITING));
+
+ if (enabled)
+ *tg = p->autogroup->tg;
+}
+
+static void
+autogroup_move_task(struct task_struct *p, struct autogroup *ag)
+{
+ struct autogroup *prev;
+ struct rq *rq;
+ unsigned long flags;
+
+ rq = task_rq_lock(p, &flags);
+ prev = p->autogroup;
+ if (prev == ag) {
+ task_rq_unlock(rq, &flags);
+ return;
+ }
+
+ p->autogroup = autogroup_kref_get(ag);
+ __sched_move_task(p, rq);
+ task_rq_unlock(rq, &flags);
+
+ autogroup_kref_put(prev);
+}
+
+void sched_autogroup_create_attach(struct task_struct *p)
+{
+ autogroup_move_task(p, autogroup_create());
+
+ /*
+ * Correct freshly allocated group's refcount.
+ * Move takes a reference on destination, but
+ * create already initialized refcount to 1.
+ */
+ if (p->autogroup != &autogroup_default)
+ autogroup_kref_put(p->autogroup);
+}
+EXPORT_SYMBOL(sched_autogroup_create_attach);
+
+void sched_autogroup_detatch(struct task_struct *p)
+{
+ autogroup_move_task(p, &autogroup_default);
+}
+EXPORT_SYMBOL(sched_autogroup_detatch);
+
+void sched_autogroup_exit(struct task_struct *p)
+{
+ autogroup_kref_put(p->autogroup);
+}
+
+int sched_autogroup_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct task_struct *p, *t;
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write)
+ return ret;
+
+ /*
+ * Exclude cgroup, task group and task create/destroy
+ * during global classification.
+ */
+ cgroup_lock();
+ spin_lock(&task_group_lock);
+ read_lock(&tasklist_lock);
+
+ do_each_thread(p, t) {
+ sched_move_task(t);
+ } while_each_thread(p, t);
+
+ read_unlock(&tasklist_lock);
+ spin_unlock(&task_group_lock);
+ cgroup_unlock();
+
+ return 0;
+}
+
+static int __init setup_autogroup(char *str)
+{
+ sysctl_sched_autogroup_enabled = 0;
+
+ return 1;
+}
+
+__setup("noautogroup", setup_autogroup);
+#endif

@@ -652,6 +652,18 @@ config DEBUG_BLK_CGROUP



endif # CGROUPS

+config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+ help
+ This option optimizes the scheduler for common desktop workloads by
+ automatically creating and populating task groups. This separation
+ of workloads isolates aggressive CPU burners (like build jobs) from
+ desktop applications. Task group autogeneration is currently based
+ upon task tty association.
+
config MM_OWNER
bool

Index: linux-2.6.36.git/Documentation/kernel-parameters.txt
===================================================================
--- linux-2.6.36.git.orig/Documentation/kernel-parameters.txt
+++ linux-2.6.36.git/Documentation/kernel-parameters.txt
@@ -1610,6 +1610,8 @@ and is between 256 and 4096 characters.
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.

+ noautogroup Disable scheduler automatic task group creation.
+
nobats [PPC] Do not use BATs for mapping kernel lowmem
on "Classic" PPC cores.

Ingo Molnar

unread,
Nov 11, 2010, 1:10:02 PM11/11/10
to

* Mike Galbraith <efa...@gmx.de> wrote:

> I _finally_ got back to this yesterday, and implemented your suggestion, though
> with a couple minor variations. Putting the autogroup pointer in the signal
> struct didn't look right to me, so I plugged it into the task struct instead. I
> also didn't refcount taskgroups, wanted the patchlet to be as self-contained as
> possible, so refcounted the autogroup struct instead. I also left group movement
> on tty disassociation in place, but may nuke it.
>
> The below has withstood an all night thrashing in my laptop with a PREEMPT_RT
> kernel, and looks kinda presentable to me, so...

The patch and the diffstat gives me warm fuzzy feelings:

> ---
> Documentation/kernel-parameters.txt | 2
> drivers/char/tty_io.c | 4
> include/linux/sched.h | 20 ++++
> init/Kconfig | 12 ++
> kernel/exit.c | 1
> kernel/sched.c | 28 ++++--
> kernel/sched_autogroup.c | 161 ++++++++++++++++++++++++++++++++++++
> kernel/sched_autogroup.h | 10 ++
> kernel/sysctl.c