Google Groups no longer supports new Usenet posts or subscriptions. Historical content remains viewable.
Dismiss

[PATCH 2/2] x86/fpu: split old & new fpu handling into separate functions

9 views
Skip to first unread message

ri...@redhat.com

unread,
Oct 14, 2016, 8:20:05 AM10/14/16
to
From: Rik van Riel <ri...@redhat.com>

By moving all of the new fpu state handling into switch_fpu_finish,
the code can be simplified some more. This does get rid of the
prefetch, but given the size of the fpu register state on modern
CPUs, and the amount of work done by __switch_to in between both
functions, the value of a single cache line prefetch seems somewhat
dubious anyway.

Signed-off-by: Rik van Riel <ri...@redhat.com>
---
arch/x86/include/asm/fpu/internal.h | 48 ++++++++++++-------------------------
arch/x86/kernel/process_32.c | 5 ++--
arch/x86/kernel/process_64.c | 5 ++--
3 files changed, 19 insertions(+), 39 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index a75324675311..621ba3bfa2a7 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -552,27 +552,15 @@ static inline int fpregs_active(void)
*
* This is a two-stage process:
*
- * - switch_fpu_prepare() saves the old state and
- * sets the new state of the CR0.TS bit. This is
- * done within the context of the old process.
+ * - switch_fpu_prepare() saves the old state.
+ * This is done within the context of the old process.
*
* - switch_fpu_finish() restores the new state as
* necessary.
*/
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t
-switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+static inline void
+switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- fpu_switch_t fpu;
-
- /*
- * If the task has used the math, pre-load the FPU on xsave processors
- * or if the past 5 consecutive context-switches used math.
- */
- fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active;
-
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
@@ -584,16 +572,6 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
trace_x86_fpu_regs_deactivated(old_fpu);
} else
old_fpu->last_cpu = -1;
-
- if (fpu.preload) {
- if (fpregs_state_valid(new_fpu, cpu))
- fpu.preload = 0;
- else
- prefetch(&new_fpu->state);
- fpregs_activate(new_fpu);
- }
-
- return fpu;
}

/*
@@ -601,15 +579,19 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
*/

/*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
+ * Set up the userspace FPU context for the new task, if the task
+ * has used the FPU.
*/
-static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
- if (fpu_switch.preload)
- copy_kernel_to_fpregs(&new_fpu->state);
+ bool preload = static_cpu_has(X86_FEATURE_FPU) &&
+ new_fpu->fpstate_active;
+
+ if (preload) {
+ if (!fpregs_state_valid(new_fpu, cpu))
+ copy_kernel_to_fpregs(&new_fpu->state);
+ fpregs_activate(new_fpu);
+ }
}

/*
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8efdc4c..7dc8c9c3d801 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -232,11 +232,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
- fpu_switch_t fpu_switch;

/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

- fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+ switch_fpu_prepare(prev_fpu, cpu);

/*
* Save away %gs. No need to save %fs, as it was saved on the
@@ -295,7 +294,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (prev->gs | next->gs)
lazy_load_gs(next->gs);

- switch_fpu_finish(next_fpu, fpu_switch);
+ switch_fpu_finish(next_fpu, cpu);

this_cpu_write(current_task, next_p);

diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ee944bd2310d..705669efb762 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -264,9 +264,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
unsigned prev_fsindex, prev_gsindex;
- fpu_switch_t fpu_switch;

- fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+ switch_fpu_prepare(prev_fpu, cpu);

/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
@@ -416,7 +415,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
prev->gsbase = 0;
prev->gsindex = prev_gsindex;

- switch_fpu_finish(next_fpu, fpu_switch);
+ switch_fpu_finish(next_fpu, cpu);

/*
* Switch the PDA and FPU contexts.
--
2.7.4

ri...@redhat.com

unread,
Oct 14, 2016, 8:20:05 AM10/14/16
to
Two more small cleanups to the x86 FPU code, before I re-do the
functional FPU context switching changes I have been working on.

Neither of these should result in any noticeable differences,
aside from slightly fewer lines of code and updated comments :)

Dave Hansen

unread,
Oct 14, 2016, 1:20:06 PM10/14/16
to
On 10/14/2016 05:15 AM, ri...@redhat.com wrote:
> From: Rik van Riel <ri...@redhat.com>
>
> By moving all of the new fpu state handling into switch_fpu_finish,
> the code can be simplified some more. This does get rid of the
> prefetch, but given the size of the fpu register state on modern
> CPUs, and the amount of work done by __switch_to in-between both
> functions, the value of a single cache line prefetch seems somewhat
> dubious anyway.
...
> -
> - if (fpu.preload) {
> - if (fpregs_state_valid(new_fpu, cpu))
> - fpu.preload = 0;
> - else
> - prefetch(&new_fpu->state);
> - fpregs_activate(new_fpu);
> - }
> -
> - return fpu;
> }

Yeah, that prefetch is highly dubious. XRSTOR might not even be
_reading_ that cacheline if the state isn't present (xstate->xfeatures
bit is 0). If we had to pick *a* cacheline to prefetch for XRSTOR, it
would be the XSAVE header, *not* the FPU state.

I actually did some attempts to optimize the PKRU handling by touching
and prefetching the state before calling XRSTOR. It actually made
things overall _worse_ when I touched it before the XRSTOR.

It would be ideal to have some data on whether this actually _does_
anything, but I can't imagine it being a real delta in either direction.

Acked-by: Dave Hansen <dave....@intel.com>

tip-bot for Rik van Riel

unread,
Oct 16, 2016, 7:40:05 AM10/16/16
to
Commit-ID: c474e50711aa79b7bd0ea30b44744baca5650375
Gitweb: http://git.kernel.org/tip/c474e50711aa79b7bd0ea30b44744baca5650375
Author: Rik van Riel <ri...@redhat.com>
AuthorDate: Fri, 14 Oct 2016 08:15:31 -0400
Committer: Ingo Molnar <mi...@kernel.org>
CommitDate: Sun, 16 Oct 2016 11:38:41 +0200

x86/fpu: Split old_fpu & new_fpu handling into separate functions

By moving all of the new_fpu state handling into switch_fpu_finish(),
the code can be simplified some more.

This gets rid of the prefetch, but given the size of the FPU register
state on modern CPUs, and the amount of work done by __switch_to()
in between both functions, the value of a single cache line prefetch
seems somewhat dubious anyway.

Signed-off-by: Rik van Riel <ri...@redhat.com>
Acked-by: Dave Hansen <dave....@intel.com>
Cc: Andy Lutomirski <lu...@kernel.org>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Brian Gerst <brg...@gmail.com>
Cc: Dave Hansen <dave....@linux.intel.com>
Cc: Denys Vlasenko <dvla...@redhat.com>
Cc: Fenghua Yu <fengh...@intel.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Josh Poimboeuf <jpoi...@redhat.com>
Cc: Linus Torvalds <torv...@linux-foundation.org>
Cc: Oleg Nesterov <ol...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Quentin Casasnovas <quentin.c...@oracle.com>
Cc: Thomas Gleixner <tg...@linutronix.de>
Link: http://lkml.kernel.org/r/1476447331-21566-3-...@redhat.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
arch/x86/include/asm/fpu/internal.h | 48 ++++++++++++-------------------------
arch/x86/kernel/process_32.c | 5 ++--
arch/x86/kernel/process_64.c | 5 ++--
3 files changed, 19 insertions(+), 39 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 590f274..d4a6849 100644
index bd7be8e..7dc8c9c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -232,11 +232,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
- fpu_switch_t fpu_switch;

/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

- fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+ switch_fpu_prepare(prev_fpu, cpu);

/*
* Save away %gs. No need to save %fs, as it was saved on the
@@ -295,7 +294,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (prev->gs | next->gs)
lazy_load_gs(next->gs);

- switch_fpu_finish(next_fpu, fpu_switch);
+ switch_fpu_finish(next_fpu, cpu);

this_cpu_write(current_task, next_p);

diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ee944bd..705669e 100644

Izuchukwu Francis

unread,
Jan 24, 2017, 7:20:09 AM1/24/17
to
bless may i know u

On 10/16/16, tip-bot for Rik van Riel <tip...@zytor.com> wrote:
> You can do whatever you want to me izundunaoyi
>
> Tell me where you want my cum
>
> and don't let it be a disappointing location
>
>
> [image]
> [image]
>
> To OPT_OUT,Please Click Here....
>
>
>

Izuchukwu Francis

unread,
Feb 10, 2017, 9:00:06 AM2/10/17
to
sori am ntt around since wia re u nw
0 new messages