Google Groups no longer supports new Usenet posts or subscriptions. Historical content remains viewable.
Dismiss

[PATCH] ARM: perf: save/restore pmu registers in pm notifier

48 views
Skip to first unread message

Neil Zhang

unread,
Apr 11, 2014, 7:10:01 AM4/11/14
to
From: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>

This adds core support for saving and restoring CPU PMU registers
for suspend/resume support i.e. deeper C-states in cpuidle terms.
This patch adds support only to ARMv7 PMU registers save/restore.
It needs to be extended to xscale and ARMv6 if needed.

[Neil] We found that DS-5 does not work on our CA7-based SoCs.
After debugging, we found the PMU registers were lost because of core power down.
Then I found that Sudeep had a patch to fix it about two years ago, but it was
not in the mainline, so I just ported it.

Signed-off-by: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>
Signed-off-by: Neil Zhang <zha...@marvell.com>
---
arch/arm/include/asm/pmu.h | 11 +++++++++
arch/arm/kernel/perf_event_cpu.c | 30 +++++++++++++++++++++++-
arch/arm/kernel/perf_event_v7.c | 47 ++++++++++++++++++++++++++++++++++++++
3 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index ae1919b..f37f048 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -62,6 +62,15 @@ struct pmu_hw_events {
raw_spinlock_t pmu_lock;
};

+struct cpupmu_regs {
+ u32 pmc;
+ u32 pmcntenset;
+ u32 pmuseren;
+ u32 pmintenset;
+ u32 pmxevttype[8];
+ u32 pmxevtcnt[8];
+};
+
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
@@ -83,6 +92,8 @@ struct arm_pmu {
int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
+ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
int num_events;
atomic_t active_events;
struct mutex reserve_mutex;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 51798d7..934f49b 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) "CPU PMU: " fmt

#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -39,6 +40,7 @@ static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);

/*
* Despite the names, these two functions are CPU-specific and are used
@@ -217,6 +219,24 @@ static struct notifier_block cpu_pmu_hotplug_notifier = {
.notifier_call = cpu_pmu_notify,
};

+static int cpu_pmu_pm_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ int cpu = smp_processor_id();
+ struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
+
+ if (action == CPU_PM_ENTER && cpu_pmu->save_regs)
+ cpu_pmu->save_regs(cpu_pmu, pmuregs);
+ else if (action == CPU_PM_EXIT && cpu_pmu->restore_regs)
+ cpu_pmu->restore_regs(cpu_pmu, pmuregs);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_pmu_pm_notifier = {
+ .notifier_call = cpu_pmu_pm_notify,
+};
+
/*
* PMU platform driver and devicetree bindings.
*/
@@ -349,9 +369,17 @@ static int __init register_pmu_driver(void)
if (err)
return err;

+ err = cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
+ if (err) {
+ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ return err;
+ }
+
err = platform_driver_register(&cpu_pmu_driver);
- if (err)
+ if (err) {
+ cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier);
unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ }

return err;
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f4ef398..29ae8f1 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1237,6 +1237,51 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
}
#endif

+static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
+ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
+ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mrc p15, 0, %0, c9, c13, 1"
+ : "=r"(regs->pmxevttype[cnt]));
+ asm volatile("mrc p15, 0, %0, c9, c13, 2"
+ : "=r"(regs->pmxevtcnt[cnt]));
+ }
+ return;
+}
+
+static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mcr p15, 0, %0, c9, c13, 1"
+ : : "r"(regs->pmxevttype[cnt]));
+ asm volatile("mcr p15, 0, %0, c9, c13, 2"
+ : : "r"(regs->pmxevtcnt[cnt]));
+ }
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
+}
+
static void armv7pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
@@ -1528,6 +1573,8 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
+ cpu_pmu->save_regs = armv7pmu_save_regs;
+ cpu_pmu->restore_regs = armv7pmu_restore_regs;
cpu_pmu->max_period = (1LLU << 32) - 1;
};

--
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majo...@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/

Stephen Boyd

unread,
Apr 11, 2014, 2:40:01 PM4/11/14
to
On 04/11/14 04:01, Neil Zhang wrote:
> @@ -217,6 +219,24 @@ static struct notifier_block cpu_pmu_hotplug_notifier = {
> .notifier_call = cpu_pmu_notify,
> };
>
> +static int cpu_pmu_pm_notify(struct notifier_block *b,
> + unsigned long action, void *hcpu)
> +{
> + int cpu = smp_processor_id();
> + struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);

this_cpu_ptr(&cpu_pmu_regs)?

--
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
hosted by The Linux Foundation

Neil Zhang

unread,
Apr 13, 2014, 9:40:02 PM4/13/14
to
Stephen,

> -----Original Message-----
> From: Stephen Boyd [mailto:sb...@codeaurora.org]
> Sent: 2014年4月12日 2:32
> To: Neil Zhang
> Cc: will....@arm.com; li...@arm.linux.org.uk; Sudeep KarkadaNagesha;
> linux-...@vger.kernel.org; linux-ar...@lists.infradead.org
> Subject: Re: [PATCH] ARM: perf: save/restore pmu registers in pm notifier
>
> On 04/11/14 04:01, Neil Zhang wrote:
> > @@ -217,6 +219,24 @@ static struct notifier_block
> cpu_pmu_hotplug_notifier = {
> > .notifier_call = cpu_pmu_notify,
> > };
> >
> > +static int cpu_pmu_pm_notify(struct notifier_block *b,
> > + unsigned long action, void *hcpu) {
> > + int cpu = smp_processor_id();
> > + struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
>
> this_cpu_ptr(&cpu_pmu_regs)?

Thanks for the suggestion, I will update it.

>
> --
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, hosted
> by The Linux Foundation


Best Regards,
Neil Zhang

Neil Zhang

unread,
Apr 13, 2014, 9:50:01 PM4/13/14
to
From: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>

This adds core support for saving and restoring CPU PMU registers
for suspend/resume support i.e. deeper C-states in cpuidle terms.
This patch adds support only to ARMv7 PMU registers save/restore.
It needs to be extended to xscale and ARMv6 if needed.

[Neil] We found that DS-5 does not work on our CA7-based SoCs.
After debugging, we found the PMU registers were lost because of core power down.
Then I found that Sudeep had a patch to fix it about two years ago, but it was
not in the mainline, so I just ported it.

Signed-off-by: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>
Signed-off-by: Neil Zhang <zha...@marvell.com>
---
arch/arm/include/asm/pmu.h | 11 +++++++++
arch/arm/kernel/perf_event_cpu.c | 29 ++++++++++++++++++++++-
arch/arm/kernel/perf_event_v7.c | 47 ++++++++++++++++++++++++++++++++++++++
3 files changed, 86 insertions(+), 1 deletion(-)
index 51798d7..7f1c756 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) "CPU PMU: " fmt

#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -39,6 +40,7 @@ static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);

/*
* Despite the names, these two functions are CPU-specific and are used
@@ -217,6 +219,23 @@ static struct notifier_block cpu_pmu_hotplug_notifier = {
.notifier_call = cpu_pmu_notify,
};

+static int cpu_pmu_pm_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ struct cpupmu_regs *pmuregs = this_cpu_ptr(&cpu_pmu_regs);
+
+ if (action == CPU_PM_ENTER && cpu_pmu->save_regs)
+ cpu_pmu->save_regs(cpu_pmu, pmuregs);
+ else if (action == CPU_PM_EXIT && cpu_pmu->restore_regs)
+ cpu_pmu->restore_regs(cpu_pmu, pmuregs);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_pmu_pm_notifier = {
+ .notifier_call = cpu_pmu_pm_notify,
+};
+
/*
* PMU platform driver and devicetree bindings.
*/
@@ -349,9 +368,17 @@ static int __init register_pmu_driver(void)

Will Deacon

unread,
Apr 15, 2014, 4:50:02 AM4/15/14
to
Hello,

On Mon, Apr 14, 2014 at 02:42:22AM +0100, Neil Zhang wrote:
> From: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>
>
> This adds core support for saving and restoring CPU PMU registers
> for suspend/resume support i.e. deeper C-states in cpuidle terms.
> This patch adds support only to ARMv7 PMU registers save/restore.
> It needs to be extended to xscale and ARMv6 if needed.
>
> [Neil] We found that DS-5 not work on our CA7 based SoCs.
> After debuging, found PMU registers were lost because of core power down.
> Then i found Sudeep had a patch to fix it about two years ago but not in
> the mainline, just port it.

What I don't like about this patch is that we're introducing significant
overhead for SoCs that don't require save/restore of the PMU state. I'd much
rather see core power down disabled whilst the PMU is in use but, if that's
not possible, then I think we need to:

(1) Make this conditional for cores that really need it

(2) Only save/restore if the PMU is in use (even better, just save/restore
the live registers, but that's probably not worth the effort
initially).

(3) Ensure we ->reset the PMU before doing the restore

Will

Neil Zhang

unread,
Apr 15, 2014, 8:40:03 AM4/15/14
to
Will,
Thanks for the comments!
The patch checks the ARMV7_PMNC_E bit on save / restore,
so presumably only the cores that use the PMU will do the save / restore work.

> (3) Ensure we ->reset the PMU before doing the restore

Ok, I can add it in the next version.

>
> Will

Best Regards,
Neil Zhang

Neil Zhang

unread,
Apr 15, 2014, 8:50:01 AM4/15/14
to


> -----Original Message-----
> From: Will Deacon [mailto:will....@arm.com]
> Sent: 2014年4月15日 20:41
> To: Neil Zhang
> Cc: li...@arm.linux.org.uk; linux-ar...@lists.infradead.org;
> linux-...@vger.kernel.org; Sudeep Holla
> Subject: Re: [PATCH v2] ARM: perf: save/restore pmu registers in pm notifier
>
> On Tue, Apr 15, 2014 at 01:37:17PM +0100, Neil Zhang wrote:
> > > On Mon, Apr 14, 2014 at 02:42:22AM +0100, Neil Zhang wrote:
> > > > From: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>
> > > >
> > > > This adds core support for saving and restoring CPU PMU registers
> > > > for suspend/resume support i.e. deeper C-states in cpuidle terms.
> > > > This patch adds support only to ARMv7 PMU registers save/restore.
> > > > It needs to be extended to xscale and ARMv6 if needed.
> > > >
> > > > [Neil] We found that DS-5 not work on our CA7 based SoCs.
> > > > After debuging, found PMU registers were lost because of core power
> down.
> > > > Then i found Sudeep had a patch to fix it about two years ago but
> > > > not in the mainline, just port it.
> > >
> > > What I don't like about this patch is that we're introducing
> > > significant overhead for SoCs that don't require save/restore of the
> > > PMU state. I'd much rather see core power down disabled whilst the
> > > PMU is in use but, if that's not possible, then I think we need to:
> > >
> > > (1) Make this conditional for cores that really need it
> > >
> > > (2) Only save/restore if the PMU is in use (even better, just save/restore
> > > the live registers, but that's probably not worth the effort
> > > initially).
> > >
> >
> > The patch has check the ARMV7_PMNC_E bit when save / restore, so
> > suppose only the core's that use PMU will do the save / restore work.
>
> Seems pretty fragile to base our save/restore decision on the value of one of
> the registers, though. What happens if the control register is zeroed by the
> core power down?
>
It will check the saved control value on restore, so it should be OK even if the control register is zeroed.

Will Deacon

unread,
Apr 15, 2014, 8:50:02 AM4/15/14
to
On Tue, Apr 15, 2014 at 01:37:17PM +0100, Neil Zhang wrote:
> > On Mon, Apr 14, 2014 at 02:42:22AM +0100, Neil Zhang wrote:
> > > From: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>
> > >
> > > This adds core support for saving and restoring CPU PMU registers for
> > > suspend/resume support i.e. deeper C-states in cpuidle terms.
> > > This patch adds support only to ARMv7 PMU registers save/restore.
> > > It needs to be extended to xscale and ARMv6 if needed.
> > >
> > > [Neil] We found that DS-5 not work on our CA7 based SoCs.
> > > After debuging, found PMU registers were lost because of core power down.
> > > Then i found Sudeep had a patch to fix it about two years ago but not
> > > in the mainline, just port it.
> >
> > What I don't like about this patch is that we're introducing significant
> > overhead for SoCs that don't require save/restore of the PMU state. I'd much
> > rather see core power down disabled whilst the PMU is in use but, if that's not
> > possible, then I think we need to:
> >
> > (1) Make this conditional for cores that really need it
> >
> > (2) Only save/restore if the PMU is in use (even better, just save/restore
> > the live registers, but that's probably not worth the effort
> > initially).
> >
>
> The patch has check the ARMV7_PMNC_E bit when save / restore,
> so suppose only the core's that use PMU will do the save / restore work.

Seems pretty fragile to base our save/restore decision on the value of one
of the registers, though. What happens if the control register is zeroed by
the core power down?

Neil Zhang

unread,
Apr 15, 2014, 9:00:01 AM4/15/14
to

> -----Original Message-----
> From: Will Deacon [mailto:will....@arm.com]
> Sent: 2014年4月15日 20:50
> To: Neil Zhang
> Cc: li...@arm.linux.org.uk; linux-ar...@lists.infradead.org;
> linux-...@vger.kernel.org; Sudeep Holla
> Subject: Re: [PATCH v2] ARM: perf: save/restore pmu registers in pm notifier
>
> Ah yes, I mixed up and save and restore functions. It's still horrible that we
> have to read the control register unconditionally during the save though
> -- it might be nicer if we simply register/unregister the notifier during perf runs
> (in the same way that we request/free the PMU IRQs).
>

Thanks for the comments, I will refine it according to your suggestion.

Will Deacon

unread,
Apr 15, 2014, 9:00:01 AM4/15/14
to
Ah yes, I mixed up the save and restore functions. It's still horrible that
we have to read the control register unconditionally during the save though
-- it might be nicer if we simply register/unregister the notifier during
perf runs (in the same way that we request/free the PMU IRQs).

Sudeep Holla

unread,
Apr 15, 2014, 9:10:02 AM4/15/14
to
Hi Will,

Thanks for the response.

Hi Neil,

On 14/04/14 02:42, Neil Zhang wrote:
> From: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>
>
> This adds core support for saving and restoring CPU PMU registers
> for suspend/resume support i.e. deeper C-states in cpuidle terms.
> This patch adds support only to ARMv7 PMU registers save/restore.
> It needs to be extended to xscale and ARMv6 if needed.
>
> [Neil] We found that DS-5 not work on our CA7 based SoCs.
> After debuging, found PMU registers were lost because of core power down.
> Then i found Sudeep had a patch to fix it about two years ago but not in
> the mainline, just port it.
>

The patch was written as PoC to support multiple PMUs back then and was clearly
not intended for upstream. Will has already mentioned what this patch needs to
improve on.

I have made a few comments below which IMO are necessary.

> Signed-off-by: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>
> Signed-off-by: Neil Zhang <zha...@marvell.com>
> ---
> arch/arm/include/asm/pmu.h | 11 +++++++++
> arch/arm/kernel/perf_event_cpu.c | 29 ++++++++++++++++++++++-
> arch/arm/kernel/perf_event_v7.c | 47 ++++++++++++++++++++++++++++++++++++++
> 3 files changed, 86 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
> index ae1919b..f37f048 100644
> --- a/arch/arm/include/asm/pmu.h
> +++ b/arch/arm/include/asm/pmu.h
> @@ -62,6 +62,15 @@ struct pmu_hw_events {
> raw_spinlock_t pmu_lock;
> };
>
> +struct cpupmu_regs {
> + u32 pmc;
> + u32 pmcntenset;
> + u32 pmuseren;
> + u32 pmintenset;
> + u32 pmxevttype[8];
> + u32 pmxevtcnt[8];
> +};
> +

This can't be generic, it needs to be specific to each PMU implementations.
I have clearly mentioned it in the commit log. It's better to move this out
to each PMU specific implementations.

> struct arm_pmu {
> struct pmu pmu;
> cpumask_t active_irqs;
> @@ -83,6 +92,8 @@ struct arm_pmu {
> int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
> void (*free_irq)(struct arm_pmu *);
> int (*map_event)(struct perf_event *event);
> + void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
> + void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);

Once cpupmu_regs becomes PMU specific, you can remove it from the above 2
function pointers.
hcpu is misleading: cpu_pm_notify doesn't pass the logical cpu index.
IIRC it's NULL, so rename it.
As Will suggested, it's better to use used_mask to save/restore only the active events.

Regards,
Sudeep

Neil Zhang

unread,
Apr 15, 2014, 10:10:01 PM4/15/14
to
Hi Sudeep,


> -----Original Message-----
> From: Sudeep Holla [mailto:sudeep...@arm.com]
> Sent: 2014年4月15日 21:06
> To: Neil Zhang
> Cc: Will Deacon; li...@arm.linux.org.uk; Sudeep Holla;
> linux-ar...@lists.infradead.org; linux-...@vger.kernel.org
> Subject: Re: [PATCH v2] ARM: perf: save/restore pmu registers in pm notifier
>
> Hi Will,
>
> Thanks for the response.
>
> Hi Neil,
>
> On 14/04/14 02:42, Neil Zhang wrote:
> > From: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>
> >
> > This adds core support for saving and restoring CPU PMU registers for
> > suspend/resume support i.e. deeper C-states in cpuidle terms.
> > This patch adds support only to ARMv7 PMU registers save/restore.
> > It needs to be extended to xscale and ARMv6 if needed.
> >
> > [Neil] We found that DS-5 not work on our CA7 based SoCs.
> > After debuging, found PMU registers were lost because of core power down.
> > Then i found Sudeep had a patch to fix it about two years ago but not
> > in the mainline, just port it.
> >
>
> The patch was written as PoC to support multiple PMUs back then and was
> clearly not intended for upstream. Will has already mentioned what this patch
> needs to improve on.

Thanks for the comments.
I think it can help others if it can be accepted by mainline.
Thanks for the detailed suggestions, I will refine it later.
Best Regards,
Neil Zhang

Neil Zhang

unread,
Apr 21, 2014, 7:20:01 AM4/21/14
to
This adds core support for saving and restoring CPU PMU registers
for suspend/resume support i.e. deeper C-states in cpuidle terms.
This patch adds support only to ARMv7 PMU registers save/restore.
It needs to be extended to xscale and ARMv6 if needed.

I made this patch because DS-5 does not work on our CA7-based SoCs.
It is based on Sudeep KarkadaNagesha's patch set for multiple PMUs.

Thanks to Will and Sudeep for the suggestion to only save / restore the used events.

Cc: Sudeep KarkadaNagesha <sudeep.kar...@arm.com>
Signed-off-by: Neil Zhang <zha...@marvell.com>
---
arch/arm/include/asm/pmu.h | 4 ++
arch/arm/kernel/perf_event.c | 2 +
arch/arm/kernel/perf_event_cpu.c | 28 ++++++++++++++
arch/arm/kernel/perf_event_v7.c | 75 ++++++++++++++++++++++++++++++++++++++
4 files changed, 109 insertions(+)

diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index ae1919b..3de3db7 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -83,6 +83,10 @@ struct arm_pmu {
int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
+ int (*register_pm_notifier)(struct arm_pmu *);
+ void (*unregister_pm_notifier)(struct arm_pmu *);
+ void (*save_regs)(struct arm_pmu *);
+ void (*restore_regs)(struct arm_pmu *);
int num_events;
atomic_t active_events;
struct mutex reserve_mutex;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index a6bc431..08822de 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -326,6 +326,7 @@ static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
armpmu->free_irq(armpmu);
+ armpmu->unregister_pm_notifier(armpmu);
pm_runtime_put_sync(&armpmu->plat_device->dev);
}

@@ -339,6 +340,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
return -ENODEV;

pm_runtime_get_sync(&pmu_device->dev);
+ armpmu->register_pm_notifier(armpmu);
err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
if (err) {
armpmu_release_hardware(armpmu);
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 51798d7..79e1c06 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) "CPU PMU: " fmt

#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -173,6 +174,31 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return 0;
}

+static int cpu_pmu_pm_notify(struct notifier_block *b,
+ unsigned long action, void *v)
+{
+ if (action == CPU_PM_ENTER && cpu_pmu->save_regs)
+ cpu_pmu->save_regs(cpu_pmu);
+ else if (action == CPU_PM_EXIT && cpu_pmu->restore_regs)
+ cpu_pmu->restore_regs(cpu_pmu);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_pmu_pm_notifier = {
+ .notifier_call = cpu_pmu_pm_notify,
+};
+
+static int cpu_pmu_register_pm_notifier(struct arm_pmu *cpu_pmu)
+{
+ return cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
+}
+
+static void cpu_pmu_unregister_pm_notifier(struct arm_pmu *cpu_pmu)
+{
+ cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier);
+}
+
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int cpu;
@@ -187,6 +213,8 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
cpu_pmu->request_irq = cpu_pmu_request_irq;
cpu_pmu->free_irq = cpu_pmu_free_irq;
+ cpu_pmu->register_pm_notifier = cpu_pmu_register_pm_notifier;
+ cpu_pmu->unregister_pm_notifier = cpu_pmu_unregister_pm_notifier;

/* Ensure the PMU has sane values out of reset. */
if (cpu_pmu->reset)
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f4ef398..8898b4d 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1237,6 +1237,79 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
}
#endif

+struct armv7_pmuregs {
+ u32 pmc;
+ u32 pmcntenset;
+ u32 pmuseren;
+ u32 pmintenset;
+ u32 pmxevttype[8];
+ u32 pmxevtcnt[8];
+};
+
+static DEFINE_PER_CPU(struct armv7_pmuregs, pmu_regs);
+
+static void armv7pmu_reset(void *info);
+
+static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu)
+{
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct armv7_pmuregs *regs;
+ int bit;
+
+ /* Check whether there are events used */
+ bit = find_first_bit(events->used_mask, cpu_pmu->num_events);
+ if (bit >= cpu_pmu->num_events)
+ return;
+
+ regs = this_cpu_ptr(&pmu_regs);
+ memset(regs, 0, sizeof(*regs));
+
+ for_each_set_bit(bit, events->used_mask, cpu_pmu->num_events) {
+ if (bit) {
+ armv7_pmnc_select_counter(bit);
+ asm volatile("mrc p15, 0, %0, c9, c13, 1"
+ : "=r"(regs->pmxevttype[bit]));
+ asm volatile("mrc p15, 0, %0, c9, c13, 2"
+ : "=r"(regs->pmxevtcnt[bit]));
+ } else
+ asm volatile("mrc p15, 0, %0, c9, c13, 0"
+ : "=r" (regs->pmxevtcnt[0]));
+ }
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
+ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
+}
+
+static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu)
+{
+ struct pmu_hw_events *events;
+ struct armv7_pmuregs *regs = this_cpu_ptr(&pmu_regs);
+ int bit;
+
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ armv7pmu_reset(cpu_pmu);
+
+ events = cpu_pmu->get_hw_events();
+ for_each_set_bit(bit, events->used_mask, cpu_pmu->num_events) {
+ if (bit) {
+ armv7_pmnc_select_counter(bit);
+ asm volatile("mcr p15, 0, %0, c9, c13, 1"
+ : : "r"(regs->pmxevttype[bit]));
+ asm volatile("mcr p15, 0, %0, c9, c13, 2"
+ : : "r"(regs->pmxevtcnt[bit]));
+ } else
+ asm volatile("mcr p15, 0, %0, c9, c13, 0"
+ : : "r" (regs->pmxevtcnt[0]));
+ }
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
+}
+
static void armv7pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
@@ -1528,6 +1601,8 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
+ cpu_pmu->save_regs = armv7pmu_save_regs;
+ cpu_pmu->restore_regs = armv7pmu_restore_regs;
cpu_pmu->max_period = (1LLU << 32) - 1;
};

--
1.7.9.5

Neil Zhang

unread,
Apr 21, 2014, 10:30:02 PM4/21/14
to
> + return cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
> +}
> +
> +static void cpu_pmu_unregister_pm_notifier(struct arm_pmu *cpu_pmu) {
> +static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu) {
> + struct pmu_hw_events *events;
> + struct armv7_pmuregs *regs = this_cpu_ptr(&pmu_regs);
> + int bit;
> +
> + if (!(regs->pmc & ARMV7_PMNC_E))
> + return;
> +

We shouldn't check regs->pmc anymore since it won't be updated every time.
I will submit another version to fix it.
Sorry for the noise.

> + armv7pmu_reset(cpu_pmu);
> +
> + events = cpu_pmu->get_hw_events();
> + for_each_set_bit(bit, events->used_mask, cpu_pmu->num_events) {
> + if (bit) {
> + armv7_pmnc_select_counter(bit);
> + asm volatile("mcr p15, 0, %0, c9, c13, 1"
> + : : "r"(regs->pmxevttype[bit]));
> + asm volatile("mcr p15, 0, %0, c9, c13, 2"
> + : : "r"(regs->pmxevtcnt[bit]));
> + } else
> + asm volatile("mcr p15, 0, %0, c9, c13, 0"
> + : : "r" (regs->pmxevtcnt[0]));
> + }
> +
> + asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
> + asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
> + asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc)); }
> +
> static void armv7pmu_enable_event(struct perf_event *event) {
> unsigned long flags;
> @@ -1528,6 +1601,8 @@ static void armv7pmu_init(struct arm_pmu
> *cpu_pmu)
> cpu_pmu->start = armv7pmu_start;
> cpu_pmu->stop = armv7pmu_stop;
> cpu_pmu->reset = armv7pmu_reset;
> + cpu_pmu->save_regs = armv7pmu_save_regs;
> + cpu_pmu->restore_regs = armv7pmu_restore_regs;
> cpu_pmu->max_period = (1LLU << 32) - 1;
> };
>
> --
> 1.7.9.5



Best Regards,
Neil Zhang
0 new messages