[PATCH 05/11] arm, arm64: Remove unneeded forward type declarations from gic.h


Jan Kiszka

Dec 2, 2016, 3:43:07 AM
to jailho...@googlegroups.com
These forward declarations are either no longer needed or already provided
via the included headers.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/include/asm/gic.h | 7 -------
1 file changed, 7 deletions(-)

diff --git a/hypervisor/arch/arm-common/include/asm/gic.h b/hypervisor/arch/arm-common/include/asm/gic.h
index b8fab88..224a466 100644
--- a/hypervisor/arch/arm-common/include/asm/gic.h
+++ b/hypervisor/arch/arm-common/include/asm/gic.h
@@ -14,7 +14,6 @@
#define _JAILHOUSE_ASM_GIC_COMMON_H

#include <jailhouse/mmio.h>
-#include <jailhouse/types.h>
#if defined(CONFIG_ARM_GIC_V2)
# include <asm/gic_v2.h>
#elif defined(CONFIG_ARM_GIC_V3)
@@ -48,12 +47,6 @@
#define is_spi(irqn) ((irqn) > 31 && (irqn) < 1020)

#ifndef __ASSEMBLY__
-
-struct cell;
-struct arm_mmio_access;
-struct per_cpu;
-struct sgi;
-
extern u8 target_cpu_map[];

extern void *gicd_base;
--
2.1.4

Jan Kiszka

Dec 2, 2016, 3:43:07 AM
to jailho...@googlegroups.com, Ralf Ramsauer, Mark Rutland
This is a mandatory service with PSCI v0.2+, and if the root cell was
using it prior to enabling Jailhouse, just returning an error, as we
have done so far, will send the CPUs into a busy loop.

Implement the minimum of this service by sending the CPU into a wfi,
but only if there are no interrupts waiting to be injected. We check
for physical interrupts right after the wfi in order to reduce world
switches and, thus, event delivery latencies.
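
For illustration, a minimal sketch of what the guest-side call looks like
under the SMC calling convention. The function ID matches the one added
below; the psci_cpu_suspend() wrapper and the choice of the hvc conduit
are assumptions for the example, not Jailhouse code:

#include <stdint.h>

#define PSCI_CPU_SUSPEND_64	0xc4000001	/* function ID from this patch */

/*
 * Hypothetical guest-side wrapper: function ID in x0, power_state in x1,
 * entry point and context ID in x2/x3, result returned in x0 (SMCCC).
 * Cells using the smc conduit would issue "smc #0" instead of "hvc #0".
 */
static inline int64_t psci_cpu_suspend(uint32_t power_state,
				       uint64_t entry, uint64_t context)
{
	register uint64_t x0 asm("x0") = PSCI_CPU_SUSPEND_64;
	register uint64_t x1 asm("x1") = power_state;
	register uint64_t x2 asm("x2") = entry;
	register uint64_t x3 asm("x3") = context;

	asm volatile("hvc #0"
		     : "+r" (x0)
		     : "r" (x1), "r" (x2), "r" (x3)
		     : "memory");
	return (int64_t)x0;
}

With the patch applied, such a standby request returns 0 once an interrupt
becomes pending (or immediately if one is already pending), instead of
failing with an error as before.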

CC: Ralf Ramsauer <ra...@ramses-pyramidenbau.de>
CC: Mark Rutland <mark.r...@arm.com>
Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/include/asm/psci.h | 2 ++
hypervisor/arch/arm-common/psci.c | 8 ++++++++
2 files changed, 10 insertions(+)

diff --git a/hypervisor/arch/arm-common/include/asm/psci.h b/hypervisor/arch/arm-common/include/asm/psci.h
index 8feeda4..09712c6 100644
--- a/hypervisor/arch/arm-common/include/asm/psci.h
+++ b/hypervisor/arch/arm-common/include/asm/psci.h
@@ -14,6 +14,8 @@
#define _JAILHOUSE_ASM_PSCI_H

#define PSCI_VERSION 0x84000000
+#define PSCI_CPU_SUSPEND_32 0x84000001
+#define PSCI_CPU_SUSPEND_64 0xc4000001
#define PSCI_CPU_OFF 0x84000002
#define PSCI_CPU_ON_32 0x84000003
#define PSCI_CPU_ON_64 0xc4000003
diff --git a/hypervisor/arch/arm-common/psci.c b/hypervisor/arch/arm-common/psci.c
index ca83119..8e3a301 100644
--- a/hypervisor/arch/arm-common/psci.c
+++ b/hypervisor/arch/arm-common/psci.c
@@ -79,6 +79,14 @@ long psci_dispatch(struct trap_context *ctx)
/* Major[31:16], minor[15:0] */
return 2;

+ case PSCI_CPU_SUSPEND_32:
+ case PSCI_CPU_SUSPEND_64:
+ if (!irqchip_has_pending_irqs()) {
+ asm volatile("wfi" : : : "memory");
+ irqchip_handle_irq(cpu_data);
+ }
+ return 0;
+
case PSCI_CPU_OFF:
case PSCI_CPU_OFF_V0_1_UBOOT:
arm_cpu_park();
--
2.1.4

Jan Kiszka

Dec 2, 2016, 3:43:07 AM
to jailho...@googlegroups.com, Mark Rutland, Ralf Ramsauer
This series first of all addresses the issue brought up in
https://www.mail-archive.com/jailho...@googlegroups.com/msg01375.html.

In addition, and that is the larger part of the series, it further cleans
up and consolidates the ARM irqchip/GIC layer. See the individual patches
for details.

Jan


CC: Mark Rutland <mark.r...@arm.com>
CC: Ralf Ramsauer <ra...@ramses-pyramidenbau.de>

Jan Kiszka (11):
arm, arm64: Add irqchip_has_pending_irqs
arm, arm64: Emulate PSCI service PSCI_CPU_SUSPEND
arm, arm64: Remove indirection to gic_handle_irq
arm, arm64: Move common GICD mapping into irqchip_cell_init
arm, arm64: Remove unneeded forward type declarations from gic.h
arm, arm64: Refactor target_cpu_map to gicv2_target_cpu_map
arm, arm64: Removed unused mmio_access irqchip callback
arm, arm64: Factor out handle_irq_target irqchip callback
arm, arm64: Fold gic_probe_cpu_id into GICv2 gic_cpu_init
arm, arm64: Move gic_targets_in_cell into gic-v2.c
arm, arm64: Fold gic-common.c into irqchip.c

hypervisor/arch/arm-common/Kbuild | 2 +-
hypervisor/arch/arm-common/gic-common.c | 356 -----------------------
hypervisor/arch/arm-common/gic-v2.c | 143 +++++++--
hypervisor/arch/arm-common/include/asm/gic.h | 16 +-
hypervisor/arch/arm-common/include/asm/irqchip.h | 7 +-
hypervisor/arch/arm-common/include/asm/psci.h | 2 +
hypervisor/arch/arm-common/irqchip.c | 241 ++++++++++++++-
hypervisor/arch/arm-common/psci.c | 8 +
hypervisor/arch/arm/gic-v3.c | 23 +-
9 files changed, 395 insertions(+), 403 deletions(-)
delete mode 100644 hypervisor/arch/arm-common/gic-common.c

--
2.1.4

Jan Kiszka

Dec 2, 2016, 3:43:08 AM
to jailho...@googlegroups.com
The mmio_access callback is an unused leftover from commit dda4c03f.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/include/asm/irqchip.h | 2 --
1 file changed, 2 deletions(-)

diff --git a/hypervisor/arch/arm-common/include/asm/irqchip.h b/hypervisor/arch/arm-common/include/asm/irqchip.h
index b531f52..d9d1f61 100644
--- a/hypervisor/arch/arm-common/include/asm/irqchip.h
+++ b/hypervisor/arch/arm-common/include/asm/irqchip.h
@@ -52,8 +52,6 @@ struct irqchip_ops {
int (*inject_irq)(struct per_cpu *cpu_data, u16 irq_id);
void (*enable_maint_irq)(bool enable);
bool (*has_pending_irqs)(void);
-
- int (*mmio_access)(struct mmio_access *access);
};

unsigned int irqchip_mmio_count_regions(struct cell *cell);
--
2.1.4

Jan Kiszka

Dec 2, 2016, 3:43:08 AM
to jailho...@googlegroups.com
This mapping is GICv2-only, so rename the variable accordingly. Also
declare its size explicitly so that it can be retrieved externally, e.g.
via ARRAY_SIZE.
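
A short illustration of why the explicit bound matters (sketch only; the
names are made up and u8 stands in for the hypervisor's 8-bit type):

#include <stdint.h>
typedef uint8_t u8;

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

extern u8 old_map[];	/* incomplete type: sizeof()/ARRAY_SIZE() does not
			   compile in other translation units */
extern u8 new_map[8];	/* complete type: ARRAY_SIZE(new_map) == 8 wherever
			   this declaration is visible */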

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/gic-common.c | 16 ++++++++--------
hypervisor/arch/arm-common/gic-v2.c | 2 +-
hypervisor/arch/arm-common/include/asm/gic.h | 2 +-
3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/hypervisor/arch/arm-common/gic-common.c b/hypervisor/arch/arm-common/gic-common.c
index e0771d8..67dac8b 100644
--- a/hypervisor/arch/arm-common/gic-common.c
+++ b/hypervisor/arch/arm-common/gic-common.c
@@ -26,16 +26,16 @@

static DEFINE_SPINLOCK(dist_lock);

-/* The GIC interface numbering does not necessarily match the logical map */
-u8 target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+/* The GICv2 interface numbering does not necessarily match the logical map */
+u8 gicv2_target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

/* Check that the targeted interface belongs to the cell */
bool gic_targets_in_cell(struct cell *cell, u8 targets)
{
unsigned int cpu;

- for (cpu = 0; cpu < ARRAY_SIZE(target_cpu_map); cpu++)
- if (targets & target_cpu_map[cpu] &&
+ for (cpu = 0; cpu < ARRAY_SIZE(gicv2_target_cpu_map); cpu++)
+ if (targets & gicv2_target_cpu_map[cpu] &&
per_cpu(cpu)->cell != cell)
return false;

@@ -203,12 +203,12 @@ static enum mmio_result handle_sgir_access(struct mmio_access *mmio)
*/
int gic_probe_cpu_id(unsigned int cpu)
{
- if (cpu >= ARRAY_SIZE(target_cpu_map))
+ if (cpu >= ARRAY_SIZE(gicv2_target_cpu_map))
return -EINVAL;

- target_cpu_map[cpu] = mmio_read32(gicd_base + GICD_ITARGETSR);
+ gicv2_target_cpu_map[cpu] = mmio_read32(gicd_base + GICD_ITARGETSR);

- if (target_cpu_map[cpu] == 0)
+ if (gicv2_target_cpu_map[cpu] == 0)
return -ENODEV;

return 0;
@@ -244,7 +244,7 @@ void gic_handle_sgir_write(struct sgi *sgi, bool virt_input)
* translate them to the hypervisor's virtual
* IDs.
*/
- if (!(targets & target_cpu_map[cpu]))
+ if (!(targets & gicv2_target_cpu_map[cpu]))
continue;
}

diff --git a/hypervisor/arch/arm-common/gic-v2.c b/hypervisor/arch/arm-common/gic-v2.c
index c67b412..c3d8581 100644
--- a/hypervisor/arch/arm-common/gic-v2.c
+++ b/hypervisor/arch/arm-common/gic-v2.c
@@ -200,7 +200,7 @@ static void gic_adjust_irq_target(struct cell *cell, u16 irq_id)
return;

targets &= ~(0xff << shift);
- targets |= target_cpu_map[first_cpu(cell->cpu_set)] << shift;
+ targets |= gicv2_target_cpu_map[first_cpu(cell->cpu_set)] << shift;

mmio_write32(itargetsr, targets);
}
diff --git a/hypervisor/arch/arm-common/include/asm/gic.h b/hypervisor/arch/arm-common/include/asm/gic.h
index 224a466..44c50f9 100644
--- a/hypervisor/arch/arm-common/include/asm/gic.h
+++ b/hypervisor/arch/arm-common/include/asm/gic.h
@@ -47,7 +47,7 @@
#define is_spi(irqn) ((irqn) > 31 && (irqn) < 1020)

#ifndef __ASSEMBLY__
-extern u8 target_cpu_map[];
+extern u8 gicv2_target_cpu_map[8];

Jan Kiszka

Dec 2, 2016, 3:43:09 AM
to jailho...@googlegroups.com
gic_cpu_init is the only user, and it is GICv2-only. As a side effect,
the return value is now actually evaluated.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/gic-common.c | 22 ----------------------
hypervisor/arch/arm-common/gic-v2.c | 19 +++++++++++++++++--
hypervisor/arch/arm-common/include/asm/gic.h | 1 -
3 files changed, 17 insertions(+), 25 deletions(-)

diff --git a/hypervisor/arch/arm-common/gic-common.c b/hypervisor/arch/arm-common/gic-common.c
index 0fef774..acc1377 100644
--- a/hypervisor/arch/arm-common/gic-common.c
+++ b/hypervisor/arch/arm-common/gic-common.c
@@ -122,28 +122,6 @@ static enum mmio_result handle_sgir_access(struct mmio_access *mmio)
return MMIO_HANDLED;
}

-/*
- * Get the CPU interface ID for this cpu. It can be discovered by reading
- * the banked value of the PPI and IPI TARGET registers
- * Patch 2bb3135 in Linux explains why the probe may need to scans the first 8
- * registers: some early implementation returned 0 for the first ITARGETSR
- * registers.
- * Since those didn't have virtualization extensions, we can safely ignore that
- * case.
- */
-int gic_probe_cpu_id(unsigned int cpu)
-{
- if (cpu >= ARRAY_SIZE(gicv2_target_cpu_map))
- return -EINVAL;
-
- gicv2_target_cpu_map[cpu] = mmio_read32(gicd_base + GICD_ITARGETSR);
-
- if (gicv2_target_cpu_map[cpu] == 0)
- return -ENODEV;
-
- return 0;
-}
-
void gic_handle_sgir_write(struct sgi *sgi, bool virt_input)
{
struct per_cpu *cpu_data = this_cpu_data();
diff --git a/hypervisor/arch/arm-common/gic-v2.c b/hypervisor/arch/arm-common/gic-v2.c
index 51c5c7e..26a0d60 100644
--- a/hypervisor/arch/arm-common/gic-v2.c
+++ b/hypervisor/arch/arm-common/gic-v2.c
@@ -146,8 +146,23 @@ static int gic_cpu_init(struct per_cpu *cpu_data)
*/
gic_clear_pending_irqs();

- /* Register ourselves into the CPU itf map */
- gic_probe_cpu_id(cpu_data->cpu_id);
+ /*
+ * Get the CPU interface ID for this cpu. It can be discovered by
+ * reading the banked value of the PPI and IPI TARGET registers
+ * Patch 2bb3135 in Linux explains why the probe may need to scans the
+ * first 8 registers: some early implementation returned 0 for the first
+ * ITARGETSR registers.
+ * Since those didn't have virtualization extensions, we can safely
+ * ignore that case.
+ */
+ if (cpu_data->cpu_id >= ARRAY_SIZE(gicv2_target_cpu_map))
+ return -EINVAL;
+
+ gicv2_target_cpu_map[cpu_data->cpu_id] =
+ mmio_read32(gicd_base + GICD_ITARGETSR);
+
+ if (gicv2_target_cpu_map[cpu_data->cpu_id] == 0)
+ return -ENODEV;

return 0;
}
diff --git a/hypervisor/arch/arm-common/include/asm/gic.h b/hypervisor/arch/arm-common/include/asm/gic.h
index a1ee60e..7895633 100644
--- a/hypervisor/arch/arm-common/include/asm/gic.h
+++ b/hypervisor/arch/arm-common/include/asm/gic.h
@@ -54,7 +54,6 @@ extern u8 gicv2_target_cpu_map[8];
extern void *gicd_base;
extern spinlock_t dist_lock;

-int gic_probe_cpu_id(unsigned int cpu);
enum mmio_result gic_handle_dist_access(void *arg, struct mmio_access *mmio);
enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
unsigned int irq);
--
2.1.4

Jan Kiszka

Dec 2, 2016, 3:43:09 AM
to jailho...@googlegroups.com
As we run the GICv3 only in affinity routing mode, accesses to ITARGETSR
can be ignored, and the existing handle_irq_target can become GICv2-only.
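
For context, a hedged sketch of what routing an SPI looks like in that
mode: the target is selected through GICD_IROUTER<n> (offset 0x6000 + 8*n,
defined for SPIs only, per the GIC architecture spec), so ITARGETSR writes
carry no information. The helper below is purely illustrative and assumes
an mmio_write64() accessor like the one in jailhouse/mmio.h:

#include <stdint.h>

static void route_spi_to_affinity(void *gicd, unsigned int spi,
				  uint8_t aff3, uint8_t aff2,
				  uint8_t aff1, uint8_t aff0)
{
	/* Aff0 in bits [7:0], Aff1 in [15:8], Aff2 in [23:16], Aff3 in
	 * [39:32]; bit 31 (Interrupt_Routing_Mode) stays 0 for targeted
	 * delivery. */
	uint64_t route = ((uint64_t)aff3 << 32) | ((uint64_t)aff2 << 16) |
			 ((uint64_t)aff1 << 8) | aff0;

	mmio_write64(gicd + 0x6000 + 8 * spi, route);
}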

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/gic-common.c | 74 +-----------------------
hypervisor/arch/arm-common/gic-v2.c | 74 +++++++++++++++++++++++-
hypervisor/arch/arm-common/include/asm/gic.h | 3 +
hypervisor/arch/arm-common/include/asm/irqchip.h | 3 +
hypervisor/arch/arm-common/irqchip.c | 2 -
hypervisor/arch/arm/gic-v3.c | 8 +++
6 files changed, 89 insertions(+), 75 deletions(-)

diff --git a/hypervisor/arch/arm-common/gic-common.c b/hypervisor/arch/arm-common/gic-common.c
index 67dac8b..0fef774 100644
--- a/hypervisor/arch/arm-common/gic-common.c
+++ b/hypervisor/arch/arm-common/gic-common.c
@@ -24,7 +24,7 @@
#define REG_RANGE(base, n, size) \
(base) ... ((base) + (n - 1) * (size))

-static DEFINE_SPINLOCK(dist_lock);
+DEFINE_SPINLOCK(dist_lock);

/* The GICv2 interface numbering does not necessarily match the logical map */
u8 gicv2_target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
@@ -103,76 +103,6 @@ restrict_bitmask_access(struct mmio_access *mmio, unsigned int reg_index,
return MMIO_HANDLED;
}

-/*
- * GICv2 uses 8bit values for each IRQ in the ITARGETRs registers
- */
-static enum mmio_result handle_irq_target(struct mmio_access *mmio,
- unsigned int irq)
-{
- /*
- * ITARGETSR contain one byte per IRQ, so the first one affected by this
- * access corresponds to the reg index
- */
- unsigned int irq_base = irq & ~0x3;
- struct cell *cell = this_cell();
- unsigned int offset;
- u32 access_mask = 0;
- unsigned int n;
- u8 targets;
-
- /*
- * Let the guest freely access its SGIs and PPIs, which may be used to
- * fill its CPU interface map.
- */
- if (!is_spi(irq)) {
- mmio_perform_access(gicd_base, mmio);
- return MMIO_HANDLED;
- }
-
- /*
- * The registers are byte-accessible, but we always do word accesses.
- */
- offset = irq % 4;
- mmio->address &= ~0x3;
- mmio->value <<= 8 * offset;
- mmio->size = 4;
-
- for (n = 0; n < 4; n++) {
- if (irqchip_irq_in_cell(cell, irq_base + n))
- access_mask |= 0xff << (8 * n);
- else
- continue;
-
- if (!mmio->is_write)
- continue;
-
- targets = (mmio->value >> (8 * n)) & 0xff;
-
- if (!gic_targets_in_cell(cell, targets)) {
- printk("Attempt to route IRQ%d outside of cell\n",
- irq_base + n);
- return MMIO_ERROR;
- }
- }
-
- if (mmio->is_write) {
- spin_lock(&dist_lock);
- u32 itargetsr =
- mmio_read32(gicd_base + GICD_ITARGETSR + irq_base);
- mmio->value &= access_mask;
- /* Combine with external SPIs */
- mmio->value |= (itargetsr & ~access_mask);
- /* And do the access */
- mmio_perform_access(gicd_base, mmio);
- spin_unlock(&dist_lock);
- } else {
- mmio_perform_access(gicd_base, mmio);
- mmio->value &= access_mask;
- }
-
- return MMIO_HANDLED;
-}
-
static enum mmio_result handle_sgir_access(struct mmio_access *mmio)
{
struct sgi sgi;
@@ -269,7 +199,7 @@ enum mmio_result gic_handle_dist_access(void *arg, struct mmio_access *mmio)
break;

case REG_RANGE(GICD_ITARGETSR, 1024, 1):
- ret = handle_irq_target(mmio, reg - GICD_ITARGETSR);
+ ret = irqchip.handle_irq_target(mmio, reg - GICD_ITARGETSR);
break;

case REG_RANGE(GICD_ICENABLER, 32, 4):
diff --git a/hypervisor/arch/arm-common/gic-v2.c b/hypervisor/arch/arm-common/gic-v2.c
index c3d8581..51c5c7e 100644
--- a/hypervisor/arch/arm-common/gic-v2.c
+++ b/hypervisor/arch/arm-common/gic-v2.c
@@ -11,7 +11,7 @@
*/

#include <jailhouse/control.h>
-#include <jailhouse/mmio.h>
+#include <jailhouse/printk.h>
#include <asm/gic.h>
#include <asm/irqchip.h>
#include <asm/setup.h>
@@ -291,6 +291,76 @@ enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
return MMIO_HANDLED;
}

+/*
+ * GICv2 uses 8bit values for each IRQ in the ITARGETSR registers
+ */
+static enum mmio_result gic_handle_irq_target(struct mmio_access *mmio,
+ unsigned int irq)
+{
+ /*
+ * ITARGETSR contain one byte per IRQ, so the first one affected by this
+ * access corresponds to the reg index
+ */
+ unsigned int irq_base = irq & ~0x3;
+ struct cell *cell = this_cell();
+ unsigned int offset;
+ u32 access_mask = 0;
+ unsigned int n;
+ u8 targets;
+
+ /*
+ * Let the guest freely access its SGIs and PPIs, which may be used to
+ * fill its CPU interface map.
+ */
+ if (!is_spi(irq)) {
+ mmio_perform_access(gicd_base, mmio);
+ return MMIO_HANDLED;
+ }
+
+ /*
+ * The registers are byte-accessible, but we always do word accesses.
+ */
+ offset = irq % 4;
+ mmio->address &= ~0x3;
+ mmio->value <<= 8 * offset;
+ mmio->size = 4;
+
+ for (n = 0; n < 4; n++) {
+ if (irqchip_irq_in_cell(cell, irq_base + n))
+ access_mask |= 0xff << (8 * n);
+ else
+ continue;
+
+ if (!mmio->is_write)
+ continue;
+
+ targets = (mmio->value >> (8 * n)) & 0xff;
+
+ if (!gic_targets_in_cell(cell, targets)) {
+ printk("Attempt to route IRQ%d outside of cell\n",
+ irq_base + n);
+ return MMIO_ERROR;
+ }
+ }
+
+ if (mmio->is_write) {
+ spin_lock(&dist_lock);
+ u32 itargetsr =
+ mmio_read32(gicd_base + GICD_ITARGETSR + irq_base);
+ mmio->value &= access_mask;
+ /* Combine with external SPIs */
+ mmio->value |= (itargetsr & ~access_mask);
+ /* And do the access */
+ mmio_perform_access(gicd_base, mmio);
+ spin_unlock(&dist_lock);
+ } else {
+ mmio_perform_access(gicd_base, mmio);
+ mmio->value &= access_mask;
+ }
+
+ return MMIO_HANDLED;
+}
+
unsigned int irqchip_mmio_count_regions(struct cell *cell)
{
return 1;
@@ -309,4 +379,6 @@ struct irqchip_ops irqchip = {
.enable_maint_irq = gic_enable_maint_irq,
.has_pending_irqs = gic_has_pending_irqs,
.eoi_irq = gic_eoi_irq,
+
+ .handle_irq_target = gic_handle_irq_target,
};
diff --git a/hypervisor/arch/arm-common/include/asm/gic.h b/hypervisor/arch/arm-common/include/asm/gic.h
index 44c50f9..a1ee60e 100644
--- a/hypervisor/arch/arm-common/include/asm/gic.h
+++ b/hypervisor/arch/arm-common/include/asm/gic.h
@@ -47,9 +47,12 @@
#define is_spi(irqn) ((irqn) > 31 && (irqn) < 1020)

#ifndef __ASSEMBLY__
+extern struct irqchip_ops irqchip;
+
extern u8 gicv2_target_cpu_map[8];

extern void *gicd_base;
+extern spinlock_t dist_lock;

int gic_probe_cpu_id(unsigned int cpu);
enum mmio_result gic_handle_dist_access(void *arg, struct mmio_access *mmio);
diff --git a/hypervisor/arch/arm-common/include/asm/irqchip.h b/hypervisor/arch/arm-common/include/asm/irqchip.h
index d9d1f61..40984e0 100644
--- a/hypervisor/arch/arm-common/include/asm/irqchip.h
+++ b/hypervisor/arch/arm-common/include/asm/irqchip.h
@@ -52,6 +52,9 @@ struct irqchip_ops {
int (*inject_irq)(struct per_cpu *cpu_data, u16 irq_id);
void (*enable_maint_irq)(bool enable);
bool (*has_pending_irqs)(void);
+
+ enum mmio_result (*handle_irq_target)(struct mmio_access *mmio,
+ unsigned int irq);
};

unsigned int irqchip_mmio_count_regions(struct cell *cell);
diff --git a/hypervisor/arch/arm-common/irqchip.c b/hypervisor/arch/arm-common/irqchip.c
index 28e453e..794259d 100644
--- a/hypervisor/arch/arm-common/irqchip.c
+++ b/hypervisor/arch/arm-common/irqchip.c
@@ -32,8 +32,6 @@
(counter) < (config)->num_irqchips; \
(chip)++, (counter)++)

-extern struct irqchip_ops irqchip;
-
void *gicd_base;

/*
diff --git a/hypervisor/arch/arm/gic-v3.c b/hypervisor/arch/arm/gic-v3.c
index 3b10d0d..9b17df9 100644
--- a/hypervisor/arch/arm/gic-v3.c
+++ b/hypervisor/arch/arm/gic-v3.c
@@ -432,6 +432,13 @@ static bool gicv3_has_pending_irqs(void)
return false;
}

+static enum mmio_result gicv3_handle_irq_target(struct mmio_access *mmio,
+ unsigned int irq)
+{
+ /* ignore writes, we are in affinity routing mode */
+ return MMIO_HANDLED;
+}
+
unsigned int irqchip_mmio_count_regions(struct cell *cell)
{
return 2;
@@ -448,4 +455,5 @@ struct irqchip_ops irqchip = {
.enable_maint_irq = gicv3_enable_maint_irq,
.has_pending_irqs = gicv3_has_pending_irqs,
.eoi_irq = gic_eoi_irq,
+ .handle_irq_target = gicv3_handle_irq_target,
};
--
2.1.4

Jan Kiszka

Dec 2, 2016, 3:43:10 AM
to jailho...@googlegroups.com
This is GICv2-only.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/gic-common.c | 13 -------------
hypervisor/arch/arm-common/gic-v2.c | 13 +++++++++++++
hypervisor/arch/arm-common/include/asm/gic.h | 1 -
3 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/hypervisor/arch/arm-common/gic-common.c b/hypervisor/arch/arm-common/gic-common.c
index acc1377..37a12b6 100644
--- a/hypervisor/arch/arm-common/gic-common.c
+++ b/hypervisor/arch/arm-common/gic-common.c
@@ -29,19 +29,6 @@ DEFINE_SPINLOCK(dist_lock);
/* The GICv2 interface numbering does not necessarily match the logical map */
u8 gicv2_target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

-/* Check that the targeted interface belongs to the cell */
-bool gic_targets_in_cell(struct cell *cell, u8 targets)
-{
- unsigned int cpu;
-
- for (cpu = 0; cpu < ARRAY_SIZE(gicv2_target_cpu_map); cpu++)
- if (targets & gicv2_target_cpu_map[cpu] &&
- per_cpu(cpu)->cell != cell)
- return false;
-
- return true;
-}
-
/*
* Most of the GIC distributor writes only reconfigure the IRQs corresponding to
* the bits of the written value, by using separate `set' and `clear' registers.
diff --git a/hypervisor/arch/arm-common/gic-v2.c b/hypervisor/arch/arm-common/gic-v2.c
index 26a0d60..d9362be 100644
--- a/hypervisor/arch/arm-common/gic-v2.c
+++ b/hypervisor/arch/arm-common/gic-v2.c
@@ -21,6 +21,19 @@ static unsigned int gic_num_lr;
void *gicc_base;
void *gich_base;

+/* Check that the targeted interface belongs to the cell */
+static bool gic_targets_in_cell(struct cell *cell, u8 targets)
+{
+ unsigned int cpu;
+
+ for (cpu = 0; cpu < ARRAY_SIZE(gicv2_target_cpu_map); cpu++)
+ if (targets & gicv2_target_cpu_map[cpu] &&
+ per_cpu(cpu)->cell != cell)
+ return false;
+
+ return true;
+}
+
static int gic_init(void)
{
gicc_base = paging_map_device(
diff --git a/hypervisor/arch/arm-common/include/asm/gic.h b/hypervisor/arch/arm-common/include/asm/gic.h
index 7895633..d74ec9b 100644
--- a/hypervisor/arch/arm-common/include/asm/gic.h
+++ b/hypervisor/arch/arm-common/include/asm/gic.h
@@ -58,7 +58,6 @@ enum mmio_result gic_handle_dist_access(void *arg, struct mmio_access *mmio);
enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
unsigned int irq);
void gic_handle_sgir_write(struct sgi *sgi, bool virt_input);
-bool gic_targets_in_cell(struct cell *cell, u8 targets);
void gic_set_irq_pending(u16 irq_id);

#endif /* !__ASSEMBLY__ */
--
2.1.4

Jan Kiszka

Dec 2, 2016, 3:43:10 AM
to jailho...@googlegroups.com
Our irqchip is inherently GIC-centric, so there is no reason to keep the
frontend interface separate from the common GIC part. Move them together,
and also fold gic_set_irq_pending into its only caller.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/Kbuild | 2 +-
hypervisor/arch/arm-common/gic-common.c | 251 ---------------------------
hypervisor/arch/arm-common/include/asm/gic.h | 3 -
hypervisor/arch/arm-common/irqchip.c | 228 +++++++++++++++++++++++-
4 files changed, 227 insertions(+), 257 deletions(-)
delete mode 100644 hypervisor/arch/arm-common/gic-common.c

diff --git a/hypervisor/arch/arm-common/Kbuild b/hypervisor/arch/arm-common/Kbuild
index 0aa613d..2901ccf 100644
--- a/hypervisor/arch/arm-common/Kbuild
+++ b/hypervisor/arch/arm-common/Kbuild
@@ -13,7 +13,7 @@
include $(CONFIG_MK)

OBJS-y += dbg-write.o lib.o psci.o control.o paging.o mmu_cell.o
-OBJS-y += irqchip.o gic-common.o pci.o ivshmem.o uart-pl011.o uart-8250.o
+OBJS-y += irqchip.o pci.o ivshmem.o uart-pl011.o uart-8250.o
OBJS-$(CONFIG_ARM_GIC_V2) += gic-v2.o

COMMON_OBJECTS = $(addprefix ../arm-common/,$(OBJS-y))
diff --git a/hypervisor/arch/arm-common/gic-common.c b/hypervisor/arch/arm-common/gic-common.c
deleted file mode 100644
index 37a12b6..0000000
--- a/hypervisor/arch/arm-common/gic-common.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Jailhouse, a Linux-based partitioning hypervisor
- *
- * Copyright (c) ARM Limited, 2014
- *
- * Authors:
- * Jean-Philippe Brucker <jean-phili...@arm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- */
-
-#include <jailhouse/cell.h>
-#include <jailhouse/control.h>
-#include <jailhouse/mmio.h>
-#include <jailhouse/printk.h>
-#include <asm/control.h>
-#include <asm/gic.h>
-#include <asm/irqchip.h>
-#include <asm/percpu.h>
-#include <asm/spinlock.h>
-#include <asm/traps.h>
-
-#define REG_RANGE(base, n, size) \
- (base) ... ((base) + (n - 1) * (size))
-
-DEFINE_SPINLOCK(dist_lock);
-
-/* The GICv2 interface numbering does not necessarily match the logical map */
-u8 gicv2_target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
-
-/*
- * Most of the GIC distributor writes only reconfigure the IRQs corresponding to
- * the bits of the written value, by using separate `set' and `clear' registers.
- * Such registers can be handled by setting the `is_poke' boolean, which allows
- * to simply restrict the mmio->value with the cell configuration mask.
- * Others, such as the priority registers, will need to be read and written back
- * with a restricted value, by using the distributor lock.
- */
-static enum mmio_result
-restrict_bitmask_access(struct mmio_access *mmio, unsigned int reg_index,
- unsigned int bits_per_irq, bool is_poke)
-{
- struct cell *cell = this_cell();
- unsigned int irq;
- unsigned long access_mask = 0;
- /*
- * In order to avoid division, the number of bits per irq is limited
- * to powers of 2 for the moment.
- */
- unsigned long irqs_per_reg = 32 >> ffsl(bits_per_irq);
- unsigned long irq_bits = (1 << bits_per_irq) - 1;
- /* First, extract the first interrupt affected by this access */
- unsigned int first_irq = reg_index * irqs_per_reg;
-
- for (irq = 0; irq < irqs_per_reg; irq++)
- if (irqchip_irq_in_cell(cell, first_irq + irq))
- access_mask |= irq_bits << (irq * bits_per_irq);
-
- if (!mmio->is_write) {
- /* Restrict the read value */
- mmio_perform_access(gicd_base, mmio);
- mmio->value &= access_mask;
- return MMIO_HANDLED;
- }
-
- if (!is_poke) {
- /*
- * Modify the existing value of this register by first reading
- * it into mmio->value
- * Relies on a spinlock since we need two mmio accesses.
- */
- unsigned long access_val = mmio->value;
-
- spin_lock(&dist_lock);
-
- mmio->is_write = false;
- mmio_perform_access(gicd_base, mmio);
- mmio->is_write = true;
-
- mmio->value &= ~access_mask;
- mmio->value |= access_val & access_mask;
- mmio_perform_access(gicd_base, mmio);
-
- spin_unlock(&dist_lock);
- } else {
- mmio->value &= access_mask;
- mmio_perform_access(gicd_base, mmio);
- }
- return MMIO_HANDLED;
-}
-
-static enum mmio_result handle_sgir_access(struct mmio_access *mmio)
-{
- struct sgi sgi;
- unsigned long val = mmio->value;
-
- if (!mmio->is_write)
- return MMIO_HANDLED;
-
- sgi.targets = (val >> 16) & 0xff;
- sgi.routing_mode = (val >> 24) & 0x3;
- sgi.aff1 = 0;
- sgi.aff2 = 0;
- sgi.aff3 = 0;
- sgi.id = val & 0xf;
-
- gic_handle_sgir_write(&sgi, false);
- return MMIO_HANDLED;
-}
-
-void gic_handle_sgir_write(struct sgi *sgi, bool virt_input)
-{
- struct per_cpu *cpu_data = this_cpu_data();
- unsigned long targets = sgi->targets;
- unsigned int cpu;
-
- if (sgi->routing_mode == 2) {
- /* Route to the caller itself */
- irqchip_set_pending(cpu_data, sgi->id);
- sgi->targets = (1 << cpu_data->cpu_id);
- } else {
- sgi->targets = 0;
-
- for_each_cpu(cpu, cpu_data->cell->cpu_set) {
- if (sgi->routing_mode == 1) {
- /* Route to all (cell) CPUs but the caller. */
- if (cpu == cpu_data->cpu_id)
- continue;
- } else if (virt_input) {
- if (!test_bit(arm_cpu_phys2virt(cpu),
- &targets))
- continue;
- } else {
- /*
- * When using a cpu map to target the different
- * CPUs (GICv2), they are independent from the
- * physical CPU IDs, so there is no need to
- * translate them to the hypervisor's virtual
- * IDs.
- */
- if (!(targets & gicv2_target_cpu_map[cpu]))
- continue;
- }
-
- irqchip_set_pending(per_cpu(cpu), sgi->id);
- sgi->targets |= (1 << cpu);
- }
- }
-
- /* Let the other CPUS inject their SGIs */
- sgi->id = SGI_INJECT;
- irqchip_send_sgi(sgi);
-}
-
-enum mmio_result gic_handle_dist_access(void *arg, struct mmio_access *mmio)
-{
- unsigned long reg = mmio->address;
- enum mmio_result ret;
-
- switch (reg) {
- case REG_RANGE(GICD_IROUTER, 1024, 8):
- ret = gic_handle_irq_route(mmio, (reg - GICD_IROUTER) / 8);
- break;
-
- case REG_RANGE(GICD_ITARGETSR, 1024, 1):
- ret = irqchip.handle_irq_target(mmio, reg - GICD_ITARGETSR);
- break;
-
- case REG_RANGE(GICD_ICENABLER, 32, 4):
- case REG_RANGE(GICD_ISENABLER, 32, 4):
- case REG_RANGE(GICD_ICPENDR, 32, 4):
- case REG_RANGE(GICD_ISPENDR, 32, 4):
- case REG_RANGE(GICD_ICACTIVER, 32, 4):
- case REG_RANGE(GICD_ISACTIVER, 32, 4):
- ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, true);
- break;
-
- case REG_RANGE(GICD_IGROUPR, 32, 4):
- ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, false);
- break;
-
- case REG_RANGE(GICD_ICFGR, 64, 4):
- ret = restrict_bitmask_access(mmio, (reg & 0xff) / 4, 2, false);
- break;
-
- case REG_RANGE(GICD_IPRIORITYR, 255, 4):
- ret = restrict_bitmask_access(mmio, (reg & 0x3ff) / 4, 8,
- false);
- break;
-
- case GICD_SGIR:
- ret = handle_sgir_access(mmio);
- break;
-
- case GICD_CTLR:
- case GICD_TYPER:
- case GICD_IIDR:
- case REG_RANGE(GICD_PIDR0, 4, 4):
- case REG_RANGE(GICD_PIDR4, 4, 4):
- case REG_RANGE(GICD_CIDR0, 4, 4):
- /* Allow read access, ignore write */
- if (!mmio->is_write)
- mmio_perform_access(gicd_base, mmio);
- /* fall through */
- default:
- /* Ignore access. */
- ret = MMIO_HANDLED;
- }
-
- return ret;
-}
-
-void irqchip_handle_irq(struct per_cpu *cpu_data)
-{
- unsigned int count_event = 1;
- bool handled = false;
- u32 irq_id;
-
- while (1) {
- /* Read IAR1: set 'active' state */
- irq_id = gic_read_iar();
-
- if (irq_id == 0x3ff) /* Spurious IRQ */
- break;
-
- /* Handle IRQ */
- if (is_sgi(irq_id)) {
- arch_handle_sgi(cpu_data, irq_id, count_event);
- handled = true;
- } else {
- handled = arch_handle_phys_irq(cpu_data, irq_id,
- count_event);
- }
- count_event = 0;
-
- /*
- * Write EOIR1: drop priority, but stay active if handled is
- * false.
- * This allows to not be re-interrupted by a level-triggered
- * interrupt that needs handling in the guest (e.g. timer)
- */
- irqchip_eoi_irq(irq_id, handled);
- }
-}
-
-void gic_set_irq_pending(u16 irq_id)
-{
- mmio_write32(gicd_base + GICD_ISPENDR + (irq_id / 32) * 4,
- 1 << (irq_id % 32));
-}
diff --git a/hypervisor/arch/arm-common/include/asm/gic.h b/hypervisor/arch/arm-common/include/asm/gic.h
index d74ec9b..35f48dc 100644
--- a/hypervisor/arch/arm-common/include/asm/gic.h
+++ b/hypervisor/arch/arm-common/include/asm/gic.h
@@ -54,11 +54,8 @@ extern u8 gicv2_target_cpu_map[8];
extern void *gicd_base;
extern spinlock_t dist_lock;

-enum mmio_result gic_handle_dist_access(void *arg, struct mmio_access *mmio);
enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
unsigned int irq);
void gic_handle_sgir_write(struct sgi *sgi, bool virt_input);
-void gic_set_irq_pending(u16 irq_id);
-
#endif /* !__ASSEMBLY__ */
#endif /* !_JAILHOUSE_ASM_GIC_COMMON_H */
diff --git a/hypervisor/arch/arm-common/irqchip.c b/hypervisor/arch/arm-common/irqchip.c
index 794259d..d523797 100644
--- a/hypervisor/arch/arm-common/irqchip.c
+++ b/hypervisor/arch/arm-common/irqchip.c
@@ -21,7 +21,6 @@
#include <asm/control.h>
#include <asm/gic.h>
#include <asm/irqchip.h>
-#include <asm/setup.h>
#include <asm/sysregs.h>

/* AMBA's biosfood */
@@ -32,6 +31,14 @@
(counter) < (config)->num_irqchips; \
(chip)++, (counter)++)

+#define REG_RANGE(base, n, size) \
+ (base) ... ((base) + (n - 1) * (size))
+
+/* The GICv2 interface numbering does not necessarily match the logical map */
+u8 gicv2_target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+DEFINE_SPINLOCK(dist_lock);
+
void *gicd_base;

/*
@@ -40,6 +47,222 @@ void *gicd_base;
*/
static bool irqchip_is_init;

+/*
+ * Most of the GIC distributor writes only reconfigure the IRQs corresponding to
+ * the bits of the written value, by using separate `set' and `clear' registers.
+ * Such registers can be handled by setting the `is_poke' boolean, which allows
+ * to simply restrict the mmio->value with the cell configuration mask.
+ * Others, such as the priority registers, will need to be read and written back
+ * with a restricted value, by using the distributor lock.
+ */
+static enum mmio_result
+restrict_bitmask_access(struct mmio_access *mmio, unsigned int reg_index,
+ unsigned int bits_per_irq, bool is_poke)
+{
+ struct cell *cell = this_cell();
+ unsigned int irq;
+ unsigned long access_mask = 0;
+ /*
+ * In order to avoid division, the number of bits per irq is limited
+ * to powers of 2 for the moment.
+ */
+ unsigned long irqs_per_reg = 32 >> ffsl(bits_per_irq);
+ unsigned long irq_bits = (1 << bits_per_irq) - 1;
+ /* First, extract the first interrupt affected by this access */
+ unsigned int first_irq = reg_index * irqs_per_reg;
+
+ for (irq = 0; irq < irqs_per_reg; irq++)
+ if (irqchip_irq_in_cell(cell, first_irq + irq))
+ access_mask |= irq_bits << (irq * bits_per_irq);
+
+ if (!mmio->is_write) {
+ /* Restrict the read value */
+ mmio_perform_access(gicd_base, mmio);
+ mmio->value &= access_mask;
+ return MMIO_HANDLED;
+ }
+
+ if (!is_poke) {
+ /*
+ * Modify the existing value of this register by first reading
+ * it into mmio->value
+ * Relies on a spinlock since we need two mmio accesses.
+ */
+ unsigned long access_val = mmio->value;
+
+ spin_lock(&dist_lock);
+
+ mmio->is_write = false;
+ mmio_perform_access(gicd_base, mmio);
+ mmio->is_write = true;
+
+ mmio->value &= ~access_mask;
+ mmio->value |= access_val & access_mask;
+ mmio_perform_access(gicd_base, mmio);
+
+ spin_unlock(&dist_lock);
+ } else {
+ mmio->value &= access_mask;
+ mmio_perform_access(gicd_base, mmio);
+ }
+ return MMIO_HANDLED;
+}
+
+static enum mmio_result handle_sgir_access(struct mmio_access *mmio)
+{
+ struct sgi sgi;
+ unsigned long val = mmio->value;
+
+ if (!mmio->is_write)
+ return MMIO_HANDLED;
+
+ sgi.targets = (val >> 16) & 0xff;
+ sgi.routing_mode = (val >> 24) & 0x3;
+ sgi.aff1 = 0;
+ sgi.aff2 = 0;
+ sgi.aff3 = 0;
+ sgi.id = val & 0xf;
+
+ gic_handle_sgir_write(&sgi, false);
+ return MMIO_HANDLED;
+}
+
+void gic_handle_sgir_write(struct sgi *sgi, bool virt_input)
+{
+ struct per_cpu *cpu_data = this_cpu_data();
+ unsigned long targets = sgi->targets;
+ unsigned int cpu;
+
+ if (sgi->routing_mode == 2) {
+ /* Route to the caller itself */
+ irqchip_set_pending(cpu_data, sgi->id);
+ sgi->targets = (1 << cpu_data->cpu_id);
+ } else {
+ sgi->targets = 0;
+
+ for_each_cpu(cpu, cpu_data->cell->cpu_set) {
+ if (sgi->routing_mode == 1) {
+ /* Route to all (cell) CPUs but the caller. */
+ if (cpu == cpu_data->cpu_id)
+ continue;
+ } else if (virt_input) {
+ if (!test_bit(arm_cpu_phys2virt(cpu),
+ &targets))
+ continue;
+ } else {
+ /*
+ * When using a cpu map to target the different
+ * CPUs (GICv2), they are independent from the
+ * physical CPU IDs, so there is no need to
+ * translate them to the hypervisor's virtual
+ * IDs.
+ */
+ if (!(targets & gicv2_target_cpu_map[cpu]))
+ continue;
+ }
+
+ irqchip_set_pending(per_cpu(cpu), sgi->id);
+ sgi->targets |= (1 << cpu);
+ }
+ }
+
+ /* Let the other CPUS inject their SGIs */
+ sgi->id = SGI_INJECT;
+ irqchip_send_sgi(sgi);
+}
+
+static enum mmio_result gic_handle_dist_access(void *arg,
+ struct mmio_access *mmio)
+{
+ unsigned long reg = mmio->address;
+ enum mmio_result ret;
+
+ switch (reg) {
+ case REG_RANGE(GICD_IROUTER, 1024, 8):
+ ret = gic_handle_irq_route(mmio, (reg - GICD_IROUTER) / 8);
+ break;
+
+ case REG_RANGE(GICD_ITARGETSR, 1024, 1):
+ ret = irqchip.handle_irq_target(mmio, reg - GICD_ITARGETSR);
+ break;
+
+ case REG_RANGE(GICD_ICENABLER, 32, 4):
+ case REG_RANGE(GICD_ISENABLER, 32, 4):
+ case REG_RANGE(GICD_ICPENDR, 32, 4):
+ case REG_RANGE(GICD_ISPENDR, 32, 4):
+ case REG_RANGE(GICD_ICACTIVER, 32, 4):
+ case REG_RANGE(GICD_ISACTIVER, 32, 4):
+ ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, true);
+ break;
+
+ case REG_RANGE(GICD_IGROUPR, 32, 4):
+ ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, false);
+ break;
+
+ case REG_RANGE(GICD_ICFGR, 64, 4):
+ ret = restrict_bitmask_access(mmio, (reg & 0xff) / 4, 2, false);
+ break;
+
+ case REG_RANGE(GICD_IPRIORITYR, 255, 4):
+ ret = restrict_bitmask_access(mmio, (reg & 0x3ff) / 4, 8,
+ false);
+ break;
+
+ case GICD_SGIR:
+ ret = handle_sgir_access(mmio);
+ break;
+
+ case GICD_CTLR:
+ case GICD_TYPER:
+ case GICD_IIDR:
+ case REG_RANGE(GICD_PIDR0, 4, 4):
+ case REG_RANGE(GICD_PIDR4, 4, 4):
+ case REG_RANGE(GICD_CIDR0, 4, 4):
+ /* Allow read access, ignore write */
+ if (!mmio->is_write)
+ mmio_perform_access(gicd_base, mmio);
+ /* fall through */
+ default:
+ /* Ignore access. */
+ ret = MMIO_HANDLED;
+ }
+
+ return ret;
+}
+
+void irqchip_handle_irq(struct per_cpu *cpu_data)
+{
+ unsigned int count_event = 1;
+ bool handled = false;
+ u32 irq_id;
+
+ while (1) {
+ /* Read IAR1: set 'active' state */
+ irq_id = gic_read_iar();
+
+ if (irq_id == 0x3ff) /* Spurious IRQ */
+ break;
+
+ /* Handle IRQ */
+ if (is_sgi(irq_id)) {
+ arch_handle_sgi(cpu_data, irq_id, count_event);
+ handled = true;
+ } else {
+ handled = arch_handle_phys_irq(cpu_data, irq_id,
+ count_event);
+ }
+ count_event = 0;
+
+ /*
+ * Write EOIR1: drop priority, but stay active if handled is
+ * false.
+ * This allows to not be re-interrupted by a level-triggered
+ * interrupt that needs handling in the guest (e.g. timer)
+ */
+ irqchip_eoi_irq(irq_id, handled);
+ }
+}
+
bool irqchip_irq_in_cell(struct cell *cell, unsigned int irq_id)
{
if (irq_id >= sizeof(cell->arch.irq_bitmap) * 8)
@@ -60,7 +283,8 @@ void irqchip_set_pending(struct per_cpu *cpu_data, u16 irq_id)

if (!cpu_data) {
/* Injection via GICD */
- gic_set_irq_pending(irq_id);
+ mmio_write32(gicd_base + GICD_ISPENDR + (irq_id / 32) * 4,
+ 1 << (irq_id % 32));
return;
}

--
2.1.4

Ralf Ramsauer

Dec 2, 2016, 6:48:59 AM
to Jan Kiszka, jailho...@googlegroups.com, Mark Rutland


On 12/02/2016 09:42 AM, Jan Kiszka wrote:
> This is a mandatory service with PSCI v0.2+, and if the root cell was
> using it prior to enabling Jailhouse, just returning an error, as we
> have done so far, will send the CPUs into a busy loop.
>
> Implement the minimum of this service by sending the CPU into a wfi,
> but only if there are no interrupts waiting to be injected. We check
> for physical interrupts right after the wfi in order to reduce world
> switches and, thus, event delivery latencies.
>
> CC: Ralf Ramsauer <ra...@ramses-pyramidenbau.de>
> CC: Mark Rutland <mark.r...@arm.com>
> Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
Tested-by: Ralf Ramsauer <ra...@ramses-pyramidenbau.de>
Ralf Ramsauer
PGP: 0x8F10049B