[PATCH 2/8] arm-common: Prefix GIC functions with their target version


Jan Kiszka

Oct 9, 2017, 3:44:38 AM
to jailho...@googlegroups.com
From: Jan Kiszka <jan.k...@siemens.com>

Now that we link both GIC implementations into the same binary, this
prefixing helps to tell the origins of the symbols apart.

No functional changes.
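
For illustration, the rename simply folds the GIC version into each static
symbol, e.g. (pattern only, excerpted from the diff below):

/* before: both gic-v2.c and gic-v3.c defined a static gic_init() */
static int gic_init(void);

/* after: the backend version is part of the symbol name */
static int gicv2_init(void);	/* gic-v2.c */
static int gicv3_init(void);	/* gic-v3.c */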

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/gic-v2.c | 104 ++++++++++++++++++------------------
hypervisor/arch/arm-common/gic-v3.c | 82 ++++++++++++++--------------
2 files changed, 93 insertions(+), 93 deletions(-)

diff --git a/hypervisor/arch/arm-common/gic-v2.c b/hypervisor/arch/arm-common/gic-v2.c
index 1aec2a2f..eeed5305 100644
--- a/hypervisor/arch/arm-common/gic-v2.c
+++ b/hypervisor/arch/arm-common/gic-v2.c
@@ -24,18 +24,18 @@ static unsigned int gic_num_lr;
static void *gicc_base;
static void *gich_base;

-static u32 gic_read_lr(unsigned int i)
+static u32 gicv2_read_lr(unsigned int i)
{
return mmio_read32(gich_base + GICH_LR_BASE + i * 4);
}

-static void gic_write_lr(unsigned int i, u32 value)
+static void gicv2_write_lr(unsigned int i, u32 value)
{
mmio_write32(gich_base + GICH_LR_BASE + i * 4, value);
}

/* Check that the targeted interface belongs to the cell */
-static bool gic_targets_in_cell(struct cell *cell, u8 targets)
+static bool gicv2_targets_in_cell(struct cell *cell, u8 targets)
{
unsigned int cpu;

@@ -47,7 +47,7 @@ static bool gic_targets_in_cell(struct cell *cell, u8 targets)
return true;
}

-static int gic_init(void)
+static int gicv2_init(void)
{
/* Probe the GICD version */
if (GICD_PIDR2_ARCH(mmio_read32(gicd_base + GICDv2_PIDR2)) != 2)
@@ -66,23 +66,23 @@ static int gic_init(void)
return 0;
}

-static void gic_clear_pending_irqs(void)
+static void gicv2_clear_pending_irqs(void)
{
unsigned int n;

/* Clear list registers. */
for (n = 0; n < gic_num_lr; n++)
- gic_write_lr(n, 0);
+ gicv2_write_lr(n, 0);

/* Clear active priority bits. */
mmio_write32(gich_base + GICH_APR, 0);
}

-static void gic_cpu_reset(struct per_cpu *cpu_data)
+static void gicv2_cpu_reset(struct per_cpu *cpu_data)
{
unsigned int mnt_irq = system_config->platform_info.arm.maintenance_irq;

- gic_clear_pending_irqs();
+ gicv2_clear_pending_irqs();

/* Ensure all IPIs and the maintenance PPI are enabled */
mmio_write32(gicd_base + GICD_ISENABLER, 0x0000ffff | (1 << mnt_irq));
@@ -96,7 +96,7 @@ static void gic_cpu_reset(struct per_cpu *cpu_data)
mmio_write32(gich_base + GICH_VMCR, 0);
}

-static int gic_cpu_init(struct per_cpu *cpu_data)
+static int gicv2_cpu_init(struct per_cpu *cpu_data)
{
unsigned int mnt_irq = system_config->platform_info.arm.maintenance_irq;
u32 vtr, vmcr;
@@ -142,7 +142,7 @@ static int gic_cpu_init(struct per_cpu *cpu_data)
* use. Physically pending IRQs will be forwarded to Linux once we
* enable interrupts for the hypervisor.
*/
- gic_clear_pending_irqs();
+ gicv2_clear_pending_irqs();

cpu_data->gicc_initialized = true;

@@ -167,7 +167,7 @@ static int gic_cpu_init(struct per_cpu *cpu_data)
return 0;
}

-static void gic_cpu_shutdown(struct per_cpu *cpu_data)
+static void gicv2_cpu_shutdown(struct per_cpu *cpu_data)
{
u32 gich_vmcr = mmio_read32(gich_base + GICH_VMCR);
u32 gicc_ctlr = 0;
@@ -191,12 +191,12 @@ static void gic_cpu_shutdown(struct per_cpu *cpu_data)
(gich_vmcr >> GICH_VMCR_PMR_SHIFT) << GICV_PMR_SHIFT);
}

-static u32 gic_read_iar_irqn(void)
+static u32 gicv2_read_iar_irqn(void)
{
return mmio_read32(gicc_base + GICC_IAR) & 0x3ff;
}

-static void gic_eoi_irq(u32 irq_id, bool deactivate)
+static void gicv2_eoi_irq(u32 irq_id, bool deactivate)
{
/*
* The GIC doesn't seem to care about the CPUID value written to EOIR,
@@ -207,7 +207,7 @@ static void gic_eoi_irq(u32 irq_id, bool deactivate)
mmio_write32(gicc_base + GICC_DIR, irq_id);
}

-static int gic_cell_init(struct cell *cell)
+static int gicv2_cell_init(struct cell *cell)
{
/*
* Let the guest access the virtual CPU interface instead of the
@@ -227,20 +227,20 @@ static int gic_cell_init(struct cell *cell)
PAGING_COHERENT);
}

-static void gic_cell_exit(struct cell *cell)
+static void gicv2_cell_exit(struct cell *cell)
{
paging_destroy(&cell->arch.mm,
system_config->platform_info.arm.gicc_base, GICC_SIZE,
PAGING_COHERENT);
}

-static void gic_adjust_irq_target(struct cell *cell, u16 irq_id)
+static void gicv2_adjust_irq_target(struct cell *cell, u16 irq_id)
{
void *itargetsr = gicd_base + GICD_ITARGETSR + (irq_id & ~0x3);
u32 targets = mmio_read32(itargetsr);
unsigned int shift = (irq_id % 4) * 8;

- if (gic_targets_in_cell(cell, (u8)(targets >> shift)))
+ if (gicv2_targets_in_cell(cell, (u8)(targets >> shift)))
return;

targets &= ~(0xff << shift);
@@ -249,7 +249,7 @@ static void gic_adjust_irq_target(struct cell *cell, u16 irq_id)
mmio_write32(itargetsr, targets);
}

-static int gic_send_sgi(struct sgi *sgi)
+static int gicv2_send_sgi(struct sgi *sgi)
{
u32 val;

@@ -265,7 +265,7 @@ static int gic_send_sgi(struct sgi *sgi)
return 0;
}

-static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
+static int gicv2_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
{
int i;
int first_free = -1;
@@ -283,7 +283,7 @@ static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
}

/* Check that there is no overlapping */
- lr = gic_read_lr(i);
+ lr = gicv2_read_lr(i);
if ((lr & GICH_LR_VIRT_ID_MASK) == irq_id)
return -EEXIST;
}
@@ -300,12 +300,12 @@ static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
lr |= (u32)irq_id << GICH_LR_PHYS_ID_SHIFT;
}

- gic_write_lr(first_free, lr);
+ gicv2_write_lr(first_free, lr);

return 0;
}

-static void gic_enable_maint_irq(bool enable)
+static void gicv2_enable_maint_irq(bool enable)
{
u32 hcr;

@@ -317,19 +317,19 @@ static void gic_enable_maint_irq(bool enable)
mmio_write32(gich_base + GICH_HCR, hcr);
}

-static bool gic_has_pending_irqs(void)
+static bool gicv2_has_pending_irqs(void)
{
unsigned int n;

for (n = 0; n < gic_num_lr; n++)
- if (gic_read_lr(n) & GICH_LR_PENDING_BIT)
+ if (gicv2_read_lr(n) & GICH_LR_PENDING_BIT)
return true;

return false;
}

-static enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
- unsigned int irq)
+static enum mmio_result gicv2_handle_irq_route(struct mmio_access *mmio,
+ unsigned int irq)
{
/* doesn't exist in v2 - ignore access */
return MMIO_HANDLED;
@@ -338,8 +338,8 @@ static enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
/*
* GICv2 uses 8bit values for each IRQ in the ITARGETSR registers
*/
-static enum mmio_result gic_handle_irq_target(struct mmio_access *mmio,
- unsigned int irq)
+static enum mmio_result gicv2_handle_irq_target(struct mmio_access *mmio,
+ unsigned int irq)
{
/*
* ITARGETSR contain one byte per IRQ, so the first one affected by this
@@ -380,7 +380,7 @@ static enum mmio_result gic_handle_irq_target(struct mmio_access *mmio,

targets = (mmio->value >> (8 * n)) & 0xff;

- if (!gic_targets_in_cell(cell, targets)) {
+ if (!gicv2_targets_in_cell(cell, targets)) {
printk("Attempt to route IRQ%d outside of cell\n",
irq_base + n);
return MMIO_ERROR;
@@ -405,7 +405,7 @@ static enum mmio_result gic_handle_irq_target(struct mmio_access *mmio,
return MMIO_HANDLED;
}

-static enum mmio_result gic_handle_dist_access(struct mmio_access *mmio)
+static enum mmio_result gicv2_handle_dist_access(struct mmio_access *mmio)
{
unsigned long val = mmio->value;
struct sgi sgi;
@@ -439,37 +439,37 @@ static enum mmio_result gic_handle_dist_access(struct mmio_access *mmio)
}
}

-static int gic_get_cpu_target(unsigned int cpu_id)
+static int gicv2_get_cpu_target(unsigned int cpu_id)
{
return gicv2_target_cpu_map[cpu_id];
}

-static u64 gic_get_cluster_target(unsigned int cpu_id)
+static u64 gicv2_get_cluster_target(unsigned int cpu_id)
{
return 0;
}

const struct irqchip gicv2_irqchip = {
- .init = gic_init,
- .cpu_init = gic_cpu_init,
- .cpu_reset = gic_cpu_reset,
- .cpu_shutdown = gic_cpu_shutdown,
- .cell_init = gic_cell_init,
- .cell_exit = gic_cell_exit,
- .adjust_irq_target = gic_adjust_irq_target,
-
- .send_sgi = gic_send_sgi,
- .inject_irq = gic_inject_irq,
- .enable_maint_irq = gic_enable_maint_irq,
- .has_pending_irqs = gic_has_pending_irqs,
- .read_iar_irqn = gic_read_iar_irqn,
- .eoi_irq = gic_eoi_irq,
-
- .handle_irq_route = gic_handle_irq_route,
- .handle_irq_target = gic_handle_irq_target,
- .handle_dist_access = gic_handle_dist_access,
- .get_cpu_target = gic_get_cpu_target,
- .get_cluster_target = gic_get_cluster_target,
+ .init = gicv2_init,
+ .cpu_init = gicv2_cpu_init,
+ .cpu_reset = gicv2_cpu_reset,
+ .cpu_shutdown = gicv2_cpu_shutdown,
+ .cell_init = gicv2_cell_init,
+ .cell_exit = gicv2_cell_exit,
+ .adjust_irq_target = gicv2_adjust_irq_target,
+
+ .send_sgi = gicv2_send_sgi,
+ .inject_irq = gicv2_inject_irq,
+ .enable_maint_irq = gicv2_enable_maint_irq,
+ .has_pending_irqs = gicv2_has_pending_irqs,
+ .read_iar_irqn = gicv2_read_iar_irqn,
+ .eoi_irq = gicv2_eoi_irq,
+
+ .handle_irq_route = gicv2_handle_irq_route,
+ .handle_irq_target = gicv2_handle_irq_target,
+ .handle_dist_access = gicv2_handle_dist_access,
+ .get_cpu_target = gicv2_get_cpu_target,
+ .get_cluster_target = gicv2_get_cluster_target,

.gicd_size = 0x1000,
};
diff --git a/hypervisor/arch/arm-common/gic-v3.c b/hypervisor/arch/arm-common/gic-v3.c
index f45efbf5..d6f16aa3 100644
--- a/hypervisor/arch/arm-common/gic-v3.c
+++ b/hypervisor/arch/arm-common/gic-v3.c
@@ -36,7 +36,7 @@ static u32 gic_version;

static void *gicr_base;

-static u64 gic_read_lr(unsigned int reg)
+static u64 gicv3_read_lr(unsigned int reg)
{
u64 val;

@@ -78,7 +78,7 @@ static u64 gic_read_lr(unsigned int reg)
return val;
}

-static void gic_write_lr(unsigned int reg, u64 val)
+static void gicv3_write_lr(unsigned int reg, u64 val)
{
switch (reg) {
#define __WRITE_LR0_7(n) \
@@ -112,7 +112,7 @@ static void gic_write_lr(unsigned int reg, u64 val)
}
}

-static int gic_init(void)
+static int gicv3_init(void)
{
/* TODO: need to validate more? */
if (!(mmio_read32(gicd_base + GICD_CTLR) & GICD_CTLR_ARE_NS))
@@ -127,13 +127,13 @@ static int gic_init(void)
return 0;
}

-static void gic_clear_pending_irqs(void)
+static void gicv3_clear_pending_irqs(void)
{
unsigned int n;

/* Clear list registers. */
for (n = 0; n < gic_num_lr; n++)
- gic_write_lr(n, 0);
+ gicv3_write_lr(n, 0);

/* Clear active priority bits */
if (gic_num_priority_bits >= 5)
@@ -146,12 +146,12 @@ static void gic_clear_pending_irqs(void)
}
}

-static void gic_cpu_reset(struct per_cpu *cpu_data)
+static void gicv3_cpu_reset(struct per_cpu *cpu_data)
{
unsigned int mnt_irq = system_config->platform_info.arm.maintenance_irq;
void *gicr = cpu_data->gicr.base + GICR_SGI_BASE;

- gic_clear_pending_irqs();
+ gicv3_clear_pending_irqs();

/* Ensure all IPIs and the maintenance PPI are enabled. */
mmio_write32(gicr + GICR_ISENABLER, 0x0000ffff | (1 << mnt_irq));
@@ -165,7 +165,7 @@ static void gic_cpu_reset(struct per_cpu *cpu_data)
arm_write_sysreg(ICH_VMCR_EL2, 0);
}

-static int gic_cpu_init(struct per_cpu *cpu_data)
+static int gicv3_cpu_init(struct per_cpu *cpu_data)
{
unsigned int mnt_irq = system_config->platform_info.arm.maintenance_irq;
unsigned long redist_addr = system_config->platform_info.arm.gicr_base;
@@ -247,7 +247,7 @@ static int gic_cpu_init(struct per_cpu *cpu_data)
* use. Physically pending IRQs will be forwarded to Linux once we
* enable interrupts for the hypervisor.
*/
- gic_clear_pending_irqs();
+ gicv3_clear_pending_irqs();

ich_vmcr = (cell_icc_pmr & ICC_PMR_MASK) << ICH_VMCR_VPMR_SHIFT;
if (cell_icc_igrpen1 & ICC_IGRPEN1_EN)
@@ -262,7 +262,7 @@ static int gic_cpu_init(struct per_cpu *cpu_data)
return 0;
}

-static void gic_cpu_shutdown(struct per_cpu *cpu_data)
+static void gicv3_cpu_shutdown(struct per_cpu *cpu_data)
{
u32 ich_vmcr, icc_ctlr, cell_icc_igrpen1;

@@ -290,7 +290,7 @@ static void gic_cpu_shutdown(struct per_cpu *cpu_data)
}
}

-static void gic_adjust_irq_target(struct cell *cell, u16 irq_id)
+static void gicv3_adjust_irq_target(struct cell *cell, u16 irq_id)
{
void *irouter = gicd_base + GICD_IROUTER + 8 * irq_id;
u64 mpidr = per_cpu(first_cpu(cell->cpu_set))->mpidr;
@@ -301,8 +301,8 @@ static void gic_adjust_irq_target(struct cell *cell, u16 irq_id)
mmio_write64(irouter, mpidr);
}

-static enum mmio_result gic_handle_redist_access(void *arg,
- struct mmio_access *mmio)
+static enum mmio_result gicv3_handle_redist_access(void *arg,
+ struct mmio_access *mmio)
{
struct per_cpu *cpu_data = arg;

@@ -344,7 +344,7 @@ static enum mmio_result gic_handle_redist_access(void *arg,
return MMIO_HANDLED;
}

-static int gic_cell_init(struct cell *cell)
+static int gicv3_cell_init(struct cell *cell)
{
unsigned int cpu;

@@ -357,7 +357,7 @@ static int gic_cell_init(struct cell *cell)
continue;
mmio_region_register(cell, per_cpu(cpu)->gicr.phys_addr,
gic_version == 4 ? 0x40000 : 0x20000,
- gic_handle_redist_access, per_cpu(cpu));
+ gicv3_handle_redist_access, per_cpu(cpu));
}

return 0;
@@ -367,7 +367,7 @@ static int gic_cell_init(struct cell *cell)
(MPIDR_AFFINITY_LEVEL((cluster_id), (level)) \
<< ICC_SGIR_AFF## level ##_SHIFT)

-static int gic_send_sgi(struct sgi *sgi)
+static int gicv3_send_sgi(struct sgi *sgi)
{
u64 val;
u16 targets = sgi->targets;
@@ -428,8 +428,8 @@ bool gicv3_handle_sgir_write(u64 sgir)
/*
* GICv3 uses a 64bit register IROUTER for each IRQ
*/
-static enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
- unsigned int irq)
+static enum mmio_result gicv3_handle_irq_route(struct mmio_access *mmio,
+ unsigned int irq)
{
struct cell *cell = this_cell();
unsigned int cpu;
@@ -470,7 +470,7 @@ static enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
}
}

-static u32 gic_read_iar_irqn(void)
+static u32 gicv3_read_iar_irqn(void)
{
u32 iar;

@@ -478,14 +478,14 @@ static u32 gic_read_iar_irqn(void)
return iar & 0xffffff;
}

-static void gic_eoi_irq(u32 irq_id, bool deactivate)
+static void gicv3_eoi_irq(u32 irq_id, bool deactivate)
{
arm_write_sysreg(ICC_EOIR1_EL1, irq_id);
if (deactivate)
arm_write_sysreg(ICC_DIR_EL1, irq_id);
}

-static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
+static int gicv3_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
{
int i;
int free_lr = -1;
@@ -505,7 +505,7 @@ static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
* Entry is in use, check that it doesn't match the one we want
* to inject.
*/
- lr = gic_read_lr(i);
+ lr = gicv3_read_lr(i);

/*
* A strict phys->virt id mapping is used for SPIs, so this test
@@ -528,7 +528,7 @@ static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
lr |= (u64)irq_id << ICH_LR_PHYS_ID_SHIFT;
}

- gic_write_lr(free_lr, lr);
+ gicv3_write_lr(free_lr, lr);

return 0;
}
@@ -550,7 +550,7 @@ static bool gicv3_has_pending_irqs(void)
unsigned int n;

for (n = 0; n < gic_num_lr; n++)
- if (gic_read_lr(n) & ICH_LR_PENDING)
+ if (gicv3_read_lr(n) & ICH_LR_PENDING)
return true;

return false;
@@ -563,7 +563,7 @@ static enum mmio_result gicv3_handle_irq_target(struct mmio_access *mmio,
return MMIO_HANDLED;
}

-static enum mmio_result gic_handle_dist_access(struct mmio_access *mmio)
+static enum mmio_result gicv3_handle_dist_access(struct mmio_access *mmio)
{
switch (mmio->address) {
case GICD_CTLR:
@@ -582,34 +582,34 @@ static enum mmio_result gic_handle_dist_access(struct mmio_access *mmio)
}
}

-static int gic_get_cpu_target(unsigned int cpu_id)
+static int gicv3_get_cpu_target(unsigned int cpu_id)
{
return 1 << per_cpu(cpu_id)->mpidr & MPIDR_AFF0_MASK;
}

-static u64 gic_get_cluster_target(unsigned int cpu_id)
+static u64 gicv3_get_cluster_target(unsigned int cpu_id)
{
return per_cpu(cpu_id)->mpidr & MPIDR_CLUSTERID_MASK;
}

const struct irqchip gicv3_irqchip = {
- .init = gic_init,
- .cpu_init = gic_cpu_init,
- .cpu_reset = gic_cpu_reset,
- .cpu_shutdown = gic_cpu_shutdown,
- .cell_init = gic_cell_init,
- .adjust_irq_target = gic_adjust_irq_target,
- .send_sgi = gic_send_sgi,
- .inject_irq = gic_inject_irq,
+ .init = gicv3_init,
+ .cpu_init = gicv3_cpu_init,
+ .cpu_reset = gicv3_cpu_reset,
+ .cpu_shutdown = gicv3_cpu_shutdown,
+ .cell_init = gicv3_cell_init,
+ .adjust_irq_target = gicv3_adjust_irq_target,
+ .send_sgi = gicv3_send_sgi,
+ .inject_irq = gicv3_inject_irq,
.enable_maint_irq = gicv3_enable_maint_irq,
.has_pending_irqs = gicv3_has_pending_irqs,
- .read_iar_irqn = gic_read_iar_irqn,
- .eoi_irq = gic_eoi_irq,
- .handle_irq_route = gic_handle_irq_route,
+ .read_iar_irqn = gicv3_read_iar_irqn,
+ .eoi_irq = gicv3_eoi_irq,
+ .handle_irq_route = gicv3_handle_irq_route,
.handle_irq_target = gicv3_handle_irq_target,
- .handle_dist_access = gic_handle_dist_access,
- .get_cpu_target = gic_get_cpu_target,
- .get_cluster_target = gic_get_cluster_target,
+ .handle_dist_access = gicv3_handle_dist_access,
+ .get_cpu_target = gicv3_get_cpu_target,
+ .get_cluster_target = gicv3_get_cluster_target,

.gicd_size = 0x10000,
};
--
2.12.3

Jan Kiszka

Oct 9, 2017, 3:44:38 AM
to jailho...@googlegroups.com, Ralf Ramsauer
This mostly deals with the case that Ralf pointed out: We lost interrupts
during jailhouse disable, specifically when running this in a tight
loop. The reason is explained in the last patch.

This also contains a small change to our device trees that allows the
non-root inmates to detect that they are running over Jailhouse. This will
be useful, e.g., for switching the PCI scanner in Linux to per-function
scans (I'm preparing a cleaned-up Linux queue).

Moreover, it pulls out the shared setup parts of arm/arm64 and moves
them into arm-common.

Finally, this fixes the header_check again.

Jan


CC: Ralf Ramsauer <ralf.r...@oth-regensburg.de>

Jan Kiszka (8):
configs: Add hypervisor node to inmate device trees
arm-common: Prefix GIC functions with their target version
core: Add barrier to shutdown procedure
arm-common: Let cpu_shutdown return an error if the GIC is
uninitialized
arm-common: Migrate pending interrupts on shutdown
arm-common: Factor out common setup parts
core: Add missing include to uart.h
scripts: Update header check

configs/dts/inmate-amd-seattle.dts | 4 +
configs/dts/inmate-bananapi.dts | 4 +
configs/dts/inmate-espressobin.dts | 4 +
configs/dts/inmate-foundation-v8.dts | 4 +
configs/dts/inmate-hikey.dts | 4 +
configs/dts/inmate-jetson-tk1.dts | 4 +
configs/dts/inmate-jetson-tx1.dts | 4 +
configs/dts/inmate-orangepi0.dts | 4 +
configs/dts/inmate-qemu-arm64.dts | 4 +
configs/dts/inmate-zynqmp-zcu102-2.dts | 4 +
configs/dts/inmate-zynqmp-zcu102.dts | 4 +
hypervisor/arch/arm-common/Kbuild | 2 +-
hypervisor/arch/arm-common/gic-v2.c | 148 +++++++++++++--------
hypervisor/arch/arm-common/gic-v3.c | 131 ++++++++++++------
hypervisor/arch/arm-common/include/asm/irqchip.h | 4 +-
.../arch/arm-common/include/asm/setup-common.h | 17 +++
hypervisor/arch/arm-common/irqchip.c | 33 ++++-
hypervisor/arch/arm-common/setup.c | 70 ++++++++++
hypervisor/arch/arm/include/asm/processor.h | 4 +
hypervisor/arch/arm/setup.c | 42 +-----
hypervisor/arch/arm64/include/asm/processor.h | 4 +
hypervisor/arch/arm64/setup.c | 44 +-----
hypervisor/control.c | 52 +++++---
hypervisor/include/jailhouse/uart.h | 2 +
scripts/header_check | 5 +-
25 files changed, 407 insertions(+), 195 deletions(-)
create mode 100644 hypervisor/arch/arm-common/include/asm/setup-common.h
create mode 100644 hypervisor/arch/arm-common/setup.c

--
2.12.3

Jan Kiszka

Oct 9, 2017, 3:44:38 AM
to jailho...@googlegroups.com
From: Jan Kiszka <jan.k...@siemens.com>

Add a barrier that waits for all CPUs to enter the disable hypercall
prior to performing any shutdown step. This avoids races in case some
CPU rushes forward with disabling common hypervisor support while
another is still operational, possibly triggering code paths that
expect Jailhouse to still be working.

One such scenario can be triggered on ARM once we start migrating
pending interrupts from the hypervisor queue back to the physical
irqchip.
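
For reference, this is the usual spin-barrier pattern; a condensed sketch
follows (the helper name is made up here - the actual patch open-codes this
in hypervisor_disable() and additionally handles the -EBUSY case):

static volatile unsigned int waiting_cpus;

static void wait_for_all_cpus(void)
{
	/* each CPU announces its arrival under the shutdown lock */
	spin_lock(&shutdown_lock);
	waiting_cpus++;
	spin_unlock(&shutdown_lock);

	/* then spins until every online CPU has entered the hypercall */
	while (waiting_cpus < hypervisor_header.online_cpus)
		cpu_relax();
}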

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/control.c | 52 +++++++++++++++++++++++++++++++++++++---------------
1 file changed, 37 insertions(+), 15 deletions(-)

diff --git a/hypervisor/control.c b/hypervisor/control.c
index 05a7d032..72d3c7eb 100644
--- a/hypervisor/control.c
+++ b/hypervisor/control.c
@@ -697,6 +697,8 @@ void shutdown(void)

static int hypervisor_disable(struct per_cpu *cpu_data)
{
+ static volatile unsigned int waiting_cpus;
+ static bool do_common_shutdown;
unsigned int this_cpu = cpu_data->cpu_id;
unsigned int cpu;
int state, ret;
@@ -726,35 +728,55 @@ static int hypervisor_disable(struct per_cpu *cpu_data)
* running in parallel before that CPU releases the root cell again via
* cell_resume. In that case, we will see the result of the change.
*
- * shutdown_lock is here just to coordinate between the root cell CPUs
- * who is evaluating num_cells and should start the shutdown depending
- * on its state.
+ * shutdown_lock is here to protect shutdown_state, waiting_cpus and
+ * do_common_shutdown.
*/
spin_lock(&shutdown_lock);

if (cpu_data->shutdown_state == SHUTDOWN_NONE) {
- if (num_cells == 1) {
- printk("Shutting down hypervisor\n");
- shutdown();
- state = SHUTDOWN_STARTED;
- } else {
- state = -EBUSY;
- }
-
+ state = num_cells == 1 ? SHUTDOWN_STARTED : -EBUSY;
for_each_cpu(cpu, root_cell.cpu_set)
per_cpu(cpu)->shutdown_state = state;
}

if (cpu_data->shutdown_state == SHUTDOWN_STARTED) {
- printk(" Releasing CPU %d\n", this_cpu);
+ do_common_shutdown = true;
+ waiting_cpus++;
ret = 0;
- } else
+ } else {
ret = cpu_data->shutdown_state;
- cpu_data->shutdown_state = SHUTDOWN_NONE;
+ cpu_data->shutdown_state = SHUTDOWN_NONE;
+ }

spin_unlock(&shutdown_lock);

- return ret;
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The shutdown will change hardware behavior, and we have to avoid
+ * that one CPU already turns it to native mode while another makes use
+ * of it or runs into a hypervisor trap. This barrier prevents such
+ * scenarios.
+ */
+ while (waiting_cpus < hypervisor_header.online_cpus)
+ cpu_relax();
+
+ spin_lock(&shutdown_lock);
+
+ if (do_common_shutdown) {
+ /*
+ * The first CPU to get here changes common settings to native.
+ */
+ printk("Shutting down hypervisor\n");
+ shutdown();
+ do_common_shutdown = false;
+ }
+ printk(" Releasing CPU %d\n", this_cpu);
+
+ spin_unlock(&shutdown_lock);
+
+ return 0;
}

static long hypervisor_get_info(struct per_cpu *cpu_data, unsigned long type)
--
2.12.3

Jan Kiszka

Oct 9, 2017, 3:44:38 AM
to jailho...@googlegroups.com
From: Jan Kiszka <jan.k...@siemens.com>

Enables the guest to identify Jailhouse on ARM platforms.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
configs/dts/inmate-amd-seattle.dts | 4 ++++
configs/dts/inmate-bananapi.dts | 4 ++++
configs/dts/inmate-espressobin.dts | 4 ++++
configs/dts/inmate-foundation-v8.dts | 4 ++++
configs/dts/inmate-hikey.dts | 4 ++++
configs/dts/inmate-jetson-tk1.dts | 4 ++++
configs/dts/inmate-jetson-tx1.dts | 4 ++++
configs/dts/inmate-orangepi0.dts | 4 ++++
configs/dts/inmate-qemu-arm64.dts | 4 ++++
configs/dts/inmate-zynqmp-zcu102-2.dts | 4 ++++
configs/dts/inmate-zynqmp-zcu102.dts | 4 ++++
11 files changed, 44 insertions(+)

diff --git a/configs/dts/inmate-amd-seattle.dts b/configs/dts/inmate-amd-seattle.dts
index 62d29e13..0b24bdb5 100644
--- a/configs/dts/inmate-amd-seattle.dts
+++ b/configs/dts/inmate-amd-seattle.dts
@@ -25,6 +25,10 @@
bootargs = "earlycon";
};

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-bananapi.dts b/configs/dts/inmate-bananapi.dts
index e81ae350..06b47989 100644
--- a/configs/dts/inmate-bananapi.dts
+++ b/configs/dts/inmate-bananapi.dts
@@ -25,6 +25,10 @@

interrupt-parent = <&gic>;

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-espressobin.dts b/configs/dts/inmate-espressobin.dts
index e5f8f42c..ee013573 100644
--- a/configs/dts/inmate-espressobin.dts
+++ b/configs/dts/inmate-espressobin.dts
@@ -25,6 +25,10 @@

interrupt-parent = <&gic>;

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-foundation-v8.dts b/configs/dts/inmate-foundation-v8.dts
index ed187637..342735db 100644
--- a/configs/dts/inmate-foundation-v8.dts
+++ b/configs/dts/inmate-foundation-v8.dts
@@ -34,6 +34,10 @@
serial0 = &serial0;
};

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-hikey.dts b/configs/dts/inmate-hikey.dts
index bcd82f35..68b15c03 100644
--- a/configs/dts/inmate-hikey.dts
+++ b/configs/dts/inmate-hikey.dts
@@ -25,6 +25,10 @@

interrupt-parent = <&gic>;

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-jetson-tk1.dts b/configs/dts/inmate-jetson-tk1.dts
index df60046d..bb86b3a1 100644
--- a/configs/dts/inmate-jetson-tk1.dts
+++ b/configs/dts/inmate-jetson-tk1.dts
@@ -25,6 +25,10 @@

interrupt-parent = <&gic>;

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-jetson-tx1.dts b/configs/dts/inmate-jetson-tx1.dts
index 72511938..ea3afa72 100644
--- a/configs/dts/inmate-jetson-tx1.dts
+++ b/configs/dts/inmate-jetson-tx1.dts
@@ -25,6 +25,10 @@

interrupt-parent = <&gic>;

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-orangepi0.dts b/configs/dts/inmate-orangepi0.dts
index 54ea4827..71bee078 100644
--- a/configs/dts/inmate-orangepi0.dts
+++ b/configs/dts/inmate-orangepi0.dts
@@ -25,6 +25,10 @@

interrupt-parent = <&gic>;

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-qemu-arm64.dts b/configs/dts/inmate-qemu-arm64.dts
index 58aaff53..bc3293e7 100644
--- a/configs/dts/inmate-qemu-arm64.dts
+++ b/configs/dts/inmate-qemu-arm64.dts
@@ -25,6 +25,10 @@

interrupt-parent = <&gic>;

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-zynqmp-zcu102-2.dts b/configs/dts/inmate-zynqmp-zcu102-2.dts
index 2f1df597..938a53dc 100644
--- a/configs/dts/inmate-zynqmp-zcu102-2.dts
+++ b/configs/dts/inmate-zynqmp-zcu102-2.dts
@@ -25,6 +25,10 @@

interrupt-parent = <&gic>;

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
diff --git a/configs/dts/inmate-zynqmp-zcu102.dts b/configs/dts/inmate-zynqmp-zcu102.dts
index 96910c17..ed5b5bff 100644
--- a/configs/dts/inmate-zynqmp-zcu102.dts
+++ b/configs/dts/inmate-zynqmp-zcu102.dts
@@ -25,6 +25,10 @@

interrupt-parent = <&gic>;

+ hypervisor {
+ compatible = "jailhouse,cell";
+ };
+
cpus {
#address-cells = <2>;
#size-cells = <0>;
--
2.12.3

Jan Kiszka

Oct 9, 2017, 3:44:39 AM
to jailho...@googlegroups.com
From: Jan Kiszka <jan.k...@siemens.com>

init_early, cpu_init and init_late contain a number of steps that are
common to both arm and arm64. Factor them out and share them.
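
The result is that arm and arm64 keep only thin wrappers around the shared
code, roughly like this (abridged from the diff below):

int arch_init_early(void)
{
	/* architecture-specific preparation ... */
	return arm_init_early();	/* shared, arm-common/setup.c */
}

int arch_cpu_init(struct per_cpu *cpu_data)
{
	/* architecture-specific traps and MMU setup ... */
	return arm_cpu_init(cpu_data);
}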

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/Kbuild | 2 +-
.../arch/arm-common/include/asm/setup-common.h | 17 ++++++
hypervisor/arch/arm-common/setup.c | 70 ++++++++++++++++++++++
hypervisor/arch/arm/include/asm/processor.h | 4 ++
hypervisor/arch/arm/setup.c | 42 ++-----------
hypervisor/arch/arm64/include/asm/processor.h | 4 ++
hypervisor/arch/arm64/setup.c | 44 ++------------
7 files changed, 104 insertions(+), 79 deletions(-)
create mode 100644 hypervisor/arch/arm-common/include/asm/setup-common.h
create mode 100644 hypervisor/arch/arm-common/setup.c

diff --git a/hypervisor/arch/arm-common/Kbuild b/hypervisor/arch/arm-common/Kbuild
index c5620f3c..87e5e8a8 100644
--- a/hypervisor/arch/arm-common/Kbuild
+++ b/hypervisor/arch/arm-common/Kbuild
@@ -15,7 +15,7 @@
GCOV_PROFILE := n
ccflags-$(CONFIG_JAILHOUSE_GCOV) += -fprofile-arcs -ftest-coverage

-OBJS-y += dbg-write.o lib.o psci.o control.o paging.o mmu_cell.o
+OBJS-y += dbg-write.o lib.o psci.o control.o paging.o mmu_cell.o setup.o
OBJS-y += irqchip.o pci.o ivshmem.o uart-pl011.o uart-xuartps.o uart-mvebu.o
OBJS-y += gic-v2.o gic-v3.o

diff --git a/hypervisor/arch/arm-common/include/asm/setup-common.h b/hypervisor/arch/arm-common/include/asm/setup-common.h
new file mode 100644
index 00000000..933fdf67
--- /dev/null
+++ b/hypervisor/arch/arm-common/include/asm/setup-common.h
@@ -0,0 +1,17 @@
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) Siemens AG, 2017
+ *
+ * Authors:
+ * Jan Kiszka <jan.k...@siemens.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <asm/percpu.h>
+
+int arm_init_early(void);
+int arm_cpu_init(struct per_cpu *cpu_data);
+int arm_init_late(void);
diff --git a/hypervisor/arch/arm-common/setup.c b/hypervisor/arch/arm-common/setup.c
new file mode 100644
index 00000000..16b8ea4d
--- /dev/null
+++ b/hypervisor/arch/arm-common/setup.c
@@ -0,0 +1,70 @@
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) Siemens AG, 2013-2017
+ *
+ * Authors:
+ * Jan Kiszka <jan.k...@siemens.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <jailhouse/control.h>
+#include <jailhouse/paging.h>
+#include <jailhouse/processor.h>
+#include <asm/setup-common.h>
+
+static u32 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE / 4] = {
+ ARM_PARKING_CODE
+};
+
+struct paging_structures parking_mm;
+
+int arm_init_early(void)
+{
+ int err;
+
+ parking_mm.root_paging = cell_paging;
+ parking_mm.root_table =
+ page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
+ if (!parking_mm.root_table)
+ return -ENOMEM;
+
+ err = paging_create(&parking_mm, paging_hvirt2phys(parking_code),
+ PAGE_SIZE, 0,
+ (PTE_FLAG_VALID | PTE_ACCESS_FLAG |
+ S2_PTE_ACCESS_RO | S2_PTE_FLAG_NORMAL),
+ PAGING_COHERENT);
+ if (err)
+ return err;
+
+ return arm_paging_cell_init(&root_cell);
+}
+
+int arm_cpu_init(struct per_cpu *cpu_data)
+{
+ int err;
+
+ cpu_data->mpidr = phys_processor_id();
+
+ arm_paging_vcpu_init(&root_cell.arch.mm);
+
+ err = irqchip_init();
+ if (err)
+ return err;
+
+ return irqchip_cpu_init(cpu_data);
+}
+
+int arm_init_late(void)
+{
+ int err;
+
+ /* Setup the SPI bitmap */
+ err = irqchip_cell_init(&root_cell);
+ if (err)
+ return err;
+
+ return map_root_memory_regions();
+}
diff --git a/hypervisor/arch/arm/include/asm/processor.h b/hypervisor/arch/arm/include/asm/processor.h
index 2efac288..63f4c7f3 100644
--- a/hypervisor/arch/arm/include/asm/processor.h
+++ b/hypervisor/arch/arm/include/asm/processor.h
@@ -35,6 +35,10 @@ struct registers {
unsigned long usr[NUM_USR_REGS];
};

+#define ARM_PARKING_CODE \
+ 0xe320f003, /* 1: wfi */ \
+ 0xeafffffd, /* b 1b */
+
#define dmb(domain) asm volatile("dmb " #domain ::: "memory")
#define dsb(domain) asm volatile("dsb " #domain ::: "memory")
#define isb() asm volatile("isb")
diff --git a/hypervisor/arch/arm/setup.c b/hypervisor/arch/arm/setup.c
index 0aedbf9e..ec260a11 100644
--- a/hypervisor/arch/arm/setup.c
+++ b/hypervisor/arch/arm/setup.c
@@ -17,15 +17,10 @@
#include <asm/control.h>
#include <asm/mach.h>
#include <asm/setup.h>
+#include <asm/setup-common.h>
#include <asm/sysregs.h>

-static u32 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE / 4] = {
- 0xe320f003, /* 1: wfi */
- 0xeafffffd, /* b 1b */
-};
-
unsigned int cache_line_size;
-struct paging_structures parking_mm;

static int arch_check_features(void)
{
@@ -52,29 +47,13 @@ int arch_init_early(void)
if (err)
return err;

- parking_mm.root_paging = cell_paging;
- parking_mm.root_table =
- page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
- if (!parking_mm.root_table)
- return -ENOMEM;
-
- err = paging_create(&parking_mm, paging_hvirt2phys(parking_code),
- PAGE_SIZE, 0,
- (PTE_FLAG_VALID | PTE_ACCESS_FLAG |
- S2_PTE_ACCESS_RO | S2_PTE_FLAG_NORMAL),
- PAGING_COHERENT);
- if (err)
- return err;
-
- return arm_paging_cell_init(&root_cell);
+ return arm_init_early();
}

int arch_cpu_init(struct per_cpu *cpu_data)
{
int err;

- cpu_data->mpidr = phys_processor_id();
-
/*
* Copy the registers to restore from the linux stack here, because we
* won't be able to access it later
@@ -97,31 +76,18 @@ int arch_cpu_init(struct per_cpu *cpu_data)
arm_write_sysreg(HCR, HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT |
HCR_TSC_BIT | HCR_TAC_BIT | HCR_TSW_BIT);

- arm_paging_vcpu_init(&root_cell.arch.mm);
-
- err = irqchip_init();
- if (err)
- return err;
-
- err = irqchip_cpu_init(cpu_data);
-
- return err;
+ return arm_cpu_init(cpu_data);
}

int arch_init_late(void)
{
int err;

- /* Setup the SPI bitmap */
- err = irqchip_cell_init(&root_cell);
- if (err)
- return err;
-
err = mach_init();
if (err)
return err;

- return map_root_memory_regions();
+ return arm_init_late();
}

void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
diff --git a/hypervisor/arch/arm64/include/asm/processor.h b/hypervisor/arch/arm64/include/asm/processor.h
index 00b8601b..1af6d1d4 100644
--- a/hypervisor/arch/arm64/include/asm/processor.h
+++ b/hypervisor/arch/arm64/include/asm/processor.h
@@ -29,6 +29,10 @@ struct registers {
unsigned long usr[NUM_USR_REGS];
};

+#define ARM_PARKING_CODE \
+ 0xd503207f, /* 1: wfi */ \
+ 0x17ffffff, /* b 1b */
+
#define dmb(domain) asm volatile("dmb " #domain "\n" : : : "memory")
#define dsb(domain) asm volatile("dsb " #domain "\n" : : : "memory")
#define isb() asm volatile("isb\n")
diff --git a/hypervisor/arch/arm64/setup.c b/hypervisor/arch/arm64/setup.c
index 4f547cd4..e46f5bee 100644
--- a/hypervisor/arch/arm64/setup.c
+++ b/hypervisor/arch/arm64/setup.c
@@ -18,16 +18,10 @@
#include <asm/control.h>
#include <asm/irqchip.h>
#include <asm/setup.h>
+#include <asm/setup-common.h>

extern u8 __trampoline_start[];

-static u32 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE / 4] = {
- 0xd503207f, /* 1: wfi */
- 0x17ffffff, /* b 1b */
-};
-
-struct paging_structures parking_mm;
-
int arch_init_early(void)
{
unsigned long trampoline_page = paging_hvirt2phys(&__trampoline_start);
@@ -45,56 +39,26 @@ int arch_init_early(void)
if (err)
return err;

- parking_mm.root_paging = cell_paging;
- parking_mm.root_table =
- page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
- if (!parking_mm.root_table)
- return -ENOMEM;
-
- err = paging_create(&parking_mm, paging_hvirt2phys(parking_code),
- PAGE_SIZE, 0,
- (PTE_FLAG_VALID | PTE_ACCESS_FLAG |
- S2_PTE_ACCESS_RO | S2_PTE_FLAG_NORMAL),
- PAGING_COHERENT);
- if (err)
- return err;
-
- return arm_paging_cell_init(&root_cell);
+ return arm_init_early();
}

int arch_cpu_init(struct per_cpu *cpu_data)
{
unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT
| HCR_TSC_BIT | HCR_TAC_BIT | HCR_RW_BIT;
- int err;

/* switch to the permanent page tables */
enable_mmu_el2(paging_hvirt2phys(hv_paging_structs.root_table));

- cpu_data->mpidr = phys_processor_id();
-
/* Setup guest traps */
arm_write_sysreg(HCR_EL2, hcr);

- arm_paging_vcpu_init(&root_cell.arch.mm);
-
- err = irqchip_init();
- if (err)
- return err;
-
- return irqchip_cpu_init(cpu_data);
+ return arm_cpu_init(cpu_data);
}

int arch_init_late(void)
{
- int err;
-
- /* Setup the SPI bitmap */
- err = irqchip_cell_init(&root_cell);
- if (err)
- return err;
-
- return map_root_memory_regions();
+ return arm_init_late();
}

void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
--
2.12.3

Jan Kiszka

Oct 9, 2017, 3:44:39 AM
to jailho...@googlegroups.com
From: Jan Kiszka <jan.k...@siemens.com>

Fixes stand-alone build (header_check).

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/include/jailhouse/uart.h | 2 ++
1 file changed, 2 insertions(+)

diff --git a/hypervisor/include/jailhouse/uart.h b/hypervisor/include/jailhouse/uart.h
index 7d9a7c32..0190e679 100644
--- a/hypervisor/include/jailhouse/uart.h
+++ b/hypervisor/include/jailhouse/uart.h
@@ -10,6 +10,8 @@
* the COPYING file in the top-level directory.
*/

+#include <jailhouse/types.h>
+
struct uart_chip {
/* must be set by the caller */
void *virt_base;
--
2.12.3

Jan Kiszka

Oct 9, 2017, 3:44:39 AM
to jailho...@googlegroups.com
From: Jan Kiszka <jan.k...@siemens.com>

When we have interrupts pending for virtual injection, we need to
migrate them into the physical queue prior to disabling Jailhouse.
Failing to do so means losing interrupts, most often the timer IRQ.
This error could be triggered by running jailhouse enable/disable in a
tight loop.
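
Conceptually, the shutdown path now drains the virtual list registers and
re-pends each interrupt in the physical GIC; a condensed sketch of the
irqchip.c change below:

int irq_id;

/* drain IRQs still queued in the list registers of the virtual GIC */
do {
	irq_id = irqchip.get_pending_irq();
	if (irq_id >= 0)
		irqchip.inject_phys_irq(irq_id);
} while (irq_id >= 0);

/* the software pending queue is migrated the same way */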

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/gic-v2.c | 40 +++++++++++++++++++++
hypervisor/arch/arm-common/gic-v3.c | 45 ++++++++++++++++++++++++
hypervisor/arch/arm-common/include/asm/irqchip.h | 2 ++
hypervisor/arch/arm-common/irqchip.c | 33 ++++++++++++++---
4 files changed, 116 insertions(+), 4 deletions(-)

diff --git a/hypervisor/arch/arm-common/gic-v2.c b/hypervisor/arch/arm-common/gic-v2.c
index 9ad6b9b8..3b9f7d1f 100644
--- a/hypervisor/arch/arm-common/gic-v2.c
+++ b/hypervisor/arch/arm-common/gic-v2.c
@@ -330,6 +330,43 @@ static bool gicv2_has_pending_irqs(void)
return false;
}

+static int gicv2_get_pending_irq(void)
+{
+ unsigned int n;
+ u64 lr;
+
+ for (n = 0; n < gic_num_lr; n++) {
+ lr = gicv2_read_lr(n);
+ if (lr & GICH_LR_PENDING_BIT) {
+ gicv2_write_lr(n, 0);
+ return lr & GICH_LR_VIRT_ID_MASK;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static void gicv2_inject_phys_irq(u16 irq_id)
+{
+ unsigned int offset = (irq_id / 32) * 4;
+ unsigned int mask = 1 << (irq_id % 32);
+
+ if (is_sgi(irq_id)) {
+ /* Inject with CPU 0 as source - we don't track the origin. */
+ mmio_write8(gicd_base + GICD_SPENDSGIR + irq_id, 1);
+ } else {
+ /*
+ * Hardware interrupts are physically active until they are
+ * processed by the cell. Deactivate them first so that we can
+ * reinject.
+ */
+ mmio_write32(gicd_base + GICD_ICACTIVER + offset, mask);
+
+ /* inject via GICD */
+ mmio_write32(gicd_base + GICD_ISPENDR + offset, mask);
+ }
+}
+
static enum mmio_result gicv2_handle_irq_route(struct mmio_access *mmio,
unsigned int irq)
{
@@ -467,6 +504,9 @@ const struct irqchip gicv2_irqchip = {
.read_iar_irqn = gicv2_read_iar_irqn,
.eoi_irq = gicv2_eoi_irq,

+ .get_pending_irq = gicv2_get_pending_irq,
+ .inject_phys_irq = gicv2_inject_phys_irq,
+
.handle_irq_route = gicv2_handle_irq_route,
.handle_irq_target = gicv2_handle_irq_target,
.handle_dist_access = gicv2_handle_dist_access,
diff --git a/hypervisor/arch/arm-common/gic-v3.c b/hypervisor/arch/arm-common/gic-v3.c
index e06b7da6..2cc22be6 100644
--- a/hypervisor/arch/arm-common/gic-v3.c
+++ b/hypervisor/arch/arm-common/gic-v3.c
@@ -558,6 +558,49 @@ static bool gicv3_has_pending_irqs(void)
return false;
}

+static int gicv3_get_pending_irq(void)
+{
+ unsigned int n;
+ u64 lr;
+
+ for (n = 0; n < gic_num_lr; n++) {
+ lr = gicv3_read_lr(n);
+ if (lr & ICH_LR_PENDING) {
+ gicv3_write_lr(n, 0);
+ return (u32)lr;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static void gicv3_inject_phys_irq(u16 irq_id)
+{
+ void *gicr = this_cpu_data()->gicr.base + GICR_SGI_BASE;
+ unsigned int offset = (irq_id / 32) * 4;
+ unsigned int mask = 1 << (irq_id % 32);
+
+ if (!is_spi(irq_id)) {
+ /*
+ * Hardware interrupts are physically active until they are
+ * processed by the cell. Deactivate them first so that we can
+ * reinject.
+ * For simplicity reasons, we also issue deactivation for SGIs
+ * although they don't need this.
+ */
+ mmio_write32(gicr + GICR_ICACTIVER, mask);
+
+ /* inject via GICR */
+ mmio_write32(gicr + GICR_ISPENDR, mask);
+ } else {
+ /* see above */
+ mmio_write32(gicd_base + GICD_ICACTIVER + offset, mask);
+
+ /* inject via GICD */
+ mmio_write32(gicd_base + GICD_ISPENDR + offset, mask);
+ }
+}
+
static enum mmio_result gicv3_handle_irq_target(struct mmio_access *mmio,
unsigned int irq)
{
@@ -605,6 +648,8 @@ const struct irqchip gicv3_irqchip = {
.inject_irq = gicv3_inject_irq,
.enable_maint_irq = gicv3_enable_maint_irq,
.has_pending_irqs = gicv3_has_pending_irqs,
+ .get_pending_irq = gicv3_get_pending_irq,
+ .inject_phys_irq = gicv3_inject_phys_irq,
.read_iar_irqn = gicv3_read_iar_irqn,
.eoi_irq = gicv3_eoi_irq,
.handle_irq_route = gicv3_handle_irq_route,
diff --git a/hypervisor/arch/arm-common/include/asm/irqchip.h b/hypervisor/arch/arm-common/include/asm/irqchip.h
index 3d15c934..e336628f 100644
--- a/hypervisor/arch/arm-common/include/asm/irqchip.h
+++ b/hypervisor/arch/arm-common/include/asm/irqchip.h
@@ -51,6 +51,8 @@ struct irqchip {
int (*inject_irq)(struct per_cpu *cpu_data, u16 irq_id);
void (*enable_maint_irq)(bool enable);
bool (*has_pending_irqs)(void);
+ int (*get_pending_irq)(void);
+ void (*inject_phys_irq)(u16 irq_id);

int (*get_cpu_target)(unsigned int cpu_id);
u64 (*get_cluster_target)(unsigned int cpu_id);
diff --git a/hypervisor/arch/arm-common/irqchip.c b/hypervisor/arch/arm-common/irqchip.c
index f78f4511..90fb23c0 100644
--- a/hypervisor/arch/arm-common/irqchip.c
+++ b/hypervisor/arch/arm-common/irqchip.c
@@ -326,12 +326,37 @@ void irqchip_cpu_reset(struct per_cpu *cpu_data)

void irqchip_cpu_shutdown(struct per_cpu *cpu_data)
{
+ int irq_id;
+
/*
- * The GIC backend must take care of only resetting the hyp interface if
- * it has been initialised: this function may be executed during the
- * setup phase.
+ * The GIC implementation must take care of only resetting the hyp
+ * interface if it has been initialized because this function may be
+ * executed during the setup phase. It returns an error if the
+ * initialization has not taken place yet.
*/
- irqchip.cpu_shutdown(cpu_data);
+ if (irqchip.cpu_shutdown(cpu_data) < 0)
+ return;
+
+ /*
+ * Migrate interrupts queued in the GICV.
+ * No locking required at this stage because no other CPU is able to
+ * inject anymore.
+ */
+ do {
+ irq_id = irqchip.get_pending_irq();
+ if (irq_id >= 0)
+ irqchip.inject_phys_irq(irq_id);
+ } while (irq_id >= 0);
+
+ /* Migrate interrupts queued in software. */
+ while (cpu_data->pending_irqs_head != cpu_data->pending_irqs_tail) {
+ irq_id = cpu_data->pending_irqs[cpu_data->pending_irqs_head];
+
+ irqchip.inject_phys_irq(irq_id);
+
+ cpu_data->pending_irqs_head =
+ (cpu_data->pending_irqs_head + 1) % MAX_PENDING_IRQS;
+ }
}

int irqchip_cell_init(struct cell *cell)
--
2.12.3

Jan Kiszka

Oct 9, 2017, 3:44:39 AM
to jailho...@googlegroups.com
From: Jan Kiszka <jan.k...@siemens.com>

This allows the caller to decide whether further deinitialization steps are
required.
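
A short usage sketch of the new return value (this is how the follow-up
migration patch consumes it):

/* skip any further cleanup if the GIC was never initialized on this CPU */
if (irqchip.cpu_shutdown(cpu_data) < 0)
	return;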

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm-common/gic-v2.c | 6 ++++--
hypervisor/arch/arm-common/gic-v3.c | 6 ++++--
hypervisor/arch/arm-common/include/asm/irqchip.h | 2 +-
3 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/hypervisor/arch/arm-common/gic-v2.c b/hypervisor/arch/arm-common/gic-v2.c
index eeed5305..9ad6b9b8 100644
--- a/hypervisor/arch/arm-common/gic-v2.c
+++ b/hypervisor/arch/arm-common/gic-v2.c
@@ -167,13 +167,13 @@ static int gicv2_cpu_init(struct per_cpu *cpu_data)
return 0;
}

-static void gicv2_cpu_shutdown(struct per_cpu *cpu_data)
+static int gicv2_cpu_shutdown(struct per_cpu *cpu_data)
{
u32 gich_vmcr = mmio_read32(gich_base + GICH_VMCR);
u32 gicc_ctlr = 0;

if (!cpu_data->gicc_initialized)
- return;
+ return -ENODEV;

mmio_write32(gich_base + GICH_HCR, 0);

@@ -189,6 +189,8 @@ static void gicv2_cpu_shutdown(struct per_cpu *cpu_data)
mmio_write32(gicc_base + GICC_CTLR, gicc_ctlr);
mmio_write32(gicc_base + GICC_PMR,
(gich_vmcr >> GICH_VMCR_PMR_SHIFT) << GICV_PMR_SHIFT);
+
+ return 0;
}

static u32 gicv2_read_iar_irqn(void)
diff --git a/hypervisor/arch/arm-common/gic-v3.c b/hypervisor/arch/arm-common/gic-v3.c
index d6f16aa3..e06b7da6 100644
--- a/hypervisor/arch/arm-common/gic-v3.c
+++ b/hypervisor/arch/arm-common/gic-v3.c
@@ -262,12 +262,12 @@ static int gicv3_cpu_init(struct per_cpu *cpu_data)
return 0;
}

-static void gicv3_cpu_shutdown(struct per_cpu *cpu_data)
+static int gicv3_cpu_shutdown(struct per_cpu *cpu_data)
{
u32 ich_vmcr, icc_ctlr, cell_icc_igrpen1;

if (!cpu_data->gicr.base)
- return;
+ return -ENODEV;

arm_write_sysreg(ICH_HCR_EL2, 0);

@@ -288,6 +288,8 @@ static void gicv3_cpu_shutdown(struct per_cpu *cpu_data)
cell_icc_igrpen1 &= ~ICC_IGRPEN1_EN;
arm_write_sysreg(ICC_IGRPEN1_EL1, cell_icc_igrpen1);
}
+
+ return 0;
}

static void gicv3_adjust_irq_target(struct cell *cell, u16 irq_id)
diff --git a/hypervisor/arch/arm-common/include/asm/irqchip.h b/hypervisor/arch/arm-common/include/asm/irqchip.h
index 7cf5d704..3d15c934 100644
--- a/hypervisor/arch/arm-common/include/asm/irqchip.h
+++ b/hypervisor/arch/arm-common/include/asm/irqchip.h
@@ -40,7 +40,7 @@ struct irqchip {
int (*init)(void);
int (*cpu_init)(struct per_cpu *cpu_data);
void (*cpu_reset)(struct per_cpu *cpu_data);
- void (*cpu_shutdown)(struct per_cpu *cpu_data);
+ int (*cpu_shutdown)(struct per_cpu *cpu_data);
int (*cell_init)(struct cell *cell);
void (*cell_exit)(struct cell *cell);
void (*adjust_irq_target)(struct cell *cell, u16 irq_id);
--
2.12.3

Jan Kiszka

Oct 9, 2017, 3:44:41 AM
to jailho...@googlegroups.com
From: Jan Kiszka <jan.k...@siemens.com>

jailhouse_hypercall.h is now only pulled in directly by arm64. Adjust the
test accordingly so that it does not report false positives.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
scripts/header_check | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/scripts/header_check b/scripts/header_check
index 76d637b0..d1475a95 100755
--- a/scripts/header_check
+++ b/scripts/header_check
@@ -34,7 +34,10 @@ test_compile()
prepend="#include <jailhouse/types.h>"
;;
jailhouse_hypercall.h)
- # only included directly for linker script
+ # only included directly on arm64
+ if [ "$ARCH" != "arm64" ]; then
+ return;
+ fi
prepend="#define __ASSEMBLY__
#include <jailhouse/types.h>"
;;
--
2.12.3

Ralf Ramsauer

Oct 23, 2017, 7:11:47 PM
to Jan Kiszka, jailho...@googlegroups.com
Hi.

On 10/09/2017 09:44 AM, Jan Kiszka wrote:
> This mostly deals with the case that Ralf pointed out: We lost interrupts
> during jailhouse disable, specifically when running this in a tight
> loop. The reason is explained in the last patch.
>
> This also contains a small change to our device trees which allow the
> non-root inmates to detect that they are running over Jailhouse. Will be
> useful, e.g., for switching the PCI scanner in Linux to per-function
> scans (I'm preparing a cleaned up Linux queue).
>
> Moreover, it pulls out the shared setup parts of arm/arm64 and moves
> them into arm-common.
>
> Finally, this fixes the header_check again.
>
> Jan
>
>
> CC: Ralf Ramsauer <ralf.r...@oth-regensburg.de>
For the series, tested on the QEMU arm64 softmmu target:

Tested-by: Ralf Ramsauer <ralf.r...@oth-regensburg.de>

Ralf

Lokesh Vutla

Oct 24, 2017, 12:13:25 AM
to Jan Kiszka, jailho...@googlegroups.com


On Monday 09 October 2017 01:14 PM, Jan Kiszka wrote:
> From: Jan Kiszka <jan.k...@siemens.com>
>
> When we have interrupts pending for virtual injection, we need to
> migrate them into the physical queue prior to disabling Jailhouse.
> Failing to do so means losing interrupts, most often the timer IRQ.
> This error could be triggered by running jailhouse enable/disable in a
> tight loop.

Good point. Why can't we have a single function to do that? I mean, use

irqchip.inject_pending_phys_irq()

instead of

do {
irq_id = irqchip.get_pending_irq();
if (irq_id >= 0)
irqchip.inject_phys_irq(irq_id);
} while (irq_id >= 0);

Thanks and regards,
Lokesh

Jan Kiszka

Oct 24, 2017, 5:14:45 PM
to Lokesh Vutla, jailho...@googlegroups.com
On 2017-10-24 06:08, 'Lokesh Vutla' via Jailhouse wrote:
>
>
> On Monday 09 October 2017 01:14 PM, Jan Kiszka wrote:
>> From: Jan Kiszka <jan.k...@siemens.com>
>>
>> When we have interrupts pending for virtual injection, we need to
>> migrate them into the physical queue prior to disabling Jailhouse.
>> Failing to do so means losing interrupts, most often the timer IRQ.
>> This error could be triggered by running jailhouse enable/disable in a
>> tight loop.
>
> Good point. Why can't we have a single function to do that. I mean use
>
> irqchip.inject_pending_phys_irq()
>
> instead of
>
> do {
> irq_id = irqchip.get_pending_irq();
> if (irq_id >= 0)
> irqchip.inject_phys_irq(irq_id);
> } while (irq_id >= 0);

While this is the only use case for get_pending_irq, there is another use
case for inject_phys_irq. That alone is already a good reason to split this
into two callbacks.

Jan

--
Siemens AG, Corporate Technology, CT RDA ITP SES-DE
Corporate Competence Center Embedded Linux