[PATCH 00/13] ARM-related fixes and cleanups

Jan Kiszka

Jul 14, 2015, 2:08:44 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
While walking through several ARM corners to prepare generic MMIO
dispatching, I noticed some style issues and cleanup opportunities, but
I also found and fixed two bugs that caused page leaks on cell
destruction. The series also improves the dump on hypervisor crashes.

Antonio, Claudio, this could affect your work. Please have a careful
look when possible.

Jan

Jan Kiszka (13):
arm: Fix header_check errors
arm: Fix coding style of asm blocks
arm: Move cpu_switch_el2 into calling module
arm: Fix comment of switch_exception_level
arm: smp: Concentrate non-PSCI logic in Versatile Express module
arm: smp: Remove unused type field from smp_ops
arm: Fix arm_page_table_empty
arm: Account for errors during irqchip cell_init
arm: Use paging_create instead of arch_map_memory_region for GICv2
mapping
arm: Unmap virtual GIC on cell destruction
arm: Use more panic_printk for fatal error messages
core, inmates: Move \r injection into console_write / arch_dbg_write
arm: Improve reporting of unhandled HYP exits

hypervisor/arch/arm/control.c | 35 ++++++++++-------
hypervisor/arch/arm/dbg-write.c | 7 +++-
hypervisor/arch/arm/gic-v2.c | 25 ++++++-------
hypervisor/arch/arm/gic-v3.c | 3 +-
hypervisor/arch/arm/include/asm/bitops.h | 50 ++++++++++++-------------
hypervisor/arch/arm/include/asm/irqchip.h | 4 +-
hypervisor/arch/arm/include/asm/mmio.h | 2 +
hypervisor/arch/arm/include/asm/processor.h | 12 +++---
hypervisor/arch/arm/include/asm/psci.h | 4 ++
hypervisor/arch/arm/include/asm/setup.h | 15 ++++----
hypervisor/arch/arm/include/asm/setup_mmu.h | 58 +++++++----------------------
hypervisor/arch/arm/include/asm/smp.h | 11 ------
hypervisor/arch/arm/include/asm/spinlock.h | 29 ++++++++-------
hypervisor/arch/arm/irqchip.c | 4 +-
hypervisor/arch/arm/mmu_hyp.c | 41 ++++++++++++++++++--
hypervisor/arch/arm/paging.c | 2 +-
hypervisor/arch/arm/setup.c | 31 ++++++++-------
hypervisor/arch/arm/smp-sun7i.c | 1 -
hypervisor/arch/arm/smp-tegra124.c | 1 -
hypervisor/arch/arm/smp-vexpress.c | 40 +++++++++++++++-----
hypervisor/arch/arm/smp.c | 46 -----------------------
hypervisor/arch/x86/dbg-write.c | 7 +++-
hypervisor/printk-core.c | 14 ++-----
inmates/lib/arm/printk.c | 7 +++-
inmates/lib/x86/printk.c | 7 +++-
25 files changed, 223 insertions(+), 233 deletions(-)

--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:44 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
Add missing includes and struct cell forward declaration.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/include/asm/mmio.h | 2 ++
hypervisor/arch/arm/include/asm/psci.h | 4 ++++
2 files changed, 6 insertions(+)

diff --git a/hypervisor/arch/arm/include/asm/mmio.h b/hypervisor/arch/arm/include/asm/mmio.h
index 2d34f9b..a877b79 100644
--- a/hypervisor/arch/arm/include/asm/mmio.h
+++ b/hypervisor/arch/arm/include/asm/mmio.h
@@ -10,6 +10,8 @@
* the COPYING file in the top-level directory.
*/

+#include <jailhouse/types.h>
+
struct mmio_access {
unsigned long addr;
bool is_write;
diff --git a/hypervisor/arch/arm/include/asm/psci.h b/hypervisor/arch/arm/include/asm/psci.h
index 90c3a23..ad64caf 100644
--- a/hypervisor/arch/arm/include/asm/psci.h
+++ b/hypervisor/arch/arm/include/asm/psci.h
@@ -52,8 +52,12 @@

#ifndef __ASSEMBLY__

+#include <jailhouse/types.h>
+
+struct cell;
struct trap_context;
struct per_cpu;
+
struct psci_mbox {
unsigned long entry;
unsigned long context;
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:44 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
Non-inlined functions should not be defined in headers. No functional
changes.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/include/asm/setup_mmu.h | 35 -----------------------------
hypervisor/arch/arm/mmu_hyp.c | 35 +++++++++++++++++++++++++++++
2 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/hypervisor/arch/arm/include/asm/setup_mmu.h b/hypervisor/arch/arm/include/asm/setup_mmu.h
index f1a7b86..dc71ead 100644
--- a/hypervisor/arch/arm/include/asm/setup_mmu.h
+++ b/hypervisor/arch/arm/include/asm/setup_mmu.h
@@ -22,41 +22,6 @@
typedef void* (*phys2virt_t)(unsigned long);
typedef unsigned long (*virt2phys_t)(volatile const void *);

-static void __attribute__((naked)) __attribute__((noinline))
-cpu_switch_el2(unsigned long phys_bootstrap, virt2phys_t virt2phys)
-{
- asm volatile(
- /*
- * The linux hyp stub allows to install the vectors with a
- * single hvc. The vector base address is in r0
- * (phys_bootstrap).
- */
- "hvc #0\n\t"
-
- /*
- * Now that the bootstrap vectors are installed, call setup_el2
- * with the translated physical values of lr and sp as
- * arguments.
- */
- "mov r0, sp\n\t"
- "push {lr}\n\t"
- "blx %0\n\t"
- "pop {lr}\n\t"
- "push {r0}\n\t"
- "mov r0, lr\n\t"
- "blx %0\n\t"
- "pop {r1}\n\t"
- "hvc #0\n\t"
- :
- : "r" (virt2phys)
- /*
- * The call to virt2phys may clobber all temp registers. This
- * list ensures that the compiler uses a decent register for
- * hvirt2phys.
- */
- : "cc", "memory", "r0", "r1", "r2", "r3");
-}
-
static inline void __attribute__((always_inline))
cpu_switch_phys2virt(phys2virt_t phys2virt)
{
diff --git a/hypervisor/arch/arm/mmu_hyp.c b/hypervisor/arch/arm/mmu_hyp.c
index 6aece9c..c9c18ed 100644
--- a/hypervisor/arch/arm/mmu_hyp.c
+++ b/hypervisor/arch/arm/mmu_hyp.c
@@ -90,6 +90,41 @@ static void destroy_id_maps(void)
}
}

+static void __attribute__((naked)) __attribute__((noinline))
+cpu_switch_el2(unsigned long phys_bootstrap, virt2phys_t virt2phys)
+{
+ asm volatile(
+ /*
+ * The linux hyp stub allows to install the vectors with a
+ * single hvc. The vector base address is in r0
+ * (phys_bootstrap).
+ */
+ "hvc #0\n\t"
+
+ /*
+ * Now that the bootstrap vectors are installed, call setup_el2
+ * with the translated physical values of lr and sp as
+ * arguments.
+ */
+ "mov r0, sp\n\t"
+ "push {lr}\n\t"
+ "blx %0\n\t"
+ "pop {lr}\n\t"
+ "push {r0}\n\t"
+ "mov r0, lr\n\t"
+ "blx %0\n\t"
+ "pop {r1}\n\t"
+ "hvc #0\n\t"
+ :
+ : "r" (virt2phys)
+ /*
+ * The call to virt2phys may clobber all temp registers. This
+ * list ensures that the compiler uses a decent register for
+ * hvirt2phys.
+ */
+ : "cc", "memory", "r0", "r1", "r2", "r3");
+}
+
/*
* This code is put in the id-mapped `.trampoline' section, allowing to enable
* and disable the MMU in a readable and portable fashion.
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:44 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
This aligns the asm blocks with our (kernel) coding style: indent
multi-line asm blocks, end each line of a multi-line block with \n\t,
and drop the trailing line ending from single-line statements. No
functional changes.
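
For quick reference, a minimal sketch of the target style (illustrative
only, not part of the patch; it mirrors the set_bit hunk below):

/* single-line asm statement: no trailing newline */
#define isb()		asm volatile("isb")

/* multi-line asm block: operands indented, instructions end in \n\t */
static inline void set_bit_sketch(unsigned int nr,
				  volatile unsigned long *addr)
{
	unsigned long ret, val;

	do {
		asm volatile (
			"ldrex %1, %2\n\t"
			"orr %1, %3\n\t"
			"strex %0, %1, %2\n\t"
			: "=r" (ret), "=r" (val),
			  "+Qo" (*addr)
			: "r" (1UL << nr));
	} while (ret);
}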

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/include/asm/bitops.h | 50 +++++++++---------
hypervisor/arch/arm/include/asm/processor.h | 12 ++---
hypervisor/arch/arm/include/asm/setup.h | 15 +++---
hypervisor/arch/arm/include/asm/setup_mmu.h | 79 +++++++++++++++--------------
hypervisor/arch/arm/include/asm/spinlock.h | 29 ++++++-----
hypervisor/arch/arm/mmu_hyp.c | 2 +-
hypervisor/arch/arm/setup.c | 27 +++++-----
7 files changed, 111 insertions(+), 103 deletions(-)

diff --git a/hypervisor/arch/arm/include/asm/bitops.h b/hypervisor/arch/arm/include/asm/bitops.h
index 41e5ef1..bf1bfdf 100644
--- a/hypervisor/arch/arm/include/asm/bitops.h
+++ b/hypervisor/arch/arm/include/asm/bitops.h
@@ -26,8 +26,8 @@

/* Load the cacheline in exclusive state */
#define PRELOAD(addr) \
- asm volatile (".arch_extension mp\n" \
- "pldw %0\n" \
+ asm volatile (".arch_extension mp\n\t" \
+ "pldw %0\n\t" \
: "+Qo" (*(volatile unsigned long *)addr));

static inline __attribute__((always_inline)) void
@@ -40,13 +40,13 @@ clear_bit(int nr, volatile unsigned long *addr)
PRELOAD(addr);
do {
asm volatile (
- "ldrex %1, %2\n"
- "bic %1, %3\n"
- "strex %0, %1, %2\n"
- : "=r" (ret), "=r" (val),
- /* Declare the clobbering of this address to the compiler */
- "+Qo" (*(volatile unsigned long *)addr)
- : "r" (1 << nr));
+ "ldrex %1, %2\n\t"
+ "bic %1, %3\n\t"
+ "strex %0, %1, %2\n\t"
+ : "=r" (ret), "=r" (val),
+ /* declare clobbering of this address to the compiler */
+ "+Qo" (*(volatile unsigned long *)addr)
+ : "r" (1 << nr));
} while (ret);
}

@@ -60,12 +60,12 @@ set_bit(unsigned int nr, volatile unsigned long *addr)
PRELOAD(addr);
do {
asm volatile (
- "ldrex %1, %2\n"
- "orr %1, %3\n"
- "strex %0, %1, %2\n"
- : "=r" (ret), "=r" (val),
- "+Qo" (*(volatile unsigned long *)addr)
- : "r" (1 << nr));
+ "ldrex %1, %2\n\t"
+ "orr %1, %3\n\t"
+ "strex %0, %1, %2\n\t"
+ : "=r" (ret), "=r" (val),
+ "+Qo" (*(volatile unsigned long *)addr)
+ : "r" (1 << nr));
} while (ret);
}

@@ -85,14 +85,14 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
PRELOAD(addr);
do {
asm volatile (
- "ldrex %1, %3\n"
- "ands %2, %1, %4\n"
- "it eq\n"
- "orreq %1, %4\n"
- "strex %0, %1, %3\n"
- : "=r" (ret), "=r" (val), "=r" (test),
- "+Qo" (*(volatile unsigned long *)addr)
- : "r" (1 << nr));
+ "ldrex %1, %3\n\t"
+ "ands %2, %1, %4\n\t"
+ "it eq\n\t"
+ "orreq %1, %4\n\t"
+ "strex %0, %1, %3\n\t"
+ : "=r" (ret), "=r" (val), "=r" (test),
+ "+Qo" (*(volatile unsigned long *)addr)
+ : "r" (1 << nr));
} while (ret);

return !!(test);
@@ -103,7 +103,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
static inline unsigned long clz(unsigned long word)
{
unsigned long val;
- asm volatile ("clz %0, %1\n" : "=r" (val) : "r" (word));
+ asm volatile ("clz %0, %1" : "=r" (val) : "r" (word));
return val;
}

@@ -112,7 +112,7 @@ static inline unsigned long ffsl(unsigned long word)
{
if (!word)
return 0;
- asm volatile ("rbit %0, %0\n" : "+r" (word));
+ asm volatile ("rbit %0, %0" : "+r" (word));
return clz(word);
}

diff --git a/hypervisor/arch/arm/include/asm/processor.h b/hypervisor/arch/arm/include/asm/processor.h
index c4fbb9e..c6144a7 100644
--- a/hypervisor/arch/arm/include/asm/processor.h
+++ b/hypervisor/arch/arm/include/asm/processor.h
@@ -163,13 +163,13 @@ struct registers {
unsigned long usr[NUM_USR_REGS];
};

-#define dmb(domain) asm volatile("dmb " #domain "\n" ::: "memory")
-#define dsb(domain) asm volatile("dsb " #domain "\n" ::: "memory")
-#define isb() asm volatile("isb\n")
+#define dmb(domain) asm volatile("dmb " #domain ::: "memory")
+#define dsb(domain) asm volatile("dsb " #domain ::: "memory")
+#define isb() asm volatile("isb")

-#define wfe() asm volatile("wfe\n")
-#define wfi() asm volatile("wfi\n")
-#define sev() asm volatile("sev\n")
+#define wfe() asm volatile("wfe")
+#define wfi() asm volatile("wfi")
+#define sev() asm volatile("sev")

unsigned int smc(unsigned int r0, ...);
unsigned int hvc(unsigned int r0, ...);
diff --git a/hypervisor/arch/arm/include/asm/setup.h b/hypervisor/arch/arm/include/asm/setup.h
index dacbe17..69d913a 100644
--- a/hypervisor/arch/arm/include/asm/setup.h
+++ b/hypervisor/arch/arm/include/asm/setup.h
@@ -26,13 +26,14 @@ cpu_prepare_return_el1(struct per_cpu *cpu_data, int return_code)
cpu_data->linux_reg[0] = return_code;

asm volatile (
- "msr sp_svc, %0\n"
- "msr elr_hyp, %1\n"
- "msr spsr_hyp, %2\n"
- :
- : "r" (cpu_data->linux_sp + (NUM_ENTRY_REGS * sizeof(unsigned long))),
- "r" (cpu_data->linux_ret),
- "r" (cpu_data->linux_flags));
+ "msr sp_svc, %0\n\t"
+ "msr elr_hyp, %1\n\t"
+ "msr spsr_hyp, %2\n\t"
+ :
+ : "r" (cpu_data->linux_sp +
+ (NUM_ENTRY_REGS * sizeof(unsigned long))),
+ "r" (cpu_data->linux_ret),
+ "r" (cpu_data->linux_flags));
}

int switch_exception_level(struct per_cpu *cpu_data);
diff --git a/hypervisor/arch/arm/include/asm/setup_mmu.h b/hypervisor/arch/arm/include/asm/setup_mmu.h
index 7b6e8bb..f1a7b86 100644
--- a/hypervisor/arch/arm/include/asm/setup_mmu.h
+++ b/hypervisor/arch/arm/include/asm/setup_mmu.h
@@ -26,32 +26,35 @@ static void __attribute__((naked)) __attribute__((noinline))
cpu_switch_el2(unsigned long phys_bootstrap, virt2phys_t virt2phys)
{
asm volatile(
- /*
- * The linux hyp stub allows to install the vectors with a single hvc.
- * The vector base address is in r0 (phys_bootstrap).
- */
- "hvc #0\n"
+ /*
+ * The linux hyp stub allows to install the vectors with a
+ * single hvc. The vector base address is in r0
+ * (phys_bootstrap).
+ */
+ "hvc #0\n\t"

- /*
- * Now that the bootstrap vectors are installed, call setup_el2 with
- * the translated physical values of lr and sp as arguments
- */
- "mov r0, sp\n"
- "push {lr}\n"
- "blx %0\n"
- "pop {lr}\n"
- "push {r0}\n"
- "mov r0, lr\n"
- "blx %0\n"
- "pop {r1}\n"
- "hvc #0\n"
- :
- : "r" (virt2phys)
- /*
- * The call to virt2phys may clobber all temp registers. This list
- * ensures that the compiler uses a decent register for hvirt2phys.
- */
- : "cc", "memory", "r0", "r1", "r2", "r3");
+ /*
+ * Now that the bootstrap vectors are installed, call setup_el2
+ * with the translated physical values of lr and sp as
+ * arguments.
+ */
+ "mov r0, sp\n\t"
+ "push {lr}\n\t"
+ "blx %0\n\t"
+ "pop {lr}\n\t"
+ "push {r0}\n\t"
+ "mov r0, lr\n\t"
+ "blx %0\n\t"
+ "pop {r1}\n\t"
+ "hvc #0\n\t"
+ :
+ : "r" (virt2phys)
+ /*
+ * The call to virt2phys may clobber all temp registers. This
+ * list ensures that the compiler uses a decent register for
+ * hvirt2phys.
+ */
+ : "cc", "memory", "r0", "r1", "r2", "r3");
}

static inline void __attribute__((always_inline))
@@ -59,19 +62,19 @@ cpu_switch_phys2virt(phys2virt_t phys2virt)
{
/* phys2virt is allowed to touch the stack */
asm volatile(
- "mov r0, lr\n"
- "blx %0\n"
- /* Save virt_lr */
- "push {r0}\n"
- /* Translate phys_sp */
- "mov r0, sp\n"
- "blx %0\n"
- /* Jump back to virtual addresses */
- "mov sp, r0\n"
- "pop {pc}\n"
- :
- : "r" (phys2virt)
- : "cc", "r0", "r1", "r2", "r3", "lr", "sp");
+ "mov r0, lr\n\t"
+ "blx %0\n\t"
+ /* Save virt_lr */
+ "push {r0}\n\t"
+ /* Translate phys_sp */
+ "mov r0, sp\n\t"
+ "blx %0\n\t"
+ /* Jump back to virtual addresses */
+ "mov sp, r0\n\t"
+ "pop {pc}\n\t"
+ :
+ : "r" (phys2virt)
+ : "cc", "r0", "r1", "r2", "r3", "lr", "sp");
}

#endif /* !__ASSEMBLY__ */
diff --git a/hypervisor/arch/arm/include/asm/spinlock.h b/hypervisor/arch/arm/include/asm/spinlock.h
index 5e32a59..bee4e40 100644
--- a/hypervisor/arch/arm/include/asm/spinlock.h
+++ b/hypervisor/arch/arm/include/asm/spinlock.h
@@ -40,23 +40,24 @@ static inline void spin_lock(spinlock_t *lock)

/* Take the lock by updating the high part atomically */
asm volatile (
-" .arch_extension mp\n"
-" pldw [%3]\n"
-"1: ldrex %0, [%3]\n"
-" add %1, %0, %4\n"
-" strex %2, %1, [%3]\n"
-" teq %2, #0\n"
-" bne 1b"
- : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
- : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
- : "cc");
+ ".arch_extension mp\n\t"
+ "pldw [%3]\n\t"
+ "1:\n\t"
+ "ldrex %0, [%3]\n\t"
+ "add %1, %0, %4\n\t"
+ "strex %2, %1, [%3]\n\t"
+ "teq %2, #0\n\t"
+ "bne 1b\n\t"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+ : "cc");

while (lockval.tickets.next != lockval.tickets.owner)
asm volatile (
- "wfe\n"
- "ldrh %0, [%1]\n"
- : "=r" (lockval.tickets.owner)
- : "r" (&lock->tickets.owner));
+ "wfe\n\t"
+ "ldrh %0, [%1]\n\t"
+ : "=r" (lockval.tickets.owner)
+ : "r" (&lock->tickets.owner));

/* Ensure we have the lock before doing any more memory ops */
dmb(ish);
diff --git a/hypervisor/arch/arm/mmu_hyp.c b/hypervisor/arch/arm/mmu_hyp.c
index e9e0dc4..6aece9c 100644
--- a/hypervisor/arch/arm/mmu_hyp.c
+++ b/hypervisor/arch/arm/mmu_hyp.c
@@ -152,7 +152,7 @@ setup_mmu_el2(struct per_cpu *cpu_data, phys2virt_t phys2virt, u64 ttbr)
cpu_switch_phys2virt(phys2virt);

/* Not reached (cannot be a while(1), it confuses the compiler) */
- asm volatile("b .\n");
+ asm volatile("b .");
}

/*
diff --git a/hypervisor/arch/arm/setup.c b/hypervisor/arch/arm/setup.c
index 364ade8..f437c5d 100644
--- a/hypervisor/arch/arm/setup.c
+++ b/hypervisor/arch/arm/setup.c
@@ -114,18 +114,21 @@ void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
cpu_prepare_return_el1(cpu_data, 0);

asm volatile(
- /* Reset the hypervisor stack */
- "mov sp, %0\n"
- /*
- * We don't care about clobbering the other registers from now on. Must
- * be in sync with arch_entry.
- */
- "ldm %1, {r0 - r12}\n"
- /* After this, the kernel won't be able to access the hypervisor code */
- "eret\n"
- :
- : "r" (cpu_data->stack + PERCPU_STACK_END),
- "r" (cpu_data->linux_reg));
+ /* Reset the hypervisor stack */
+ "mov sp, %0\n\t"
+ /*
+ * We don't care about clobbering the other registers from now
+ * on. Must be in sync with arch_entry.
+ */
+ "ldm %1, {r0 - r12}\n\t"
+ /*
+ * After this, the kernel won't be able to access the hypervisor
+ * code.
+ */
+ "eret\n\t"
+ :
+ : "r" (cpu_data->stack + PERCPU_STACK_END),
+ "r" (cpu_data->linux_reg));

__builtin_unreachable();
}
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:45 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
A pt_entry_t is a reference to an entry, not the entry type itself, so
sizeof(pt_entry_t) yields the pointer size of 4 instead of the entry
size of 8. The empty check therefore overran the table, which
specifically caused page leaks during cell destruction.
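
To illustrate (the typedef is assumed here, based on the description
above):

typedef u64 *pt_entry_t;	/* a reference to an entry, i.e. a pointer */

/*
 * On 32-bit ARM:
 *   sizeof(pt_entry_t) == 4  ->  PAGE_SIZE / 4 == 1024 loop iterations
 *   sizeof(u64)        == 8  ->  PAGE_SIZE / 8 ==  512 entries per table
 * Since pte++ advances by 8 bytes, the old bound scanned one page beyond
 * the table and could hit stale "valid" bits there, keeping the table
 * from being reported as empty and freed.
 */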

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/paging.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hypervisor/arch/arm/paging.c b/hypervisor/arch/arm/paging.c
index 495b23f..8fdd034 100644
--- a/hypervisor/arch/arm/paging.c
+++ b/hypervisor/arch/arm/paging.c
@@ -34,7 +34,7 @@ static bool arm_page_table_empty(page_table_t page_table)
unsigned long n;
pt_entry_t pte;

- for (n = 0, pte = page_table; n < PAGE_SIZE / sizeof(pt_entry_t); n++, pte++)
+ for (n = 0, pte = page_table; n < PAGE_SIZE / sizeof(u64); n++, pte++)
if (arm_entry_valid(pte, PTE_FLAG_VALID))
return false;
return true;
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:45 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
We only keep the non-PSCI CPU hotplug support around for the sake of
old Versatile Express boards/models. No new boards that lack support
for the PSCI standard will be accepted. Therefore, concentrate the
functions that were once considered reusable into the smp-vexpress
module, folding them into their only callers.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/include/asm/smp.h | 5 ----
hypervisor/arch/arm/smp-vexpress.c | 38 +++++++++++++++++++++++------
hypervisor/arch/arm/smp.c | 46 -----------------------------------
3 files changed, 30 insertions(+), 59 deletions(-)

diff --git a/hypervisor/arch/arm/include/asm/smp.h b/hypervisor/arch/arm/include/asm/smp.h
index 858b875..81b3fee 100644
--- a/hypervisor/arch/arm/include/asm/smp.h
+++ b/hypervisor/arch/arm/include/asm/smp.h
@@ -39,11 +39,6 @@ struct smp_ops {
unsigned long (*cpu_spin)(struct per_cpu *cpu_data);
};

-int arch_generic_smp_init(unsigned long mbox);
-int arch_generic_smp_mmio(struct per_cpu *cpu_data, struct mmio_access *access,
- unsigned long mbox);
-unsigned long arch_generic_smp_spin(unsigned long mbox);
-
int arch_smp_mmio_access(struct per_cpu *cpu_data, struct mmio_access *access);
unsigned long arch_smp_spin(struct per_cpu *cpu_data, struct smp_ops *ops);
void register_smp_ops(struct cell *cell);
diff --git a/hypervisor/arch/arm/smp-vexpress.c b/hypervisor/arch/arm/smp-vexpress.c
index c604f2a..ed16887 100644
--- a/hypervisor/arch/arm/smp-vexpress.c
+++ b/hypervisor/arch/arm/smp-vexpress.c
@@ -10,34 +10,56 @@
* the COPYING file in the top-level directory.
*/

+#include <jailhouse/control.h>
#include <jailhouse/processor.h>
-#include <asm/control.h>
#include <asm/irqchip.h>
#include <asm/paging.h>
#include <asm/platform.h>
+#include <asm/setup.h>
#include <asm/smp.h>

-static unsigned long hotplug_mbox;
+static const unsigned long hotplug_mbox = SYSREGS_BASE + 0x30;

static int smp_init(struct cell *cell)
{
- /* vexpress SYSFLAGS */
- hotplug_mbox = SYSREGS_BASE + 0x30;
+ void *mbox_page = (void *)(hotplug_mbox & PAGE_MASK);
+ int err;

/* Map the mailbox page */
- arch_generic_smp_init(hotplug_mbox);
+ err = arch_map_device(mbox_page, mbox_page, PAGE_SIZE);
+ if (err)
+ printk("Unable to map spin mbox page\n");

- return 0;
+ return err;
}

static unsigned long smp_spin(struct per_cpu *cpu_data)
{
- return arch_generic_smp_spin(hotplug_mbox);
+ /*
+ * This is super-dodgy: we assume nothing wrote to the flag register
+ * since the kernel called smp_prepare_cpus, at initialisation.
+ */
+ return mmio_read32((void *)hotplug_mbox);
}

static int smp_mmio(struct per_cpu *cpu_data, struct mmio_access *access)
{
- return arch_generic_smp_mmio(cpu_data, access, hotplug_mbox);
+ unsigned int cpu;
+ unsigned long mbox_page = hotplug_mbox & PAGE_MASK;
+
+ if (access->addr < mbox_page || access->addr >= mbox_page + PAGE_SIZE)
+ return TRAP_UNHANDLED;
+
+ if (access->addr != hotplug_mbox || !access->is_write)
+ /* Ignore all other accesses */
+ return TRAP_HANDLED;
+
+ for_each_cpu_except(cpu, cpu_data->cell->cpu_set, cpu_data->cpu_id) {
+ per_cpu(cpu)->guest_mbox.entry = access->val;
+ psci_try_resume(cpu);
+ }
+
+ return TRAP_HANDLED;
}

static struct smp_ops vexpress_smp_ops = {
diff --git a/hypervisor/arch/arm/smp.c b/hypervisor/arch/arm/smp.c
index 1efc612..dfc8941 100644
--- a/hypervisor/arch/arm/smp.c
+++ b/hypervisor/arch/arm/smp.c
@@ -10,56 +10,10 @@
* the COPYING file in the top-level directory.
*/

-#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
-#include <jailhouse/printk.h>
-#include <asm/control.h>
-#include <asm/psci.h>
-#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

-int arch_generic_smp_init(unsigned long mbox)
-{
- void *mbox_page = (void *)(mbox & PAGE_MASK);
- int err = arch_map_device(mbox_page, mbox_page, PAGE_SIZE);
-
- if (err)
- printk("Unable to map spin mbox page\n");
-
- return err;
-}
-
-unsigned long arch_generic_smp_spin(unsigned long mbox)
-{
- /*
- * This is super-dodgy: we assume nothing wrote to the flag register
- * since the kernel called smp_prepare_cpus, at initialisation.
- */
- return mmio_read32((void *)mbox);
-}
-
-int arch_generic_smp_mmio(struct per_cpu *cpu_data, struct mmio_access *access,
- unsigned long mbox)
-{
- unsigned int cpu;
- unsigned long mbox_page = mbox & PAGE_MASK;
-
- if (access->addr < mbox_page || access->addr >= mbox_page + PAGE_SIZE)
- return TRAP_UNHANDLED;
-
- if (access->addr != mbox || !access->is_write)
- /* Ignore all other accesses */
- return TRAP_HANDLED;
-
- for_each_cpu_except(cpu, cpu_data->cell->cpu_set, cpu_data->cpu_id) {
- per_cpu(cpu)->guest_mbox.entry = access->val;
- psci_try_resume(cpu);
- }
-
- return TRAP_HANDLED;
-}
-
unsigned long arch_smp_spin(struct per_cpu *cpu_data, struct smp_ops *ops)
{
/*
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:45 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
The type field was never read and was actually initialized incorrectly
for sun7i and tegra124.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/include/asm/smp.h | 6 ------
hypervisor/arch/arm/smp-sun7i.c | 1 -
hypervisor/arch/arm/smp-tegra124.c | 1 -
hypervisor/arch/arm/smp-vexpress.c | 2 --
4 files changed, 10 deletions(-)

diff --git a/hypervisor/arch/arm/include/asm/smp.h b/hypervisor/arch/arm/include/asm/smp.h
index 81b3fee..f802b2d 100644
--- a/hypervisor/arch/arm/include/asm/smp.h
+++ b/hypervisor/arch/arm/include/asm/smp.h
@@ -15,17 +15,11 @@

#ifndef __ASSEMBLY__

-enum smp_type {
- SMP_PSCI,
- SMP_SPIN
-};
-
struct mmio_access;
struct per_cpu;
struct cell;

struct smp_ops {
- enum smp_type type;
int (*init)(struct cell *cell);

/*
diff --git a/hypervisor/arch/arm/smp-sun7i.c b/hypervisor/arch/arm/smp-sun7i.c
index abb52d0..d7f66d3 100644
--- a/hypervisor/arch/arm/smp-sun7i.c
+++ b/hypervisor/arch/arm/smp-sun7i.c
@@ -15,7 +15,6 @@
#include <asm/smp.h>

static struct smp_ops sun7i_smp_ops = {
- .type = SMP_SPIN,
.init = psci_cell_init,
.cpu_spin = psci_emulate_spin,
};
diff --git a/hypervisor/arch/arm/smp-tegra124.c b/hypervisor/arch/arm/smp-tegra124.c
index 81f9649..21c855f 100644
--- a/hypervisor/arch/arm/smp-tegra124.c
+++ b/hypervisor/arch/arm/smp-tegra124.c
@@ -15,7 +15,6 @@
#include <asm/smp.h>

static struct smp_ops tegra124_smp_ops = {
- .type = SMP_SPIN,
.init = psci_cell_init,
.cpu_spin = psci_emulate_spin,
};
diff --git a/hypervisor/arch/arm/smp-vexpress.c b/hypervisor/arch/arm/smp-vexpress.c
index ed16887..68f90f5 100644
--- a/hypervisor/arch/arm/smp-vexpress.c
+++ b/hypervisor/arch/arm/smp-vexpress.c
@@ -63,7 +63,6 @@ static int smp_mmio(struct per_cpu *cpu_data, struct mmio_access *access)
}

static struct smp_ops vexpress_smp_ops = {
- .type = SMP_SPIN,
.init = smp_init,
.mmio_handler = smp_mmio,
.cpu_spin = smp_spin,
@@ -74,7 +73,6 @@ static struct smp_ops vexpress_smp_ops = {
* an access to the mbox from the primary.
*/
static struct smp_ops vexpress_guest_smp_ops = {
- .type = SMP_SPIN,
.init = psci_cell_init,
.mmio_handler = smp_mmio,
.cpu_spin = psci_emulate_spin,
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:45 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
The referenced function is actually called setup_mmu_el2.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/mmu_hyp.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/arm/mmu_hyp.c b/hypervisor/arch/arm/mmu_hyp.c
index c9c18ed..99b5fa5 100644
--- a/hypervisor/arch/arm/mmu_hyp.c
+++ b/hypervisor/arch/arm/mmu_hyp.c
@@ -237,8 +237,8 @@ static void check_mmu_map(unsigned long virt_addr, unsigned long phys_addr)
* Jumping to EL2 in the same C code represents an interesting challenge, since
* it will switch from virtual addresses to physical ones, and then back to
* virtual after setting up the EL2 MMU.
- * To this end, the setup_mmu and cpu_switch_el2 functions are naked and must
- * handle the stack themselves.
+ * To this end, the setup_mmu_el2 and cpu_switch_el2 functions are naked and
+ * must handle the stack themselves.
*/
int switch_exception_level(struct per_cpu *cpu_data)
{
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:46 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
The cell_init callback of GICv2 should report the result of the
mapping request and thus needs a channel to return an error code.
Extend the call chain accordingly.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/control.c | 6 +++++-
hypervisor/arch/arm/gic-v2.c | 4 ++--
hypervisor/arch/arm/gic-v3.c | 3 ++-
hypervisor/arch/arm/include/asm/irqchip.h | 4 ++--
hypervisor/arch/arm/irqchip.c | 4 ++--
hypervisor/arch/arm/setup.c | 4 +++-
6 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/hypervisor/arch/arm/control.c b/hypervisor/arch/arm/control.c
index 409885a..bfd79c0 100644
--- a/hypervisor/arch/arm/control.c
+++ b/hypervisor/arch/arm/control.c
@@ -349,7 +349,11 @@ int arch_cell_create(struct cell *cell)
}
cell->arch.last_virt_id = virt_id - 1;

- irqchip_cell_init(cell);
+ err = irqchip_cell_init(cell);
+ if (err) {
+ arch_mmu_cell_destroy(cell);
+ return err;
+ }
irqchip_root_cell_shrink(cell);

register_smp_ops(cell);
diff --git a/hypervisor/arch/arm/gic-v2.c b/hypervisor/arch/arm/gic-v2.c
index 0113f12..6f561f9 100644
--- a/hypervisor/arch/arm/gic-v2.c
+++ b/hypervisor/arch/arm/gic-v2.c
@@ -169,7 +169,7 @@ static void gic_eoi_irq(u32 irq_id, bool deactivate)
mmio_write32(gicc_base + GICC_DIR, irq_id);
}

-static void gic_cell_init(struct cell *cell)
+static int gic_cell_init(struct cell *cell)
{
struct jailhouse_memory gicv_region;

@@ -198,7 +198,7 @@ static void gic_cell_init(struct cell *cell)
* Let the guest access the virtual CPU interface instead of the
* physical one
*/
- arch_map_memory_region(cell, &gicv_region);
+ return arch_map_memory_region(cell, &gicv_region);
}

static void gic_cell_exit(struct cell *cell)
diff --git a/hypervisor/arch/arm/gic-v3.c b/hypervisor/arch/arm/gic-v3.c
index a609cc3..763d712 100644
--- a/hypervisor/arch/arm/gic-v3.c
+++ b/hypervisor/arch/arm/gic-v3.c
@@ -213,9 +213,10 @@ static void gic_route_spis(struct cell *config_cell, struct cell *dest_cell)
}
}

-static void gic_cell_init(struct cell *cell)
+static int gic_cell_init(struct cell *cell)
{
gic_route_spis(cell, cell);
+ return 0;
}

static void gic_cell_exit(struct cell *cell)
diff --git a/hypervisor/arch/arm/include/asm/irqchip.h b/hypervisor/arch/arm/include/asm/irqchip.h
index 1f1ae54..ba82cf2 100644
--- a/hypervisor/arch/arm/include/asm/irqchip.h
+++ b/hypervisor/arch/arm/include/asm/irqchip.h
@@ -47,7 +47,7 @@ struct sgi {
struct irqchip_ops {
int (*init)(void);
int (*cpu_init)(struct per_cpu *cpu_data);
- void (*cell_init)(struct cell *cell);
+ int (*cell_init)(struct cell *cell);
void (*cell_exit)(struct cell *cell);
int (*cpu_reset)(struct per_cpu *cpu_data, bool is_shutdown);

@@ -87,7 +87,7 @@ int irqchip_cpu_init(struct per_cpu *cpu_data);
int irqchip_cpu_reset(struct per_cpu *cpu_data);
void irqchip_cpu_shutdown(struct per_cpu *cpu_data);

-void irqchip_cell_init(struct cell *cell);
+int irqchip_cell_init(struct cell *cell);
void irqchip_cell_exit(struct cell *cell);
void irqchip_root_cell_shrink(struct cell *cell);

diff --git a/hypervisor/arch/arm/irqchip.c b/hypervisor/arch/arm/irqchip.c
index 24b5970..f7dcffe 100644
--- a/hypervisor/arch/arm/irqchip.c
+++ b/hypervisor/arch/arm/irqchip.c
@@ -257,13 +257,13 @@ irqchip_find_config(struct jailhouse_cell_desc *config)
return NULL;
}

-void irqchip_cell_init(struct cell *cell)
+int irqchip_cell_init(struct cell *cell)
{
const struct jailhouse_irqchip *pins = irqchip_find_config(cell->config);

cell->arch.spis = (pins ? pins->pin_bitmap : 0);

- irqchip.cell_init(cell);
+ return irqchip.cell_init(cell);
}

void irqchip_cell_exit(struct cell *cell)
diff --git a/hypervisor/arch/arm/setup.c b/hypervisor/arch/arm/setup.c
index f437c5d..ef6f9e0 100644
--- a/hypervisor/arch/arm/setup.c
+++ b/hypervisor/arch/arm/setup.c
@@ -96,7 +96,9 @@ int arch_init_late(void)
int err;

/* Setup the SPI bitmap */
- irqchip_cell_init(&root_cell);
+ err = irqchip_cell_init(&root_cell);
+ if (err)
+ return err;

/* Platform-specific SMP operations */
register_smp_ops(&root_cell);
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:46 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
This fixes a leak on cell destruction: we left the GICv2 interface
mapped and thus did not free all paging structures. This also means we
need to run the irqchip cleanup before the cell MMU destruction.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/control.c | 4 ++--
hypervisor/arch/arm/gic-v2.c | 2 ++
2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/hypervisor/arch/arm/control.c b/hypervisor/arch/arm/control.c
index bfd79c0..a077736 100644
--- a/hypervisor/arch/arm/control.c
+++ b/hypervisor/arch/arm/control.c
@@ -366,8 +366,6 @@ void arch_cell_destroy(struct cell *cell)
unsigned int cpu;
struct per_cpu *percpu;

- arch_mmu_cell_destroy(cell);
-
for_each_cpu(cpu, cell->cpu_set) {
percpu = per_cpu(cpu);
/* Re-assign the physical IDs for the root cell */
@@ -376,6 +374,8 @@ void arch_cell_destroy(struct cell *cell)
}

irqchip_cell_exit(cell);
+
+ arch_mmu_cell_destroy(cell);
}

/* Note: only supports synchronous flushing as triggered by config_commit! */
diff --git a/hypervisor/arch/arm/gic-v2.c b/hypervisor/arch/arm/gic-v2.c
index a87894f..16937b1 100644
--- a/hypervisor/arch/arm/gic-v2.c
+++ b/hypervisor/arch/arm/gic-v2.c
@@ -198,6 +198,8 @@ static int gic_cell_init(struct cell *cell)

static void gic_cell_exit(struct cell *cell)
{
+ paging_destroy(&cell->arch.mm, (unsigned long)gicc_base, gicc_size,
+ PAGING_NON_COHERENT);
/* Reset interrupt routing of the cell's spis */
gic_target_spis(cell, &root_cell);
}
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:47 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
This moves the injection of \r on \n into the console_write and
arch_dbg_write implementations. That causes some minor duplication but
also fixes the injection for %s strings. Furthermore, this allows
skipping the injection for consoles that may not need it.
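
For reference, the duplicated loop looks roughly like this in each
writer (uart_putc() is a stand-in for the console-specific output path,
not an actual function name):

static void console_write_sketch(const char *msg)
{
	char c = 0;

	while (1) {
		if (c == '\n')		/* a \n was just emitted: emit \r */
			c = '\r';	/* next, without consuming input  */
		else
			c = *msg++;
		if (!c)
			break;
		uart_putc(c);		/* hypothetical per-console output */
	}
}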

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/dbg-write.c | 7 +++++--
hypervisor/arch/x86/dbg-write.c | 7 +++++--
hypervisor/printk-core.c | 14 ++++----------
inmates/lib/arm/printk.c | 7 +++++--
inmates/lib/x86/printk.c | 7 +++++--
5 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/hypervisor/arch/arm/dbg-write.c b/hypervisor/arch/arm/dbg-write.c
index 7612a13..3290b27 100644
--- a/hypervisor/arch/arm/dbg-write.c
+++ b/hypervisor/arch/arm/dbg-write.c
@@ -30,10 +30,13 @@ void arch_dbg_write_init(void)

void arch_dbg_write(const char *msg)
{
- char c;
+ char c = 0;

while (1) {
- c = *msg++;
+ if (c == '\n')
+ c = '\r';
+ else
+ c = *msg++;
if (!c)
break;

diff --git a/hypervisor/arch/x86/dbg-write.c b/hypervisor/arch/x86/dbg-write.c
index 3614c67..e32b8ca 100644
--- a/hypervisor/arch/x86/dbg-write.c
+++ b/hypervisor/arch/x86/dbg-write.c
@@ -42,10 +42,13 @@ void arch_dbg_write_init(void)

void arch_dbg_write(const char *msg)
{
- char c;
+ char c = 0;

while (1) {
- c = *msg++;
+ if (c == '\n')
+ c = '\r';
+ else
+ c = *msg++;
if (!c)
break;
while (!(inb(UART_BASE + UART_LSR) & UART_LSR_THRE))
diff --git a/hypervisor/printk-core.c b/hypervisor/printk-core.c
index 095506f..7cbf62d 100644
--- a/hypervisor/printk-core.c
+++ b/hypervisor/printk-core.c
@@ -117,9 +117,9 @@ static void __vprintk(const char *fmt, va_list ap)

while (1) {
c = *fmt++;
- if (c == 0)
+ if (c == 0) {
break;
- else if (c == '%') {
+ } else if (c == '%') {
*p = 0;
console_write(buf);
p = buf;
@@ -181,15 +181,9 @@ static void __vprintk(const char *fmt, va_list ap)
*p++ = c;
break;
}
- } else if (c == '\n') {
+ } else {
*p++ = c;
- *p = 0;
- console_write(buf);
- p = buf;
- *p++ = '\r';
- } else
- *p++ = c;
-
+ }
if (p >= &buf[sizeof(buf) - 1]) {
*p = 0;
console_write(buf);
diff --git a/inmates/lib/arm/printk.c b/inmates/lib/arm/printk.c
index 970f165..0bc6e0a 100644
--- a/inmates/lib/arm/printk.c
+++ b/inmates/lib/arm/printk.c
@@ -19,10 +19,13 @@ static struct uart_chip chip;

static void console_write(const char *msg)
{
- char c;
+ char c = 0;

while (1) {
- c = *msg++;
+ if (c == '\n')
+ c = '\r';
+ else
+ c = *msg++;
if (!c)
break;

diff --git a/inmates/lib/x86/printk.c b/inmates/lib/x86/printk.c
index 4358476..78271e1 100644
--- a/inmates/lib/x86/printk.c
+++ b/inmates/lib/x86/printk.c
@@ -26,10 +26,13 @@ unsigned int printk_uart_base;

static void uart_write(const char *msg)
{
- char c;
+ char c = 0;

while (1) {
- c = *msg++;
+ if (c == '\n')
+ c = '\r';
+ else
+ c = *msg++;
if (!c)
break;
while (!(inb(printk_uart_base + UART_LSR) & UART_LSR_THRE))
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:47 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
It's actually simpler to invoke paging_create directly instead of
preparing arguments for arch_map_memory_region first.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/gic-v2.c | 21 ++++++++-------------
1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/hypervisor/arch/arm/gic-v2.c b/hypervisor/arch/arm/gic-v2.c
index 6f561f9..a87894f 100644
--- a/hypervisor/arch/arm/gic-v2.c
+++ b/hypervisor/arch/arm/gic-v2.c
@@ -171,8 +171,6 @@ static void gic_eoi_irq(u32 irq_id, bool deactivate)

static int gic_cell_init(struct cell *cell)
{
- struct jailhouse_memory gicv_region;
-
/*
* target_cpu_map has not been populated by all available CPUs when the
* setup code initialises the root cell. It is assumed that the kernel
@@ -182,23 +180,20 @@ static int gic_cell_init(struct cell *cell)
if (cell != &root_cell)
gic_target_spis(cell, cell);

- gicv_region.phys_start = (unsigned long)gicv_base;
/*
+ * Let the guest access the virtual CPU interface instead of the
+ * physical one.
+ *
* WARN: some SoCs (EXYNOS4) use a modified GIC which doesn't have any
* banked CPU interface, so we should map per-CPU physical addresses
* here.
* As for now, none of them seem to have virtualization extensions.
*/
- gicv_region.virt_start = (unsigned long)gicc_base;
- gicv_region.size = gicc_size;
- gicv_region.flags = JAILHOUSE_MEM_IO | JAILHOUSE_MEM_READ
- | JAILHOUSE_MEM_WRITE;
-
- /*
- * Let the guest access the virtual CPU interface instead of the
- * physical one
- */
- return arch_map_memory_region(cell, &gicv_region);
+ return paging_create(&cell->arch.mm, (unsigned long)gicv_base,
+ gicc_size, (unsigned long)gicc_base,
+ (PTE_FLAG_VALID | PTE_ACCESS_FLAG |
+ S2_PTE_ACCESS_RW | S2_PTE_FLAG_DEVICE),
+ PAGING_NON_COHERENT);
}

static void gic_cell_exit(struct cell *cell)
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:47 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
Fatal errors that will leave CPUs unusable and may occur in parallel on
multiple CPUs should be reported via panic_printk to maintain
readability of the output. Adjust some locations for unexpected HYP
exits and failing PSCI_CPU_OFF.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/control.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/hypervisor/arch/arm/control.c b/hypervisor/arch/arm/control.c
index a077736..6db514f 100644
--- a/hypervisor/arch/arm/control.c
+++ b/hypervisor/arch/arm/control.c
@@ -122,7 +122,7 @@ void arch_reset_self(struct per_cpu *cpu_data)

smc(PSCI_CPU_OFF, 0, 0, 0);
smc(PSCI_CPU_OFF_V0_1_UBOOT, 0, 0, 0);
- printk("FATAL: PSCI_CPU_OFF failed\n");
+ panic_printk("FATAL: PSCI_CPU_OFF failed\n");
panic_stop();
}
#endif
@@ -168,7 +168,7 @@ static void arch_dump_exit(const char *reason)
unsigned long pc;

arm_read_banked_reg(ELR_hyp, pc);
- printk("Unhandled HYP %s exit at 0x%x\n", reason, pc);
+ panic_printk("Unhandled HYP %s exit at 0x%x\n", reason, pc);
}

static void arch_dump_abt(bool is_data)
@@ -182,7 +182,7 @@ static void arch_dump_abt(bool is_data)
else
arm_read_sysreg(HIFAR, hxfar);

- printk(" paddr=0x%lx esr=0x%x\n", hxfar, esr);
+ panic_printk(" paddr=0x%lx esr=0x%x\n", hxfar, esr);
}

struct registers* arch_handle_exit(struct per_cpu *cpu_data,
--
2.1.4

Jan Kiszka

Jul 14, 2015, 2:08:48 AM
to jailho...@googlegroups.com, Antonios Motakis, Claudio Fontana
Add a simple register dump and unify the reporting format. This is
particularly useful for debugging hypervisor crashes.

Signed-off-by: Jan Kiszka <jan.k...@siemens.com>
---
hypervisor/arch/arm/control.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/hypervisor/arch/arm/control.c b/hypervisor/arch/arm/control.c
index 6db514f..1c17c31 100644
--- a/hypervisor/arch/arm/control.c
+++ b/hypervisor/arch/arm/control.c
@@ -163,12 +163,17 @@ static void arch_suspend_self(struct per_cpu *cpu_data)
arch_cpu_tlb_flush(cpu_data);
}

-static void arch_dump_exit(const char *reason)
+static void arch_dump_exit(struct registers *regs, const char *reason)
{
unsigned long pc;
+ unsigned int n;

arm_read_banked_reg(ELR_hyp, pc);
panic_printk("Unhandled HYP %s exit at 0x%x\n", reason, pc);
+ for (n = 0; n < NUM_USR_REGS; n++)
+ panic_printk("r%d:%s 0x%08lx%s", n, n < 10 ? " " : "",
+ regs->usr[n], n % 4 == 3 ? "\n" : " ");
+ panic_printk("\n");
}

static void arch_dump_abt(bool is_data)
@@ -182,7 +187,7 @@ static void arch_dump_abt(bool is_data)
else
arm_read_sysreg(HIFAR, hxfar);

- panic_printk(" paddr=0x%lx esr=0x%x\n", hxfar, esr);
+ panic_printk("Physical address: 0x%08lx ESR: 0x%08x\n", hxfar, esr);
}

struct registers* arch_handle_exit(struct per_cpu *cpu_data,
@@ -199,24 +204,24 @@ struct registers* arch_handle_exit(struct per_cpu *cpu_data,
break;

case EXIT_REASON_UNDEF:
- arch_dump_exit("undef");
+ arch_dump_exit(regs, "undef");
panic_stop();
case EXIT_REASON_DABT:
- arch_dump_exit("data abort");
+ arch_dump_exit(regs, "data abort");
arch_dump_abt(true);
panic_stop();
case EXIT_REASON_PABT:
- arch_dump_exit("prefetch abort");
+ arch_dump_exit(regs, "prefetch abort");
arch_dump_abt(false);
panic_stop();
case EXIT_REASON_HVC:
- arch_dump_exit("hvc");
+ arch_dump_exit(regs, "hvc");
panic_stop();
case EXIT_REASON_FIQ:
- arch_dump_exit("fiq");
+ arch_dump_exit(regs, "fiq");
panic_stop();
default:
- arch_dump_exit("unknown");
+ arch_dump_exit(regs, "unknown");
panic_stop();
}

--
2.1.4
