This aligns the inline asm statements with our (kernel) coding style:
indent multi-line asm blocks, end each line of a multi-line block with
\n\t, and drop the trailing newline from single-line statements. No
functional changes.
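
For illustration, a minimal sketch of the two cases in the target style
(hypothetical instructions, not taken from this patch):

	/* multi-line block: indented, every line ends with \n\t */
	asm volatile (
		"mov r1, #1\n\t"
		"add %0, %0, r1\n\t"
		: "+r" (val) : : "r1");

	/* single-line statement: no trailing newline */
	asm volatile ("isb");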
hypervisor/arch/arm/include/asm/bitops.h | 50 +++++++++---------
hypervisor/arch/arm/include/asm/processor.h | 12 ++---
hypervisor/arch/arm/include/asm/setup.h | 15 +++---
hypervisor/arch/arm/include/asm/setup_mmu.h | 79 +++++++++++++++--------------
hypervisor/arch/arm/include/asm/spinlock.h | 29 ++++++-----
hypervisor/arch/arm/mmu_hyp.c | 2 +-
hypervisor/arch/arm/setup.c | 27 +++++-----
7 files changed, 111 insertions(+), 103 deletions(-)
diff --git a/hypervisor/arch/arm/include/asm/bitops.h b/hypervisor/arch/arm/include/asm/bitops.h
index 41e5ef1..bf1bfdf 100644
--- a/hypervisor/arch/arm/include/asm/bitops.h
+++ b/hypervisor/arch/arm/include/asm/bitops.h
@@ -26,8 +26,8 @@
/* Load the cacheline in exclusive state */
#define PRELOAD(addr) \
- asm volatile (".arch_extension mp\n" \
- "pldw %0\n" \
+ asm volatile (".arch_extension mp\n\t" \
+ "pldw %0\n\t" \
: "+Qo" (*(volatile unsigned long *)addr));
static inline __attribute__((always_inline)) void
@@ -40,13 +40,13 @@ clear_bit(int nr, volatile unsigned long *addr)
PRELOAD(addr);
do {
asm volatile (
- "ldrex %1, %2\n"
- "bic %1, %3\n"
- "strex %0, %1, %2\n"
- : "=r" (ret), "=r" (val),
- /* Declare the clobbering of this address to the compiler */
- "+Qo" (*(volatile unsigned long *)addr)
- : "r" (1 << nr));
+ "ldrex %1, %2\n\t"
+ "bic %1, %3\n\t"
+ "strex %0, %1, %2\n\t"
+ : "=r" (ret), "=r" (val),
+ /* declare clobbering of this address to the compiler */
+ "+Qo" (*(volatile unsigned long *)addr)
+ : "r" (1 << nr));
} while (ret);
}
@@ -60,12 +60,12 @@ set_bit(unsigned int nr, volatile unsigned long *addr)
PRELOAD(addr);
do {
asm volatile (
- "ldrex %1, %2\n"
- "orr %1, %3\n"
- "strex %0, %1, %2\n"
- : "=r" (ret), "=r" (val),
- "+Qo" (*(volatile unsigned long *)addr)
- : "r" (1 << nr));
+ "ldrex %1, %2\n\t"
+ "orr %1, %3\n\t"
+ "strex %0, %1, %2\n\t"
+ : "=r" (ret), "=r" (val),
+ "+Qo" (*(volatile unsigned long *)addr)
+ : "r" (1 << nr));
} while (ret);
}
@@ -85,14 +85,14 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
PRELOAD(addr);
do {
asm volatile (
- "ldrex %1, %3\n"
- "ands %2, %1, %4\n"
- "it eq\n"
- "orreq %1, %4\n"
- "strex %0, %1, %3\n"
- : "=r" (ret), "=r" (val), "=r" (test),
- "+Qo" (*(volatile unsigned long *)addr)
- : "r" (1 << nr));
+ "ldrex %1, %3\n\t"
+ "ands %2, %1, %4\n\t"
+ "it eq\n\t"
+ "orreq %1, %4\n\t"
+ "strex %0, %1, %3\n\t"
+ : "=r" (ret), "=r" (val), "=r" (test),
+ "+Qo" (*(volatile unsigned long *)addr)
+ : "r" (1 << nr));
} while (ret);
return !!(test);
@@ -103,7 +103,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
static inline unsigned long clz(unsigned long word)
{
unsigned long val;
- asm volatile ("clz %0, %1\n" : "=r" (val) : "r" (word));
+ asm volatile ("clz %0, %1" : "=r" (val) : "r" (word));
return val;
}
@@ -112,7 +112,7 @@ static inline unsigned long ffsl(unsigned long word)
{
if (!word)
return 0;
- asm volatile ("rbit %0, %0\n" : "+r" (word));
+ asm volatile ("rbit %0, %0" : "+r" (word));
return clz(word);
}
diff --git a/hypervisor/arch/arm/include/asm/processor.h b/hypervisor/arch/arm/include/asm/processor.h
index c4fbb9e..c6144a7 100644
--- a/hypervisor/arch/arm/include/asm/processor.h
+++ b/hypervisor/arch/arm/include/asm/processor.h
@@ -163,13 +163,13 @@ struct registers {
unsigned long usr[NUM_USR_REGS];
};
-#define dmb(domain) asm volatile("dmb " #domain "\n" ::: "memory")
-#define dsb(domain) asm volatile("dsb " #domain "\n" ::: "memory")
-#define isb() asm volatile("isb\n")
+#define dmb(domain) asm volatile("dmb " #domain ::: "memory")
+#define dsb(domain) asm volatile("dsb " #domain ::: "memory")
+#define isb() asm volatile("isb")
-#define wfe() asm volatile("wfe\n")
-#define wfi() asm volatile("wfi\n")
-#define sev() asm volatile("sev\n")
+#define wfe() asm volatile("wfe")
+#define wfi() asm volatile("wfi")
+#define sev() asm volatile("sev")
unsigned int smc(unsigned int r0, ...);
unsigned int hvc(unsigned int r0, ...);
diff --git a/hypervisor/arch/arm/include/asm/setup.h b/hypervisor/arch/arm/include/asm/setup.h
index dacbe17..69d913a 100644
--- a/hypervisor/arch/arm/include/asm/setup.h
+++ b/hypervisor/arch/arm/include/asm/setup.h
@@ -26,13 +26,14 @@ cpu_prepare_return_el1(struct per_cpu *cpu_data, int return_code)
cpu_data->linux_reg[0] = return_code;
asm volatile (
- "msr sp_svc, %0\n"
- "msr elr_hyp, %1\n"
- "msr spsr_hyp, %2\n"
- :
- : "r" (cpu_data->linux_sp + (NUM_ENTRY_REGS * sizeof(unsigned long))),
- "r" (cpu_data->linux_ret),
- "r" (cpu_data->linux_flags));
+ "msr sp_svc, %0\n\t"
+ "msr elr_hyp, %1\n\t"
+ "msr spsr_hyp, %2\n\t"
+ :
+ : "r" (cpu_data->linux_sp +
+ (NUM_ENTRY_REGS * sizeof(unsigned long))),
+ "r" (cpu_data->linux_ret),
+ "r" (cpu_data->linux_flags));
}
int switch_exception_level(struct per_cpu *cpu_data);
diff --git a/hypervisor/arch/arm/include/asm/setup_mmu.h b/hypervisor/arch/arm/include/asm/setup_mmu.h
index 7b6e8bb..f1a7b86 100644
--- a/hypervisor/arch/arm/include/asm/setup_mmu.h
+++ b/hypervisor/arch/arm/include/asm/setup_mmu.h
@@ -26,32 +26,35 @@ static void __attribute__((naked)) __attribute__((noinline))
cpu_switch_el2(unsigned long phys_bootstrap, virt2phys_t virt2phys)
{
asm volatile(
- /*
- * The linux hyp stub allows to install the vectors with a single hvc.
- * The vector base address is in r0 (phys_bootstrap).
- */
- "hvc #0\n"
+ /*
+ * The linux hyp stub allows to install the vectors with a
+ * single hvc. The vector base address is in r0
+ * (phys_bootstrap).
+ */
+ "hvc #0\n\t"
- /*
- * Now that the bootstrap vectors are installed, call setup_el2 with
- * the translated physical values of lr and sp as arguments
- */
- "mov r0, sp\n"
- "push {lr}\n"
- "blx %0\n"
- "pop {lr}\n"
- "push {r0}\n"
- "mov r0, lr\n"
- "blx %0\n"
- "pop {r1}\n"
- "hvc #0\n"
- :
- : "r" (virt2phys)
- /*
- * The call to virt2phys may clobber all temp registers. This list
- * ensures that the compiler uses a decent register for hvirt2phys.
- */
- : "cc", "memory", "r0", "r1", "r2", "r3");
+ /*
+ * Now that the bootstrap vectors are installed, call setup_el2
+ * with the translated physical values of lr and sp as
+ * arguments.
+ */
+ "mov r0, sp\n\t"
+ "push {lr}\n\t"
+ "blx %0\n\t"
+ "pop {lr}\n\t"
+ "push {r0}\n\t"
+ "mov r0, lr\n\t"
+ "blx %0\n\t"
+ "pop {r1}\n\t"
+ "hvc #0\n\t"
+ :
+ : "r" (virt2phys)
+ /*
+ * The call to virt2phys may clobber all temp registers. This
+ * list ensures that the compiler uses a decent register for
+ * hvirt2phys.
+ */
+ : "cc", "memory", "r0", "r1", "r2", "r3");
}
static inline void __attribute__((always_inline))
@@ -59,19 +62,19 @@ cpu_switch_phys2virt(phys2virt_t phys2virt)
{
/* phys2virt is allowed to touch the stack */
asm volatile(
- "mov r0, lr\n"
- "blx %0\n"
- /* Save virt_lr */
- "push {r0}\n"
- /* Translate phys_sp */
- "mov r0, sp\n"
- "blx %0\n"
- /* Jump back to virtual addresses */
- "mov sp, r0\n"
- "pop {pc}\n"
- :
- : "r" (phys2virt)
- : "cc", "r0", "r1", "r2", "r3", "lr", "sp");
+ "mov r0, lr\n\t"
+ "blx %0\n\t"
+ /* Save virt_lr */
+ "push {r0}\n\t"
+ /* Translate phys_sp */
+ "mov r0, sp\n\t"
+ "blx %0\n\t"
+ /* Jump back to virtual addresses */
+ "mov sp, r0\n\t"
+ "pop {pc}\n\t"
+ :
+ : "r" (phys2virt)
+ : "cc", "r0", "r1", "r2", "r3", "lr", "sp");
}
#endif /* !__ASSEMBLY__ */
diff --git a/hypervisor/arch/arm/include/asm/spinlock.h b/hypervisor/arch/arm/include/asm/spinlock.h
index 5e32a59..bee4e40 100644
--- a/hypervisor/arch/arm/include/asm/spinlock.h
+++ b/hypervisor/arch/arm/include/asm/spinlock.h
@@ -40,23 +40,24 @@ static inline void spin_lock(spinlock_t *lock)
/* Take the lock by updating the high part atomically */
asm volatile (
-" .arch_extension mp\n"
-" pldw [%3]\n"
-"1: ldrex %0, [%3]\n"
-" add %1, %0, %4\n"
-" strex %2, %1, [%3]\n"
-" teq %2, #0\n"
-" bne 1b"
- : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
- : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
- : "cc");
+ ".arch_extension mp\n\t"
+ "pldw [%3]\n\t"
+ "1:\n\t"
+ "ldrex %0, [%3]\n\t"
+ "add %1, %0, %4\n\t"
+ "strex %2, %1, [%3]\n\t"
+ "teq %2, #0\n\t"
+ "bne 1b\n\t"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+ : "cc");
while (lockval.tickets.next != lockval.tickets.owner)
asm volatile (
- "wfe\n"
- "ldrh %0, [%1]\n"
- : "=r" (lockval.tickets.owner)
- : "r" (&lock->tickets.owner));
+ "wfe\n\t"
+ "ldrh %0, [%1]\n\t"
+ : "=r" (lockval.tickets.owner)
+ : "r" (&lock->tickets.owner));
/* Ensure we have the lock before doing any more memory ops */
dmb(ish);
diff --git a/hypervisor/arch/arm/mmu_hyp.c b/hypervisor/arch/arm/mmu_hyp.c
index e9e0dc4..6aece9c 100644
--- a/hypervisor/arch/arm/mmu_hyp.c
+++ b/hypervisor/arch/arm/mmu_hyp.c
@@ -152,7 +152,7 @@ setup_mmu_el2(struct per_cpu *cpu_data, phys2virt_t phys2virt, u64 ttbr)
cpu_switch_phys2virt(phys2virt);
/* Not reached (cannot be a while(1), it confuses the compiler) */
- asm volatile("b .\n");
+ asm volatile("b .");
}
/*
diff --git a/hypervisor/arch/arm/setup.c b/hypervisor/arch/arm/setup.c
index 364ade8..f437c5d 100644
--- a/hypervisor/arch/arm/setup.c
+++ b/hypervisor/arch/arm/setup.c
@@ -114,18 +114,21 @@ void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
cpu_prepare_return_el1(cpu_data, 0);
asm volatile(
- /* Reset the hypervisor stack */
- "mov sp, %0\n"
- /*
- * We don't care about clobbering the other registers from now on. Must
- * be in sync with arch_entry.
- */
- "ldm %1, {r0 - r12}\n"
- /* After this, the kernel won't be able to access the hypervisor code */
- "eret\n"
- :
- : "r" (cpu_data->stack + PERCPU_STACK_END),
- "r" (cpu_data->linux_reg));
+ /* Reset the hypervisor stack */
+ "mov sp, %0\n\t"
+ /*
+ * We don't care about clobbering the other registers from now
+ * on. Must be in sync with arch_entry.
+ */
+ "ldm %1, {r0 - r12}\n\t"
+ /*
+ * After this, the kernel won't be able to access the hypervisor
+ * code.
+ */
+ "eret\n\t"
+ :
+ : "r" (cpu_data->stack + PERCPU_STACK_END),
+ "r" (cpu_data->linux_reg));
__builtin_unreachable();
}
--
2.1.4