From: Jan Kiszka <
jan.k...@siemens.com>
So far, any error returned by a function called by
pvu_iommu_config_commit was ignored, only reported to the console. That
would have resulted in an inconsistent configuration being run. Also,
pvu_tlb_alloc and pvu_tlb_chain didn't report errors at all, and the
former also returned an incorrect code in that case.
We rather need to panic-stop the system in case some configuration bit
cannot be programmed because there is no way to roll back from a
config_commit by design.
Signed-off-by: Jan Kiszka <
jan.k...@siemens.com>
---
I wonder if there isn't a way - provided it's not too complex - to build
up the programming during cell_init/exit, validate it at that chance,
cache it, and only apply that state on config_commit. Would mean less
panic.
I also wonder when that error in pvu_tlb_chain can actually happen, or
whether the check was just defensive programming. Maybe you also have a
better error text for it.
hypervisor/arch/arm64/include/asm/ti-pvu.h | 2 +-
hypervisor/arch/arm64/ti-pvu.c | 65 +++++++++++++-----------------
2 files changed, 30 insertions(+), 37 deletions(-)
diff --git a/hypervisor/arch/arm64/include/asm/ti-pvu.h b/hypervisor/arch/arm64/include/asm/ti-pvu.h
index 2c340b3a..466b5b3f 100644
--- a/hypervisor/arch/arm64/include/asm/ti-pvu.h
+++ b/hypervisor/arch/arm64/include/asm/ti-pvu.h
@@ -125,6 +125,6 @@ int pvu_iommu_map_memory(struct cell *cell,
int pvu_iommu_unmap_memory(struct cell *cell,
const struct jailhouse_memory *mem);
-int pvu_iommu_config_commit(struct cell *cell);
+void pvu_iommu_config_commit(struct cell *cell_added_removed);
#endif /* _IOMMMU_PVU_H_ */
diff --git a/hypervisor/arch/arm64/ti-pvu.c b/hypervisor/arch/arm64/ti-pvu.c
index b06ba843..91d39848 100644
--- a/hypervisor/arch/arm64/ti-pvu.c
+++ b/hypervisor/arch/arm64/ti-pvu.c
@@ -15,7 +15,7 @@
* There are limitations on the number of available contexts, page sizes,
* number of pages that can be mapped, etc.
*
- * PVU is desgined to be programmed with all the memory mapping at once.
+ * PVU is designed to be programmed with all the memory mapping at once.
* Therefore, it defers the actual register programming till config_commit.
* Also, it does not support unmapping of the pages at runtime.
*
@@ -83,16 +83,18 @@ static u32 pvu_tlb_is_enabled(struct pvu_dev *dev, u16 tlbnum)
return 0;
}
-static int pvu_tlb_chain(struct pvu_dev *dev, u16 tlbnum, u16 tlb_next)
+static void pvu_tlb_chain(struct pvu_dev *dev, u16 tlbnum, u16 tlb_next)
{
struct pvu_hw_tlb *tlb;
- if (tlb_next <= tlbnum || tlb_next <= dev->max_virtid)
- return -EINVAL;
+ if (tlb_next <= tlbnum || tlb_next <= dev->max_virtid) {
+ panic_printk("ERROR: PVU: Invalid TLB index %d for chaining\n",
+ tlb_next);
+ panic_stop();
+ }
tlb = (struct pvu_hw_tlb *)dev->tlb_base + tlbnum;
mmio_write32_field(&tlb->chain, PVU_TLB_CHAIN_MASK, tlb_next);
- return 0;
}
static u32 pvu_tlb_next(struct pvu_dev *dev, u16 tlbnum)
@@ -113,7 +115,8 @@ static u32 pvu_tlb_alloc(struct pvu_dev *dev, u16 virtid)
return i;
}
}
- return 0;
+ panic_printk("ERROR: PVU: Not enough TLB entries\n");
+ panic_stop();
}
static void pvu_tlb_flush(struct pvu_dev *dev, u16 tlbnum)
@@ -159,8 +162,8 @@ static void pvu_entry_enable(struct pvu_dev *dev, u16 tlbnum, u8 index)
dev->tlb_data[tlbnum] |= (1 << index);
}
-static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
- struct pvu_tlb_entry *ent)
+static void pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
+ struct pvu_tlb_entry *ent)
{
struct pvu_hw_tlb_entry *entry;
struct pvu_hw_tlb *tlb;
@@ -175,16 +178,16 @@ static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
}
if (pgsz >= ARRAY_SIZE(pvu_page_size_bytes)) {
- printk("ERROR: PVU: %s: Unsupported page size %llx\n",
- __func__, ent->size);
- return -EINVAL;
+ panic_printk("ERROR: PVU: Unsupported page size %llx\n",
+ ent->size);
+ panic_stop();
}
if (!is_aligned(ent->virt_addr, ent->size) ||
!is_aligned(ent->phys_addr, ent->size)) {
- printk("ERROR: PVU: %s: Address %llx => %llx is not aligned with size %llx\n",
- __func__, ent->virt_addr, ent->phys_addr, ent->size);
- return -EINVAL;
+ panic_printk("ERROR: PVU: Address %llx => %llx is not aligned with size %llx\n",
+ ent->virt_addr, ent->phys_addr, ent->size);
+ panic_stop();
}
mmio_write32(&entry->reg0, ent->virt_addr & 0xffffffff);
@@ -200,7 +203,6 @@ static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
/* Do we need "DSB NSH" here to make sure all writes are finised? */
pvu_entry_enable(dev, tlbnum, index);
- return 0;
}
static u32 pvu_init_device(struct pvu_dev *dev, u16 max_virtid)
@@ -328,17 +330,17 @@ static void pvu_entrylist_sort(struct pvu_tlb_entry *entlist, u32 num_entries)
}
}
-static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
+static void pvu_iommu_program_entries(struct cell *cell, u8 virtid)
{
unsigned int inst, i, tlbnum, idx, ent_count;
struct pvu_tlb_entry *ent, *cell_entries;
struct pvu_dev *dev;
- int ret, tlb_next;
+ int tlb_next;
cell_entries = cell->arch.iommu_pvu.entries;
ent_count = cell->arch.iommu_pvu.ent_count;
if (ent_count == 0 || cell_entries == NULL)
- return 0;
+ return;
/* Program same memory mapping for all of the instances */
for (inst = 0; inst < pvu_count; inst++) {
@@ -356,20 +358,15 @@ static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
if (idx == 0 && i >= dev->num_entries) {
/* Find next available TLB and chain to it */
tlb_next = pvu_tlb_alloc(dev, virtid);
- if (tlb_next < 0)
- return -ENOMEM;
pvu_tlb_chain(dev, tlbnum, tlb_next);
pvu_tlb_enable(dev, tlbnum);
tlbnum = tlb_next;
}
- ret = pvu_entry_write(dev, tlbnum, idx, ent);
- if (ret)
- return ret;
+ pvu_entry_write(dev, tlbnum, idx, ent);
}
pvu_tlb_enable(dev, tlbnum);
}
- return 0;
}
/*
@@ -434,13 +431,12 @@ int pvu_iommu_unmap_memory(struct cell *cell,
return 0;
}
-int pvu_iommu_config_commit(struct cell *cell)
+void pvu_iommu_config_commit(struct cell *cell_added_removed)
{
unsigned int i, virtid;
- int ret = 0;
- if (pvu_count == 0 || !cell)
- return 0;
+ if (pvu_count == 0 || !cell_added_removed)
+ return;
/*
* Chaining the TLB entries adds extra latency to translate those
@@ -448,20 +444,17 @@ int pvu_iommu_config_commit(struct cell *cell)
* Sort the entries in descending order of page sizes to reduce effects
* of chaining and thus reducing average translation latency
*/
- pvu_entrylist_sort(cell->arch.iommu_pvu.entries,
- cell->arch.iommu_pvu.ent_count);
+ pvu_entrylist_sort(cell_added_removed->arch.iommu_pvu.entries,
+ cell_added_removed->arch.iommu_pvu.ent_count);
- for_each_stream_id(virtid, cell->config, i) {
+ for_each_stream_id(virtid, cell_added_removed->config, i) {
if (virtid > MAX_VIRTID)
continue;
- ret = pvu_iommu_program_entries(cell, virtid);
- if (ret)
- return ret;
+ pvu_iommu_program_entries(cell_added_removed, virtid);
}
- cell->arch.iommu_pvu.ent_count = 0;
- return ret;
+ cell_added_removed->arch.iommu_pvu.ent_count = 0;
}
static int pvu_iommu_cell_init(struct cell *cell)
--
2.16.4