The current PVU IOMMU implementation ignores possible failures in the
config_commit path. This can allow an inconsistent configuration to
run and may introduce hard-to-find bugs.
Solve this by making sure that pvu_iommu_config_commit can never
fail, and catch the errors early in the mapping phase instead. Use
"free_tlb_count" to track the number of TLBs still available for
chaining, and check whether a new mapping would need more TLBs than
are free. This guarantees that the TLB allocation for chaining cannot
fail at commit time.
Change the return type of pvu_iommu_config_commit to void and remove
the error handling from the config_commit path.
Signed-off-by: Nikhil Devshatwar <nikh...@ti.com>
---
hypervisor/arch/arm64/include/asm/ti-pvu.h | 3 +-
hypervisor/arch/arm64/ti-pvu.c | 54 +++++++++++++---------
2 files changed, 34 insertions(+), 23 deletions(-)
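
For reviewers: below is a minimal standalone sketch of the
reserve-at-map-time pattern this patch follows. All names in it
(ENTRIES_PER_TLB, demo_map, demo_commit) are hypothetical stand-ins
for the driver internals; it only illustrates why checking
free_tlb_count in the map phase lets config_commit assume the
chaining allocation succeeds.

#include <assert.h>
#include <stdio.h>

#define ENTRIES_PER_TLB 8       /* assumed here; the driver reads it from HW */

static unsigned int free_tlb_count = 4; /* TLBs still free for chaining */
static unsigned int ent_count;          /* entries queued so far */

/* Map phase: do the accounting check early and fail here, not later. */
static int demo_map(unsigned int new_entries)
{
        /* Entries beyond the first TLB spill into chained TLBs. */
        unsigned int chained = (ent_count + new_entries - 1) / ENTRIES_PER_TLB;

        if (chained > free_tlb_count) {
                printf("map rejected: needs %u chained TLBs, %u free\n",
                       chained, free_tlb_count);
                return -1;
        }
        ent_count += new_entries;
        return 0;
}

/* Commit phase: chained-TLB allocation can now be assumed to succeed. */
static void demo_commit(void)
{
        unsigned int chained = ent_count ?
                (ent_count - 1) / ENTRIES_PER_TLB : 0;

        assert(chained <= free_tlb_count); /* guaranteed by demo_map() */
        free_tlb_count -= chained;
        printf("committed %u entries using %u chained TLBs\n",
               ent_count, chained);
        ent_count = 0;
}

int main(void)
{
        if (demo_map(20) == 0)  /* 20 entries -> 2 chained TLBs, fits */
                demo_commit();
        if (demo_map(100) != 0) /* would need 12 chained TLBs, rejected */
                printf("commit never sees this mapping\n");
        return 0;
}
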
diff --git a/hypervisor/arch/arm64/include/asm/ti-pvu.h b/hypervisor/arch/arm64/include/asm/ti-pvu.h
index 2c340b3a..62aec7c0 100644
--- a/hypervisor/arch/arm64/include/asm/ti-pvu.h
+++ b/hypervisor/arch/arm64/include/asm/ti-pvu.h
@@ -117,6 +117,7 @@ struct pvu_dev {
u16 max_virtid;
u16 tlb_data[PVU_NUM_TLBS];
+ u16 free_tlb_count;
};
int pvu_iommu_map_memory(struct cell *cell,
@@ -125,6 +126,6 @@ int pvu_iommu_map_memory(struct cell *cell,
int pvu_iommu_unmap_memory(struct cell *cell,
const struct jailhouse_memory *mem);
-int pvu_iommu_config_commit(struct cell *cell);
+void pvu_iommu_config_commit(struct cell *cell);
#endif /* _IOMMMU_PVU_H_ */
diff --git a/hypervisor/arch/arm64/ti-pvu.c b/hypervisor/arch/arm64/ti-pvu.c
index 3b9a29ec..d96d01c9 100644
--- a/hypervisor/arch/arm64/ti-pvu.c
+++ b/hypervisor/arch/arm64/ti-pvu.c
@@ -15,7 +15,7 @@
* There are limitations on the number of available contexts, page sizes,
* number of pages that can be mapped, etc.
*
- * PVU is desgined to be programmed with all the memory mapping at once.
+ * PVU is designed to be programmed with all the memory mapping at once.
* Therefore, it defers the actual register programming till config_commit.
* Also, it does not support unmapping of the pages at runtime.
*
@@ -110,6 +110,7 @@ static u32 pvu_tlb_alloc(struct pvu_dev *dev, u16 virtid)
for (i = dev->max_virtid + 1; i < dev->num_tlbs; i++) {
if (dev->tlb_data[i] == 0) {
dev->tlb_data[i] = virtid << dev->num_entries;
+ dev->free_tlb_count--;
return i;
}
}
@@ -138,10 +139,13 @@ static void pvu_tlb_flush(struct pvu_dev *dev, u16 tlbnum)
mmio_write32(&tlb->chain, 0x0);
- if (i < dev->max_virtid)
+ if (i < dev->max_virtid) {
dev->tlb_data[tlbnum] = 0x0 | i << dev->num_entries;
- else
+ } else {
+ /* This was a chained TLB */
dev->tlb_data[tlbnum] = 0x0;
+ dev->free_tlb_count++;
+ }
}
@@ -198,7 +202,7 @@ static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_PGSIZE_MASK, pgsz);
mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_FLAG_MASK, ent->flags);
- /* Do we need "DSB NSH" here to make sure all writes are finised? */
+ /* Do we need "DSB NSH" here to make sure all writes are finished? */
pvu_entry_enable(dev, tlbnum, index);
return 0;
}
@@ -221,6 +225,8 @@ static u32 pvu_init_device(struct pvu_dev *dev, u16 max_virtid)
}
dev->max_virtid = max_virtid;
+ dev->free_tlb_count = dev->num_tlbs - (max_virtid + 1);
+
mmio_write32(&cfg->virtid_map1, 0);
mmio_write32_field(&cfg->virtid_map2, PVU_MAX_VIRTID_MASK, max_virtid);
@@ -328,17 +334,17 @@ static void pvu_entrylist_sort(struct pvu_tlb_entry *entlist, u32 num_entries)
}
}
-static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
+static void pvu_iommu_program_entries(struct cell *cell, u8 virtid)
{
unsigned int inst, i, tlbnum, idx, ent_count;
struct pvu_tlb_entry *ent, *cell_entries;
struct pvu_dev *dev;
- int ret, tlb_next;
+ int tlb_next;
cell_entries = cell->arch.iommu_pvu.entries;
ent_count = cell->arch.iommu_pvu.ent_count;
if (ent_count == 0 || cell_entries == NULL)
- return 0;
+ return;
/* Program same memory mapping for all of the instances */
for (inst = 0; inst < pvu_count; inst++) {
@@ -356,20 +362,15 @@ static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
if (idx == 0 && i >= dev->num_entries) {
/* Find next available TLB and chain to it */
tlb_next = pvu_tlb_alloc(dev, virtid);
- if (tlb_next < 0)
- return -ENOMEM;
pvu_tlb_chain(dev, tlbnum, tlb_next);
pvu_tlb_enable(dev, tlbnum);
tlbnum = tlb_next;
}
- ret = pvu_entry_write(dev, tlbnum, idx, ent);
- if (ret)
- return ret;
+ pvu_entry_write(dev, tlbnum, idx, ent);
}
pvu_tlb_enable(dev, tlbnum);
}
- return 0;
}
/*
@@ -380,8 +381,9 @@ int pvu_iommu_map_memory(struct cell *cell,
const struct jailhouse_memory *mem)
{
struct pvu_tlb_entry *ent;
+ struct pvu_dev *dev;
unsigned int size;
- u32 flags = 0;
+ u32 tlb_count, flags = 0;
int ret;
if (pvu_count == 0 || (mem->flags & JAILHOUSE_MEM_DMA) == 0)
@@ -408,7 +410,19 @@ int pvu_iommu_map_memory(struct cell *cell,
if (ret < 0)
return ret;
- cell->arch.iommu_pvu.ent_count += ret;
+ /*
+ * Check if there are enough TLBs left for *chaining* to ensure that
+ * pvu_tlb_alloc called from config_commit never fails
+ */
+ tlb_count = (cell->arch.iommu_pvu.ent_count + ret - 1) / 8;
+ dev = &pvu_units[0];
+
+ if (tlb_count > dev->free_tlb_count) {
+ printk("ERROR: PVU: Mapping this memory needs more TLBs than that are available\n");
+ return -EINVAL;
+ } else {
+ cell->arch.iommu_pvu.ent_count += ret;
+ }
return 0;
}
@@ -434,13 +448,12 @@ int pvu_iommu_unmap_memory(struct cell *cell,
return 0;
}
-int pvu_iommu_config_commit(struct cell *cell)
+void pvu_iommu_config_commit(struct cell *cell)
{
unsigned int i, virtid;
- int ret = 0;
if (pvu_count == 0 || !cell)
- return 0;
+ return;
/*
* Chaining the TLB entries adds extra latency to translate those
@@ -455,13 +468,10 @@ int pvu_iommu_config_commit(struct cell *cell)
if (virtid > MAX_VIRTID)
continue;
- ret = pvu_iommu_program_entries(cell, virtid);
- if (ret)
- return ret;
+ pvu_iommu_program_entries(cell, virtid);
}
cell->arch.iommu_pvu.ent_count = 0;
- return ret;
}
static int pvu_iommu_cell_init(struct cell *cell)
--
2.17.1