[PATCH] arm64: pvu: Avoid failure in config_commit

Nikhil Devshatwar

Oct 26, 2020, 3:53:04 PM
to jailho...@googlegroups.com, Jan Kiszka
The current PVU IOMMU implementation ignores possible failures in the
config_commit path. This would allow an inconsistent configuration
to run and may introduce unknown bugs.

Solve this by making sure that pvu_iommu_config_commit never fails.
Catch the errors early, in the mapping phase. Use "free_tlb_count"
to track the number of TLBs available for chaining. This can be used
to check whether any mapping would potentially use more TLBs than are
free, ensuring that the allocation for chaining will not fail.

Change the return type to void and remove the error handling in
the config_commit path.

Signed-off-by: Nikhil Devshatwar <nikh...@ti.com>
---
hypervisor/arch/arm64/include/asm/ti-pvu.h | 3 +-
hypervisor/arch/arm64/ti-pvu.c | 54 +++++++++++++---------
2 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/hypervisor/arch/arm64/include/asm/ti-pvu.h b/hypervisor/arch/arm64/include/asm/ti-pvu.h
index 2c340b3a..62aec7c0 100644
--- a/hypervisor/arch/arm64/include/asm/ti-pvu.h
+++ b/hypervisor/arch/arm64/include/asm/ti-pvu.h
@@ -117,6 +117,7 @@ struct pvu_dev {
u16 max_virtid;

u16 tlb_data[PVU_NUM_TLBS];
+ u16 free_tlb_count;
};

int pvu_iommu_map_memory(struct cell *cell,
@@ -125,6 +126,6 @@ int pvu_iommu_map_memory(struct cell *cell,
int pvu_iommu_unmap_memory(struct cell *cell,
const struct jailhouse_memory *mem);

-int pvu_iommu_config_commit(struct cell *cell);
+void pvu_iommu_config_commit(struct cell *cell);

#endif /* _IOMMMU_PVU_H_ */
diff --git a/hypervisor/arch/arm64/ti-pvu.c b/hypervisor/arch/arm64/ti-pvu.c
index 3b9a29ec..d96d01c9 100644
--- a/hypervisor/arch/arm64/ti-pvu.c
+++ b/hypervisor/arch/arm64/ti-pvu.c
@@ -15,7 +15,7 @@
* There are limitations on the number of available contexts, page sizes,
* number of pages that can be mapped, etc.
*
- * PVU is desgined to be programmed with all the memory mapping at once.
+ * PVU is designed to be programmed with all the memory mapping at once.
* Therefore, it defers the actual register programming till config_commit.
* Also, it does not support unmapping of the pages at runtime.
*
@@ -110,6 +110,7 @@ static u32 pvu_tlb_alloc(struct pvu_dev *dev, u16 virtid)
for (i = dev->max_virtid + 1; i < dev->num_tlbs; i++) {
if (dev->tlb_data[i] == 0) {
dev->tlb_data[i] = virtid << dev->num_entries;
+ dev->free_tlb_count--;
return i;
}
}
@@ -138,10 +139,13 @@ static void pvu_tlb_flush(struct pvu_dev *dev, u16 tlbnum)

mmio_write32(&tlb->chain, 0x0);

- if (i < dev->max_virtid)
+ if (i < dev->max_virtid) {
dev->tlb_data[tlbnum] = 0x0 | i << dev->num_entries;
- else
+ } else {
+ /* This was a chained TLB */
dev->tlb_data[tlbnum] = 0x0;
+ dev->free_tlb_count++;
+ }

}

@@ -198,7 +202,7 @@ static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_PGSIZE_MASK, pgsz);
mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_FLAG_MASK, ent->flags);

- /* Do we need "DSB NSH" here to make sure all writes are finised? */
+ /* Do we need "DSB NSH" here to make sure all writes are finished? */
pvu_entry_enable(dev, tlbnum, index);
return 0;
}
@@ -221,6 +225,8 @@ static u32 pvu_init_device(struct pvu_dev *dev, u16 max_virtid)
}

dev->max_virtid = max_virtid;
+ dev->free_tlb_count = dev->num_tlbs - (max_virtid + 1);
+
mmio_write32(&cfg->virtid_map1, 0);
mmio_write32_field(&cfg->virtid_map2, PVU_MAX_VIRTID_MASK, max_virtid);

@@ -328,17 +334,17 @@ static void pvu_entrylist_sort(struct pvu_tlb_entry *entlist, u32 num_entries)
}
}

-static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
+static void pvu_iommu_program_entries(struct cell *cell, u8 virtid)
{
unsigned int inst, i, tlbnum, idx, ent_count;
struct pvu_tlb_entry *ent, *cell_entries;
struct pvu_dev *dev;
- int ret, tlb_next;
+ int tlb_next;

cell_entries = cell->arch.iommu_pvu.entries;
ent_count = cell->arch.iommu_pvu.ent_count;
if (ent_count == 0 || cell_entries == NULL)
- return 0;
+ return;

/* Program same memory mapping for all of the instances */
for (inst = 0; inst < pvu_count; inst++) {
@@ -356,20 +362,15 @@ static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
if (idx == 0 && i >= dev->num_entries) {
/* Find next available TLB and chain to it */
tlb_next = pvu_tlb_alloc(dev, virtid);
- if (tlb_next < 0)
- return -ENOMEM;
pvu_tlb_chain(dev, tlbnum, tlb_next);
pvu_tlb_enable(dev, tlbnum);
tlbnum = tlb_next;
}

- ret = pvu_entry_write(dev, tlbnum, idx, ent);
- if (ret)
- return ret;
+ pvu_entry_write(dev, tlbnum, idx, ent);
}
pvu_tlb_enable(dev, tlbnum);
}
- return 0;
}

/*
@@ -380,8 +381,9 @@ int pvu_iommu_map_memory(struct cell *cell,
const struct jailhouse_memory *mem)
{
struct pvu_tlb_entry *ent;
+ struct pvu_dev *dev;
unsigned int size;
- u32 flags = 0;
+ u32 tlb_count, flags = 0;
int ret;

if (pvu_count == 0 || (mem->flags & JAILHOUSE_MEM_DMA) == 0)
@@ -408,7 +410,19 @@ int pvu_iommu_map_memory(struct cell *cell,
if (ret < 0)
return ret;

- cell->arch.iommu_pvu.ent_count += ret;
+ /*
+ * Check if there are enough TLBs left for *chaining* to ensure that
+ * pvu_tlb_alloc called from config_commit never fails
+ */
+ tlb_count = (cell->arch.iommu_pvu.ent_count + ret - 1) / 8;
+ dev = &pvu_units[0];
+
+ if (tlb_count > dev->free_tlb_count) {
+ printk("ERROR: PVU: Mapping this memory needs more TLBs than that are available\n");
+ return -EINVAL;
+ } else {
+ cell->arch.iommu_pvu.ent_count += ret;
+ }
return 0;
}

@@ -434,13 +448,12 @@ int pvu_iommu_unmap_memory(struct cell *cell,
return 0;
}

-int pvu_iommu_config_commit(struct cell *cell)
+void pvu_iommu_config_commit(struct cell *cell)
{
unsigned int i, virtid;
- int ret = 0;

if (pvu_count == 0 || !cell)
- return 0;
+ return;

/*
* Chaining the TLB entries adds extra latency to translate those
@@ -455,13 +468,10 @@ int pvu_iommu_config_commit(struct cell *cell)
if (virtid > MAX_VIRTID)
continue;

- ret = pvu_iommu_program_entries(cell, virtid);
- if (ret)
- return ret;
+ pvu_iommu_program_entries(cell, virtid);
}

cell->arch.iommu_pvu.ent_count = 0;
- return ret;
}

static int pvu_iommu_cell_init(struct cell *cell)
--
2.17.1
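
In essence, the patch makes map time reserve what commit time consumes.
A condensed sketch of the accounting above (paraphrased from the diff;
the divisor 8 corresponds to dev->num_entries, the entries per TLB):

	/* At init: every TLB beyond the per-virtid root TLBs is free for chaining. */
	dev->free_tlb_count = dev->num_tlbs - (max_virtid + 1);

	/* At map time: reject a mapping whose chained TLBs would overrun the pool. */
	tlb_count = (cell->arch.iommu_pvu.ent_count + ret - 1) / 8;
	if (tlb_count > dev->free_tlb_count)
		return -EINVAL;

	/* At commit time: pvu_tlb_alloc() can therefore always find a free slot. */

Note that tlb_count is recomputed from the running entry total on every
map call, so the map path only compares against free_tlb_count; the
actual decrement happens when config_commit chains the TLBs.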

Jan Kiszka

Oct 27, 2020, 1:33:07 PM
to Nikhil Devshatwar, jailho...@googlegroups.com
> return 0;

So we will never get here and never return 0? What prevents that is in
pvu_iommu_map_memory, right? Should be explained.

And maybe we should actually introduce a BUG() macro to crash
intentionally when reaching impossible states. Or we simply do

while (dev->tlb_data[i] != 0)
i++;

explaining why this loop is always finishing.
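
Spelled out, that variant might look like this (a sketch only; it
hinges on the map-time reservation guaranteeing a free slot):

	/* pvu_iommu_map_memory has already reserved enough free slots,
	 * so this scan terminates before running off the end of tlb_data[].
	 */
	i = dev->max_virtid + 1;
	while (dev->tlb_data[i] != 0)
		i++;
	dev->tlb_data[i] = virtid << dev->num_entries;
	dev->free_tlb_count--;
	return i;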
But what if pvu_entry_write finds an issue? Are we fine with reporting
only and then simply continuing?

Please clarify that in-place, i.e. via comments in pvu_entry_write(),
and remove ignored return values.
No need for "else" here.

> + cell->arch.iommu_pvu.ent_count += ret;
> + }
> return 0;
> }
>
> @@ -434,13 +448,12 @@ int pvu_iommu_unmap_memory(struct cell *cell,
> return 0;
> }
>
> -int pvu_iommu_config_commit(struct cell *cell)
> +void pvu_iommu_config_commit(struct cell *cell)
> {
> unsigned int i, virtid;
> - int ret = 0;
>
> if (pvu_count == 0 || !cell)
> - return 0;
> + return;
>
> /*
> * Chaining the TLB entries adds extra latency to translate those
> @@ -455,13 +468,10 @@ int pvu_iommu_config_commit(struct cell *cell)
> if (virtid > MAX_VIRTID)
> continue;
>
> - ret = pvu_iommu_program_entries(cell, virtid);
> - if (ret)
> - return ret;
> + pvu_iommu_program_entries(cell, virtid);
> }
>
> cell->arch.iommu_pvu.ent_count = 0;
> - return ret;
> }
>
> static int pvu_iommu_cell_init(struct cell *cell)
>

Jan

--
Siemens AG, T RDA IOT
Corporate Competence Center Embedded Linux

Ralf Ramsauer

Oct 27, 2020, 1:40:44 PM
to Jan Kiszka, Nikhil Devshatwar, jailho...@googlegroups.com
If it's about keeping the compiler silent, then a
__builtin_unreachable() does both: it gives a hint in the code that
there's no way to reach that point, and keeps the missing-return-value
warning silent.
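
Applied to pvu_tlb_alloc(), that could look like the following sketch
(assuming GCC or Clang, which both provide this builtin):

	static u32 pvu_tlb_alloc(struct pvu_dev *dev, u16 virtid)
	{
		unsigned int i;

		for (i = dev->max_virtid + 1; i < dev->num_tlbs; i++)
			if (dev->tlb_data[i] == 0) {
				dev->tlb_data[i] = virtid << dev->num_entries;
				dev->free_tlb_count--;
				return i;
			}

		/* pvu_iommu_map_memory reserved a free TLB, so this is unreachable */
		__builtin_unreachable();
	}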

Ralf

Jan Kiszka

Oct 27, 2020, 1:48:57 PM
to Ralf Ramsauer, Nikhil Devshatwar, jailho...@googlegroups.com
I know, though I'd rather NOT have the then-useless limit check
written down.

Jan

Nikhil Devshatwar

Oct 27, 2020, 3:24:17 PM
to Jan Kiszka, jailho...@googlegroups.com
Okay, will add a comment.
The failures in pvu_entry_write are defensive programming, there to
ensure that the hardware constraints are met. Here, the caller has
ensured the constraints, so this won't fail.

I can either get rid of the checks or explain why it is okay to
ignore the return value.
Got it, will remove.
Thanks for the review.
Nikhil D

Jan Kiszka

Oct 28, 2020, 7:30:05 AM
to Nikhil Devshatwar, jailho...@googlegroups.com
On 27.10.20 20:24, 'Nikhil Devshatwar' via Jailhouse wrote:
> On 18:33-20201027, Jan Kiszka wrote:
>> On 26.10.20 20:52, 'Nikhil Devshatwar' via Jailhouse wrote:
>>> @@ -356,20 +362,15 @@ static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
>>> if (idx == 0 && i >= dev->num_entries) {
>>> /* Find next available TLB and chain to it */
>>> tlb_next = pvu_tlb_alloc(dev, virtid);
>>> - if (tlb_next < 0)
>>> - return -ENOMEM;
>>> pvu_tlb_chain(dev, tlbnum, tlb_next);
>>> pvu_tlb_enable(dev, tlbnum);
>>> tlbnum = tlb_next;
>>> }
>>>
>>> - ret = pvu_entry_write(dev, tlbnum, idx, ent);
>>> - if (ret)
>>> - return ret;
>>> + pvu_entry_write(dev, tlbnum, idx, ent);
>>
>> But what if pvu_entry_write finds an issue? Are we fine with reporting
>> only and then simply continuing?
>>
> The failures in pvu_entry_write are defensive programming.
> To ensure that the hardware constraints are met.
> Here, the caller has ensured the constraints, so this won't fail.
>
> I can either get rid of the checks or explain why it is okay to
> ignore the return value

Are the checks run against an unlikely hardware state or against a
possibly invalid cell configuration?

Jan

Nikhil Devshatwar

Oct 28, 2020, 10:15:14 AM
to jailho...@googlegroups.com, Jan Kiszka, Ralf Ramsauer, Sekhar Nori
The current PVU IOMMU implementation ignores possible failures in the
config_commit path. This would allow an inconsistent configuration
to run and may introduce unknown bugs.

Solve this by making sure that pvu_iommu_config_commit never fails.
Catch the errors early, in the mapping phase. Use "free_tlb_count"
to track the number of TLBs available for chaining. This can be used
to check whether any mapping would potentially use more TLBs than are
free, ensuring that the allocation for chaining will not fail.

Change the return type to void for a few functions. Add comments to
explain the behavior in case of failure.

Signed-off-by: Nikhil Devshatwar <nikh...@ti.com>
---

Notes:
Changes from v1:
* Add a comment to describe why pvu_tlb_alloc will not fail
* Make the return type of pvu_entry_write void and explain the
behavior in case the constraints are not met
* Remove unnecessary else block

hypervisor/arch/arm64/include/asm/ti-pvu.h | 3 +-
hypervisor/arch/arm64/ti-pvu.c | 68 ++++++++++++++--------
2 files changed, 45 insertions(+), 26 deletions(-)

diff --git a/hypervisor/arch/arm64/include/asm/ti-pvu.h b/hypervisor/arch/arm64/include/asm/ti-pvu.h
index 2c340b3a..62aec7c0 100644
--- a/hypervisor/arch/arm64/include/asm/ti-pvu.h
+++ b/hypervisor/arch/arm64/include/asm/ti-pvu.h
@@ -117,6 +117,7 @@ struct pvu_dev {
u16 max_virtid;

u16 tlb_data[PVU_NUM_TLBS];
+ u16 free_tlb_count;
};

int pvu_iommu_map_memory(struct cell *cell,
@@ -125,6 +126,6 @@ int pvu_iommu_map_memory(struct cell *cell,
int pvu_iommu_unmap_memory(struct cell *cell,
const struct jailhouse_memory *mem);

-int pvu_iommu_config_commit(struct cell *cell);
+void pvu_iommu_config_commit(struct cell *cell);

#endif /* _IOMMMU_PVU_H_ */
diff --git a/hypervisor/arch/arm64/ti-pvu.c b/hypervisor/arch/arm64/ti-pvu.c
index 3b9a29ec..c488ce89 100644
--- a/hypervisor/arch/arm64/ti-pvu.c
+++ b/hypervisor/arch/arm64/ti-pvu.c
@@ -15,7 +15,7 @@
* There are limitations on the number of available contexts, page sizes,
* number of pages that can be mapped, etc.
*
- * PVU is desgined to be programmed with all the memory mapping at once.
+ * PVU is designed to be programmed with all the memory mapping at once.
* Therefore, it defers the actual register programming till config_commit.
* Also, it does not support unmapping of the pages at runtime.
*
@@ -110,9 +110,15 @@ static u32 pvu_tlb_alloc(struct pvu_dev *dev, u16 virtid)
for (i = dev->max_virtid + 1; i < dev->num_tlbs; i++) {
if (dev->tlb_data[i] == 0) {
dev->tlb_data[i] = virtid << dev->num_entries;
+ dev->free_tlb_count--;
return i;
}
}
+
+ /*
+ * We should never reach here; TLB allocation should not fail, since
+ * pvu_iommu_map_memory ensures that there are enough free TLBs.
+ */
return 0;
}

@@ -138,10 +144,13 @@ static void pvu_tlb_flush(struct pvu_dev *dev, u16 tlbnum)

mmio_write32(&tlb->chain, 0x0);

- if (i < dev->max_virtid)
+ if (i < dev->max_virtid) {
dev->tlb_data[tlbnum] = 0x0 | i << dev->num_entries;
- else
+ } else {
+ /* This was a chained TLB */
dev->tlb_data[tlbnum] = 0x0;
+ dev->free_tlb_count++;
+ }

}

@@ -159,7 +168,7 @@ static void pvu_entry_enable(struct pvu_dev *dev, u16 tlbnum, u8 index)
dev->tlb_data[tlbnum] |= (1 << index);
}

-static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
+static void pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
struct pvu_tlb_entry *ent)
{
struct pvu_hw_tlb_entry *entry;
@@ -174,17 +183,21 @@ static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
break;
}

+ /*
+ * If the hardware constraints for page size and address alignment
+ * are not met, print out an error and return without writing any entry.
+ */
if (pgsz >= ARRAY_SIZE(pvu_page_size_bytes)) {
printk("ERROR: PVU: %s: Unsupported page size %llx\n",
__func__, ent->size);
- return -EINVAL;
+ return;
}

if (!is_aligned(ent->virt_addr, ent->size) ||
!is_aligned(ent->phys_addr, ent->size)) {
printk("ERROR: PVU: %s: Address %llx => %llx is not aligned with size %llx\n",
__func__, ent->virt_addr, ent->phys_addr, ent->size);
- return -EINVAL;
+ return;
}

mmio_write32(&entry->reg0, ent->virt_addr & 0xffffffff);
@@ -198,9 +211,8 @@ static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_PGSIZE_MASK, pgsz);
mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_FLAG_MASK, ent->flags);

- /* Do we need "DSB NSH" here to make sure all writes are finised? */
+ /* Do we need "DSB NSH" here to make sure all writes are finished? */
pvu_entry_enable(dev, tlbnum, index);
- return 0;
}

static u32 pvu_init_device(struct pvu_dev *dev, u16 max_virtid)
@@ -221,6 +233,8 @@ static u32 pvu_init_device(struct pvu_dev *dev, u16 max_virtid)
}

dev->max_virtid = max_virtid;
+ dev->free_tlb_count = dev->num_tlbs - (max_virtid + 1);
+
mmio_write32(&cfg->virtid_map1, 0);
mmio_write32_field(&cfg->virtid_map2, PVU_MAX_VIRTID_MASK, max_virtid);

@@ -328,17 +342,17 @@ static void pvu_entrylist_sort(struct pvu_tlb_entry *entlist, u32 num_entries)
}
}

-static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
+static void pvu_iommu_program_entries(struct cell *cell, u8 virtid)
{
unsigned int inst, i, tlbnum, idx, ent_count;
struct pvu_tlb_entry *ent, *cell_entries;
struct pvu_dev *dev;
- int ret, tlb_next;
+ int tlb_next;

cell_entries = cell->arch.iommu_pvu.entries;
ent_count = cell->arch.iommu_pvu.ent_count;
if (ent_count == 0 || cell_entries == NULL)
- return 0;
+ return;

/* Program same memory mapping for all of the instances */
for (inst = 0; inst < pvu_count; inst++) {
@@ -356,20 +370,15 @@ static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
if (idx == 0 && i >= dev->num_entries) {
/* Find next available TLB and chain to it */
tlb_next = pvu_tlb_alloc(dev, virtid);
- if (tlb_next < 0)
- return -ENOMEM;
pvu_tlb_chain(dev, tlbnum, tlb_next);
pvu_tlb_enable(dev, tlbnum);
tlbnum = tlb_next;
}

- ret = pvu_entry_write(dev, tlbnum, idx, ent);
- if (ret)
- return ret;
+ pvu_entry_write(dev, tlbnum, idx, ent);
}
pvu_tlb_enable(dev, tlbnum);
}
- return 0;
}

/*
@@ -380,8 +389,9 @@ int pvu_iommu_map_memory(struct cell *cell,
const struct jailhouse_memory *mem)
{
struct pvu_tlb_entry *ent;
+ struct pvu_dev *dev;
unsigned int size;
- u32 flags = 0;
+ u32 tlb_count, flags = 0;
int ret;

if (pvu_count == 0 || (mem->flags & JAILHOUSE_MEM_DMA) == 0)
@@ -408,6 +418,18 @@ int pvu_iommu_map_memory(struct cell *cell,
if (ret < 0)
return ret;

+ /*
+ * Check if there are enough TLBs left for *chaining* to ensure that
+ * pvu_tlb_alloc called from config_commit never fails
+ */
+ tlb_count = (cell->arch.iommu_pvu.ent_count + ret - 1) / 8;
+ dev = &pvu_units[0];
+
+ if (tlb_count > dev->free_tlb_count) {
+ printk("ERROR: PVU: Mapping this memory needs more TLBs than that are available\n");
+ return -EINVAL;
+ }
+
cell->arch.iommu_pvu.ent_count += ret;
return 0;
}
@@ -434,13 +456,12 @@ int pvu_iommu_unmap_memory(struct cell *cell,
return 0;
}

-int pvu_iommu_config_commit(struct cell *cell)
+void pvu_iommu_config_commit(struct cell *cell)
{
unsigned int i, virtid;
- int ret = 0;

if (pvu_count == 0 || !cell)
- return 0;
+ return;

/*
* Chaining the TLB entries adds extra latency to translate those
@@ -455,13 +476,10 @@ int pvu_iommu_config_commit(struct cell *cell)
if (virtid > MAX_VIRTID)
continue;

- ret = pvu_iommu_program_entries(cell, virtid);
- if (ret)
- return ret;
+ pvu_iommu_program_entries(cell, virtid);
}

cell->arch.iommu_pvu.ent_count = 0;
- return ret;
}

static int pvu_iommu_cell_init(struct cell *cell)
--
2.17.1

Jan Kiszka

Oct 29, 2020, 3:14:27 AM
to Nikhil Devshatwar, jailho...@googlegroups.com, Ralf Ramsauer, Sekhar Nori
On 28.10.20 15:14, 'Nikhil Devshatwar' via Jailhouse wrote:
> Current PVU iommu implementation ignores possible failures in the
> config_commit part. This would allow inconsistent configuration
> to run and may introduce unknown bugs.
>
> Solve this by making sure that the pvu_iommu_config_commit never
> fails. Catch the errors early in the mapping phase. Use
> "free_tlb_count" to track available no of TLBs for chaining.
> This can be used to check if any mapping causes it to potentially
> use more no of TLBs than that are free. This will ensure that
> the allocationg for chaining will not fail.

allocating
If we never get here, the test for "i < dev->num_tlbs" is pointless. If
we could get here, we should not return.

> return 0;
> }
>
> @@ -138,10 +144,13 @@ static void pvu_tlb_flush(struct pvu_dev *dev, u16 tlbnum)
>
> mmio_write32(&tlb->chain, 0x0);
>
> - if (i < dev->max_virtid)
> + if (i < dev->max_virtid) {
> dev->tlb_data[tlbnum] = 0x0 | i << dev->num_entries;
> - else
> + } else {
> + /* This was a chained TLB */
> dev->tlb_data[tlbnum] = 0x0;
> + dev->free_tlb_count++;
> + }
>
> }
>
> @@ -159,7 +168,7 @@ static void pvu_entry_enable(struct pvu_dev *dev, u16 tlbnum, u8 index)
> dev->tlb_data[tlbnum] |= (1 << index);
> }
>
> -static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
> +static void pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
> struct pvu_tlb_entry *ent)
> {
> struct pvu_hw_tlb_entry *entry;
> @@ -174,17 +183,21 @@ static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
> break;
> }
>
> + /*
> + * If the hardware constraints for page size and address alignment
> + * are not met, print out an error, return without writing any entry
> + */

That's lacking reasoning *why* we can continue then. Again: Can the user
trigger this by providing an invalid config? I suspect so. Can we catch
that earlier?
Can you explain this math? Without reasoning about how that prevents the
overflow exactly, dropping a check from pvu_tlb_alloc() would be hard to
argue.

Jan Kiszka

Oct 29, 2020, 4:00:53 AM
to Nikhil Devshatwar, jailho...@googlegroups.com, Ralf Ramsauer, Sekhar Nori
If catching earlier is not easily possible, you could also consider
moving things to jailhouse-config-check.

Nikhil Devshatwar

Oct 29, 2020, 11:27:41 AM
to Jan Kiszka, jailho...@googlegroups.com, Ralf Ramsauer, Sekhar Nori
I would like to add some kind of BUG()
Do we have something like this?
No. It's not possible. As I explained, it was just defensive
programming. I can get rid of the checks and still no bad config will
trigger this.

> I suspect so. Can we catch that earlier?
The original intent was to write the PVU part such that it can be
easily ported to other hypervisors. That's why these checks exist.
Shall I remove them?
Looks like I will need a better comment here.

> > + tlb_count = (cell->arch.iommu_pvu.ent_count + ret - 1) / 8;
>
> Can you explain this math? Without reasoning about how that prevents the
> overflow exactly, dropping a check from pvu_tlb_alloc() would be hard to
> argue.

I am counting how many TLBs are needed for chaining:

Total TLBs = (number of entries + 7) / 8
Total TLBs for chaining = (number of entries + 7) / 8 - 1
which, in integer division, simplifies to (number of entries - 1) / 8

Let's say the user asks to map a region which, after breakdown, results
in 12 entries. Since this call accumulates the entries for all of the
mappings, it adds them to the previous count and finds that a total of
76 entries is needed.

That means (76 - 1) / 8 = 9 new allocations are needed.
If there are at least 9 free TLBs available, we are okay.
If not, we flag the failure early by failing the map call.
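
As a standalone sketch of that arithmetic (illustrative only; the
divisor is dev->num_entries, which is 8 on this hardware, and at least
one entry is assumed, as the driver guarantees):

	/* Total TLBs needed:        (entries + 7) / 8, rounding up.
	 * TLBs needed for chaining: one less, i.e. (entries - 1) / 8.
	 */
	static unsigned int chained_tlbs_needed(unsigned int entries)
	{
		return (entries - 1) / 8;
	}

	/* 76 entries -> (76 - 1) / 8 = 9 chained TLBs, as in the example. */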


Regards,
Nikhil D

Jan Kiszka

Oct 29, 2020, 12:10:01 PM
to Nikhil Devshatwar, jailho...@googlegroups.com, Ralf Ramsauer, Sekhar Nori
Not yet. Could be simply defined as

#define BUG() *(int *)0 = 0xdead
Yes, then it can go.
OK, there is a magic REGS_PER_TLB = 8, spread around in a hard-coded
manner. I didn't look as closely at this code yet as I did at the
smmu-v2. I suspect there is also some room for cleanups... ;)

Nikhil Devshatwar

Oct 29, 2020, 6:19:11 PM
to Jan Kiszka, jailho...@googlegroups.com, Ralf Ramsauer, Sekhar Nori
Okay, I will add this.
Will do.
Good catch. Actually, in all other places I am using dev->num_entries;
this is the only place I used the magic 8. I will change that as well.


> didn't look as close at this code yet as I did for the smmu-v2. I
> suspect there is also some room for cleanups... ;)

Thanks!
Nikhil D

Nikhil Devshatwar

Oct 30, 2020, 8:27:22 AM
to jailho...@googlegroups.com, Jan Kiszka, Ralf Ramsauer, Sekhar Nori
The current PVU IOMMU implementation ignores possible failures in the
config_commit path. This would allow an inconsistent configuration
to run and may introduce unknown bugs.

Solve this by making sure that pvu_iommu_config_commit never fails.
Catch the errors early, in the mapping phase. Use "free_tlb_count"
to track the number of TLBs available for chaining. This can be used
to check whether any mapping would potentially use more TLBs than are
free, ensuring that the allocation for chaining will not fail.

Change the return type to void for a few functions. Add comments to
explain the behavior in case of failure. Remove unnecessary checks
that would never trigger.

Signed-off-by: Nikhil Devshatwar <nikh...@ti.com>
---

Notes:
Changes from v2:
* Remove additional checks that would always pass
* Use a BUG() in case control reaches a point it shouldn't have

Changes from v1:
* Add a comment to describe why pvu_tlb_alloc will not fail
* Make the return type of pvu_entry_write void and explain the
behavior in case the constraints are not met
* Remove unnecessary else block

hypervisor/arch/arm64/include/asm/ti-pvu.h | 3 +-
hypervisor/arch/arm64/ti-pvu.c | 76 ++++++++++++----------
2 files changed, 42 insertions(+), 37 deletions(-)

diff --git a/hypervisor/arch/arm64/include/asm/ti-pvu.h b/hypervisor/arch/arm64/include/asm/ti-pvu.h
index 2c340b3a..62aec7c0 100644
--- a/hypervisor/arch/arm64/include/asm/ti-pvu.h
+++ b/hypervisor/arch/arm64/include/asm/ti-pvu.h
@@ -117,6 +117,7 @@ struct pvu_dev {
u16 max_virtid;

u16 tlb_data[PVU_NUM_TLBS];
+ u16 free_tlb_count;
};

int pvu_iommu_map_memory(struct cell *cell,
@@ -125,6 +126,6 @@ int pvu_iommu_map_memory(struct cell *cell,
int pvu_iommu_unmap_memory(struct cell *cell,
const struct jailhouse_memory *mem);

-int pvu_iommu_config_commit(struct cell *cell);
+void pvu_iommu_config_commit(struct cell *cell);

#endif /* _IOMMMU_PVU_H_ */
diff --git a/hypervisor/arch/arm64/ti-pvu.c b/hypervisor/arch/arm64/ti-pvu.c
index 3b9a29ec..98c0176b 100644
--- a/hypervisor/arch/arm64/ti-pvu.c
+++ b/hypervisor/arch/arm64/ti-pvu.c
@@ -15,7 +15,7 @@
* There are limitations on the number of available contexts, page sizes,
* number of pages that can be mapped, etc.
*
- * PVU is desgined to be programmed with all the memory mapping at once.
+ * PVU is designed to be programmed with all the memory mapping at once.
* Therefore, it defers the actual register programming till config_commit.
* Also, it does not support unmapping of the pages at runtime.
*
@@ -110,9 +110,17 @@ static u32 pvu_tlb_alloc(struct pvu_dev *dev, u16 virtid)
for (i = dev->max_virtid + 1; i < dev->num_tlbs; i++) {
if (dev->tlb_data[i] == 0) {
dev->tlb_data[i] = virtid << dev->num_entries;
+ dev->free_tlb_count--;
return i;
}
}
+
+ /*
+ * We should never reach here; TLB allocation should not fail, since
+ * pvu_iommu_map_memory ensures that there are enough free TLBs.
+ */
+
+ BUG();
return 0;
}

@@ -138,10 +146,13 @@ static void pvu_tlb_flush(struct pvu_dev *dev, u16 tlbnum)

mmio_write32(&tlb->chain, 0x0);

- if (i < dev->max_virtid)
+ if (i < dev->max_virtid) {
dev->tlb_data[tlbnum] = 0x0 | i << dev->num_entries;
- else
+ } else {
+ /* This was a chained TLB */
dev->tlb_data[tlbnum] = 0x0;
+ dev->free_tlb_count++;
+ }

}

@@ -159,7 +170,7 @@ static void pvu_entry_enable(struct pvu_dev *dev, u16 tlbnum, u8 index)
dev->tlb_data[tlbnum] |= (1 << index);
}

-static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
+static void pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
struct pvu_tlb_entry *ent)
{
struct pvu_hw_tlb_entry *entry;
@@ -174,19 +185,6 @@ static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
break;
}

- if (pgsz >= ARRAY_SIZE(pvu_page_size_bytes)) {
- printk("ERROR: PVU: %s: Unsupported page size %llx\n",
- __func__, ent->size);
- return -EINVAL;
- }
-
- if (!is_aligned(ent->virt_addr, ent->size) ||
- !is_aligned(ent->phys_addr, ent->size)) {
- printk("ERROR: PVU: %s: Address %llx => %llx is not aligned with size %llx\n",
- __func__, ent->virt_addr, ent->phys_addr, ent->size);
- return -EINVAL;
- }
-
mmio_write32(&entry->reg0, ent->virt_addr & 0xffffffff);
mmio_write32_field(&entry->reg1, 0xffff, (ent->virt_addr >> 32));
mmio_write32(&entry->reg2, 0x0);
@@ -198,9 +196,8 @@ static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_PGSIZE_MASK, pgsz);
mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_FLAG_MASK, ent->flags);

- /* Do we need "DSB NSH" here to make sure all writes are finised? */
+ /* Do we need "DSB NSH" here to make sure all writes are finished? */
pvu_entry_enable(dev, tlbnum, index);
- return 0;
}

static u32 pvu_init_device(struct pvu_dev *dev, u16 max_virtid)
@@ -221,6 +218,8 @@ static u32 pvu_init_device(struct pvu_dev *dev, u16 max_virtid)
}

dev->max_virtid = max_virtid;
+ dev->free_tlb_count = dev->num_tlbs - (max_virtid + 1);
+
mmio_write32(&cfg->virtid_map1, 0);
mmio_write32_field(&cfg->virtid_map2, PVU_MAX_VIRTID_MASK, max_virtid);

@@ -328,17 +327,17 @@ static void pvu_entrylist_sort(struct pvu_tlb_entry *entlist, u32 num_entries)
}
}

-static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
+static void pvu_iommu_program_entries(struct cell *cell, u8 virtid)
{
unsigned int inst, i, tlbnum, idx, ent_count;
struct pvu_tlb_entry *ent, *cell_entries;
struct pvu_dev *dev;
- int ret, tlb_next;
+ int tlb_next;

cell_entries = cell->arch.iommu_pvu.entries;
ent_count = cell->arch.iommu_pvu.ent_count;
if (ent_count == 0 || cell_entries == NULL)
- return 0;
+ return;

/* Program same memory mapping for all of the instances */
for (inst = 0; inst < pvu_count; inst++) {
@@ -356,20 +355,15 @@ static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
if (idx == 0 && i >= dev->num_entries) {
/* Find next available TLB and chain to it */
tlb_next = pvu_tlb_alloc(dev, virtid);
- if (tlb_next < 0)
- return -ENOMEM;
pvu_tlb_chain(dev, tlbnum, tlb_next);
pvu_tlb_enable(dev, tlbnum);
tlbnum = tlb_next;
}

- ret = pvu_entry_write(dev, tlbnum, idx, ent);
- if (ret)
- return ret;
+ pvu_entry_write(dev, tlbnum, idx, ent);
}
pvu_tlb_enable(dev, tlbnum);
}
- return 0;
}

/*
@@ -380,8 +374,9 @@ int pvu_iommu_map_memory(struct cell *cell,
const struct jailhouse_memory *mem)
{
struct pvu_tlb_entry *ent;
+ struct pvu_dev *dev;
unsigned int size;
- u32 flags = 0;
+ u32 tlb_count, flags = 0;
int ret;

if (pvu_count == 0 || (mem->flags & JAILHOUSE_MEM_DMA) == 0)
@@ -408,6 +403,19 @@ int pvu_iommu_map_memory(struct cell *cell,
if (ret < 0)
return ret;

+ /*
+ * Check if there are enough TLBs left for *chaining* to ensure that
+ * pvu_tlb_alloc called from config_commit never fails
+ */
+ dev = &pvu_units[0];
+ tlb_count = (cell->arch.iommu_pvu.ent_count + ret - 1) /
+ dev->num_entries;
+
+ if (tlb_count > dev->free_tlb_count) {
+ printk("ERROR: PVU: Mapping this memory needs more TLBs than that are available\n");
+ return -EINVAL;
+ }
+
cell->arch.iommu_pvu.ent_count += ret;
return 0;
}
@@ -434,13 +442,12 @@ int pvu_iommu_unmap_memory(struct cell *cell,
return 0;
}

-int pvu_iommu_config_commit(struct cell *cell)
+void pvu_iommu_config_commit(struct cell *cell)
{
unsigned int i, virtid;
- int ret = 0;

if (pvu_count == 0 || !cell)
- return 0;
+ return;

/*
* Chaining the TLB entries adds extra latency to translate those
@@ -455,13 +462,10 @@ int pvu_iommu_config_commit(struct cell *cell)
if (virtid > MAX_VIRTID)
continue;

- ret = pvu_iommu_program_entries(cell, virtid);
- if (ret)
- return ret;
+ pvu_iommu_program_entries(cell, virtid);
}

cell->arch.iommu_pvu.ent_count = 0;
- return ret;
}

static int pvu_iommu_cell_init(struct cell *cell)
--
2.17.1

Nikhil Devshatwar

Nov 6, 2020, 3:25:12 AM
to jailho...@googlegroups.com, Jan Kiszka, Ralf Ramsauer, Sekhar Nori
On 17:57-20201030, 'Nikhil Devshatwar' via Jailhouse wrote:
> Signed-off-by: Nikhil Devshatwar <nikh...@ti.com>

ping on this

Nikhil D

Jan Kiszka

Nov 6, 2020, 3:41:14 AM
to Nikhil Devshatwar, jailho...@googlegroups.com, Ralf Ramsauer, Sekhar Nori
On 06.11.20 09:25, Nikhil Devshatwar wrote:
> On 17:57-20201030, 'Nikhil Devshatwar' via Jailhouse wrote:
>> Signed-off-by: Nikhil Devshatwar <nikh...@ti.com>
>
> ping on this
>

Sorry, long backlog. I'll try to work through the Jailhouse patch
backlog soon, on Monday at the latest.

Jan Kiszka

Nov 8, 2020, 5:27:00 PM
to Nikhil Devshatwar, jailho...@googlegroups.com, Ralf Ramsauer, Sekhar Nori
Thanks, applied - as well as the BUG() patch and ELR dumping.