Add support for Texas Instruments' Peripheral Virtualization Unit
* Define a new IOMMU type and extra fields in the platform_data
* Add new config option CONFIG_IOMMU_TI_PVU
* Integrate with the arm iommu support such that multiple types
of IOMMU can be supported.
Signed-off-by: Nikhil Devshatwar <
nikh...@ti.com>
---
hypervisor/arch/arm-common/include/asm/cell.h | 7 +
.../arch/arm-common/include/asm/iommu.h | 1 +
.../arch/arm-common/include/asm/ti-pvu.h | 32 +
hypervisor/arch/arm-common/iommu.c | 9 +
hypervisor/arch/arm64/Kbuild | 1 +
hypervisor/arch/arm64/ti-pvu.c | 556 ++++++++++++++++++
hypervisor/arch/arm64/ti-pvu_priv.h | 141 +++++
include/jailhouse/cell-config.h | 4 +
8 files changed, 751 insertions(+)
create mode 100644 hypervisor/arch/arm-common/include/asm/ti-pvu.h
create mode 100644 hypervisor/arch/arm64/ti-pvu.c
create mode 100644 hypervisor/arch/arm64/ti-pvu_priv.h
diff --git a/hypervisor/arch/arm-common/include/asm/cell.h b/hypervisor/arch/arm-common/include/asm/cell.h
index 5b1e4207..9c6e8c6f 100644
--- a/hypervisor/arch/arm-common/include/asm/cell.h
+++ b/hypervisor/arch/arm-common/include/asm/cell.h
@@ -15,10 +15,17 @@
#include <jailhouse/paging.h>
+struct pvu_tlb_entry;
+
struct arch_cell {
struct paging_structures mm;
u32 irq_bitmap[1024/32];
+
+ struct {
+ u8 ent_count;
+ struct pvu_tlb_entry *entries;
+ } iommu_pvu; /**< ARM PVU specific fields. */
};
#endif /* !_JAILHOUSE_ASM_CELL_H */
diff --git a/hypervisor/arch/arm-common/include/asm/iommu.h b/hypervisor/arch/arm-common/include/asm/iommu.h
index dde762c0..399248dc 100644
--- a/hypervisor/arch/arm-common/include/asm/iommu.h
+++ b/hypervisor/arch/arm-common/include/asm/iommu.h
@@ -16,6 +16,7 @@
#include <jailhouse/cell.h>
#include <jailhouse/utils.h>
#include <jailhouse/cell-config.h>
+#include <asm/ti-pvu.h>
#define for_each_stream_id(sid, config, counter) \
for ((sid) = (jailhouse_cell_stream_ids(config)[0]), (counter) = 0; \
diff --git a/hypervisor/arch/arm-common/include/asm/ti-pvu.h b/hypervisor/arch/arm-common/include/asm/ti-pvu.h
new file mode 100644
index 00000000..a3ef72f7
--- /dev/null
+++ b/hypervisor/arch/arm-common/include/asm/ti-pvu.h
@@ -0,0 +1,32 @@
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) 2018 Texas Instruments Incorporated -
http://www.ti.com/
+ *
+ * TI PVU IOMMU unit API headers
+ *
+ * Authors:
+ * Nikhil Devshatwar <
nikh...@ti.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef _IOMMMU_PVU_H_
+#define _IOMMMU_PVU_H_
+
+#include <jailhouse/config.h>
+
+#ifdef CONFIG_IOMMU_TI_PVU
+
+int pvu_iommu_map_memory(struct cell *cell,
+ const struct jailhouse_memory *mem);
+
+int pvu_iommu_unmap_memory(struct cell *cell,
+ const struct jailhouse_memory *mem);
+
+int pvu_iommu_config_commit(struct cell *cell);
+
+#endif /* CONFIG_IOMMU_TI_PVU */
+
+#endif /* _IOMMMU_PVU_H_ */
diff --git a/hypervisor/arch/arm-common/iommu.c b/hypervisor/arch/arm-common/iommu.c
index b3100d03..b6b61f52 100644
--- a/hypervisor/arch/arm-common/iommu.c
+++ b/hypervisor/arch/arm-common/iommu.c
@@ -26,15 +26,24 @@ unsigned int iommu_count_units(void)
int iommu_map_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
+#ifdef CONFIG_IOMMU_TI_PVU
+ return pvu_iommu_map_memory(cell, mem);
+#endif
return 0;
}
int iommu_unmap_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
+#ifdef CONFIG_IOMMU_TI_PVU
+ return pvu_iommu_unmap_memory(cell, mem);
+#endif
return 0;
}
void iommu_config_commit(struct cell *cell)
{
+#ifdef CONFIG_IOMMU_TI_PVU
+ pvu_iommu_config_commit(cell);
+#endif
}
diff --git a/hypervisor/arch/arm64/Kbuild b/hypervisor/arch/arm64/Kbuild
index 323b78b6..8012c46e 100644
--- a/hypervisor/arch/arm64/Kbuild
+++ b/hypervisor/arch/arm64/Kbuild
@@ -21,3 +21,4 @@ always := lib.a
lib-y := $(common-objs-y)
lib-y += entry.o setup.o control.o mmio.o paging.o caches.o traps.o smmu-v3.o
+lib-$(CONFIG_IOMMU_TI_PVU) += ti-pvu.o
diff --git a/hypervisor/arch/arm64/ti-pvu.c b/hypervisor/arch/arm64/ti-pvu.c
new file mode 100644
index 00000000..02380baa
--- /dev/null
+++ b/hypervisor/arch/arm64/ti-pvu.c
@@ -0,0 +1,556 @@
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) 2018 Texas Instruments Incorporated -
http://www.ti.com/
+ *
+ * TI PVU IOMMU unit
+ *
+ * Peripheral Virtualization Unit(PVU) is an IOMMU (memory management
+ * unit for DMA) which is designed for 2nd stage address translation in a
+ * real time manner.
+ *
+ * Unlike ARM-SMMU, all the memory mapping information is stored in the
+ * local registers instead of the in-memory page tables.
+ *
+ * There are limitations on the number of available contexts, page sizes,
+ * number of pages that can be mapped, etc.
+ *
+ * PVU is designed to be programmed with all the memory mapping at once.
+ * Therefore, it defers the actual register programming till config_commit.
+ * Also, it does not support unmapping of the pages at runtime.
+ *
+ * Authors:
+ * Nikhil Devshatwar <
nikh...@ti.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <jailhouse/unit.h>
+#include <jailhouse/cell.h>
+#include <jailhouse/entry.h>
+#include <jailhouse/paging.h>
+#include <jailhouse/control.h>
+#include <jailhouse/printk.h>
+#include <asm/iommu.h>
+#include <asm/ti-pvu.h>
+#include "ti-pvu_priv.h"
+
+#define MAX_PVU_ENTRIES (PAGE_SIZE / sizeof (struct pvu_tlb_entry))
+#define MAX_VIRTID 7
+
+static struct pvu_dev pvu_units[JAILHOUSE_MAX_IOMMU_UNITS];
+static unsigned int pvu_count;
+
+static const u64 PVU_PAGE_SIZE_BYTES[] = {
+ [LPAE_PAGE_SZ_4K] = 4 * 1024,
+ [LPAE_PAGE_SZ_16K] = 16 * 1024,
+ [LPAE_PAGE_SZ_64K] = 64 * 1024,
+ [LPAE_PAGE_SZ_2M] = 2 * 1024 * 1024,
+ [LPAE_PAGE_SZ_32M] = 32 * 1024 * 1024,
+ [LPAE_PAGE_SZ_512M] = 512 * 1024 * 1024,
+ [LPAE_PAGE_SZ_1G] = 1 * 1024 * 1024 * 1024,
+ [LPAE_PAGE_SZ_16G] = 16ULL * 1024 * 1024 * 1024,
+};
+
+static inline u32 is_aligned(u64 addr, u64 size)
+{
+ return (addr % size) == 0;
+}
+
+static void pvu_tlb_enable(struct pvu_dev *dev, u16 tlbnum)
+{
+ struct pvu_hw_tlb *tlb;
+
+ tlb = (struct pvu_hw_tlb *)dev->tlb_base + tlbnum;
+ mmio_write32_field(&tlb->chain, PVU_TLB_LOG_DIS_MASK, 0);
+ mmio_write32_field(&tlb->chain, PVU_TLB_EN_MASK, 1);
+}
+
+static void pvu_tlb_disable(struct pvu_dev *dev, u16 tlbnum)
+{
+ struct pvu_hw_tlb *tlb;
+
+ tlb = (struct pvu_hw_tlb *)dev->tlb_base + tlbnum;
+ mmio_write32_field(&tlb->chain, PVU_TLB_EN_MASK, 0);
+ mmio_write32_field(&tlb->chain, PVU_TLB_LOG_DIS_MASK, 1);
+}
+
+static u32 pvu_tlb_is_enabled(struct pvu_dev *dev, u16 tlbnum)
+{
+ struct pvu_hw_tlb *tlb;
+
+ tlb = (struct pvu_hw_tlb *)dev->tlb_base + tlbnum;
+ if (mmio_read32_field(&tlb->chain, PVU_TLB_EN_MASK))
+ return 1;
+ else
+ return 0;
+}
+
+static int pvu_tlb_chain(struct pvu_dev *dev, u16 tlbnum, u16 tlb_next)
+{
+ struct pvu_hw_tlb *tlb;
+
+ if (tlb_next <= tlbnum || tlb_next <= dev->max_virtid)
+ return -EINVAL;
+
+ tlb = (struct pvu_hw_tlb *)dev->tlb_base + tlbnum;
+ mmio_write32_field(&tlb->chain, PVU_TLB_CHAIN_MASK, tlb_next);
+ return 0;
+}
+
+static u32 pvu_tlb_next(struct pvu_dev *dev, u16 tlbnum)
+{
+ struct pvu_hw_tlb *tlb;
+
+ tlb = (struct pvu_hw_tlb *)dev->tlb_base + tlbnum;
+ return mmio_read32_field(&tlb->chain, PVU_TLB_CHAIN_MASK);
+}
+
+static u32 pvu_tlb_alloc(struct pvu_dev *dev, u16 virtid)
+{
+ int i;
+
+ for (i = dev->max_virtid + 1; i < dev->num_tlbs; i++) {
+ if (dev->tlb_data[i] == 0) {
+ dev->tlb_data[i] = virtid << dev->num_entries;
+ return i;
+ }
+ }
+ return 0;
+}
+
+static void pvu_tlb_flush(struct pvu_dev *dev, u16 tlbnum)
+{
+	struct pvu_hw_tlb_entry *entry;
+	struct pvu_hw_tlb *tlb;
+	u32 i;
+
+	pvu_tlb_disable(dev, tlbnum);
+	tlb = (struct pvu_hw_tlb *)dev->tlb_base + tlbnum;
+
+	for (i = 0; i < dev->num_entries; i++) {
+
+		entry = &tlb->entry[i];
+		mmio_write32(&entry->reg0, 0x0);
+		mmio_write32(&entry->reg1, 0x0);
+		mmio_write32(&entry->reg2, 0x0);
+		mmio_write32(&entry->reg4, 0x0);
+		mmio_write32(&entry->reg5, 0x0);
+		mmio_write32(&entry->reg6, 0x0);
+	}
+
+	mmio_write32(&tlb->chain, 0x0);
+	pvu_tlb_disable(dev, tlbnum);
+
+	/* Index tlb_data by the TLB being flushed, not the loop counter */
+	if (tlbnum < dev->max_virtid)
+		dev->tlb_data[tlbnum] = 0x0 | tlbnum << dev->num_entries;
+	else
+		dev->tlb_data[tlbnum] = 0x0;
+}
+
+static void pvu_entry_enable(struct pvu_dev *dev, u16 tlbnum, u8 index)
+{
+ struct pvu_hw_tlb_entry *entry;
+ struct pvu_hw_tlb *tlb;
+
+ tlb = (struct pvu_hw_tlb *)dev->tlb_base + tlbnum;
+ entry = &tlb->entry[index];
+
+ mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_MODE_MASK,
+ PVU_TLB_ENTRY_VALID);
+
+ dev->tlb_data[tlbnum] |= (1 << index);
+}
+
+static int pvu_entry_write(struct pvu_dev *dev, u16 tlbnum, u8 index,
+ struct pvu_tlb_entry *ent)
+{
+ struct pvu_hw_tlb_entry *entry;
+ struct pvu_hw_tlb *tlb;
+ u8 pgsz;
+
+ tlb = (struct pvu_hw_tlb *)dev->tlb_base + tlbnum;
+ entry = &tlb->entry[index];
+
+ for (pgsz = 0; pgsz < ARRAY_SIZE(PVU_PAGE_SIZE_BYTES); pgsz++) {
+ if (ent->size == PVU_PAGE_SIZE_BYTES[pgsz])
+ break;
+ }
+
+ if (pgsz >= ARRAY_SIZE(PVU_PAGE_SIZE_BYTES)) {
+ printk("ERROR: PVU: %s: Unsupported page size %llx\n",
+ __func__, ent->size);
+ return -EINVAL;
+ }
+
+ if (!is_aligned(ent->virt_addr, ent->size) ||
+ !is_aligned(ent->phys_addr, ent->size)) {
+ printk("ERROR: PVU: %s: Address %llx => %llx is not aligned with size %llx\n",
+ __func__, ent->virt_addr, ent->phys_addr, ent->size);
+ return -EINVAL;
+ }
+
+ mmio_write32(&entry->reg0, ent->virt_addr & 0xffffffff);
+ mmio_write32_field(&entry->reg1, 0xffff, (ent->virt_addr >> 32));
+ mmio_write32(&entry->reg2, 0x0);
+
+ mmio_write32(&entry->reg4, ent->phys_addr & 0xffffffff);
+ mmio_write32_field(&entry->reg5, 0xffff, (ent->phys_addr >> 32));
+ mmio_write32(&entry->reg6, 0x0);
+
+ mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_PGSIZE_MASK, pgsz);
+ mmio_write32_field(&entry->reg2, PVU_TLB_ENTRY_FLAG_MASK, ent->flags);
+
+	/* Do we need "DSB NSH" here to make sure all writes are finished? */
+ pvu_entry_enable(dev, tlbnum, index);
+ return 0;
+}
+
+static int pvu_init_device(struct pvu_dev *dev, u16 max_virtid)
+{
+	struct pvu_hw_cfg *cfg;
+	int i;
+
+	cfg = (struct pvu_hw_cfg *)dev->cfg_base;
+
+	dev->num_tlbs = mmio_read32_field(&cfg->config,
+			PVU_CONFIG_NTLB_MASK);
+	dev->num_entries = mmio_read32_field(&cfg->config,
+			PVU_CONFIG_NENT_MASK);
+
+	if (max_virtid >= dev->num_tlbs) {
+		printk("ERROR: PVU: Max virtid(%d) should be less than num_tlbs(%d)\n",
+			max_virtid, dev->num_tlbs);
+		return -EINVAL;
+	}
+
+	dev->max_virtid = max_virtid;
+	mmio_write32(&cfg->virtid_map1, 0);
+	mmio_write32_field(&cfg->virtid_map2, PVU_MAX_VIRTID_MASK, max_virtid);
+
+	for (i = 0; i < dev->num_tlbs; i++) {
+
+		pvu_tlb_disable(dev, i);
+		if (i < dev->max_virtid)
+			dev->tlb_data[i] = 0x0 | i << dev->num_entries;
+		else
+			dev->tlb_data[i] = 0x0;
+	}
+
+	/* Enable all types of exceptions */
+	mmio_write32(&cfg->exception_logging_disable, 0x0);
+	mmio_write32(&cfg->exception_logging_control, 0x0);
+	mmio_write32_field(&cfg->enable, PVU_enable_MASK, PVU_enable_EN);
+	return 0;
+}
+
+
+
+/*
+ * Split a memory region into multiple pages, where page size is one of the PVU
+ * supported size and the start address is aligned to page size
+ */
+static int pvu_entrylist_create(u64 ipa, u64 pa, u64 map_size,
+ u64 flags, struct pvu_tlb_entry *entlist, u32 num_entries)
+{
+ u8 num_sizes = ARRAY_SIZE(PVU_PAGE_SIZE_BYTES);
+ u64 page_size, vaddr, paddr;
+ s64 size, i, aligned, count;
+
+ vaddr = ipa;
+ paddr = pa;
+ size = map_size;
+ count = 0;
+
+ while (size) {
+
+ if (count == num_entries) {
+ printk("ERROR: PVU: Need more TLB entries for mapping %llx => %llx with size %llx\n",
+ ipa, pa, map_size);
+ return -EINVAL;
+ }
+
+ aligned = 0;
+
+ /* Try size from largest to smallest */
+ for (i = num_sizes - 1; i >= 0; i--) {
+
+ page_size = PVU_PAGE_SIZE_BYTES[i];
+
+ if (is_aligned(vaddr, page_size) &&
+ is_aligned(paddr, page_size) &&
+ size >= page_size) {
+
+ entlist[count].virt_addr = vaddr;
+ entlist[count].phys_addr = paddr;
+ entlist[count].size = page_size;
+ entlist[count].flags = flags;
+
+ count++;
+ vaddr += page_size;
+ paddr += page_size;
+ size -= page_size;
+ aligned = 1;
+ break;
+ }
+ }
+
+ if (!aligned) {
+ printk("ERROR: PVU: Addresses %llx %llx aren't aligned to any of the allowed page sizes\n",
+ vaddr, paddr);
+ return -EINVAL;
+ }
+ }
+ return count;
+}
+
+static void pvu_entrylist_sort(struct pvu_tlb_entry *entlist, u32 num_entries)
+{
+ struct pvu_tlb_entry temp;
+ int i, j;
+
+ for (i = 0; i < num_entries; i++) {
+ for (j = i; j < num_entries; j++) {
+
+ if (entlist[i].size < entlist[j].size) {
+ temp = entlist[i];
+ entlist[i] = entlist[j];
+ entlist[j] = temp;
+ }
+ }
+ }
+}
+
+static int pvu_iommu_program_entries(struct cell *cell, u8 virtid)
+{
+	int i, ret, tlb_next, tlbnum, idx, num_ent;
+	struct pvu_tlb_entry *ent, *cell_entries;
+	struct pvu_dev *dev;
+	u32 inst;
+
+	cell_entries = cell->arch.iommu_pvu.entries;
+	num_ent = cell->arch.iommu_pvu.ent_count;
+	if (num_ent == 0 || cell_entries == NULL)
+		return 0;
+
+	/* Program same memory mapping for all of the instances */
+	for (inst = 0; inst < pvu_count; inst++) {
+		dev = &pvu_units[inst];
+		if (pvu_tlb_is_enabled(dev, virtid))
+			continue;
+
+		tlbnum = virtid;
+		for (i = 0; i < num_ent; i++) {
+
+			ent = &cell_entries[i];
+			idx = i % dev->num_entries;
+
+			if (idx == 0 && i >= dev->num_entries) {
+				/* pvu_tlb_alloc signals failure with 0 */
+				tlb_next = pvu_tlb_alloc(dev, virtid);
+				if (tlb_next == 0)
+					return -ENOMEM;
+				pvu_tlb_chain(dev, tlbnum, tlb_next);
+				pvu_tlb_enable(dev, tlbnum);
+				tlbnum = tlb_next;
+			}
+
+			ret = pvu_entry_write(dev, tlbnum, idx, ent);
+			if (ret)
+				return ret;
+		}
+		pvu_tlb_enable(dev, tlbnum);
+	}
+	return 0;
+}
+
+/*
+ * Actual TLB entry programming is deferred till config_commit
+ * Only populate the pvu_entries array for now
+ */
+int pvu_iommu_map_memory(struct cell *cell,
+ const struct jailhouse_memory *mem)
+{
+ struct pvu_tlb_entry *ent;
+ int size, ret;
+ u32 flags = 0;
+
+ if (pvu_count == 0)
+ return 0;
+
+ if ((mem->flags & JAILHOUSE_MEM_DMA) == 0)
+ return 0;
+
+ if (cell->arch.iommu_pvu.ent_count == MAX_PVU_ENTRIES)
+ return -ENOMEM;
+
+ if (mem->flags & JAILHOUSE_MEM_READ)
+ flags |= (LPAE_PAGE_PERM_UR | LPAE_PAGE_PERM_SR);
+ if (mem->flags & JAILHOUSE_MEM_WRITE)
+ flags |= (LPAE_PAGE_PERM_UW | LPAE_PAGE_PERM_SW);
+ if (mem->flags & JAILHOUSE_MEM_EXECUTE)
+ flags |= (LPAE_PAGE_PERM_UX | LPAE_PAGE_PERM_SX);
+
+ flags |= (LPAE_PAGE_MEM_WRITETHROUGH | LPAE_PAGE_OUTER_SHARABLE |
+ LPAE_PAGE_IS_NOALLOC | LPAE_PAGE_OS_NOALLOC);
+
+ ent = &cell->arch.iommu_pvu.entries[cell->arch.iommu_pvu.ent_count];
+ size = MAX_PVU_ENTRIES - cell->arch.iommu_pvu.ent_count;
+
+ ret = pvu_entrylist_create(mem->virt_start, mem->phys_start, mem->size,
+ flags, ent, size);
+ if (ret < 0)
+ return ret;
+
+ cell->arch.iommu_pvu.ent_count += ret;
+ return 0;
+}
+
+int pvu_iommu_unmap_memory(struct cell *cell,
+ const struct jailhouse_memory *mem)
+{
+ /*
+ * dummy unmap for now
+ * PVU does not support dynamic unmap
+ * Works well for static partitioning
+ */
+ return 0;
+}
+
+int pvu_iommu_config_commit(struct cell *cell)
+{
+	int ret, i, virtid;
+
+	if (pvu_count == 0)
+		return 0;
+
+	if (!cell) {
+		return 0;
+	}
+
+	/*
+	 * Chaining the TLB entries adds extra latency to translate those
+	 * addresses.
+	 * Sort the entries in descending order of page sizes to reduce effects
+	 * of chaining and thus reducing average translation latency
+	 */
+	pvu_entrylist_sort(cell->arch.iommu_pvu.entries,
+		cell->arch.iommu_pvu.ent_count);
+
+	for_each_stream_id(virtid, cell->config, i) {
+		if (virtid == JAILHOUSE_INVALID_STREAMID)
+			break;
+		if (virtid > MAX_VIRTID)
+			continue;
+
+		ret = pvu_iommu_program_entries(cell, virtid);
+		if (ret)
+			return ret;
+	}
+
+	cell->arch.iommu_pvu.ent_count = 0;
+	return 0; /* ret may be uninitialized when the loop body never ran */
+}
+
+static int pvu_iommu_cell_init(struct cell *cell)
+{
+	struct pvu_dev *dev;
+	int i, virtid;
+
+	if (pvu_count == 0)
+		return 0;
+
+	/* Validate virtids first so a failure cannot leak the page below */
+	dev = &pvu_units[0];
+	for_each_stream_id(virtid, cell->config, i) {
+		if (virtid == JAILHOUSE_INVALID_STREAMID)
+			break;
+		if (virtid > MAX_VIRTID)
+			continue;
+
+		if (pvu_tlb_is_enabled(dev, virtid))
+			return -EINVAL;
+	}
+
+	cell->arch.iommu_pvu.ent_count = 0;
+	cell->arch.iommu_pvu.entries = page_alloc(&mem_pool, 1);
+	if (!cell->arch.iommu_pvu.entries)
+		return -ENOMEM;
+	return 0;
+}
+
+static int pvu_iommu_flush_context(u16 virtid)
+{
+ struct pvu_dev *dev;
+ int i, tlbnum, next;
+
+ for (i = 0; i < pvu_count; i++) {
+
+ dev = &pvu_units[i];
+ tlbnum = virtid;
+
+ while (tlbnum) {
+
+ next = pvu_tlb_next(dev, tlbnum);
+ pvu_tlb_flush(dev, tlbnum);
+ tlbnum = next;
+ }
+ }
+ return 0;
+}
+
+static void pvu_iommu_cell_exit(struct cell *cell)
+{
+ int i, virtid;
+
+ if (pvu_count == 0)
+ return;
+
+ for_each_stream_id(virtid, cell->config, i) {
+
+ if (virtid == JAILHOUSE_INVALID_STREAMID)
+ break;
+ if (virtid > MAX_VIRTID)
+ continue;
+
+ pvu_iommu_flush_context(virtid);
+ }
+
+ cell->arch.iommu_pvu.ent_count = 0;
+ page_free(&mem_pool, cell->arch.iommu_pvu.entries, 1);
+ cell->arch.iommu_pvu.entries = NULL;
+}
+
+static int pvu_iommu_init(void)
+{
+ struct jailhouse_iommu *iommu;
+ struct pvu_dev *dev;
+ int i, ret;
+
+ iommu = &system_config->platform_info.arm.iommu_units[0];
+ for (i = 0; i < iommu_count_units(); iommu++, i++) {
+
+ if (iommu->type != JAILHOUSE_IOMMU_PVU)
+ continue;
+
+ dev = &pvu_units[pvu_count];
+ dev->cfg_base = paging_map_device(iommu->base,
+ iommu->size);
+ dev->tlb_base = paging_map_device(iommu->tipvu_tlb_base,
+ iommu->tipvu_tlb_size);
+
+ ret = pvu_init_device(dev, MAX_VIRTID);
+ if (ret)
+ return ret;
+
+ pvu_count++;
+ }
+
+ return pvu_iommu_cell_init(&root_cell);
+}
+
+DEFINE_UNIT_SHUTDOWN_STUB(pvu_iommu);
+DEFINE_UNIT_MMIO_COUNT_REGIONS_STUB(pvu_iommu);
+DEFINE_UNIT(pvu_iommu, "PVU IOMMU");
diff --git a/hypervisor/arch/arm64/ti-pvu_priv.h b/hypervisor/arch/arm64/ti-pvu_priv.h
new file mode 100644
index 00000000..acba338b
--- /dev/null
+++ b/hypervisor/arch/arm64/ti-pvu_priv.h
@@ -0,0 +1,141 @@
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) 2018 Texas Instruments Incorporated -
http://www.ti.com/
+ *
+ * TI PVU IOMMU unit private headers
+ *
+ * Authors:
+ * Nikhil Devshatwar <
nikh...@ti.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef __TI_PVU_PRIV_H__
+#define __TI_PVU_PRIV_H__
+
+#define PVU_NUM_TLBS 64
+#define PVU_NUM_ENTRIES 8
+
+#define PVU_CONFIG_NTLB_MASK (0xff)
+#define PVU_CONFIG_NENT_MASK (0xf << 16)
+
+#define PVU_MAX_VIRTID_MASK (0xfff)
+
+#define PVU_enable_EN (0x1)
+#define PVU_enable_DIS (0x0)
+#define PVU_enable_MASK (0x1)
+
+struct pvu_hw_cfg {
+ u32 pid;
+ u32 config;
+ u8 resv_16[8];
+ u32 enable;
+ u32 virtid_map1;
+ u32 virtid_map2;
+ u8 resv_48[20];
+ u32 exception_logging_disable;
+ u8 resv_260[208];
+ u32 destination_id;
+ u8 resv_288[24];
+ u32 exception_logging_control;
+ u32 exception_logging_header0;
+ u32 exception_logging_header1;
+ u32 exception_logging_data0;
+ u32 exception_logging_data1;
+ u32 exception_logging_data2;
+ u32 exception_logging_data3;
+ u8 resv_320[4];
+ u32 exception_pend_set;
+ u32 exception_pend_clear;
+ u32 exception_ENABLE_set;
+ u32 exception_ENABLE_clear;
+ u32 eoi_reg;
+};
+
+#define PVU_TLB_ENTRY_VALID (2)
+#define PVU_TLB_ENTRY_INVALID (0)
+#define PVU_TLB_ENTRY_MODE_MASK (0x3 << 30)
+#define PVU_TLB_ENTRY_FLAG_MASK (0xff7f)
+#define PVU_TLB_ENTRY_PGSIZE_MASK (0xf << 16)
+
+#define PVU_ENTRY_INVALID (0 << 30)
+#define PVU_ENTRY_VALID (2 << 30)
+
+#define LPAE_PAGE_SZ_4K 0
+#define LPAE_PAGE_SZ_16K 1
+#define LPAE_PAGE_SZ_64K 2
+#define LPAE_PAGE_SZ_2M 3
+#define LPAE_PAGE_SZ_32M 4
+#define LPAE_PAGE_SZ_512M 5
+#define LPAE_PAGE_SZ_1G 6
+#define LPAE_PAGE_SZ_16G 7
+
+#define LPAE_PAGE_PERM_UR (1 << 15)
+#define LPAE_PAGE_PERM_UW (1 << 14)
+#define LPAE_PAGE_PERM_UX (1 << 13)
+#define LPAE_PAGE_PERM_SR (1 << 12)
+#define LPAE_PAGE_PERM_SW (1 << 11)
+#define LPAE_PAGE_PERM_SX (1 << 10)
+
+#define LPAE_PAGE_MEM_DEVICE (0 << 8)
+#define LPAE_PAGE_MEM_WRITEBACK (1 << 8)
+#define LPAE_PAGE_MEM_WRITETHROUGH (2 << 8)
+
+#define LPAE_PAGE_PREFETCH (1 << 6)
+#define LPAE_PAGE_INNER_SHARABLE (1 << 5)
+#define LPAE_PAGE_OUTER_SHARABLE (1 << 4)
+
+#define LPAE_PAGE_IS_NOALLOC (0 << 2)
+#define LPAE_PAGE_IS_WR_ALLOC (1 << 2)
+#define LPAE_PAGE_IS_RD_ALLOC (2 << 2)
+#define LPAE_PAGE_IS_RDWR_ALLOC (3 << 2)
+
+#define LPAE_PAGE_OS_NOALLOC (0 << 0)
+#define LPAE_PAGE_OS_WR_ALLOC (1 << 0)
+#define LPAE_PAGE_OS_RD_ALLOC (2 << 0)
+#define LPAE_PAGE_OS_RDWR_ALLOC (3 << 0)
+
+struct pvu_hw_tlb_entry {
+ u32 reg0;
+ u32 reg1;
+ u32 reg2;
+ u32 reg3;
+ u32 reg4;
+ u32 reg5;
+ u32 reg6;
+ u32 reg7;
+};
+
+#define PVU_TLB_EN_MASK (1 << 31)
+#define PVU_TLB_LOG_DIS_MASK (1 << 30)
+#define PVU_TLB_FAULT_MASK (1 << 29)
+#define PVU_TLB_CHAIN_MASK (0xfff)
+
+struct pvu_hw_tlb {
+ u32 chain;
+ u8 resv_32[28];
+ struct pvu_hw_tlb_entry entry[8];
+ u8 resv_4096[3808];
+};
+
+struct pvu_tlb_entry {
+ u64 virt_addr;
+ u64 phys_addr;
+ u64 size;
+ u64 flags;
+};
+
+struct pvu_dev {
+ u32 *cfg_base;
+ u32 *tlb_base;
+
+ u32 num_tlbs;
+ u32 num_entries;
+ u16 max_virtid;
+
+ u16 tlb_data[PVU_NUM_TLBS];
+};
+
+#endif /* __TI_PVU_PRIV_H__ */
diff --git a/include/jailhouse/cell-config.h b/include/jailhouse/cell-config.h
index d435b9f7..9bb84492 100644
--- a/include/jailhouse/cell-config.h
+++ b/include/jailhouse/cell-config.h
@@ -203,6 +203,7 @@ struct jailhouse_pci_capability {
#define JAILHOUSE_IOMMU_AMD 1
#define JAILHOUSE_IOMMU_INTEL 2
#define JAILHOUSE_IOMMU_SMMUV3 3
+#define JAILHOUSE_IOMMU_PVU 4
struct jailhouse_iommu {
__u32 type;
@@ -213,6 +214,9 @@ struct jailhouse_iommu {
__u8 amd_base_cap;
__u8 amd_msi_cap;
__u32 amd_features;
+
+ __u64 tipvu_tlb_base;
+ __u32 tipvu_tlb_size;
} __attribute__((packed));
struct jailhouse_pio {
--
2.17.1