From 621f2c4753b03170213e178cdafd66e78b212b3c Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Tue, 30 Oct 2018 18:26:41 +0800
Subject: [PATCH 27/40] kvm: support layerscape

This is an integrated patch of kvm for layerscape. It lets stage-2
(guest) mappings inherit the memory attributes of the corresponding
stage-1 (userspace) mapping: kvm_phys_addr_ioremap() gains a pgprot_t
argument, stage1_to_stage2_pgprot() translates stage-1 attributes into
stage-2 page protections, and pages without a valid pfn are
dcache-flushed through ioremap_cache_ns(). It also adds
kvmppc_core_queue_vec_unavail() for booke and raises VITS_TYPER_DEVBITS
from 16 to 17.

Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
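Usage sketch (illustrative only, not part of the diff below): how a caller
is expected to drive the new prot argument of kvm_phys_addr_ioremap(). This
condenses the kvm_arch_prepare_memory_region() hunk in virt/kvm/arm/mmu.c;
the wrapper name map_region_like_stage1() is hypothetical, and since
stage1_to_stage2_pgprot() is static, such a helper would have to live in
mmu.c itself.

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/kvm_mmu.h>

/* Hypothetical wrapper, assumed to sit in virt/kvm/arm/mmu.c. */
static int map_region_like_stage1(struct kvm *kvm, phys_addr_t guest_ipa,
				  phys_addr_t pa, unsigned long size,
				  unsigned long hva, bool writable)
{
	pte_t *pte;
	spinlock_t *ptl;
	pgprot_t prot;

	/*
	 * Look up the stage-1 (userspace) PTE for hva and translate its
	 * memory attributes into stage-2 page protection bits.
	 */
	pte = get_locked_pte(current->mm, hva, &ptl);
	prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
	pte_unmap_unlock(pte, ptl);

	/*
	 * Callers that always want device attributes (e.g. vgic-v2.c)
	 * simply pass PAGE_S2_DEVICE instead of a derived prot.
	 */
	return kvm_phys_addr_ioremap(kvm, guest_ipa, pa, size, writable, prot);
}

With this change, shareable normal memory maps as PAGE_S2, non-shareable
normal memory maps as PAGE_S2_NS (and user_mem_abort() then forces
KVM_S2PTE_FLAG_IS_IOMAP), and device attributes keep PAGE_S2_DEVICE.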
 arch/arm/include/asm/kvm_mmu.h   |  3 +-
 arch/arm64/include/asm/kvm_mmu.h | 14 ++++++--
 arch/powerpc/kvm/booke.c         |  5 +++
 virt/kvm/arm/mmu.c               | 56 ++++++++++++++++++++++++++++++--
 virt/kvm/arm/vgic/vgic-its.c     |  2 +-
 virt/kvm/arm/vgic/vgic-v2.c      |  3 +-
 6 files changed, 74 insertions(+), 9 deletions(-)

--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -167,7 +167,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -270,8 +271,15 @@ static inline void __coherent_cache_gues
 
 static inline void __kvm_flush_dcache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
-	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	if (pfn_valid(pte_pfn(pte))) {
+		struct page *page = pte_page(pte);
+		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	} else {
+		void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT, PAGE_SIZE);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		iounmap(va);
+	}
 }
 
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -305,6 +305,11 @@ void kvmppc_core_queue_fpunavail(struct
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
 }
 
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+{
+	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+}
+
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1022,9 +1022,11 @@ static int stage2_pmdp_test_and_clear_yo
  * @guest_ipa:	The IPA at which to insert the mapping
  * @pa:		The physical address of the device
  * @size:	The size of the mapping
+ * @prot:	S2 page translation bits
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable)
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot)
 {
 	phys_addr_t addr, end;
 	int ret = 0;
@@ -1035,7 +1037,7 @@ int kvm_phys_addr_ioremap(struct kv
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		pte_t pte = pfn_pte(pfn, prot);
 
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
@@ -1059,6 +1061,30 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+	case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+	case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+	case PTE_ATTRINDX(MT_DEVICE_GRE):
+		return PAGE_S2_DEVICE;
+	case PTE_ATTRINDX(MT_NORMAL_NC):
+	case PTE_ATTRINDX(MT_NORMAL):
+		return (pgprot_val(prot) & PTE_SHARED)
+			? PAGE_S2
+			: PAGE_S2_NS;
+	}
+
+	return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
@@ -1335,6 +1361,18 @@ static int user_mem_abort(struct kvm_vcp
 		hugetlb = true;
 		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
 	} else {
+		pte_t *pte;
+		spinlock_t *ptl;
+		pgprot_t prot;
+
+		pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+		prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+		pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+		if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+			mem_type = PAGE_S2_NS;
+#endif
+
 		/*
 		 * Pages belonging to memslots that don't have the same
 		 * alignment for userspace and IPA cannot be mapped using
@@ -1376,6 +1414,11 @@ static int user_mem_abort(struct kvm_vcp
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
+#ifdef CONFIG_ARM64
+	if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+	} else
+#endif
 	if (kvm_is_device_pfn(pfn)) {
 		mem_type = PAGE_S2_DEVICE;
 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -1913,6 +1956,9 @@ int kvm_arch_prepare_memory_region(struc
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
 			phys_addr_t pa;
+			pgprot_t prot;
+			pte_t *pte;
+			spinlock_t *ptl;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 			pa += vm_start - vma->vm_start;
@@ -1923,9 +1969,13 @@ int kvm_arch_prepare_memory_region(struc
 				goto out;
 			}
 
+			pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
-						    writable);
+						    writable, prot);
 			if (ret)
 				break;
 		}
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -243,7 +243,7 @@ static struct its_ite *find_ite(struct v
 #define GIC_LPI_OFFSET 8192
 
 #define VITS_TYPER_IDBITS 16
-#define VITS_TYPER_DEVBITS 16
+#define VITS_TYPER_DEVBITS 17
 #define VITS_DTE_MAX_DEVID_OFFSET (BIT(14) - 1)
 #define VITS_ITE_MAX_EVENTID_OFFSET (BIT(16) - 1)
 
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -307,7 +307,8 @@ int vgic_v2_map_resources(struct kvm *kv
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
-					    KVM_VGIC_V2_CPU_SIZE, true);
+					    KVM_VGIC_V2_CPU_SIZE, true,
+					    PAGE_S2_DEVICE);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
 			goto out;