Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 01:08:03 -04:00)

Commit f6dfaef42e: "Fixed MTP to work with TWRP"

50,820 changed files with 20,846,062 additions and 0 deletions
arch/arm64/kvm/Kconfig (new file, 63 lines)
@@ -0,0 +1,63 @@
#
# KVM configuration
#

source "virt/kvm/Kconfig"

menuconfig VIRTUALIZATION
	bool "Virtualization"
	---help---
	  Say Y here to get to see options for using your Linux host to run
	  other operating systems inside virtual machines (guests).
	  This option alone does not add any kernel code.

	  If you say N, all options in this submenu will be skipped and
	  disabled.

if VIRTUALIZATION

config KVM
	bool "Kernel-based Virtual Machine (KVM) support"
	select MMU_NOTIFIER
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select HAVE_KVM_CPU_RELAX_INTERCEPT
	select KVM_MMIO
	select KVM_ARM_HOST
	select KVM_ARM_VGIC
	select KVM_ARM_TIMER
	---help---
	  Support hosting virtualized guest machines.

	  If unsure, say N.

config KVM_ARM_HOST
	bool
	---help---
	  Provides host support for ARM processors.

config KVM_ARM_MAX_VCPUS
	int "Number maximum supported virtual CPUs per VM"
	depends on KVM_ARM_HOST
	default 4
	help
	  Static number of max supported virtual CPUs per VM.

	  If you choose a high number, the vcpu structures will be quite
	  large, so only choose a reasonable number that you expect to
	  actually use.

config KVM_ARM_VGIC
	bool
	depends on KVM_ARM_HOST && OF
	select HAVE_KVM_IRQCHIP
	---help---
	  Adds support for a hardware assisted, in-kernel GIC emulation.

config KVM_ARM_TIMER
	bool
	depends on KVM_ARM_VGIC
	---help---
	  Adds support for the Architected Timers in virtual machines.

endif # VIRTUALIZATION

arch/arm64/kvm/Makefile (new file, 27 lines)
@@ -0,0 +1,27 @@
#
# Makefile for Kernel-based Virtual Machine module
#

ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.

KVM=../../../virt/kvm
ARM=../../../arch/arm/kvm

obj-$(CONFIG_KVM_ARM_HOST) += kvm.o

kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o

kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o

kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o

arch/arm64/kvm/emulate.c (new file, 158 lines)
@@ -0,0 +1,158 @@
/*
 * (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

/*
 * stolen from arch/arm/kernel/opcodes.c
 *
 * condition code lookup table
 * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
 *
 * bit position in short is condition code: NZCV
 */
static const unsigned short cc_map[16] = {
	0xF0F0,	/* EQ == Z set            */
	0x0F0F,	/* NE                     */
	0xCCCC,	/* CS == C set            */
	0x3333,	/* CC                     */
	0xFF00,	/* MI == N set            */
	0x00FF,	/* PL                     */
	0xAAAA,	/* VS == V set            */
	0x5555,	/* VC                     */
	0x0C0C,	/* HI == C set && Z clear */
	0xF3F3,	/* LS == C clear || Z set */
	0xAA55,	/* GE == (N==V)           */
	0x55AA,	/* LT == (N!=V)           */
	0x0A05,	/* GT == (!Z && (N==V))   */
	0xF5FA,	/* LE == (Z || (N!=V))    */
	0xFFFF,	/* AL always              */
	0	/* NV                     */
};

static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_EL2_CV)
		return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;

	return -1;
}

/*
 * Check if a trapped instruction should have been executed or not.
 */
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{
	unsigned long cpsr;
	u32 cpsr_cond;
	int cond;

	/* Top two bits non-zero? Unconditional. */
	if (kvm_vcpu_get_hsr(vcpu) >> 30)
		return true;

	/* Is condition field valid? */
	cond = kvm_vcpu_get_condition(vcpu);
	if (cond == 0xE)
		return true;

	cpsr = *vcpu_cpsr(vcpu);

	if (cond < 0) {
		/* This can happen in Thumb mode: examine IT state. */
		unsigned long it;

		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

		/* it == 0 => unconditional. */
		if (it == 0)
			return true;

		/* The cond for this insn works out as the top 4 bits. */
		cond = (it >> 4);
	}

	cpsr_cond = cpsr >> 28;

	if (!((cc_map[cond] >> cpsr_cond) & 1))
		return false;

	return true;
}

/**
 * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu:	The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);

	BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));

	if (!(cpsr & COMPAT_PSR_IT_MASK))
		return;

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~COMPAT_PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}

/**
 * kvm_skip_instr - skip a trapped instruction and proceed to the next
 * @vcpu: The vcpu pointer
 */
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}

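Aside (not part of the commit): the cc_map lookup above reduces AArch32 condition evaluation to a single bit test, indexing the 16-bit entry by the guest's NZCV flags. A minimal stand-alone sketch of the same idea, with illustrative names:

	#include <stdio.h>

	/* Same table as in emulate.c: bit 'nzcv' of cc_map[cond] says
	 * whether condition 'cond' passes for those flag values. */
	static const unsigned short cc_map[16] = {
		0xF0F0, 0x0F0F, 0xCCCC, 0x3333, 0xFF00, 0x00FF, 0xAAAA, 0x5555,
		0x0C0C, 0xF3F3, 0xAA55, 0x55AA, 0x0A05, 0xF5FA, 0xFFFF, 0,
	};

	/* nzcv is CPSR[31:28]: N = bit 3, Z = bit 2, C = bit 1, V = bit 0. */
	static int condition_passes(unsigned int cond, unsigned int nzcv)
	{
		return (cc_map[cond & 0xF] >> (nzcv & 0xF)) & 1;
	}

	int main(void)
	{
		printf("EQ, Z set:   %d\n", condition_passes(0x0, 0x4)); /* prints 1 */
		printf("EQ, Z clear: %d\n", condition_passes(0x0, 0x0)); /* prints 0 */
		return 0;
	}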
arch/arm64/kvm/guest.c (new file, 358 lines)
@@ -0,0 +1,358 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
		switch (mode) {
		case COMPAT_PSR_MODE_USR:
		case COMPAT_PSR_MODE_FIQ:
		case COMPAT_PSR_MODE_IRQ:
		case COMPAT_PSR_MODE_SVC:
		case COMPAT_PSR_MODE_ABT:
		case COMPAT_PSR_MODE_UND:
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_regs) / sizeof(__u32);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
		+ NUM_TIMER_REGS;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
	int ret;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		if (put_user(core_reg | i, uindices))
			return -EFAULT;
		uindices++;
	}

	ret = copy_timer_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	/* Register group 16 means we want a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return get_core_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	/* Register group 16 means we set a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return set_core_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		};
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		};
		break;
	};

	return -EINVAL;
}

int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			const struct kvm_vcpu_init *init)
{
	unsigned int i;
	int phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	vcpu->arch.target = phys_target;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		if (init->features[i / 32] & (1 << (i % 32))) {
			if (i >= KVM_VCPU_MAX_FEATURES)
				return -ENOENT;
			set_bit(i, vcpu->arch.features);
		}
	}

	/* Now we know what it is, we can reset it. */
	return kvm_reset_vcpu(vcpu);
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

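For context, get_core_reg()/set_core_reg() above back the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, with the register ID encoding an index into struct kvm_regs. A hedged user-space sketch of reading the guest PC through that interface on arm64 (obtaining vcpu_fd via KVM_CREATE_VCPU is assumed to happen elsewhere, and read_vcpu_pc is an illustrative name):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int read_vcpu_pc(int vcpu_fd, uint64_t *pc)
	{
		struct kvm_one_reg reg = {
			/* Core register: 64-bit PC, indexed into struct kvm_regs. */
			.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
				KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
			.addr = (uint64_t)(unsigned long)pc,
		};

		/* The kernel side lands in get_core_reg() shown above. */
		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}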
arch/arm64/kvm/handle_exit.c (new file, 135 lines)
@@ -0,0 +1,135 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	ret = kvm_psci_call(vcpu);
	if (ret < 0) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	return ret;
}

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
		kvm_vcpu_on_spin(vcpu);
	else
		kvm_vcpu_block(vcpu);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 1;
}

static exit_handle_fn arm_exit_handlers[] = {
	[ESR_EL2_EC_WFI]	= kvm_handle_wfx,
	[ESR_EL2_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_EL2_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_EL2_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_EL2_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_EL2_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_EL2_EC_HVC32]	= handle_hvc,
	[ESR_EL2_EC_SMC32]	= handle_smc,
	[ESR_EL2_EC_HVC64]	= handle_hvc,
	[ESR_EL2_EC_SMC64]	= handle_smc,
	[ESR_EL2_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_EL2_EC_IABT]	= kvm_handle_guest_abort,
	[ESR_EL2_EC_DABT]	= kvm_handle_guest_abort,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);

	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
	    !arm_exit_handlers[hsr_ec]) {
		kvm_err("Unknown exception class: hsr: %#08x\n",
			(unsigned int)kvm_vcpu_get_hsr(vcpu));
		BUG();
	}

	return arm_exit_handlers[hsr_ec];
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index)
{
	exit_handle_fn exit_handler;

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_TRAP:
		/*
		 * See ARM ARM B1.14.1: "Hyp traps on instructions
		 * that fail their condition code check"
		 */
		if (!kvm_condition_valid(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}

		exit_handler = kvm_get_exit_handler(vcpu);

		return exit_handler(vcpu, run);
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

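A stand-alone illustration of the table-driven dispatch pattern used by arm_exit_handlers[] above: unknown classes are rejected before the indirect call, which is what kvm_get_exit_handler() enforces with BUG(). All names in this sketch are made up for illustration:

	#include <stddef.h>

	typedef int (*handler_fn)(void *ctx);

	static int handle_default(void *ctx) { (void)ctx; return 1; }

	static const handler_fn handlers[] = {
		[0] = handle_default,
		/* further classes filled in as they are supported */
	};

	static int dispatch(unsigned int class, void *ctx)
	{
		/* Bounds-check, then reject holes in the sparse table. */
		if (class >= sizeof(handlers) / sizeof(handlers[0]) ||
		    !handlers[class])
			return -1;	/* the kernel version BUG()s here */
		return handlers[class](ctx);
	}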
arch/arm64/kvm/hyp-init.S (new file, 120 lines)
@@ -0,0 +1,120 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

	.text
	.pushsection	.hyp.idmap.text, "ax"

	.align	11

ENTRY(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	b	.

	/*
	 * x0: HYP boot pgd
	 * x1: HYP pgd
	 * x2: HYP stack
	 * x3: HYP vectors
	 */
__do_hyp_init:

	msr	ttbr0_el2, x0

	mrs	x4, tcr_el1
	ldr	x5, =TCR_EL2_MASK
	and	x4, x4, x5
	ldr	x5, =TCR_EL2_FLAGS
	orr	x4, x4, x5
	msr	tcr_el2, x4

	ldr	x4, =VTCR_EL2_FLAGS
	/*
	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
	 * VTCR_EL2.
	 */
	mrs	x5, ID_AA64MMFR0_EL1
	bfi	x4, x5, #16, #3
	msr	vtcr_el2, x4

	mrs	x4, mair_el1
	msr	mair_el2, x4
	isb

	/* Invalidate the stale TLBs from Bootloader */
	tlbi	alle2
	dsb	sy

	mrs	x4, sctlr_el2
	and	x4, x4, #SCTLR_EL2_EE	// preserve endianness of EL2
	ldr	x5, =SCTLR_EL2_FLAGS
	orr	x4, x4, x5
	msr	sctlr_el2, x4
	isb

	/* MMU is now enabled. Get ready for the trampoline dance */
	ldr	x4, =TRAMPOLINE_VA
	adr	x5, target
	bfi	x4, x5, #0, #PAGE_SHIFT
	br	x4

target: /* We're now in the trampoline code, switch page tables */
	msr	ttbr0_el2, x1
	isb

	/* Invalidate the old TLBs */
	tlbi	alle2
	dsb	sy

	/* Set the stack and new vectors */
	kern_hyp_va	x2
	mov	sp, x2
	kern_hyp_va	x3
	msr	vbar_el2, x3

	/* Hello, World! */
	eret
ENDPROC(__kvm_hyp_init)

	.ltorg

	.popsection

arch/arm64/kvm/hyp.S (new file, 1275 lines): diff suppressed because it is too large.

arch/arm64/kvm/inject_fault.c (new file, 203 lines)
@@ -0,0 +1,203 @@
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)
#define EL1_EXCEPT_SYNC_OFFSET	0x200

static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
	u32 return_offset = (is_thumb) ? 4 : 0;
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	cpsr = mode | COMPAT_PSR_I_BIT;

	if (sctlr & (1 << 30))
		cpsr |= COMPAT_PSR_T_BIT;
	if (sctlr & (1 << 25))
		cpsr |= COMPAT_PSR_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}

static void inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !iabt */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;
	else
		*fsr = 0x14;
}

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32;
	u32 esr = 0;

	is_aarch32 = vcpu_mode_is_32bit(vcpu);

	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;

	vcpu_sys_reg(vcpu, FAR_EL1) = addr;

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_EL1_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
	else
		esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);

	if (!is_iabt)
		esr |= ESR_EL1_EC_DABT_EL0;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);

	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_EL1_IL;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the undefined exception
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, false, addr);

	inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the undefined exception
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, true, addr);

	inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_undef32(vcpu);

	inject_undef64(vcpu);
}

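Aside: inject_abt64() above composes the guest-visible ESR_EL1 from an exception class, the IL bit, and a fault status code. A sketch of that bit layout, assuming the ARMv8 ESR field positions (EC in bits [31:26], IL at bit 25, FSC in bits [5:0]); the macro and function names here are illustrative, not the kernel's:

	#include <stdint.h>

	#define EC_SHIFT	26		/* exception class, bits [31:26] */
	#define IL_BIT		(1u << 25)	/* 32-bit instruction length */

	static uint32_t make_esr(uint32_t ec, int il_32bit, uint32_t fsc)
	{
		uint32_t esr = ec << EC_SHIFT;

		if (il_32bit)
			esr |= IL_BIT;
		return esr | (fsc & 0x3f);	/* fault status code */
	}

	/* e.g. make_esr(0x21 /* IABT, same EL */, 1, 0x10 /* ext. abort */) */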
arch/arm64/kvm/regmap.c (new file, 168 lines)
@@ -0,0 +1,168 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/emulate.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/ptrace.h>

#define VCPU_NR_MODES 6
#define REG_OFFSET(_reg) \
	(offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))

#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
	/* USR Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
		REG_OFFSET(pc)
	},

	/* FIQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(compat_r8_fiq),  /* r8 */
		REG_OFFSET(compat_r9_fiq),  /* r9 */
		REG_OFFSET(compat_r10_fiq), /* r10 */
		REG_OFFSET(compat_r11_fiq), /* r11 */
		REG_OFFSET(compat_r12_fiq), /* r12 */
		REG_OFFSET(compat_sp_fiq),  /* r13 */
		REG_OFFSET(compat_lr_fiq),  /* r14 */
		REG_OFFSET(pc)
	},

	/* IRQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_irq), /* r13 */
		REG_OFFSET(compat_lr_irq), /* r14 */
		REG_OFFSET(pc)
	},

	/* SVC Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_svc), /* r13 */
		REG_OFFSET(compat_lr_svc), /* r14 */
		REG_OFFSET(pc)
	},

	/* ABT Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_abt), /* r13 */
		REG_OFFSET(compat_lr_abt), /* r14 */
		REG_OFFSET(pc)
	},

	/* UND Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_und), /* r13 */
		REG_OFFSET(compat_lr_und), /* r14 */
		REG_OFFSET(pc)
	},
};

/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;

	switch (mode) {
	case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
		mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
		break;

	case COMPAT_PSR_MODE_ABT:
		mode = 4;
		break;

	case COMPAT_PSR_MODE_UND:
		mode = 5;
		break;

	case COMPAT_PSR_MODE_SYS:
		mode = 0;	/* SYS maps to USR */
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}

/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
	switch (mode) {
	case COMPAT_PSR_MODE_SVC:
		mode = KVM_SPSR_SVC;
		break;
	case COMPAT_PSR_MODE_ABT:
		mode = KVM_SPSR_ABT;
		break;
	case COMPAT_PSR_MODE_UND:
		mode = KVM_SPSR_UND;
		break;
	case COMPAT_PSR_MODE_IRQ:
		mode = KVM_SPSR_IRQ;
		break;
	case COMPAT_PSR_MODE_FIQ:
		mode = KVM_SPSR_FIQ;
		break;
	default:
		BUG();
	}

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode];
}

arch/arm64/kvm/reset.c (new file, 111 lines)
@@ -0,0 +1,111 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>

/*
 * ARMv8 Reset Values
 */
static const struct kvm_regs default_regs_reset = {
	.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
			PSR_F_BIT | PSR_D_BIT),
};

static const struct kvm_regs default_regs_reset32 = {
	.regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
			COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
};

static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};

static bool cpu_has_32bit_el1(void)
{
	u64 pfr0;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	return !!(pfr0 & 0x20);
}

int kvm_arch_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpu_has_32bit_el1();
		break;
	default:
		r = 0;
	}

	return r;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on
 * the virtual CPU struct to their architecturally defined reset
 * values.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	const struct kvm_irq_level *cpu_vtimer_irq;
	const struct kvm_regs *cpu_reset;

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			if (!cpu_has_32bit_el1())
				return -EINVAL;
			cpu_reset = &default_regs_reset32;
		} else {
			cpu_reset = &default_regs_reset;
		}

		cpu_vtimer_irq = &default_vtimer_irq;
		break;
	}

	/* Reset core registers */
	memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/* Reset timer */
	kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);

	return 0;
}

arch/arm64/kvm/sys_regs.c (new file, 1537 lines): diff suppressed because it is too large.

arch/arm64/kvm/sys_regs.h (new file, 140 lines)
@@ -0,0 +1,140 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
#define __ARM64_KVM_SYS_REGS_LOCAL_H__

struct sys_reg_params {
	u8	Op0;
	u8	Op1;
	u8	CRn;
	u8	CRm;
	u8	Op2;
	u8	Rt;
	bool	is_write;
	bool	is_aarch32;
	bool	is_32bit;	/* Only valid if is_aarch32 is true */
};

struct sys_reg_desc {
	/* MRS/MSR instruction which accesses it. */
	u8	Op0;
	u8	Op1;
	u8	CRn;
	u8	CRm;
	u8	Op2;

	/* Trapped access from guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *,
		       const struct sys_reg_params *,
		       const struct sys_reg_desc *);

	/* Initialization for vcpu. */
	void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);

	/* Index into sys_reg[], or 0 if we don't need to save it. */
	int reg;

	/* Value (usually reset value) */
	u64 val;
};

static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
	/* Look, we even formatted it for you to paste into the table! */
	kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
		      p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
}

static inline bool ignore_write(struct kvm_vcpu *vcpu,
				const struct sys_reg_params *p)
{
	return true;
}

static inline bool read_zero(struct kvm_vcpu *vcpu,
			     const struct sys_reg_params *p)
{
	*vcpu_reg(vcpu, p->Rt) = 0;
	return true;
}

static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
				      const struct sys_reg_params *params)
{
	kvm_debug("sys_reg write to read-only register at: %lx\n",
		  *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	return false;
}

static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
					const struct sys_reg_params *params)
{
	kvm_debug("sys_reg read to write-only register at: %lx\n",
		  *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	return false;
}

/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
				 const struct sys_reg_desc *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
	vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
}

static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
	vcpu_sys_reg(vcpu, r->reg) = r->val;
}

static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
			      const struct sys_reg_desc *i2)
{
	BUG_ON(i1 == i2);
	if (!i1)
		return 1;
	else if (!i2)
		return -1;
	if (i1->Op0 != i2->Op0)
		return i1->Op0 - i2->Op0;
	if (i1->Op1 != i2->Op1)
		return i1->Op1 - i2->Op1;
	if (i1->CRn != i2->CRn)
		return i1->CRn - i2->CRn;
	if (i1->CRm != i2->CRm)
		return i1->CRm - i2->CRm;
	return i1->Op2 - i2->Op2;
}


#define Op0(_x)	.Op0 = _x
#define Op1(_x)	.Op1 = _x
#define CRn(_x)	.CRn = _x
#define CRm(_x)	.CRm = _x
#define Op2(_x)	.Op2 = _x

#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */

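Because cmp_sys_reg() defines a total order over (Op0, Op1, CRn, CRm, Op2), a sorted descriptor table can be searched in O(log n). A hedged sketch of such a lookup, assuming the definitions from sys_regs.h above are in scope (search_reg is an illustrative name; the kernel's actual lookup lives in the suppressed sys_regs.c):

	/* Binary search over a table sorted in cmp_sys_reg() order, as the
	 * init-time BUG_ON checks in sys_regs_generic_v8.c enforce. */
	static const struct sys_reg_desc *
	search_reg(const struct sys_reg_params *params,
		   const struct sys_reg_desc *table, unsigned int num)
	{
		struct sys_reg_desc key = {
			.Op0 = params->Op0, .Op1 = params->Op1,
			.CRn = params->CRn, .CRm = params->CRm,
			.Op2 = params->Op2,
		};
		unsigned int lo = 0, hi = num;

		while (lo < hi) {
			unsigned int mid = lo + (hi - lo) / 2;
			int c = cmp_sys_reg(&key, &table[mid]);

			if (c == 0)
				return &table[mid];
			if (c < 0)
				hi = mid;	/* key sorts before entry */
			else
				lo = mid + 1;	/* key sorts after entry */
		}
		return NULL;
	}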
arch/arm64/kvm/sys_regs_generic_v8.c (new file, 100 lines)
@@ -0,0 +1,100 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/coproc_a15.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kvm_host.h>
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <linux/init.h>

#include "sys_regs.h"

static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
	return true;
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr;

	asm volatile("mrs %0, actlr_el1\n" : "=r" (actlr));
	vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr;
}

/*
 * Implementation specific sys-reg registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 */
static const struct sys_reg_desc genericv8_sys_regs[] = {
	/* ACTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
	  access_actlr, reset_actlr, ACTLR_EL1 },
};

static const struct sys_reg_desc genericv8_cp15_regs[] = {
	/* ACTLR */
	{ Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
	  access_actlr },
};

static struct kvm_sys_reg_target_table genericv8_target_table = {
	.table64 = {
		.table = genericv8_sys_regs,
		.num = ARRAY_SIZE(genericv8_sys_regs),
	},
	.table32 = {
		.table = genericv8_cp15_regs,
		.num = ARRAY_SIZE(genericv8_cp15_regs),
	},
};

static int __init sys_reg_genericv8_init(void)
{
	unsigned int i;

	for (i = 1; i < ARRAY_SIZE(genericv8_sys_regs); i++)
		BUG_ON(cmp_sys_reg(&genericv8_sys_regs[i-1],
				   &genericv8_sys_regs[i]) >= 0);

	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_AEM_V8,
					  &genericv8_target_table);
	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8,
					  &genericv8_target_table);
	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A53,
					  &genericv8_target_table);
	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57,
					  &genericv8_target_table);
	kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
					  &genericv8_target_table);

	return 0;
}
late_initcall(sys_reg_genericv8_init);

arch/arm64/kvm/vgic-v2-switch.S (new file, 137 lines)
@@ -0,0 +1,137 @@
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
ENTRY(__save_vgic_v2_state)
__save_vgic_v2_state:
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr	w4, [x2, #GICH_HCR]
	ldr	w5, [x2, #GICH_VMCR]
	ldr	w6, [x2, #GICH_MISR]
	ldr	w7, [x2, #GICH_EISR0]
	ldr	w8, [x2, #GICH_EISR1]
	ldr	w9, [x2, #GICH_ELRSR0]
	ldr	w10, [x2, #GICH_ELRSR1]
	ldr	w11, [x2, #GICH_APR]
CPU_BE(	rev	w4, w4 )
CPU_BE(	rev	w5, w5 )
CPU_BE(	rev	w6, w6 )
CPU_BE(	rev	w7, w7 )
CPU_BE(	rev	w8, w8 )
CPU_BE(	rev	w9, w9 )
CPU_BE(	rev	w10, w10 )
CPU_BE(	rev	w11, w11 )

	str	w4, [x3, #VGIC_V2_CPU_HCR]
	str	w5, [x3, #VGIC_V2_CPU_VMCR]
	str	w6, [x3, #VGIC_V2_CPU_MISR]
CPU_LE(	str	w7, [x3, #VGIC_V2_CPU_EISR] )
CPU_LE(	str	w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
CPU_LE(	str	w9, [x3, #VGIC_V2_CPU_ELRSR] )
CPU_LE(	str	w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
CPU_BE(	str	w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
CPU_BE(	str	w8, [x3, #VGIC_V2_CPU_EISR] )
CPU_BE(	str	w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
CPU_BE(	str	w10, [x3, #VGIC_V2_CPU_ELRSR] )
	str	w11, [x3, #VGIC_V2_CPU_APR]

	/* Clear GICH_HCR */
	str	wzr, [x2, #GICH_HCR]

	/* Save list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_V2_CPU_LR
1:	ldr	w5, [x2], #4
CPU_BE(	rev	w5, w5 )
	str	w5, [x3], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
	ret
ENDPROC(__save_vgic_v2_state)

/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
ENTRY(__restore_vgic_v2_state)
__restore_vgic_v2_state:
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr	w4, [x3, #VGIC_V2_CPU_HCR]
	ldr	w5, [x3, #VGIC_V2_CPU_VMCR]
	ldr	w6, [x3, #VGIC_V2_CPU_APR]
CPU_BE(	rev	w4, w4 )
CPU_BE(	rev	w5, w5 )
CPU_BE(	rev	w6, w6 )

	str	w4, [x2, #GICH_HCR]
	str	w5, [x2, #GICH_VMCR]
	str	w6, [x2, #GICH_APR]

	/* Restore list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_V2_CPU_LR
1:	ldr	w5, [x3], #4
CPU_BE(	rev	w5, w5 )
	str	w5, [x2], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
	ret
ENDPROC(__restore_vgic_v2_state)

	.popsection

267
arch/arm64/kvm/vgic-v3-switch.S
Normal file
267
arch/arm64/kvm/vgic-v3-switch.S
Normal file
|
@ -0,0 +1,267 @@
|
|||
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>

	.text
	.pushsection	.hyp.text, "ax"

/*
 * We store LRs in reverse order to let the CPU deal with streaming
 * access. Use this macro to make it look saner...
 */
#define LR_OFFSET(n)	(VGIC_V3_CPU_LR + (15 - n) * 8)
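// e.g. LR_OFFSET(15) == VGIC_V3_CPU_LR and LR_OFFSET(0) == VGIC_V3_CPU_LR + 120:
// ascending LR numbers map to descending offsets in the save area.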

/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
.macro	save_vgic_v3_state
	// Compute the address of struct vgic_cpu
	add	x3, x0, #VCPU_VGIC_CPU

	// Make sure stores to the GIC via the memory mapped interface
	// are now visible to the system register interface
	dsb	st

	// Save all interesting registers
	mrs_s	x4, ICH_HCR_EL2
	mrs_s	x5, ICH_VMCR_EL2
	mrs_s	x6, ICH_MISR_EL2
	mrs_s	x7, ICH_EISR_EL2
	mrs_s	x8, ICH_ELSR_EL2

	str	w4, [x3, #VGIC_V3_CPU_HCR]
	str	w5, [x3, #VGIC_V3_CPU_VMCR]
	str	w6, [x3, #VGIC_V3_CPU_MISR]
	str	w7, [x3, #VGIC_V3_CPU_EISR]
	str	w8, [x3, #VGIC_V3_CPU_ELRSR]

	msr_s	ICH_HCR_EL2, xzr

	mrs_s	x21, ICH_VTR_EL2
	mvn	w22, w21
	ubfiz	w23, w22, 2, 4	// w23 = (15 - ListRegs) * 4
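	// ICH_VTR_EL2[4:0] is (number of implemented LRs - 1), so w23 is
	// the byte count of mrs_s instructions (4 bytes each) to skip:
	// the computed branch below jumps straight to the first
	// implemented list register.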
	adr	x24, 1f
	add	x24, x24, x23
	br	x24

1:
	mrs_s	x20, ICH_LR15_EL2
	mrs_s	x19, ICH_LR14_EL2
	mrs_s	x18, ICH_LR13_EL2
	mrs_s	x17, ICH_LR12_EL2
	mrs_s	x16, ICH_LR11_EL2
	mrs_s	x15, ICH_LR10_EL2
	mrs_s	x14, ICH_LR9_EL2
	mrs_s	x13, ICH_LR8_EL2
	mrs_s	x12, ICH_LR7_EL2
	mrs_s	x11, ICH_LR6_EL2
	mrs_s	x10, ICH_LR5_EL2
	mrs_s	x9, ICH_LR4_EL2
	mrs_s	x8, ICH_LR3_EL2
	mrs_s	x7, ICH_LR2_EL2
	mrs_s	x6, ICH_LR1_EL2
	mrs_s	x5, ICH_LR0_EL2

	adr	x24, 1f
	add	x24, x24, x23
	br	x24

1:
	str	x20, [x3, #LR_OFFSET(15)]
	str	x19, [x3, #LR_OFFSET(14)]
	str	x18, [x3, #LR_OFFSET(13)]
	str	x17, [x3, #LR_OFFSET(12)]
	str	x16, [x3, #LR_OFFSET(11)]
	str	x15, [x3, #LR_OFFSET(10)]
	str	x14, [x3, #LR_OFFSET(9)]
	str	x13, [x3, #LR_OFFSET(8)]
	str	x12, [x3, #LR_OFFSET(7)]
	str	x11, [x3, #LR_OFFSET(6)]
	str	x10, [x3, #LR_OFFSET(5)]
	str	x9, [x3, #LR_OFFSET(4)]
	str	x8, [x3, #LR_OFFSET(3)]
	str	x7, [x3, #LR_OFFSET(2)]
	str	x6, [x3, #LR_OFFSET(1)]
	str	x5, [x3, #LR_OFFSET(0)]
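
	// ICH_VTR_EL2[31:29] (PREbits) is the number of priority bits
	// minus one; it determines how many active-priority registers
	// exist per group: 5 bits -> 1, 6 bits -> 2, 7 bits -> 4.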
	tbnz	w21, #29, 6f	// 6 bits
	tbz	w21, #30, 5f	// 5 bits
	// 7 bits
	mrs_s	x20, ICH_AP0R3_EL2
	str	w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
	mrs_s	x19, ICH_AP0R2_EL2
	str	w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
6:	mrs_s	x18, ICH_AP0R1_EL2
	str	w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
5:	mrs_s	x17, ICH_AP0R0_EL2
	str	w17, [x3, #VGIC_V3_CPU_AP0R]

	tbnz	w21, #29, 6f	// 6 bits
	tbz	w21, #30, 5f	// 5 bits
	// 7 bits
	mrs_s	x20, ICH_AP1R3_EL2
	str	w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
	mrs_s	x19, ICH_AP1R2_EL2
	str	w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
6:	mrs_s	x18, ICH_AP1R1_EL2
	str	w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
5:	mrs_s	x17, ICH_AP1R0_EL2
	str	w17, [x3, #VGIC_V3_CPU_AP1R]

	// Restore SRE_EL1 access and re-enable SRE at EL1.
	mrs_s	x5, ICC_SRE_EL2
	orr	x5, x5, #ICC_SRE_EL2_ENABLE
	msr_s	ICC_SRE_EL2, x5
	isb
	mov	x5, #1
	msr_s	ICC_SRE_EL1, x5
.endm

/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro	restore_vgic_v3_state
	// Disable SRE_EL1 access. Necessary, otherwise
	// ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
	msr_s	ICC_SRE_EL1, xzr
	isb

	// Compute the address of struct vgic_cpu
	add	x3, x0, #VCPU_VGIC_CPU

	// Restore all interesting registers
	ldr	w4, [x3, #VGIC_V3_CPU_HCR]
	ldr	w5, [x3, #VGIC_V3_CPU_VMCR]

	msr_s	ICH_HCR_EL2, x4
	msr_s	ICH_VMCR_EL2, x5

	mrs_s	x21, ICH_VTR_EL2

	tbnz	w21, #29, 6f	// 6 bits
	tbz	w21, #30, 5f	// 5 bits
	// 7 bits
	ldr	w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
	msr_s	ICH_AP1R3_EL2, x20
	ldr	w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
	msr_s	ICH_AP1R2_EL2, x19
6:	ldr	w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
	msr_s	ICH_AP1R1_EL2, x18
5:	ldr	w17, [x3, #VGIC_V3_CPU_AP1R]
	msr_s	ICH_AP1R0_EL2, x17

	tbnz	w21, #29, 6f	// 6 bits
	tbz	w21, #30, 5f	// 5 bits
	// 7 bits
	ldr	w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
	msr_s	ICH_AP0R3_EL2, x20
	ldr	w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
	msr_s	ICH_AP0R2_EL2, x19
6:	ldr	w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
	msr_s	ICH_AP0R1_EL2, x18
5:	ldr	w17, [x3, #VGIC_V3_CPU_AP0R]
	msr_s	ICH_AP0R0_EL2, x17

	and	w22, w21, #0xf
	mvn	w22, w21
	ubfiz	w23, w22, 2, 4	// w23 = (15 - ListRegs) * 4
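	// Same computed-branch trick as on the save path: skip the
	// writes to unimplemented list registers.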
	adr	x24, 1f
	add	x24, x24, x23
	br	x24

1:
	ldr	x20, [x3, #LR_OFFSET(15)]
	ldr	x19, [x3, #LR_OFFSET(14)]
	ldr	x18, [x3, #LR_OFFSET(13)]
	ldr	x17, [x3, #LR_OFFSET(12)]
	ldr	x16, [x3, #LR_OFFSET(11)]
	ldr	x15, [x3, #LR_OFFSET(10)]
	ldr	x14, [x3, #LR_OFFSET(9)]
	ldr	x13, [x3, #LR_OFFSET(8)]
	ldr	x12, [x3, #LR_OFFSET(7)]
	ldr	x11, [x3, #LR_OFFSET(6)]
	ldr	x10, [x3, #LR_OFFSET(5)]
	ldr	x9, [x3, #LR_OFFSET(4)]
	ldr	x8, [x3, #LR_OFFSET(3)]
	ldr	x7, [x3, #LR_OFFSET(2)]
	ldr	x6, [x3, #LR_OFFSET(1)]
	ldr	x5, [x3, #LR_OFFSET(0)]

	adr	x24, 1f
	add	x24, x24, x23
	br	x24

1:
	msr_s	ICH_LR15_EL2, x20
	msr_s	ICH_LR14_EL2, x19
	msr_s	ICH_LR13_EL2, x18
	msr_s	ICH_LR12_EL2, x17
	msr_s	ICH_LR11_EL2, x16
	msr_s	ICH_LR10_EL2, x15
	msr_s	ICH_LR9_EL2, x14
	msr_s	ICH_LR8_EL2, x13
	msr_s	ICH_LR7_EL2, x12
	msr_s	ICH_LR6_EL2, x11
	msr_s	ICH_LR5_EL2, x10
	msr_s	ICH_LR4_EL2, x9
	msr_s	ICH_LR3_EL2, x8
	msr_s	ICH_LR2_EL2, x7
	msr_s	ICH_LR1_EL2, x6
	msr_s	ICH_LR0_EL2, x5

	// Ensure that the above writes will have reached the
	// (re)distributors. This ensures the guest will read
	// the correct values from the memory-mapped interface.
	isb
	dsb	sy

	// Prevent the guest from touching the GIC system registers
	mrs_s	x5, ICC_SRE_EL2
	and	x5, x5, #~ICC_SRE_EL2_ENABLE
	msr_s	ICC_SRE_EL2, x5
.endm

ENTRY(__save_vgic_v3_state)
	save_vgic_v3_state
	ret
ENDPROC(__save_vgic_v3_state)

ENTRY(__restore_vgic_v3_state)
	restore_vgic_v3_state
	ret
ENDPROC(__restore_vgic_v3_state)

ENTRY(__vgic_v3_get_ich_vtr_el2)
	mrs_s	x0, ICH_VTR_EL2
	ret
ENDPROC(__vgic_v3_get_ich_vtr_el2)

	.popsection