Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

71
arch/arm/kvm/Kconfig Normal file

@@ -0,0 +1,71 @@
#
# KVM configuration
#
source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
---help---
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
This option alone does not add any kernel code.
If you say N, all options in this submenu will be skipped and
disabled.
if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
depends on ARM_VIRT_EXT && ARM_LPAE
---help---
Support hosting virtualized guest machines. You will also
need to select one or more of the processor modules below.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
If unsure, say N.
config KVM_ARM_HOST
bool "KVM host support for ARM cpus."
depends on KVM
depends on MMU
select MMU_NOTIFIER
---help---
Provides host support for ARM processors.
config KVM_ARM_MAX_VCPUS
int "Number maximum supported virtual CPUs per VM"
depends on KVM_ARM_HOST
default 4
help
Static number of max supported virtual CPUs per VM.
If you choose a high number, the vcpu structures will be quite
large, so only choose a reasonable number that you expect to
actually use.
config KVM_ARM_VGIC
bool "KVM support for Virtual GIC"
depends on KVM_ARM_HOST && OF
select HAVE_KVM_IRQCHIP
default y
---help---
Adds support for a hardware assisted, in-kernel GIC emulation.
config KVM_ARM_TIMER
bool "KVM support for Architected Timers"
depends on KVM_ARM_VGIC && ARM_ARCH_TIMER
select HAVE_KVM_IRQCHIP
default y
---help---
Adds support for the Architected Timers in virtual machines
endif # VIRTUALIZATION
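
The help text above mentions the /dev/kvm character device. As a rough user-space sketch of probing it on a kernel built with these options (KVM_GET_API_VERSION and KVM_CREATE_VM are the standard first ioctls; the file name and minimal error handling are illustrative):

/* kvm_probe.c - minimal sketch: open /dev/kvm, check the API, create a VM. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	int api = ioctl(kvm, KVM_GET_API_VERSION, 0);	/* expected to be 12 */
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);		/* 0 = default machine type */
	printf("KVM API version %d, VM fd %d\n", api, vm);
	if (vm >= 0)
		close(vm);
	close(kvm);
	return 0;
}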

25
arch/arm/kvm/Makefile Normal file

@@ -0,0 +1,25 @@
#
# Makefile for Kernel-based Virtual Machine module
#
plus_virt := $(call as-instr,.arch_extension virt,+virt)
ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1
endif
ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
CFLAGS_arm.o := -I. $(plus_virt_def)
CFLAGS_mmu.o := -I.
AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
KVM := ../../../virt/kvm
kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o

1060
arch/arm/kvm/arm.c Normal file

File diff suppressed because it is too large

1304
arch/arm/kvm/coproc.c Normal file

File diff suppressed because it is too large

160
arch/arm/kvm/coproc.h Normal file

@@ -0,0 +1,160 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ARM_KVM_COPROC_LOCAL_H__
#define __ARM_KVM_COPROC_LOCAL_H__
struct coproc_params {
unsigned long CRn;
unsigned long CRm;
unsigned long Op1;
unsigned long Op2;
unsigned long Rt1;
unsigned long Rt2;
bool is_64bit;
bool is_write;
};
struct coproc_reg {
/* MRC/MCR/MRRC/MCRR instruction which accesses it. */
unsigned long CRn;
unsigned long CRm;
unsigned long Op1;
unsigned long Op2;
bool is_64;
/* Trapped access from guest, if non-NULL. */
bool (*access)(struct kvm_vcpu *,
const struct coproc_params *,
const struct coproc_reg *);
/* Initialization for vcpu. */
void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
/* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
unsigned long reg;
/* Value (usually reset value) */
u64 val;
};
static inline void print_cp_instr(const struct coproc_params *p)
{
/* Look, we even formatted it for you to paste into the table! */
if (p->is_64bit) {
kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
p->CRn, p->Op1, p->is_write ? "write" : "read");
} else {
kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
" func_%s },\n",
p->CRn, p->CRm, p->Op1, p->Op2,
p->is_write ? "write" : "read");
}
}
static inline bool ignore_write(struct kvm_vcpu *vcpu,
const struct coproc_params *p)
{
return true;
}
static inline bool read_zero(struct kvm_vcpu *vcpu,
const struct coproc_params *p)
{
*vcpu_reg(vcpu, p->Rt1) = 0;
return true;
}
static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
const struct coproc_params *params)
{
kvm_debug("CP15 write to read-only register at: %08lx\n",
*vcpu_pc(vcpu));
print_cp_instr(params);
return false;
}
static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
const struct coproc_params *params)
{
kvm_debug("CP15 read to write-only register at: %08lx\n",
*vcpu_pc(vcpu));
print_cp_instr(params);
return false;
}
/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
vcpu->arch.cp15[r->reg] = 0xdecafbad;
}
static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
vcpu->arch.cp15[r->reg] = r->val;
}
static inline void reset_unknown64(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));
vcpu->arch.cp15[r->reg] = 0xdecafbad;
vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
}
static inline int cmp_reg(const struct coproc_reg *i1,
const struct coproc_reg *i2)
{
BUG_ON(i1 == i2);
if (!i1)
return 1;
else if (!i2)
return -1;
if (i1->CRn != i2->CRn)
return i1->CRn - i2->CRn;
if (i1->CRm != i2->CRm)
return i1->CRm - i2->CRm;
if (i1->Op1 != i2->Op1)
return i1->Op1 - i2->Op1;
if (i1->Op2 != i2->Op2)
return i1->Op2 - i2->Op2;
return i2->is_64 - i1->is_64;
}
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
#define CRm64(_x) .CRn = _x, .CRm = 0
#define Op1(_x) .Op1 = _x
#define Op2(_x) .Op2 = _x
#define is64 .is_64 = true
#define is32 .is_64 = false
bool access_sctlr(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r);
#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
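
cmp_reg() above gives coproc_reg entries a total order: it is what keeps the per-target tables sorted and what a lookup can use to match a trapped access. A minimal illustrative lookup over such a table (find_coproc_reg is a hypothetical helper, not necessarily the in-tree one; because the tables are sorted, a real implementation could also binary-search):

/* Sketch: return the table entry whose CRn/CRm/Op1/Op2/is_64 match the key,
 * using cmp_reg() as the equality test. Illustrative only. */
static const struct coproc_reg *find_coproc_reg(const struct coproc_reg *key,
						const struct coproc_reg *table,
						unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (!cmp_reg(key, &table[i]))
			return &table[i];
	}
	return NULL;
}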

51
arch/arm/kvm/coproc_a15.c Normal file

@@ -0,0 +1,51 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Rusty Russell <rusty@rustcorp.au>
* Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <linux/init.h>
#include "coproc.h"
/*
* A15-specific CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
* user space API for 64-bit register access in line with the terminology used
* in the ARM ARM.
* Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
* registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
};
static struct kvm_coproc_target_table a15_target_table = {
.target = KVM_ARM_TARGET_CORTEX_A15,
.table = a15_regs,
.num = ARRAY_SIZE(a15_regs),
};
static int __init coproc_a15_init(void)
{
kvm_register_target_coproc_table(&a15_target_table);
return 0;
}
late_initcall(coproc_a15_init);

54
arch/arm/kvm/coproc_a7.c Normal file

@@ -0,0 +1,54 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Copyright (C) 2013 - ARM Ltd
*
* Authors: Rusty Russell <rusty@rustcorp.au>
* Christoffer Dall <c.dall@virtualopensystems.com>
* Jonathan Austin <jonathan.austin@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <linux/init.h>
#include "coproc.h"
/*
* Cortex-A7 specific CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
* user space API for 64-bit register access in line with the terminology used
* in the ARM ARM.
* Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
* registers preceding 32-bit ones.
*/
static const struct coproc_reg a7_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
};
static struct kvm_coproc_target_table a7_target_table = {
.target = KVM_ARM_TARGET_CORTEX_A7,
.table = a7_regs,
.num = ARRAY_SIZE(a7_regs),
};
static int __init coproc_a7_init(void)
{
kvm_register_target_coproc_table(&a7_target_table);
return 0;
}
late_initcall(coproc_a7_init);
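
Both target tables above are documented as "must be sorted ascending ... with 64-bit registers preceding 32-bit ones", which is exactly the order cmp_reg() in coproc.h defines. A hedged sketch of a one-time sanity check a registration path could run against that invariant (the helper is illustrative, not necessarily what the in-tree code does):

/* Sketch: true if the table is strictly ascending under cmp_reg(). */
static bool coproc_table_is_sorted(const struct coproc_reg *table,
				   unsigned int num)
{
	unsigned int i;

	for (i = 1; i < num; i++)
		if (cmp_reg(&table[i - 1], &table[i]) >= 0)
			return false;
	return true;
}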

402
arch/arm/kvm/emulate.c Normal file

@@ -0,0 +1,402 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>
#include "trace.h"
#define VCPU_NR_MODES 6
#define VCPU_REG_OFFSET_USR 0
#define VCPU_REG_OFFSET_FIQ 1
#define VCPU_REG_OFFSET_IRQ 2
#define VCPU_REG_OFFSET_SVC 3
#define VCPU_REG_OFFSET_ABT 4
#define VCPU_REG_OFFSET_UND 5
#define REG_OFFSET(_reg) \
(offsetof(struct kvm_regs, _reg) / sizeof(u32))
#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
/* USR/SYS Registers */
[VCPU_REG_OFFSET_USR] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
},
/* FIQ Registers */
[VCPU_REG_OFFSET_FIQ] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7),
REG_OFFSET(fiq_regs[0]), /* r8 */
REG_OFFSET(fiq_regs[1]), /* r9 */
REG_OFFSET(fiq_regs[2]), /* r10 */
REG_OFFSET(fiq_regs[3]), /* r11 */
REG_OFFSET(fiq_regs[4]), /* r12 */
REG_OFFSET(fiq_regs[5]), /* r13 */
REG_OFFSET(fiq_regs[6]), /* r14 */
},
/* IRQ Registers */
[VCPU_REG_OFFSET_IRQ] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(irq_regs[0]), /* r13 */
REG_OFFSET(irq_regs[1]), /* r14 */
},
/* SVC Registers */
[VCPU_REG_OFFSET_SVC] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(svc_regs[0]), /* r13 */
REG_OFFSET(svc_regs[1]), /* r14 */
},
/* ABT Registers */
[VCPU_REG_OFFSET_ABT] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(abt_regs[0]), /* r13 */
REG_OFFSET(abt_regs[1]), /* r14 */
},
/* UND Registers */
[VCPU_REG_OFFSET_UND] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(und_regs[0]), /* r13 */
REG_OFFSET(und_regs[1]), /* r14 */
},
};
/*
* Return a pointer to the register number valid in the current mode of
* the virtual CPU.
*/
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
case USR_MODE...SVC_MODE:
mode &= ~MODE32_BIT; /* 0 ... 3 */
break;
case ABT_MODE:
mode = VCPU_REG_OFFSET_ABT;
break;
case UND_MODE:
mode = VCPU_REG_OFFSET_UND;
break;
case SYSTEM_MODE:
mode = VCPU_REG_OFFSET_USR;
break;
default:
BUG();
}
return reg_array + vcpu_reg_offsets[mode][reg_num];
}
/*
* Return the SPSR for the current mode of the virtual CPU.
*/
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
{
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
case SVC_MODE:
return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
case ABT_MODE:
return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
case UND_MODE:
return &vcpu->arch.regs.KVM_ARM_UND_spsr;
case IRQ_MODE:
return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
case FIQ_MODE:
return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
default:
BUG();
}
}
/*
* A conditional instruction is allowed to trap, even though it
* wouldn't be executed. So let's re-implement the hardware, in
* software!
*/
bool kvm_condition_valid(struct kvm_vcpu *vcpu)
{
unsigned long cpsr, cond, insn;
/*
* Exception Code 0 can only happen if we set HCR.TGE to 1, to
* catch undefined instructions, and then we won't get past
* the arm_exit_handlers test anyway.
*/
BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
/* Top two bits non-zero? Unconditional. */
if (kvm_vcpu_get_hsr(vcpu) >> 30)
return true;
cpsr = *vcpu_cpsr(vcpu);
/* Is condition field valid? */
if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
else {
/* This can happen in Thumb mode: examine IT state. */
unsigned long it;
it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
/* it == 0 => unconditional. */
if (it == 0)
return true;
/* The cond for this insn works out as the top 4 bits. */
cond = (it >> 4);
}
/* Shift makes it look like an ARM-mode instruction */
insn = cond << 28;
return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
}
/**
* kvm_adjust_itstate - adjust ITSTATE when emulating instructions in an IT-block
* @vcpu: The VCPU pointer
*
* When exceptions occur while instructions are executed in Thumb IF-THEN
* blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
* to do this little bit of work manually. The fields map like this:
*
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
*/
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
unsigned long itbits, cond;
unsigned long cpsr = *vcpu_cpsr(vcpu);
bool is_arm = !(cpsr & PSR_T_BIT);
BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
if (!(cpsr & PSR_IT_MASK))
return;
cond = (cpsr & 0xe000) >> 13;
itbits = (cpsr & 0x1c00) >> (10 - 2);
itbits |= (cpsr & (0x3 << 25)) >> 25;
/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
if ((itbits & 0x7) == 0)
itbits = cond = 0;
else
itbits = (itbits << 1) & 0x1f;
cpsr &= ~PSR_IT_MASK;
cpsr |= cond << 13;
cpsr |= (itbits & 0x1c) << (10 - 2);
cpsr |= (itbits & 0x3) << 25;
*vcpu_cpsr(vcpu) = cpsr;
}
/**
* kvm_skip_instr - skip a trapped instruction and proceed to the next
* @vcpu: The vcpu pointer
*/
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
bool is_thumb;
is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
if (is_thumb && !is_wide_instr)
*vcpu_pc(vcpu) += 2;
else
*vcpu_pc(vcpu) += 4;
kvm_adjust_itstate(vcpu);
}
/******************************************************************************
* Inject exceptions into the guest
*/
static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
u32 vbar = vcpu->arch.cp15[c12_VBAR];
if (sctlr & SCTLR_V)
return 0xffff0000;
else /* always have security exceptions */
return vbar;
}
/**
* kvm_inject_undefined - inject an undefined exception into the guest
* @vcpu: The VCPU to receive the undefined exception
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
*
* Modelled after TakeUndefInstrException() pseudocode.
*/
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
unsigned long new_lr_value;
unsigned long new_spsr_value;
unsigned long cpsr = *vcpu_cpsr(vcpu);
u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset = 4;
u32 return_offset = (is_thumb) ? 2 : 4;
new_spsr_value = cpsr;
new_lr_value = *vcpu_pc(vcpu) - return_offset;
*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
*vcpu_cpsr(vcpu) |= PSR_I_BIT;
*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
if (sctlr & SCTLR_TE)
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
if (sctlr & SCTLR_EE)
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
/* Note: These now point to UND banked copies */
*vcpu_spsr(vcpu) = cpsr;
*vcpu_reg(vcpu, 14) = new_lr_value;
/* Branch to exception vector */
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}
/*
* Modelled after TakeDataAbortException() and TakePrefetchAbortException
* pseudocode.
*/
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
unsigned long new_lr_value;
unsigned long new_spsr_value;
unsigned long cpsr = *vcpu_cpsr(vcpu);
u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
bool is_thumb = (cpsr & PSR_T_BIT);
u32 vect_offset;
u32 return_offset = (is_thumb) ? 4 : 0;
bool is_lpae;
new_spsr_value = cpsr;
new_lr_value = *vcpu_pc(vcpu) + return_offset;
*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
if (sctlr & SCTLR_TE)
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
if (sctlr & SCTLR_EE)
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
/* Note: These now point to ABT banked copies */
*vcpu_spsr(vcpu) = cpsr;
*vcpu_reg(vcpu, 14) = new_lr_value;
if (is_pabt)
vect_offset = 12;
else
vect_offset = 16;
/* Branch to exception vector */
*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
if (is_pabt) {
/* Set IFAR and IFSR */
vcpu->arch.cp15[c6_IFAR] = addr;
is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
/* Always give debug fault for now - should give guest a clue */
if (is_lpae)
vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
else
vcpu->arch.cp15[c5_IFSR] = 2;
} else { /* !iabt */
/* Set DFAR and DFSR */
vcpu->arch.cp15[c6_DFAR] = addr;
is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
/* Always give debug fault for now - should give guest a clue */
if (is_lpae)
vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
else
vcpu->arch.cp15[c5_DFSR] = 2;
}
}
/**
* kvm_inject_dabt - inject a data abort into the guest
* @vcpu: The VCPU to receive the data abort
* @addr: The address to report in the DFAR
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
*/
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
inject_abt(vcpu, false, addr);
}
/**
* kvm_inject_pabt - inject a prefetch abort into the guest
* @vcpu: The VCPU to receive the prefetch abort
* @addr: The address to report in the IFAR
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
*/
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
inject_abt(vcpu, true, addr);
}
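
The vect_offset values hard-coded above (4 in kvm_inject_undefined(), 12 or 16 in inject_abt()) are the architectural offsets into the exception vector table that exc_vector_base() locates. A small reference sketch of that layout (the enum and its names are illustrative; only the offsets come from the architecture):

/* ARM exception vector slots, relative to exc_vector_base(). */
enum vect_offset {
	VECT_RESET = 0x00,
	VECT_UNDEF = 0x04,	/* used by kvm_inject_undefined()       */
	VECT_SVC   = 0x08,
	VECT_PABT  = 0x0c,	/* used by inject_abt(vcpu, true, ...)  */
	VECT_DABT  = 0x10,	/* used by inject_abt(vcpu, false, ...) */
	VECT_IRQ   = 0x18,
	VECT_FIQ   = 0x1c,
};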

335
arch/arm/kvm/guest.c Normal file

@@ -0,0 +1,335 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
return 0;
}
static u64 core_reg_offset_from_id(u64 id)
{
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
struct kvm_regs *regs = &vcpu->arch.regs;
u64 off;
if (KVM_REG_SIZE(reg->id) != 4)
return -ENOENT;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
return -ENOENT;
return put_user(((u32 *)regs)[off], uaddr);
}
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
struct kvm_regs *regs = &vcpu->arch.regs;
u64 off, val;
if (KVM_REG_SIZE(reg->id) != 4)
return -ENOENT;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
return -ENOENT;
if (get_user(val, uaddr) != 0)
return -EFAULT;
if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
unsigned long mode = val & MODE_MASK;
switch (mode) {
case USR_MODE:
case FIQ_MODE:
case IRQ_MODE:
case SVC_MODE:
case ABT_MODE:
case UND_MODE:
break;
default:
return -EINVAL;
}
}
((u32 *)regs)[off] = val;
return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
#ifndef CONFIG_KVM_ARM_TIMER
#define NUM_TIMER_REGS 0
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
return 0;
}
static bool is_timer_reg(u64 index)
{
return false;
}
#else
#define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index)
{
switch (index) {
case KVM_REG_ARM_TIMER_CTL:
case KVM_REG_ARM_TIMER_CNT:
case KVM_REG_ARM_TIMER_CVAL:
return true;
}
return false;
}
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
return -EFAULT;
uindices++;
if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
return -EFAULT;
uindices++;
if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
return -EFAULT;
return 0;
}
#endif
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
int ret;
ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
if (ret != 0)
return -EFAULT;
return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
}
static unsigned long num_core_regs(void)
{
return sizeof(struct kvm_regs) / sizeof(u32);
}
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
* This is for all registers.
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+ NUM_TIMER_REGS;
}
/**
* kvm_arm_copy_reg_indices - get indices of all registers.
*
* We do core registers right here, then we append coproc regs.
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
int ret;
for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
if (put_user(core_reg | i, uindices))
return -EFAULT;
uindices++;
}
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
uindices += NUM_TIMER_REGS;
return kvm_arm_copy_coproc_indices(vcpu, uindices);
}
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/* We currently use nothing arch-specific in upper 32 bits */
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
return -EINVAL;
/* Register group 16 means we want a core register. */
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
return kvm_arm_coproc_get_reg(vcpu, reg);
}
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/* We currently use nothing arch-specific in upper 32 bits */
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
return -EINVAL;
/* Register group 16 means we set a core register. */
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
return kvm_arm_coproc_set_reg(vcpu, reg);
}
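
The register IDs accepted by kvm_arm_get_reg()/kvm_arm_set_reg() above are built as KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | <word index into struct kvm_regs>. A hedged user-space sketch of fetching one core register through KVM_GET_ONE_REG (the helper name is illustrative; index 0 is the first word of usr_regs, i.e. guest r0, and the vcpu fd is assumed to come from KVM_CREATE_VCPU):

/* Sketch: read guest r0 from an existing vcpu fd via KVM_GET_ONE_REG. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_r0(int vcpu_fd, uint32_t *out)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | 0,
		.addr = (uintptr_t)out,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* 0 on success */
}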
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int __attribute_const__ kvm_target_cpu(void)
{
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A7:
return KVM_ARM_TARGET_CORTEX_A7;
case ARM_CPU_PART_CORTEX_A15:
return KVM_ARM_TARGET_CORTEX_A15;
default:
return -EINVAL;
}
}
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init)
{
unsigned int i;
/* We can only cope with guest==host and only on A15/A7 (for now). */
if (init->target != kvm_target_cpu())
return -EINVAL;
vcpu->arch.target = init->target;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
for (i = 0; i < sizeof(init->features) * 8; i++) {
if (test_bit(i, (void *)init->features)) {
if (i >= KVM_VCPU_MAX_FEATURES)
return -ENOENT;
set_bit(i, vcpu->arch.features);
}
}
/* Now we know what it is, we can reset it. */
return kvm_reset_vcpu(vcpu);
}
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
int target = kvm_target_cpu();
if (target < 0)
return -ENODEV;
memset(init, 0, sizeof(*init));
/*
* For now, we don't return any features.
* In future, we might use features to return target
* specific features available for the preferred
* target type.
*/
init->target = (__u32)target;
return 0;
}
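
kvm_vcpu_preferred_target() and kvm_vcpu_set_target() above back the KVM_ARM_PREFERRED_TARGET (VM fd) and KVM_ARM_VCPU_INIT (vcpu fd) ioctls. A hedged user-space sketch of the usual pairing (helper name illustrative; both fds assumed to exist already):

/* Sketch: ask the kernel for the preferred target, then init the vcpu with it. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_vcpu(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;
	/* init.features[] comes back zeroed; optional feature bits go here. */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}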
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return -EINVAL;
}

171
arch/arm/kvm/handle_exit.c Normal file

@@ -0,0 +1,171 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
#include <trace/events/kvm.h>
#include "trace.h"
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
/* SVC called from Hyp mode should never get here */
kvm_debug("SVC called from Hyp mode shouldn't go here\n");
BUG();
return -EINVAL; /* Squash warning */
}
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
kvm_vcpu_hvc_get_imm(vcpu));
ret = kvm_psci_call(vcpu);
if (ret < 0) {
kvm_inject_undefined(vcpu);
return 1;
}
return ret;
}
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
kvm_inject_undefined(vcpu);
return 1;
}
static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
/* The hypervisor should never cause aborts */
kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
return -EFAULT;
}
static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
/* This is either an error in the world-switch code or an external abort */
kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
return -EFAULT;
}
/**
* kvm_handle_wfx - handle a WFI or WFE instruction trapped in a guest
* @vcpu: the vcpu pointer
* @run: the kvm_run structure pointer
*
* WFE: Yield the CPU and come back to this vcpu when the scheduler
* decides to.
* WFI: Simply call kvm_vcpu_block(), which will halt execution of
* world-switches and schedule other host processes until there is an
* incoming IRQ or FIQ to the VM.
*/
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
trace_kvm_wfi(*vcpu_pc(vcpu));
if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
kvm_vcpu_on_spin(vcpu);
else
kvm_vcpu_block(vcpu);
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
[HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
[HSR_EC_CP14_MR] = kvm_handle_cp14_access,
[HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
[HSR_EC_CP14_64] = kvm_handle_cp14_access,
[HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
[HSR_EC_CP10_ID] = kvm_handle_cp10_id,
[HSR_EC_SVC_HYP] = handle_svc_hyp,
[HSR_EC_HVC] = handle_hvc,
[HSR_EC_SMC] = handle_smc,
[HSR_EC_IABT] = kvm_handle_guest_abort,
[HSR_EC_IABT_HYP] = handle_pabt_hyp,
[HSR_EC_DABT] = kvm_handle_guest_abort,
[HSR_EC_DABT_HYP] = handle_dabt_hyp,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
kvm_err("Unknown exception class: hsr: %#08x\n",
(unsigned int)kvm_vcpu_get_hsr(vcpu));
BUG();
}
return arm_exit_handlers[hsr_ec];
}
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
*/
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index)
{
exit_handle_fn exit_handler;
switch (exception_index) {
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_UNDEFINED:
kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
kvm_vcpu_get_hyp_pc(vcpu));
BUG();
panic("KVM: Hypervisor undefined exception!\n");
case ARM_EXCEPTION_DATA_ABORT:
case ARM_EXCEPTION_PREF_ABORT:
case ARM_EXCEPTION_HVC:
/*
* See ARM ARM B1.14.1: "Hyp traps on instructions
* that fail their condition code check"
*/
if (!kvm_condition_valid(vcpu)) {
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
exit_handler = kvm_get_exit_handler(vcpu);
return exit_handler(vcpu, run);
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return 0;
}
}
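
handle_exit()'s return convention (> 0 run the guest again, 0 return to user space with run->exit_reason set, < 0 error) is what the user-space KVM_RUN loop ends up reacting to. A hedged sketch of that consumer side (helper name illustrative; vcpu_fd and the mmap'ed struct kvm_run are assumed to be set up already):

/* Sketch: basic vcpu run loop keyed off the exit reasons set above. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			/* emulate the access described by run->mmio here */
			break;
		case KVM_EXIT_INTERNAL_ERROR:	/* e.g. the default case above */
			return -1;
		default:
			break;
		}
	}
}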

159
arch/arm/kvm/init.S Normal file

@@ -0,0 +1,159 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unified.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/********************************************************************
* Hypervisor initialization
* - should be called with:
* r0 = top of Hyp stack (kernel VA)
* r1 = pointer to hyp vectors
* r2,r3 = Hypervisor pgd pointer
*
* The init scenario is:
* - We jump in HYP with four parameters: boot HYP pgd, runtime HYP pgd,
* runtime stack, runtime vectors
* - Enable the MMU with the boot pgd
* - Jump to a target into the trampoline page (remember, this is the same
* physical page!)
* - Now switch to the runtime pgd (same VA, and still the same physical
* page!)
* - Invalidate TLBs
* - Set stack and vectors
* - Profit! (or eret, if you only care about the code).
*
* As we only have four registers available to pass parameters (and we
* need six), we split the init in two phases:
* - Phase 1: r0 = 0, r1 = 0, r2,r3 contain the boot PGD.
* Provides the basic HYP init, and enables the MMU.
* - Phase 2: r0 = ToS, r1 = vectors, r2,r3 contain the runtime PGD.
* Switches to the runtime PGD, sets the stack and vectors.
*/
.text
.pushsection .hyp.idmap.text,"ax"
.align 5
__kvm_hyp_init:
.globl __kvm_hyp_init
@ Hyp-mode exception vector
W(b) .
W(b) .
W(b) .
W(b) .
W(b) .
W(b) __do_hyp_init
W(b) .
W(b) .
__do_hyp_init:
cmp r0, #0 @ We have a SP?
bne phase2 @ Yes, second stage init
@ Set the HTTBR to point to the hypervisor PGD pointer passed
mcrr p15, 4, rr_lo_hi(r2, r3), c2
@ Set the HTCR and VTCR to the same shareability and cacheability
@ settings as the non-secure TTBCR and with T0SZ == 0.
mrc p15, 4, r0, c2, c0, 2 @ HTCR
ldr r2, =HTCR_MASK
bic r0, r0, r2
mrc p15, 0, r1, c2, c0, 2 @ TTBCR
and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
orr r0, r0, r1
mcr p15, 4, r0, c2, c0, 2 @ HTCR
mrc p15, 4, r1, c2, c1, 2 @ VTCR
ldr r2, =VTCR_MASK
bic r1, r1, r2
bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits
orr r1, r0, r1
orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
mcr p15, 4, r1, c2, c1, 2 @ VTCR
@ Use the same memory attributes for hyp. accesses as the kernel
@ (copy MAIRx ro HMAIRx).
mrc p15, 0, r0, c10, c2, 0
mcr p15, 4, r0, c10, c2, 0
mrc p15, 0, r0, c10, c2, 1
mcr p15, 4, r0, c10, c2, 1
@ Invalidate the stale TLBs from Bootloader
mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
dsb ish
@ Set the HSCTLR to:
@ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
@ - Endianness: Kernel config
@ - Fast Interrupt Features: Kernel config
@ - Write permission implies XN: disabled
@ - Instruction cache: enabled
@ - Data/Unified cache: enabled
@ - Memory alignment checks: enabled
@ - MMU: enabled (this code must be run from an identity mapping)
mrc p15, 4, r0, c1, c0, 0 @ HSCTLR
ldr r2, =HSCTLR_MASK
bic r0, r0, r2
mrc p15, 0, r1, c1, c0, 0 @ SCTLR
ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
and r1, r1, r2
ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
orr r1, r1, r2
orr r0, r0, r1
isb
mcr p15, 4, r0, c1, c0, 0 @ HSCTLR
@ End of init phase-1
eret
phase2:
@ Set stack pointer
mov sp, r0
@ Set HVBAR to point to the HYP vectors
mcr p15, 4, r1, c12, c0, 0 @ HVBAR
@ Jump to the trampoline page
ldr r0, =TRAMPOLINE_VA
adr r1, target
bfi r0, r1, #0, #PAGE_SHIFT
ret r0
target: @ We're now in the trampoline code, switch page tables
mcrr p15, 4, rr_lo_hi(r2, r3), c2
isb
@ Invalidate the old TLBs
mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
dsb ish
eret
.ltorg
.globl __kvm_hyp_init_end
__kvm_hyp_init_end:
.popsection

515
arch/arm/kvm/interrupts.S Normal file

@@ -0,0 +1,515 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/unified.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/vfpmacros.h>
#include "interrupts_head.S"
.text
__kvm_hyp_code_start:
.globl __kvm_hyp_code_start
/********************************************************************
* Flush per-VMID TLBs
*
* void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
*
* We rely on the hardware to broadcast the TLB invalidation to all CPUs
* inside the inner-shareable domain (which is the case for all v7
* implementations). If we come across a non-IS SMP implementation, we'll
* have to use an IPI based mechanism. Until then, we stick to the simple
* hardware assisted version.
*
* As v7 does not support flushing per IPA, just nuke the whole TLB
* instead, ignoring the ipa value.
*/
ENTRY(__kvm_tlb_flush_vmid_ipa)
push {r2, r3}
dsb ishst
add r0, r0, #KVM_VTTBR
ldrd r2, r3, [r0]
mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
isb
mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
dsb ish
isb
mov r2, #0
mov r3, #0
mcrr p15, 6, r2, r3, c2 @ Back to VMID #0
isb @ Not necessary if followed by eret
pop {r2, r3}
bx lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)
/********************************************************************
* Flush TLBs and instruction caches of all CPUs inside the inner-shareable
* domain, for all VMIDs
*
* void __kvm_flush_vm_context(void);
*/
ENTRY(__kvm_flush_vm_context)
mov r0, #0 @ rn parameter for c15 flushes is SBZ
/* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
mcr p15, 4, r0, c8, c3, 4
/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
mcr p15, 0, r0, c7, c1, 0
dsb ish
isb @ Not necessary if followed by eret
bx lr
ENDPROC(__kvm_flush_vm_context)
/********************************************************************
* Hypervisor world-switch code
*
*
* int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
*/
ENTRY(__kvm_vcpu_run)
@ Save the vcpu pointer
mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR
save_host_regs
restore_vgic_state
restore_timer_state
@ Store hardware CP15 state and load guest state
read_cp15_state store_to_vcpu = 0
write_cp15_state read_from_vcpu = 1
@ If the host kernel has not been configured with VFPv3 support,
@ then it is safer if we prevent guests from using it as well.
#ifdef CONFIG_VFPv3
@ Set FPEXC_EN so the guest doesn't trap floating point instructions
VFPFMRX r2, FPEXC @ VMRS
push {r2}
orr r2, r2, #FPEXC_EN
VFPFMXR FPEXC, r2 @ VMSR
#endif
@ Configure Hyp-role
configure_hyp_role vmentry
@ Trap coprocessor CRx accesses
set_hstr vmentry
set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
set_hdcr vmentry
@ Write configured ID register into MIDR alias
ldr r1, [vcpu, #VCPU_MIDR]
mcr p15, 4, r1, c0, c0, 0
@ Write guest view of MPIDR into VMPIDR
ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
mcr p15, 4, r1, c0, c0, 5
@ Set up guest memory translation
ldr r1, [vcpu, #VCPU_KVM]
add r1, r1, #KVM_VTTBR
ldrd r2, r3, [r1]
mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
@ We're all done, just restore the GPRs and go to the guest
restore_guest_regs
clrex @ Clear exclusive monitor
eret
__kvm_vcpu_return:
/*
* return convention:
* guest r0, r1, r2 saved on the stack
* r0: vcpu pointer
* r1: exception code
*/
save_guest_regs
@ Set VMID == 0
mov r2, #0
mov r3, #0
mcrr p15, 6, r2, r3, c2 @ Write VTTBR
@ Don't trap coprocessor accesses for host kernel
set_hstr vmexit
set_hdcr vmexit
set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
#ifdef CONFIG_VFPv3
@ Save floating point registers if we let the guest use them.
tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
bne after_vfp_restore
@ Switch VFP/NEON hardware state to the host's
add r7, vcpu, #VCPU_VFP_GUEST
store_vfp_state r7
add r7, vcpu, #VCPU_VFP_HOST
ldr r7, [r7]
restore_vfp_state r7
after_vfp_restore:
@ Restore FPEXC_EN which we clobbered on entry
pop {r2}
VFPFMXR FPEXC, r2
#endif
@ Reset Hyp-role
configure_hyp_role vmexit
@ Let host read hardware MIDR
mrc p15, 0, r2, c0, c0, 0
mcr p15, 4, r2, c0, c0, 0
@ Back to hardware MPIDR
mrc p15, 0, r2, c0, c0, 5
mcr p15, 4, r2, c0, c0, 5
@ Store guest CP15 state and restore host state
read_cp15_state store_to_vcpu = 1
write_cp15_state read_from_vcpu = 0
save_timer_state
save_vgic_state
restore_host_regs
clrex @ Clear exclusive monitor
#ifndef CONFIG_CPU_ENDIAN_BE8
mov r0, r1 @ Return the return code
mov r1, #0 @ Clear upper bits in return value
#else
@ r1 already has return code
mov r0, #0 @ Clear upper bits in return value
#endif /* CONFIG_CPU_ENDIAN_BE8 */
bx lr @ return to IOCTL
/********************************************************************
* Call function in Hyp mode
*
*
* u64 kvm_call_hyp(void *hypfn, ...);
*
* This is not really a variadic function in the classic C-way and care must
* be taken when calling this to ensure parameters are passed in registers
* only, since the stack will change between the caller and the callee.
*
* Call the function with the first argument containing a pointer to the
* function you wish to call in Hyp mode, and subsequent arguments will be
* passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
* function pointer can be passed). The function being called must be mapped
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
* passed in r0 and r1.
*
* A function pointer with a value of 0xffffffff has a special meaning,
* and is used to implement __hyp_get_vectors in the same way as in
* arch/arm/kernel/hyp_stub.S.
*
* The calling convention follows the standard AAPCS:
* r0 - r3: caller save
* r12: caller save
* rest: callee save
*/
ENTRY(kvm_call_hyp)
hvc #0
bx lr
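
The convention documented above is what C callers rely on: the HYP-mapped function pointer goes first and the remaining arguments reach the HYP function in r0-r2. A hedged kernel-side sketch of such a caller (the wrapper name is illustrative; __kvm_tlb_flush_vmid_ipa is the HYP routine defined earlier in this file, and the declarations would come from <linux/kvm_host.h> and <asm/kvm_asm.h>):

/* Sketch: C-side use of kvm_call_hyp() per the convention above. */
static void flush_guest_tlb(struct kvm *kvm, phys_addr_t ipa)
{
	/* kvm ends up in r0 and ipa in r1 inside __kvm_tlb_flush_vmid_ipa. */
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}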
/********************************************************************
* Hypervisor exception vector and handlers
*
*
* The KVM/ARM Hypervisor ABI is defined as follows:
*
* Entry to Hyp mode from the host kernel will happen _only_ when an HVC
* instruction is issued since all traps are disabled when running the host
* kernel as per the Hyp-mode initialization at boot time.
*
* HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
* below) when the HVC instruction is called from SVC mode (i.e. a guest or the
* host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
* instructions are called from within Hyp-mode.
*
* Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
* Switching to Hyp mode is done through a simple HVC #0 instruction. The
* exception vector code will check that the HVC comes from VMID==0 and if
* so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
* - r0 contains a pointer to a HYP function
* - r1, r2, and r3 contain arguments to the above function.
* - The HYP function will be called with its arguments in r0, r1 and r2.
* On HYP function return, we return directly to SVC.
*
* Note that the above is used to execute code in Hyp-mode from a host-kernel
* point of view, and is a different concept from performing a world-switch and
* executing guest code SVC mode (with a VMID != 0).
*/
/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
.macro bad_exception exception_code, panic_str
push {r0-r2}
mrrc p15, 6, r0, r1, c2 @ Read VTTBR
lsr r1, r1, #16
ands r1, r1, #0xff
beq 99f
load_vcpu @ Load VCPU pointer
.if \exception_code == ARM_EXCEPTION_DATA_ABORT
mrc p15, 4, r2, c5, c2, 0 @ HSR
mrc p15, 4, r1, c6, c0, 0 @ HDFAR
str r2, [vcpu, #VCPU_HSR]
str r1, [vcpu, #VCPU_HxFAR]
.endif
.if \exception_code == ARM_EXCEPTION_PREF_ABORT
mrc p15, 4, r2, c5, c2, 0 @ HSR
mrc p15, 4, r1, c6, c0, 2 @ HIFAR
str r2, [vcpu, #VCPU_HSR]
str r1, [vcpu, #VCPU_HxFAR]
.endif
mov r1, #\exception_code
b __kvm_vcpu_return
@ We were in the host already. Let's craft a panic-ing return to SVC.
99: mrs r2, cpsr
bic r2, r2, #MODE_MASK
orr r2, r2, #SVC_MODE
THUMB( orr r2, r2, #PSR_T_BIT )
msr spsr_cxsf, r2
mrs r1, ELR_hyp
ldr r2, =BSYM(panic)
msr ELR_hyp, r2
ldr r0, =\panic_str
clrex @ Clear exclusive monitor
eret
.endm
.text
.align 5
__kvm_hyp_vector:
.globl __kvm_hyp_vector
@ Hyp-mode exception vector
W(b) hyp_reset
W(b) hyp_undef
W(b) hyp_svc
W(b) hyp_pabt
W(b) hyp_dabt
W(b) hyp_hvc
W(b) hyp_irq
W(b) hyp_fiq
.align
hyp_reset:
b hyp_reset
.align
hyp_undef:
bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
.align
hyp_svc:
bad_exception ARM_EXCEPTION_HVC, svc_die_str
.align
hyp_pabt:
bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
.align
hyp_dabt:
bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
.align
hyp_hvc:
/*
* Getting here is either because of a trap from a guest or from calling
* HVC from the host kernel, which means "switch to Hyp mode".
*/
push {r0, r1, r2}
@ Check syndrome register
mrc p15, 4, r1, c5, c2, 0 @ HSR
lsr r0, r1, #HSR_EC_SHIFT
#ifdef CONFIG_VFPv3
cmp r0, #HSR_EC_CP_0_13
beq switch_to_guest_vfp
#endif
cmp r0, #HSR_EC_HVC
bne guest_trap @ Not HVC instr.
/*
* Let's check if the HVC came from VMID 0 and allow simple
* switch to Hyp mode
*/
mrrc p15, 6, r0, r2, c2
lsr r2, r2, #16
and r2, r2, #0xff
cmp r2, #0
bne guest_trap @ Guest called HVC
host_switch_to_hyp:
pop {r0, r1, r2}
/* Check for __hyp_get_vectors */
cmp r0, #-1
mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
beq 1f
push {lr}
mrs lr, SPSR
push {lr}
mov lr, r0
mov r0, r1
mov r1, r2
mov r2, r3
THUMB( orr lr, #1)
blx lr @ Call the HYP function
pop {lr}
msr SPSR_csxf, lr
pop {lr}
1: eret
guest_trap:
load_vcpu @ Load VCPU pointer to r0
str r1, [vcpu, #VCPU_HSR]
@ Check if we need the fault information
lsr r1, r1, #HSR_EC_SHIFT
cmp r1, #HSR_EC_IABT
mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
beq 2f
cmp r1, #HSR_EC_DABT
bne 1f
mrc p15, 4, r2, c6, c0, 0 @ HDFAR
2: str r2, [vcpu, #VCPU_HxFAR]
/*
* B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
*
* Abort on the stage 2 translation for a memory access from a
* Non-secure PL1 or PL0 mode:
*
* For any Access flag fault or Translation fault, and also for any
* Permission fault on the stage 2 translation of a memory access
* made as part of a translation table walk for a stage 1 translation,
* the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
* is UNKNOWN.
*/
/* Check for permission fault, and S1PTW */
mrc p15, 4, r1, c5, c2, 0 @ HSR
and r0, r1, #HSR_FSC_TYPE
cmp r0, #FSC_PERM
tsteq r1, #(1 << 7) @ S1PTW
mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
bne 3f
/* Preserve PAR */
mrrc p15, 0, r0, r1, c7 @ PAR
push {r0, r1}
/* Resolve IPA using the xFAR */
mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
isb
mrrc p15, 0, r0, r1, c7 @ PAR
tst r0, #1
bne 4f @ Failed translation
ubfx r2, r0, #12, #20
lsl r2, r2, #4
orr r2, r2, r1, lsl #24
/* Restore PAR */
pop {r0, r1}
mcrr p15, 0, r0, r1, c7 @ PAR
3: load_vcpu @ Load VCPU pointer to r0
str r2, [r0, #VCPU_HPFAR]
1: mov r1, #ARM_EXCEPTION_HVC
b __kvm_vcpu_return
4: pop {r0, r1} @ Failed translation, return to guest
mcrr p15, 0, r0, r1, c7 @ PAR
clrex
pop {r0, r1, r2}
eret
/*
* If VFPv3 support is not available, then we will not switch the VFP
* registers; however cp10 and cp11 accesses will still trap and fallback
* to the regular coprocessor emulation code, which currently will
* inject an undefined exception to the guest.
*/
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
load_vcpu @ Load VCPU pointer to r0
push {r3-r7}
@ NEON/VFP used. Turn on VFP access.
set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
@ Switch VFP/NEON hardware state to the guest's
add r7, r0, #VCPU_VFP_HOST
ldr r7, [r7]
store_vfp_state r7
add r7, r0, #VCPU_VFP_GUEST
restore_vfp_state r7
pop {r3-r7}
pop {r0-r2}
clrex
eret
#endif
.align
hyp_irq:
push {r0, r1, r2}
mov r1, #ARM_EXCEPTION_IRQ
load_vcpu @ Load VCPU pointer to r0
b __kvm_vcpu_return
.align
hyp_fiq:
b hyp_fiq
.ltorg
__kvm_hyp_code_end:
.globl __kvm_hyp_code_end
.section ".rodata"
und_die_str:
.ascii "unexpected undefined exception in Hyp mode at: %#08x\n"
pabt_die_str:
.ascii "unexpected prefetch abort in Hyp mode at: %#08x\n"
dabt_die_str:
.ascii "unexpected data abort in Hyp mode at: %#08x\n"
svc_die_str:
.ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n"

641
arch/arm/kvm/interrupts_head.S Normal file

@@ -0,0 +1,641 @@
#include <linux/irqchip/arm-gic.h>
#include <asm/assembler.h>
#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
#define VCPU_USR_SP (VCPU_USR_REG(13))
#define VCPU_USR_LR (VCPU_USR_REG(14))
#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
/*
* Many of these macros need to access the VCPU structure, which is always
* held in r0. These macros should never clobber r1, as it is used to hold the
* exception code on the return path (except of course the macro that switches
* all the registers before the final jump to the VM).
*/
vcpu .req r0 @ vcpu pointer always in r0
/* Clobbers {r2-r6} */
.macro store_vfp_state vfp_base
@ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
VFPFMRX r2, FPEXC
@ Make sure VFP is enabled so we can touch the registers.
orr r6, r2, #FPEXC_EN
VFPFMXR FPEXC, r6
VFPFMRX r3, FPSCR
tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
beq 1f
@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
@ we only need to save them if FPEXC_EX is set.
VFPFMRX r4, FPINST
tst r2, #FPEXC_FP2V
VFPFMRX r5, FPINST2, ne @ vmrsne
bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
VFPFMXR FPEXC, r6
1:
VFPFSTMIA \vfp_base, r6 @ Save VFP registers
stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
.endm
/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
.macro restore_vfp_state vfp_base
VFPFLDMIA \vfp_base, r6 @ Load VFP registers
ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2
VFPFMXR FPSCR, r3
tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
beq 1f
VFPFMXR FPINST, r4
tst r2, #FPEXC_FP2V
VFPFMXR FPINST2, r5, ne
1:
VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
.endm
/* These are simply for the macros to work - values don't have meaning */
.equ usr, 0
.equ svc, 1
.equ abt, 2
.equ und, 3
.equ irq, 4
.equ fiq, 5
.macro push_host_regs_mode mode
mrs r2, SP_\mode
mrs r3, LR_\mode
mrs r4, SPSR_\mode
push {r2, r3, r4}
.endm
/*
* Store all host persistent registers on the stack.
* Clobbers all registers, in all modes, except r0 and r1.
*/
.macro save_host_regs
/* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
mrs r2, ELR_hyp
push {r2}
/* usr regs */
push {r4-r12} @ r0-r3 are always clobbered
mrs r2, SP_usr
mov r3, lr
push {r2, r3}
push_host_regs_mode svc
push_host_regs_mode abt
push_host_regs_mode und
push_host_regs_mode irq
/* fiq regs */
mrs r2, r8_fiq
mrs r3, r9_fiq
mrs r4, r10_fiq
mrs r5, r11_fiq
mrs r6, r12_fiq
mrs r7, SP_fiq
mrs r8, LR_fiq
mrs r9, SPSR_fiq
push {r2-r9}
.endm
.macro pop_host_regs_mode mode
pop {r2, r3, r4}
msr SP_\mode, r2
msr LR_\mode, r3
msr SPSR_\mode, r4
.endm
/*
* Restore all host registers from the stack.
* Clobbers all registers, in all modes, except r0 and r1.
*/
.macro restore_host_regs
pop {r2-r9}
msr r8_fiq, r2
msr r9_fiq, r3
msr r10_fiq, r4
msr r11_fiq, r5
msr r12_fiq, r6
msr SP_fiq, r7
msr LR_fiq, r8
msr SPSR_fiq, r9
pop_host_regs_mode irq
pop_host_regs_mode und
pop_host_regs_mode abt
pop_host_regs_mode svc
pop {r2, r3}
msr SP_usr, r2
mov lr, r3
pop {r4-r12}
pop {r2}
msr ELR_hyp, r2
.endm
/*
* Restore SP, LR and SPSR for a given mode. offset is the offset of
* this mode's registers from the VCPU base.
*
* Assumes vcpu pointer in vcpu reg
*
* Clobbers r1, r2, r3, r4.
*/
.macro restore_guest_regs_mode mode, offset
add r1, vcpu, \offset
ldm r1, {r2, r3, r4}
msr SP_\mode, r2
msr LR_\mode, r3
msr SPSR_\mode, r4
.endm
/*
* Restore all guest registers from the vcpu struct.
*
* Assumes vcpu pointer in vcpu reg
*
* Clobbers *all* registers.
*/
.macro restore_guest_regs
restore_guest_regs_mode svc, #VCPU_SVC_REGS
restore_guest_regs_mode abt, #VCPU_ABT_REGS
restore_guest_regs_mode und, #VCPU_UND_REGS
restore_guest_regs_mode irq, #VCPU_IRQ_REGS
add r1, vcpu, #VCPU_FIQ_REGS
ldm r1, {r2-r9}
msr r8_fiq, r2
msr r9_fiq, r3
msr r10_fiq, r4
msr r11_fiq, r5
msr r12_fiq, r6
msr SP_fiq, r7
msr LR_fiq, r8
msr SPSR_fiq, r9
@ Load return state
ldr r2, [vcpu, #VCPU_PC]
ldr r3, [vcpu, #VCPU_CPSR]
msr ELR_hyp, r2
msr SPSR_cxsf, r3
@ Load user registers
ldr r2, [vcpu, #VCPU_USR_SP]
ldr r3, [vcpu, #VCPU_USR_LR]
msr SP_usr, r2
mov lr, r3
add vcpu, vcpu, #(VCPU_USR_REGS)
ldm vcpu, {r0-r12}
.endm
/*
* Save SP, LR and SPSR for a given mode. offset is the offset of
* this mode's registers from the VCPU base.
*
* Assumes vcpu pointer in vcpu reg
*
* Clobbers r2, r3, r4, r5.
*/
.macro save_guest_regs_mode mode, offset
add r2, vcpu, \offset
mrs r3, SP_\mode
mrs r4, LR_\mode
mrs r5, SPSR_\mode
stm r2, {r3, r4, r5}
.endm
/*
* Save all guest registers to the vcpu struct
* Expects guest's r0, r1, r2 on the stack.
*
* Assumes vcpu pointer in vcpu reg
*
* Clobbers r2, r3, r4, r5.
*/
.macro save_guest_regs
@ Store usr registers
add r2, vcpu, #VCPU_USR_REG(3)
stm r2, {r3-r12}
add r2, vcpu, #VCPU_USR_REG(0)
pop {r3, r4, r5} @ r0, r1, r2
stm r2, {r3, r4, r5}
mrs r2, SP_usr
mov r3, lr
str r2, [vcpu, #VCPU_USR_SP]
str r3, [vcpu, #VCPU_USR_LR]
@ Store return state
mrs r2, ELR_hyp
mrs r3, spsr
str r2, [vcpu, #VCPU_PC]
str r3, [vcpu, #VCPU_CPSR]
@ Store other guest registers
save_guest_regs_mode svc, #VCPU_SVC_REGS
save_guest_regs_mode abt, #VCPU_ABT_REGS
save_guest_regs_mode und, #VCPU_UND_REGS
save_guest_regs_mode irq, #VCPU_IRQ_REGS
.endm
/* Reads cp15 registers from hardware and stores them in memory
* @store_to_vcpu: If 0, registers are written in-order to the stack,
* otherwise to the VCPU struct pointed to by vcpup
*
* Assumes vcpu pointer in vcpu reg
*
* Clobbers r2 - r12
*/
.macro read_cp15_state store_to_vcpu
mrc p15, 0, r2, c1, c0, 0 @ SCTLR
mrc p15, 0, r3, c1, c0, 2 @ CPACR
mrc p15, 0, r4, c2, c0, 2 @ TTBCR
mrc p15, 0, r5, c3, c0, 0 @ DACR
mrrc p15, 0, r6, r7, c2 @ TTBR 0
mrrc p15, 1, r8, r9, c2 @ TTBR 1
mrc p15, 0, r10, c10, c2, 0 @ PRRR
mrc p15, 0, r11, c10, c2, 1 @ NMRR
mrc p15, 2, r12, c0, c0, 0 @ CSSELR
.if \store_to_vcpu == 0
push {r2-r12} @ Push CP15 registers
.else
str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
str r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
str r5, [vcpu, #CP15_OFFSET(c3_DACR)]
add r2, vcpu, #CP15_OFFSET(c2_TTBR0)
strd r6, r7, [r2]
add r2, vcpu, #CP15_OFFSET(c2_TTBR1)
strd r8, r9, [r2]
str r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
str r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
.endif
mrc p15, 0, r2, c13, c0, 1 @ CID
mrc p15, 0, r3, c13, c0, 2 @ TID_URW
mrc p15, 0, r4, c13, c0, 3 @ TID_URO
mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV
mrc p15, 0, r6, c5, c0, 0 @ DFSR
mrc p15, 0, r7, c5, c0, 1 @ IFSR
mrc p15, 0, r8, c5, c1, 0 @ ADFSR
mrc p15, 0, r9, c5, c1, 1 @ AIFSR
mrc p15, 0, r10, c6, c0, 0 @ DFAR
mrc p15, 0, r11, c6, c0, 2 @ IFAR
mrc p15, 0, r12, c12, c0, 0 @ VBAR
.if \store_to_vcpu == 0
push {r2-r12} @ Push CP15 registers
.else
str r2, [vcpu, #CP15_OFFSET(c13_CID)]
str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
str r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
str r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
str r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
.endif
mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
mrrc p15, 0, r4, r5, c7 @ PAR
mrc p15, 0, r6, c10, c3, 0 @ AMAIR0
mrc p15, 0, r7, c10, c3, 1 @ AMAIR1
.if \store_to_vcpu == 0
push {r2,r4-r7}
.else
str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
add r12, vcpu, #CP15_OFFSET(c7_PAR)
strd r4, r5, [r12]
str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
.endif
.endm
/*
* Reads cp15 registers from memory and writes them to hardware
* @read_from_vcpu: If 0, registers are read in-order from the stack,
* otherwise from the VCPU struct pointed to by vcpup
*
* Assumes vcpu pointer in vcpu reg
*/
.macro write_cp15_state read_from_vcpu
.if \read_from_vcpu == 0
pop {r2,r4-r7}
.else
ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
add r12, vcpu, #CP15_OFFSET(c7_PAR)
ldrd r4, r5, [r12]
ldr r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
.endif
mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
mcrr p15, 0, r4, r5, c7 @ PAR
mcr p15, 0, r6, c10, c3, 0 @ AMAIR0
mcr p15, 0, r7, c10, c3, 1 @ AMAIR1
.if \read_from_vcpu == 0
pop {r2-r12}
.else
ldr r2, [vcpu, #CP15_OFFSET(c13_CID)]
ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
.endif
mcr p15, 0, r2, c13, c0, 1 @ CID
mcr p15, 0, r3, c13, c0, 2 @ TID_URW
mcr p15, 0, r4, c13, c0, 3 @ TID_URO
mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV
mcr p15, 0, r6, c5, c0, 0 @ DFSR
mcr p15, 0, r7, c5, c0, 1 @ IFSR
mcr p15, 0, r8, c5, c1, 0 @ ADFSR
mcr p15, 0, r9, c5, c1, 1 @ AIFSR
mcr p15, 0, r10, c6, c0, 0 @ DFAR
mcr p15, 0, r11, c6, c0, 2 @ IFAR
mcr p15, 0, r12, c12, c0, 0 @ VBAR
.if \read_from_vcpu == 0
pop {r2-r12}
.else
ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)]
add r12, vcpu, #CP15_OFFSET(c2_TTBR0)
ldrd r6, r7, [r12]
add r12, vcpu, #CP15_OFFSET(c2_TTBR1)
ldrd r8, r9, [r12]
ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
.endif
mcr p15, 0, r2, c1, c0, 0 @ SCTLR
mcr p15, 0, r3, c1, c0, 2 @ CPACR
mcr p15, 0, r4, c2, c0, 2 @ TTBCR
mcr p15, 0, r5, c3, c0, 0 @ DACR
mcrr p15, 0, r6, r7, c2 @ TTBR 0
mcrr p15, 1, r8, r9, c2 @ TTBR 1
mcr p15, 0, r10, c10, c2, 0 @ PRRR
mcr p15, 0, r11, c10, c2, 1 @ NMRR
mcr p15, 2, r12, c0, c0, 0 @ CSSELR
.endm
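/*
 * When the stack is used (store_to_vcpu / read_from_vcpu == 0), the
 * three register groups pushed by read_cp15_state are popped by
 * write_cp15_state in reverse order, so the push/pop sequences in the
 * two macros must stay matched.
 */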
/*
* Save the VGIC CPU state into memory
*
* Assumes vcpu pointer in vcpu reg
*/
.macro save_vgic_state
#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL]
cmp r2, #0
beq 2f
/* Compute the address of struct vgic_cpu */
add r11, vcpu, #VCPU_VGIC_CPU
/* Save all interesting registers */
ldr r3, [r2, #GICH_HCR]
ldr r4, [r2, #GICH_VMCR]
ldr r5, [r2, #GICH_MISR]
ldr r6, [r2, #GICH_EISR0]
ldr r7, [r2, #GICH_EISR1]
ldr r8, [r2, #GICH_ELRSR0]
ldr r9, [r2, #GICH_ELRSR1]
ldr r10, [r2, #GICH_APR]
ARM_BE8(rev r3, r3 )
ARM_BE8(rev r4, r4 )
ARM_BE8(rev r5, r5 )
ARM_BE8(rev r6, r6 )
ARM_BE8(rev r7, r7 )
ARM_BE8(rev r8, r8 )
ARM_BE8(rev r9, r9 )
ARM_BE8(rev r10, r10 )
str r3, [r11, #VGIC_V2_CPU_HCR]
str r4, [r11, #VGIC_V2_CPU_VMCR]
str r5, [r11, #VGIC_V2_CPU_MISR]
#ifdef CONFIG_CPU_ENDIAN_BE8
str r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
str r7, [r11, #VGIC_V2_CPU_EISR]
str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
str r9, [r11, #VGIC_V2_CPU_ELRSR]
#else
str r6, [r11, #VGIC_V2_CPU_EISR]
str r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
str r8, [r11, #VGIC_V2_CPU_ELRSR]
str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
#endif
str r10, [r11, #VGIC_V2_CPU_APR]
/* Clear GICH_HCR */
mov r5, #0
str r5, [r2, #GICH_HCR]
/* Save list registers */
add r2, r2, #GICH_LR0
add r3, r11, #VGIC_V2_CPU_LR
ldr r4, [r11, #VGIC_CPU_NR_LR]
1: ldr r6, [r2], #4
ARM_BE8(rev r6, r6 )
str r6, [r3], #4
subs r4, r4, #1
bne 1b
2:
#endif
.endm
/*
* Restore the VGIC CPU state from memory
*
* Assumes vcpu pointer in vcpu reg
*/
.macro restore_vgic_state
#ifdef CONFIG_KVM_ARM_VGIC
/* Get VGIC VCTRL base into r2 */
ldr r2, [vcpu, #VCPU_KVM]
ldr r2, [r2, #KVM_VGIC_VCTRL]
cmp r2, #0
beq 2f
/* Compute the address of struct vgic_cpu */
add r11, vcpu, #VCPU_VGIC_CPU
/* We only restore a minimal set of registers */
ldr r3, [r11, #VGIC_V2_CPU_HCR]
ldr r4, [r11, #VGIC_V2_CPU_VMCR]
ldr r8, [r11, #VGIC_V2_CPU_APR]
ARM_BE8(rev r3, r3 )
ARM_BE8(rev r4, r4 )
ARM_BE8(rev r8, r8 )
str r3, [r2, #GICH_HCR]
str r4, [r2, #GICH_VMCR]
str r8, [r2, #GICH_APR]
/* Restore list registers */
add r2, r2, #GICH_LR0
add r3, r11, #VGIC_V2_CPU_LR
ldr r4, [r11, #VGIC_CPU_NR_LR]
1: ldr r6, [r3], #4
ARM_BE8(rev r6, r6 )
str r6, [r2], #4
subs r4, r4, #1
bne 1b
2:
#endif
.endm
#define CNTHCTL_PL1PCTEN (1 << 0)
#define CNTHCTL_PL1PCEN (1 << 1)
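/*
 * CNTHCTL.PL1PCTEN (bit 0) gates PL1/PL0 access to the physical
 * counter and CNTHCTL.PL1PCEN (bit 1) gates access to the physical
 * timer registers: the host is given both below, while the guest is
 * only allowed to read the physical counter.
 */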
/*
* Save the timer state onto the VCPU and allow physical timer/counter access
* for the host.
*
* Assumes vcpu pointer in vcpu reg
* Clobbers r2-r5
*/
.macro save_timer_state
#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
beq 1f
mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
bic r2, #1 @ Clear ENABLE
mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
isb
mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
ldr r4, =VCPU_TIMER_CNTV_CVAL
add r5, vcpu, r4
strd r2, r3, [r5]
@ Ensure host CNTVCT == CNTPCT
mov r2, #0
mcrr p15, 4, r2, r2, c14 @ CNTVOFF
1:
#endif
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
.endm
/*
* Load the timer state from the VCPU and deny physical timer/counter access
* for the host.
*
* Assumes vcpu pointer in vcpu reg
* Clobbers r2-r5
*/
.macro restore_timer_state
@ Disallow physical timer access for the guest
@ Physical counter access is allowed
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #CNTHCTL_PL1PCTEN
bic r2, r2, #CNTHCTL_PL1PCEN
mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
#ifdef CONFIG_KVM_ARM_TIMER
ldr r4, [vcpu, #VCPU_KVM]
ldr r2, [r4, #KVM_TIMER_ENABLED]
cmp r2, #0
beq 1f
ldr r2, [r4, #KVM_TIMER_CNTVOFF]
ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF
ldr r4, =VCPU_TIMER_CNTV_CVAL
add r5, vcpu, r4
ldrd r2, r3, [r5]
mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
isb
ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
and r2, r2, #3
mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
1:
#endif
.endm
.equ vmentry, 0
.equ vmexit, 1
/* Configures the HSTR (Hyp System Trap Register) on entry/return
* (hardware reset value is 0) */
.macro set_hstr operation
mrc p15, 4, r2, c1, c1, 3
ldr r3, =HSTR_T(15)
.if \operation == vmentry
orr r2, r2, r3 @ Trap CR{15}
.else
bic r2, r2, r3 @ Don't trap any CRx accesses
.endif
mcr p15, 4, r2, c1, c1, 3
.endm
/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
* (hardware reset value is 0). Keep previous value in r2. */
.macro set_hcptr operation, mask
mrc p15, 4, r2, c1, c1, 2
ldr r3, =\mask
.if \operation == vmentry
orr r3, r2, r3 @ Trap coproc-accesses defined in mask
.else
bic r3, r2, r3 @ Don't trap defined coproc-accesses
.endif
mcr p15, 4, r3, c1, c1, 2
.endm
/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
* (hardware reset value is 0) */
.macro set_hdcr operation
mrc p15, 4, r2, c1, c1, 1
ldr r3, =(HDCR_TPM|HDCR_TPMCR)
.if \operation == vmentry
orr r2, r2, r3 @ Trap some perfmon accesses
.else
bic r2, r2, r3 @ Don't trap any perfmon accesses
.endif
mcr p15, 4, r2, c1, c1, 1
.endm
/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
.macro configure_hyp_role operation
.if \operation == vmentry
ldr r2, [vcpu, #VCPU_HCR]
ldr r3, [vcpu, #VCPU_IRQ_LINES]
orr r2, r2, r3
.else
mov r2, #0
.endif
mcr p15, 4, r2, c1, c1, 0 @ HCR
.endm
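/*
 * The world-switch code stashes the current vcpu pointer in HTPIDR;
 * this macro retrieves it for the hyp-mode exception handlers.
 */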
.macro load_vcpu
mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR
.endm

205
arch/arm/kvm/mmio.c Normal file


@ -0,0 +1,205 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>
#include "trace.h"
static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
{
void *datap = NULL;
union {
u8 byte;
u16 hword;
u32 word;
u64 dword;
} tmp;
switch (len) {
case 1:
tmp.byte = data;
datap = &tmp.byte;
break;
case 2:
tmp.hword = data;
datap = &tmp.hword;
break;
case 4:
tmp.word = data;
datap = &tmp.word;
break;
case 8:
tmp.dword = data;
datap = &tmp.dword;
break;
}
memcpy(buf, datap, len);
}
static unsigned long mmio_read_buf(char *buf, unsigned int len)
{
unsigned long data = 0;
union {
u16 hword;
u32 word;
u64 dword;
} tmp;
switch (len) {
case 1:
data = buf[0];
break;
case 2:
memcpy(&tmp.hword, buf, len);
data = tmp.hword;
break;
case 4:
memcpy(&tmp.word, buf, len);
data = tmp.word;
break;
case 8:
memcpy(&tmp.dword, buf, len);
data = tmp.dword;
break;
}
return data;
}
/**
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
*
* This should only be called after returning from userspace for MMIO load
* emulation.
*/
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
unsigned long data;
unsigned int len;
int mask;
if (!run->mmio.is_write) {
len = run->mmio.len;
if (len > sizeof(unsigned long))
return -EINVAL;
data = mmio_read_buf(run->mmio.data, len);
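/*
 * Sign-extend narrow loads with the xor/subtract trick: for
 * example, a 1-byte load of 0x80 gives mask = 0x80, and
 * (0x80 ^ 0x80) - 0x80 == -0x80, i.e. 0xffffff80 once stored
 * back into the 32-bit guest register.
 */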
if (vcpu->arch.mmio_decode.sign_extend &&
len < sizeof(unsigned long)) {
mask = 1U << ((len * 8) - 1);
data = (data ^ mask) - mask;
}
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
data);
data = vcpu_data_host_to_guest(vcpu, data, len);
*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
}
return 0;
}
static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_exit_mmio *mmio)
{
unsigned long rt;
int len;
bool is_write, sign_extend;
if (kvm_vcpu_dabt_isextabt(vcpu)) {
/* cache operation on I/O addr, tell guest unsupported */
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
}
if (kvm_vcpu_dabt_iss1tw(vcpu)) {
/* page table accesses IO mem: tell guest to fix its TTBR */
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 1;
}
len = kvm_vcpu_dabt_get_as(vcpu);
if (unlikely(len < 0))
return len;
is_write = kvm_vcpu_dabt_iswrite(vcpu);
sign_extend = kvm_vcpu_dabt_issext(vcpu);
rt = kvm_vcpu_dabt_get_rd(vcpu);
mmio->is_write = is_write;
mmio->phys_addr = fault_ipa;
mmio->len = len;
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
/*
* The MMIO instruction is emulated and should not be re-executed
* in the guest.
*/
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 0;
}
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa)
{
struct kvm_exit_mmio mmio;
unsigned long data;
unsigned long rt;
int ret;
/*
* Prepare MMIO operation. First stash it in a private
* structure that we can use for in-kernel emulation. If the
* kernel can't handle it, copy it into run->mmio and let user
* space do its magic.
*/
if (kvm_vcpu_dabt_isvalid(vcpu)) {
ret = decode_hsr(vcpu, fault_ipa, &mmio);
if (ret)
return ret;
} else {
kvm_err("load/store instruction decoding not implemented\n");
return -ENOSYS;
}
rt = vcpu->arch.mmio_decode.rt;
data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len);
trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
KVM_TRACE_MMIO_READ_UNSATISFIED,
mmio.len, fault_ipa,
(mmio.is_write) ? data : 0);
if (mmio.is_write)
mmio_write_buf(mmio.data, mmio.len, data);
if (vgic_handle_mmio(vcpu, run, &mmio))
return 1;
kvm_prepare_mmio(run, &mmio);
return 0;
}

1500
arch/arm/kvm/mmu.c Normal file

File diff suppressed because it is too large

68
arch/arm/kvm/perf.c Normal file

@ -0,0 +1,68 @@
/*
* Based on the x86 implementation.
*
* Copyright (C) 2012 ARM Ltd.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
static int kvm_is_in_guest(void)
{
return kvm_arm_get_running_vcpu() != NULL;
}
static int kvm_is_user_mode(void)
{
struct kvm_vcpu *vcpu;
vcpu = kvm_arm_get_running_vcpu();
if (vcpu)
return !vcpu_mode_priv(vcpu);
return 0;
}
static unsigned long kvm_get_guest_ip(void)
{
struct kvm_vcpu *vcpu;
vcpu = kvm_arm_get_running_vcpu();
if (vcpu)
return *vcpu_pc(vcpu);
return 0;
}
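/*
 * These callbacks let the core perf code attribute samples taken while
 * a guest is running to the guest (guest PC, guest user/privileged
 * mode) rather than to the host vcpu thread.
 */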
static struct perf_guest_info_callbacks kvm_guest_cbs = {
.is_in_guest = kvm_is_in_guest,
.is_user_mode = kvm_is_user_mode,
.get_guest_ip = kvm_get_guest_ip,
};
int kvm_perf_init(void)
{
return perf_register_guest_info_callbacks(&kvm_guest_cbs);
}
int kvm_perf_teardown(void)
{
return perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
}

337
arch/arm/kvm/psci.c Normal file

@ -0,0 +1,337 @@
/*
* Copyright (C) 2012 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/wait.h>
#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>
/*
* This is an implementation of the Power State Coordination Interface
* as described in ARM document number ARM DEN 0022A.
*/
#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
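/*
 * With 8-bit MPIDR affinity fields, AFFINITY_MASK(level) clears the
 * fields below the given level: e.g. AFFINITY_MASK(1) == ~0xff, so
 * only Aff1 and above are compared.
 */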
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
if (affinity_level <= 3)
return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
return 0;
}
static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
/*
* NOTE: For simplicity, we treat VCPU suspend emulation the same
* as WFI (Wait-for-interrupt) emulation.
*
* This means that for KVM the wakeup events are interrupts, which
* is consistent with the intended use of StateID as described in
* section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
*
* Further, we also treat a power-down request the same as a
* stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
* specification (ARM DEN 0022A). This means all suspend states for
* KVM will preserve the register state.
*/
kvm_vcpu_block(vcpu);
return PSCI_RET_SUCCESS;
}
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
vcpu->arch.pause = true;
}
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL, *tmp;
wait_queue_head_t *wq;
unsigned long cpu_id;
unsigned long context_id;
unsigned long mpidr;
phys_addr_t target_pc;
int i;
cpu_id = *vcpu_reg(source_vcpu, 1);
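/* For a 32-bit caller, only the lower 32 bits of the target MPIDR are significant. */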
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
kvm_for_each_vcpu(i, tmp, kvm) {
mpidr = kvm_vcpu_get_mpidr(tmp);
if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
vcpu = tmp;
break;
}
}
/*
* Make sure the caller requested a valid CPU and that the CPU is
* turned off.
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
if (!vcpu->arch.pause) {
if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
else
return PSCI_RET_INVALID_PARAMS;
}
target_pc = *vcpu_reg(source_vcpu, 2);
context_id = *vcpu_reg(source_vcpu, 3);
kvm_reset_vcpu(vcpu);
/* Gracefully handle Thumb2 entry point */
if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
target_pc &= ~((phys_addr_t) 1);
vcpu_set_thumb(vcpu);
}
/* Propagate caller endianness */
if (kvm_vcpu_is_be(source_vcpu))
kvm_vcpu_set_be(vcpu);
*vcpu_pc(vcpu) = target_pc;
/*
* NOTE: We always update r0 (or x0) because for PSCI v0.1
* the general purpose registers are undefined upon CPU_ON.
*/
*vcpu_reg(vcpu, 0) = context_id;
vcpu->arch.pause = false;
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
wake_up_interruptible(wq);
return PSCI_RET_SUCCESS;
}
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
int i;
unsigned long mpidr;
unsigned long target_affinity;
unsigned long target_affinity_mask;
unsigned long lowest_affinity_level;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
target_affinity = *vcpu_reg(vcpu, 1);
lowest_affinity_level = *vcpu_reg(vcpu, 2);
/* Determine target affinity mask */
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
if (!target_affinity_mask)
return PSCI_RET_INVALID_PARAMS;
/* Ignore other bits of target affinity */
target_affinity &= target_affinity_mask;
/*
* If one or more VCPUs matching the target affinity are running,
* report ON, otherwise report OFF.
*/
kvm_for_each_vcpu(i, tmp, kvm) {
mpidr = kvm_vcpu_get_mpidr(tmp);
if (((mpidr & target_affinity_mask) == target_affinity) &&
!tmp->arch.pause) {
return PSCI_0_2_AFFINITY_LEVEL_ON;
}
}
return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
int i;
struct kvm_vcpu *tmp;
/*
* The KVM ABI specifies that a system event exit may call KVM_RUN
* again and may perform shutdown/reboot at a later time than when the
* actual request is made. Since we are implementing PSCI and a
* caller of PSCI reboot and shutdown expects that the system shuts
* down or reboots immediately, let's make sure that VCPUs are not run
* after this call is handled and before the VCPUs have been
* re-initialized.
*/
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
tmp->arch.pause = true;
kvm_vcpu_kick(tmp);
}
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu->run->system_event.type = type;
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}
static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
int kvm_psci_version(struct kvm_vcpu *vcpu)
{
if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
return KVM_ARM_PSCI_0_2;
return KVM_ARM_PSCI_0_1;
}
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
int ret = 1;
unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
switch (psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
/*
* Bits[31:16] = Major Version = 0
* Bits[15:0] = Minor Version = 2
*/
val = 2;
break;
case PSCI_0_2_FN_CPU_SUSPEND:
case PSCI_0_2_FN64_CPU_SUSPEND:
val = kvm_psci_vcpu_suspend(vcpu);
break;
case PSCI_0_2_FN_CPU_OFF:
kvm_psci_vcpu_off(vcpu);
val = PSCI_RET_SUCCESS;
break;
case PSCI_0_2_FN_CPU_ON:
case PSCI_0_2_FN64_CPU_ON:
val = kvm_psci_vcpu_on(vcpu);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
case PSCI_0_2_FN64_AFFINITY_INFO:
val = kvm_psci_vcpu_affinity_info(vcpu);
break;
case PSCI_0_2_FN_MIGRATE:
case PSCI_0_2_FN64_MIGRATE:
val = PSCI_RET_NOT_SUPPORTED;
break;
case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
/*
* Trusted OS is MP hence does not require migration
* or
* Trusted OS is not present
*/
val = PSCI_0_2_TOS_MP;
break;
case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
val = PSCI_RET_NOT_SUPPORTED;
break;
case PSCI_0_2_FN_SYSTEM_OFF:
kvm_psci_system_off(vcpu);
/*
* We shouldn't be going back to the guest VCPU after
* receiving a SYSTEM_OFF request.
*
* If user space accidentally or deliberately resumes the
* guest VCPU after a SYSTEM_OFF request, the guest VCPU
* should see an internal failure from the PSCI return
* value. To achieve this, we preload r0 (or x0) with
* PSCI return value INTERNAL_FAILURE.
*/
val = PSCI_RET_INTERNAL_FAILURE;
ret = 0;
break;
case PSCI_0_2_FN_SYSTEM_RESET:
kvm_psci_system_reset(vcpu);
/*
* Same reason as SYSTEM_OFF for preloading r0 (or x0)
* with PSCI return value INTERNAL_FAILURE.
*/
val = PSCI_RET_INTERNAL_FAILURE;
ret = 0;
break;
default:
return -EINVAL;
}
*vcpu_reg(vcpu, 0) = val;
return ret;
}
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
switch (psci_fn) {
case KVM_PSCI_FN_CPU_OFF:
kvm_psci_vcpu_off(vcpu);
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
val = kvm_psci_vcpu_on(vcpu);
break;
case KVM_PSCI_FN_CPU_SUSPEND:
case KVM_PSCI_FN_MIGRATE:
val = PSCI_RET_NOT_SUPPORTED;
break;
default:
return -EINVAL;
}
*vcpu_reg(vcpu, 0) = val;
return 1;
}
/**
* kvm_psci_call - handle PSCI call if r0 value is in range
* @vcpu: Pointer to the VCPU struct
*
* Handle PSCI calls from guests through traps from HVC instructions.
* The calling convention is similar to SMC calls to the secure world
* where the function number is placed in r0.
*
* This function returns: > 0 (success), 0 (success but exit to user
* space), and < 0 (errors)
*
* Errors:
* -EINVAL: Unrecognized PSCI function
*/
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
switch (kvm_psci_version(vcpu)) {
case KVM_ARM_PSCI_0_2:
return kvm_psci_0_2_call(vcpu);
case KVM_ARM_PSCI_0_1:
return kvm_psci_0_1_call(vcpu);
default:
return -EINVAL;
}
}

83
arch/arm/kvm/reset.c Normal file

@ -0,0 +1,83 @@
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <asm/unified.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <kvm/arm_arch_timer.h>
/******************************************************************************
* Cortex-A15 and Cortex-A7 Reset Values
*/
static struct kvm_regs cortexa_regs_reset = {
.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
};
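/* PPI 27 is the architected virtual timer interrupt. */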
static const struct kvm_irq_level cortexa_vtimer_irq = {
{ .irq = 27 },
.level = 1,
};
/*******************************************************************************
* Exported reset function
*/
/**
* kvm_reset_vcpu - sets core registers and cp15 registers to reset value
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on the
* virtual CPU struct to their architecturally defined reset values.
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm_regs *reset_regs;
const struct kvm_irq_level *cpu_vtimer_irq;
switch (vcpu->arch.target) {
case KVM_ARM_TARGET_CORTEX_A7:
case KVM_ARM_TARGET_CORTEX_A15:
reset_regs = &cortexa_regs_reset;
vcpu->arch.midr = read_cpuid_id();
cpu_vtimer_irq = &cortexa_vtimer_irq;
break;
default:
return -ENODEV;
}
/* Reset core registers */
memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
/* Reset CP15 registers */
kvm_reset_coprocs(vcpu);
/* Reset arch_timer context */
kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
return 0;
}

234
arch/arm/kvm/trace.h Normal file

@ -0,0 +1,234 @@
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
/*
* Tracepoints for entry/exit to guest
*/
TRACE_EVENT(kvm_entry,
TP_PROTO(unsigned long vcpu_pc),
TP_ARGS(vcpu_pc),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
),
TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
);
TRACE_EVENT(kvm_exit,
TP_PROTO(unsigned long vcpu_pc),
TP_ARGS(vcpu_pc),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
),
TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
);
TRACE_EVENT(kvm_guest_fault,
TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
unsigned long hxfar,
unsigned long long ipa),
TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
__field( unsigned long, hsr )
__field( unsigned long, hxfar )
__field( unsigned long long, ipa )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
__entry->hsr = hsr;
__entry->hxfar = hxfar;
__entry->ipa = ipa;
),
TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
__entry->ipa, __entry->hsr,
__entry->hxfar, __entry->vcpu_pc)
);
TRACE_EVENT(kvm_irq_line,
TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
TP_ARGS(type, vcpu_idx, irq_num, level),
TP_STRUCT__entry(
__field( unsigned int, type )
__field( int, vcpu_idx )
__field( int, irq_num )
__field( int, level )
),
TP_fast_assign(
__entry->type = type;
__entry->vcpu_idx = vcpu_idx;
__entry->irq_num = irq_num;
__entry->level = level;
),
TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
(__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
(__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
(__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
__entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
);
TRACE_EVENT(kvm_mmio_emulate,
TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
unsigned long cpsr),
TP_ARGS(vcpu_pc, instr, cpsr),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
__field( unsigned long, instr )
__field( unsigned long, cpsr )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
__entry->instr = instr;
__entry->cpsr = cpsr;
),
TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
/* Architecturally implementation defined CP15 register access */
TRACE_EVENT(kvm_emulate_cp15_imp,
TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
unsigned long CRm, unsigned long Op2, bool is_write),
TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),
TP_STRUCT__entry(
__field( unsigned int, Op1 )
__field( unsigned int, Rt1 )
__field( unsigned int, CRn )
__field( unsigned int, CRm )
__field( unsigned int, Op2 )
__field( bool, is_write )
),
TP_fast_assign(
__entry->is_write = is_write;
__entry->Op1 = Op1;
__entry->Rt1 = Rt1;
__entry->CRn = CRn;
__entry->CRm = CRm;
__entry->Op2 = Op2;
),
TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
(__entry->is_write) ? "mcr" : "mrc",
__entry->Op1, __entry->Rt1, __entry->CRn,
__entry->CRm, __entry->Op2)
);
TRACE_EVENT(kvm_wfi,
TP_PROTO(unsigned long vcpu_pc),
TP_ARGS(vcpu_pc),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
),
TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc)
);
TRACE_EVENT(kvm_unmap_hva,
TP_PROTO(unsigned long hva),
TP_ARGS(hva),
TP_STRUCT__entry(
__field( unsigned long, hva )
),
TP_fast_assign(
__entry->hva = hva;
),
TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
);
TRACE_EVENT(kvm_unmap_hva_range,
TP_PROTO(unsigned long start, unsigned long end),
TP_ARGS(start, end),
TP_STRUCT__entry(
__field( unsigned long, start )
__field( unsigned long, end )
),
TP_fast_assign(
__entry->start = start;
__entry->end = end;
),
TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
__entry->start, __entry->end)
);
TRACE_EVENT(kvm_set_spte_hva,
TP_PROTO(unsigned long hva),
TP_ARGS(hva),
TP_STRUCT__entry(
__field( unsigned long, hva )
),
TP_fast_assign(
__entry->hva = hva;
),
TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
);
TRACE_EVENT(kvm_hvc,
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
TP_ARGS(vcpu_pc, r0, imm),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
__field( unsigned long, r0 )
__field( unsigned long, imm )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
__entry->r0 = r0;
__entry->imm = imm;
),
TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx",
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/arm/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
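/*
 * Point define_trace.h at this header (arch/arm/kvm/trace.h) instead of
 * the default include/trace/events/ location, so that the tracepoints
 * declared above get their implementation generated.
 */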
/* This part must be outside protection */
#include <trace/define_trace.h>