Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 01:08:03 -04:00)
Fixed MTP to work with TWRP
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
31  arch/mips/kvm/00README.txt  (Normal file)
@@ -0,0 +1,31 @@
KVM/MIPS Trap & Emulate Release Notes
=====================================

(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the
    following platforms:
        Malta Board with FPGA based 34K
        Sigma Designs TangoX board with a 24K based 8654 SoC.
        Malta Board with 74K @ 1GHz

(2) Both Guest kernel and Guest Userspace execute in UM.
        Guest User address space:   0x00000000 -> 0x40000000
        Guest Kernel Unmapped:      0x40000000 -> 0x60000000
        Guest Kernel Mapped:        0x60000000 -> 0x80000000

    Guest Usermode virtual memory is limited to 1GB.

(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same
    page size, currently at least 16K.
    Note that due to cache aliasing issues, 4K page sizes are NOT supported.

(4) No HugeTLB Support
    Both the host kernel and Guest kernel should have the page size set to 16K.
    This will be implemented in a future release.

(5) KVM/MIPS does not have support for SMP Guests.
    Linux-3.7-rc2 based SMP guests hang due to the following code sequence in
    the generated TLB handlers: LL/TLBP/SC. Since the TLBP instruction causes
    a trap, the reservation gets cleared when we ERET back to the guest. This
    causes the guest to hang in an infinite loop.
    This will be fixed in a future release.

(6) Use Host FPU
    Currently KVM/MIPS emulates a 24K CPU without an FPU.
    This will be fixed in a future release.
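The fixed split above means any guest virtual address can be classified with two compares. A minimal standalone sketch of that check (helper names are illustrative, not the ones used in this tree -- the real code uses the KVM_GUEST_KSEGX() family of macros):

#include <stdio.h>

/* Boundaries taken from the layout above (illustrative only). */
#define GUEST_KSEG_UNMAPPED	0x40000000UL
#define GUEST_KSEG_MAPPED	0x60000000UL
#define GUEST_TOP		0x80000000UL

static const char *guest_segment(unsigned long va)
{
	if (va < GUEST_KSEG_UNMAPPED)
		return "guest user (mapped)";
	if (va < GUEST_KSEG_MAPPED)
		return "guest kernel (unmapped)";
	if (va < GUEST_TOP)
		return "guest kernel (mapped)";
	return "not a guest address";
}

int main(void)
{
	/* An address just above the user/kernel boundary. */
	printf("%s\n", guest_segment(0x40001000UL));
	return 0;
}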
48  arch/mips/kvm/Kconfig  (Normal file)
@@ -0,0 +1,48 @@
#
# KVM configuration
#
source "virt/kvm/Kconfig"

menuconfig VIRTUALIZATION
	bool "Virtualization"
	---help---
	  Say Y here to get to see options for using your Linux host to run
	  other operating systems inside virtual machines (guests).
	  This option alone does not add any kernel code.

	  If you say N, all options in this submenu will be skipped and
	  disabled.

if VIRTUALIZATION

config KVM
	tristate "Kernel-based Virtual Machine (KVM) support"
	depends on HAVE_KVM
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select KVM_MMIO
	---help---
	  Support for hosting Guest kernels.
	  Currently supported on MIPS32 processors.

config KVM_MIPS_DYN_TRANS
	bool "KVM/MIPS: Dynamic binary translation to reduce traps"
	depends on KVM
	---help---
	  When running in Trap & Emulate mode, patch privileged
	  instructions to reduce the number of traps.

	  If unsure, say Y.

config KVM_MIPS_DEBUG_COP0_COUNTERS
	bool "Maintain counters for COP0 accesses"
	depends on KVM
	---help---
	  Maintain statistics for Guest COP0 accesses.
	  A histogram of COP0 accesses is printed when the VM is
	  shut down.

	  If unsure, say N.

source drivers/vhost/Kconfig

endif # VIRTUALIZATION
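To use the options above, the usual flow is to enable them in the board defconfig and rebuild. A minimal fragment might look like this (illustrative -- which defconfig file applies depends on the target board):

CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_KVM_MIPS_DYN_TRANS=y
# CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS is not set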
13  arch/mips/kvm/Makefile  (Normal file)
@@ -0,0 +1,13 @@
# Makefile for KVM support for MIPS
#

common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)

EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm

kvm-objs := $(common-objs) mips.o emulate.o locore.o \
	    interrupt.o stats.o commpage.o \
	    dyntrans.o trap_emul.o

obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
14  arch/mips/kvm/callback.c  (Normal file)
@@ -0,0 +1,14 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/export.h>
#include <linux/kvm_host.h>

struct kvm_mips_callbacks *kvm_mips_callbacks;
EXPORT_SYMBOL(kvm_mips_callbacks);
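kvm_mips_callbacks is the hook through which an emulation backend publishes its implementation; in this tree the trap-and-emulate backend (trap_emul.c) installs its ops table at init time. A hedged sketch of that registration pattern (the handler names here are placeholders, not taken from this diff):

/* Sketch only: a backend fills in one handler per exit reason... */
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	.handle_cop_unusable	= kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod		= kvm_trap_emul_handle_tlb_mod,
	/* ... */
};

/* ...and hands it back to the generic layer during initialization. */
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}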
33  arch/mips/kvm/commpage.c  (Normal file)
@@ -0,0 +1,33 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * commpage, currently used for Virtual COP0 registers.
 * Mapped into the guest kernel @ 0x0.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

#include <linux/kvm_host.h>

#include "commpage.h"

void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
{
	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;

	/* Specific init values for fields */
	vcpu->arch.cop0 = &page->cop0;
}
24  arch/mips/kvm/commpage.h  (Normal file)
@@ -0,0 +1,24 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: commpage: mapped into guest kernel space
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __KVM_MIPS_COMMPAGE_H__
#define __KVM_MIPS_COMMPAGE_H__

struct kvm_mips_commpage {
	/* COP0 state is mapped into Guest kernel via commpage */
	struct mips_coproc cop0;
};

#define KVM_MIPS_COMM_EIDI_OFFSET	0x0

extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);

#endif /* __KVM_MIPS_COMMPAGE_H__ */
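Since the guest reads its virtual COP0 state straight out of this structure, the byte offset of any register inside the commpage falls out of offsetof(). A small standalone sketch (the 32-register x 8-select array shape is an assumption matching the mips_coproc definition elsewhere in the tree):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel types used above. */
struct mips_coproc { unsigned long reg[32][8]; };
struct kvm_mips_commpage { struct mips_coproc cop0; };

int main(void)
{
	/* Offset of CP0 Status (rd = 12, sel = 0) inside the commpage. */
	size_t off = offsetof(struct kvm_mips_commpage, cop0) +
		     offsetof(struct mips_coproc, reg[12][0]);

	printf("CP0 Status lives %zu bytes into the commpage\n", off);
	return 0;
}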
148  arch/mips/kvm/dyntrans.c  (Normal file)
@@ -0,0 +1,148 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/cacheflush.h>

#include "commpage.h"

#define SYNCI_TEMPLATE	0x041f0000
#define SYNCI_BASE(x)	(((x) >> 21) & 0x1f)
#define SYNCI_OFFSET(x)	((x) & 0xffff)

#define LW_TEMPLATE	0x8c000000
#define CLEAR_TEMPLATE	0x00000020
#define SW_TEMPLATE	0xac000000

int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
			       struct kvm_vcpu *vcpu)
{
	int result = 0;
	unsigned long kseg0_opc;
	uint32_t synci_inst = 0x0;

	/* Replace the CACHE instruction with a NOP */
	kseg0_opc =
	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
		       (vcpu, (unsigned long) opc));
	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
	local_flush_icache_range(kseg0_opc, kseg0_opc + 32);

	return result;
}

/*
 * Address based CACHE instructions are transformed into synci(s). A little
 * heavy for just D-cache invalidates, but avoids an expensive trap.
 */
int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
			    struct kvm_vcpu *vcpu)
{
	int result = 0;
	unsigned long kseg0_opc;
	uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;

	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	synci_inst |= (base << 21);
	synci_inst |= offset;

	kseg0_opc =
	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
		       (vcpu, (unsigned long) opc));
	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
	local_flush_icache_range(kseg0_opc, kseg0_opc + 32);

	return result;
}

int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
{
	int32_t rt, rd, sel;
	uint32_t mfc0_inst;
	unsigned long kseg0_opc, flags;

	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;

	if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
		mfc0_inst = CLEAR_TEMPLATE;
		mfc0_inst |= ((rt & 0x1f) << 16);
	} else {
		mfc0_inst = LW_TEMPLATE;
		mfc0_inst |= ((rt & 0x1f) << 16);
		mfc0_inst |=
		    offsetof(struct mips_coproc,
			     reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
						      cop0);
	}

	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		kseg0_opc =
		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
			       (vcpu, (unsigned long) opc));
		memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
		local_flush_icache_range((unsigned long)opc,
					 (unsigned long)opc + 32);
		local_irq_restore(flags);
	} else {
		kvm_err("%s: Invalid address: %p\n", __func__, opc);
		return -EFAULT;
	}

	return 0;
}

int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
{
	int32_t rt, rd, sel;
	uint32_t mtc0_inst = SW_TEMPLATE;
	unsigned long kseg0_opc, flags;

	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;

	mtc0_inst |= ((rt & 0x1f) << 16);
	mtc0_inst |=
	    offsetof(struct mips_coproc,
		     reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);

	if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		kseg0_opc =
		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
			       (vcpu, (unsigned long) opc));
		memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
		local_flush_icache_range((unsigned long)opc,
					 (unsigned long)opc + 32);
		local_irq_restore(flags);
	} else {
		kvm_err("%s: Invalid address: %p\n", __func__, opc);
		return -EFAULT;
	}

	return 0;
}
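To make the patching concrete, here is a standalone sketch that performs the same field extraction and template fill as kvm_mips_trans_mfc0() above on one example instruction word (the commpage offset formula is an illustrative stand-in for the offsetof() computation the kernel code uses):

#include <stdint.h>
#include <stdio.h>

#define LW_TEMPLATE 0x8c000000	/* lw rt, offset($zero); commpage sits at guest VA 0 */

int main(void)
{
	uint32_t inst = 0x40026000;	/* mfc0 v0, c0_status */
	uint32_t rt   = (inst >> 16) & 0x1f;
	uint32_t rd   = (inst >> 11) & 0x1f;
	uint32_t sel  = inst & 0x7;
	/* reg[rd][sel] offset, assuming 32x8 4-byte registers. */
	uint32_t off  = (rd * 8 + sel) * 4;
	uint32_t lw   = LW_TEMPLATE | (rt << 16) | off;

	printf("0x%08x (mfc0 $%u, $%u.%u) -> 0x%08x (lw)\n",
	       inst, rt, rd, sel, lw);
	return 0;
}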
2320  arch/mips/kvm/emulate.c  (Normal file)
File diff suppressed because it is too large
242  arch/mips/kvm/interrupt.c  (Normal file)
@@ -0,0 +1,242 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Interrupt delivery
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * Cause bits to reflect the pending timer interrupt,
	 * the EXC code will be set when we are actually
	 * delivering the interrupt:
	 */
	kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));

	/* Queue up an INT exception for the core */
	kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
	kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
			      struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * Cause bits to reflect the pending IO interrupt,
	 * the EXC code will be set when we are actually
	 * delivering the interrupt:
	 */
	switch (intr) {
	case 2:
		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
		/* Queue up an INT exception for the core */
		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case 3:
		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case 4:
		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	switch (intr) {
	case -2:
		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case -3:
		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case -4:
		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

/* Deliver the interrupt of the corresponding priority, if possible. */
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			    uint32_t cause)
{
	int allowed = 0;
	uint32_t exccode;

	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
			allowed = 1;
			exccode = T_INT;
		}
		break;

	case MIPS_EXC_INT_IO:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
			allowed = 1;
			exccode = T_INT;
		}
		break;

	case MIPS_EXC_INT_IPI_1:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
			allowed = 1;
			exccode = T_INT;
		}
		break;

	case MIPS_EXC_INT_IPI_2:
		if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
		    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
		    && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
			allowed = 1;
			exccode = T_INT;
		}
		break;

	default:
		break;
	}

	/* Are we allowed to deliver the interrupt? */
	if (allowed) {
		if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
			/* save old pc */
			kvm_write_c0_guest_epc(cop0, arch->pc);
			kvm_set_c0_guest_status(cop0, ST0_EXL);

			if (cause & CAUSEF_BD)
				kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
			else
				kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

			kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
		} else
			kvm_err("Trying to deliver interrupt when EXL is already set\n");

		kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
					  (exccode << CAUSEB_EXCCODE));

		/* XXXSL Set PC to the interrupt exception entry point */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
			arch->pc = KVM_GUEST_KSEG0 + 0x200;
		else
			arch->pc = KVM_GUEST_KSEG0 + 0x180;

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			  uint32_t cause)
{
	return 1;
}

void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
	unsigned int priority;

	if (!(*pending) && !(*pending_clr))
		return;

	priority = __ffs(*pending_clr);
	while (priority <= MIPS_EXC_MAX) {
		if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
			if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
				break;
		}

		priority = find_next_bit(pending_clr,
					 BITS_PER_BYTE * sizeof(*pending_clr),
					 priority + 1);
	}

	priority = __ffs(*pending);
	while (priority <= MIPS_EXC_MAX) {
		if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
			if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
				break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
{
	return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
}
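On the userspace side, these queue/dequeue callbacks are reached through KVM's interrupt-injection ioctl. A hedged sketch of raising and lowering guest IRQ line 2 (the I/O line in the switch above), following the convention that a negative irq value lowers the line; treat the field usage as an assumption rather than a documented contract:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: vcpu_fd is an already-open KVM vcpu file descriptor. */
static int set_guest_io_irq(int vcpu_fd, int raise)
{
	struct kvm_mips_interrupt irq = {
		.cpu = -1,		/* deliver to this vcpu */
		.irq = raise ? 2 : -2,	/* matches case 2 / case -2 above */
	};

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}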
53  arch/mips/kvm/interrupt.h  (Normal file)
@@ -0,0 +1,53 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Interrupts
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

/*
 * MIPS Exception Priorities, exceptions (including interrupts) are queued up
 * for the guest in the order specified by their priorities
 */

#define MIPS_EXC_RESET		0
#define MIPS_EXC_SRESET		1
#define MIPS_EXC_DEBUG_ST	2
#define MIPS_EXC_DEBUG		3
#define MIPS_EXC_DDB		4
#define MIPS_EXC_NMI		5
#define MIPS_EXC_MCHK		6
#define MIPS_EXC_INT_TIMER	7
#define MIPS_EXC_INT_IO		8
#define MIPS_EXC_EXECUTE	9
#define MIPS_EXC_INT_IPI_1	10
#define MIPS_EXC_INT_IPI_2	11
#define MIPS_EXC_MAX		12
/* XXXSL More to follow */

extern char mips32_exception[], mips32_exceptionEnd[];
extern char mips32_GuestException[], mips32_GuestExceptionEnd[];

#define C_TI	(_ULCAST_(1) << 30)

#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE	(0)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE		(0)

void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);

void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
			      struct kvm_mips_interrupt *irq);
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				struct kvm_mips_interrupt *irq);
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			    uint32_t cause);
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			  uint32_t cause);
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
620  arch/mips/kvm/locore.S  (Normal file)
@@ -0,0 +1,620 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/asm-offsets.h>

#define _C_LABEL(x)	x
#define MIPSX(name)	mips32_ ## name
#define CALLFRAME_SIZ	32

/*
 * VECTOR
 *  exception vector entrypoint
 */
#define VECTOR(x, regmask)	\
	.ent	_C_LABEL(x), 0;	\
	EXPORT(x);

#define VECTOR_END(x)	\
	EXPORT(x);

/* Overload, Danger Will Robinson!! */
#define PT_HOST_ASID		PT_BVADDR
#define PT_HOST_USERLOCAL	PT_EPC

#define CP0_DDATA_LO		$28, 3
#define CP0_EBASE		$15, 1

#define CP0_INTCTL		$12, 1
#define CP0_SRSCTL		$12, 2
#define CP0_SRSMAP		$12, 3
#define CP0_HWRENA		$7, 0

/* Resume Flags */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_HOST		RESUME_FLAG_HOST

/*
 * __kvm_mips_vcpu_run: entry point to the guest
 * a0: run
 * a1: vcpu
 */
	.set	noreorder
	.set	noat

FEXPORT(__kvm_mips_vcpu_run)
	/* k0/k1 not being used in host kernel context */
	INT_ADDIU k1, sp, -PT_SIZE
	LONG_S	$0, PT_R0(k1)
	LONG_S	$1, PT_R1(k1)
	LONG_S	$2, PT_R2(k1)
	LONG_S	$3, PT_R3(k1)

	LONG_S	$4, PT_R4(k1)
	LONG_S	$5, PT_R5(k1)
	LONG_S	$6, PT_R6(k1)
	LONG_S	$7, PT_R7(k1)

	LONG_S	$8, PT_R8(k1)
	LONG_S	$9, PT_R9(k1)
	LONG_S	$10, PT_R10(k1)
	LONG_S	$11, PT_R11(k1)
	LONG_S	$12, PT_R12(k1)
	LONG_S	$13, PT_R13(k1)
	LONG_S	$14, PT_R14(k1)
	LONG_S	$15, PT_R15(k1)
	LONG_S	$16, PT_R16(k1)
	LONG_S	$17, PT_R17(k1)

	LONG_S	$18, PT_R18(k1)
	LONG_S	$19, PT_R19(k1)
	LONG_S	$20, PT_R20(k1)
	LONG_S	$21, PT_R21(k1)
	LONG_S	$22, PT_R22(k1)
	LONG_S	$23, PT_R23(k1)
	LONG_S	$24, PT_R24(k1)
	LONG_S	$25, PT_R25(k1)

	/*
	 * XXXKYMA k0/k1 not saved, not being used if we got here through
	 * an ioctl()
	 */

	LONG_S	$28, PT_R28(k1)
	LONG_S	$29, PT_R29(k1)
	LONG_S	$30, PT_R30(k1)
	LONG_S	$31, PT_R31(k1)

	/* Save hi/lo */
	mflo	v0
	LONG_S	v0, PT_LO(k1)
	mfhi	v1
	LONG_S	v1, PT_HI(k1)

	/* Save host status */
	mfc0	v0, CP0_STATUS
	LONG_S	v0, PT_STATUS(k1)

	/* Save host ASID, shove it into the BVADDR location */
	mfc0	v1, CP0_ENTRYHI
	andi	v1, 0xff
	LONG_S	v1, PT_HOST_ASID(k1)

	/* Save DDATA_LO, will be used to store pointer to vcpu */
	mfc0	v1, CP0_DDATA_LO
	LONG_S	v1, PT_HOST_USERLOCAL(k1)

	/* DDATA_LO has pointer to vcpu */
	mtc0	a1, CP0_DDATA_LO

	/* Offset into vcpu->arch */
	INT_ADDIU k1, a1, VCPU_HOST_ARCH

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	LONG_S	sp, VCPU_HOST_STACK(k1)

	/* Save the kernel gp as well */
	LONG_S	gp, VCPU_HOST_GP(k1)

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
	mtc0	k0, CP0_STATUS
	ehb

	/* load up the new EBASE */
	LONG_L	k0, VCPU_GUEST_EBASE(k1)
	mtc0	k0, CP0_EBASE

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
	andi	v0, v0, ST0_IM
	or	k0, k0, v0
	mtc0	k0, CP0_STATUS
	ehb

	/* Set Guest EPC */
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC

FEXPORT(__kvm_mips_load_asid)
	/* Set the ASID for the Guest Kernel */
	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
				/* addresses shift to 0x80000000 */
	bltz	t0, 1f		/* If kernel */
	INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID		/* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id */
	LONG_L	t2, TI_CPU($28)		/* smp_processor_id */
	INT_SLL	t2, t2, 2		/* x4 */
	REG_ADDU t3, t1, t2
	LONG_L	k0, (t3)
	andi	k0, k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb

	/* Disable RDHWR access */
	mtc0	zero, CP0_HWRENA

	/* Now load up the Guest Context from VCPU */
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)

	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)

	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)

	/* k0/k1 loaded up later */

	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

	/* Restore hi/lo */
	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

FEXPORT(__kvm_mips_load_k0k1)
	/* Restore the guest's k0/k1 registers */
	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	/* Jump to guest */
	eret

VECTOR(MIPSX(exception), unknown)
	/* Find out what mode we came from and jump to the proper handler. */
	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
	ehb				#02:

	mfc0	k0, CP0_EBASE		#02: Get EBASE
	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
	INT_SLL	k0, k0, 10		#04
	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is
					#    installed @ offset 0x2000
	j	k0			#07: jump to the function
	nop				#08: branch delay slot
VECTOR_END(MIPSX(exceptionEnd))
.end MIPSX(exception)

/*
 * Generic Guest exception handler. We end up here when the guest
 * does something that causes a trap to kernel mode.
 */
NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	/* Get the VCPU pointer from DDATA_LO */
	mfc0	k1, CP0_DDATA_LO
	INT_ADDIU k1, k1, VCPU_HOST_ARCH

	/* Start saving Guest context to VCPU */
	LONG_S	$0, VCPU_R0(k1)
	LONG_S	$1, VCPU_R1(k1)
	LONG_S	$2, VCPU_R2(k1)
	LONG_S	$3, VCPU_R3(k1)
	LONG_S	$4, VCPU_R4(k1)
	LONG_S	$5, VCPU_R5(k1)
	LONG_S	$6, VCPU_R6(k1)
	LONG_S	$7, VCPU_R7(k1)
	LONG_S	$8, VCPU_R8(k1)
	LONG_S	$9, VCPU_R9(k1)
	LONG_S	$10, VCPU_R10(k1)
	LONG_S	$11, VCPU_R11(k1)
	LONG_S	$12, VCPU_R12(k1)
	LONG_S	$13, VCPU_R13(k1)
	LONG_S	$14, VCPU_R14(k1)
	LONG_S	$15, VCPU_R15(k1)
	LONG_S	$16, VCPU_R16(k1)
	LONG_S	$17, VCPU_R17(k1)
	LONG_S	$18, VCPU_R18(k1)
	LONG_S	$19, VCPU_R19(k1)
	LONG_S	$20, VCPU_R20(k1)
	LONG_S	$21, VCPU_R21(k1)
	LONG_S	$22, VCPU_R22(k1)
	LONG_S	$23, VCPU_R23(k1)
	LONG_S	$24, VCPU_R24(k1)
	LONG_S	$25, VCPU_R25(k1)

	/* Guest k0/k1 saved later */

	LONG_S	$28, VCPU_R28(k1)
	LONG_S	$29, VCPU_R29(k1)
	LONG_S	$30, VCPU_R30(k1)
	LONG_S	$31, VCPU_R31(k1)

	/* We need to save hi/lo and restore them on the way out */
	mfhi	t0
	LONG_S	t0, VCPU_HI(k1)

	mflo	t0
	LONG_S	t0, VCPU_LO(k1)

	/* Finally save guest k0/k1 to VCPU */
	mfc0	t0, CP0_ERROREPC
	LONG_S	t0, VCPU_R26(k1)

	/* Get GUEST k1 and save it in VCPU */
	PTR_LI	t1, ~0x2ff
	mfc0	t0, CP0_EBASE
	and	t0, t0, t1
	LONG_L	t0, 0x3000(t0)
	LONG_S	t0, VCPU_R27(k1)

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	mfc0	a1, CP0_DDATA_LO
	move	s1, a1

	/* Restore run (vcpu->run) */
	LONG_L	a0, VCPU_RUN(a1)
	/* Save pointer to run in s0, will be saved by the compiler */
	move	s0, a0

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
	 * process the exception
	 */
	mfc0	k0, CP0_EPC
	LONG_S	k0, VCPU_PC(k1)

	mfc0	k0, CP0_BADVADDR
	LONG_S	k0, VCPU_HOST_CP0_BADVADDR(k1)

	mfc0	k0, CP0_CAUSE
	LONG_S	k0, VCPU_HOST_CP0_CAUSE(k1)

	mfc0	k0, CP0_ENTRYHI
	LONG_S	k0, VCPU_HOST_ENTRYHI(k1)

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	mfc0	v0, CP0_STATUS

	.set	at
	or	k0, v0, ST0_BEV
	.set	noat

	mtc0	k0, CP0_STATUS
	ehb

	LONG_L	k0, VCPU_HOST_EBASE(k1)
	mtc0	k0, CP0_EBASE

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	.set	at
	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
	or	v0, v0, ST0_CU0
	.set	noat
	mtc0	v0, CP0_STATUS
	ehb

	/* Load up host GP */
	LONG_L	gp, VCPU_HOST_GP(k1)

	/* Need a stack before we can jump to "C" */
	LONG_L	sp, VCPU_HOST_STACK(k1)

	/* Saved host state */
	INT_ADDIU sp, sp, -PT_SIZE

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(sp)
	mtc0	k0, CP0_DDATA_LO

	/* Restore RDHWR access */
	PTR_LI	k0, 0x2000000F
	mtc0	k0, CP0_HWRENA

	/* Jump to handler */
FEXPORT(__kvm_mips_jump_to_handler)
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	PTR_LA	t9, kvm_mips_handle_exit
	jalr.hb	t9
	INT_ADDIU sp, sp, -CALLFRAME_SIZ	/* BD Slot */

	/* Return from handler; make sure interrupts are disabled */
	di
	ehb

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	move	k1, s1
	INT_ADDIU k1, k1, VCPU_HOST_ARCH

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	andi	t0, v0, RESUME_HOST
	bnez	t0, __kvm_mips_return_to_host
	nop

__kvm_mips_return_to_guest:
	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
	mtc0	s1, CP0_DDATA_LO

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	LONG_L	t0, VCPU_GUEST_EBASE(k1)

	/* Switch EBASE back to the one used by KVM */
	mfc0	v1, CP0_STATUS
	.set	at
	or	k0, v1, ST0_BEV
	.set	noat
	mtc0	k0, CP0_STATUS
	ehb
	mtc0	t0, CP0_EBASE

	/* Setup status register for running guest in UM */
	.set	at
	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
	and	v1, v1, ~(ST0_CU0 | ST0_MX)
	.set	noat
	mtc0	v1, CP0_STATUS
	ehb

	/* Set Guest EPC */
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC

	/* Set the ASID for the Guest Kernel */
	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
				/* addresses shift to 0x80000000 */
	bltz	t0, 1f		/* If kernel */
	INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID		/* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id */
	LONG_L	t2, TI_CPU($28)		/* smp_processor_id */
	INT_SLL	t2, t2, 2		/* x4 */
	REG_ADDU t3, t1, t2
	LONG_L	k0, (t3)
	andi	k0, k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb

	/* Disable RDHWR access */
	mtc0	zero, CP0_HWRENA

	/* load the guest context from VCPU and return */
	LONG_L	$0, VCPU_R0(k1)
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)
	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)
	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)

	/* k0/k1 loaded later */
	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

FEXPORT(__kvm_mips_skip_guest_restore)
	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	eret

__kvm_mips_return_to_host:
	/* EBASE is already pointing to Linux */
	LONG_L	k1, VCPU_HOST_STACK(k1)
	INT_ADDIU k1, k1, -PT_SIZE

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(k1)
	mtc0	k0, CP0_DDATA_LO

	/* Restore host ASID */
	LONG_L	k0, PT_HOST_ASID(sp)
	andi	k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb

	/* Load context saved on the host stack */
	LONG_L	$0, PT_R0(k1)
	LONG_L	$1, PT_R1(k1)

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	INT_SRA	k0, v0, 2
	move	$2, k0

	LONG_L	$3, PT_R3(k1)
	LONG_L	$4, PT_R4(k1)
	LONG_L	$5, PT_R5(k1)
	LONG_L	$6, PT_R6(k1)
	LONG_L	$7, PT_R7(k1)
	LONG_L	$8, PT_R8(k1)
	LONG_L	$9, PT_R9(k1)
	LONG_L	$10, PT_R10(k1)
	LONG_L	$11, PT_R11(k1)
	LONG_L	$12, PT_R12(k1)
	LONG_L	$13, PT_R13(k1)
	LONG_L	$14, PT_R14(k1)
	LONG_L	$15, PT_R15(k1)
	LONG_L	$16, PT_R16(k1)
	LONG_L	$17, PT_R17(k1)
	LONG_L	$18, PT_R18(k1)
	LONG_L	$19, PT_R19(k1)
	LONG_L	$20, PT_R20(k1)
	LONG_L	$21, PT_R21(k1)
	LONG_L	$22, PT_R22(k1)
	LONG_L	$23, PT_R23(k1)
	LONG_L	$24, PT_R24(k1)
	LONG_L	$25, PT_R25(k1)

	/* Host k0/k1 were not saved */

	LONG_L	$28, PT_R28(k1)
	LONG_L	$29, PT_R29(k1)
	LONG_L	$30, PT_R30(k1)

	LONG_L	k0, PT_HI(k1)
	mthi	k0

	LONG_L	k0, PT_LO(k1)
	mtlo	k0

	/* Restore RDHWR access */
	PTR_LI	k0, 0x2000000F
	mtc0	k0, CP0_HWRENA

	/* Restore RA, which is the address we will return to */
	LONG_L	ra, PT_R31(k1)
	j	ra
	nop

VECTOR_END(MIPSX(GuestExceptionEnd))
.end MIPSX(GuestException)

MIPSX(exceptions):
	####
	##### The exception handlers.
	#####
	.word _C_LABEL(MIPSX(GuestException))	#  0
	.word _C_LABEL(MIPSX(GuestException))	#  1
	.word _C_LABEL(MIPSX(GuestException))	#  2
	.word _C_LABEL(MIPSX(GuestException))	#  3
	.word _C_LABEL(MIPSX(GuestException))	#  4
	.word _C_LABEL(MIPSX(GuestException))	#  5
	.word _C_LABEL(MIPSX(GuestException))	#  6
	.word _C_LABEL(MIPSX(GuestException))	#  7
	.word _C_LABEL(MIPSX(GuestException))	#  8
	.word _C_LABEL(MIPSX(GuestException))	#  9
	.word _C_LABEL(MIPSX(GuestException))	# 10
	.word _C_LABEL(MIPSX(GuestException))	# 11
	.word _C_LABEL(MIPSX(GuestException))	# 12
	.word _C_LABEL(MIPSX(GuestException))	# 13
	.word _C_LABEL(MIPSX(GuestException))	# 14
	.word _C_LABEL(MIPSX(GuestException))	# 15
	.word _C_LABEL(MIPSX(GuestException))	# 16
	.word _C_LABEL(MIPSX(GuestException))	# 17
	.word _C_LABEL(MIPSX(GuestException))	# 18
	.word _C_LABEL(MIPSX(GuestException))	# 19
	.word _C_LABEL(MIPSX(GuestException))	# 20
	.word _C_LABEL(MIPSX(GuestException))	# 21
	.word _C_LABEL(MIPSX(GuestException))	# 22
	.word _C_LABEL(MIPSX(GuestException))	# 23
	.word _C_LABEL(MIPSX(GuestException))	# 24
	.word _C_LABEL(MIPSX(GuestException))	# 25
	.word _C_LABEL(MIPSX(GuestException))	# 26
	.word _C_LABEL(MIPSX(GuestException))	# 27
	.word _C_LABEL(MIPSX(GuestException))	# 28
	.word _C_LABEL(MIPSX(GuestException))	# 29
	.word _C_LABEL(MIPSX(GuestException))	# 30
	.word _C_LABEL(MIPSX(GuestException))	# 31
1198  arch/mips/kvm/mips.c  (Normal file)
File diff suppressed because it is too large
22  arch/mips/kvm/opcode.h  (Normal file)
@@ -0,0 +1,22 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

/* Define opcode values not defined in <asm/inst.h> */

#ifndef __KVM_MIPS_OPCODE_H__
#define __KVM_MIPS_OPCODE_H__

/* COP0 Ops */
#define mfmcz_op	0x0b	/* 01011 */
#define wrpgpr_op	0x0e	/* 01110 */

/* COP0 opcodes (only if COP0 and CO=1): */
#define wait_op		0x20	/* 100000 */

#endif /* __KVM_MIPS_OPCODE_H__ */
80  arch/mips/kvm/stats.c  (Normal file)
@@ -0,0 +1,80 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: COP0 access histogram
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/kvm_host.h>

char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
	"WAIT",
	"CACHE",
	"Signal",
	"Interrupt",
	"COP0/1 Unusable",
	"TLB Mod",
	"TLB Miss (LD)",
	"TLB Miss (ST)",
	"Address Err (ST)",
	"Address Error (LD)",
	"System Call",
	"Reserved Inst",
	"Break Inst",
	"D-Cache Flushes",
};

char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
	"Index",
	"Random",
	"EntryLo0",
	"EntryLo1",
	"Context",
	"PG Mask",
	"Wired",
	"HWREna",
	"BadVAddr",
	"Count",
	"EntryHI",
	"Compare",
	"Status",
	"Cause",
	"EXC PC",
	"PRID",
	"Config",
	"LLAddr",
	"Watch Lo",
	"Watch Hi",
	"X Context",
	"Reserved",
	"Impl Dep",
	"Debug",
	"DEPC",
	"PerfCnt",
	"ErrCtl",
	"CacheErr",
	"TagLo",
	"TagHi",
	"ErrorEPC",
	"DESAVE"
};

void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	int i, j;

	kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
	for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
		for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
			if (vcpu->arch.cop0->stat[i][j])
				kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
					 vcpu->arch.cop0->stat[i][j]);
		}
	}
#endif
}
809  arch/mips/kvm/tlb.c  (Normal file)
@@ -0,0 +1,809 @@
|
|||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
|
||||
* TLB handlers run from KSEG0
|
||||
*
|
||||
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
|
||||
* Authors: Sanjay Lal <sanjayl@kymasys.com>
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/srcu.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/bootinfo.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlb.h>
|
||||
|
||||
#undef CONFIG_MIPS_MT
|
||||
#include <asm/r4kcache.h>
|
||||
#define CONFIG_MIPS_MT
|
||||
|
||||
#define KVM_GUEST_PC_TLB 0
|
||||
#define KVM_GUEST_SP_TLB 1
|
||||
|
||||
#define PRIx64 "llx"
|
||||
|
||||
atomic_t kvm_mips_instance;
|
||||
EXPORT_SYMBOL(kvm_mips_instance);
|
||||
|
||||
/* These function pointers are initialized once the KVM module is loaded */
|
||||
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
|
||||
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
|
||||
|
||||
void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
|
||||
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
|
||||
|
||||
bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
|
||||
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
|
||||
|
||||
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
|
||||
}
|
||||
|
||||
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
|
||||
}
|
||||
|
||||
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->kvm->arch.commpage_tlb;
|
||||
}
|
||||
|
||||
/* Structure defining an tlb entry data set. */
|
||||
|
||||
void kvm_mips_dump_host_tlbs(void)
|
||||
{
|
||||
unsigned long old_entryhi;
|
||||
unsigned long old_pagemask;
|
||||
struct kvm_mips_tlb tlb;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
old_entryhi = read_c0_entryhi();
|
||||
old_pagemask = read_c0_pagemask();
|
||||
|
||||
kvm_info("HOST TLBs:\n");
|
||||
kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
|
||||
|
||||
for (i = 0; i < current_cpu_data.tlbsize; i++) {
|
||||
write_c0_index(i);
|
||||
mtc0_tlbw_hazard();
|
||||
|
||||
tlb_read();
|
||||
tlbw_use_hazard();
|
||||
|
||||
tlb.tlb_hi = read_c0_entryhi();
|
||||
tlb.tlb_lo0 = read_c0_entrylo0();
|
||||
tlb.tlb_lo1 = read_c0_entrylo1();
|
||||
tlb.tlb_mask = read_c0_pagemask();
|
||||
|
||||
kvm_info("TLB%c%3d Hi 0x%08lx ",
|
||||
(tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
|
||||
i, tlb.tlb_hi);
|
||||
kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
|
||||
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
|
||||
(tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo0 >> 3) & 7);
|
||||
kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
|
||||
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
|
||||
(tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
|
||||
}
|
||||
write_c0_entryhi(old_entryhi);
|
||||
write_c0_pagemask(old_pagemask);
|
||||
mtc0_tlbw_hazard();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
|
||||
|
||||
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct mips_coproc *cop0 = vcpu->arch.cop0;
|
||||
struct kvm_mips_tlb tlb;
|
||||
int i;
|
||||
|
||||
kvm_info("Guest TLBs:\n");
|
||||
kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
|
||||
|
||||
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
|
||||
tlb = vcpu->arch.guest_tlb[i];
|
||||
kvm_info("TLB%c%3d Hi 0x%08lx ",
|
||||
(tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
|
||||
i, tlb.tlb_hi);
|
||||
kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
|
||||
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
|
||||
(tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo0 >> 3) & 7);
|
||||
kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
|
||||
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
|
||||
(tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
|
||||
|
||||
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
|
||||
{
|
||||
int srcu_idx, err = 0;
|
||||
pfn_t pfn;
|
||||
|
||||
if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
|
||||
return 0;
|
||||
|
||||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
|
||||
|
||||
if (kvm_mips_is_error_pfn(pfn)) {
|
||||
kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
|
||||
err = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
kvm->arch.guest_pmap[gfn] = pfn;
|
||||
out:
|
||||
srcu_read_unlock(&kvm->srcu, srcu_idx);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Translate guest KSEG0 addresses to Host PA */
|
||||
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
|
||||
unsigned long gva)
|
||||
{
|
||||
gfn_t gfn;
|
||||
uint32_t offset = gva & ~PAGE_MASK;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
|
||||
kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
|
||||
__builtin_return_address(0), gva);
|
||||
return KVM_INVALID_PAGE;
|
||||
}
|
||||
|
||||
gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
|
||||
|
||||
if (gfn >= kvm->arch.guest_pmap_npages) {
|
||||
kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
|
||||
gva);
|
||||
return KVM_INVALID_PAGE;
|
||||
}
|
||||
|
||||
if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
|
||||
return KVM_INVALID_ADDR;
|
||||
|
||||
return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
|
||||
}
|
||||
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
|
||||
|
||||
/* XXXKYMA: Must be called with interrupts disabled */
|
||||
/* set flush_dcache_mask == 0 if no dcache flush required */
|
||||
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
|
||||
unsigned long entrylo0, unsigned long entrylo1,
|
||||
int flush_dcache_mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned long old_entryhi;
|
||||
int idx;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
old_entryhi = read_c0_entryhi();
|
||||
write_c0_entryhi(entryhi);
|
||||
mtc0_tlbw_hazard();
|
||||
|
||||
tlb_probe();
|
||||
tlb_probe_hazard();
|
||||
idx = read_c0_index();
|
||||
|
||||
if (idx > current_cpu_data.tlbsize) {
|
||||
kvm_err("%s: Invalid Index: %d\n", __func__, idx);
|
||||
kvm_mips_dump_host_tlbs();
|
||||
return -1;
|
||||
}
|
||||
|
||||
write_c0_entrylo0(entrylo0);
|
||||
write_c0_entrylo1(entrylo1);
|
||||
mtc0_tlbw_hazard();
|
||||
|
||||
if (idx < 0)
|
||||
tlb_write_random();
|
||||
else
|
||||
tlb_write_indexed();
|
||||
tlbw_use_hazard();
|
||||
|
||||
kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
|
||||
vcpu->arch.pc, idx, read_c0_entryhi(),
|
||||
read_c0_entrylo0(), read_c0_entrylo1());
|
||||
|
||||
/* Flush D-cache */
|
||||
if (flush_dcache_mask) {
|
||||
if (entrylo0 & MIPS3_PG_V) {
|
||||
++vcpu->stat.flush_dcache_exits;
|
||||
flush_data_cache_page((entryhi & VPN2_MASK) &
|
||||
~flush_dcache_mask);
|
||||
}
|
||||
if (entrylo1 & MIPS3_PG_V) {
|
||||
++vcpu->stat.flush_dcache_exits;
|
||||
flush_data_cache_page(((entryhi & VPN2_MASK) &
|
||||
~flush_dcache_mask) |
|
||||
(0x1 << PAGE_SHIFT));
|
||||
}
|
||||
}
|
||||
|
||||
/* Restore old ASID */
|
||||
write_c0_entryhi(old_entryhi);
|
||||
mtc0_tlbw_hazard();
|
||||
tlbw_use_hazard();
|
||||
local_irq_restore(flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* XXXKYMA: Must be called with interrupts disabled */
|
||||
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
gfn_t gfn;
|
||||
pfn_t pfn0, pfn1;
|
||||
unsigned long vaddr = 0;
|
||||
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
|
||||
int even;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
const int flush_dcache_mask = 0;
|
||||
|
||||
if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
|
||||
kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
|
||||
kvm_mips_dump_host_tlbs();
|
||||
return -1;
|
||||
}
|
||||
|
||||
gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
|
||||
if (gfn >= kvm->arch.guest_pmap_npages) {
|
||||
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
|
||||
gfn, badvaddr);
|
||||
kvm_mips_dump_host_tlbs();
|
||||
return -1;
|
||||
}
|
||||
even = !(gfn & 0x1);
|
||||
vaddr = badvaddr & (PAGE_MASK << 1);
|
||||
|
||||
if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
|
||||
return -1;
|
||||
|
||||
if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
|
||||
return -1;
|
||||
|
||||
if (even) {
|
||||
pfn0 = kvm->arch.guest_pmap[gfn];
|
||||
pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
|
||||
} else {
|
||||
pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
|
||||
pfn1 = kvm->arch.guest_pmap[gfn];
|
||||
}
|
||||
|
||||
entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
|
||||
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
|
||||
(1 << 2) | (0x1 << 1);
|
||||
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
|
||||
(1 << 2) | (0x1 << 1);
|
||||
|
||||
return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
|
||||
flush_dcache_mask);
|
||||
}
|
||||
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
|
||||
|
||||
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
pfn_t pfn0, pfn1;
|
||||
unsigned long flags, old_entryhi = 0, vaddr = 0;
|
||||
unsigned long entrylo0 = 0, entrylo1 = 0;
|
||||
|
||||
pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
|
||||
pfn1 = 0;
|
||||
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
|
||||
(1 << 2) | (0x1 << 1);
|
||||
entrylo1 = 0;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
old_entryhi = read_c0_entryhi();
|
||||
vaddr = badvaddr & (PAGE_MASK << 1);
|
||||
write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
|
||||
mtc0_tlbw_hazard();
|
||||
write_c0_entrylo0(entrylo0);
|
||||
mtc0_tlbw_hazard();
|
||||
write_c0_entrylo1(entrylo1);
|
||||
mtc0_tlbw_hazard();
|
||||
write_c0_index(kvm_mips_get_commpage_asid(vcpu));
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
mtc0_tlbw_hazard();
|
||||
tlbw_use_hazard();
|
||||
|
||||
kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
|
||||
vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
|
||||
read_c0_entrylo0(), read_c0_entrylo1());
|
||||
|
||||
/* Restore old ASID */
|
||||
write_c0_entryhi(old_entryhi);
|
||||
mtc0_tlbw_hazard();
|
||||
tlbw_use_hazard();
|
||||
local_irq_restore(flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
|
||||
|
||||
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
|
||||
struct kvm_mips_tlb *tlb,
|
||||
unsigned long *hpa0,
|
||||
unsigned long *hpa1)
|
||||
{
|
||||
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
pfn_t pfn0, pfn1;
|
||||
|
||||
if ((tlb->tlb_hi & VPN2_MASK) == 0) {
|
||||
pfn0 = 0;
|
||||
pfn1 = 0;
|
||||
} else {
|
||||
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
|
||||
>> PAGE_SHIFT) < 0)
|
||||
return -1;
|
||||
|
||||
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
|
||||
>> PAGE_SHIFT) < 0)
|
||||
return -1;
|
||||
|
||||
pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
|
||||
>> PAGE_SHIFT];
|
||||
pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
|
||||
>> PAGE_SHIFT];
|
||||
}
|
||||
|
||||
if (hpa0)
|
||||
*hpa0 = pfn0 << PAGE_SHIFT;
|
||||
|
||||
if (hpa1)
|
||||
*hpa1 = pfn1 << PAGE_SHIFT;
|
||||
|
||||
/* Get attributes from the Guest TLB */
|
||||
entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
|
||||
kvm_mips_get_kernel_asid(vcpu) :
|
||||
kvm_mips_get_user_asid(vcpu));
|
||||
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
|
||||
(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
|
||||
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
|
||||
(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
|
||||
|
||||
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
|
||||
tlb->tlb_lo0, tlb->tlb_lo1);
|
||||
|
||||
return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
|
||||
tlb->tlb_mask);
|
||||
}
|
||||
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
|
||||
|
||||
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
|
||||
{
|
||||
int i;
|
||||
int index = -1;
|
||||
struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
|
||||
|
||||
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
|
||||
if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
|
||||
TLB_HI_ASID_HIT(tlb[i], entryhi)) {
|
||||
index = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
|
||||
__func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
|
||||
|
||||
return index;
|
||||
}
|
||||
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
|
||||
|
||||
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
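/*
 * kvm_mips_host_tlb_inv() - Probe for and invalidate the host TLB entry
 * mapping @va in the guest user ASID. Invalidation writes a unique,
 * unmatchable EntryHi and a zeroed EntryLo pair back at the probed index.
 */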
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid entry; only a negative index means "not found" */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);

/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
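/*
 * kvm_mips_host_tlb_inv_index() - Invalidate a host TLB entry by hardware
 * index, regardless of which ASID the entry is tagged with.
 */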
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}
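/*
 * kvm_mips_flush_host_tlb() - Blast away the whole host TLB. When
 * @skip_kseg0 is set, each entry is read back first and entries mapping
 * guest KSEG0 are preserved, so only mapped-segment translations are lost.
 */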
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
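/*
 * kvm_get_new_mmu_context() - KVM-private copy of the MIPS ASID allocator.
 * Bumps the per-CPU ASID cache; when the ASID field wraps, the icache (if
 * virtually tagged) and the local TLB are flushed to start a new cycle.
 */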
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
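/*
 * kvm_local_flush_tlb_all() - Invalidate every entry in the local TLB by
 * writing a unique EntryHi and zeroed EntryLo pair at each index.
 */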
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}
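/*
 * On every vcpu_load the per-CPU guest ASIDs are validated against
 * asid_cache(cpu): if the version bits differ, the ASIDs were handed out in
 * an earlier ASID cycle and fresh kernel and user ASIDs must be allocated.
 */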
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
			vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
			vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be based
		 * on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_load);
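/*
 * On vcpu_put the live EntryHi (and with it the in-use ASID) is stashed in
 * preempt_entryhi so that vcpu_load can restore it, and the host's own ASID
 * is written back before another task runs.
 */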
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL(kvm_arch_vcpu_put);
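/*
 * kvm_get_inst() - Fetch a guest instruction at @opc. For mapped segments
 * the host TLB is probed first; on a miss the mapping is pulled in from the
 * guest TLB before the fetch. Guest KSEG0 addresses are translated directly
 * to a host physical address and read through CKSEG0.
 */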
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							      (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL(kvm_get_inst);
44
arch/mips/kvm/trace.h
Normal file

@@ -0,0 +1,44 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace

/* Tracepoints for VM exits */
extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];

TRACE_EVENT(kvm_exit,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
	    TP_ARGS(vcpu, reason),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(unsigned int, reason)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->reason = reason;
	    ),

	    TP_printk("[%s]PC: 0x%08lx",
		      kvm_mips_exit_types_str[__entry->reason],
		      __entry->pc)
);

#endif /* _TRACE_KVM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
520
arch/mips/kvm/trap_emul.c
Normal file

@@ -0,0 +1,520 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "opcode.h"
#include "interrupt.h"
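/*
 * GVA->GPA translation callback. Only unmapped segment (KSEG0/KSEG1)
 * addresses can be translated directly by stripping the segment offset;
 * anything else is reported as an invalid address.
 */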
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	uint32_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}
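/*
 * Coprocessor Unusable: CE == 1 means the guest touched the (non-existent)
 * FPU, so an FPU exception is delivered to the guest; any other coprocessor
 * access is run through the instruction emulator.
 */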
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
		er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
	else
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
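/*
 * TLB Modified: a store hit a valid but non-dirty mapping. Mapped-segment
 * faults are forwarded to kvm_mips_handle_tlbmod(); a modified fault on
 * guest KSEG0 is unexpected (see the XXXKYMA note below) and is reported
 * to userland as an internal error.
 */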
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
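/*
 * TLB store miss. Stores to the commpage in guest kernel mode map the
 * commpage in; mapped-segment misses go to kvm_mips_handle_tlbmiss();
 * guest KSEG0 misses are always handled by KVM, since the guest kernel
 * never expects to see them.
 */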
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
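/*
 * TLB load miss: same dispatch as the store path above, for loads and
 * instruction fetches.
 */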
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
			  vcpu->arch.pc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
||||
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_run *run = vcpu->run;
|
||||
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
|
||||
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
|
||||
unsigned long cause = vcpu->arch.host_cp0_cause;
|
||||
enum emulation_result er = EMULATE_DONE;
|
||||
int ret = RESUME_GUEST;
|
||||
|
||||
if (KVM_GUEST_KERNEL_MODE(vcpu)
|
||||
&& (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
|
||||
kvm_debug("Emulate Store to MMIO space\n");
|
||||
er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
|
||||
if (er == EMULATE_FAIL) {
|
||||
kvm_err("Emulate Store to MMIO space failed\n");
|
||||
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
||||
ret = RESUME_HOST;
|
||||
} else {
|
||||
run->exit_reason = KVM_EXIT_MMIO;
|
||||
ret = RESUME_HOST;
|
||||
}
|
||||
} else {
|
||||
kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
|
||||
cause, opc, badvaddr);
|
||||
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
||||
ret = RESUME_HOST;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		er = EMULATE_FAIL;
	}
	return ret;
}
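/*
 * The remaining exception handlers share one shape: emulate (or deliver the
 * exception to the guest) and resume the guest on EMULATE_DONE, otherwise
 * report an internal error to userland.
 */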
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* No MSA supported in guest, guest reserved instruction exception */
	er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
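/*
 * Present the guest with a 24Kc-like CPU: PRId 0x00019300, an R4000-style
 * TLB of KVM_MIPS_GUEST_TLB_SIZE entries, cache geometry borrowed from the
 * host Config1, and features we do not emulate masked out.
 */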
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected, for now we simulate a MIPS 24kc
	 */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
	kvm_write_c0_guest_config(cop0,
				  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
				  (MMU_TYPE_R4000 << CP0C0_MT));

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &=
		~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
		  (1 << CP0C1_WR) | (1 << CP0C1_CA));
	kvm_write_c0_guest_config1(cop0, config1);

	kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
	/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
	kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
				   (1 << CP0C3_ULRI));

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Set up IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

	return 0;
}
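/*
 * Only the timekeeping registers need special handling through the one_reg
 * interface here; the remaining registers are handled by the generic
 * KVM/MIPS code before these callbacks are reached.
 */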
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}