Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
Synced 2025-09-08 17:18:05 -04:00

Commit f6dfaef42e: Fixed MTP to work with TWRP
50820 changed files with 20846062 additions and 0 deletions
arch/s390/kvm/Kconfig (new file, 57 lines)
@@ -0,0 +1,57 @@
#
# KVM configuration
#
source "virt/kvm/Kconfig"

menuconfig VIRTUALIZATION
	def_bool y
	prompt "KVM"
	---help---
	  Say Y here to get to see options for using your Linux host to run other
	  operating systems inside virtual machines (guests).
	  This option alone does not add any kernel code.

	  If you say N, all options in this submenu will be skipped and disabled.

if VIRTUALIZATION

config KVM
	def_tristate y
	prompt "Kernel-based Virtual Machine (KVM) support"
	depends on HAVE_KVM
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select HAVE_KVM_CPU_RELAX_INTERCEPT
	select HAVE_KVM_EVENTFD
	select KVM_ASYNC_PF
	select KVM_ASYNC_PF_SYNC
	select HAVE_KVM_IRQCHIP
	select HAVE_KVM_IRQFD
	select HAVE_KVM_IRQ_ROUTING
	---help---
	  Support hosting paravirtualized guest machines using the SIE
	  virtualization capability on the mainframe. This should work
	  on any 64bit machine.

	  This module provides access to the hardware capabilities through
	  a character device node named /dev/kvm.

	  To compile this as a module, choose M here: the module
	  will be called kvm.

	  If unsure, say N.

config KVM_S390_UCONTROL
	bool "Userspace controlled virtual machines"
	depends on KVM
	---help---
	  Allow CAP_SYS_ADMIN users to create KVM virtual machines that are
	  controlled by userspace.

	  If unsure, say N.

# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/vhost/Kconfig

endif # VIRTUALIZATION
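The KVM help text above points at the /dev/kvm character device. As a quick orientation (not part of this commit), here is a minimal userspace sketch that probes the device using only the stable KVM_GET_API_VERSION and KVM_CREATE_VM ioctls; the error handling is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
	/* The device node exists once CONFIG_KVM is built in or loaded. */
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* The stable KVM API has reported version 12 for a long time. */
	int version = ioctl(kvm, KVM_GET_API_VERSION, 0);
	printf("KVM API version: %d\n", version);

	/* Create an (empty) VM; the returned fd accepts VM-level ioctls. */
	int vmfd = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vmfd < 0)
		perror("KVM_CREATE_VM");
	else
		close(vmfd);

	close(kvm);
	return 0;
}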
arch/s390/kvm/Makefile (new file, 17 lines)
@@ -0,0 +1,17 @@
# Makefile for kernel virtual machines on s390
#
# Copyright IBM Corp. 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.

KVM := ../../../virt/kvm
common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o

ccflags-y := -Ivirt/kvm -Iarch/s390/kvm

kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
kvm-objs += diag.o gaccess.o guestdbg.o

obj-$(CONFIG_KVM) += kvm.o
arch/s390/kvm/diag.c (new file, 255 lines)
@@ -0,0 +1,255 @@
/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2011
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/pgalloc.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"

static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix = kvm_s390_get_prefix(vcpu);

	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;

	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
	vcpu->stat.diagnose_10++;

	/*
	 * We checked for start >= end above, so let's check for the
	 * fast path (no prefix swap page involved)
	 */
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(vcpu->arch.gmap, start, end);
	} else {
		/*
		 * This is the slow path. gmap_discard will check for start
		 * so let's split this into before prefix, prefix, after
		 * prefix and let gmap_discard make some of these calls
		 * NOPs.
		 */
		gmap_discard(vcpu->arch.gmap, start, prefix);
		if (start <= prefix)
			gmap_discard(vcpu->arch.gmap, 0, 4096);
		if (end > prefix + 4096)
			gmap_discard(vcpu->arch.gmap, 4096, 8192);
		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
	}
	return 0;
}

static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed. We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * The specification allows already pending tokens to survive
		 * the cancel, therefore to reduce code complexity, we assume
		 * all outstanding tokens are already pending.
		 */
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If the pfault handling was not established or is already
		 * canceled SC24-6084 requests to return decimal 4.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.diagnose_44++;
	kvm_vcpu_on_spin(vcpu);
	return 0;
}

static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tcpu;
	int tid;
	int i;

	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	vcpu->stat.diagnose_9c++;
	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);

	if (tid == vcpu->vcpu_id)
		return 0;

	kvm_for_each_vcpu(i, tcpu, kvm)
		if (tcpu->vcpu_id == tid) {
			kvm_vcpu_yield_to(tcpu);
			break;
		}

	return 0;
}

static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
	switch (subcode) {
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
		   vcpu->run->s390_reset_flags);
	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
	return -EREMOTE;
}

static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
	return ret < 0 ? ret : 0;
}

int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	trace_kvm_s390_handle_diag(vcpu, code);
	switch (code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x9c:
		return __diag_time_slice_end_directed(vcpu);
	case 0x258:
		return __diag_page_ref_service(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	case 0x500:
		return __diag_virtio_hypercall(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
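For context on the register layout documented in __diag_virtio_hypercall: the guest side of DIAG 0x500 is issued by the s390 virtio-ccw transport driver. The sketch below is modeled on that driver's notify helper and is an assumption about the guest, not part of this file; the register bindings match what the handler above expects (gpr 1 = subcode, gpr 2 = subchannel id, gpr 3 = virtqueue index, gpr 4 = cookie, with the return value coming back in gpr 2):

/* Hypothetical guest-side helper, modeled on the virtio-ccw transport. */
static inline long kvm_ccw_notify(unsigned long schid,
				  unsigned long queue_index, long cookie)
{
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register unsigned long __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __cookie asm("4") = cookie;
	register long __rc asm("2");	/* gpr 2 doubles as the return value */

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc)
		      : "d" (__nr), "d" (__schid), "d" (__index), "d" (__cookie)
		      : "memory", "cc");
	return __rc;
}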
arch/s390/kvm/gaccess.c (new file, 733 lines)
@@ -0,0 +1,733 @@
/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 *
 */

#include <linux/vmalloc.h>
#include <linux/err.h>
#include <asm/pgtable.h>
#include "kvm-s390.h"
#include "gaccess.h"

union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long	 : 2;
		unsigned long g  : 1; /* Subspace Group Control */
		unsigned long p  : 1; /* Private Space Control */
		unsigned long s  : 1; /* Storage-Alteration-Event Control */
		unsigned long x  : 1; /* Space-Switch-Event Control */
		unsigned long r  : 1; /* Real-Space Control */
		unsigned long	 : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};

enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};

union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};

union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long	 : 2;
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long	 : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long	 : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};

struct region3_table_entry_fc0 {
	unsigned long sto : 52; /* Segment-Table Origin */
	unsigned long	 : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};

struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long	 : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};

struct segment_entry_fc0 {
	unsigned long pto : 53; /* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long	 : 3;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long	 : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f  : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p  : 1; /* DAT-Protection Bit */
	unsigned long co : 1; /* Change-Recording Override */
	unsigned long	 : 2;
	unsigned long i  : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long	 : 2;
};

union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long	 : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long	 : 4;
		unsigned long i  : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long	 : 2;
	};
};

enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};

union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z  : 1; /* Zero Bit */
		unsigned long i  : 1; /* Page-Invalid Bit */
		unsigned long p  : 1; /* DAT-Protection Bit */
		unsigned long co : 1; /* Change-Recording Override */
		unsigned long	 : 8;
	};
};

/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx  : 11;
		unsigned long px  : 8;
		unsigned long bx  : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long	    : 9;
		unsigned long rsx01 : 2;
		unsigned long	    : 9;
		unsigned long rtx01 : 2;
		unsigned long	    : 9;
		unsigned long sx01  : 2;
		unsigned long	    : 29;
	};
};

/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};

static int ipte_lock_count;
static DEFINE_MUTEX(ipte_mutex);

int ipte_lock_held(struct kvm_vcpu *vcpu)
{
	union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control;

	if (vcpu->arch.sie_block->eca & 1)
		return ic->kh != 0;
	return ipte_lock_count != 0;
}

static void ipte_lock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&ipte_mutex);
	ipte_lock_count++;
	if (ipte_lock_count > 1)
		goto out;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
		old = *ic;
		barrier();
		while (old.k) {
			cond_resched();
			old = *ic;
			barrier();
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
out:
	mutex_unlock(&ipte_mutex);
}

static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	mutex_lock(&ipte_mutex);
	ipte_lock_count--;
	if (ipte_lock_count)
		goto out;
	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
		old = *ic;
		barrier();
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	wake_up(&vcpu->kvm->arch.ipte_wq);
out:
	mutex_unlock(&ipte_mutex);
}

static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
		old = *ic;
		barrier();
		while (old.kg) {
			cond_resched();
			old = *ic;
			barrier();
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
}

static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
	union ipte_control old, new, *ic;

	ic = &vcpu->kvm->arch.sca->ipte_control;
	do {
		old = *ic;
		barrier();
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	if (!new.kh)
		wake_up(&vcpu->kvm->arch.ipte_wq);
}

void ipte_lock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
		ipte_lock_siif(vcpu);
	else
		ipte_lock_simple(vcpu);
}

void ipte_unlock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
		ipte_unlock_siif(vcpu);
	else
		ipte_unlock_simple(vcpu);
}

static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
{
	switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
	case PSW_AS_PRIMARY:
		return vcpu->arch.sie_block->gcr[1];
	case PSW_AS_SECONDARY:
		return vcpu->arch.sie_block->gcr[7];
	case PSW_AS_HOME:
		return vcpu->arch.sie_block->gcr[13];
	}
	return 0;
}

static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}

/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @write: indicates if access is a write access
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      guest mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, int write)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2;
	union asce asce;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_vfacility(8);
	edat2 = edat1 && test_vfacility(78);
	asce.val = get_vcpu_asce(vcpu);
	if (asce.r)
		goto real_address;
	ptr = asce.origin * 4096;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * 4096 + vaddr.rsx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * 4096 + vaddr.rtx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
	}
		/* fallthrough */
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
	}
	}
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	if (pte.co && !edat1)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (write && dat_protection)
		return PGM_PROTECTION;
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}

static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}

static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	union asce asce;

	if (!ctlreg0.lap)
		return 0;
	asce.val = get_vcpu_asce(vcpu);
	if (psw_bits(*psw).t && asce.p)
		return 0;
	return 1;
}

struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi  : 2;  /* Access Exception Fetch/Store Indication */
	unsigned long	   : 7;
	unsigned long b61  : 1;
	unsigned long as   : 2;  /* ASCE Identifier */
};

enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2  /* Exception was due to fetch operation */
};

static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
			    unsigned long *pages, unsigned long nr_pages,
			    int write)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec_bits;
	int lap_enabled, rc;

	memset(pgm, 0, sizeof(*pgm));
	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
	tec_bits->as = psw_bits(*psw).as;
	lap_enabled = low_address_protection_enabled(vcpu);
	while (nr_pages) {
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		tec_bits->addr = ga >> PAGE_SHIFT;
		if (write && lap_enabled && is_low_address(ga)) {
			pgm->code = PGM_PROTECTION;
			return pgm->code;
		}
		ga &= PAGE_MASK;
		if (psw_bits(*psw).t) {
			rc = guest_translate(vcpu, ga, pages, write);
			if (rc < 0)
				return rc;
			if (rc == PGM_PROTECTION)
				tec_bits->b61 = 1;
			if (rc)
				pgm->code = rc;
		} else {
			*pages = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, *pages))
				pgm->code = PGM_ADDRESSING;
		}
		if (pgm->code)
			return pgm->code;
		ga += PAGE_SIZE;
		pages++;
		nr_pages--;
	}
	return 0;
}

int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
		 unsigned long len, int write)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long _len, nr_pages, gpa, idx;
	unsigned long pages_array[2];
	unsigned long *pages;
	int need_ipte_lock;
	union asce asce;
	int rc;

	if (!len)
		return 0;
	/* Access register mode is not supported yet. */
	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
		return -EOPNOTSUPP;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	pages = pages_array;
	if (nr_pages > ARRAY_SIZE(pages_array))
		pages = vmalloc(nr_pages * sizeof(unsigned long));
	if (!pages)
		return -ENOMEM;
	asce.val = get_vcpu_asce(vcpu);
	need_ipte_lock = psw_bits(*psw).t && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu);
	rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
	for (idx = 0; idx < nr_pages && !rc; idx++) {
		gpa = *(pages + idx) + (ga & ~PAGE_MASK);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (write)
			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
		else
			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
		len -= _len;
		ga += _len;
		data += _len;
	}
	if (need_ipte_lock)
		ipte_unlock(vcpu);
	if (nr_pages > ARRAY_SIZE(pages_array))
		vfree(pages);
	return rc;
}

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, int write)
{
	unsigned long _len, gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
		if (write)
			rc = write_guest_abs(vcpu, gpa, data, _len);
		else
			rc = read_guest_abs(vcpu, gpa, data, _len);
		len -= _len;
		gra += _len;
		data += _len;
	}
	return rc;
}

/**
 * guest_translate_address - translate guest logical into guest absolute address
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa, int write)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec;
	union asce asce;
	int rc;

	/* Access register mode is not supported yet. */
	if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
		return -EOPNOTSUPP;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	memset(pgm, 0, sizeof(*pgm));
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec->as = psw_bits(*psw).as;
	tec->fsi = write ? FSI_STORE : FSI_FETCH;
	tec->addr = gva >> PAGE_SHIFT;
	if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
		if (write) {
			rc = pgm->code = PGM_PROTECTION;
			return rc;
		}
	}

	asce.val = get_vcpu_asce(vcpu);
	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, write);
		if (rc > 0) {
			if (rc == PGM_PROTECTION)
				tec->b61 = 1;
			pgm->code = rc;
		}
	} else {
		rc = 0;
		*gpa = kvm_s390_real_to_abs(vcpu, gva);
		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
			rc = pgm->code = PGM_ADDRESSING;
	}

	return rc;
}

/**
 * kvm_s390_check_low_addr_protection - check for low-address protection
 * @ga: Guest address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec_bits;

	if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
		return 0;

	memset(pgm, 0, sizeof(*pgm));
	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec_bits->fsi = FSI_STORE;
	tec_bits->as = psw_bits(*psw).as;
	tec_bits->addr = ga >> PAGE_SHIFT;
	pgm->code = PGM_PROTECTION;

	return pgm->code;
}
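The vaddress union above depends on s390 being big-endian: the first-declared bit-field (rfx) occupies the most significant bits, so the 64-bit address splits as rfx:11, rsx:11, rtx:11, sx:11, px:8, bx:12 from the top down. A small standalone sketch (host-side, illustrative only) cross-checks the same decomposition with explicit shifts and masks:

#include <stdio.h>

/* Field widths follow the vaddress union above:
 * rfx:11  rsx:11  rtx:11  sx:11  px:8  bx:12  (11+11+11+11+8+12 = 64)
 */
static void decode(unsigned long addr)
{
	unsigned long bx  = addr & 0xfff;		/* byte index	       */
	unsigned long px  = (addr >> 12) & 0xff;	/* page index	       */
	unsigned long sx  = (addr >> 20) & 0x7ff;	/* segment index       */
	unsigned long rtx = (addr >> 31) & 0x7ff;	/* region third index  */
	unsigned long rsx = (addr >> 42) & 0x7ff;	/* region second index */
	unsigned long rfx = (addr >> 53) & 0x7ff;	/* region first index  */

	printf("%#lx -> rfx=%lu rsx=%lu rtx=%lu sx=%lu px=%lu bx=%#lx\n",
	       addr, rfx, rsx, rtx, sx, px, bx);
}

int main(void)
{
	decode(0x0000000080001234UL);	/* rtx=1, sx=0, px=1, bx=0x234 */
	return 0;
}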
arch/s390/kvm/gaccess.h (new file, 335 lines)
@@ -0,0 +1,335 @@
/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include "kvm-s390.h"

/**
 * kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @vcpu - guest virtual cpu
 * @gra - guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra of a virtual guest cpu by applying its prefix.
 */
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
						 unsigned long gra)
{
	unsigned long prefix = kvm_s390_get_prefix(vcpu);

	if (gra < 2 * PAGE_SIZE)
		gra += prefix;
	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
		gra -= prefix;
	return gra;
}

/**
 * kvm_s390_logical_to_effective - convert guest logical to effective address
 * @vcpu: guest virtual cpu
 * @ga: guest logical address
 *
 * Convert a guest vcpu logical address to a guest vcpu effective address by
 * applying the rules of the vcpu's addressing mode defined by PSW bits 31
 * and 32 (extended/basic addressing mode).
 *
 * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
 * of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
							  unsigned long ga)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (psw_bits(*psw).eaba == PSW_AMODE_64BIT)
		return ga;
	if (psw_bits(*psw).eaba == PSW_AMODE_31BIT)
		return ga & ((1UL << 31) - 1);
	return ga & ((1UL << 24) - 1);
}

/*
 * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions
 * which shall only be used to access the lowcore of a vcpu.
 * These functions should be used for e.g. interrupt handlers where no
 * guest memory access protection facilities, like key or low address
 * protection, are applicable.
 * At a later point guest vcpu lowcore access should happen via pinned
 * prefix pages, so that these pages can be accessed directly via the
 * kernel mapping. All of these *_lc functions can be removed then.
 */

/**
 * put_guest_lc - write a simple variable to a guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @x: value to copy to guest
 * @gra: vcpu's destination guest real address
 *
 * Copies a simple value from kernel space to a guest vcpu's lowcore.
 * The size of the variable may be 1, 2, 4 or 8 bytes. The destination
 * must be located in the vcpu's lowcore. Otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
#define put_guest_lc(vcpu, x, gra)				\
({								\
	struct kvm_vcpu *__vcpu = (vcpu);			\
	__typeof__(*(gra)) __x = (x);				\
	unsigned long __gpa;					\
								\
	__gpa = (unsigned long)(gra);				\
	__gpa += kvm_s390_get_prefix(__vcpu);			\
	kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x));	\
})

/**
 * write_guest_lc - copy data from kernel space to guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @gra: vcpu's source guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from kernel space to guest vcpu's lowcore. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
static inline __must_check
int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		   unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_lc - copy data from guest vcpu's lowcore to kernel space
 * @vcpu: virtual cpu
 * @gra: vcpu's source guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from guest vcpu's lowcore to kernel space. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
static inline __must_check
int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		  unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}

int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa, int write);

int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
		 unsigned long len, int write);

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, int write);

/**
 * write_guest - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @ga (guest address).
 * In order to copy data to guest space the PSW of the vcpu is inspected:
 * If DAT is off data will be copied to guest real or absolute memory.
 * If DAT is on data will be copied to the address space as specified by
 * the address space bits of the PSW:
 * Primary, secondary or home space (access register mode is currently not
 * implemented).
 * The addressing mode of the PSW is also inspected, so that address wrap
 * around is taken into account for 24-, 31- and 64-bit addressing mode,
 * if the data to be copied crosses page boundaries in guest address space.
 * In addition also low address and DAT protection are inspected before
 * copying any data (key protection is currently not implemented).
 *
 * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu.
 * In case of an access exception (e.g. protection exception) pgm will contain
 * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()'
 * will inject a correct exception into the guest.
 * If no access exception happened, the contents of pgm are undefined when
 * this function returns.
 *
 * Returns:  - zero on success
 *	     - a negative value if e.g. the guest mapping is broken or in
 *	       case of out-of-memory. In this case the contents of pgm are
 *	       undefined. Also parts of @data may have been copied to guest
 *	       space.
 *	     - a positive value if an access exception happened. In this case
 *	       the returned value is the program interruption code and the
 *	       contents of pgm may be used to inject an exception into the
 *	       guest. No data has been copied to guest space.
 *
 * Note: in case an access exception is recognized no data has been copied to
 *	 guest space (this is also true if the data to be copied would cross
 *	 one or more page boundaries in guest space).
 *	 Therefore this function may be used for nullifying and suppressing
 *	 instruction emulation.
 *	 It may also be used for terminating instructions, if it is undefined
 *	 whether data has been changed in guest space in case of an exception.
 */
static inline __must_check
int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
		unsigned long len)
{
	return access_guest(vcpu, ga, data, len, 1);
}

/**
 * read_guest - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @ga (guest address) to @data (kernel space).
 *
 * The behaviour of read_guest is identical to write_guest, except that
 * data will be copied from guest space to kernel space.
 */
static inline __must_check
int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
	       unsigned long len)
{
	return access_guest(vcpu, ga, data, len, 0);
}

/**
 * write_guest_abs - copy data from kernel space to guest space absolute
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		    unsigned long len)
{
	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_abs - copy data from guest space absolute to kernel space
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		   unsigned long len)
{
	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}

/**
 * write_guest_real - copy data from kernel space to guest space real
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gra (guest real address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		     unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, 1);
}

/**
 * read_guest_real - copy data from guest space real to kernel space
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gra (guest real address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		    unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, 0);
}

void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);

#endif /* __KVM_S390_GACCESS_H */
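kvm_s390_real_to_abs above implements the architecture's two-way prefix swap: real pages 0 and 1 map to the prefix area, and the prefix area maps back to absolute pages 0 and 1; everything else passes through unchanged. A standalone sketch with a hypothetical prefix value makes the rule concrete:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Standalone copy of the prefix-swap rule from kvm_s390_real_to_abs. */
static unsigned long real_to_abs(unsigned long gra, unsigned long prefix)
{
	if (gra < 2 * PAGE_SIZE)
		gra += prefix;	/* real pages 0/1 live at the prefix area */
	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
		gra -= prefix;	/* ...and the prefix area maps back to 0/1 */
	return gra;
}

int main(void)
{
	unsigned long prefix = 0x20000;	/* hypothetical prefix: 128 KiB */

	printf("%#lx\n", real_to_abs(0x0, prefix));	/* -> 0x20000      */
	printf("%#lx\n", real_to_abs(0x20008, prefix));	/* -> 0x8          */
	printf("%#lx\n", real_to_abs(0x50000, prefix));	/* -> 0x50000      */
	return 0;
}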
482
arch/s390/kvm/guestdbg.c
Normal file
482
arch/s390/kvm/guestdbg.c
Normal file
|
@ -0,0 +1,482 @@
|
|||
/*
|
||||
* kvm guest debug support
|
||||
*
|
||||
* Copyright IBM Corp. 2014
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License (version 2 only)
|
||||
* as published by the Free Software Foundation.
|
||||
*
|
||||
* Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
|
||||
*/
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/errno.h>
|
||||
#include "kvm-s390.h"
|
||||
#include "gaccess.h"
|
||||
|
||||
/*
|
||||
* Extends the address range given by *start and *stop to include the address
|
||||
* range starting with estart and the length len. Takes care of overflowing
|
||||
* intervals and tries to minimize the overall intervall size.
|
||||
*/
|
||||
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
|
||||
{
|
||||
u64 estop;
|
||||
|
||||
if (len > 0)
|
||||
len--;
|
||||
else
|
||||
len = 0;
|
||||
|
||||
estop = estart + len;
|
||||
|
||||
/* 0-0 range represents "not set" */
|
||||
if ((*start == 0) && (*stop == 0)) {
|
||||
*start = estart;
|
||||
*stop = estop;
|
||||
} else if (*start <= *stop) {
|
||||
/* increase the existing range */
|
||||
if (estart < *start)
|
||||
*start = estart;
|
||||
if (estop > *stop)
|
||||
*stop = estop;
|
||||
} else {
|
||||
/* "overflowing" interval, whereby *stop > *start */
|
||||
if (estart <= *stop) {
|
||||
if (estop > *stop)
|
||||
*stop = estop;
|
||||
} else if (estop > *start) {
|
||||
if (estart < *start)
|
||||
*start = estart;
|
||||
}
|
||||
/* minimize the range */
|
||||
else if ((estop - *stop) < (*start - estart))
|
||||
*stop = estop;
|
||||
else
|
||||
*start = estart;
|
||||
}
|
||||
}
|
||||
|
||||
#define MAX_INST_SIZE 6
|
||||
|
||||
static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long start, len;
|
||||
u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
|
||||
u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
|
||||
u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
|
||||
int i;
|
||||
|
||||
if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
|
||||
vcpu->arch.guestdbg.hw_bp_info == NULL)
|
||||
return;
|
||||
|
||||
/*
|
||||
* If the guest is not interrested in branching events, we can savely
|
||||
* limit them to the PER address range.
|
||||
*/
|
||||
if (!(*cr9 & PER_EVENT_BRANCH))
|
||||
*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
|
||||
*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;
|
||||
|
||||
for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
|
||||
start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
|
||||
len = vcpu->arch.guestdbg.hw_bp_info[i].len;
|
||||
|
||||
/*
|
||||
* The instruction in front of the desired bp has to
|
||||
* report instruction-fetching events
|
||||
*/
|
||||
if (start < MAX_INST_SIZE) {
|
||||
len += start;
|
||||
start = 0;
|
||||
} else {
|
||||
start -= MAX_INST_SIZE;
|
||||
len += MAX_INST_SIZE;
|
||||
}
|
||||
|
||||
extend_address_range(cr10, cr11, start, len);
|
||||
}
|
||||
}
|
||||
|
||||
static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long start, len;
|
||||
u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
|
||||
u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
|
||||
u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
|
||||
int i;
|
||||
|
||||
if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
|
||||
vcpu->arch.guestdbg.hw_wp_info == NULL)
|
||||
return;
|
||||
|
||||
/* if host uses storage alternation for special address
|
||||
* spaces, enable all events and give all to the guest */
|
||||
if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
|
||||
*cr9 &= ~PER_CONTROL_ALTERATION;
|
||||
*cr10 = 0;
|
||||
*cr11 = PSW_ADDR_INSN;
|
||||
} else {
|
||||
*cr9 &= ~PER_CONTROL_ALTERATION;
|
||||
*cr9 |= PER_EVENT_STORE;
|
||||
|
||||
for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
|
||||
start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
|
||||
			len = vcpu->arch.guestdbg.hw_wp_info[i].len;

			extend_address_range(cr10, cr11, start, len);
		}
	}
}

void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}

void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}

void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: if guest psw has per enabled, otherwise 0s!
	 * This reduces the amount of reported events.
	 * Need to intercept all psw changes!
	 */

	if (guestdbg_sstep_enabled(vcpu)) {
		/* disable timer (clock-comparator) interrupts */
		vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
		vcpu->arch.sie_block->gcr[10] = 0;
		vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN;
	}

	if (guestdbg_hw_bp_enabled(vcpu)) {
		enable_all_hw_bp(vcpu);
		enable_all_hw_wp(vcpu);
	}

	/* TODO: Instruction-fetching-nullification not allowed for now */
	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}

#define MAX_WP_SIZE 100

static int __import_wp_info(struct kvm_vcpu *vcpu,
			    struct kvm_hw_breakpoint *bp_data,
			    struct kvm_hw_wp_info_arch *wp_info)
{
	int ret = 0;
	wp_info->len = bp_data->len;
	wp_info->addr = bp_data->addr;
	wp_info->phys_addr = bp_data->phys_addr;
	wp_info->old_data = NULL;

	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
		return -EINVAL;

	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
	if (!wp_info->old_data)
		return -ENOMEM;
	/* try to backup the original value */
	ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
			 wp_info->len);
	if (ret) {
		kfree(wp_info->old_data);
		wp_info->old_data = NULL;
	}

	return ret;
}

#define MAX_BP_COUNT 50

int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg)
{
	int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
	struct kvm_hw_breakpoint *bp_data = NULL;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;

	if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
		return 0;
	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
		return -EINVAL;

	size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
	bp_data = kmalloc(size, GFP_KERNEL);
	if (!bp_data) {
		ret = -ENOMEM;
		goto error;
	}

	if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) {
		ret = -EFAULT;
		goto error;
	}

	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			nr_wp++;
			break;
		case KVM_HW_BP:
			nr_bp++;
			break;
		default:
			break;
		}
	}

	size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
	if (size > 0) {
		wp_info = kmalloc(size, GFP_KERNEL);
		if (!wp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
	if (size > 0) {
		bp_info = kmalloc(size, GFP_KERNEL);
		if (!bp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}

	for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			ret = __import_wp_info(vcpu, &bp_data[i],
					       &wp_info[nr_wp]);
			if (ret)
				goto error;
			nr_wp++;
			break;
		case KVM_HW_BP:
			bp_info[nr_bp].len = bp_data[i].len;
			bp_info[nr_bp].addr = bp_data[i].addr;
			nr_bp++;
			break;
		}
	}

	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
	vcpu->arch.guestdbg.hw_bp_info = bp_info;
	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
	vcpu->arch.guestdbg.hw_wp_info = wp_info;
	return 0;
error:
	kfree(bp_data);
	kfree(wp_info);
	kfree(bp_info);
	return ret;
}

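The breakpoint array consumed above arrives through the KVM_SET_GUEST_DEBUG vcpu ioctl. A minimal sketch of the userspace side, assuming an already-created vcpu_fd and an arbitrary guest address (neither is part of this patch):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative only: arm one s390 hardware breakpoint on a vcpu fd. */
static int set_hw_bp(int vcpu_fd, __u64 guest_addr)
{
	struct kvm_hw_breakpoint bp;
	struct kvm_guest_debug dbg;

	memset(&bp, 0, sizeof(bp));
	bp.type = KVM_HW_BP;		/* instruction breakpoint */
	bp.addr = guest_addr;
	bp.len = 1;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.nr_hw_bp = 1;
	dbg.arch.hw_bp = &bp;	/* copied in by kvm_s390_import_bp_data() */

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
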
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *hw_wp_info = NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		kfree(hw_wp_info->old_data);
		hw_wp_info->old_data = NULL;
	}
	kfree(vcpu->arch.guestdbg.hw_wp_info);
	vcpu->arch.guestdbg.hw_wp_info = NULL;

	kfree(vcpu->arch.guestdbg.hw_bp_info);
	vcpu->arch.guestdbg.hw_bp_info = NULL;

	vcpu->arch.guestdbg.nr_hw_wp = 0;
	vcpu->arch.guestdbg.nr_hw_bp = 0;
}

static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		/* "overflowing" interval */
		return (addr <= a) && (addr >= b);
}
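The bounds in cr10/cr11 may be given with the start above the end, which the helper treats as a wrapped ("overflowing") interval with swapped endpoints. A standalone restatement of the two cases, with invented addresses:

#include <assert.h>
#include <stdint.h>

/* local copy of the helper above, for illustration only */
static int in_addr_range(uint64_t addr, uint64_t a, uint64_t b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		return (addr <= a) && (addr >= b);
}

int main(void)
{
	assert(in_addr_range(0x1800, 0x1000, 0x2000));	/* normal interval */
	assert(in_addr_range(0x1800, 0x2000, 0x1000));	/* swapped bounds */
	assert(!in_addr_range(0x2800, 0x1000, 0x2000));
	return 0;
}
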
#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)

static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
					      unsigned long addr)
{
	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		/* addr is directly the start or in the range of a bp */
		if (addr == bp_info->addr)
			goto found;
		if (bp_info->len > 0 &&
		    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
			goto found;

		bp_info++;
	}

	return NULL;
found:
	return bp_info;
}

static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	void *temp = NULL;

	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
			continue;

		temp = kmalloc(wp_info->len, GFP_KERNEL);
		if (!temp)
			continue;

		/* refetch the wp data and compare it to the old value */
		if (!read_guest(vcpu, wp_info->phys_addr, temp,
				wp_info->len)) {
			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
				kfree(temp);
				return wp_info;
			}
		}
		kfree(temp);
		temp = NULL;
	}

	return NULL;
}

void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}

#define per_bp_event(code) \
	(code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
#define per_write_wp_event(code) \
	(code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))

static int debug_exit_required(struct kvm_vcpu *vcpu)
{
	u32 perc = (vcpu->arch.sie_block->perc << 24);
	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;
	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
	unsigned long peraddr = vcpu->arch.sie_block->peraddr;

	if (guestdbg_hw_bp_enabled(vcpu)) {
		if (per_write_wp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
			wp_info = any_wp_changed(vcpu);
			if (wp_info) {
				debug_exit->addr = wp_info->addr;
				debug_exit->type = KVM_HW_WP_WRITE;
				goto exit_required;
			}
		}
		if (per_bp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_bp > 0) {
			bp_info = find_hw_bp(vcpu, addr);
			/* remove duplicate events if PC==PER address */
			if (bp_info && (addr != peraddr)) {
				debug_exit->addr = addr;
				debug_exit->type = KVM_HW_BP;
				vcpu->arch.guestdbg.last_bp = addr;
				goto exit_required;
			}
			/* breakpoint missed */
			bp_info = find_hw_bp(vcpu, peraddr);
			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
				debug_exit->addr = peraddr;
				debug_exit->type = KVM_HW_BP;
				goto exit_required;
			}
		}
	}
	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
		debug_exit->addr = addr;
		debug_exit->type = KVM_SINGLESTEP;
		goto exit_required;
	}

	return 0;
exit_required:
	return 1;
}

#define guest_per_enabled(vcpu) \
	(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)

static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
	u32 perc = vcpu->arch.sie_block->perc << 24;
	u64 peraddr = vcpu->arch.sie_block->peraddr;
	u64 addr = vcpu->arch.sie_block->gpsw.addr;
	u64 cr9 = vcpu->arch.sie_block->gcr[9];
	u64 cr10 = vcpu->arch.sie_block->gcr[10];
	u64 cr11 = vcpu->arch.sie_block->gcr[11];
	/* filter all events, demanded by the guest */
	u32 guest_perc = perc & cr9 & PER_EVENT_MASK;

	if (!guest_per_enabled(vcpu))
		guest_perc = 0;

	/* filter "successful-branching" events */
	if (guest_perc & PER_EVENT_BRANCH &&
	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
	    !in_addr_range(addr, cr10, cr11))
		guest_perc &= ~PER_EVENT_BRANCH;

	/* filter "instruction-fetching" events */
	if (guest_perc & PER_EVENT_IFETCH &&
	    !in_addr_range(peraddr, cr10, cr11))
		guest_perc &= ~PER_EVENT_IFETCH;

	/* All other PER events will be given to the guest */
	/* TODO: Check altered address/address space */

	vcpu->arch.sie_block->perc = guest_perc >> 24;

	if (!guest_perc)
		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
}

void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
	if (debug_exit_required(vcpu))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	filter_guest_per_event(vcpu);
}
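On the userspace side, the KVM_GUESTDBG_EXIT_PENDING flag set above surfaces as a KVM_EXIT_DEBUG return from KVM_RUN. A hedged sketch of the consumer; the vcpu fd and the mmap'ed kvm_run area are assumed to exist already:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Illustrative only: run the vcpu once and report debug exits. */
static void run_once(int vcpu_fd, struct kvm_run *run)
{
	ioctl(vcpu_fd, KVM_RUN, 0);

	if (run->exit_reason == KVM_EXIT_DEBUG) {
		switch (run->debug.arch.type) {
		case KVM_HW_BP:
			printf("hw breakpoint at %llx\n",
			       (unsigned long long)run->debug.arch.addr);
			break;
		case KVM_HW_WP_WRITE:
			printf("watchpoint fired for %llx\n",
			       (unsigned long long)run->debug.arch.addr);
			break;
		case KVM_SINGLESTEP:
			printf("single step at %llx\n",
			       (unsigned long long)run->debug.arch.addr);
			break;
		}
	}
}
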
353
arch/s390/kvm/intercept.c
Normal file
@ -0,0 +1,353 @@
/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>

#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include "trace-s390.h"


static const intercept_handler_t instruction_handlers[256] = {
	[0x01] = kvm_s390_handle_01,
	[0x82] = kvm_s390_handle_lpsw,
	[0x83] = kvm_s390_handle_diag,
	[0xae] = kvm_s390_handle_sigp,
	[0xb2] = kvm_s390_handle_b2,
	[0xb6] = kvm_s390_handle_stctl,
	[0xb7] = kvm_s390_handle_lctl,
	[0xb9] = kvm_s390_handle_b9,
	[0xe5] = kvm_s390_handle_e5,
	[0xeb] = kvm_s390_handle_eb,
};

static int handle_noop(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case 0x0:
		vcpu->stat.exit_null++;
		break;
	case 0x10:
		vcpu->stat.exit_external_request++;
		break;
	default:
		break; /* nothing */
	}
	return 0;
}

static int handle_stop(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	unsigned int action_bits;

	vcpu->stat.exit_stop_request++;
	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

	action_bits = vcpu->arch.local_int.action_bits;

	if (!(action_bits & ACTION_STOP_ON_STOP))
		return 0;

	if (action_bits & ACTION_STORE_ON_STOP) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	return -EOPNOTSUPP;
}

static int handle_validity(struct kvm_vcpu *vcpu)
{
	int viwhy = vcpu->arch.sie_block->ipb >> 16;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
	return -EOPNOTSUPP;
}

static int handle_instruction(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	vcpu->stat.exit_instruction++;
	trace_kvm_s390_intercept_instruction(vcpu,
					     vcpu->arch.sie_block->ipa,
					     vcpu->arch.sie_block->ipb);
	handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

static void __extract_prog_irq(struct kvm_vcpu *vcpu,
			       struct kvm_s390_pgm_info *pgm_info)
{
	memset(pgm_info, 0, sizeof(struct kvm_s390_pgm_info));
	pgm_info->code = vcpu->arch.sie_block->iprcc;

	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
		pgm_info->op_access_id = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
		break;
	case PGM_DATA:
		pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info->per_code = vcpu->arch.sie_block->perc;
		pgm_info->per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info->per_address = vcpu->arch.sie_block->peraddr;
		pgm_info->per_access_id = vcpu->arch.sie_block->peraid;
	}
}

/*
 * restore ITDB to program-interruption TDB in guest lowcore
 * and set TX abort indication if required
 */
static int handle_itdb(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_itdb *itdb;
	int rc;

	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
		return 0;
	if (current->thread.per_flags & PER_FLAG_NO_TE)
		return 0;
	itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
	if (rc)
		return rc;
	memset(itdb, 0, sizeof(*itdb));

	return 0;
}

#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)

static int handle_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info;
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
		kvm_s390_handle_per_event(vcpu);
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}
	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	__extract_prog_irq(vcpu, &pgm_info);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
{
	int rc, rc2;

	vcpu->stat.exit_instr_and_program++;
	rc = handle_instruction(vcpu);
	rc2 = handle_prog(vcpu);

	if (rc == -EOPNOTSUPP)
		vcpu->arch.sie_block->icptcode = 0x04;
	if (rc)
		return rc;
	return rc2;
}

/**
 * handle_external_interrupt - used for external interruption interceptions
 *
 * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
 * the new PSW does not have external interrupts disabled. In the first case,
 * we've got to deliver the interrupt manually, and in the second case, we
 * drop to userspace to handle the situation there.
 */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	u16 eic = vcpu->arch.sie_block->eic;
	struct kvm_s390_interrupt irq;
	psw_t newpsw;
	int rc;

	vcpu->stat.exit_external_interrupt++;

	rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;
	/* We can not handle clock comparator or timer interrupt with bad PSW */
	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	    (newpsw.mask & PSW_MASK_EXT))
		return -EOPNOTSUPP;

	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		irq.type = KVM_S390_INT_CLOCK_COMP;
		break;
	case EXT_IRQ_CPU_TIMER:
		irq.type = KVM_S390_INT_CPU_TIMER;
		break;
	case EXT_IRQ_EXTERNAL_CALL:
		if (kvm_s390_si_ext_call_pending(vcpu))
			return 0;
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.parm = vcpu->arch.sie_block->extcpuaddr;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return kvm_s390_inject_vcpu(vcpu, &irq);
}

/**
 * Handle MOVE PAGE partial execution interception.
 *
 * This interception can only happen for guests with DAT disabled and
 * addresses that are currently not mapped in the host. Thus we try to
 * set up the mappings for the corresponding user pages here (or throw
 * addressing exceptions in case of illegal guest addresses).
 */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Make sure that the source is paged-in */
	srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
	if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Make sure that the destination is paged-in */
	dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
	if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

	psw->addr = __rewind_psw(*psw, 4);

	return 0;
}

static int handle_partial_execution(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
		return handle_mvpg_pei(vcpu);
	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
		return kvm_s390_handle_sigp_pei(vcpu);

	return -EOPNOTSUPP;
}

static const intercept_handler_t intercept_funcs[] = {
	[0x00 >> 2] = handle_noop,
	[0x04 >> 2] = handle_instruction,
	[0x08 >> 2] = handle_prog,
	[0x0C >> 2] = handle_instruction_and_prog,
	[0x10 >> 2] = handle_noop,
	[0x14 >> 2] = handle_external_interrupt,
	[0x18 >> 2] = handle_noop,
	[0x1C >> 2] = kvm_s390_handle_wait,
	[0x20 >> 2] = handle_validity,
	[0x28 >> 2] = handle_stop,
	[0x38 >> 2] = handle_partial_execution,
};

int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	intercept_handler_t func;
	u8 code = vcpu->arch.sie_block->icptcode;

	if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
		return -EOPNOTSUPP;
	func = intercept_funcs[code >> 2];
	if (func)
		return func(vcpu);
	return -EOPNOTSUPP;
}
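kvm_handle_sie_intercept() indexes the table by icptcode / 4 and rejects codes that are not multiples of four. A tiny standalone check of that index math; the codes come from intercept_funcs above, everything else is illustrative:

#include <assert.h>

int main(void)
{
	/* 0x04 (instruction) lands on slot 1, 0x38 (partial execution) on 14 */
	assert((0x04 >> 2) == 1);
	assert((0x38 >> 2) == 14);
	/* a code with the low two bits set is rejected with -EOPNOTSUPP */
	assert((0x05 & 3) != 0);
	return 0;
}
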
1541
arch/s390/kvm/interrupt.c
Normal file
File diff suppressed because it is too large
22
arch/s390/kvm/irq.h
Normal file
@ -0,0 +1,22 @@
/*
 * s390 irqchip routines
 *
 * Copyright IBM Corp. 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#ifndef __KVM_IRQ_H
#define __KVM_IRQ_H

#include <linux/kvm_host.h>

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return 1;
}

#endif
1810
arch/s390/kvm/kvm-s390.c
Normal file
File diff suppressed because it is too large
242
arch/s390/kvm/kvm-s390.h
Normal file
@ -0,0 +1,242 @@
/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);

/* declare vfacilities extern */
extern unsigned long *vfacilities;

int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & 0x10))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (kvm->arch.gmap)
		return 0;
	return 1;
#else
	return 0;
#endif
}

#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}

static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2)
{
	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	/* The displacement is a 20bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}
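The RSY displacement is assembled from a 12-bit low part and an 8-bit high part and then sign-extended by hand, as the comment notes. A standalone check of that sign extension; the sample displacement is invented:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* DL = 0xffc, DH = 0xff: the 20-bit two's complement value -4 */
	uint32_t disp2 = 0xffc + (0xff << 12);

	/* same manual sign extension as kvm_s390_get_base_disp_rsy() */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;

	assert((long)(int)disp2 == -4);
	return 0;
}
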
static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
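The condition code occupies PSW bits 18-19, which are bits 45-44 counting from the least significant end of the 64-bit mask, hence the shift by 44. A standalone check with an invented mask value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = UINT64_C(0x0705000180000000);	/* arbitrary PSW mask */
	unsigned long cc = 2;

	/* the same update as kvm_s390_set_psw_cc() */
	mask &= ~(UINT64_C(3) << 44);
	mask |= (uint64_t)cc << 44;

	assert(((mask >> 44) & 3) == 2);
	return 0;
}
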
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void s390_vcpu_block(struct kvm_vcpu *vcpu);
void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void exit_sie_sync(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
/* is cmma enabled */
bool kvm_s390_cmma_enabled(struct kvm *kvm);
int test_vfacility(unsigned long nr);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
/* implemented in interrupt.c */
int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info);

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc <= 0)
		return rc;
	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

/* implemented in interrupt.c */
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

#endif
1021
arch/s390/kvm/priv.c
Normal file
File diff suppressed because it is too large
473
arch/s390/kvm/sigp.c
Normal file
@ -0,0 +1,473 @@
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int cpuflags;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (cpuflags & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_EMERGENCY,
		.parm = vcpu->vcpu_id,
	};
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc = 0;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_EXTERNAL_CALL,
		.parm = vcpu->vcpu_id,
	};
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
	if (!rc)
		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);

	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP) {
		/* another SIGP STOP is pending */
		kfree(inti);
		rc = SIGP_CC_BUSY;
		goto out;
	}
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	li->action_bits |= action;
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	kvm_s390_vcpu_wakeup(dst_vcpu);
out:
	spin_unlock(&li->lock);

	return rc;
}

static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	rc = __inject_sigp_stop(dst_vcpu, action);

	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	address &= 0x7fffe000u;
	if (kvm_is_error_gpa(vcpu->kvm, address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	kvm_s390_vcpu_wakeup(dst_vcpu);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock(&li->lock);
	return rc;
}

static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
					u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}

int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

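The same stop request can also be queued by the VMM instead of by a guest SIGP order. A hedged sketch using the KVM_S390_INTERRUPT vcpu ioctl of this era; the vcpu fd is an assumption:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative only: queue a SIGP STOP on a vcpu from userspace,
 * mirroring what __inject_sigp_stop() does for guest-issued orders. */
static int inject_sigp_stop(int vcpu_fd)
{
	struct kvm_s390_interrupt irq = {
		.type = KVM_S390_SIGP_STOP,
	};

	return ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq);
}
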
/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu);

	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

	if (order_code == SIGP_EXTERNAL_CALL) {
		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	return -EOPNOTSUPP;
}
273
arch/s390/kvm/trace-s390.h
Normal file
@ -0,0 +1,273 @@
#if !defined(_TRACE_KVMS390_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMS390_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm-s390
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace-s390

/*
 * Trace point for the creation of the kvm instance.
 */
TRACE_EVENT(kvm_s390_create_vm,
	    TP_PROTO(unsigned long type),
	    TP_ARGS(type),

	    TP_STRUCT__entry(
		    __field(unsigned long, type)
		    ),

	    TP_fast_assign(
		    __entry->type = type;
		    ),

	    TP_printk("create vm%s",
		      __entry->type & KVM_VM_S390_UCONTROL ? " (UCONTROL)" : "")
	);

/*
 * Trace points for creation and destruction of vcpus.
 */
TRACE_EVENT(kvm_s390_create_vcpu,
	    TP_PROTO(unsigned int id, struct kvm_vcpu *vcpu,
		     struct kvm_s390_sie_block *sie_block),
	    TP_ARGS(id, vcpu, sie_block),

	    TP_STRUCT__entry(
		    __field(unsigned int, id)
		    __field(struct kvm_vcpu *, vcpu)
		    __field(struct kvm_s390_sie_block *, sie_block)
		    ),

	    TP_fast_assign(
		    __entry->id = id;
		    __entry->vcpu = vcpu;
		    __entry->sie_block = sie_block;
		    ),

	    TP_printk("create cpu %d at %p, sie block at %p", __entry->id,
		      __entry->vcpu, __entry->sie_block)
	);

TRACE_EVENT(kvm_s390_destroy_vcpu,
	    TP_PROTO(unsigned int id),
	    TP_ARGS(id),

	    TP_STRUCT__entry(
		    __field(unsigned int, id)
		    ),

	    TP_fast_assign(
		    __entry->id = id;
		    ),

	    TP_printk("destroy cpu %d", __entry->id)
	);

/*
 * Trace point for start and stop of vcpus.
 */
TRACE_EVENT(kvm_s390_vcpu_start_stop,
	    TP_PROTO(unsigned int id, int state),
	    TP_ARGS(id, state),

	    TP_STRUCT__entry(
		    __field(unsigned int, id)
		    __field(int, state)
		    ),

	    TP_fast_assign(
		    __entry->id = id;
		    __entry->state = state;
		    ),

	    TP_printk("%s cpu %d", __entry->state ? "starting" : "stopping",
		      __entry->id)
	);

/*
 * Trace points for injection of interrupts, either per machine or
 * per vcpu.
 */

#define kvm_s390_int_type						\
	{KVM_S390_SIGP_STOP, "sigp stop"},				\
	{KVM_S390_PROGRAM_INT, "program interrupt"},			\
	{KVM_S390_SIGP_SET_PREFIX, "sigp set prefix"},			\
	{KVM_S390_RESTART, "sigp restart"},				\
	{KVM_S390_INT_VIRTIO, "virtio interrupt"},			\
	{KVM_S390_INT_SERVICE, "sclp interrupt"},			\
	{KVM_S390_INT_EMERGENCY, "sigp emergency"},			\
	{KVM_S390_INT_EXTERNAL_CALL, "sigp ext call"}

TRACE_EVENT(kvm_s390_inject_vm,
	    TP_PROTO(__u64 type, __u32 parm, __u64 parm64, int who),
	    TP_ARGS(type, parm, parm64, who),

	    TP_STRUCT__entry(
		    __field(__u32, inttype)
		    __field(__u32, parm)
		    __field(__u64, parm64)
		    __field(int, who)
		    ),

	    TP_fast_assign(
		    __entry->inttype = type & 0x00000000ffffffff;
		    __entry->parm = parm;
		    __entry->parm64 = parm64;
		    __entry->who = who;
		    ),

	    TP_printk("inject%s: type:%x (%s) parm:%x parm64:%llx",
		      (__entry->who == 1) ? " (from kernel)" :
		      (__entry->who == 2) ? " (from user)" : "",
		      __entry->inttype,
		      __print_symbolic(__entry->inttype, kvm_s390_int_type),
		      __entry->parm, __entry->parm64)
	);

TRACE_EVENT(kvm_s390_inject_vcpu,
	    TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64, \
		     int who),
	    TP_ARGS(id, type, parm, parm64, who),

	    TP_STRUCT__entry(
		    __field(int, id)
		    __field(__u32, inttype)
		    __field(__u32, parm)
		    __field(__u64, parm64)
		    __field(int, who)
		    ),

	    TP_fast_assign(
		    __entry->id = id;
		    __entry->inttype = type & 0x00000000ffffffff;
		    __entry->parm = parm;
		    __entry->parm64 = parm64;
		    __entry->who = who;
		    ),

	    TP_printk("inject%s (vcpu %d): type:%x (%s) parm:%x parm64:%llx",
		      (__entry->who == 1) ? " (from kernel)" :
		      (__entry->who == 2) ? " (from user)" : "",
		      __entry->id, __entry->inttype,
		      __print_symbolic(__entry->inttype, kvm_s390_int_type),
		      __entry->parm, __entry->parm64)
	);

/*
 * Trace point for the actual delivery of interrupts.
 */
TRACE_EVENT(kvm_s390_deliver_interrupt,
	    TP_PROTO(unsigned int id, __u64 type, __u64 data0, __u64 data1),
	    TP_ARGS(id, type, data0, data1),

	    TP_STRUCT__entry(
		    __field(int, id)
		    __field(__u32, inttype)
		    __field(__u64, data0)
		    __field(__u64, data1)
		    ),

	    TP_fast_assign(
		    __entry->id = id;
		    __entry->inttype = type & 0x00000000ffffffff;
		    __entry->data0 = data0;
		    __entry->data1 = data1;
		    ),

	    TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \
		      "data:%08llx %016llx",
		      __entry->id, __entry->inttype,
		      __print_symbolic(__entry->inttype, kvm_s390_int_type),
		      __entry->data0, __entry->data1)
	);

/*
 * Trace point for resets that may be requested from userspace.
 */
TRACE_EVENT(kvm_s390_request_resets,
	    TP_PROTO(__u64 resets),
	    TP_ARGS(resets),

	    TP_STRUCT__entry(
		    __field(__u64, resets)
		    ),

	    TP_fast_assign(
		    __entry->resets = resets;
		    ),

	    TP_printk("requesting userspace resets %llx",
		      __entry->resets)
	);

/*
 * Trace point for a vcpu's stop requests.
 */
TRACE_EVENT(kvm_s390_stop_request,
	    TP_PROTO(unsigned int action_bits),
	    TP_ARGS(action_bits),

	    TP_STRUCT__entry(
		    __field(unsigned int, action_bits)
		    ),

	    TP_fast_assign(
		    __entry->action_bits = action_bits;
		    ),

	    TP_printk("stop request, action_bits = %08x",
		      __entry->action_bits)
	);

/*
 * Trace point for enabling channel I/O instruction support.
 */
TRACE_EVENT(kvm_s390_enable_css,
	    TP_PROTO(void *kvm),
	    TP_ARGS(kvm),

	    TP_STRUCT__entry(
		    __field(void *, kvm)
		    ),

	    TP_fast_assign(
		    __entry->kvm = kvm;
		    ),

	    TP_printk("enabling channel I/O support (kvm @ %p)\n",
		      __entry->kvm)
	);

/*
 * Trace point for enabling and disabling interlocking-and-broadcasting
 * suppression.
 */
TRACE_EVENT(kvm_s390_enable_disable_ibs,
	    TP_PROTO(unsigned int id, int state),
	    TP_ARGS(id, state),

	    TP_STRUCT__entry(
		    __field(unsigned int, id)
		    __field(int, state)
		    ),

	    TP_fast_assign(
		    __entry->id = id;
		    __entry->state = state;
		    ),

	    TP_printk("%s ibs on cpu %d",
		      __entry->state ? "enabling" : "disabling", __entry->id)
	);

#endif /* _TRACE_KVMS390_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
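All events in this header land under the kvm-s390 system in tracefs once the kernel runs. A small sketch that flips the whole group on; the mount point is an assumption (debugfs-based path here, /sys/kernel/tracing on newer kernels):

#include <stdio.h>

/* Illustrative only: enable every kvm-s390 tracepoint via tracefs. */
int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/events/kvm-s390/enable", "w");

	if (!f)
		return 1;
	fputs("1", f);
	return fclose(f) ? 1 : 0;
}
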
418
arch/s390/kvm/trace.h
Normal file
@ -0,0 +1,418 @@
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _TRACE_KVM_H
|
||||
|
||||
#include <linux/tracepoint.h>
|
||||
#include <asm/sie.h>
|
||||
#include <asm/debug.h>
|
||||
#include <asm/dis.h>
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM kvm
|
||||
#define TRACE_INCLUDE_PATH .
|
||||
#undef TRACE_INCLUDE_FILE
|
||||
#define TRACE_INCLUDE_FILE trace
|
||||
|
||||
/*
|
||||
* Helpers for vcpu-specific tracepoints containing the same information
|
||||
* as s390dbf VCPU_EVENTs.
|
||||
*/
|
||||
#define VCPU_PROTO_COMMON struct kvm_vcpu *vcpu
|
||||
#define VCPU_ARGS_COMMON vcpu
|
||||
#define VCPU_FIELD_COMMON __field(int, id) \
|
||||
__field(unsigned long, pswmask) \
|
||||
__field(unsigned long, pswaddr)
|
||||
#define VCPU_ASSIGN_COMMON do { \
|
||||
__entry->id = vcpu->vcpu_id; \
|
||||
__entry->pswmask = vcpu->arch.sie_block->gpsw.mask; \
|
||||
__entry->pswaddr = vcpu->arch.sie_block->gpsw.addr; \
|
||||
} while (0);
|
||||
#define VCPU_TP_PRINTK(p_str, p_args...) \
|
||||
TP_printk("%02d[%016lx-%016lx]: " p_str, __entry->id, \
|
||||
__entry->pswmask, __entry->pswaddr, p_args)
|
||||
|
||||
TRACE_EVENT(kvm_s390_skey_related_inst,
|
||||
TP_PROTO(VCPU_PROTO_COMMON),
|
||||
TP_ARGS(VCPU_ARGS_COMMON),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
VCPU_FIELD_COMMON
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
VCPU_ASSIGN_COMMON
|
||||
),
|
||||
VCPU_TP_PRINTK("%s", "first instruction related to skeys on vcpu")
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_s390_major_guest_pfault,
|
||||
TP_PROTO(VCPU_PROTO_COMMON),
|
||||
TP_ARGS(VCPU_ARGS_COMMON),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
VCPU_FIELD_COMMON
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
VCPU_ASSIGN_COMMON
|
||||
),
|
||||
VCPU_TP_PRINTK("%s", "major fault, maybe applicable for pfault")
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_s390_pfault_init,
|
||||
TP_PROTO(VCPU_PROTO_COMMON, long pfault_token),
|
||||
TP_ARGS(VCPU_ARGS_COMMON, pfault_token),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
VCPU_FIELD_COMMON
|
||||
__field(long, pfault_token)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
VCPU_ASSIGN_COMMON
|
||||
__entry->pfault_token = pfault_token;
|
||||
),
|
||||
VCPU_TP_PRINTK("init pfault token %ld", __entry->pfault_token)
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_s390_pfault_done,
|
||||
TP_PROTO(VCPU_PROTO_COMMON, long pfault_token),
|
||||
TP_ARGS(VCPU_ARGS_COMMON, pfault_token),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
VCPU_FIELD_COMMON
|
||||
__field(long, pfault_token)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
VCPU_ASSIGN_COMMON
|
||||
__entry->pfault_token = pfault_token;
|
||||
),
|
||||
VCPU_TP_PRINTK("done pfault token %ld", __entry->pfault_token)
|
||||
);
|
||||
|
||||
/*
 * Tracepoints for SIE entry and exit.
 */
TRACE_EVENT(kvm_s390_sie_enter,
	    TP_PROTO(VCPU_PROTO_COMMON, int cpuflags),
	    TP_ARGS(VCPU_ARGS_COMMON, cpuflags),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(int, cpuflags)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->cpuflags = cpuflags;
		    ),

	    VCPU_TP_PRINTK("entering sie flags %x", __entry->cpuflags)
	);

TRACE_EVENT(kvm_s390_sie_fault,
	    TP_PROTO(VCPU_PROTO_COMMON),
	    TP_ARGS(VCPU_ARGS_COMMON),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    ),

	    VCPU_TP_PRINTK("%s", "fault in sie instruction")
	);

TRACE_EVENT(kvm_s390_sie_exit,
	    TP_PROTO(VCPU_PROTO_COMMON, u8 icptcode),
	    TP_ARGS(VCPU_ARGS_COMMON, icptcode),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(u8, icptcode)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->icptcode = icptcode;
		    ),

	    VCPU_TP_PRINTK("exit sie icptcode %d (%s)", __entry->icptcode,
			   __print_symbolic(__entry->icptcode,
					    sie_intercept_code))
	);

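/*
 * Editorial sketch (assumed caller, not part of this header): the
 * enter/exit pair above is meant to bracket the actual SIE run, roughly:
 *
 *	trace_kvm_s390_sie_enter(vcpu,
 *			atomic_read(&vcpu->arch.sie_block->cpuflags));
 *	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
 *	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
 */
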
/*
 * Trace point for intercepted instructions.
 */
TRACE_EVENT(kvm_s390_intercept_instruction,
	    TP_PROTO(VCPU_PROTO_COMMON, __u16 ipa, __u32 ipb),
	    TP_ARGS(VCPU_ARGS_COMMON, ipa, ipb),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(__u64, instruction)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->instruction = ((__u64)ipa << 48) |
					   ((__u64)ipb << 16);
		    ),

	    VCPU_TP_PRINTK("intercepted instruction %016llx (%s)",
			   __entry->instruction,
			   __print_symbolic(icpt_insn_decoder(__entry->instruction),
					    icpt_insn_codes))
	);

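/*
 * Editorial worked example (values assumed): for ipa = 0xb229 and
 * ipb = 0x10003000, the assignment above packs
 *
 *	((__u64)0xb229 << 48) | ((__u64)0x10003000 << 16)
 *		= 0xb229000000000000 | 0x0000100030000000
 *		= 0xb229100030000000
 *
 * i.e. the intercepted instruction text left-aligned in a 64-bit field.
 */
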
/*
 * Trace point for intercepted program interruptions.
 */
TRACE_EVENT(kvm_s390_intercept_prog,
	    TP_PROTO(VCPU_PROTO_COMMON, __u16 code),
	    TP_ARGS(VCPU_ARGS_COMMON, code),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(__u16, code)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->code = code;
		    ),

	    VCPU_TP_PRINTK("intercepted program interruption %04x",
			   __entry->code)
	);

/*
 * Trace point for validity intercepts.
 */
TRACE_EVENT(kvm_s390_intercept_validity,
	    TP_PROTO(VCPU_PROTO_COMMON, __u16 viwhy),
	    TP_ARGS(VCPU_ARGS_COMMON, viwhy),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(__u16, viwhy)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->viwhy = viwhy;
		    ),

	    VCPU_TP_PRINTK("got validity intercept %04x", __entry->viwhy)
	);

/*
 * Trace points for instructions that are of special interest.
 */

TRACE_EVENT(kvm_s390_handle_sigp,
	    TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr, \
		     __u32 parameter),
	    TP_ARGS(VCPU_ARGS_COMMON, order_code, cpu_addr, parameter),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(__u8, order_code)
		    __field(__u16, cpu_addr)
		    __field(__u32, parameter)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->order_code = order_code;
		    __entry->cpu_addr = cpu_addr;
		    __entry->parameter = parameter;
		    ),

	    VCPU_TP_PRINTK("handle sigp order %02x (%s), cpu address %04x, " \
			   "parameter %08x", __entry->order_code,
			   __print_symbolic(__entry->order_code,
					    sigp_order_codes),
			   __entry->cpu_addr, __entry->parameter)
	);

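/*
 * Editorial sketch: a SIGP handler would log the order before emulating
 * it. With order_code 0x05 (assumed here to decode as "stop" via
 * sigp_order_codes), cpu_addr 0x0001 and parameter 0,
 *
 *	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
 *
 * would print "handle sigp order 05 (stop), cpu address 0001,
 * parameter 00000000".
 */
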
TRACE_EVENT(kvm_s390_handle_sigp_pei,
	    TP_PROTO(VCPU_PROTO_COMMON, __u8 order_code, __u16 cpu_addr),
	    TP_ARGS(VCPU_ARGS_COMMON, order_code, cpu_addr),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(__u8, order_code)
		    __field(__u16, cpu_addr)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->order_code = order_code;
		    __entry->cpu_addr = cpu_addr;
		    ),

	    VCPU_TP_PRINTK("handle sigp pei order %02x (%s), cpu address %04x",
			   __entry->order_code,
			   __print_symbolic(__entry->order_code,
					    sigp_order_codes),
			   __entry->cpu_addr)
	);

TRACE_EVENT(kvm_s390_handle_diag,
	    TP_PROTO(VCPU_PROTO_COMMON, __u16 code),
	    TP_ARGS(VCPU_ARGS_COMMON, code),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(__u16, code)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->code = code;
		    ),

	    VCPU_TP_PRINTK("handle diagnose call %04x (%s)", __entry->code,
			   __print_symbolic(__entry->code, diagnose_codes))
	);

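/*
 * Editorial sketch: a diagnose handler would emit this event before
 * dispatching on the function code; e.g. a guest issuing DIAGNOSE 0x500
 * (assumed here to be listed in diagnose_codes) would be logged as
 * "handle diagnose call 0500 (...)":
 *
 *	trace_kvm_s390_handle_diag(vcpu, code);
 */
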
TRACE_EVENT(kvm_s390_handle_lctl,
	    TP_PROTO(VCPU_PROTO_COMMON, int g, int reg1, int reg3, u64 addr),
	    TP_ARGS(VCPU_ARGS_COMMON, g, reg1, reg3, addr),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(int, g)
		    __field(int, reg1)
		    __field(int, reg3)
		    __field(u64, addr)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->g = g;
		    __entry->reg1 = reg1;
		    __entry->reg3 = reg3;
		    __entry->addr = addr;
		    ),

	    VCPU_TP_PRINTK("%s: loading cr %x-%x from %016llx",
			   __entry->g ? "lctlg" : "lctl",
			   __entry->reg1, __entry->reg3, __entry->addr)
	);

TRACE_EVENT(kvm_s390_handle_stctl,
	    TP_PROTO(VCPU_PROTO_COMMON, int g, int reg1, int reg3, u64 addr),
	    TP_ARGS(VCPU_ARGS_COMMON, g, reg1, reg3, addr),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(int, g)
		    __field(int, reg1)
		    __field(int, reg3)
		    __field(u64, addr)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->g = g;
		    __entry->reg1 = reg1;
		    __entry->reg3 = reg3;
		    __entry->addr = addr;
		    ),

	    VCPU_TP_PRINTK("%s: storing cr %x-%x to %016llx",
			   __entry->g ? "stctg" : "stctl",
			   __entry->reg1, __entry->reg3, __entry->addr)
	);

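/*
 * Editorial note: in the two events above, the g flag distinguishes the
 * 64-bit forms (LCTLG/STCTG) from the 32-bit ones (LCTL/STCTL). A
 * hypothetical intercept handler would log, e.g.:
 *
 *	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
 *
 * which prints "lctlg: loading cr <reg1>-<reg3> from <ga>".
 */
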
TRACE_EVENT(kvm_s390_handle_prefix,
	    TP_PROTO(VCPU_PROTO_COMMON, int set, u32 address),
	    TP_ARGS(VCPU_ARGS_COMMON, set, address),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(int, set)
		    __field(u32, address)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->set = set;
		    __entry->address = address;
		    ),

	    VCPU_TP_PRINTK("%s prefix to %08x",
			   __entry->set ? "setting" : "storing",
			   __entry->address)
	);

TRACE_EVENT(kvm_s390_handle_stap,
	    TP_PROTO(VCPU_PROTO_COMMON, u64 address),
	    TP_ARGS(VCPU_ARGS_COMMON, address),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(u64, address)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->address = address;
		    ),

	    VCPU_TP_PRINTK("storing cpu address to %016llx",
			   __entry->address)
	);

TRACE_EVENT(kvm_s390_handle_stfl,
	    TP_PROTO(VCPU_PROTO_COMMON, unsigned int facility_list),
	    TP_ARGS(VCPU_ARGS_COMMON, facility_list),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(unsigned int, facility_list)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->facility_list = facility_list;
		    ),

	    VCPU_TP_PRINTK("store facility list value %08x",
			   __entry->facility_list)
	);

TRACE_EVENT(kvm_s390_handle_stsi,
	    TP_PROTO(VCPU_PROTO_COMMON, int fc, int sel1, int sel2, u64 addr),
	    TP_ARGS(VCPU_ARGS_COMMON, fc, sel1, sel2, addr),

	    TP_STRUCT__entry(
		    VCPU_FIELD_COMMON
		    __field(int, fc)
		    __field(int, sel1)
		    __field(int, sel2)
		    __field(u64, addr)
		    ),

	    TP_fast_assign(
		    VCPU_ASSIGN_COMMON
		    __entry->fc = fc;
		    __entry->sel1 = sel1;
		    __entry->sel2 = sel2;
		    __entry->addr = addr;
		    ),

	    VCPU_TP_PRINTK("STSI %d.%d.%d information stored to %016llx",
			   __entry->fc, __entry->sel1, __entry->sel2,
			   __entry->addr)
	);

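/*
 * Editorial worked example (values assumed): STSI with fc = 3, sel1 = 2,
 * sel2 = 2 requests the SYSIB 3.2.2 (virtual-machine level) block, so the
 * event above would read "STSI 3.2.2 information stored to <addr>".
 */
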
#endif /* _TRACE_KVM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
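/*
 * Editorial note: the include above makes <trace/define_trace.h> re-read
 * this file with TRACE_HEADER_MULTI_READ defined, turning the event
 * descriptions into the actual trace_*() code; that is why it must stay
 * outside the _TRACE_KVM_H guard.
 */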