ANDROID: KVM: arm64: Refactor enter_exception64()

In order to simplify the injection of exceptions in the host in pkvm
context, let's factor out of enter_exception64() the code calculating
the exception offset from VBAR_EL1 and the cpsr.

Signed-off-by: Quentin Perret <qperret@google.com>
Bug: 215520143
Change-Id: I97b2431a79fdec87c95c2d1f691bd3a11635c29b
This commit is contained in:
Quentin Perret
2022-03-22 14:33:37 +00:00
parent 91c32ff1fe
commit e7b80adac2
2 changed files with 57 additions and 37 deletions

View File

@@ -41,6 +41,11 @@ void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
unsigned long get_except64_offset(unsigned long psr, unsigned long target_mode,
enum exception_type type);
unsigned long get_except64_cpsr(unsigned long old, bool has_mte,
unsigned long sctlr, unsigned long mode);
static inline int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
/*

View File

@@ -60,12 +60,25 @@ static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
vcpu->arch.ctxt.spsr_und = val;
}
/*
 * Compute the offset from VBAR_ELx at which the exception must be taken.
 *
 * The vector table entry depends on whether the exception is taken from
 * the same EL with SP_ELx, the same EL with SP_EL0, a lower EL running
 * AArch64, or a lower EL running AArch32. @type selects the entry within
 * the chosen quadrant (sync/irq/fiq/serror).
 */
unsigned long get_except64_offset(unsigned long psr, unsigned long target_mode,
				  enum exception_type type)
{
	u64 mode = psr & (PSR_MODE_MASK | PSR_MODE32_BIT);
	u64 base;

	if (mode == target_mode) {
		/* Same EL, using SP_ELx */
		base = CURRENT_EL_SP_ELx_VECTOR;
	} else if ((mode | PSR_MODE_THREAD_BIT) == target_mode) {
		/* Same EL, but using SP_EL0 */
		base = CURRENT_EL_SP_EL0_VECTOR;
	} else {
		/* Lower EL: pick the quadrant by execution state */
		base = (mode & PSR_MODE32_BIT) ? LOWER_EL_AArch32_VECTOR :
						 LOWER_EL_AArch64_VECTOR;
	}

	return base + type;
}
/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
 * The EL passed to this function *must* be a non-secure, privileged mode with
 * bit 0 being set (PSTATE.SP == 1).
 *
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
@@ -77,45 +90,17 @@ static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
* Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
* MSB to LSB.
*/
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
enum exception_type type)
unsigned long get_except64_cpsr(unsigned long old, bool has_mte,
unsigned long sctlr, unsigned long target_mode)
{
unsigned long sctlr, vbar, old, new, mode;
u64 exc_offset;
mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
if (mode == target_mode)
exc_offset = CURRENT_EL_SP_ELx_VECTOR;
else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
exc_offset = CURRENT_EL_SP_EL0_VECTOR;
else if (!(mode & PSR_MODE32_BIT))
exc_offset = LOWER_EL_AArch64_VECTOR;
else
exc_offset = LOWER_EL_AArch32_VECTOR;
switch (target_mode) {
case PSR_MODE_EL1h:
vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL1);
sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
__vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
break;
default:
/* Don't do that */
BUG();
}
*vcpu_pc(vcpu) = vbar + exc_offset + type;
old = *vcpu_cpsr(vcpu);
new = 0;
u64 new = 0;
new |= (old & PSR_N_BIT);
new |= (old & PSR_Z_BIT);
new |= (old & PSR_C_BIT);
new |= (old & PSR_V_BIT);
if (kvm_has_mte(vcpu->kvm))
if (has_mte)
new |= PSR_TCO_BIT;
new |= (old & PSR_DIT_BIT);
@@ -151,6 +136,36 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
new |= target_mode;
return new;
}
/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
 * The EL passed to this function *must* be a non-secure, privileged mode with
 * bit 0 being set (PSTATE.SP == 1).
 */
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      enum exception_type type)
{
	u64 offset = get_except64_offset(*vcpu_cpsr(vcpu), target_mode, type);
	unsigned long old_cpsr, sctlr, vbar;

	switch (target_mode) {
	case PSR_MODE_EL1h:
		/* Grab the EL1 vector base and controls, stash the return PC. */
		vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL1);
		sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		__vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
		break;
	default:
		/* No other target EL is supported here */
		BUG();
	}

	/* Redirect execution to the selected vector entry */
	*vcpu_pc(vcpu) = vbar + offset;

	/* Build the entry PSTATE and preserve the old one in SPSR */
	old_cpsr = *vcpu_cpsr(vcpu);
	*vcpu_cpsr(vcpu) = get_except64_cpsr(old_cpsr, kvm_has_mte(vcpu->kvm),
					     sctlr, target_mode);
	__vcpu_write_spsr(vcpu, old_cpsr);
}