ANDROID: KVM: arm64: Use PSCI MEM_PROTECT to zap guest pages on reset

If a malicious/compromised host issues a PSCI SYSTEM_RESET call in the
presence of guest-owned pages then the contents of those pages may be
susceptible to cold-reboot attacks.

Use the PSCI MEM_PROTECT call to ensure that volatile memory is wiped by
the firmware if a SYSTEM_RESET occurs while unpoisoned guest pages exist
in the system. Since this call does not offer protection for a "warm"
reset initiated by SYSTEM_RESET2, detect this case in the PSCI relay and
convert the call to a standard SYSTEM_RESET instead.

Bug: 196204410
Signed-off-by: Will Deacon <willdeacon@google.com>
Change-Id: I5c3dd93bc83ebcd0b6cea2ec734f6e3a77f0064e
This commit is contained in:
Will Deacon
2022-03-25 15:01:38 +00:00
parent 40493bc91e
commit ba73e0b827
4 changed files with 59 additions and 4 deletions

View File

@@ -91,6 +91,9 @@ int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
struct kvm_hyp_memcache *host_mc);
void reclaim_guest_pages(struct kvm_shadow_vm *vm, struct kvm_hyp_memcache *mc);
void psci_mem_protect_inc(void);
void psci_mem_protect_dec(void);
static __always_inline void __load_host_stage2(void)
{
if (static_branch_likely(&kvm_protected_mode_initialized))

View File

@@ -1073,10 +1073,14 @@ static int guest_complete_donation(u64 addr, const struct pkvm_mem_transition *t
u64 size = tx->nr_pages * PAGE_SIZE;
int err;
if (tx->initiator.id == PKVM_ID_HOST && ipa_in_pvmfw_region(vm, addr)) {
err = pkvm_load_pvmfw_pages(vm, addr, phys, size);
if (err)
return err;
if (tx->initiator.id == PKVM_ID_HOST) {
psci_mem_protect_inc();
if (ipa_in_pvmfw_region(vm, addr)) {
err = pkvm_load_pvmfw_pages(vm, addr, phys, size);
if (err)
return err;
}
}
return kvm_pgtable_stage2_map(&vm->pgt, addr, size, phys, prot,
@@ -1893,6 +1897,7 @@ int __pkvm_host_reclaim_page(u64 pfn)
if (ret)
goto unlock;
page->flags &= ~HOST_PAGE_NEED_POISONING;
psci_mem_protect_dec();
}
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, pkvm_host_id);

View File

@@ -222,6 +222,44 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
__host_enter(host_ctxt);
}
static DEFINE_HYP_SPINLOCK(mem_protect_lock);
/*
 * Adjust the count of pages covered by PSCI MEM_PROTECT and drive the
 * firmware state on 0 <-> non-zero transitions of that count.
 *
 * @offset: signed adjustment to apply to the counter (0 queries only).
 *
 * Returns the updated counter value. Callers must hold mem_protect_lock.
 */
static u64 psci_mem_protect(s64 offset)
{
	static u64 cnt;
	u64 total = cnt + offset;

	hyp_assert_lock_held(&mem_protect_lock);

	/* Pure query, or firmware predates PSCI 1.1 (no MEM_PROTECT). */
	if (!offset || kvm_host_psci_config.version < PSCI_VERSION(1, 1))
		return cnt;

	/* Enable on the first protected page, disable when the last goes. */
	if (!cnt || !total)
		psci_call(PSCI_1_1_FN64_MEM_PROTECT, offset > 0 ? 1 : 0, 0, 0);

	cnt = total;
	return cnt;
}
/*
 * Report whether MEM_PROTECT is currently enabled, i.e. whether any
 * protected guest pages are outstanding. mem_protect_lock must be held.
 */
static bool psci_mem_protect_active(void)
{
	return psci_mem_protect(0) != 0;
}
/*
 * Take a MEM_PROTECT reference for a newly donated guest page,
 * serialising against concurrent count updates and system resets.
 */
void psci_mem_protect_inc(void)
{
	hyp_spin_lock(&mem_protect_lock);
	(void)psci_mem_protect(1);
	hyp_spin_unlock(&mem_protect_lock);
}
/*
 * Drop a MEM_PROTECT reference once a guest page has been reclaimed,
 * serialising against concurrent count updates and system resets.
 */
void psci_mem_protect_dec(void)
{
	hyp_spin_lock(&mem_protect_lock);
	(void)psci_mem_protect(-1);
	hyp_spin_unlock(&mem_protect_lock);
}
static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
@@ -251,6 +289,8 @@ static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_
case PSCI_0_2_FN_SYSTEM_OFF:
case PSCI_0_2_FN_SYSTEM_RESET:
pkvm_clear_pvmfw_pages();
/* Avoid racing with a MEM_PROTECT call. */
hyp_spin_lock(&mem_protect_lock);
return psci_forward(host_ctxt);
case PSCI_0_2_FN64_CPU_SUSPEND:
return psci_cpu_suspend(func_id, host_ctxt);
@@ -266,6 +306,11 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
switch (func_id) {
case PSCI_1_1_FN64_SYSTEM_RESET2:
pkvm_clear_pvmfw_pages();
hyp_spin_lock(&mem_protect_lock);
if (psci_mem_protect_active()) {
return psci_0_2_handler(PSCI_0_2_FN_SYSTEM_RESET,
host_ctxt);
}
fallthrough;
case PSCI_1_0_FN_PSCI_FEATURES:
case PSCI_1_0_FN_SET_SUSPEND_MODE:

View File

@@ -55,6 +55,8 @@
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
#define PSCI_1_1_FN64_MEM_PROTECT PSCI_0_2_FN64(19)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
#define PSCI_0_2_POWER_STATE_ID_SHIFT 0