diff --git a/mm/memory.c b/mm/memory.c
index 1813ed8c3805..746fdba467df 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3720,16 +3720,26 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	void *shadow = NULL;
 
 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+		/* ksm_might_need_to_copy() needs a stable VMA, spf can't be used */
+#ifdef CONFIG_KSM
 		pte_unmap(vmf->pte);
 		count_vm_spf_event(SPF_ABORT_SWAP);
 		return VM_FAULT_RETRY;
+#endif
 	}
 
-	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
+	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
+		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+			ret = VM_FAULT_RETRY;
 		goto out;
+	}
 
 	entry = pte_to_swp_entry(vmf->orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
+		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+			ret = VM_FAULT_RETRY;
+			goto out;
+		}
 		if (is_migration_entry(entry)) {
 			migration_entry_wait(vma->vm_mm, vmf->pmd,
 					     vmf->address);
@@ -3787,6 +3797,17 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			swap_readpage(page, true);
 			set_page_private(page, 0);
 		}
+	} else if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+		/*
+		 * Don't try readahead during a speculative page fault
+		 * as the VMA's boundaries may change behind our back.
+		 * If the page is not in the swap cache and synchronous
+		 * read is disabled, fall back to the regular page fault
+		 * mechanism.
+		 */
+		delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
+		ret = VM_FAULT_RETRY;
+		goto out;
 	} else {
 		page = swapin_readahead(entry,
 					GFP_HIGHUSER_MOVABLE | __GFP_CMA,