[ Upstream commit 09cda0a400519b1541591c506e54c9c48e3101bf ]

If the cmma no-dat feature is available, all pages that are not used for
dynamic address translation are marked as "no-dat" with the ESSA
instruction. This information is visible to the hypervisor, so that the
hypervisor can optimize purging of guest TLB entries. This also means
that pages which are used for dynamic address translation must not be
marked as "no-dat", since the hypervisor may then incorrectly not purge
guest TLB entries.

Region and segment tables allocated via vmem_crst_alloc() are
incorrectly marked as "no-dat" as soon as slab_is_available() returns
true. Such tables are allocated e.g. when kernel page tables are split,
memory is hotplugged, or a DCSS segment is loaded.

Fix this by adding the missing arch_set_page_dat() call.

Cc: <stable@vger.kernel.org>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
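For reference, this is vmem_crst_alloc() as it appears in the file below; the arch_set_page_dat() call under the slab_is_available() check is the fix the commit message describes:

void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	crst_table_init(table, val);
	if (slab_is_available())
		arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
	return table;
}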
698 lines
17 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/page-states.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
		return;
	free_pages(addr, order);
}

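/*
 * Allocate a region or segment (crst) table. Once the slab allocator is
 * up, the table is explicitly marked as used for dynamic address
 * translation via arch_set_page_dat(), so that the hypervisor does not
 * treat it as "no-dat" (see the commit description above).
 */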
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	crst_table_init(table, val);
	if (slab_is_available())
		arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
	return table;
}

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

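/*
 * Byte pattern written to currently unused parts of vmemmap pages, so
 * that completely unused PMD ranges can be detected and freed again
 * (see vmemmap_unuse_sub_pmd()).
 */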
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;

static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also need
				 * page tables, since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif
	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}

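/*
 * Walk and modify the kernel page tables for [start, end): "add" selects
 * whether entries are populated or removed, "direct" selects the identity
 * (1:1) mapping, with PG_DIRECT_MAP_* accounting, as opposed to the
 * vmemmap.
 */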
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = VMEM_MAX_PHYS - 1;
	return mhp_range;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along the way. Meeting a large segment- or region-table
 * entry while traversing is an error, since the function is expected to
 * be called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_large(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}

int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged.
 */
void __init vmem_map_init(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end)
		vmem_add_range(base, end - base);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* lowcore requires 4k mapping for real addresses / prefixing */
	set_memory_4k(0, LC_PAGES);

	/* lowcore must be executable for LPSWE */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}