Merge tag 'x86_cc_for_v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 confidential computing update from Borislav Petkov:

 - Add support for unaccepted memory as specified in the UEFI spec
   v2.9. The gist of it all is that Intel TDX and AMD SEV-SNP
   confidential computing guests define the notion of accepting memory
   before using it and thus preventing a whole set of attacks against
   such guests like memory replay and the like.

   There are a couple of strategies of how memory should be accepted -
   the current implementation does an on-demand way of accepting.

* tag 'x86_cc_for_v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  virt: sevguest: Add CONFIG_CRYPTO dependency
  x86/efi: Safely enable unaccepted memory in UEFI
  x86/sev: Add SNP-specific unaccepted memory support
  x86/sev: Use large PSC requests if applicable
  x86/sev: Allow for use of the early boot GHCB for PSC requests
  x86/sev: Put PSC struct on the stack in prep for unaccepted memory support
  x86/sev: Fix calculation of end address based on number of pages
  x86/tdx: Add unaccepted memory support
  x86/tdx: Refactor try_accept_one()
  x86/tdx: Make _tdx_hypercall() and __tdx_module_call() available in boot stub
  efi/unaccepted: Avoid load_unaligned_zeropad() stepping into unaccepted memory
  efi: Add unaccepted memory support
  x86/boot/compressed: Handle unaccepted memory
  efi/libstub: Implement support for unaccepted memory
  efi/x86: Get full memory map in allocate_e820()
  mm: Add support for unaccepted memory
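For orientation before the diffs: a minimal, self-contained sketch of the lazy (on-demand) acceptance model, in plain C. This is not the kernel implementation; the single-word bitmap and the firmware_accept() helper are illustrative stand-ins for the EFI unaccepted-memory bitmap and the TDX/SNP acceptance machinery.

/*
 * Toy model of lazy memory acceptance: one bit per 4 KiB page, set
 * while the page is still unaccepted. First touch pays the cost;
 * later touches are free.
 */
#include <stdint.h>

#define TOY_PAGE_SHIFT	12
#define TOY_PAGE_SIZE	(1ULL << TOY_PAGE_SHIFT)

static uint64_t unaccepted = ~0ULL;	/* 64 toy pages, all start unaccepted */

static void firmware_accept(uint64_t pfn)
{
	(void)pfn;	/* a real guest would issue TDX ACCEPT_PAGE / SNP PVALIDATE here */
}

/* Accept every still-unaccepted page overlapping [start, end). */
static void toy_accept_memory(uint64_t start, uint64_t end)
{
	uint64_t pfn;

	for (pfn = start >> TOY_PAGE_SHIFT;
	     pfn < (end + TOY_PAGE_SIZE - 1) >> TOY_PAGE_SHIFT; pfn++) {
		if (unaccepted & (1ULL << pfn)) {
			firmware_accept(pfn);
			unaccepted &= ~(1ULL << pfn);
		}
	}
}

int main(void)
{
	toy_accept_memory(0x3000, 0x6000);	/* first touch: accepts pages 3..5 */
	toy_accept_memory(0x3000, 0x6000);	/* already accepted: no-op */
	return 0;
}

The kernel's accept_memory() in the hunks below has the same contract: the caller hands it a physical range and may use that memory once the call returns.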
mm/memblock.c
@@ -1436,6 +1436,15 @@ done:
 	kmemleak_alloc_phys(found, size, 0);
 
+	/*
+	 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP,
+	 * require memory to be accepted before it can be used by the
+	 * guest.
+	 *
+	 * Accept the memory of the allocated buffer.
+	 */
+	accept_memory(found, found + size);
+
 	return found;
 }
mm/mm_init.c
@@ -1375,6 +1375,10 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
 		zone->free_area[order].nr_free = 0;
 	}
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+	INIT_LIST_HEAD(&zone->unaccepted_pages);
+#endif
 }
 
 void __meminit init_currently_empty_zone(struct zone *zone,
@@ -1960,6 +1964,9 @@ static void __init deferred_free_range(unsigned long pfn,
 		return;
 	}
 
+	/* Accept chunks smaller than MAX_ORDER upfront */
+	accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
+
 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
 		if (pageblock_aligned(pfn))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
mm/page_alloc.c
@@ -387,6 +387,12 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+static bool page_contains_unaccepted(struct page *page, unsigned int order);
+static void accept_page(struct page *page, unsigned int order);
+static bool try_to_accept_memory(struct zone *zone, unsigned int order);
+static inline bool has_unaccepted_memory(void);
+static bool __free_unaccepted(struct page *page);
+
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1481,6 +1487,13 @@ void __free_pages_core(struct page *page, unsigned int order)
 
 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
 
+	if (page_contains_unaccepted(page, order)) {
+		if (order == MAX_ORDER && __free_unaccepted(page))
+			return;
+
+		accept_page(page, order);
+	}
+
 	/*
 	 * Bypass PCP and place fresh pages right to the tail, primarily
 	 * relevant for memory onlining.
@@ -3159,6 +3172,9 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
 	if (!(alloc_flags & ALLOC_CMA))
 		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
 #endif
+#ifdef CONFIG_UNACCEPTED_MEMORY
+	unusable_free += zone_page_state(z, NR_UNACCEPTED);
+#endif
 
 	return unusable_free;
 }
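The NR_UNACCEPTED counter added here makes unaccepted pages count as free but unusable for watermark checks, which is what triggers on-demand acceptance in the allocation paths below. A worked example with illustrative numbers (assuming 4 KiB pages, MAX_ORDER_NR_PAGES = 1024, and no other unusable-free components such as CMA):

	NR_FREE_PAGES = 10000, NR_UNACCEPTED = 4096, high watermark = 8192
	usable free = 10000 - 4096 = 5904  -> watermark check fails
	shortfall   = 8192 - 5904  = 2288  -> try_to_accept_memory() accepts
	              ceil(2288 / 1024) = 3 chunks, i.e. 3072 pages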
@@ -3458,6 +3474,11 @@ retry:
 				       gfp_mask)) {
 			int ret;
 
+			if (has_unaccepted_memory()) {
+				if (try_to_accept_memory(zone, order))
+					goto try_this_zone;
+			}
+
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 			/*
 			 * Watermark failed for this zone, but see if we can
@@ -3510,6 +3531,11 @@ try_this_zone:
 
 			return page;
 		} else {
+			if (has_unaccepted_memory()) {
+				if (try_to_accept_memory(zone, order))
+					goto try_this_zone;
+			}
+
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 			/* Try again if zone has deferred pages */
 			if (deferred_pages_enabled()) {
@@ -7215,3 +7241,150 @@ bool has_managed_dma(void)
 	return false;
 }
 #endif /* CONFIG_ZONE_DMA */
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+
+/* Counts number of zones with unaccepted pages. */
+static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);
+
+static bool lazy_accept = true;
+
+static int __init accept_memory_parse(char *p)
+{
+	if (!strcmp(p, "lazy")) {
+		lazy_accept = true;
+		return 0;
+	} else if (!strcmp(p, "eager")) {
+		lazy_accept = false;
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+early_param("accept_memory", accept_memory_parse);
+
+static bool page_contains_unaccepted(struct page *page, unsigned int order)
+{
+	phys_addr_t start = page_to_phys(page);
+	phys_addr_t end = start + (PAGE_SIZE << order);
+
+	return range_contains_unaccepted_memory(start, end);
+}
+
+static void accept_page(struct page *page, unsigned int order)
+{
+	phys_addr_t start = page_to_phys(page);
+
+	accept_memory(start, start + (PAGE_SIZE << order));
+}
+
+static bool try_to_accept_memory_one(struct zone *zone)
+{
+	unsigned long flags;
+	struct page *page;
+	bool last;
+
+	if (list_empty(&zone->unaccepted_pages))
+		return false;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	page = list_first_entry_or_null(&zone->unaccepted_pages,
+					struct page, lru);
+	if (!page) {
+		spin_unlock_irqrestore(&zone->lock, flags);
+		return false;
+	}
+
+	list_del(&page->lru);
+	last = list_empty(&zone->unaccepted_pages);
+
+	__mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+	spin_unlock_irqrestore(&zone->lock, flags);
+
+	accept_page(page, MAX_ORDER);
+
+	__free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);
+
+	if (last)
+		static_branch_dec(&zones_with_unaccepted_pages);
+
+	return true;
+}
+
+static bool try_to_accept_memory(struct zone *zone, unsigned int order)
+{
+	long to_accept;
+	int ret = false;
+
+	/* How much to accept to get to high watermark? */
+	to_accept = high_wmark_pages(zone) -
+		    (zone_page_state(zone, NR_FREE_PAGES) -
+		    __zone_watermark_unusable_free(zone, order, 0));
+
+	/* Accept at least one page */
+	do {
+		if (!try_to_accept_memory_one(zone))
+			break;
+		ret = true;
+		to_accept -= MAX_ORDER_NR_PAGES;
+	} while (to_accept > 0);
+
+	return ret;
+}
+
+static inline bool has_unaccepted_memory(void)
+{
+	return static_branch_unlikely(&zones_with_unaccepted_pages);
+}
+
+static bool __free_unaccepted(struct page *page)
+{
+	struct zone *zone = page_zone(page);
+	unsigned long flags;
+	bool first = false;
+
+	if (!lazy_accept)
+		return false;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	first = list_empty(&zone->unaccepted_pages);
+	list_add_tail(&page->lru, &zone->unaccepted_pages);
+	__mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+	__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
+	spin_unlock_irqrestore(&zone->lock, flags);
+
+	if (first)
+		static_branch_inc(&zones_with_unaccepted_pages);
+
+	return true;
+}
+
+#else
+
+static bool page_contains_unaccepted(struct page *page, unsigned int order)
+{
+	return false;
+}
+
+static void accept_page(struct page *page, unsigned int order)
+{
+}
+
+static bool try_to_accept_memory(struct zone *zone, unsigned int order)
+{
+	return false;
+}
+
+static inline bool has_unaccepted_memory(void)
+{
+	return false;
+}
+
+static bool __free_unaccepted(struct page *page)
+{
+	BUILD_BUG();
+	return false;
+}
+
+#endif /* CONFIG_UNACCEPTED_MEMORY */
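The early_param() hook above gives the policy a boot-time switch: accept_memory=lazy (the default, keep the on-demand list) or accept_memory=eager (refuse to defer, so everything is accepted as it is freed into the page allocator). A hypothetical command line, with the kernel path and root device invented for illustration:

	linux /boot/vmlinuz-6.5 root=/dev/vda1 accept_memory=eager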
mm/vmstat.c
@@ -1180,6 +1180,9 @@ const char * const vmstat_text[] = {
 	"nr_zspages",
 #endif
 	"nr_free_cma",
+#ifdef CONFIG_UNACCEPTED_MEMORY
+	"nr_unaccepted",
+#endif
 
 	/* enum numa_stat_item counters */
 #ifdef CONFIG_NUMA
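With the string added above, the counter surfaces in /proc/vmstat on kernels built with CONFIG_UNACCEPTED_MEMORY. A quick way to watch acceptance progress (assuming a TDX or SEV-SNP guest; the value is in pages and drops in MAX_ORDER_NR_PAGES steps as chunks are accepted):

	grep nr_unaccepted /proc/vmstat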