LoongArch: mm: Fix huge page entry update for virtual machine

In virtual machine (guest mode), the tlbwr instruction cannot write the
last entry of the MTLB, so we need to make the entry non-present via
invtlb and then write it via tlbfill. This also simplifies the whole
logic.

Signed-off-by: Rui Wang <wangrui@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
This commit is contained in:
Huacai Chen
2022-12-08 14:59:15 +08:00
parent 143d64bdbd
commit b681604eda

View File

@@ -10,6 +10,8 @@
#include <asm/regdef.h> #include <asm/regdef.h>
#include <asm/stackframe.h> #include <asm/stackframe.h>
#define INVTLB_ADDR_GFALSE_AND_ASID 5
#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3) #define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3) #define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3) #define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
@@ -136,13 +138,10 @@ tlb_huge_update_load:
ori t0, ra, _PAGE_VALID ori t0, ra, _PAGE_VALID
st.d t0, t1, 0 st.d t0, t1, 0
#endif #endif
tlbsrch csrrd ra, LOONGARCH_CSR_ASID
addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16) csrrd t1, LOONGARCH_CSR_BADV
addi.d ra, t1, 0 andi ra, ra, CSR_ASID_ASID
csrxchg ra, t1, LOONGARCH_CSR_TLBIDX invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
tlbwr
csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
/* /*
* A huge PTE describes an area the size of the * A huge PTE describes an area the size of the
@@ -287,13 +286,11 @@ tlb_huge_update_store:
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
st.d t0, t1, 0 st.d t0, t1, 0
#endif #endif
tlbsrch csrrd ra, LOONGARCH_CSR_ASID
addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16) csrrd t1, LOONGARCH_CSR_BADV
addi.d ra, t1, 0 andi ra, ra, CSR_ASID_ASID
csrxchg ra, t1, LOONGARCH_CSR_TLBIDX invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
tlbwr
csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
/* /*
* A huge PTE describes an area the size of the * A huge PTE describes an area the size of the
* configured huge page size. This is twice the * configured huge page size. This is twice the
@@ -436,6 +433,11 @@ tlb_huge_update_modify:
ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
st.d t0, t1, 0 st.d t0, t1, 0
#endif #endif
csrrd ra, LOONGARCH_CSR_ASID
csrrd t1, LOONGARCH_CSR_BADV
andi ra, ra, CSR_ASID_ASID
invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
/* /*
* A huge PTE describes an area the size of the * A huge PTE describes an area the size of the
* configured huge page size. This is twice the * configured huge page size. This is twice the
@@ -466,7 +468,7 @@ tlb_huge_update_modify:
addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbwr tlbfill
/* Reset default page size */ /* Reset default page size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)