From a7e0ac295d964086af3bf98352614f33c381213e Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Tue, 19 Jan 2016 16:20:18 +0000
Subject: arm64: KVM: Obey RES0/1 reserved bits when setting CPTR_EL2

Some bits in CPTR_EL2 are defined as RES1 in the architecture. Setting
these bits to zero may unintentionally enable future architecture
extensions, allowing guests to use them without supervision by the
host. This would be bad: for forwards compatibility, this patch makes
sure the affected bits are always written with 1, not 0.

This patch only addresses CPTR_EL2. Initialisation of other system
registers may still need review.

Reviewed-by: Marc Zyngier
Signed-off-by: Dave Martin
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_arm.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 738a95f93e49..bef6e9243c63 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -182,6 +182,7 @@
 #define CPTR_EL2_TCPAC	(1 << 31)
 #define CPTR_EL2_TTA	(1 << 20)
 #define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
+#define CPTR_EL2_DEFAULT	0x000033ff
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TDRA		(1 << 11)
-- cgit v1.2.3

From 9586a2ea6806599c819a9e800581c2a698ef7467 Mon Sep 17 00:00:00 2001
From: Shannon Zhao
Date: Wed, 13 Jan 2016 17:16:39 +0800
Subject: arm64: KVM: Fix wrong use of the CPSR MODE mask for 32bit guests

The CPSR mode mask values differ between AArch32 and AArch64, so
vcpu_mode_priv() must use the mask that matches the guest's execution
state.

Reviewed-by: Marc Zyngier
Signed-off-by: Shannon Zhao
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_emulate.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3066328cd86b..779a5872a2c5 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
 
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+	u32 mode;
 
-	if (vcpu_mode_is_32bit(vcpu))
+	if (vcpu_mode_is_32bit(vcpu)) {
+		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
 		return mode > COMPAT_PSR_MODE_USR;
+	}
+
+	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
 
 	return mode != PSR_MODE_EL0t;
 }
-- cgit v1.2.3
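To see concretely what the old code got wrong, consider a 32-bit guest
running in SVC mode. The following stand-alone sketch (not kernel code;
the constants mirror arch/arm64/include/asm/ptrace.h) reproduces both
masking schemes:

	#include <stdio.h>

	#define PSR_MODE_MASK		0x0000000f	/* AArch64: mode is M[3:0] */
	#define COMPAT_PSR_MODE_MASK	0x0000001f	/* AArch32: mode is M[4:0] */
	#define COMPAT_PSR_MODE_USR	0x00000010
	#define COMPAT_PSR_MODE_SVC	0x00000013

	int main(void)
	{
		unsigned int cpsr = COMPAT_PSR_MODE_SVC;	/* privileged 32-bit mode */

		/* old code: the AArch64 mask drops bit 4, so 0x13 becomes 0x03 */
		unsigned int buggy = cpsr & PSR_MODE_MASK;
		/* new code: the AArch32 mask keeps all five mode bits */
		unsigned int fixed = cpsr & COMPAT_PSR_MODE_MASK;

		printf("buggy: %s, fixed: %s\n",
		       buggy > COMPAT_PSR_MODE_USR ? "priv" : "unpriv",
		       fixed > COMPAT_PSR_MODE_USR ? "priv" : "unpriv");
		return 0;
	}

With the AArch64 mask, SVC (0x13) collapses to 0x03 and the guest is
misreported as unprivileged; keeping all five AArch32 mode bits
preserves the correct answer.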
From 1a1ebd5fb1e203ee8cc73508cc7a38ac4b804596 Mon Sep 17 00:00:00 2001
From: Tirumalesh Chalamarla
Date: Thu, 4 Feb 2016 10:45:25 -0800
Subject: irqchip/gic-v3: Make sure read from ICC_IAR1_EL1 is visible on redistributor

The ARM GICv3 specification mentions the need for a dsb after a read
from the ICC_IAR1_EL1 register:

	4.1.1 Physical CPU Interface:
	The effects of reading ICC_IAR0_EL1 and ICC_IAR1_EL1 on the
	state of a returned INTID are not guaranteed to be visible
	until after the execution of a DSB.

Not having this could result in missed interrupts, so let's add the
required barrier.

[Marc: fixed commit message]

Acked-by: Marc Zyngier
Signed-off-by: Tirumalesh Chalamarla
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/arch_gicv3.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 2731d3b25ed2..8ec88e5b290f 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -103,6 +103,7 @@ static inline u64 gic_read_iar_common(void)
 	u64 irqstat;
 
 	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+	dsb(sy);
 	return irqstat;
 }
 
-- cgit v1.2.3

From 3c5b1d92b3b02be07873d611a27950addff544d3 Mon Sep 17 00:00:00 2001
From: Tirumalesh Chalamarla
Date: Wed, 10 Feb 2016 10:46:53 -0800
Subject: arm64: KVM: Configure TCR_EL2.PS at runtime

Setting TCR_EL2.PS to 40 bits is wrong on systems with less than 40
bits of physical addresses, and breaks KVM on systems where the RAM is
above 40 bits.

This patch uses ID_AA64MMFR0_EL1.PARange to set TCR_EL2.PS dynamically,
just like we already do for VTCR_EL2.PS.

[Marc: rewrote commit message, patch tidy up]

Reviewed-by: Marc Zyngier
Signed-off-by: Tirumalesh Chalamarla
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_arm.h |  2 --
 arch/arm64/kvm/hyp-init.S        | 12 +++++++-----
 2 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index bef6e9243c63..d201d4b396d1 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,8 +107,6 @@
 #define TCR_EL2_MASK	(TCR_EL2_TG0 | TCR_EL2_SH0 | \
 			 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
 
-#define TCR_EL2_FLAGS	(TCR_EL2_RES1 | TCR_EL2_PS_40B)
-
 /* VTCR_EL2 Registers bits */
 #define VTCR_EL2_RES1		(1 << 31)
 #define VTCR_EL2_PS_MASK	(7 << 16)
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 3e568dcd907b..d073b5a216f7 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -64,7 +64,7 @@ __do_hyp_init:
 	mrs	x4, tcr_el1
 	ldr	x5, =TCR_EL2_MASK
 	and	x4, x4, x5
-	ldr	x5, =TCR_EL2_FLAGS
+	mov	x5, #TCR_EL2_RES1
 	orr	x4, x4, x5
 
 #ifndef CONFIG_ARM64_VA_BITS_48
@@ -85,15 +85,17 @@ __do_hyp_init:
 	ldr_l	x5, idmap_t0sz
 	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
 #endif
-	msr	tcr_el2, x4
-
-	ldr	x4, =VTCR_EL2_FLAGS
 	/*
 	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
-	 * VTCR_EL2.
+	 * TCR_EL2 and VTCR_EL2.
 	 */
 	mrs	x5, ID_AA64MMFR0_EL1
 	bfi	x4, x5, #16, #3
+
+	msr	tcr_el2, x4
+
+	ldr	x4, =VTCR_EL2_FLAGS
+	bfi	x4, x5, #16, #3
 	/*
 	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
 	 * VTCR_EL2.
-- cgit v1.2.3
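As an illustration of what the bfi sequence in hyp-init.S computes,
here is a rough user-space C paraphrase; it is a sketch only (the
sample TCR value uses TCR_EL2_RES1 for illustration, and the
authoritative code is the assembly above):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Copy ID_AA64MMFR0_EL1.PARange into the PS field (bits [18:16]) of
	 * TCR_EL2/VTCR_EL2, as "bfi x4, x5, #16, #3" does. PS uses the same
	 * encoding as PARange: 0 = 32-bit PA, 1 = 36, 2 = 40, 3 = 42,
	 * 4 = 44, 5 = 48, which is why the field can be copied verbatim.
	 */
	static uint64_t set_ps_from_parange(uint64_t tcr, uint64_t mmfr0)
	{
		uint64_t parange = mmfr0 & 0x7;	/* bfi takes the low 3 bits */

		tcr &= ~(UINT64_C(7) << 16);	/* clear PS, bits [18:16] */
		return tcr | (parange << 16);	/* insert PARange as PS */
	}

	int main(void)
	{
		/* a CPU reporting 42 bits of PA space (PARange = 3) */
		printf("%#llx\n", (unsigned long long)
		       set_ps_from_parange(0x80800000ULL /* TCR_EL2_RES1 */, 3));
		return 0;
	}

On such a CPU this yields 0x80830000, i.e. the RES1 bits plus PS = 3,
instead of the unconditional 40-bit setting the deleted TCR_EL2_FLAGS
encoded.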
From dfd55ad85e4a7fbaa82df12467515ac3c81e8a3e Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Fri, 26 Feb 2016 17:57:13 +0100
Subject: arm64: vmemmap: use virtual projection of linear region

Commit dd006da21646 ("arm64: mm: increase VA range of identity map")
made some changes to the memory mapping code to allow physical memory
to reside at an offset that exceeds the size of the virtual mapping.

However, the size of the vmemmap area is proportional to the size of
the VA area, while it is populated relative to the physical space, so
we may end up with the struct page array being mapped outside of the
vmemmap region. For instance, on my Seattle A0 box, I can see the
following output in the dmesg log:

	vmemmap : 0xffffffbdc0000000 - 0xffffffbfc0000000   (    8 GB maximum)
	          0xffffffbfc0000000 - 0xffffffbfd0000000   (  256 MB actual)

We can fix this by deciding that the vmemmap region is not a projection
of the physical space, but of the virtual space above PAGE_OFFSET,
i.e., the linear region. This way, we are guaranteed that the vmemmap
region is of sufficient size, and we can even reduce the size by half.

Cc:
Signed-off-by: Ard Biesheuvel
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable.h | 7 ++++---
 arch/arm64/mm/init.c             | 4 ++--
 2 files changed, 6 insertions(+), 5 deletions(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index bf464de33f52..f50608674580 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -34,13 +34,13 @@
 /*
  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
  *
- * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
+ * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
  *	(rounded up to PUD_SIZE).
  * VMALLOC_START: beginning of the kernel VA space
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  *	fixed mappings and modules
  */
-#define VMEMMAP_SIZE	ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE	ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
 
 #ifndef CONFIG_KASAN
 #define VMALLOC_START	(VA_START)
@@ -51,7 +51,8 @@
 
 #define VMALLOC_END	(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
-#define vmemmap		((struct page *)(VMALLOC_END + SZ_64K))
+#define VMEMMAP_START	(VMALLOC_END + SZ_64K)
+#define vmemmap		((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS	0UL
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f3b061e67bfe..7802f216a67a 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -319,8 +319,8 @@ void __init mem_init(void)
 #endif
 		  MLG(VMALLOC_START, VMALLOC_END),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-		  MLG((unsigned long)vmemmap,
-		      (unsigned long)vmemmap + VMEMMAP_SIZE),
+		  MLG(VMEMMAP_START,
+		      VMEMMAP_START + VMEMMAP_SIZE),
 		  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
 		      (unsigned long)virt_to_page(high_memory)),
 #endif
-- cgit v1.2.3
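To make the pointer arithmetic of the new vmemmap definition concrete,
here is a small user-space sketch; the VMEMMAP_START value reuses the
dmesg output quoted above, while memstart_addr and sizeof(struct page)
are assumed, illustrative values:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define VMEMMAP_START	0xffffffbdc0000000ULL	/* from the dmesg above */
	#define PAGE_STRUCT_SZ	64ULL			/* assumed sizeof(struct page) */

	int main(void)
	{
		/* illustrative: RAM starting at 512 GB physical */
		uint64_t memstart_addr = 0x8000000000ULL;

		/* vmemmap = (struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT) */
		uint64_t vmemmap = VMEMMAP_START -
				   (memstart_addr >> PAGE_SHIFT) * PAGE_STRUCT_SZ;

		/* pfn_to_page() of the first RAM page, i.e. vmemmap + first_pfn */
		uint64_t first_pfn = memstart_addr >> PAGE_SHIFT;
		printf("%#llx\n",
		       (unsigned long long)(vmemmap + first_pfn * PAGE_STRUCT_SZ));
		return 0;
	}

The printed address is exactly VMEMMAP_START: because vmemmap is biased
backwards by the physical start of RAM, the struct page array for the
linear region always begins at the bottom of the vmemmap window, no
matter how high memstart_addr sits.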