From 05513992c69d159e698bb4b5f651abd0ceff525c Mon Sep 17 00:00:00 2001
From: Andrzej Hajda
Date: Mon, 21 Sep 2015 15:34:08 +0200
Subject: MIPS: Remove invalid check

Unsigned values cannot be less than zero. The problem has been detected
using a proposed semantic patch,
scripts/coccinelle/tests/unsigned_lesser_than_zero.cocci [1].

[ralf@linux-mips.org: Chris Dearman's original commit
9318c51acd9689505850152cc98277a6d6f2d752 ([MIPS] MIPS32/MIPS64 secondary
cache management) introduced these less than zero checks in 2.6.18.]

[1]: http://permalink.gmane.org/gmane.linux.kernel/2038576

Signed-off-by: Andrzej Hajda
Cc: linux-kernel@vger.kernel.org
Cc: Bartlomiej Zolnierkiewicz
Cc: Marek Szyprowski
Cc: Markos Chandras
Cc: Chris Dearman
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/11165/
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/sc-mips.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/mips/mm')

diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 53ea8391f9bb..1755187a6545 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -162,13 +162,13 @@ static inline int __init mips_sc_probe(void)
 		return 0;
 
 	tmp = (config2 >> 8) & 0x0f;
-	if (0 <= tmp && tmp <= 7)
+	if (tmp <= 7)
 		c->scache.sets = 64 << tmp;
 	else
 		return 0;
 
 	tmp = (config2 >> 0) & 0x0f;
-	if (0 <= tmp && tmp <= 7)
+	if (tmp <= 7)
 		c->scache.ways = tmp + 1;
 	else
 		return 0;
--
cgit v1.2.3
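As a standalone illustration of the class of dead code this patch removes (a hypothetical user-space sketch, not part of the kernel change): with an unsigned type, the "0 <= tmp" half of the test can never be false, so only the upper-bound check carries any meaning, and GCC's -Wtype-limits warns about exactly this comparison.

#include <stdio.h>

int main(void)
{
	unsigned int config2 = 0x80000647;	/* made-up Config2 value */
	unsigned int tmp = (config2 >> 8) & 0x0f;

	/*
	 * "0 <= tmp && tmp <= 7" reduces to "tmp <= 7": an unsigned
	 * value is never negative, so the first comparison is always true.
	 */
	if (tmp <= 7)
		printf("sets = %u\n", 64u << tmp);
	else
		printf("reserved encoding\n");

	return 0;
}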
From 4d035516921713b41bb279682e53b4fbd5a87232 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 22 Sep 2015 10:10:54 -0700
Subject: MIPS: Enable L2 prefetching for CM >= 2.5

On systems with CM 2.5 & beyond there may be L2 prefetch units present
which are not enabled by default. Detect them, configuring & enabling
prefetching when available.

Signed-off-by: Paul Burton
Cc: linux-mips@linux-mips.org
Cc: Leonid Yegoshin
Cc: linux-kernel@vger.kernel.org
Cc: James Hogan
Cc: Markos Chandras
Patchwork: https://patchwork.linux-mips.org/patch/11180/
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/mips-cm.h | 17 ++++++++++++
 arch/mips/mm/sc-mips.c          | 61 ++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 77 insertions(+), 1 deletion(-)

(limited to 'arch/mips/mm')

diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 1f1927ab4269..d66d0e00365b 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -195,6 +195,8 @@ BUILD_CM_R_(gic_status,	MIPS_CM_GCB_OFS + 0xd0)
 BUILD_CM_R_(cpc_status,	MIPS_CM_GCB_OFS + 0xf0)
 BUILD_CM_RW(l2_config,	MIPS_CM_GCB_OFS + 0x130)
 BUILD_CM_RW(sys_config2,	MIPS_CM_GCB_OFS + 0x150)
+BUILD_CM_RW(l2_pft_control,	MIPS_CM_GCB_OFS + 0x300)
+BUILD_CM_RW(l2_pft_control_b,	MIPS_CM_GCB_OFS + 0x308)
 
 /* Core Local & Core Other register accessor functions */
 BUILD_CM_Cx_RW(reset_release,	0x00)
@@ -245,6 +247,7 @@ BUILD_CM_Cx_R_(tcid_8_priority,	0x80)
 					 ((minor) << CM_GCR_REV_MINOR_SHF))
 
 #define CM_REV_CM2		CM_ENCODE_REV(6, 0)
+#define CM_REV_CM2_5		CM_ENCODE_REV(7, 0)
 #define CM_REV_CM3		CM_ENCODE_REV(8, 0)
 
 /* GCR_ERROR_CAUSE register fields */
@@ -321,6 +324,20 @@ BUILD_CM_Cx_R_(tcid_8_priority,	0x80)
 #define CM_GCR_SYS_CONFIG2_MAXVPW_SHF	0
 #define CM_GCR_SYS_CONFIG2_MAXVPW_MSK	(_ULCAST_(0xf) << 0)
 
+/* GCR_L2_PFT_CONTROL register fields */
+#define CM_GCR_L2_PFT_CONTROL_PAGEMASK_SHF	12
+#define CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK	(_ULCAST_(0xfffff) << 12)
+#define CM_GCR_L2_PFT_CONTROL_PFTEN_SHF	8
+#define CM_GCR_L2_PFT_CONTROL_PFTEN_MSK	(_ULCAST_(0x1) << 8)
+#define CM_GCR_L2_PFT_CONTROL_NPFT_SHF	0
+#define CM_GCR_L2_PFT_CONTROL_NPFT_MSK	(_ULCAST_(0xff) << 0)
+
+/* GCR_L2_PFT_CONTROL_B register fields */
+#define CM_GCR_L2_PFT_CONTROL_B_CEN_SHF	8
+#define CM_GCR_L2_PFT_CONTROL_B_CEN_MSK	(_ULCAST_(0x1) << 8)
+#define CM_GCR_L2_PFT_CONTROL_B_PORTID_SHF	0
+#define CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK	(_ULCAST_(0xff) << 0)
+
 /* GCR_Cx_COHERENCE register fields */
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF	0
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK	(_ULCAST_(0xff) << 0)
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 1755187a6545..3bd0597d9c3d 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -51,11 +51,69 @@ static void mips_sc_disable(void)
 	/* L2 cache is permanently enabled */
 }
 
+static void mips_sc_prefetch_enable(void)
+{
+	unsigned long pftctl;
+
+	if (mips_cm_revision() < CM_REV_CM2_5)
+		return;
+
+	/*
+	 * If there is one or more L2 prefetch unit present then enable
+	 * prefetching for both code & data, for all ports.
+	 */
+	pftctl = read_gcr_l2_pft_control();
+	if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK) {
+		pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
+		pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
+		pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
+		write_gcr_l2_pft_control(pftctl);
+
+		pftctl = read_gcr_l2_pft_control_b();
+		pftctl |= CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
+		pftctl |= CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
+		write_gcr_l2_pft_control_b(pftctl);
+	}
+}
+
+static void mips_sc_prefetch_disable(void)
+{
+	unsigned long pftctl;
+
+	if (mips_cm_revision() < CM_REV_CM2_5)
+		return;
+
+	pftctl = read_gcr_l2_pft_control();
+	pftctl &= ~CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
+	write_gcr_l2_pft_control(pftctl);
+
+	pftctl = read_gcr_l2_pft_control_b();
+	pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
+	pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
+	write_gcr_l2_pft_control_b(pftctl);
+}
+
+static bool mips_sc_prefetch_is_enabled(void)
+{
+	unsigned long pftctl;
+
+	if (mips_cm_revision() < CM_REV_CM2_5)
+		return false;
+
+	pftctl = read_gcr_l2_pft_control();
+	if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK))
+		return false;
+	return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN_MSK);
+}
+
 static struct bcache_ops mips_sc_ops = {
 	.bc_enable = mips_sc_enable,
 	.bc_disable = mips_sc_disable,
 	.bc_wback_inv = mips_sc_wback_inv,
-	.bc_inv = mips_sc_inv
+	.bc_inv = mips_sc_inv,
+	.bc_prefetch_enable = mips_sc_prefetch_enable,
+	.bc_prefetch_disable = mips_sc_prefetch_disable,
+	.bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
 };
 
 /*
@@ -186,6 +244,7 @@ int mips_sc_init(void)
 	int found = mips_sc_probe();
 	if (found) {
 		mips_sc_enable();
+		mips_sc_prefetch_enable();
 		bcops = &mips_sc_ops;
 	}
 	return found;
--
cgit v1.2.3

From d478b088a2f74fc8f34af7ceed86fa7640ca8610 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 22 Sep 2015 10:10:56 -0700
Subject: MIPS: Allow L2 prefetch to be configured via debugfs

When debugging or examining the performance of a system, it can be
useful to see the effect of L2 prefetching. Provide an optional debugfs
entry to allow a user to enable or disable L2 prefetching.

Signed-off-by: Paul Burton
Cc: Maciej W. Rozycki
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/11182/
Signed-off-by: Ralf Baechle
---
 arch/mips/Kconfig.debug   | 10 ++++++
 arch/mips/mm/Makefile     |  1 +
 arch/mips/mm/sc-debugfs.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 92 insertions(+)
 create mode 100644 arch/mips/mm/sc-debugfs.c

(limited to 'arch/mips/mm')

diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 13d796547ebd..b43e288ec567 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -149,4 +149,14 @@ endchoice
 
 endif # CPU_MIPSR6
 
+config SCACHE_DEBUGFS
+	bool "L2 cache debugfs entries"
+	depends on DEBUG_FS
+	help
+	  Enable this to allow parts of the L2 cache configuration, such as
+	  whether or not prefetching is enabled, to be exposed to userland
+	  via debugfs.
+
+	  If unsure, say N.
+
 endmenu
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 67ede4ef9b8d..b4c64bd3f723 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_IP22_CPU_SCACHE)	+= sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE)	+= sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE)	+= sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)	+= sc-mips.o
+obj-$(CONFIG_SCACHE_DEBUGFS)	+= sc-debugfs.o
diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c
new file mode 100644
index 000000000000..5eefe3281b24
--- /dev/null
+++ b/arch/mips/mm/sc-debugfs.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Paul Burton
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/bcache.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+static ssize_t sc_prefetch_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	bool enabled = bc_prefetch_is_enabled();
+	char buf[3];
+
+	buf[0] = enabled ? 'Y' : 'N';
+	buf[1] = '\n';
+	buf[2] = 0;
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t sc_prefetch_write(struct file *file,
+				 const char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	char buf[32];
+	ssize_t buf_size;
+	bool enabled;
+	int err;
+
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+	err = strtobool(buf, &enabled);
+	if (err)
+		return err;
+
+	if (enabled)
+		bc_prefetch_enable();
+	else
+		bc_prefetch_disable();
+
+	return count;
+}
+
+static const struct file_operations sc_prefetch_fops = {
+	.open = simple_open,
+	.llseek = default_llseek,
+	.read = sc_prefetch_read,
+	.write = sc_prefetch_write,
+};
+
+static int __init sc_debugfs_init(void)
+{
+	struct dentry *dir, *file;
+
+	if (!mips_debugfs_dir)
+		return -ENODEV;
+
+	dir = debugfs_create_dir("l2cache", mips_debugfs_dir);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir,
+				   NULL, &sc_prefetch_fops);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	return 0;
+}
+late_initcall(sc_debugfs_init);
--
cgit v1.2.3
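With this in place, the new file behaves like any other boolean debugfs attribute. Assuming debugfs is mounted at the usual /sys/kernel/debug and mips_debugfs_dir is the standard "mips" directory, it appears at /sys/kernel/debug/mips/l2cache/prefetch; a rough user-space sketch of driving it (illustrative, with minimal error handling):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/mips/l2cache/prefetch";
	char state;
	int fd = open(path, O_RDWR);	/* needs root */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The read handler returns "Y\n" or "N\n" */
	if (read(fd, &state, 1) == 1)
		printf("L2 prefetch currently: %c\n", state);

	/* The write handler accepts whatever strtobool() does: 0/1, y/n, Y/N */
	if (write(fd, "0", 1) != 1)
		perror("write");

	close(fd);
	return 0;
}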
From dbfd657ad12d872d04d5b105876669058285a166 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 22 Sep 2015 11:42:48 -0700
Subject: MIPS: tlbex: Stop open-coding build_convert_pte_to_entrylo

Make use of build_convert_pte_to_entrylo in the RIXI cases within
build_update_entries rather than open-coding it 4 times.

Signed-off-by: Paul Burton
Cc: linux-mips@linux-mips.org
Cc: Steven J. Hill
Cc: Leonid Yegoshin
Cc: linux-kernel@vger.kernel.org
Cc: James Hogan
Cc: Markos Chandras
Patchwork: https://patchwork.linux-mips.org/patch/11214/
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/tlbex.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch/mips/mm')

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 323d1d302f2b..0dd24b00e551 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1010,13 +1010,13 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 		if (cpu_has_rixi) {
-			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+			build_convert_pte_to_entrylo(p, tmp);
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+			build_convert_pte_to_entrylo(p, ptep);
 		} else {
-			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+			build_convert_pte_to_entrylo(p, tmp);
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+			build_convert_pte_to_entrylo(p, ptep);
 		}
 		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
@@ -1050,17 +1050,17 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		if (r45k_bvahwbug())
 			build_tlb_probe_entry(p);
 		if (cpu_has_rixi) {
-			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+			build_convert_pte_to_entrylo(p, tmp);
 			if (r4k_250MHZhwbug())
 				UASM_i_MTC0(p, 0, C0_ENTRYLO0);
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+			build_convert_pte_to_entrylo(p, ptep);
 		} else {
-			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+			build_convert_pte_to_entrylo(p, tmp);
 			if (r4k_250MHZhwbug())
 				UASM_i_MTC0(p, 0, C0_ENTRYLO0);
 			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+			build_convert_pte_to_entrylo(p, ptep);
 			if (r45k_bvahwbug())
 				uasm_i_mfc0(p, tmp, C0_INDEX);
 		}
--
cgit v1.2.3
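The helper being reused here boils down to a single rotate on RIXI-capable CPUs: the PTE's low software bits are rotated up into EntryLo's fill (and RI/XI) bits rather than shifted out. A host-side sketch of that arithmetic, with an assumed _PAGE_GLOBAL position standing in for the real, configuration-dependent bit layout:

#include <stdint.h>
#include <stdio.h>

/* Assumed for illustration only: global bit at bit 5, so everything
 * below it is software/protection payload to be rotated away.
 */
#define ILOG2_PAGE_GLOBAL 5

static uint32_t rotr32(uint32_t v, unsigned n)
{
	return (v >> n) | (v << (32 - n));
}

int main(void)
{
	uint32_t pte = 0x0abcd07f;	/* sample PTE: PFN bits + low sw bits */
	uint32_t entrylo = rotr32(pte, ILOG2_PAGE_GLOBAL);

	/* The low 5 software bits now sit at the top of the register,
	 * where EntryLo's fill bits ignore them - provided the CPU really
	 * has that many fill bits (see the check_pabits patch below for
	 * the case where it does not).
	 */
	printf("pte     = 0x%08x\n", (unsigned)pte);
	printf("entrylo = 0x%08x\n", (unsigned)entrylo);
	return 0;
}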
From 974a0b6a2c40260b0ac64a5523106f5991cc5127 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 22 Sep 2015 11:42:49 -0700
Subject: MIPS: tlbex: Remove some RIXI redundancy

The cpu_has_rixi cases in build_update_entries are now identical to the
non-RIXI cases, with the one exception of the r45k_bvahwbug case, which
is hardcoded as never happening anyway & presumably was either missed
from the RIXI path or would never happen on a CPU with RIXI support.
Remove the redundant checks & duplication.

Signed-off-by: Paul Burton
Cc: linux-mips@linux-mips.org
Cc: Steven J. Hill
Cc: Leonid Yegoshin
Cc: Paul Gortmaker
Cc: linux-kernel@vger.kernel.org
Cc: James Hogan
Cc: Markos Chandras
Patchwork: https://patchwork.linux-mips.org/patch/11215/
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/tlbex.c | 34 ++++++++++------------------------
 1 file changed, 10 insertions(+), 24 deletions(-)

(limited to 'arch/mips/mm')

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0dd24b00e551..ce5a0ecfeb21 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1009,15 +1009,9 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	if (cpu_has_64bits) {
 		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
 		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		if (cpu_has_rixi) {
-			build_convert_pte_to_entrylo(p, tmp);
-			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			build_convert_pte_to_entrylo(p, ptep);
-		} else {
-			build_convert_pte_to_entrylo(p, tmp);
-			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-			build_convert_pte_to_entrylo(p, ptep);
-		}
+		build_convert_pte_to_entrylo(p, tmp);
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+		build_convert_pte_to_entrylo(p, ptep);
 		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 	} else {
 		int pte_off_even = sizeof(pte_t) / 2;
@@ -1049,21 +1043,13 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
-	if (cpu_has_rixi) {
-		build_convert_pte_to_entrylo(p, tmp);
-		if (r4k_250MHZhwbug())
-			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		build_convert_pte_to_entrylo(p, ptep);
-	} else {
-		build_convert_pte_to_entrylo(p, tmp);
-		if (r4k_250MHZhwbug())
-			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		build_convert_pte_to_entrylo(p, ptep);
-		if (r45k_bvahwbug())
-			uasm_i_mfc0(p, tmp, C0_INDEX);
-	}
+	build_convert_pte_to_entrylo(p, tmp);
+	if (r4k_250MHZhwbug())
+		UASM_i_MTC0(p, 0, C0_ENTRYLO0);
+	UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+	build_convert_pte_to_entrylo(p, ptep);
+	if (r45k_bvahwbug())
+		uasm_i_mfc0(p, tmp, C0_INDEX);
 	if (r4k_250MHZhwbug())
 		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
--
cgit v1.2.3
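Some context for build_update_entries, which this patch and the next keep reshaping: a MIPS TLB entry maps a pair of adjacent virtual pages, so the refill handler always fetches two consecutive PTEs - the even one bound for EntryLo0, the odd one for EntryLo1. A simplified C model of the sequence the uasm calls emit (illustrative only; the real handler is generated machine code):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t pte_t;

/* Model: ptep points at the even PTE of the pair covering the faulting
 * address; the odd PTE immediately follows it in the page table.
 */
static void model_update_entries(const pte_t *ptep,
				 uint32_t *entrylo0, uint32_t *entrylo1)
{
	pte_t even = ptep[0];	/* uasm: LW/LD tmp, 0(ptep) */
	pte_t odd  = ptep[1];	/* uasm: LW/LD ptep, sizeof(pte_t)(ptep) */

	/* build_convert_pte_to_entrylo() would rotate/shift here */
	*entrylo0 = even;
	*entrylo1 = odd;
}

int main(void)
{
	pte_t pair[2] = { 0x00001e27, 0x00001f27 };
	uint32_t lo0, lo1;

	model_update_entries(pair, &lo0, &lo1);
	printf("EntryLo0=0x%08x EntryLo1=0x%08x\n",
	       (unsigned)lo0, (unsigned)lo1);
	return 0;
}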
From c676589b057a01dab418985f018123a634c1265c Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 22 Sep 2015 11:42:50 -0700
Subject: MIPS: tlbex: Share MIPS32 32 bit phys & MIPS64 64 bit phys code

The code in build_update_entries for 64 bit physical addresses on a
MIPS64 CPU and 32 bit physical addresses on a MIPS32 CPU is now
identical, with the exception of the r4k bug workaround in the latter,
which would simply not apply to the former. Remove the duplication and
some #ifdefery.

Signed-off-by: Paul Burton
Cc: linux-mips@linux-mips.org
Cc: Steven J. Hill
Cc: Leonid Yegoshin
Cc: linux-kernel@vger.kernel.org
Cc: James Hogan
Cc: Markos Chandras
Patchwork: https://patchwork.linux-mips.org/patch/11216/
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/tlbex.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

(limited to 'arch/mips/mm')

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index ce5a0ecfeb21..bc829fc2421d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1005,15 +1005,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
 	 * Kernel is a special case. Only a few CPUs use it.
 	 */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-	if (cpu_has_64bits) {
-		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
-		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
-		build_convert_pte_to_entrylo(p, tmp);
-		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		build_convert_pte_to_entrylo(p, ptep);
-		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
-	} else {
+	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
 		int pte_off_even = sizeof(pte_t) / 2;
 		int pte_off_odd = pte_off_even + sizeof(pte_t);
 #ifdef CONFIG_XPA
@@ -1037,8 +1029,9 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
 		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
 #endif
+		return;
 	}
-#else
+
 	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
 	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
 	if (r45k_bvahwbug())
 		build_tlb_probe_entry(p);
@@ -1053,7 +1046,6 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	if (r4k_250MHZhwbug())
 		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
-#endif
 }
 
 struct mips_huge_tlb_info {
--
cgit v1.2.3
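The #ifdef removal above leans on the config_enabled() idiom (IS_ENABLED() in later kernels): the condition is a compile-time constant, so the dead branch is still parsed and type-checked but then discarded by the optimizer. A minimal sketch of the pattern outside the kernel, with a stand-in macro since real Kconfig machinery is not available here:

#include <stdio.h>

/* Stand-in for the kernel's config_enabled()/IS_ENABLED(): with real
 * Kconfig these expand from CONFIG_* symbols; here we hardcode one.
 */
#define MY_CONFIG_PHYS_ADDR_64BIT 0
#define config_enabled(x) (x)

static void build_update_entries(void)
{
	if (config_enabled(MY_CONFIG_PHYS_ADDR_64BIT)) {
		/* Special-case path: always compiled and type-checked,
		 * but eliminated as dead code when the option is off.
		 */
		printf("64-bit phys path\n");
		return;
	}
	printf("common path\n");
}

int main(void)
{
	build_update_entries();
	return 0;
}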
From 00bf1c691d082c1945fdba032c03a9a82e9e7e61 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 22 Sep 2015 11:42:52 -0700
Subject: MIPS: tlbex: Avoid placing software PTE bits in Entry* PFN fields

Commit 748e787eb6de ("MIPS: Optimize TLB refill for RI/XI
configurations.") stopped explicitly clearing the bits used by software
in PTEs by making use of a rotate instruction that rotates them into
the fill bits of the Entry{Lo,Hi} register. This can only work if there
are actually enough fill bits in the register to cover the
software-maintained bits; otherwise we end up writing those bits into
the upper bits of the PFN or PFNX field of the Entry{Lo,Hi} register.
Fix this by detecting the number of fill bits present in the
Entry{Lo,Hi} registers & explicitly clearing the software bits where
necessary.

Signed-off-by: Paul Burton
Cc: linux-mips@linux-mips.org
Cc: Steven J. Hill
Cc: Leonid Yegoshin
Cc: Paul Gortmaker
Cc: linux-kernel@vger.kernel.org
Cc: James Hogan
Cc: Markos Chandras
Patchwork: https://patchwork.linux-mips.org/patch/11218/
Signed-off-by: Ralf Baechle
---
 arch/mips/mm/tlbex.c | 47 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 45 insertions(+), 2 deletions(-)

(limited to 'arch/mips/mm')

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index bc829fc2421d..6b7cc20efd92 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -311,6 +311,7 @@ static struct uasm_label labels[128];
 static struct uasm_reloc relocs[128];
 
 static int check_for_high_segbits;
+static bool fill_includes_sw_bits;
 
 static unsigned int kscratch_used_mask;
 
@@ -630,8 +631,14 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
 {
-	if (cpu_has_rixi) {
-		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+	if (cpu_has_rixi && _PAGE_NO_EXEC) {
+		if (fill_includes_sw_bits) {
+			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+		} else {
+			UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+			UASM_i_ROTR(p, reg, reg,
+				    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+		}
 	} else {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -2338,6 +2345,41 @@ static void config_xpa_params(void)
 #endif
 }
 
+static void check_pabits(void)
+{
+	unsigned long entry;
+	unsigned pabits, fillbits;
+
+	if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
+		/*
+		 * We'll only be making use of the fact that we can rotate bits
+		 * into the fill if the CPU supports RIXI, so don't bother
+		 * probing this for CPUs which don't.
+		 */
+		return;
+	}
+
+	write_c0_entrylo0(~0ul);
+	back_to_back_c0_hazard();
+	entry = read_c0_entrylo0();
+
+	/* clear all non-PFN bits */
+	entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
+	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
+
+	/* find a lower bound on PABITS, and upper bound on fill bits */
+	pabits = fls_long(entry) + 6;
+	fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
+
+	/* minus the RI & XI bits */
+	fillbits -= min_t(unsigned, fillbits, 2);
+
+	if (fillbits >= ilog2(_PAGE_NO_EXEC))
+		fill_includes_sw_bits = true;
+
+	pr_debug("Entry* registers contain %u fill bits\n", fillbits);
+}
+
 void build_tlb_refill_handler(void)
 {
 	/*
@@ -2348,6 +2390,7 @@ void build_tlb_refill_handler(void)
 	static int run_once = 0;
 
 	output_pgtable_bits_defines();
+	check_pabits();
 
 #ifdef CONFIG_64BIT
 	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
--
cgit v1.2.3
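check_pabits() uses a classic CP0 probing trick: write all-ones to EntryLo0, read it back, and whatever stuck is implemented; the fill-bit estimate then falls out of where the PFN field tops out. A plain-C model of that calculation - the constants mirror the patch, but the read-back value is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG		64
#define MIPS_ENTRYLO_PFN_SHIFT	6		/* PFN starts at bit 6 */
#define MIPS_ENTRYLO_RI		(1ull << 63)
#define MIPS_ENTRYLO_XI		(1ull << 62)

static unsigned fls64_(uint64_t x)	/* 1-based index of highest set bit */
{
	unsigned r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* Invented read-back after writing ~0 to EntryLo0 on a made-up
	 * MIPS64 core: RI/XI implemented, PFN implemented up to bit 29
	 * (i.e. 36 physical address bits).
	 */
	uint64_t entry = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI | 0x3fffffc0ull;

	entry &= ~((1ull << MIPS_ENTRYLO_PFN_SHIFT) - 1);  /* clear non-PFN bits */
	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);

	unsigned pabits = fls64_(entry) + 6;	/* lower bound on PABITS */
	unsigned fillbits = BITS_PER_LONG > pabits ? BITS_PER_LONG - pabits : 0;

	fillbits -= fillbits < 2 ? fillbits : 2;	/* minus the RI & XI bits */

	printf("pabits >= %u, usable fill bits = %u\n", pabits, fillbits);
	return 0;
}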
From cab25bc7537badea41f635a049f45408c24dff26 Mon Sep 17 00:00:00 2001
From: Paul Burton
Date: Tue, 22 Sep 2015 12:03:37 -0700
Subject: MIPS: Extend hardware table walking support to MIPS64

Extend the existing support for Hardware Table Walking (HTW) to MIPS64
systems by supporting PMDs & setting the pointer size bit in PWSize,
then ceasing to blacklist HTW on MIPS64 systems.

Signed-off-by: Paul Burton
Cc: linux-mips@linux-mips.org
Cc: Steven J. Hill
Cc: Joshua Kinard
Cc: Leonid Yegoshin
Cc: Maciej W. Rozycki
Cc: Paul Gortmaker
Cc: linux-kernel@vger.kernel.org
Cc: James Hogan
Cc: Markos Chandras
Patchwork: https://patchwork.linux-mips.org/patch/11224/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/cpu-probe.c | 3 +--
 arch/mips/mm/tlbex.c         | 8 +++++++-
 2 files changed, 8 insertions(+), 3 deletions(-)

(limited to 'arch/mips/mm')

diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 09a51d091941..6b9064499bd3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -536,8 +536,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
 		c->options |= MIPS_CPU_SEGMENTS;
 	if (config3 & MIPS_CONF3_MSA)
 		c->ases |= MIPS_ASE_MSA;
-	/* Only tested on 32-bit cores */
-	if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) {
+	if (config3 & MIPS_CONF3_PW) {
 		c->htw_seq = 0;
 		c->options |= MIPS_CPU_HTW;
 	}
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 6b7cc20efd92..32e0be27673f 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -2284,6 +2284,10 @@ static void config_htw_params(void)
 	/* re-initialize the PTI field including the even/odd bit */
 	pwfield &= ~MIPS_PWFIELD_PTI_MASK;
 	pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
+	if (CONFIG_PGTABLE_LEVELS >= 3) {
+		pwfield &= ~MIPS_PWFIELD_MDI_MASK;
+		pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
+	}
 	/* Set the PTEI right shift */
 	ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
 	pwfield |= ptei;
@@ -2305,9 +2309,11 @@ static void config_htw_params(void)
 
 	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
 	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+	if (CONFIG_PGTABLE_LEVELS >= 3)
+		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
 
 	/* On 64-bit kernels, or if XPA has been enabled, PTEs are 64-bit in size. */
-	if (read_c0_pagegrain() & PG_ELPA)
+	if (config_enabled(CONFIG_64BIT) || (read_c0_pagegrain() & PG_ELPA))
 		pwsize |= 1;
 
 	write_c0_pwsize(pwsize);
--
cgit v1.2.3
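To make the PWField/PWSize programming above concrete, here is roughly what the hardware walker is told for one assumed configuration (16K pages, 8-byte PTEs, three table levels - the constants below are illustrative, computed from assumed PAGE_SHIFT/PTRS_PER_* values rather than read from a real kernel build):

#include <stdio.h>

/* Assumed example configuration, not taken from any particular board */
#define PAGE_SHIFT	14
#define PTRS_PER_PGD	2048
#define PTRS_PER_PMD	2048
#define PTRS_PER_PTE	2048			/* 16K page / 8-byte PTE */
#define PMD_SHIFT	(PAGE_SHIFT + 11)	/* 11 == ilog2(PTRS_PER_PTE) */

static unsigned ilog2_(unsigned long v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* PWField: the bit position in the VA where each level's index starts */
	printf("PWField.PTI = %u\n", (unsigned)PAGE_SHIFT);
	printf("PWField.MDI = %u\n", (unsigned)PMD_SHIFT);

	/* PWSize: how many index bits the walker extracts per level, plus
	 * the PS bit (1 = 8-byte directory/table pointers, as on 64-bit).
	 */
	printf("PWSize.GDW = %u\n", ilog2_(PTRS_PER_PGD));
	printf("PWSize.MDW = %u\n", ilog2_(PTRS_PER_PMD));
	printf("PWSize.PTW = %u\n", ilog2_(PTRS_PER_PTE));
	printf("PWSize.PS  = 1\n");
	return 0;
}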