Diffstat (limited to 'arch/sparc')
68 files changed, 508 insertions, 1038 deletions
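Most of the churn below comes from retiring the sparc64 crypto-API glue drivers in favour of the generic library interfaces: CONFIG_SPARC64 now selects ARCH_HAS_CRC32, the old "crc32c" shash driver (arch/sparc/crypto/crc32c_glue.c) is deleted, and the opcode-accelerated routine is exposed as crc32c_arch() from arch/sparc/lib/crc32.c. The sketch below is illustrative only and not part of this diff; it assumes the generic <linux/crc32.h> helpers, through which in-kernel users now reach the accelerated code without allocating a crypto transform.

/* Illustrative caller, not part of this diff: with ARCH_HAS_CRC32 the
 * library helper dispatches to crc32c_arch(), which uses the sparc64
 * crc32c opcode when CFR_CRC32C is present and otherwise falls back to
 * the table-based crc32c_base().
 */
#include <linux/crc32.h>
#include <linux/types.h>

static u32 example_crc32c(const void *buf, size_t len)
{
	/* conventional CRC32C usage: seed with ~0 and invert the result */
	return ~crc32c(~0, buf, len);
}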
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index dcfdb7f1dae9..0f88123925a4 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -110,6 +110,7 @@ config SPARC64 select HAVE_SETUP_PER_CPU_AREA select NEED_PER_CPU_EMBED_FIRST_CHUNK select NEED_PER_CPU_PAGE_FIRST_CHUNK + select ARCH_HAS_CRC32 config ARCH_PROC_KCORE_TEXT def_bool y diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index 757451c3ea1d..0400078076e5 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -29,7 +29,7 @@ UTS_MACHINE := sparc # versions of gcc. Some gcc versions won't pass -Av8 to binutils when you # give -mcpu=v8. This silently worked with older bintutils versions but # does not any more. -KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 +KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu $(call cc-option,-fcall-used-g5) $(call cc-option,-fcall-used-g7) KBUILD_CFLAGS += -Wa,-Av8 KBUILD_AFLAGS += -m32 -Wa,-Av8 @@ -45,7 +45,7 @@ export BITS := 64 UTS_MACHINE := sparc64 KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow -KBUILD_CFLAGS += -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare +KBUILD_CFLAGS += -ffixed-g4 -ffixed-g5 $(call cc-option,-fcall-used-g7) -Wno-sign-compare KBUILD_CFLAGS += -Wa,--undeclared-regs KBUILD_CFLAGS += $(call cc-option,-mtune=ultrasparc3) KBUILD_AFLAGS += -m64 -mcpu=ultrasparc -Wa,--undeclared-regs diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig index 5010164de3e4..f6341b063b01 100644 --- a/arch/sparc/configs/sparc32_defconfig +++ b/arch/sparc/configs/sparc32_defconfig @@ -94,4 +94,3 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set -CONFIG_LIBCRC32C=m diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 0bb0d4da5227..7a7c4dec2925 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -205,7 +205,7 @@ CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENTS=y CONFIG_KEYS=y CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_XTS=m @@ -229,8 +229,6 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC16=m -CONFIG_LIBCRC32C=m CONFIG_VCC=m CONFIG_PATA_CMD64X=y CONFIG_IP_PNP=y diff --git a/arch/sparc/crypto/Kconfig b/arch/sparc/crypto/Kconfig index cfe5102b1c68..a6ba319c42dc 100644 --- a/arch/sparc/crypto/Kconfig +++ b/arch/sparc/crypto/Kconfig @@ -16,16 +16,6 @@ config CRYPTO_DES_SPARC64 Architecture: sparc64 -config CRYPTO_CRC32C_SPARC64 - tristate "CRC32c" - depends on SPARC64 - select CRYPTO_HASH - select CRC32 - help - CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720) - - Architecture: sparc64 - config CRYPTO_MD5_SPARC64 tristate "Digests: MD5" depends on SPARC64 @@ -46,16 +36,6 @@ config CRYPTO_SHA1_SPARC64 Architecture: sparc64 -config CRYPTO_SHA256_SPARC64 - tristate "Hash functions: SHA-224 and SHA-256" - depends on SPARC64 - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: sparc64 using crypto instructions, when available - config CRYPTO_SHA512_SPARC64 tristate "Hash functions: SHA-384 and SHA-512" depends on SPARC64 diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile index d257186c27d1..701c39edb0d7 100644 --- a/arch/sparc/crypto/Makefile +++ b/arch/sparc/crypto/Makefile @@ -4,7 +4,6 
@@ # obj-$(CONFIG_CRYPTO_SHA1_SPARC64) += sha1-sparc64.o -obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o obj-$(CONFIG_CRYPTO_SHA512_SPARC64) += sha512-sparc64.o obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o @@ -12,15 +11,10 @@ obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o -obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o - sha1-sparc64-y := sha1_asm.o sha1_glue.o -sha256-sparc64-y := sha256_asm.o sha256_glue.o sha512-sparc64-y := sha512_asm.o sha512_glue.o md5-sparc64-y := md5_asm.o md5_glue.o aes-sparc64-y := aes_asm.o aes_glue.o des-sparc64-y := des_asm.o des_glue.o camellia-sparc64-y := camellia_asm.o camellia_glue.o - -crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o diff --git a/arch/sparc/crypto/aes_asm.S b/arch/sparc/crypto/aes_asm.S index 155cefb98520..f291174a72a1 100644 --- a/arch/sparc/crypto/aes_asm.S +++ b/arch/sparc/crypto/aes_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - #define ENCRYPT_TWO_ROUNDS(KEY_BASE, I0, I1, T0, T1) \ AES_EROUND01(KEY_BASE + 0, I0, I1, T0) \ AES_EROUND23(KEY_BASE + 2, I0, I1, T1) \ diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index e3d2138ff9e2..359f22643b05 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -27,11 +27,10 @@ #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> +#include <asm/opcodes.h> #include <asm/pstate.h> #include <asm/elf.h> -#include "opcodes.h" - struct aes_ops { void (*encrypt)(const u64 *key, const u32 *input, u32 *output); void (*decrypt)(const u64 *key, const u32 *input, u32 *output); @@ -321,7 +320,7 @@ static void ctr_crypt_final(const struct crypto_sparc64_aes_ctx *ctx, { u8 *ctrblk = walk->iv; u64 keystream[AES_BLOCK_SIZE / sizeof(u64)]; - u8 *src = walk->src.virt.addr; + const u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; diff --git a/arch/sparc/crypto/camellia_asm.S b/arch/sparc/crypto/camellia_asm.S index dcdc9193fcd7..8471b346ef54 100644 --- a/arch/sparc/crypto/camellia_asm.S +++ b/arch/sparc/crypto/camellia_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - #define CAMELLIA_6ROUNDS(KEY_BASE, I0, I1) \ CAMELLIA_F(KEY_BASE + 0, I1, I0, I1) \ CAMELLIA_F(KEY_BASE + 2, I0, I1, I0) \ diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c index aaa9714378e6..e7a1e1c42b99 100644 --- a/arch/sparc/crypto/camellia_glue.c +++ b/arch/sparc/crypto/camellia_glue.c @@ -15,11 +15,10 @@ #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> +#include <asm/opcodes.h> #include <asm/pstate.h> #include <asm/elf.h> -#include "opcodes.h" - #define CAMELLIA_MIN_KEY_SIZE 16 #define CAMELLIA_MAX_KEY_SIZE 32 #define CAMELLIA_BLOCK_SIZE 16 diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c deleted file mode 100644 index 913b9a09e885..000000000000 --- a/arch/sparc/crypto/crc32c_glue.c +++ /dev/null @@ -1,184 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Glue code for CRC32C optimized for sparc64 crypto opcodes. 
- * - * This is based largely upon arch/x86/crypto/crc32c-intel.c - * - * Copyright (C) 2008 Intel Corporation - * Authors: Austin Zhang <austin_zhang@linux.intel.com> - * Kent Liu <kent.liu@intel.com> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/crc32.h> - -#include <crypto/internal/hash.h> - -#include <asm/pstate.h> -#include <asm/elf.h> -#include <linux/unaligned.h> - -#include "opcodes.h" - -/* - * Setting the seed allows arbitrary accumulators and flexible XOR policy - * If your algorithm starts with ~0, then XOR with ~0 before you set - * the seed. - */ -static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key, - unsigned int keylen) -{ - u32 *mctx = crypto_shash_ctx(hash); - - if (keylen != sizeof(u32)) - return -EINVAL; - *mctx = get_unaligned_le32(key); - return 0; -} - -static int crc32c_sparc64_init(struct shash_desc *desc) -{ - u32 *mctx = crypto_shash_ctx(desc->tfm); - u32 *crcp = shash_desc_ctx(desc); - - *crcp = *mctx; - - return 0; -} - -extern void crc32c_sparc64(u32 *crcp, const u64 *data, unsigned int len); - -static u32 crc32c_compute(u32 crc, const u8 *data, unsigned int len) -{ - unsigned int n = -(uintptr_t)data & 7; - - if (n) { - /* Data isn't 8-byte aligned. Align it. */ - n = min(n, len); - crc = __crc32c_le(crc, data, n); - data += n; - len -= n; - } - n = len & ~7U; - if (n) { - crc32c_sparc64(&crc, (const u64 *)data, n); - data += n; - len -= n; - } - if (len) - crc = __crc32c_le(crc, data, len); - return crc; -} - -static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - u32 *crcp = shash_desc_ctx(desc); - - *crcp = crc32c_compute(*crcp, data, len); - return 0; -} - -static int __crc32c_sparc64_finup(const u32 *crcp, const u8 *data, - unsigned int len, u8 *out) -{ - put_unaligned_le32(~crc32c_compute(*crcp, data, len), out); - return 0; -} - -static int crc32c_sparc64_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __crc32c_sparc64_finup(shash_desc_ctx(desc), data, len, out); -} - -static int crc32c_sparc64_final(struct shash_desc *desc, u8 *out) -{ - u32 *crcp = shash_desc_ctx(desc); - - put_unaligned_le32(~*crcp, out); - return 0; -} - -static int crc32c_sparc64_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __crc32c_sparc64_finup(crypto_shash_ctx(desc->tfm), data, len, - out); -} - -static int crc32c_sparc64_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = ~0; - - return 0; -} - -#define CHKSUM_BLOCK_SIZE 1 -#define CHKSUM_DIGEST_SIZE 4 - -static struct shash_alg alg = { - .setkey = crc32c_sparc64_setkey, - .init = crc32c_sparc64_init, - .update = crc32c_sparc64_update, - .final = crc32c_sparc64_final, - .finup = crc32c_sparc64_finup, - .digest = crc32c_sparc64_digest, - .descsize = sizeof(u32), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "crc32c", - .cra_driver_name = "crc32c-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(u32), - .cra_module = THIS_MODULE, - .cra_init = crc32c_sparc64_cra_init, - } -}; - -static bool __init sparc64_has_crc32c_opcode(void) -{ - unsigned long cfr; - - if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) - return false; - - __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); - if (!(cfr & CFR_CRC32C)) - 
return false; - - return true; -} - -static int __init crc32c_sparc64_mod_init(void) -{ - if (sparc64_has_crc32c_opcode()) { - pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n"); - return crypto_register_shash(&alg); - } - pr_info("sparc64 crc32c opcode not available.\n"); - return -ENODEV; -} - -static void __exit crc32c_sparc64_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -module_init(crc32c_sparc64_mod_init); -module_exit(crc32c_sparc64_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); - -MODULE_ALIAS_CRYPTO("crc32c"); - -#include "crop_devid.c" diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S index 7157468a679d..d534446cbef9 100644 --- a/arch/sparc/crypto/des_asm.S +++ b/arch/sparc/crypto/des_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - .align 32 ENTRY(des_sparc64_key_expand) /* %o0=input_key, %o1=output_key */ diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index a499102bf706..e50ec4cd57cd 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c @@ -16,11 +16,10 @@ #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> +#include <asm/opcodes.h> #include <asm/pstate.h> #include <asm/elf.h> -#include "opcodes.h" - struct des_sparc64_ctx { u64 encrypt_expkey[DES_EXPKEY_WORDS / 2]; u64 decrypt_expkey[DES_EXPKEY_WORDS / 2]; diff --git a/arch/sparc/crypto/md5_asm.S b/arch/sparc/crypto/md5_asm.S index 7a6637455f37..60b544e4d205 100644 --- a/arch/sparc/crypto/md5_asm.S +++ b/arch/sparc/crypto/md5_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - ENTRY(md5_sparc64_transform) /* %o0 = digest, %o1 = data, %o2 = rounds */ VISEntryHalf diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c index 511db98d590a..b3615f0cdf62 100644 --- a/arch/sparc/crypto/md5_glue.c +++ b/arch/sparc/crypto/md5_glue.c @@ -14,121 +14,104 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/elf.h> +#include <asm/opcodes.h> +#include <asm/pstate.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/md5.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> -#include <asm/pstate.h> -#include <asm/elf.h> - -#include "opcodes.h" +struct sparc_md5_state { + __le32 hash[MD5_HASH_WORDS]; + u64 byte_count; +}; -asmlinkage void md5_sparc64_transform(u32 *digest, const char *data, +asmlinkage void md5_sparc64_transform(__le32 *digest, const char *data, unsigned int rounds); static int md5_sparc64_init(struct shash_desc *desc) { - struct md5_state *mctx = shash_desc_ctx(desc); + struct sparc_md5_state *mctx = shash_desc_ctx(desc); - mctx->hash[0] = MD5_H0; - mctx->hash[1] = MD5_H1; - mctx->hash[2] = MD5_H2; - mctx->hash[3] = MD5_H3; - le32_to_cpu_array(mctx->hash, 4); + mctx->hash[0] = cpu_to_le32(MD5_H0); + mctx->hash[1] = cpu_to_le32(MD5_H1); + mctx->hash[2] = cpu_to_le32(MD5_H2); + mctx->hash[3] = cpu_to_le32(MD5_H3); mctx->byte_count = 0; return 0; } -static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data, - unsigned int len, unsigned int partial) -{ - unsigned int done = 0; - - sctx->byte_count += len; - 
if (partial) { - done = MD5_HMAC_BLOCK_SIZE - partial; - memcpy((u8 *)sctx->block + partial, data, done); - md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1); - } - if (len - done >= MD5_HMAC_BLOCK_SIZE) { - const unsigned int rounds = (len - done) / MD5_HMAC_BLOCK_SIZE; - - md5_sparc64_transform(sctx->hash, data + done, rounds); - done += rounds * MD5_HMAC_BLOCK_SIZE; - } - - memcpy(sctx->block, data + done, len - done); -} - static int md5_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct md5_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; - - /* Handle the fast case right here */ - if (partial + len < MD5_HMAC_BLOCK_SIZE) { - sctx->byte_count += len; - memcpy((u8 *)sctx->block + partial, data, len); - } else - __md5_sparc64_update(sctx, data, len, partial); + struct sparc_md5_state *sctx = shash_desc_ctx(desc); - return 0; + sctx->byte_count += round_down(len, MD5_HMAC_BLOCK_SIZE); + md5_sparc64_transform(sctx->hash, data, len / MD5_HMAC_BLOCK_SIZE); + return len - round_down(len, MD5_HMAC_BLOCK_SIZE); } /* Add padding and return the message digest. */ -static int md5_sparc64_final(struct shash_desc *desc, u8 *out) +static int md5_sparc64_finup(struct shash_desc *desc, const u8 *src, + unsigned int offset, u8 *out) { - struct md5_state *sctx = shash_desc_ctx(desc); - unsigned int i, index, padlen; - u32 *dst = (u32 *)out; - __le64 bits; - static const u8 padding[MD5_HMAC_BLOCK_SIZE] = { 0x80, }; - - bits = cpu_to_le64(sctx->byte_count << 3); - - /* Pad out to 56 mod 64 and append length */ - index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; - padlen = (index < 56) ? (56 - index) : ((MD5_HMAC_BLOCK_SIZE+56) - index); - - /* We need to fill a whole block for __md5_sparc64_update() */ - if (padlen <= 56) { - sctx->byte_count += padlen; - memcpy((u8 *)sctx->block + index, padding, padlen); - } else { - __md5_sparc64_update(sctx, padding, padlen, index); - } - __md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); + struct sparc_md5_state *sctx = shash_desc_ctx(desc); + __le64 block[MD5_BLOCK_WORDS] = {}; + u8 *p = memcpy(block, src, offset); + __le32 *dst = (__le32 *)out; + __le64 *pbits; + int i; + + src = p; + p += offset; + *p++ = 0x80; + sctx->byte_count += offset; + pbits = &block[(MD5_BLOCK_WORDS / (offset > 55 ? 
1 : 2)) - 1]; + *pbits = cpu_to_le64(sctx->byte_count << 3); + md5_sparc64_transform(sctx->hash, src, (pbits - block + 1) / 8); + memzero_explicit(block, sizeof(block)); /* Store state in digest */ for (i = 0; i < MD5_HASH_WORDS; i++) dst[i] = sctx->hash[i]; - /* Wipe context */ - memset(sctx, 0, sizeof(*sctx)); - return 0; } static int md5_sparc64_export(struct shash_desc *desc, void *out) { - struct md5_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); + struct sparc_md5_state *sctx = shash_desc_ctx(desc); + union { + u8 *u8; + u32 *u32; + u64 *u64; + } p = { .u8 = out }; + int i; + for (i = 0; i < MD5_HASH_WORDS; i++) + put_unaligned(le32_to_cpu(sctx->hash[i]), p.u32++); + put_unaligned(sctx->byte_count, p.u64); return 0; } static int md5_sparc64_import(struct shash_desc *desc, const void *in) { - struct md5_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); + struct sparc_md5_state *sctx = shash_desc_ctx(desc); + union { + const u8 *u8; + const u32 *u32; + const u64 *u64; + } p = { .u8 = in }; + int i; + for (i = 0; i < MD5_HASH_WORDS; i++) + sctx->hash[i] = cpu_to_le32(get_unaligned(p.u32++)); + sctx->byte_count = get_unaligned(p.u64); return 0; } @@ -136,15 +119,16 @@ static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = md5_sparc64_init, .update = md5_sparc64_update, - .final = md5_sparc64_final, + .finup = md5_sparc64_finup, .export = md5_sparc64_export, .import = md5_sparc64_import, - .descsize = sizeof(struct md5_state), - .statesize = sizeof(struct md5_state), + .descsize = sizeof(struct sparc_md5_state), + .statesize = sizeof(struct sparc_md5_state), .base = { .cra_name = "md5", .cra_driver_name= "md5-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/sparc/crypto/sha1_asm.S b/arch/sparc/crypto/sha1_asm.S index 7d8bf354f0e7..00b46bac1b08 100644 --- a/arch/sparc/crypto/sha1_asm.S +++ b/arch/sparc/crypto/sha1_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - ENTRY(sha1_sparc64_transform) /* %o0 = digest, %o1 = data, %o2 = rounds */ VISEntryHalf diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c index 06b7becfcb21..ef19d5023b1b 100644 --- a/arch/sparc/crypto/sha1_glue.c +++ b/arch/sparc/crypto/sha1_glue.c @@ -11,124 +11,44 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/elf.h> +#include <asm/opcodes.h> +#include <asm/pstate.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> +#include <linux/kernel.h> +#include <linux/module.h> -#include <asm/pstate.h> -#include <asm/elf.h> - -#include "opcodes.h" - -asmlinkage void sha1_sparc64_transform(u32 *digest, const char *data, - unsigned int rounds); - -static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data, - unsigned int len, unsigned int partial) -{ - unsigned int done = 0; - - sctx->count += len; - if (partial) { - done = SHA1_BLOCK_SIZE - partial; - memcpy(sctx->buffer + partial, data, done); - sha1_sparc64_transform(sctx->state, sctx->buffer, 1); - } - if (len - done >= SHA1_BLOCK_SIZE) { - const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; - - sha1_sparc64_transform(sctx->state, data + done, rounds); - done += rounds * 
SHA1_BLOCK_SIZE; - } - - memcpy(sctx->buffer, data + done, len - done); -} +asmlinkage void sha1_sparc64_transform(struct sha1_state *digest, + const u8 *data, int rounds); static int sha1_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; - - /* Handle the fast case right here */ - if (partial + len < SHA1_BLOCK_SIZE) { - sctx->count += len; - memcpy(sctx->buffer + partial, data, len); - } else - __sha1_sparc64_update(sctx, data, len, partial); - - return 0; + return sha1_base_do_update_blocks(desc, data, len, + sha1_sparc64_transform); } /* Add padding and return the message digest. */ -static int sha1_sparc64_final(struct shash_desc *desc, u8 *out) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - unsigned int i, index, padlen; - __be32 *dst = (__be32 *)out; - __be64 bits; - static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, }; - - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64 and append length */ - index = sctx->count % SHA1_BLOCK_SIZE; - padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index); - - /* We need to fill a whole block for __sha1_sparc64_update() */ - if (padlen <= 56) { - sctx->count += padlen; - memcpy(sctx->buffer + index, padding, padlen); - } else { - __sha1_sparc64_update(sctx, padding, padlen, index); - } - __sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); - - /* Store state in digest */ - for (i = 0; i < 5; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Wipe context */ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int sha1_sparc64_export(struct shash_desc *desc, void *out) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - - return 0; -} - -static int sha1_sparc64_import(struct shash_desc *desc, const void *in) +static int sha1_sparc64_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - - return 0; + sha1_base_do_finup(desc, src, len, sha1_sparc64_transform); + return sha1_base_finish(desc, out); } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = sha1_sparc64_update, - .final = sha1_sparc64_final, - .export = sha1_sparc64_export, - .import = sha1_sparc64_import, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = sha1_sparc64_finup, + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "sha1-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c deleted file mode 100644 index 285561a1cde5..000000000000 --- a/arch/sparc/crypto/sha256_glue.c +++ /dev/null @@ -1,210 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Glue code for SHA256 hashing optimized for sparc64 crypto opcodes. 
- * - * This is based largely upon crypto/sha256_generic.c - * - * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> - -#include <asm/pstate.h> -#include <asm/elf.h> - -#include "opcodes.h" - -asmlinkage void sha256_sparc64_transform(u32 *digest, const char *data, - unsigned int rounds); - -static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data, - unsigned int len, unsigned int partial) -{ - unsigned int done = 0; - - sctx->count += len; - if (partial) { - done = SHA256_BLOCK_SIZE - partial; - memcpy(sctx->buf + partial, data, done); - sha256_sparc64_transform(sctx->state, sctx->buf, 1); - } - if (len - done >= SHA256_BLOCK_SIZE) { - const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE; - - sha256_sparc64_transform(sctx->state, data + done, rounds); - done += rounds * SHA256_BLOCK_SIZE; - } - - memcpy(sctx->buf, data + done, len - done); -} - -static int sha256_sparc64_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; - - /* Handle the fast case right here */ - if (partial + len < SHA256_BLOCK_SIZE) { - sctx->count += len; - memcpy(sctx->buf + partial, data, len); - } else - __sha256_sparc64_update(sctx, data, len, partial); - - return 0; -} - -static int sha256_sparc64_final(struct shash_desc *desc, u8 *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - unsigned int i, index, padlen; - __be32 *dst = (__be32 *)out; - __be64 bits; - static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, }; - - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64 and append length */ - index = sctx->count % SHA256_BLOCK_SIZE; - padlen = (index < 56) ? 
(56 - index) : ((SHA256_BLOCK_SIZE+56) - index); - - /* We need to fill a whole block for __sha256_sparc64_update() */ - if (padlen <= 56) { - sctx->count += padlen; - memcpy(sctx->buf + index, padding, padlen); - } else { - __sha256_sparc64_update(sctx, padding, padlen, index); - } - __sha256_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); - - /* Store state in digest */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Wipe context */ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash) -{ - u8 D[SHA256_DIGEST_SIZE]; - - sha256_sparc64_final(desc, D); - - memcpy(hash, D, SHA224_DIGEST_SIZE); - memzero_explicit(D, SHA256_DIGEST_SIZE); - - return 0; -} - -static int sha256_sparc64_export(struct shash_desc *desc, void *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int sha256_sparc64_import(struct shash_desc *desc, const void *in) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; -} - -static struct shash_alg sha256_alg = { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = sha256_sparc64_update, - .final = sha256_sparc64_final, - .export = sha256_sparc64_export, - .import = sha256_sparc64_import, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "sha256-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static struct shash_alg sha224_alg = { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = sha256_sparc64_update, - .final = sha224_sparc64_final, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "sha224-sparc64", - .cra_priority = SPARC_CR_OPCODE_PRIORITY, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static bool __init sparc64_has_sha256_opcode(void) -{ - unsigned long cfr; - - if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) - return false; - - __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); - if (!(cfr & CFR_SHA256)) - return false; - - return true; -} - -static int __init sha256_sparc64_mod_init(void) -{ - if (sparc64_has_sha256_opcode()) { - int ret = crypto_register_shash(&sha224_alg); - if (ret < 0) - return ret; - - ret = crypto_register_shash(&sha256_alg); - if (ret < 0) { - crypto_unregister_shash(&sha224_alg); - return ret; - } - - pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n"); - return 0; - } - pr_info("sparc64 sha256 opcode not available.\n"); - return -ENODEV; -} - -static void __exit sha256_sparc64_mod_fini(void) -{ - crypto_unregister_shash(&sha224_alg); - crypto_unregister_shash(&sha256_alg); -} - -module_init(sha256_sparc64_mod_init); -module_exit(sha256_sparc64_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated"); - -MODULE_ALIAS_CRYPTO("sha224"); -MODULE_ALIAS_CRYPTO("sha256"); - -#include "crop_devid.c" diff --git a/arch/sparc/crypto/sha512_asm.S b/arch/sparc/crypto/sha512_asm.S index b2f6e6728802..9932b4fe1b59 100644 --- a/arch/sparc/crypto/sha512_asm.S +++ b/arch/sparc/crypto/sha512_asm.S @@ -1,9 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> 
-#include "opcodes.h" - ENTRY(sha512_sparc64_transform) /* %o0 = digest, %o1 = data, %o2 = rounds */ VISEntry diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c index d66efa4ec59a..47b9277b6877 100644 --- a/arch/sparc/crypto/sha512_glue.c +++ b/arch/sparc/crypto/sha512_glue.c @@ -10,115 +10,42 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <asm/elf.h> +#include <asm/opcodes.h> +#include <asm/pstate.h> #include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/types.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> - -#include <asm/pstate.h> -#include <asm/elf.h> - -#include "opcodes.h" +#include <linux/kernel.h> +#include <linux/module.h> asmlinkage void sha512_sparc64_transform(u64 *digest, const char *data, unsigned int rounds); -static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data, - unsigned int len, unsigned int partial) +static void sha512_block(struct sha512_state *sctx, const u8 *src, int blocks) { - unsigned int done = 0; - - if ((sctx->count[0] += len) < len) - sctx->count[1]++; - if (partial) { - done = SHA512_BLOCK_SIZE - partial; - memcpy(sctx->buf + partial, data, done); - sha512_sparc64_transform(sctx->state, sctx->buf, 1); - } - if (len - done >= SHA512_BLOCK_SIZE) { - const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE; - - sha512_sparc64_transform(sctx->state, data + done, rounds); - done += rounds * SHA512_BLOCK_SIZE; - } - - memcpy(sctx->buf, data + done, len - done); + sha512_sparc64_transform(sctx->state, src, blocks); } static int sha512_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha512_state *sctx = shash_desc_ctx(desc); - unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; - - /* Handle the fast case right here */ - if (partial + len < SHA512_BLOCK_SIZE) { - if ((sctx->count[0] += len) < len) - sctx->count[1]++; - memcpy(sctx->buf + partial, data, len); - } else - __sha512_sparc64_update(sctx, data, len, partial); - - return 0; + return sha512_base_do_update_blocks(desc, data, len, sha512_block); } -static int sha512_sparc64_final(struct shash_desc *desc, u8 *out) +static int sha512_sparc64_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { - struct sha512_state *sctx = shash_desc_ctx(desc); - unsigned int i, index, padlen; - __be64 *dst = (__be64 *)out; - __be64 bits[2]; - static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, }; - - /* Save number of bits */ - bits[1] = cpu_to_be64(sctx->count[0] << 3); - bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); - - /* Pad out to 112 mod 128 and append length */ - index = sctx->count[0] % SHA512_BLOCK_SIZE; - padlen = (index < 112) ? 
(112 - index) : ((SHA512_BLOCK_SIZE+112) - index); - - /* We need to fill a whole block for __sha512_sparc64_update() */ - if (padlen <= 112) { - if ((sctx->count[0] += padlen) < padlen) - sctx->count[1]++; - memcpy(sctx->buf + index, padding, padlen); - } else { - __sha512_sparc64_update(sctx, padding, padlen, index); - } - __sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112); - - /* Store state in digest */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be64(sctx->state[i]); - - /* Wipe context */ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash) -{ - u8 D[64]; - - sha512_sparc64_final(desc, D); - - memcpy(hash, D, 48); - memzero_explicit(D, 64); - - return 0; + sha512_base_do_finup(desc, src, len, sha512_block); + return sha512_base_finish(desc, out); } static struct shash_alg sha512 = { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_sparc64_update, - .final = sha512_sparc64_final, - .descsize = sizeof(struct sha512_state), + .finup = sha512_sparc64_finup, + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name= "sha512-sparc64", @@ -132,8 +59,8 @@ static struct shash_alg sha384 = { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_sparc64_update, - .final = sha384_sparc64_final, - .descsize = sizeof(struct sha512_state), + .finup = sha512_sparc64_finup, + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name= "sha384-sparc64", diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 43b0ae4c2c21..17ee8a273aa6 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -4,3 +4,4 @@ generated-y += syscall_table_64.h generic-y += agp.h generic-y += kvm_para.h generic-y += mcs_spinlock.h +generic-y += text-patching.h diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index c714ca6a05aa..e7a9cdd498dc 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); + pte_t *ptep, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/sparc/include/asm/hvtramp.h b/arch/sparc/include/asm/hvtramp.h index 688ea43af0f5..ce2453ea4f2b 100644 --- a/arch/sparc/include/asm/hvtramp.h +++ b/arch/sparc/include/asm/hvtramp.h @@ -17,7 +17,7 @@ struct hvtramp_descr { __u64 fault_info_va; __u64 fault_info_pa; __u64 thread_reg; - struct hvtramp_mapping maps[1]; + struct hvtramp_mapping maps[]; }; void hv_cpu_startup(unsigned long hvdescr_pa); diff --git a/arch/sparc/crypto/opcodes.h b/arch/sparc/include/asm/opcodes.h index 417b6a10a337..ebfda6eb49b2 100644 --- a/arch/sparc/crypto/opcodes.h +++ b/arch/sparc/include/asm/opcodes.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _OPCODES_H -#define _OPCODES_H +#ifndef _SPARC_ASM_OPCODES_H +#define _SPARC_ASM_OPCODES_H #define SPARC_CR_OPCODE_PRIORITY 300 @@ -97,4 +97,4 @@ #define MOVXTOD_G7_F62 \ .word 0xbfb02307; -#endif /* _OPCODES_H */ +#endif /* _SPARC_ASM_OPCODES_H */ diff --git a/arch/sparc/include/asm/page.h b/arch/sparc/include/asm/page.h index 5e44cdf2a8f2..1a00cc0a1893 100644 --- a/arch/sparc/include/asm/page.h +++ b/arch/sparc/include/asm/page.h @@ 
-2,8 +2,6 @@ #ifndef ___ASM_SPARC_PAGE_H #define ___ASM_SPARC_PAGE_H -#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) - #if defined(__sparc__) && defined(__arch64__) #include <asm/page_64.h> #else diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h index 9977c77374cd..9954254ea569 100644 --- a/arch/sparc/include/asm/page_32.h +++ b/arch/sparc/include/asm/page_32.h @@ -11,9 +11,7 @@ #include <linux/const.h> -#define PAGE_SHIFT CONFIG_PAGE_SHIFT -#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) +#include <vdso/page.h> #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index e9bd24821c93..2a68ff5b6eab 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -4,9 +4,7 @@ #include <linux/const.h> -#define PAGE_SHIFT CONFIG_PAGE_SHIFT -#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) +#include <vdso/page.h> /* Flushing for D-cache alias handling is only needed if * the page size is smaller than 16K. diff --git a/arch/sparc/include/asm/parport_64.h b/arch/sparc/include/asm/parport_64.h index 4f530a270760..3068809ef9ad 100644 --- a/arch/sparc/include/asm/parport_64.h +++ b/arch/sparc/include/asm/parport_64.h @@ -243,7 +243,7 @@ static struct platform_driver ecpp_driver = { .of_match_table = ecpp_match, }, .probe = ecpp_probe, - .remove_new = ecpp_remove, + .remove = ecpp_remove, }; static int parport_pc_find_nonpci_ports(int autoirq, int autodma) diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 62bcafe38b1f..7c199c003ffe 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -255,7 +255,11 @@ static inline pte_t pte_mkyoung(pte_t pte) } #define PFN_PTE_SHIFT (PAGE_SHIFT - 4) -#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot) + +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) +{ + return __pte((pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot)); +} static inline unsigned long pte_pfn(pte_t pte) { @@ -272,15 +276,6 @@ static inline unsigned long pte_pfn(pte_t pte) #define pte_page(pte) pfn_to_page(pte_pfn(pte)) -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. 
- */ -static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) -{ - return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); -} - static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot) { return __pte(((page) >> 4) | pgprot_val(pgprot)); @@ -353,7 +348,7 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -static inline int pte_swp_exclusive(pte_t pte) +static inline bool pte_swp_exclusive(pte_t pte) { return pte_val(pte) & SRMMU_SWP_EXCLUSIVE; } diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 2b7f358762c1..669cd02469a1 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -225,7 +225,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL); return __pte(paddr | pgprot_val(prot)); } -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) @@ -234,7 +233,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) return __pmd(pte_val(pte)); } -#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) #endif /* This one can be done with two shifts. */ @@ -936,7 +934,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { - arch_enter_lazy_mmu_mode(); for (;;) { __set_pte_at(mm, addr, ptep, pte, 0); if (--nr == 0) @@ -945,7 +942,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_val(pte) += PAGE_SIZE; addr += PAGE_SIZE; } - arch_leave_lazy_mmu_mode(); } #define set_ptes set_ptes @@ -1027,7 +1023,7 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -static inline int pte_swp_exclusive(pte_t pte) +static inline bool pte_swp_exclusive(pte_t pte) { return pte_val(pte) & _PAGE_SWP_EXCLUSIVE; } diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h index 20c109ac8cc9..b0233924d323 100644 --- a/arch/sparc/include/asm/syscall.h +++ b/arch/sparc/include/asm/syscall.h @@ -25,6 +25,18 @@ static inline long syscall_get_nr(struct task_struct *task, return (syscall_p ? regs->u_regs[UREG_G1] : -1L); } +static inline void syscall_set_nr(struct task_struct *task, + struct pt_regs *regs, + int nr) +{ + /* + * Unlike syscall_get_nr(), syscall_set_nr() can be called only when + * the target task is stopped for tracing on entering syscall, so + * there is no need to have the same check syscall_get_nr() has. 
+ */ + regs->u_regs[UREG_G1] = nr; +} + static inline void syscall_rollback(struct task_struct *task, struct pt_regs *regs) { @@ -117,6 +129,16 @@ static inline void syscall_get_arguments(struct task_struct *task, } } +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + unsigned int i; + + for (i = 0; i < 6; i++) + regs->u_regs[UREG_I0 + i] = args[i]; +} + static inline int syscall_get_arch(struct task_struct *task) { #if defined(CONFIG_SPARC64) && defined(CONFIG_COMPAT) diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h index 3037187482db..1a6e694418e3 100644 --- a/arch/sparc/include/asm/tlb_64.h +++ b/arch/sparc/include/asm/tlb_64.h @@ -33,6 +33,7 @@ void flush_tlb_pending(void); #define tlb_needs_table_invalidate() (false) #endif +#define __HAVE_ARCH_TLB_REMOVE_TABLE #include <asm-generic/tlb.h> #endif /* _SPARC64_TLB_H */ diff --git a/arch/sparc/include/asm/vga.h b/arch/sparc/include/asm/vga.h deleted file mode 100644 index 2952d667d936..000000000000 --- a/arch/sparc/include/asm/vga.h +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Access to VGA videoram - * - * (c) 1998 Martin Mares <mj@ucw.cz> - */ - -#ifndef _LINUX_ASM_VGA_H_ -#define _LINUX_ASM_VGA_H_ - -#include <linux/bug.h> -#include <linux/string.h> -#include <asm/types.h> - -#define VT_BUF_HAVE_RW -#define VT_BUF_HAVE_MEMSETW -#define VT_BUF_HAVE_MEMCPYW -#define VT_BUF_HAVE_MEMMOVEW - -#undef scr_writew -#undef scr_readw - -static inline void scr_writew(u16 val, u16 *addr) -{ - BUG_ON((long) addr >= 0); - - *addr = val; -} - -static inline u16 scr_readw(const u16 *addr) -{ - BUG_ON((long) addr >= 0); - - return *addr; -} - -static inline void scr_memsetw(u16 *p, u16 v, unsigned int n) -{ - BUG_ON((long) p >= 0); - - memset16(p, cpu_to_le16(v), n / 2); -} - -static inline void scr_memcpyw(u16 *d, u16 *s, unsigned int n) -{ - BUG_ON((long) d >= 0); - - memcpy(d, s, n); -} - -static inline void scr_memmovew(u16 *d, u16 *s, unsigned int n) -{ - BUG_ON((long) d >= 0); - - memmove(d, s, n); -} - -#define VGA_MAP_MEM(x,s) (x) - -#endif diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 57084ed2f3c4..adcba7329386 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -139,6 +139,12 @@ #define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF #define SO_DEVMEM_DONTNEED 0x0059 +#define SCM_TS_OPT_ID 0x005a + +#define SO_RCVPRIORITY 0x005b + +#define SO_PASSRIGHTS 0x005c + #if !defined(__KERNEL__) diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 58ea4ef9b622..36f2727e1445 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -9,7 +9,7 @@ asflags-y := -ansi # Undefine sparc when processing vmlinux.lds - it is used # And teach CPP we are doing $(BITS) builds (for this case) CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS) -extra-y += vmlinux.lds +always-$(KBUILD_BUILTIN) += vmlinux.lds ifdef CONFIG_FUNCTION_TRACER # Do not profile debug and lowlevel utilities @@ -35,6 +35,7 @@ obj-y += process.o obj-y += signal_$(BITS).o obj-y += sigutil_$(BITS).o obj-$(CONFIG_SPARC32) += ioport.o +obj-y += setup.o obj-y += setup_$(BITS).o obj-y += idprom.o obj-y += sys_sparc_$(BITS).o diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c index e02074062001..d4c74d6b2e1b 100644 --- a/arch/sparc/kernel/chmc.c +++ b/arch/sparc/kernel/chmc.c @@ -814,7 +814,7 @@ static struct platform_driver us3mc_driver = { 
.of_match_table = us3mc_match, }, .probe = us3mc_probe, - .remove_new = us3mc_remove, + .remove = us3mc_remove, }; static inline bool us3mc_platform(void) diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c index 8605dd710f3c..5210991429d5 100644 --- a/arch/sparc/kernel/irq_32.c +++ b/arch/sparc/kernel/irq_32.c @@ -199,18 +199,18 @@ int arch_show_interrupts(struct seq_file *p, int prec) int j; #ifdef CONFIG_SMP - seq_printf(p, "RES: "); + seq_printf(p, "RES:"); for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_data(j).irq_resched_count); + seq_put_decimal_ull_width(p, " ", cpu_data(j).irq_resched_count, 10); seq_printf(p, " IPI rescheduling interrupts\n"); - seq_printf(p, "CAL: "); + seq_printf(p, "CAL:"); for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_data(j).irq_call_count); + seq_put_decimal_ull_width(p, " ", cpu_data(j).irq_call_count, 10); seq_printf(p, " IPI function call interrupts\n"); #endif - seq_printf(p, "NMI: "); + seq_printf(p, "NMI:"); for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_data(j).counter); + seq_put_decimal_ull_width(p, " ", cpu_data(j).counter, 10); seq_printf(p, " Non-maskable interrupts\n"); return 0; } diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 01ee800efde3..ded463c82abd 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -22,6 +22,7 @@ #include <linux/seq_file.h> #include <linux/ftrace.h> #include <linux/irq.h> +#include <linux/string_choices.h> #include <asm/ptrace.h> #include <asm/processor.h> @@ -145,9 +146,7 @@ static int hv_irq_version; */ static bool sun4v_cookie_only_virqs(void) { - if (hv_irq_version >= 3) - return true; - return false; + return hv_irq_version >= 3; } static void __init irq_init_hv(void) @@ -170,7 +169,7 @@ static void __init irq_init_hv(void) pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n", hv_irq_version, - sun4v_cookie_only_virqs() ? 
"enabled" : "disabled"); + str_enabled_disabled(sun4v_cookie_only_virqs())); } /* This function is for the timer interrupt.*/ @@ -304,9 +303,9 @@ int arch_show_interrupts(struct seq_file *p, int prec) { int j; - seq_printf(p, "NMI: "); + seq_printf(p, "NMI:"); for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_data(j).__nmi_count); + seq_put_decimal_ull_width(p, " ", cpu_data(j).__nmi_count, 10); seq_printf(p, " Non-maskable interrupts\n"); return 0; } diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c index ab657b359789..f4fb82b019bb 100644 --- a/arch/sparc/kernel/led.c +++ b/arch/sparc/kernel/led.c @@ -84,7 +84,7 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer, /* before we change anything we want to stop any running timers, * otherwise calls such as on will have no persistent effect */ - del_timer_sync(&led_blink_timer); + timer_delete_sync(&led_blink_timer); if (!strcmp(buf, "on")) { auxio_set_led(AUXIO_LED_ON); @@ -134,7 +134,7 @@ static int __init led_init(void) static void __exit led_exit(void) { remove_proc_entry("led", NULL); - del_timer_sync(&led_blink_timer); + timer_delete_sync(&led_blink_timer); } module_init(led_init); diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 50a0927a84a6..ddac216a2aff 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -932,7 +932,7 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) { const struct pci_slot_names { u32 slot_mask; - char names[0]; + char names[]; } *prop; const char *sp; int len, i; diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index 5eeec9ad6845..2576f4f31309 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c @@ -361,7 +361,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm) int i, saw_mem, saw_io; int num_pbm_ranges; - /* Corresponding generic code in of_pci_get_host_bridge_resources() */ + /* Corresponds to generic devm_of_pci_get_host_bridge_resources() */ saw_mem = saw_io = 0; pbm_ranges = of_get_property(pbm->op->dev.of_node, "ranges", &i); diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index f02a283a8e8f..cae4d33002a5 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1668,8 +1668,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, if (!sparc_perf_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, &data, regs)) - sparc_pmu_stop(event, 0); + perf_event_overflow(event, &data, regs); } finish_clock = sched_clock(); diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index 3df960c137f7..a67dd67f10c8 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -28,9 +28,7 @@ void * __init prom_early_alloc(unsigned long size) { void *ret; - ret = memblock_alloc(size, SMP_CACHE_BYTES); - if (!ret) - panic("%s: Failed to allocate %lu bytes\n", __func__, size); + ret = memblock_alloc_or_panic(size, SMP_CACHE_BYTES); prom_early_allocated += size; diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c new file mode 100644 index 000000000000..4975867d9001 --- /dev/null +++ b/arch/sparc/kernel/setup.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <asm/setup.h> +#include <linux/sysctl.h> + +static const struct ctl_table sparc_sysctl_table[] = { + { + .procname = "reboot-cmd", + .data = reboot_command, + .maxlen = 256, + .mode = 0644, + .proc_handler = proc_dostring, + }, + { + 
.procname = "stop-a", + .data = &stop_a_enabled, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "scons-poweroff", + .data = &scons_pwroff, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, +#ifdef CONFIG_SPARC64 + { + .procname = "tsb-ratio", + .data = &sysctl_tsb_ratio, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, +#endif +}; + + +static int __init init_sparc_sysctls(void) +{ + register_sysctl_init("kernel", sparc_sysctl_table); + return 0; +} + +arch_initcall(init_sparc_sysctls); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index e40c395db202..5cbd6ed5ef6f 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -297,9 +297,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, unsigned long hv_err; int i; - hdesc = kzalloc(sizeof(*hdesc) + - (sizeof(struct hvtramp_mapping) * - num_kernel_image_mappings - 1), + hdesc = kzalloc(struct_size(hdesc, maps, num_kernel_image_mappings), GFP_KERNEL); if (!hdesc) { printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate " diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 80822f922e76..fb31bc0c5b48 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -23,6 +23,7 @@ #include <linux/utsname.h> #include <linux/smp.h> #include <linux/ipc.h> +#include <linux/hugetlb.h> #include <linux/uaccess.h> #include <asm/unistd.h> @@ -42,12 +43,16 @@ SYSCALL_DEFINE0(getpagesize) unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { struct vm_unmapped_area_info info = {}; + bool file_hugepage = false; + + if (filp && is_file_hugepages(filp)) + file_hugepage = true; if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ - if ((flags & MAP_SHARED) && + if (!file_hugepage && (flags & MAP_SHARED) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -62,9 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi info.length = len; info.low_limit = addr; info.high_limit = TASK_SIZE; - info.align_mask = (flags & MAP_SHARED) ? - (PAGE_MASK & (SHMLBA - 1)) : 0; - info.align_offset = pgoff << PAGE_SHIFT; + if (!file_hugepage) { + info.align_mask = (flags & MAP_SHARED) ? 
+ (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + } else { + info.align_mask = huge_page_mask_align(filp); + } return vm_unmapped_area(&info); } diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index acade309dc2f..c5a284df7b41 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -30,6 +30,7 @@ #include <linux/context_tracking.h> #include <linux/timex.h> #include <linux/uaccess.h> +#include <linux/hugetlb.h> #include <asm/utrap.h> #include <asm/unistd.h> @@ -87,6 +88,16 @@ static inline unsigned long COLOR_ALIGN(unsigned long addr, return base + off; } +static unsigned long get_align_mask(struct file *filp, unsigned long flags) +{ + if (filp && is_file_hugepages(filp)) + return huge_page_mask_align(filp); + if (filp || (flags & MAP_SHARED)) + return PAGE_MASK & (SHMLBA - 1); + + return 0; +} + unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; @@ -94,12 +105,16 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi unsigned long task_size = TASK_SIZE; int do_color_align; struct vm_unmapped_area_info info = {}; + bool file_hugepage = false; + + if (filp && is_file_hugepages(filp)) + file_hugepage = true; if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ - if ((flags & MAP_SHARED) && + if (!file_hugepage && (flags & MAP_SHARED) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -111,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi return -ENOMEM; do_color_align = 0; - if (filp || (flags & MAP_SHARED)) + if ((filp || (flags & MAP_SHARED)) && !file_hugepage) do_color_align = 1; if (addr) { @@ -129,8 +144,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi info.length = len; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = min(task_size, VA_EXCLUDE_START); - info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; - info.align_offset = pgoff << PAGE_SHIFT; + info.align_mask = get_align_mask(filp, flags); + if (!file_hugepage) + info.align_offset = pgoff << PAGE_SHIFT; addr = vm_unmapped_area(&info); if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { @@ -154,15 +170,19 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, unsigned long addr = addr0; int do_color_align; struct vm_unmapped_area_info info = {}; + bool file_hugepage = false; /* This should only ever run for 32-bit processes. */ BUG_ON(!test_thread_flag(TIF_32BIT)); + if (filp && is_file_hugepages(filp)) + file_hugepage = true; + if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate * cache aliasing constraints. 
*/ - if ((flags & MAP_SHARED) && + if (!file_hugepage && (flags & MAP_SHARED) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -172,7 +192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, return -ENOMEM; do_color_align = 0; - if (filp || (flags & MAP_SHARED)) + if ((filp || (flags & MAP_SHARED)) && !file_hugepage) do_color_align = 1; /* requesting a specific address */ @@ -192,8 +212,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.length = len; info.low_limit = PAGE_SIZE; info.high_limit = mm->mmap_base; - info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; - info.align_offset = pgoff << PAGE_SHIFT; + info.align_mask = get_align_mask(filp, flags); + if (!file_hugepage) + info.align_offset = pgoff << PAGE_SHIFT; addr = vm_unmapped_area(&info); /* diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index cfdfb3707c16..83e45eb6c095 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -508,3 +508,8 @@ 460 common lsm_set_self_attr sys_lsm_set_self_attr 461 common lsm_list_modules sys_lsm_list_modules 462 common mseal sys_mseal +463 common setxattrat sys_setxattrat +464 common getxattrat sys_getxattrat +465 common listxattrat sys_listxattrat +466 common removexattrat sys_removexattrat +467 common open_tree_attr sys_open_tree_attr diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 08bbdc458596..578fd0d49f30 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -255,6 +255,7 @@ static void mostek_write_byte(struct device *dev, u32 ofs, u8 val) static struct m48t59_plat_data m48t59_data = { .read_byte = mostek_read_byte, .write_byte = mostek_write_byte, + .yy_offset = 68, }; /* resource is set at runtime */ diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 60f1c8cc5363..b32f27f929d1 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -544,6 +544,7 @@ static void mostek_write_byte(struct device *dev, u32 ofs, u8 val) static struct m48t59_plat_data m48t59_data = { .read_byte = mostek_read_byte, .write_byte = mostek_write_byte, + .yy_offset = 68, }; static struct platform_device m48t59_rtc = { diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 07933d75ac81..1a1a9d6b8f2e 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -419,13 +419,13 @@ struct vio_remove_node_data { u64 node; }; -static int vio_md_node_match(struct device *dev, void *arg) +static int vio_md_node_match(struct device *dev, const void *arg) { struct vio_dev *vdev = to_vio_dev(dev); - struct vio_remove_node_data *node_data; + const struct vio_remove_node_data *node_data; u64 node; - node_data = (struct vio_remove_node_data *)arg; + node_data = (const struct vio_remove_node_data *)arg; node = vio_vdev_node(node_data->hp, vdev); diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index e27afd233bf5..8fb2e7ca5015 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -804,7 +804,7 @@ EXPORT_SYMBOL(vio_port_up); static void vio_port_timer(struct timer_list *t) { - struct vio_driver_state *vio = from_timer(vio, t, timer); + struct vio_driver_state *vio = timer_container_of(vio, t, timer); vio_port_up(vio); } diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index d317a843f7ea..f1b86eb30340 100644 --- a/arch/sparc/kernel/vmlinux.lds.S 
+++ b/arch/sparc/kernel/vmlinux.lds.S @@ -48,6 +48,11 @@ SECTIONS { _text = .; HEAD_TEXT + ALIGN_FUNCTION(); +#ifdef CONFIG_SPARC64 + /* Match text section symbols in head_64.S first */ + *head_64.o(.text) +#endif TEXT_TEXT SCHED_TEXT LOCK_TEXT diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index ee5091dd67ed..5cf9781d68b4 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -4,6 +4,7 @@ asflags-y := -ansi -DST_DIV0=0x02 +obj-y += crypto/ lib-$(CONFIG_SPARC32) += ashrdi3.o lib-$(CONFIG_SPARC32) += memcpy.o memset.o lib-y += strlen.o @@ -53,3 +54,5 @@ lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o obj-$(CONFIG_SPARC64) += iomap.o obj-$(CONFIG_SPARC32) += atomic32.o obj-$(CONFIG_SPARC64) += PeeCeeI.o +obj-$(CONFIG_CRC32_ARCH) += crc32-sparc.o +crc32-sparc-y := crc32.o crc32c_asm.o diff --git a/arch/sparc/lib/crc32.c b/arch/sparc/lib/crc32.c new file mode 100644 index 000000000000..40d4720a42a1 --- /dev/null +++ b/arch/sparc/lib/crc32.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* CRC32c (Castagnoli), sparc64 crc32c opcode accelerated + * + * This is based largely upon arch/x86/crypto/crc32c-intel.c + * + * Copyright (C) 2008 Intel Corporation + * Authors: Austin Zhang <austin_zhang@linux.intel.com> + * Kent Liu <kent.liu@intel.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/crc32.h> +#include <asm/pstate.h> +#include <asm/elf.h> + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32c_opcode); + +u32 crc32_le_arch(u32 crc, const u8 *data, size_t len) +{ + return crc32_le_base(crc, data, len); +} +EXPORT_SYMBOL(crc32_le_arch); + +void crc32c_sparc64(u32 *crcp, const u64 *data, size_t len); + +u32 crc32c_arch(u32 crc, const u8 *data, size_t len) +{ + size_t n = -(uintptr_t)data & 7; + + if (!static_branch_likely(&have_crc32c_opcode)) + return crc32c_base(crc, data, len); + + if (n) { + /* Data isn't 8-byte aligned. Align it. 
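The "-(uintptr_t)data & 7" expression in crc32c_arch() above is the usual idiom for the number of bytes separating a pointer from the next 8-byte boundary (0 when already aligned), which is what lets the head be fed to the generic routine and the aligned middle to the opcode-accelerated one. A small stand-alone illustration, independent of the kernel code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same idiom as "n = -(uintptr_t)data & 7": bytes needed to reach the
 * next 8-byte boundary, 0 if the address is already aligned. */
static size_t bytes_to_align8(uintptr_t addr)
{
	return -addr & 7;
}

int main(void)
{
	for (uintptr_t a = 0x1000; a < 0x1008; a++)
		printf("%#lx -> %zu byte(s) to 8-byte alignment\n",
		       (unsigned long)a, bytes_to_align8(a));
	return 0;
}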
*/ + n = min(n, len); + crc = crc32c_base(crc, data, n); + data += n; + len -= n; + } + n = len & ~7U; + if (n) { + crc32c_sparc64(&crc, (const u64 *)data, n); + data += n; + len -= n; + } + if (len) + crc = crc32c_base(crc, data, len); + return crc; +} +EXPORT_SYMBOL(crc32c_arch); + +u32 crc32_be_arch(u32 crc, const u8 *data, size_t len) +{ + return crc32_be_base(crc, data, len); +} +EXPORT_SYMBOL(crc32_be_arch); + +static int __init crc32_sparc_init(void) +{ + unsigned long cfr; + + if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) + return 0; + + __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); + if (!(cfr & CFR_CRC32C)) + return 0; + + static_branch_enable(&have_crc32c_opcode); + pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n"); + return 0; +} +subsys_initcall(crc32_sparc_init); + +static void __exit crc32_sparc_exit(void) +{ +} +module_exit(crc32_sparc_exit); + +u32 crc32_optimizations(void) +{ + if (static_key_enabled(&have_crc32c_opcode)) + return CRC32C_OPTIMIZATION; + return 0; +} +EXPORT_SYMBOL(crc32_optimizations); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); diff --git a/arch/sparc/crypto/crc32c_asm.S b/arch/sparc/lib/crc32c_asm.S index b8659a479242..4db873850f44 100644 --- a/arch/sparc/crypto/crc32c_asm.S +++ b/arch/sparc/lib/crc32c_asm.S @@ -1,10 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> #include <asm/asi.h> -#include "opcodes.h" - ENTRY(crc32c_sparc64) /* %o0=crc32p, %o1=data_ptr, %o2=len */ VISEntryHalf diff --git a/arch/sparc/lib/crypto/Kconfig b/arch/sparc/lib/crypto/Kconfig new file mode 100644 index 000000000000..e5c3e4d3dba6 --- /dev/null +++ b/arch/sparc/lib/crypto/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_SHA256_SPARC64 + tristate + depends on SPARC64 + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 + select CRYPTO_LIB_SHA256_GENERIC diff --git a/arch/sparc/lib/crypto/Makefile b/arch/sparc/lib/crypto/Makefile new file mode 100644 index 000000000000..75ee244ad6f7 --- /dev/null +++ b/arch/sparc/lib/crypto/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_SHA256_SPARC64) += sha256-sparc64.o +sha256-sparc64-y := sha256.o sha256_asm.o diff --git a/arch/sparc/lib/crypto/sha256.c b/arch/sparc/lib/crypto/sha256.c new file mode 100644 index 000000000000..8bdec2db08b3 --- /dev/null +++ b/arch/sparc/lib/crypto/sha256.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SHA-256 accelerated using the sparc64 sha256 opcodes + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <asm/elf.h> +#include <asm/opcodes.h> +#include <asm/pstate.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> + +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha256_opcodes); + +asmlinkage void sha256_sparc64_transform(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks); + +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + if (static_branch_likely(&have_sha256_opcodes)) + sha256_sparc64_transform(state, data, nblocks); + else + sha256_blocks_generic(state, data, 
nblocks); +} +EXPORT_SYMBOL_GPL(sha256_blocks_arch); + +bool sha256_is_arch_optimized(void) +{ + return static_key_enabled(&have_sha256_opcodes); +} +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); + +static int __init sha256_sparc64_mod_init(void) +{ + unsigned long cfr; + + if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) + return 0; + + __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); + if (!(cfr & CFR_SHA256)) + return 0; + + static_branch_enable(&have_sha256_opcodes); + pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n"); + return 0; +} +subsys_initcall(sha256_sparc64_mod_init); + +static void __exit sha256_sparc64_mod_exit(void) +{ +} +module_exit(sha256_sparc64_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SHA-256 accelerated using the sparc64 sha256 opcodes"); diff --git a/arch/sparc/crypto/sha256_asm.S b/arch/sparc/lib/crypto/sha256_asm.S index 0b39ec7d7ca2..ddcdd3daf31e 100644 --- a/arch/sparc/crypto/sha256_asm.S +++ b/arch/sparc/lib/crypto/sha256_asm.S @@ -1,11 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/linkage.h> +#include <asm/opcodes.h> #include <asm/visasm.h> -#include "opcodes.h" - ENTRY(sha256_sparc64_transform) - /* %o0 = digest, %o1 = data, %o2 = rounds */ + /* %o0 = state, %o1 = data, %o2 = nblocks */ VISEntryHalf ld [%o0 + 0x00], %f0 ld [%o0 + 0x04], %f1 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index cc91ca7a1e18..80504148d8a5 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -19,114 +19,6 @@ #include <asm/cacheflush.h> #include <asm/mmu_context.h> -/* Slightly simplified from the non-hugepage variant because by - * definition we don't have to worry about any page coloring stuff - */ - -static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, - unsigned long addr, - unsigned long len, - unsigned long pgoff, - unsigned long flags) -{ - struct hstate *h = hstate_file(filp); - unsigned long task_size = TASK_SIZE; - struct vm_unmapped_area_info info = {}; - - if (test_thread_flag(TIF_32BIT)) - task_size = STACK_TOP32; - - info.length = len; - info.low_limit = TASK_UNMAPPED_BASE; - info.high_limit = min(task_size, VA_EXCLUDE_START); - info.align_mask = PAGE_MASK & ~huge_page_mask(h); - addr = vm_unmapped_area(&info); - - if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { - VM_BUG_ON(addr != -ENOMEM); - info.low_limit = VA_EXCLUDE_END; - info.high_limit = task_size; - addr = vm_unmapped_area(&info); - } - - return addr; -} - -static unsigned long -hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, - const unsigned long pgoff, - const unsigned long flags) -{ - struct hstate *h = hstate_file(filp); - struct mm_struct *mm = current->mm; - unsigned long addr = addr0; - struct vm_unmapped_area_info info = {}; - - /* This should only ever run for 32-bit processes. */ - BUG_ON(!test_thread_flag(TIF_32BIT)); - - info.flags = VM_UNMAPPED_AREA_TOPDOWN; - info.length = len; - info.low_limit = PAGE_SIZE; - info.high_limit = mm->mmap_base; - info.align_mask = PAGE_MASK & ~huge_page_mask(h); - addr = vm_unmapped_area(&info); - - /* - * A failed mmap() very likely causes application failure, - * so fall back to the bottom-up function here. This scenario - * can happen with large stack limits and large mmap() - * allocations. 
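The two new lib/ files above (crc32.c and sha256.c) share the same shape: probe the CPU once at boot, flip a static key if the opcode is available, and let the hot path fall back to the generic C implementation otherwise. A condensed kernel-style sketch of that pattern; the feature probe and the fast/slow helpers here are placeholders, not the actual sparc64 symbols:

// SPDX-License-Identifier: GPL-2.0-only
/* Sketch of the boot-time probe + static-key dispatch used above. */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/types.h>

static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_fast_op);

/* Placeholders standing in for the HWCAP/CFR probe and the two paths. */
bool cpu_has_fast_op(void);
u32 fast_op(u32 x);
u32 generic_op(u32 x);

u32 dispatch_op(u32 x)
{
	/* Patched into a direct branch once the key is enabled. */
	if (static_branch_likely(&have_fast_op))
		return fast_op(x);
	return generic_op(x);
}

static int __init probe_fast_op(void)
{
	if (cpu_has_fast_op())
		static_branch_enable(&have_fast_op);
	return 0;
}
subsys_initcall(probe_fast_op);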
- */ - if (addr & ~PAGE_MASK) { - VM_BUG_ON(addr != -ENOMEM); - info.flags = 0; - info.low_limit = TASK_UNMAPPED_BASE; - info.high_limit = STACK_TOP32; - addr = vm_unmapped_area(&info); - } - - return addr; -} - -unsigned long -hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) -{ - struct hstate *h = hstate_file(file); - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long task_size = TASK_SIZE; - - if (test_thread_flag(TIF_32BIT)) - task_size = STACK_TOP32; - - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (len > task_size) - return -ENOMEM; - - if (flags & MAP_FIXED) { - if (prepare_hugepage_range(file, addr, len)) - return -EINVAL; - return addr; - } - - if (addr) { - addr = ALIGN(addr, huge_page_size(h)); - vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vm_start_gap(vma))) - return addr; - } - if (!test_bit(MMF_TOPDOWN, &mm->flags)) - return hugetlb_get_unmapped_area_bottomup(file, addr, len, - pgoff, flags); - else - return hugetlb_get_unmapped_area_topdown(file, addr, len, - pgoff, flags); -} static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift) { @@ -368,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { unsigned int i, nptes, orig_shift, shift; unsigned long size; diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index d96a14ffceeb..fdc93dd12c3e 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -232,19 +232,7 @@ static void __init taint_real_pages(void) } } -static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) -{ - unsigned long tmp; - -#ifdef CONFIG_DEBUG_HIGHMEM - printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); -#endif - - for (tmp = start_pfn; tmp < end_pfn; tmp++) - free_highmem_page(pfn_to_page(tmp)); -} - -void __init mem_init(void) +void __init arch_mm_preinit(void) { int i; @@ -274,23 +262,6 @@ void __init mem_init(void) memset(sparc_valid_addr_bitmap, 0, i << 2); taint_real_pages(); - - max_mapnr = last_valid_pfn - pfn_base; - high_memory = __va(max_low_pfn << PAGE_SHIFT); - memblock_free_all(); - - for (i = 0; sp_banks[i].num_bytes != 0; i++) { - unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; - unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; - - if (end_pfn <= highstart_pfn) - continue; - - if (start_pfn < highstart_pfn) - start_pfn = highstart_pfn; - - map_high_region(start_pfn, end_pfn); - } } void sparc_flush_page_to_ram(struct page *page) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 21f8cbbd0581..25ae4c897aae 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2505,10 +2505,6 @@ static void __init register_page_bootmem_info(void) } void __init mem_init(void) { - high_memory = __va(last_valid_pfn << PAGE_SHIFT); - - memblock_free_all(); - /* * Must be done after boot memory is put on freelist, because here we * might set fields in deferred struct pages that have not yet been @@ -2882,43 +2878,42 @@ void __flush_tlb_all(void) : : "r" (pstate)); } -pte_t *pte_alloc_one_kernel(struct mm_struct *mm) -{ - struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); - pte_t *pte = NULL; - - if (page) - pte = (pte_t *) page_address(page); - - return pte; -} - -pgtable_t pte_alloc_one(struct 
mm_struct *mm) +static pte_t *__pte_alloc_one(struct mm_struct *mm) { struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0); if (!ptdesc) return NULL; - if (!pagetable_pte_ctor(ptdesc)) { + if (!pagetable_pte_ctor(mm, ptdesc)) { pagetable_free(ptdesc); return NULL; } return ptdesc_address(ptdesc); } -void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +{ + return __pte_alloc_one(mm); +} + +pgtable_t pte_alloc_one(struct mm_struct *mm) { - free_page((unsigned long)pte); + return __pte_alloc_one(mm); } static void __pte_free(pgtable_t pte) { struct ptdesc *ptdesc = virt_to_ptdesc(pte); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } +void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + __pte_free(pte); +} + void pte_free(struct mm_struct *mm, pgtable_t pte) { __pte_free(pte); diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 9df51a62333d..f8fb4911d360 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -277,19 +277,13 @@ static void __init srmmu_nocache_init(void) bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; - srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size, + srmmu_nocache_pool = memblock_alloc_or_panic(srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX); - if (!srmmu_nocache_pool) - panic("%s: Failed to allocate %lu bytes align=0x%x\n", - __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); srmmu_nocache_bitmap = - memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long), + memblock_alloc_or_panic(BITS_TO_LONGS(bitmap_bits) * sizeof(long), SMP_CACHE_BYTES); - if (!srmmu_nocache_bitmap) - panic("%s: Failed to allocate %zu bytes\n", __func__, - BITS_TO_LONGS(bitmap_bits) * sizeof(long)); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); @@ -356,7 +350,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm) page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT); spin_lock(&mm->page_table_lock); if (page_ref_inc_return(page) == 2 && - !pagetable_pte_ctor(page_ptdesc(page))) { + !pagetable_pte_ctor(mm, page_ptdesc(page))) { page_ref_dec(page); ptep = NULL; } @@ -372,7 +366,7 @@ void pte_free(struct mm_struct *mm, pgtable_t ptep) page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT); spin_lock(&mm->page_table_lock); if (page_ref_dec_return(page) == 1) - pagetable_pte_dtor(page_ptdesc(page)); + pagetable_dtor(page_ptdesc(page)); spin_unlock(&mm->page_table_lock); srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE); @@ -452,9 +446,7 @@ static void __init sparc_context_init(int numctx) unsigned long size; size = numctx * sizeof(struct ctx_list); - ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES); - if (!ctx_list_pool) - panic("%s: Failed to allocate %lu bytes\n", __func__, size); + ctx_list_pool = memblock_alloc_or_panic(size, SMP_CACHE_BYTES); for (ctx = 0; ctx < numctx; ctx++) { struct ctx_list *clist; diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 8648a50afe88..a35ddcca5e76 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -52,8 +52,10 @@ out: void arch_enter_lazy_mmu_mode(void) { - struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); + struct tlb_batch *tb; + preempt_disable(); + tb = this_cpu_ptr(&tlb_batch); tb->active = 1; } @@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void) if (tb->tlb_nr) flush_tlb_pending(); tb->active = 0; + 
preempt_enable(); } static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile index 243dbfc4609d..fdc4a8f5a49c 100644 --- a/arch/sparc/vdso/Makefile +++ b/arch/sparc/vdso/Makefile @@ -22,7 +22,7 @@ targets += $(foreach x, 32 64, vdso-image-$(x).c vdso$(x).so vdso$(x).so.dbg) CPPFLAGS_vdso.lds += -P -C -VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 --no-undefined \ +VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 \ -z max-page-size=8192 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE @@ -46,7 +46,7 @@ CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables -m64 -fno-omit-frame-pointer -foptimize-sibling-calls \ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO -SPARC_REG_CFLAGS = -ffixed-g4 -ffixed-g5 -fcall-used-g5 -fcall-used-g7 +SPARC_REG_CFLAGS = -ffixed-g4 -ffixed-g5 $(call cc-option,-fcall-used-g5) $(call cc-option,-fcall-used-g7) $(vobjs): KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) @@ -101,7 +101,6 @@ $(obj)/vdso32.so.dbg: FORCE \ quiet_cmd_vdso = VDSO $@ cmd_vdso = $(LD) -nostdlib -o $@ \ $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ - -T $(filter %.lds,$^) $(filter %.o,$^) && \ - sh $(src)/checkundef.sh '$(OBJDUMP)' '$@' + -T $(filter %.lds,$^) $(filter %.o,$^) -VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic +VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic --no-undefined diff --git a/arch/sparc/vdso/checkundef.sh b/arch/sparc/vdso/checkundef.sh deleted file mode 100644 index 2d85876ffc32..000000000000 --- a/arch/sparc/vdso/checkundef.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh -objdump="$1" -file="$2" -$objdump -t "$file" | grep '*UUND*' | grep -v '#scratch' > /dev/null 2>&1 -if [ $? 
-eq 1 ]; then - exit 0 -else - echo "$file: undefined symbols found" >&2 - exit 1 -fi diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c index e794edde6755..79607804ea1b 100644 --- a/arch/sparc/vdso/vclock_gettime.c +++ b/arch/sparc/vdso/vclock_gettime.c @@ -86,6 +86,11 @@ notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv, } #ifdef CONFIG_SPARC64 +notrace static __always_inline u64 __shr64(u64 val, int amt) +{ + return val >> amt; +} + notrace static __always_inline u64 vread_tick(void) { u64 ret; @@ -102,6 +107,21 @@ notrace static __always_inline u64 vread_tick_stick(void) return ret; } #else +notrace static __always_inline u64 __shr64(u64 val, int amt) +{ + u64 ret; + + __asm__ __volatile__("sllx %H1, 32, %%g1\n\t" + "srl %L1, 0, %L1\n\t" + "or %%g1, %L1, %%g1\n\t" + "srlx %%g1, %2, %L0\n\t" + "srlx %L0, 32, %H0" + : "=r" (ret) + : "r" (val), "r" (amt) + : "g1"); + return ret; +} + notrace static __always_inline u64 vread_tick(void) { register unsigned long long ret asm("o4"); @@ -154,7 +174,7 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar, ts->tv_sec = vvar->wall_time_sec; ns = vvar->wall_time_snsec; ns += vgetsns(vvar); - ns >>= vvar->clock.shift; + ns = __shr64(ns, vvar->clock.shift); } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); @@ -174,7 +194,7 @@ notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar, ts->tv_sec = vvar->wall_time_sec; ns = vvar->wall_time_snsec; ns += vgetsns_stick(vvar); - ns >>= vvar->clock.shift; + ns = __shr64(ns, vvar->clock.shift); } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); @@ -194,7 +214,7 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar, ts->tv_sec = vvar->monotonic_time_sec; ns = vvar->monotonic_time_snsec; ns += vgetsns(vvar); - ns >>= vvar->clock.shift; + ns = __shr64(ns, vvar->clock.shift); } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); @@ -214,7 +234,7 @@ notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar, ts->tv_sec = vvar->monotonic_time_sec; ns = vvar->monotonic_time_snsec; ns += vgetsns_stick(vvar); - ns >>= vvar->clock.shift; + ns = __shr64(ns, vvar->clock.shift); } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); |
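The new __shr64() helper exists because the 32-bit vDSO cannot pull in libgcc: a variable 64-bit right shift in 32-bit code may be compiled into a call to a helper such as __lshrdi3, which a vDSO has no way to resolve, and that is plausibly also why --no-undefined now applies to both vDSOs and checkundef.sh could be dropped. The inline asm rebuilds the value from its two 32-bit halves in a 64-bit register and shifts with srlx. A portable C rendition of the same split-word computation, written only with 32-bit operations and purely for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * 64-bit logical right shift built from 32-bit halves, mirroring what
 * the sparc32 __shr64() asm achieves without a libgcc helper call.
 * Assumes 0 <= amt < 64, as clocksource shift values always are.
 */
static uint64_t shr64_by_halves(uint32_t hi, uint32_t lo, unsigned int amt)
{
	uint32_t rhi, rlo;

	if (amt == 0) {
		rhi = hi;
		rlo = lo;
	} else if (amt < 32) {
		rlo = (lo >> amt) | (hi << (32 - amt));
		rhi = hi >> amt;
	} else {
		rlo = hi >> (amt - 32);
		rhi = 0;
	}
	return ((uint64_t)rhi << 32) | rlo;
}

int main(void)
{
	uint64_t v = 0x123456789abcdef0ULL;

	for (unsigned int s = 0; s < 64; s += 8)
		printf("v >> %2u = %#018llx vs %#018llx\n", s,
		       (unsigned long long)shr64_by_halves(v >> 32, (uint32_t)v, s),
		       (unsigned long long)(v >> s));
	return 0;
}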