author | Joe Perches <joe@perches.com> | 2020-10-22 05:36:07 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-26 00:51:49 +0300
commit | 33def8498fdde180023444b08e12b72a9efed41d (patch)
tree | 1efe1dda24a8c8865fbc7a538a749d30a3532d92 /arch/arm64
parent | 986b9eacb25910865b50e5f298aa8e2df7642f1b (diff)
download | linux-33def8498fdde180023444b08e12b72a9efed41d.tar.xz
treewide: Convert macro and uses of __section(foo) to __section("foo")
Use a more generic form for __section that requires quotes to avoid
complications with clang and gcc differences.
Remove the quote operator # from the compiler_attributes.h __section macro.
Convert all unquoted __section(foo) uses to quoted __section("foo").
Also convert __attribute__((section("foo"))) uses to __section("foo")
even if the __attribute__ has multiple list entry forms.
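
For context, the compiler_attributes.h side of this change amounts to dropping the # (stringification) operator so that callers supply the string literal themselves. The following is a minimal sketch of the before and after, based on the description above rather than the exact kernel header text:

```c
/*
 * Before: the macro stringified its argument with #, so callers wrote
 * the section name without quotes, e.g. __section(.data..read_mostly).
 */
#define __section(S) __attribute__((__section__(#S)))

/*
 * After: the argument is passed through unchanged, so callers must pass
 * a quoted string, e.g. __section(".data..read_mostly").
 */
#define __section(section) __attribute__((__section__(section)))
```

Requiring a quoted string keeps the section name an ordinary string literal, sidestepping the preprocessor stringification differences between gcc and clang that the unquoted form could run into.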
Conversion done using the script at:
https://lore.kernel.org/lkml/75393e5ddc272dc7403de74d645e6c6e0f4e70eb.camel@perches.com/2-convert_section.pl
Signed-off-by: Joe Perches <joe@perches.com>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Reviewed-by: Miguel Ojeda <ojeda@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/arm64')
 arch/arm64/include/asm/cache.h     | 2 +-
 arch/arm64/kernel/efi.c            | 2 +-
 arch/arm64/kernel/smp_spin_table.c | 2 +-
 arch/arm64/mm/mmu.c                | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index a4d1b5f771f6..0ac3e06a2118 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -79,7 +79,7 @@ static inline u32 cache_type_cwg(void)
 	return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
 }
 
-#define __read_mostly	__section(.data..read_mostly)
+#define __read_mostly	__section(".data..read_mostly")
 
 static inline int cache_line_size_of_cpu(void)
 {
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index d0cf596db82c..fa02efb28e88 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -54,7 +54,7 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
 }
 
 /* we will fill this structure from the stub, so don't put it in .bss */
-struct screen_info screen_info __section(.data);
+struct screen_info screen_info __section(".data");
 
 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 5892e79fa429..056772c26098 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -19,7 +19,7 @@
 #include <asm/smp_plat.h>
 
 extern void secondary_holding_pen(void);
-volatile unsigned long __section(.mmuoff.data.read)
+volatile unsigned long __section(".mmuoff.data.read")
 secondary_holding_pen_release = INVALID_HWID;
 
 static phys_addr_t cpu_release_addr[NR_CPUS];
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index beff3ad8c7f8..1c0f3e02f731 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,7 +43,7 @@
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
-u64 __section(.mmuoff.data.write) vabits_actual;
+u64 __section(".mmuoff.data.write") vabits_actual;
 EXPORT_SYMBOL(vabits_actual);
 
 u64 kimage_voffset __ro_after_init;