author     Mauro Carvalho Chehab <mchehab@s-opensource.com>   2016-07-23 13:59:19 +0300
committer  Mauro Carvalho Chehab <mchehab@s-opensource.com>   2016-07-23 13:59:19 +0300
commit     c278256d05a2fc75b427fa6a5dc0024faa93465d (patch)
tree       2c09c6c65a4b6c597a568ec2425adb75eff7d5d5 /arch/arc
parent     7e5b7d1b3a8facd4dc1ddb5d9ec53c0687d13de7 (diff)
parent     009a620848218d521f008141c62f56bf19294dd9 (diff)
download   linux-c278256d05a2fc75b427fa6a5dc0024faa93465d.tar.xz
Merge branch 'patchwork' into topic/docs-next
* patchwork: (1492 commits)
[media] cec: always check all_device_types and features
[media] cec: poll should check if there is room in the tx queue
[media] vivid: support monitor all mode
[media] cec: fix test for unconfigured adapter in main message loop
[media] cec: limit the size of the transmit queue
[media] cec: zero unused msg part after msg->len
[media] cec: don't set fh to NULL in CEC_TRANSMIT
[media] cec: clear all status fields before transmit and always fill in sequence
[media] cec: CEC_RECEIVE overwrote the timeout field
[media] cxd2841er: Reading SNR for DVB-C added
[media] cxd2841er: Reading BER and UCB for DVB-C added
[media] cxd2841er: fix switch-case for DVB-C
[media] cxd2841er: fix signal strength scale for ISDB-T
[media] cxd2841er: adjust the dB scale for DVB-C
[media] cxd2841er: provide signal strength for DVB-C
[media] cxd2841er: fix BER report via DVBv5 stats API
[media] mb86a20s: apply mask to val after checking for read failure
[media] airspy: fix error logic during device register
[media] s5p-cec/TODO: add TODO item
[media] cec/TODO: drop comment about sphinx documentation
...
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
Diffstat (limited to 'arch/arc')
37 files changed, 33 insertions, 420 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 0dcbacfdea4b..0d3e59f56974 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
 config ARCH_DISCONTIGMEM_ENABLE
-	def_bool y
+	def_bool n
 
 config ARCH_FLATMEM_ENABLE
 	def_bool y
@@ -186,9 +186,6 @@ if SMP
 config ARC_HAS_COH_CACHES
 	def_bool n
 
-config ARC_HAS_REENTRANT_IRQ_LV2
-	def_bool n
-
 config ARC_MCIP
 	bool "ARConnect Multicore IP (MCIP) Support "
 	depends on ISA_ARCV2
@@ -366,25 +363,10 @@ config NODES_SHIFT
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
-	bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+	bool "Setup Timer IRQ as high Priority"
 	default n
-	# Timer HAS to be high priority, for any other high priority config
-	select ARC_IRQ3_LV2
 	# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
-	depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
-	bool
-
-config ARC_IRQ5_LV2
-	bool
-
-config ARC_IRQ6_LV2
-	bool
-
-endif	#ARC_COMPACT_IRQ_LEVELS
+	depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
 	bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
 	default y
 	depends on !ARC_CANT_LLSC
 
-config ARC_STAR_9000923308
-	bool "Workaround for llock/scond livelock"
-	default n
-	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
 config ARC_HAS_SWAPE
 	bool "Insn: SWAPE (endian-swap)"
 	default y
@@ -471,7 +448,7 @@ config LINUX_LINK_BASE
 config HIGHMEM
 	bool "High Memory Support"
-	select DISCONTIGMEM
+	select ARCH_DISCONTIGMEM_ENABLE
 	help
 	  With ARC 2G:2G address split, only upper 2G is directly addressable by
 	  kernel. Enable this to potentially allow access to rest of 2G and PAE
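The HIGHMEM help text above is the key constraint: with the 2G:2G split, pages outside the kernel's direct map cannot simply be dereferenced and must be temporarily mapped first. As a reminder of what that means for code that consumes such pages, here is a minimal, hypothetical sketch (not part of this patch; the fill_page() helper is invented for illustration):

/* Hypothetical illustration only -- not part of this patch.
 * With CONFIG_HIGHMEM, pages above the direct-mapped region must be
 * temporarily mapped before the kernel can touch their contents.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void fill_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap(page);	/* trivial for lowmem, a real mapping for highmem */

	memcpy(vaddr, src, len);
	kunmap(page);
}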
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 02fabef2891c..85814e74677d 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -66,8 +66,6 @@ endif
 endif
 
-cflags-$(CONFIG_ARC_DW2_UNWIND)	+= -fasynchronous-unwind-tables
-
 # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
 ifeq ($(atleast_gcc48),y)
 cflags-$(CONFIG_ARC_DW2_UNWIND)	+= -gdwarf-2
@@ -127,7 +125,7 @@ libs-y += arch/arc/lib/ $(LIBGCC)
 
 boot		:= arch/arc/boot
 
-#default target for make without any arguements.
+#default target for make without any arguments.
 KBUILD_IMAGE	:= bootpImage
 
 all:	$(KBUILD_IMAGE)
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
index 3942634f805a..02410b211433 100644
--- a/arch/arc/boot/dts/abilis_tb100.dtsi
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -23,8 +23,6 @@
 / {
-	clock-frequency		= <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency	= <166666666>;
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
index b0467229a5c4..f9e7686044eb 100644
--- a/arch/arc/boot/dts/abilis_tb101.dtsi
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -23,8 +23,6 @@
 / {
-	clock-frequency		= <500000000>;	/* 500 MHZ */
-
 	soc100 {
 		bus-frequency	= <166666666>;
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 3e02f152edcb..6ae2c476ad82 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -15,7 +15,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <750000000>;	/* 750 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 378e455a94c4..14df46f141bf 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -14,7 +14,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 64c94b2860ab..3d6cfa32bf51 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -14,7 +14,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <90000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
diff --git a/arch/arc/boot/dts/eznps.dts b/arch/arc/boot/dts/eznps.dts
index b89f6c3eb352..1e0d225791c1 100644
--- a/arch/arc/boot/dts/eznps.dts
+++ b/arch/arc/boot/dts/eznps.dts
@@ -18,7 +18,6 @@
 / {
 	compatible = "ezchip,arc-nps";
-	clock-frequency = <83333333>;	/* 83.333333 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&intc>;
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts
index 5d5e373e0ebc..63970513e4ae 100644
--- a/arch/arc/boot/dts/nsim_700.dts
+++ b/arch/arc/boot/dts/nsim_700.dts
@@ -11,7 +11,6 @@
 / {
 	compatible = "snps,nsim";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index b5b060adce8a..763d66c883da 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -11,7 +11,6 @@
 / {
 	compatible = "snps,nsimosci";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
index 325e73090a18..4eb97c584b18 100644
--- a/arch/arc/boot/dts/nsimosci_hs.dts
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -11,7 +11,6 @@
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <20000000>;	/* 20 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
index ee03d7126581..853f897eb2a3 100644
--- a/arch/arc/boot/dts/nsimosci_hs_idu.dts
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -11,7 +11,6 @@
 / {
 	compatible = "snps,nsimosci_hs";
-	clock-frequency = <5000000>;	/* 5 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	interrupt-parent = <&core_intc>;
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 3a10cc633e2b..65808fe0a290 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -13,7 +13,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
index 71fd308a9298..2dfe8037dfbb 100644
--- a/arch/arc/boot/dts/skeleton_hs.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -8,7 +8,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
index d1cb25a66989..4c11079f3565 100644
--- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -8,7 +8,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <80000000>;	/* 80 MHZ */
 	#address-cells = <1>;
 	#size-cells = <1>;
 	chosen { };
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index ad4ee43bd2ac..0fd6ba985b16 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -14,7 +14,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index a3cb6263c581..82214cd7ba0c 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -15,7 +15,6 @@
 / {
 	compatible = "snps,arc";
-	clock-frequency = <50000000>;
 	#address-cells = <1>;
 	#size-cells = <1>;
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5f3dcbbc0cc9..dd683995bc9d 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,50 +25,17 @@
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF					\
-	unsigned int delay = 1, tmp;					\
-
-#define SCOND_FAIL_RETRY_ASM						\
-	"	bz	4f			\n"			\
-	"   ; --- scond fail delay ---		\n"			\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
-	"	b	1b			\n"	/* start over */	\
-	"4: ; --- success ---			\n"			\
-
-#define SCOND_FAIL_RETRY_VARS						\
-	  ,[delay] "+&r" (delay),[tmp]	"=&r"	(tmp)			\
-
-#else	/* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM						\
-	"	bnz     1b			\n"			\
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	unsigned int val;						\
-	SCOND_FAIL_RETRY_VAR_DEF					\
+	unsigned int val;						\
 									\
 	__asm__ __volatile__(						\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v)		\
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
-	unsigned int val;						\
-	SCOND_FAIL_RETRY_VAR_DEF					\
+	unsigned int val;						\
 									\
 	/*								\
 	 * Explicit full memory barrier needed before/after as		\
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	"1:	llock   %[val], [%[ctr]]		\n"		\
 	"	" #asm_op " %[val], %[val], %[i]	\n"		\
 	"	scond   %[val], [%[ctr]]		\n"		\
-	"						\n"		\
-	SCOND_FAIL_RETRY_ASM						\
-									\
+	"	bnz     1b				\n"		\
 	: [val]	"=&r"	(val)						\
-	  SCOND_FAIL_RETRY_VARS						\
 	: [ctr]	"r"	(&v->counter),					\
 	  [i]	"ir"	(i)						\
 	: "cc");							\
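For readers not familiar with LLOCK/SCOND, the loop that the simplified ATOMIC_OP macro above emits is the classic load-locked/store-conditional retry pattern: load the counter, compute the new value, attempt the conditional store, and branch back ("bnz 1b") if another core wrote in between. A rough C-level equivalent using C11 atomics (illustrative only; arc_atomic_add_sketch is an invented name, and the real implementation is the inline asm above):

/* Illustrative sketch of what the llock/scond retry loop achieves. */
#include <stdatomic.h>

static inline void arc_atomic_add_sketch(atomic_int *ctr, int i)
{
	int old = atomic_load_explicit(ctr, memory_order_relaxed);

	/* llock = read current value; scond = store only if nobody else
	 * wrote in the meantime; "bnz 1b" = retry the sequence on failure. */
	while (!atomic_compare_exchange_weak_explicit(ctr, &old, old + i,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* 'old' was refreshed by the failed exchange; just retry */
}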
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index e0e1faf03c50..14c310f2e0b1 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -76,8 +76,8 @@
  * We need to be a bit more cautious here. What if a kernel bug in
  * L1 ISR, caused SP to go whaco (some small value which looks like
  * USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
  * The only feasible way is to make sure this L2 happened in
  * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
  * L1 ISR before it switches stack
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 1fd467ef658f..b0b87f2447f5 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 		local_flush_tlb_all();
 
 		/*
-		 * Above checke for rollover of 8 bit ASID in 32 bit container.
+		 * Above check for rollover of 8 bit ASID in 32 bit container.
 		 * If the container itself wrapped around, set it to a non zero
 		 * "generation" to distinguish from no context
 		 */
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 86ed671286df..3749234b7419 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;
 
-	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 __get_order_pte());
 
 	return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	pgtable_t pte_pg;
 	struct page *page;
 
-	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
 	if (!pte_pg)
 		return 0;
 	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 034bbdc0ff61..858f98ef7f1b 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -47,7 +47,7 @@
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
 *      seperate PD0 and PD1, which combined forms a translation entry)
 *      while for PTE perspective, they are 8 and 9 respectively
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index f9048994b22f..16b630fbeb6a 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -78,7 +78,7 @@ struct task_struct;
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk)		(tsk->thread.ksp)
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 991380438d6b..89fdd1b0a76e 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
  * (1) These insn were introduced only in 4.10 release. So for older released
  *     support needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
  *     gaurantted by the platform (not something which core handles).
  *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *     disabling for atomicity.
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 800e7c430ca5..cded4a9b5438 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -20,11 +20,6 @@
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	smp_mb();
 }
 
-#else	/* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF						\
-	unsigned int delay, tmp;						\
-
-#define SCOND_FAIL_RETRY_ASM							\
-	"   ; --- scond fail delay ---		\n"				\
-	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
-	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
-	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
-	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
-	"	b	1b			\n"	/* start over */	\
-	"					\n"				\
-	"4: ; --- done ---			\n"				\
-
-#define SCOND_FAIL_RETRY_VARS							\
-	  ,[delay] "=&r" (delay), [tmp] "=&r"	(tmp)				\
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 0b	\n"	/* spin while LOCKED */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[slock]]	\n"
-	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
-	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [slock]	"r"	(&(lock->slock)),
-	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * zero means writer holds the lock exclusively, deny Reader.
-	 * Otherwise grant lock to first/subseq reader
-	 *
-	 * 	if (rw->counter > 0) {
-	 *		rw->counter--;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 0b\n"	/* <= 0: spin while write locked */
-	"	sub	%[val], %[val], 1	\n"	/* reader lock */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"	/* done */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
-	"	sub	%[val], %[val], 1	\n"	/* counter-- */
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	/*
-	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-	 * deny writer. Otherwise if unlocked grant to writer
-	 * Hence the claim that Linux rwlocks are unfair to writers.
-	 * (can be starved for an indefinite time by readers).
-	 *
-	 * 	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-	 *		rw->counter = 0;
-	 *		ret = 1;
-	 *	}
-	 */
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 0b	\n"	/* while !UNLOCKED spin */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz	4f			\n"
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int val, got_it = 0;
-	SCOND_FAIL_RETRY_VAR_DEF;
-
-	smp_mb();
-
-	__asm__ __volatile__(
-	"0:	mov	%[delay], 1		\n"
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
-	"	mov	%[val], %[WR_LOCKED]	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bz.d	4f			\n"
-	"	mov.z	%[got_it], 1		\n"	/* got it */
-	"					\n"
-	SCOND_FAIL_RETRY_ASM
-
-	: [val]		"=&r"	(val),
-	  [got_it]	"+&r"	(got_it)
-	  SCOND_FAIL_RETRY_VARS
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
-	  [WR_LOCKED]	"ir"	(0)
-	: "memory", "cc");
-
-	smp_mb();
-
-	return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter++;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	add	%[val], %[val], 1	\n"
-	"	scond	%[val], [%[rwlock]]	\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter))
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned int val;
-
-	smp_mb();
-
-	/*
-	 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-	 */
-	__asm__ __volatile__(
-	"1:	llock	%[val], [%[rwlock]]	\n"
-	"	scond	%[UNLOCKED], [%[rwlock]]\n"
-	"	bnz	1b			\n"
-	"					\n"
-	: [val]		"=&r"	(val)
-	: [rwlock]	"r"	(&(rw->counter)),
-	  [UNLOCKED]	"r"	(__ARCH_RW_LOCK_UNLOCKED__)
-	: "memory", "cc");
-
-	smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif	/* CONFIG_ARC_STAR_9000923308 */
-
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
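The deleted ARC_STAR_9000923308 variants above all share one idea, spelled out in the removed comment: when the conditional store (SCOND) fails, do not retry immediately; spin for a small delay and double it on each failure so contending cores fall out of lockstep and the coherency pipeline drains. A stand-alone C sketch of that bounded exponential backoff (illustrative only; lock_with_backoff and its locking convention are invented for this example, and the real code keys the backoff to SCOND failure rather than a CAS):

/* Sketch of the deferred-retry idea from the removed comment. */
#include <stdatomic.h>

static void lock_with_backoff(atomic_int *slock)
{
	unsigned int delay = 1;

	for (;;) {
		int expected = 0;			/* 0 == unlocked in this sketch */

		if (atomic_compare_exchange_weak(slock, &expected, 1))
			return;				/* acquired */

		for (volatile unsigned int i = delay; i; i--)
			;				/* busy-wait 'delay' iterations */
		delay *= 2;				/* exponential backoff */
	}
}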
[rwlock] "r" (&(rw->counter)), - [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__) - : "memory", "cc"); - - smp_mb(); -} - -#undef SCOND_FAIL_RETRY_VAR_DEF -#undef SCOND_FAIL_RETRY_ASM -#undef SCOND_FAIL_RETRY_VARS - -#endif /* CONFIG_ARC_STAR_9000923308 */ - #else /* !CONFIG_ARC_HAS_LLSC */ static inline void arch_spin_lock(arch_spinlock_t *lock) diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index 3af67455659a..2d79e527fa50 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h @@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) /* * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it. - * SYSCALL_TRACE is anways seperately/unconditionally tested right after a + * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a * syscall, so all that reamins to be tested is _TIF_WORK_MASK */ diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index d1da6032b715..a78d5670884f 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -32,7 +32,7 @@ #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) /* - * Algorthmically, for __user_ok() we want do: + * Algorithmically, for __user_ok() we want do: * (start < TASK_SIZE) && (start+len < TASK_SIZE) * where TASK_SIZE could either be retrieved from thread_info->addr_limit or * emitted directly in code. diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h index 095599a73195..71f3918b0fc3 100644 --- a/arch/arc/include/uapi/asm/swab.h +++ b/arch/arc/include/uapi/asm/swab.h @@ -74,7 +74,7 @@ __tmp ^ __in; \ }) -#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */ +#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */ #define __arch_swab32(x) \ ({ \ diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S index 0cb0abaa0479..98812c1248df 100644 --- a/arch/arc/kernel/entry-compact.S +++ b/arch/arc/kernel/entry-compact.S @@ -91,27 +91,13 @@ VECTOR mem_service ; 0x8, Mem exception (0x1) VECTOR instr_service ; 0x10, Instrn Error (0x2) ; ******************** Device ISRs ********************** -#ifdef CONFIG_ARC_IRQ3_LV2 -VECTOR handle_interrupt_level2 -#else -VECTOR handle_interrupt_level1 -#endif - -VECTOR handle_interrupt_level1 - -#ifdef CONFIG_ARC_IRQ5_LV2 -VECTOR handle_interrupt_level2 -#else -VECTOR handle_interrupt_level1 -#endif - -#ifdef CONFIG_ARC_IRQ6_LV2 +#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS VECTOR handle_interrupt_level2 #else VECTOR handle_interrupt_level1 #endif -.rept 25 +.rept 28 VECTOR handle_interrupt_level1 ; Other devices .endr diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index c5cceca36118..ce9deb953ca9 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c @@ -28,10 +28,8 @@ void arc_init_IRQ(void) { int level_mask = 0; - /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ - level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; - level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; - level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6; + /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */ + level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ; /* * Write to register, even if no LV2 IRQs configured to reset it diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 6fd48021324b..08f03d9b5b3e 100644 --- a/arch/arc/kernel/perf_event.c +++ 
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 6fd48021324b..08f03d9b5b3e 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
 	int64_t delta = new_raw_count - prev_raw_count;
 
 	/*
-	 * We don't afaraid of hwc->prev_count changing beneath our feet
+	 * We aren't afraid of hwc->prev_count changing beneath our feet
 	 * because there's no way for us to re-enter this function anytime.
 	 */
 	local64_set(&hwc->prev_count, new_raw_count);
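arc_perf_event_update() follows the usual PMU bookkeeping pattern: sample the free-running hardware counter, take the delta against the previously saved raw value, remember the new raw value, and accumulate the delta into the event count. A simplified stand-alone sketch of that logic (illustrative; the struct and the read_hw_counter() helper are invented for this example):

/* Simplified sketch of free-running-counter accumulation. */
#include <stdint.h>

struct counter_state {
	uint64_t prev_raw;	/* last raw value sampled from hardware */
	uint64_t total;		/* accumulated event count */
};

extern uint64_t read_hw_counter(void);	/* hypothetical raw counter read */

static void counter_update(struct counter_state *st)
{
	uint64_t new_raw = read_hw_counter();
	int64_t delta = new_raw - st->prev_raw;	/* wraps correctly in two's complement */

	st->prev_raw = new_raw;		/* safe: no re-entry, as the comment above notes */
	st->total += delta;
}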
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index f63b8bfefb0c..2ee7a4d758a8 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
 		/*
 		 * If we are here, it is established that @uboot_arg didn't
 		 * point to DT blob. Instead if u-boot says it is cmdline,
-		 * Appent to embedded DT cmdline.
+		 * append to embedded DT cmdline.
 		 * setup_machine_fdt() would have populated @boot_command_line
 		 */
 		if (uboot_tag == 1) {
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 004b7f0bc76c..6cb3736b6b83 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -34,7 +34,7 @@
 *  -ViXS were still seeing crashes when using insmod to load drivers.
 *   It turned out that the code to change Execute permssions for TLB entries
 *   of user was not guarded for interrupts (mod_tlb_permission)
- *   This was cauing TLB entries to be overwritten on unrelated indexes
+ *   This was causing TLB entries to be overwritten on unrelated indexes
 *
 * Vineetg: July 15th 2008: Bug #94183
 *  -Exception happens in Delay slot of a JMP, and before user space resumes,
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index e0efff15a5ae..b9192a653b7e 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
 	 * prelogue is setup (callee regs saved and then fp set and not other
 	 * way around
 	 */
-	pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
 	return 0;
 
 #endif
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index a6f91e88ce36..934150e7ac48 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
 static ssize_t tlb_stats_output(struct file *file,	/* file descriptor */
 				char __user *user_buf,	/* user buffer */
 				size_t len,		/* length of buffer */
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9e5eddbb856f..5a294b2c3cb3 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -215,7 +215,7 @@ slc_chk:
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However from hardware perspective, smaller page sizes aggravate aliasing
 *   meaning more vaddr bits needed to disambiguate the cache-line-op ;
 *   the existing scheme of piggybacking won't work for certain configurations.
 *   Two new registers IC_PTAG and DC_PTAG inttoduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 
 	/*
 	 * This is technically for MMU v4, using the MMU v3 programming model
-	 * Special work for HS38 aliasing I-cache configuratino with PAE40
+	 * Special work for HS38 aliasing I-cache configuration with PAE40
 	 *   - upper 8 bits of paddr need to be written into PTAG_HI
 	 *   - (and needs to be written before the lower 32 bits)
 	 * Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@ void arc_cache_init(void)
 			      ic->ver, CONFIG_ARC_MMU_VER);
 
 		/*
-		 * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
 		 * pair to provide vaddr/paddr respectively, just as in MMU v3
 		 */
 		if (is_isa_arcv2() && ic->alias)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 8c8e36fa5659..73d7e4c75b7d 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -10,7 +10,7 @@
  * DMA Coherent API Notes
  *
  * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
  * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
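The dma.c comment is the rationale for the coherent DMA path: the buffer is reached through an uncached kernel mapping, so the CPU and a non-coherent device always agree on its contents. From a driver's point of view such a buffer typically comes from the generic DMA API, roughly as in this minimal sketch (the device pointer, function name, and BUF_SIZE are placeholders):

/* Minimal sketch of allocating a coherent (uncached on ARC) DMA buffer. */
#include <linux/dma-mapping.h>

#define BUF_SIZE	4096

static void *alloc_desc_ring(struct device *dev, dma_addr_t *dma_handle)
{
	/* The CPU gets an uncached kernel virtual address; the device is
	 * handed the corresponding bus/physical address via *dma_handle. */
	return dma_alloc_coherent(dev, BUF_SIZE, dma_handle, GFP_KERNEL);
}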