Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/atomic.h          45
-rw-r--r--  arch/arc/include/asm/entry-compact.h    4
-rw-r--r--  arch/arc/include/asm/mmu_context.h      2
-rw-r--r--  arch/arc/include/asm/pgalloc.h          4
-rw-r--r--  arch/arc/include/asm/pgtable.h          2
-rw-r--r--  arch/arc/include/asm/processor.h        2
-rw-r--r--  arch/arc/include/asm/smp.h              2
-rw-r--r--  arch/arc/include/asm/spinlock.h       292
-rw-r--r--  arch/arc/include/asm/thread_info.h      2
-rw-r--r--  arch/arc/include/asm/uaccess.h          2
-rw-r--r--  arch/arc/include/uapi/asm/swab.h        2
11 files changed, 15 insertions, 344 deletions
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5f3dcbbc0cc9..dd683995bc9d 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,50 +25,17 @@
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay = 1, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " bz 4f \n" \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- "4: ; --- success --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
-
-#else /* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM \
- " bnz 1b \n" \
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
__asm__ __volatile__( \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
- unsigned int val; \
- SCOND_FAIL_RETRY_VAR_DEF \
+ unsigned int val; \
\
/* \
* Explicit full memory barrier needed before/after as \
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
- SCOND_FAIL_RETRY_ASM \
- \
+ " bnz 1b \n" \
: [val] "=&r" (val) \
- SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index e0e1faf03c50..14c310f2e0b1 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -76,8 +76,8 @@
* We need to be a bit more cautious here. What if a kernel bug in
* L1 ISR, caused SP to go whaco (some small value which looks like
* USER stk) and then we take L2 ISR.
- * Above brlo alone would treat it as a valid L1-L2 sceanrio
- * instead of shouting alound
+ * Above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting around
* The only feasible way is to make sure this L2 happened in
* L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
* L1 ISR before it switches stack
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 1fd467ef658f..b0b87f2447f5 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
local_flush_tlb_all();
/*
- * Above checke for rollover of 8 bit ASID in 32 bit container.
+ * Above check for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 86ed671286df..3749234b7419 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
__get_order_pte());
return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
pgtable_t pte_pg;
struct page *page;
- pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
if (!pte_pg)
return 0;
memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 034bbdc0ff61..858f98ef7f1b 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -47,7 +47,7 @@
* Page Tables are purely for Linux VM's consumption and the bits below are
* suited to that (uniqueness). Hence some are not implemented in the TLB and
* some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
* seperate PD0 and PD1, which combined forms a translation entry)
* while for PTE perspective, they are 8 and 9 respectively
* with MMU v3: Most bits (except SHARED) represent the exact hardware pos
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index f9048994b22f..16b630fbeb6a 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -78,7 +78,7 @@ struct task_struct;
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
#define TSK_K_ESP(tsk) (tsk->thread.ksp)
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 991380438d6b..89fdd1b0a76e 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
* (1) These insn were introduced only in 4.10 release. So for older released
* support needed.
*
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
* gaurantted by the platform (not something which core handles).
* Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
* disabling for atomicity.
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 800e7c430ca5..cded4a9b5438 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -20,11 +20,6 @@
#ifdef CONFIG_ARC_HAS_LLSC
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
smp_mb();
}
-#else /* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF \
- unsigned int delay, tmp; \
-
-#define SCOND_FAIL_RETRY_ASM \
- " ; --- scond fail delay --- \n" \
- " mov %[tmp], %[delay] \n" /* tmp = delay */ \
- "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
- " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
- " rol %[delay], %[delay] \n" /* delay *= 2 */ \
- " b 1b \n" /* start over */ \
- " \n" \
- "4: ; --- done --- \n" \
-
-#define SCOND_FAIL_RETRY_VARS \
- ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[slock]] \n"
- " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
- " scond %[LOCKED], [%[slock]] \n" /* acquire */
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [slock] "r" (&(lock->slock)),
- [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- smp_mb();
-
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
- smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * zero means writer holds the lock exclusively, deny Reader.
- * Otherwise grant lock to first/subseq reader
- *
- * if (rw->counter > 0) {
- * rw->counter--;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
- " sub %[val], %[val], 1 \n" /* reader lock */
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n" /* done */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
- " sub %[val], %[val], 1 \n" /* counter-- */
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned int val;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- /*
- * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
- * deny writer. Otherwise if unlocked grant to writer
- * Hence the claim that Linux rwlocks are unfair to writers.
- * (can be starved for an indefinite time by readers).
- *
- * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
- * rw->counter = 0;
- * ret = 1;
- * }
- */
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz 4f \n"
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned int val, got_it = 0;
- SCOND_FAIL_RETRY_VAR_DEF;
-
- smp_mb();
-
- __asm__ __volatile__(
- "0: mov %[delay], 1 \n"
- "1: llock %[val], [%[rwlock]] \n"
- " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
- " mov %[val], %[WR_LOCKED] \n"
- " scond %[val], [%[rwlock]] \n"
- " bz.d 4f \n"
- " mov.z %[got_it], 1 \n" /* got it */
- " \n"
- SCOND_FAIL_RETRY_ASM
-
- : [val] "=&r" (val),
- [got_it] "+&r" (got_it)
- SCOND_FAIL_RETRY_VARS
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
- [WR_LOCKED] "ir" (0)
- : "memory", "cc");
-
- smp_mb();
-
- return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter++;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " add %[val], %[val], 1 \n"
- " scond %[val], [%[rwlock]] \n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter))
- : "memory", "cc");
-
- smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- unsigned int val;
-
- smp_mb();
-
- /*
- * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
- */
- __asm__ __volatile__(
- "1: llock %[val], [%[rwlock]] \n"
- " scond %[UNLOCKED], [%[rwlock]]\n"
- " bnz 1b \n"
- " \n"
- : [val] "=&r" (val)
- : [rwlock] "r" (&(rw->counter)),
- [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
- : "memory", "cc");
-
- smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif /* CONFIG_ARC_STAR_9000923308 */
-
#else /* !CONFIG_ARC_HAS_LLSC */
static inline void arch_spin_lock(arch_spinlock_t *lock)
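
[Editor's note: the comment block removed above describes the HS38x4 workaround being deleted: on a failed SCOND, back off for a delay that doubles under contention, so the coherency pipeline can drain before the next exclusive access. As a rough illustration only (hypothetical helper in plain C, not the ARC assembly), the backoff amounts to:

	/* Illustrative sketch of the removed SCOND-fail backoff; names are
	 * hypothetical, and "unlocked"/"locked" stand in for
	 * __ARCH_SPIN_LOCK_UNLOCKED__ / __ARCH_SPIN_LOCK_LOCKED__. */
	static inline void backoff_spin_lock(volatile unsigned int *slock,
					     unsigned int unlocked,
					     unsigned int locked)
	{
		unsigned int delay = 1, tmp;

		for (;;) {
			/* spin while another CPU holds the lock */
			while (*slock != unlocked)
				;

			/* attempt the acquire; llock/scond sketched as a CAS */
			if (__sync_bool_compare_and_swap(slock, unlocked, locked))
				return;

			/* "scond" failed: busy-wait 'delay' iterations, then
			 * double it.  The ARC code used "rol", so the delay
			 * wraps around instead of collapsing to zero. */
			for (tmp = delay; tmp != 0; tmp--)
				__asm__ __volatile__("");
			delay = (delay << 1) | (delay >> 31);
		}
	}

With the erratum config gone, the plain LLOCK/SCOND spin loop earlier in this file is the only variant left.]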
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 3af67455659a..2d79e527fa50 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
* syscall, so all that reamins to be tested is _TIF_WORK_MASK
*/
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index d1da6032b715..a78d5670884f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -32,7 +32,7 @@
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want do:
* (start < TASK_SIZE) && (start+len < TASK_SIZE)
* where TASK_SIZE could either be retrieved from thread_info->addr_limit or
* emitted directly in code.
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
index 095599a73195..71f3918b0fc3 100644
--- a/arch/arc/include/uapi/asm/swab.h
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -74,7 +74,7 @@
__tmp ^ __in; \
})
-#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
#define __arch_swab32(x) \
({ \