Diffstat (limited to 'kernel/rcutiny_plugin.h')
-rw-r--r--	kernel/rcutiny_plugin.h	56
1 file changed, 5 insertions(+), 51 deletions(-)
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index fc31a2d65100..918fd1e8509c 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -132,7 +132,6 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
 	RCU_TRACE(.rcb.name = "rcu_preempt")
 };
 
-static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(void);
 static void rcu_report_exp_done(void);
 
@@ -351,8 +350,9 @@ static int rcu_initiate_boost(void)
 			rcu_preempt_ctrlblk.boost_tasks =
 				rcu_preempt_ctrlblk.gp_tasks;
 		invoke_rcu_callbacks();
-	} else
+	} else {
 		RCU_TRACE(rcu_initiate_boost_trace());
+	}
 	return 1;
 }
 
@@ -527,23 +527,11 @@ void rcu_preempt_note_context_switch(void)
 }
 
 /*
- * Tiny-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
-	current->rcu_read_lock_nesting++;
-	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
 	int empty_exp;
@@ -627,38 +615,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 }
 
 /*
- * Tiny-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
-	struct task_struct *t = current;
-
-	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
-	if (t->rcu_read_lock_nesting != 1)
-		--t->rcu_read_lock_nesting;
-	else {
-		t->rcu_read_lock_nesting = INT_MIN;
-		barrier();  /* assign before ->rcu_read_unlock_special load */
-		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-			rcu_read_unlock_special(t);
-		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
-	}
-#ifdef CONFIG_PROVE_LOCKING
-	{
-		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
-		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
-	}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
-/*
  * Check for a quiescent state from the current CPU.  When a task blocks,
  * the task is recorded in the rcu_preempt_ctrlblk structure, which is
  * checked elsewhere.  This is called from the scheduling-clock interrupt.
@@ -823,9 +779,9 @@ void synchronize_rcu_expedited(void)
 		rpcp->exp_tasks = NULL;
 
 	/* Wait for tail of ->blkd_tasks list to drain. */
-	if (!rcu_preempted_readers_exp())
+	if (!rcu_preempted_readers_exp()) {
 		local_irq_restore(flags);
-	else {
+	} else {
 		rcu_initiate_boost();
 		local_irq_restore(flags);
 		wait_event(sync_rcu_preempt_exp_wq,
@@ -846,8 +802,6 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  */
 int rcu_preempt_needs_cpu(void)
 {
-	if (!rcu_preempt_running_reader())
-		rcu_preempt_cpu_qs();
 	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
 }
 
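Note on the deletions above: the Tiny-RCU-specific __rcu_read_lock()/__rcu_read_unlock() definitions are removed and rcu_read_unlock_special() loses its static qualifier, which is consistent with a shared implementation elsewhere now calling it. The removed __rcu_read_unlock() body documents a nesting protocol worth understanding: the outermost unlock parks ->rcu_read_lock_nesting at INT_MIN while it samples ->rcu_read_unlock_special, so any reader that nests inside that window (e.g. from an interrupt handler) sees a nonzero nesting count and cannot mistake itself for the outermost critical section. The following is a minimal user-space sketch of that protocol, not kernel code: the global counters stand in for the task_struct fields, barrier() is a plain compiler barrier, and the slow path is stubbed out.

#include <limits.h>
#include <stdio.h>

/* Simplified stand-ins for the per-task fields used by the removed
 * __rcu_read_unlock(); illustrative sketch only, not kernel API. */
static int rcu_read_lock_nesting;
static int rcu_read_unlock_special;

#define barrier() __asm__ __volatile__("" ::: "memory")

static void unlock_special_slowpath(void)
{
	/* In the kernel this reports the quiescent state, deboosts, etc. */
	rcu_read_unlock_special = 0;
}

static void sketch_rcu_read_lock(void)
{
	rcu_read_lock_nesting++;
	barrier();	/* keep the critical section after the increment */
}

static void sketch_rcu_read_unlock(void)
{
	barrier();	/* keep the critical section before the decrement */
	if (rcu_read_lock_nesting != 1) {
		--rcu_read_lock_nesting;	/* not the outermost unlock */
	} else {
		/* Outermost unlock: park the count at INT_MIN so a reader
		 * nesting inside this window still sees nonzero nesting. */
		rcu_read_lock_nesting = INT_MIN;
		barrier();	/* sentinel store before _special load */
		if (rcu_read_unlock_special)
			unlock_special_slowpath();
		barrier();	/* _special handling before nesting reset */
		rcu_read_lock_nesting = 0;
	}
}

int main(void)
{
	sketch_rcu_read_lock();
	sketch_rcu_read_lock();		/* nested critical section */
	sketch_rcu_read_unlock();	/* inner unlock: just decrements */
	sketch_rcu_read_unlock();	/* outermost: may run the slow path */
	printf("nesting=%d\n", rcu_read_lock_nesting);
	return 0;
}

The INT_MIN / 2 check deleted from the CONFIG_PROVE_LOCKING block follows from the same design: a legitimate transient value is either near INT_MIN (parked) or non-negative, so anything negative yet far above INT_MIN indicates a corrupted nesting count.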
