Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	74
1 file changed, 35 insertions(+), 39 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 75f1d05ea82d..95181e36891a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1001,17 +1001,13 @@ static int rb_head_page_replace(struct buffer_page *old,
 
 /*
  * rb_tail_page_update - move the tail page forward
- *
- * Returns 1 if moved tail page, 0 if someone else did.
  */
-static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 			       struct buffer_page *tail_page,
 			       struct buffer_page *next_page)
 {
-	struct buffer_page *old_tail;
 	unsigned long old_entries;
 	unsigned long old_write;
-	int ret = 0;
 
 	/*
 	 * The tail page now needs to be moved forward.
@@ -1036,7 +1032,7 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 	 * it is, then it is up to us to update the tail
 	 * pointer.
 	 */
-	if (tail_page == cpu_buffer->tail_page) {
+	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
 		/* Zero the write counter */
 		unsigned long val = old_write & ~RB_WRITE_MASK;
 		unsigned long eval = old_entries & ~RB_WRITE_MASK;
@@ -1061,14 +1057,9 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 		 */
 		local_set(&next_page->page->commit, 0);
 
-		old_tail = cmpxchg(&cpu_buffer->tail_page,
-				   tail_page, next_page);
-
-		if (old_tail == tail_page)
-			ret = 1;
+		/* Again, either we update tail_page or an interrupt does */
+		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
 	}
-
-	return ret;
 }
 
 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1887,12 +1878,6 @@ rb_event_index(struct ring_buffer_event *event)
 	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
-{
-	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
-	cpu_buffer->reader_page->read = 0;
-}
-
 static void rb_inc_iter(struct ring_buffer_iter *iter)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -2042,12 +2027,15 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 	 * the tail page would have moved.
 	 */
 	if (ret == RB_PAGE_NORMAL) {
+		struct buffer_page *buffer_tail_page;
+
+		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
 		/*
 		 * If the tail had moved passed next, then we need
 		 * to reset the pointer.
 		 */
-		if (cpu_buffer->tail_page != tail_page &&
-		    cpu_buffer->tail_page != next_page)
+		if (buffer_tail_page != tail_page &&
+		    buffer_tail_page != next_page)
 			rb_head_page_set_normal(cpu_buffer, new_head,
 						next_page,
 						RB_PAGE_HEAD);
@@ -2141,6 +2129,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	local_sub(length, &tail_page->write);
 }
 
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
+
 /*
  * This is the slow path, force gcc not to inline it.
  */
@@ -2153,7 +2143,6 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
-	u64 ts;
 
 	next_page = tail_page;
 
@@ -2227,20 +2216,17 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		}
 	}
 
-	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
-	if (ret) {
-		/*
-		 * Nested commits always have zero deltas, so
-		 * just reread the time stamp
-		 */
-		ts = rb_time_stamp(buffer);
-		next_page->page->time_stamp = ts;
-	}
+	rb_tail_page_update(cpu_buffer, tail_page, next_page);
 
  out_again:
 
 	rb_reset_tail(cpu_buffer, tail, info);
 
+	/* Commit what we have for now. */
+	rb_end_commit(cpu_buffer);
+	/* rb_end_commit() decs committing */
+	local_inc(&cpu_buffer->committing);
+
 	/* fail and let the caller try again */
 	return ERR_PTR(-EAGAIN);
 
@@ -2368,7 +2354,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 	addr = (unsigned long)event;
 	addr &= PAGE_MASK;
 
-	bpage = cpu_buffer->tail_page;
+	bpage = READ_ONCE(cpu_buffer->tail_page);
 
 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
 		unsigned long write_mask =
@@ -2416,7 +2402,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
  again:
 	max_count = cpu_buffer->nr_pages * 100;
 
-	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
+	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
 		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
 			return;
 		if (RB_WARN_ON(cpu_buffer,
@@ -2425,8 +2411,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 		local_set(&cpu_buffer->commit_page->page->commit,
 			  rb_page_write(cpu_buffer->commit_page));
 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
-		cpu_buffer->write_stamp =
-			cpu_buffer->commit_page->page->time_stamp;
+		/* Only update the write stamp if the page has an event */
+		if (rb_page_write(cpu_buffer->commit_page))
+			cpu_buffer->write_stamp =
+				cpu_buffer->commit_page->page->time_stamp;
 		/* add barrier to keep gcc from optimizing too much */
 		barrier();
 	}
@@ -2449,7 +2437,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 	 * and pushed the tail page forward, we will be left with
 	 * a dangling commit that will never go forward.
 	 */
-	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
 		goto again;
 }
 
@@ -2705,7 +2693,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (unlikely(info->add_timestamp))
 		info->length += RB_LEN_TIME_EXTEND;
 
-	tail_page = info->tail_page = cpu_buffer->tail_page;
+	/* Don't let the compiler play games with cpu_buffer->tail_page */
+	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
 	write = local_add_return(info->length, &tail_page->write);
 
 	/* set write to only the index of the write */
@@ -2803,8 +2792,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 	event = __rb_reserve_next(cpu_buffer, &info);
 
-	if (unlikely(PTR_ERR(event) == -EAGAIN))
+	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
+		if (info.add_timestamp)
+			info.length -= RB_LEN_TIME_EXTEND;
 		goto again;
+	}
 
 	if (!event)
 		goto out_fail;
@@ -3626,7 +3618,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 
 	/* Finally update the reader page to the new head */
 	cpu_buffer->reader_page = reader;
-	rb_reset_reader_page(cpu_buffer);
+	cpu_buffer->reader_page->read = 0;
 
 	if (overwrite != cpu_buffer->last_overrun) {
 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3636,6 +3628,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	goto again;
 
  out:
+	/* Update the read_stamp on the first event */
+	if (reader && reader->read == 0)
+		cpu_buffer->read_stamp = reader->page->time_stamp;
+
 	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
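The pattern this patch applies throughout — take a single READ_ONCE() snapshot of cpu_buffer->tail_page, then let one cmpxchg() either win or lose the race against a nested (interrupt-context) writer — can be sketched in userspace with C11 atomics. This is a minimal analogue, not the ring buffer itself: the names (tail, pages, try_advance_tail) are hypothetical, atomic_load_explicit(..., memory_order_relaxed) stands in for the kernel's READ_ONCE(), and atomic_compare_exchange_strong() stands in for cmpxchg().

/*
 * Userspace sketch of the snapshot-then-cmpxchg tail update.
 * Hypothetical names; build with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

struct buffer_page { int id; };

static struct buffer_page pages[2] = { { .id = 0 }, { .id = 1 } };

/* Shared tail pointer, advanced concurrently by nested writers. */
static _Atomic(struct buffer_page *) tail = &pages[0];

static void try_advance_tail(struct buffer_page *expect,
			     struct buffer_page *next)
{
	/*
	 * One explicit load, like READ_ONCE(cpu_buffer->tail_page):
	 * the compiler cannot re-fetch it, so every comparison below
	 * sees the same snapshot.
	 */
	struct buffer_page *snap =
		atomic_load_explicit(&tail, memory_order_relaxed);

	if (snap != expect)
		return;	/* a nested writer already moved the tail */

	/*
	 * Like the patch's "(void)cmpxchg(...)": either we advance the
	 * tail or a concurrent writer already did, so the return value
	 * carries no information worth acting on.
	 */
	(void)atomic_compare_exchange_strong(&tail, &snap, next);
}

int main(void)
{
	try_advance_tail(&pages[0], &pages[1]);
	printf("tail is now page %d\n",
	       atomic_load_explicit(&tail, memory_order_relaxed)->id);
	return 0;
}

Ignoring the cmpxchg() result is what lets rb_tail_page_update() become void: whichever context wins the race, the tail page has moved forward exactly once, which is all the caller needs to know.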
