Diffstat (limited to 'drivers/oprofile/cpu_buffer.c')
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 139
1 file changed, 75 insertions, 64 deletions
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 400f7fcffdbe..e859d23cfc57 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -212,6 +212,59 @@ unsigned long op_cpu_buffer_entries(int cpu)
 		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
 }
 
+static int
+op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
+	    int is_kernel, struct task_struct *task)
+{
+	struct op_entry entry;
+	struct op_sample *sample;
+	unsigned long flags;
+	int size;
+
+	flags = 0;
+
+	if (backtrace)
+		flags |= TRACE_BEGIN;
+
+	/* notice a switch from user->kernel or vice versa */
+	is_kernel = !!is_kernel;
+	if (cpu_buf->last_is_kernel != is_kernel) {
+		cpu_buf->last_is_kernel = is_kernel;
+		flags |= KERNEL_CTX_SWITCH;
+		if (is_kernel)
+			flags |= IS_KERNEL;
+	}
+
+	/* notice a task switch */
+	if (cpu_buf->last_task != task) {
+		cpu_buf->last_task = task;
+		flags |= USER_CTX_SWITCH;
+	}
+
+	if (!flags)
+		/* nothing to do */
+		return 0;
+
+	if (flags & USER_CTX_SWITCH)
+		size = 1;
+	else
+		size = 0;
+
+	sample = op_cpu_buffer_write_reserve(&entry, size);
+	if (!sample)
+		return -ENOMEM;
+
+	sample->eip = ESCAPE_CODE;
+	sample->event = flags;
+
+	if (size)
+		sample->data[0] = (unsigned long)task;
+
+	op_cpu_buffer_write_commit(&entry);
+
+	return 0;
+}
+
 static inline int
 op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
 	      unsigned long pc, unsigned long event)
@@ -229,26 +282,18 @@ op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
 	return op_cpu_buffer_write_commit(&entry);
 }
 
-static inline int
-add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
-{
-	return op_add_sample(buffer, ESCAPE_CODE, value);
-}
-
-/* This must be safe from any context. It's safe writing here
- * because of the head/tail separation of the writer and reader
- * of the CPU buffer.
+/*
+ * This must be safe from any context.
  *
  * is_kernel is needed because on some architectures you cannot
  * tell if you are in kernel or user space simply by looking at
  * pc. We tag this in the buffer by generating kernel enter/exit
  * events whenever is_kernel changes
  */
-static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
-		      int is_kernel, unsigned long event)
+static int
+log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
+	   unsigned long backtrace, int is_kernel, unsigned long event)
 {
-	struct task_struct *task;
-
 	cpu_buf->sample_received++;
 
 	if (pc == ESCAPE_CODE) {
@@ -256,23 +301,8 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
 		return 0;
 	}
 
-	is_kernel = !!is_kernel;
-
-	task = current;
-
-	/* notice a switch from user->kernel or vice versa */
-	if (cpu_buf->last_is_kernel != is_kernel) {
-		cpu_buf->last_is_kernel = is_kernel;
-		if (add_code(cpu_buf, is_kernel))
-			goto fail;
-	}
-
-	/* notice a task switch */
-	if (cpu_buf->last_task != task) {
-		cpu_buf->last_task = task;
-		if (add_code(cpu_buf, (unsigned long)task))
-			goto fail;
-	}
+	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
+		goto fail;
 
 	if (op_add_sample(cpu_buf, pc, event))
 		goto fail;
@@ -286,7 +316,6 @@ fail:
 
 static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
-	add_code(cpu_buf, CPU_TRACE_BEGIN);
 	cpu_buf->tracing = 1;
 }
 
@@ -300,21 +329,21 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
 			  unsigned long event, int is_kernel)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-
-	if (!oprofile_backtrace_depth) {
-		log_sample(cpu_buf, pc, is_kernel, event);
-		return;
-	}
-
-	oprofile_begin_trace(cpu_buf);
+	unsigned long backtrace = oprofile_backtrace_depth;
 
 	/*
	 * if log_sample() fail we can't backtrace since we lost the
	 * source of this event
	 */
-	if (log_sample(cpu_buf, pc, is_kernel, event))
-		oprofile_ops.backtrace(regs, oprofile_backtrace_depth);
-
+	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
+		/* failed */
+		return;
+
+	if (!backtrace)
+		return;
+
+	oprofile_begin_trace(cpu_buf);
+	oprofile_ops.backtrace(regs, backtrace);
 	oprofile_end_trace(cpu_buf);
 }
 
@@ -339,29 +368,14 @@ void oprofile_add_ibs_sample(struct pt_regs * const regs,
 {
 	int is_kernel = !user_mode(regs);
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-	struct task_struct *task;
 	int fail = 0;
 
 	cpu_buf->sample_received++;
 
-	/* notice a switch from user->kernel or vice versa */
-	if (cpu_buf->last_is_kernel != is_kernel) {
-		if (add_code(cpu_buf, is_kernel))
-			goto fail;
-		cpu_buf->last_is_kernel = is_kernel;
-	}
+	/* backtraces disabled for ibs */
+	fail = fail || op_add_code(cpu_buf, 0, is_kernel, current);
 
-	/* notice a task switch */
-	if (!is_kernel) {
-		task = current;
-		if (cpu_buf->last_task != task) {
-			if (add_code(cpu_buf, (unsigned long)task))
-				goto fail;
-			cpu_buf->last_task = task;
-		}
-	}
-
-	fail = fail || add_code(cpu_buf, ibs_code);
+	fail = fail || op_add_sample(cpu_buf, ESCAPE_CODE,   ibs_code);
 	fail = fail || op_add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
 	fail = fail || op_add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
 	fail = fail || op_add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
@@ -372,11 +386,8 @@ void oprofile_add_ibs_sample(struct pt_regs * const regs,
 		fail = fail || op_add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
 	}
 
-	if (!fail)
-		return;
-
-fail:
-	cpu_buf->sample_lost_overflow++;
+	if (fail)
+		cpu_buf->sample_lost_overflow++;
 }
 
 #endif
@@ -384,7 +395,7 @@ fail:
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
-	log_sample(cpu_buf, pc, is_kernel, event);
+	log_sample(cpu_buf, pc, 0, is_kernel, event);
 }
 
 void oprofile_add_trace(unsigned long pc)
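
For readers following the refactoring: the patch folds what used to be two separate add_code() records (one for a user/kernel transition, one for a task switch) into a single ESCAPE_CODE sample whose event word carries flag bits, with the task pointer appended as data[0] only when a task switch occurred. The sketch below is a minimal, stand-alone user-space model of that flag packing, not kernel code: the flag values are illustrative placeholders (the real definitions live in the oprofile headers), and the ring-buffer reserve/commit step is replaced by simply returning the flags word.

/*
 * Minimal user-space model of the flag packing done by op_add_code()
 * in the patch above.  The flag values are placeholders chosen for
 * illustration only.  Build with:  cc -Wall -o flags_demo flags_demo.c
 */
#include <stdio.h>

#define IS_KERNEL		(1UL << 0)	/* placeholder value */
#define TRACE_BEGIN		(1UL << 1)	/* placeholder value */
#define KERNEL_CTX_SWITCH	(1UL << 2)	/* placeholder value */
#define USER_CTX_SWITCH		(1UL << 3)	/* placeholder value */

struct cpu_buf_state {
	int last_is_kernel;
	const void *last_task;
};

/*
 * Returns the flags word a single ESCAPE_CODE sample would carry, or 0
 * if no escape entry needs to be written at all.  In the kernel code a
 * set USER_CTX_SWITCH additionally appends the task pointer as
 * sample->data[0].
 */
static unsigned long pack_flags(struct cpu_buf_state *s, int backtrace,
				int is_kernel, const void *task)
{
	unsigned long flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	is_kernel = !!is_kernel;
	if (s->last_is_kernel != is_kernel) {	/* user<->kernel transition */
		s->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	if (s->last_task != task) {		/* task switch */
		s->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	return flags;
}

int main(void)
{
	struct cpu_buf_state s = { .last_is_kernel = 0, .last_task = NULL };
	int task_a, task_b;

	/* task A sampled in user mode: only a task switch is recorded */
	printf("flags=%#lx\n", pack_flags(&s, 0, 0, &task_a));
	/* same task enters the kernel: KERNEL_CTX_SWITCH plus IS_KERNEL */
	printf("flags=%#lx\n", pack_flags(&s, 0, 1, &task_a));
	/* nothing changed: 0 means no escape entry would be written */
	printf("flags=%#lx\n", pack_flags(&s, 0, 1, &task_a));
	/* switch to task B while still in the kernel */
	printf("flags=%#lx\n", pack_flags(&s, 0, 1, &task_b));
	return 0;
}

A zero flags word means no escape entry is written at all, which is why log_sample() in the patch no longer needs the old add_code() helper and why oprofile_begin_trace() no longer emits a CPU_TRACE_BEGIN record of its own.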
