author     Steven Rostedt <srostedt@redhat.com>  2009-02-12 13:19:48 -0500
committer  Steven Rostedt <srostedt@redhat.com>  2009-02-12 13:39:46 -0500
commit     45141d4667d208421ca787a3301542b6a5e0b112 (patch)
tree       7f1cedbd431b9b01345730e338c179e6bb8029ad /kernel/trace/ring_buffer.c
parent     e7669b8e329255bbcb40af65b38e342825d97a46 (diff)
ring-buffer: rename label out_unlock to out_reset
Impact: clean up

While reviewing the ring buffer code, I thought I saw a bug with

	if (!__raw_spin_trylock(&cpu_buffer->lock))
		goto out_unlock;

But I forgot that we use a variable "lock_taken" that is set if the
spinlock is taken, and we only unlock it if that variable is set. To
avoid further confusion from other reviewers, this patch renames the
label out_unlock to out_reset, which is the more appropriate name.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
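As an aside for readers unfamiliar with the idiom, below is a minimal
userspace sketch of the "lock_taken" pattern the message describes. It
uses a pthread spinlock in place of the kernel's raw spinlock, and the
names reserve_slot, must_not_spin, and fail_under_lock are hypothetical,
invented for illustration; only the shape of the error path mirrors the
code in this patch. Compile with gcc -pthread.

	#include <stdbool.h>
	#include <stdio.h>
	#include <pthread.h>

	static pthread_spinlock_t lock;

	/*
	 * Sketch of the idiom: one error label serves both a failed
	 * trylock and failures detected while holding the lock, so the
	 * label must only unlock when lock_taken says the lock is held.
	 */
	static int reserve_slot(bool must_not_spin, bool fail_under_lock)
	{
		bool lock_taken = false;

		if (must_not_spin) {
			/* Cannot afford to spin here (the NMI case above). */
			if (pthread_spin_trylock(&lock) != 0)
				goto out_reset;		/* lock NOT held here */
		} else
			pthread_spin_lock(&lock);
		lock_taken = true;

		/* Checks made under the lock can bail to the same label. */
		if (fail_under_lock)
			goto out_reset;			/* lock IS held here */

		pthread_spin_unlock(&lock);
		return 0;

	 out_reset:
		/*
		 * "out_reset", not "out_unlock": unlocking is conditional
		 * on lock_taken; the real job of this path is undoing
		 * partial state.
		 */
		if (lock_taken)
			pthread_spin_unlock(&lock);
		return -1;
	}

	int main(void)
	{
		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
		printf("normal path:     %d\n", reserve_slot(false, false));
		printf("fail under lock: %d\n", reserve_slot(true, true));
		pthread_spin_destroy(&lock);
		return 0;
	}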
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index dc18b5b9ccb..f39d7e9a430 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1017,7 +1017,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 */
 	if (unlikely(in_nmi())) {
 		if (!__raw_spin_trylock(&cpu_buffer->lock))
-			goto out_unlock;
+			goto out_reset;
 	} else
 		__raw_spin_lock(&cpu_buffer->lock);
@@ -1030,7 +1030,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* we grabbed the lock before incrementing */
 	if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
-		goto out_unlock;
+		goto out_reset;
 
 	/*
 	 * If for some reason, we had an interrupt storm that made
@@ -1039,12 +1039,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 */
 	if (unlikely(next_page == commit_page)) {
 		WARN_ON_ONCE(1);
-		goto out_unlock;
+		goto out_reset;
 	}
 
 	if (next_page == head_page) {
 		if (!(buffer->flags & RB_FL_OVERWRITE))
-			goto out_unlock;
+			goto out_reset;
 
 		/* tail_page has not moved yet? */
 		if (tail_page == cpu_buffer->tail_page) {
@@ -1118,7 +1118,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	return event;
 
- out_unlock:
+ out_reset:
 	/* reset write */
 	if (tail <= BUF_PAGE_SIZE)
 		local_set(&tail_page->write, tail);