author     Steven Rostedt <srostedt@redhat.com>    2009-05-06 10:26:45 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2009-05-06 12:49:19 -0400
commit     8e7abf1c62941ebb7a1416cbc62392c8a0902625 (patch)
tree       72b62a8ce2cecee2ed0cff586fb8bed10cf0eb2d
parent     35cf723e99c0e26ddf51f037dffaa4ff2c2c9106 (diff)
ring-buffer: remove unneeded conditional in rb_reserve_next
The code in __rb_reserve_next checks on page overflow if it is the
original committer and then resets the page back to the original
setting.  Although this is fine, and the code is correct, it is a bit
fragile.  Some experimental work I did breaks it easily.

The better and more robust solution is to have all committers that
overflow the page simply subtract what they added.

[ Impact: more robust ring buffer accounting ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
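To see the accounting in action, here is a minimal userspace sketch of
the new back-out scheme.  It is not the kernel code: the local_t write
counter is modeled as a plain long, BUF_PAGE_SIZE is an assumed 4096,
and reserve()/backout() are hypothetical stand-ins for the
local_add_return()/local_sub() steps in __rb_reserve_next.

#include <assert.h>
#include <stdio.h>

#define BUF_PAGE_SIZE 4096	/* assumed stand-in for the page payload size */

static long page_write;		/* models the local_t tail_page->write */

/* Every committer, nested or not, adds its length up front
 * (models the local_add_return() in __rb_reserve_next). */
static long reserve(long length)
{
	long tail = page_write;
	page_write += length;
	return tail;
}

/* New back-out: each committer subtracts exactly what it added
 * (models local_sub(length, &tail_page->write)).  The old code
 * instead did "if (tail <= BUF_PAGE_SIZE) write = tail;", which
 * only the original committer could apply safely. */
static void backout(long length)
{
	page_write -= length;
}

int main(void)
{
	page_write = BUF_PAGE_SIZE - 50;

	/* An event overflows the page, then an interrupting (nested)
	 * event overflows it further; both must back out. */
	long tail_outer = reserve(100);	/* 4046: still within the page */
	long tail_inner = reserve(200);	/* 4146: already past the page */

	backout(200);	/* inner committer backs out first */
	backout(100);	/* then the outer one */

	/* The write index lands back at its pre-reservation value,
	 * with no committer needing to know if it was "original". */
	assert(page_write == BUF_PAGE_SIZE - 50);
	printf("tails %ld/%ld, write restored to %ld\n",
	       tail_outer, tail_inner, page_write);
	return 0;
}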
-rw-r--r--  kernel/trace/ring_buffer.c  | 8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 424129eb20a..03ed52b67db 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1290,9 +1290,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		rb_event_set_padding(event);
 	}
 
-	if (tail <= BUF_PAGE_SIZE)
-		/* Set the write back to the previous setting */
-		local_set(&tail_page->write, tail);
+	/* Set the write back to the previous setting */
+	local_sub(length, &tail_page->write);
 
 	/*
 	 * If this was a commit entry that failed,
@@ -1311,8 +1310,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
  out_reset:
 	/* reset write */
-	if (tail <= BUF_PAGE_SIZE)
-		local_set(&tail_page->write, tail);
+	local_sub(length, &tail_page->write);
 
 	if (likely(lock_taken))
 		__raw_spin_unlock(&cpu_buffer->lock);
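A note on the primitive, as I read it: local_sub() operates on a
local_t (include/asm-generic/local.h), which is atomic with respect to
interrupts on the owning CPU.  Since ring-buffer commits nest only via
interrupts on the same CPU, each nested committer can subtract exactly
the length it reserved without knowing whether it was the original
committer; once every level has backed out, the write index is back at
its pre-reservation value.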