path: root/fs/btrfs/extent_io.h
author    Chris Mason <chris.mason@oracle.com>    2009-02-04 09:25:08 -0500
committer Chris Mason <chris.mason@oracle.com>    2009-02-04 09:25:08 -0500
commit    b4ce94de9b4d64e8ab3cf155d13653c666e22b9b (patch)
tree      ebc44a9554a50b495b091cb0979d79fd29e50fe7 /fs/btrfs/extent_io.h
parent    c487685d7c18a8481900755aa5c56a7a74193101 (diff)
Btrfs: Change btree locking to use explicit blocking points
Most of the btrfs metadata operations can be protected by a spinlock, but some operations still need to schedule.

So far, btrfs has been using a mutex along with a trylock loop; most of the time it is able to avoid going for the full mutex, so the trylock loop is a big performance gain.

This commit is step one for getting rid of the blocking locks entirely. btrfs_tree_lock takes a spinlock, and the code explicitly switches to a blocking lock when it starts an operation that can schedule. We'll be able to get rid of the blocking locks in smaller pieces over time. Tracing allows us to find the most common cause of blocking, so we can start with the hot spots first.

The basic idea is:

btrfs_tree_lock() returns with the spin lock held.

btrfs_set_lock_blocking() sets the EXTENT_BUFFER_BLOCKING bit in the extent buffer flags, and then drops the spin lock. The buffer is still considered locked by all of the btrfs code.

If btrfs_tree_lock gets the spinlock but finds the blocking bit set, it drops the spin lock and waits on a wait queue for the blocking bit to go away.

Much of the code that needs to set the blocking bit finishes without actually blocking a good percentage of the time, so an adaptive spin is still used against the blocking bit to avoid very high context switch rates.

btrfs_clear_lock_blocking() clears the blocking bit and returns with the spinlock held again.

btrfs_tree_unlock() can be called on either blocking or spinning locks; it does the right thing based on the blocking bit.

ctree.c has a helper function to set/clear all the locked buffers in a path as blocking.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
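The transitions described above map to a small amount of lock code. The following is a minimal sketch only, assuming the eb->lock spinlock, eb->bflags field, eb->lock_wq wait queue, and EXTENT_BUFFER_BLOCKING bit added by this patch; the function names here are illustrative and the real btrfs code adds an adaptive spin and other details not shown.

/*
 * Illustrative sketch of the spinning <-> blocking transitions
 * described in the commit message; not the actual btrfs code.
 */
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include "extent_io.h"	/* struct extent_buffer fields from this patch */

/* lock the buffer; returns with eb->lock (the spinlock) held */
static void tree_lock_sketch(struct extent_buffer *eb)
{
	spin_lock(&eb->lock);
	while (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
		/*
		 * Another task holds the lock in blocking mode: drop
		 * the spinlock and sleep until the bit clears.  The
		 * real code spins adaptively on the bit first, since
		 * holders often finish without actually blocking.
		 */
		spin_unlock(&eb->lock);
		wait_event(eb->lock_wq,
			   !test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags));
		spin_lock(&eb->lock);
	}
}

/* the holder is about to schedule: keep the logical lock, drop the spinlock */
static void set_lock_blocking_sketch(struct extent_buffer *eb)
{
	set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
	spin_unlock(&eb->lock);
	/* all other btrfs code still treats the buffer as locked */
}

/* the holder is done scheduling: go back to a held spinlock */
static void clear_lock_blocking_sketch(struct extent_buffer *eb)
{
	spin_lock(&eb->lock);
	clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
	smp_mb__after_atomic();	/* publish the cleared bit before waking */
	wake_up(&eb->lock_wq);
}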
Diffstat (limited to 'fs/btrfs/extent_io.h')
-rw-r--r--  fs/btrfs/extent_io.h  16
1 file changed, 14 insertions, 2 deletions
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index e80c6d96b31..1f9df88afbf 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -22,6 +22,10 @@
/* flags for bio submission */
#define EXTENT_BIO_COMPRESSED 1
+/* these are bit numbers for test/set bit */
+#define EXTENT_BUFFER_UPTODATE 0
+#define EXTENT_BUFFER_BLOCKING 1
+
/*
* page->private values. Every page that is controlled by the extent
* map has page->private set to one.
@@ -95,11 +99,19 @@ struct extent_buffer {
unsigned long map_start;
unsigned long map_len;
struct page *first_page;
+ unsigned long bflags;
atomic_t refs;
- int flags;
struct list_head leak_list;
struct rb_node rb_node;
- struct mutex mutex;
+
+ /* the spinlock is used to protect most operations */
+ spinlock_t lock;
+
+ /*
+ * when we keep the lock held while blocking, waiters go onto
+ * the wq
+ */
+ wait_queue_head_t lock_wq;
};
struct extent_map_tree;
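
For context, a caller using the new fields above would follow the protocol from the commit message roughly as below. btrfs_tree_lock, btrfs_set_lock_blocking, btrfs_clear_lock_blocking, and btrfs_tree_unlock are the entry points named in the commit message; do_work_that_schedules() is a hypothetical placeholder for an operation that may sleep.

	btrfs_tree_lock(eb);		/* returns with the spinlock held */
	/* ... short, non-sleeping work under the spinlock ... */
	btrfs_set_lock_blocking(eb);	/* drop the spinlock, stay "locked" */
	do_work_that_schedules(eb);	/* hypothetical: may sleep or do I/O */
	btrfs_clear_lock_blocking(eb);	/* reacquire the spinlock */
	btrfs_tree_unlock(eb);		/* correct for either lock mode */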