author	Steven Whitehouse <swhiteho@redhat.com>	2009-06-25 16:30:26 +0100
committer	Steven Whitehouse <swhiteho@redhat.com>	2009-07-30 10:52:14 +0100
commit	2163b1e616c41c286f5ab79912671cd4bf52057c (patch)
tree	5f7170d724c69e0f51857367b5de5a2c0e9bdbae /fs
parent	4be3bd7849165e7efa6b0b35a23d6a3598d97465 (diff)
GFS2: Shrink the shrinker
This patch removes some of the special cases that the shrinker was trying to deal with. As a result we leave fewer items on the list, and none at all which cannot be demoted. This makes the list scanning more efficient and solves some issues seen with large numbers of inodes.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/gfs2/glock.c	23
1 file changed, 5 insertions(+), 18 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 297421c0427..fdb796c4f94 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1300,7 +1300,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 	struct gfs2_glock *gl;
 	int may_demote;
 	int nr_skipped = 0;
-	int got_ref = 0;
 	LIST_HEAD(skipped);
 
 	if (nr == 0)
@@ -1318,7 +1317,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 			gfs2_glock_hold(gl);
-			got_ref = 1;
 			spin_unlock(&lru_lock);
 			spin_lock(&gl->gl_spin);
 			may_demote = demote_ok(gl);
@@ -1327,25 +1325,14 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 			if (may_demote) {
 				handle_callback(gl, LM_ST_UNLOCKED, 0);
 				nr--;
-				if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-					gfs2_glock_put(gl);
-				got_ref = 0;
 			}
+			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+				gfs2_glock_put(gl);
 			spin_lock(&lru_lock);
-			if (may_demote)
-				continue;
-		}
-		if (list_empty(&gl->gl_lru) &&
-		    (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
-			nr_skipped++;
-			list_add(&gl->gl_lru, &skipped);
-		}
-		if (got_ref) {
-			spin_unlock(&lru_lock);
-			gfs2_glock_put(gl);
-			spin_lock(&lru_lock);
-			got_ref = 0;
+			continue;
 		}
+		nr_skipped++;
+		list_add(&gl->gl_lru, &skipped);
 	}
 	list_splice(&skipped, &lru_list);
 	atomic_add(nr_skipped, &lru_count);
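
For reference, the per-glock handling in gfs2_shrink_glock_memory() after this patch reads roughly as follows. This is a sketch assembled from the hunks above, not a verbatim copy of the file; code that falls between the hunks is elided.

	/* Test for being demotable */
	if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		gfs2_glock_hold(gl);
		spin_unlock(&lru_lock);
		spin_lock(&gl->gl_spin);
		may_demote = demote_ok(gl);
		/* ... lines between the second and third hunks elided ... */
		if (may_demote) {
			handle_callback(gl, LM_ST_UNLOCKED, 0);
			nr--;
		}
		/* Work is queued whether or not the glock could be demoted;
		   if it was already queued, drop the reference taken above */
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
		spin_lock(&lru_lock);
		continue;
	}
	/* GLF_LOCK was already held: skip this glock rather than waiting */
	nr_skipped++;
	list_add(&gl->gl_lru, &skipped);

In effect the reference taken by gfs2_glock_hold() is always either handed to the queued work or dropped straight away, which is why the got_ref bookkeeping and the extra skip conditions can go.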