/*
 * fs/logfs/dev_bdev.c - Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

static void request_complete(struct bio *bio, int err)
{
        complete((struct completion *)bio->bi_private);
}

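/*
 * Synchronously read or write a single page.  The bio and bio_vec
 * live on the stack; the caller sleeps until request_complete()
 * signals the completion.
 */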
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
        struct bio bio;
        struct bio_vec bio_vec;
        struct completion complete;

        bio_init(&bio);
        bio.bi_io_vec = &bio_vec;
        bio_vec.bv_page = page;
        bio_vec.bv_len = PAGE_SIZE;
        bio_vec.bv_offset = 0;
        bio.bi_vcnt = 1;
        bio.bi_idx = 0;
        bio.bi_size = PAGE_SIZE;
        bio.bi_bdev = bdev;
        bio.bi_sector = page->index * (PAGE_SIZE >> 9);
        init_completion(&complete);
        bio.bi_private = &complete;
        bio.bi_end_io = request_complete;

        submit_bio(rw, &bio);
        generic_unplug_device(bdev_get_queue(bdev));
        wait_for_completion(&complete);
        return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}

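/*
 * Filler/readpage for the logfs mapping inode: read the page from the
 * block device synchronously and set the page flags accordingly.
 */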
static int bdev_readpage(void *_sb, struct page *page)
{
        struct super_block *sb = _sb;
        struct block_device *bdev = logfs_super(sb)->s_bdev;
        int err;

        err = sync_request(page, bdev, READ);
        if (err) {
                ClearPageUptodate(page);
                SetPageError(page);
        } else {
                SetPageUptodate(page);
                ClearPageError(page);
        }
        unlock_page(page);
        return err;
}

static DECLARE_WAIT_QUEUE_HEAD(wq);

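/*
 * Completion handler for __bdev_writeseg().  Walks the bio_vec array
 * backwards, ends writeback and drops the reference find_lock_page()
 * took on every page, then wakes bdev_sync() waiters once the last
 * pending write is done.
 */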
static void writeseg_end_io(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);
        struct page *page;

        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
        BUG_ON(bio->bi_vcnt == 0);
        do {
                page = bvec->bv_page;
                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                end_page_writeback(page);
                page_cache_release(page);
        } while (bvec >= bio->bi_io_vec);
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
}

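/*
 * Write nr_pages pages from the mapping inode to the device, starting
 * at byte offset ofs.  The block layer cannot split bios, so the range
 * is submitted in chunks of at most max_pages pages per bio.
 */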
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        struct bio *bio;
        struct page *page;
        struct request_queue *q = bdev_get_queue(sb->s_bdev);
        unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
        int i;

        if (max_pages > BIO_MAX_PAGES)
                max_pages = BIO_MAX_PAGES;
        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);

        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
                        bio->bi_idx = 0;
                        bio->bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
                        bio->bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = writeseg_end_io;
                        atomic_inc(&super->s_pending_writes);
                        submit_bio(WRITE, bio);

                        ofs += i * PAGE_SIZE;
                        index += i;
                        nr_pages -= i;
                        i = 0;

                        bio = bio_alloc(GFP_NOFS, max_pages);
                        BUG_ON(!bio);
                }
                page = find_lock_page(mapping, index + i);
                BUG_ON(!page);
                bio->bi_io_vec[i].bv_page = page;
                bio->bi_io_vec[i].bv_len = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;

                BUG_ON(PageWriteback(page));
                set_page_writeback(page);
                unlock_page(page);
        }
        bio->bi_vcnt = nr_pages;
        bio->bi_idx = 0;
        bio->bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
        bio->bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = writeseg_end_io;
        atomic_inc(&super->s_pending_writes);
        submit_bio(WRITE, bio);
        return 0;
}

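/*
 * Expand [ofs, ofs + len) to page boundaries, write it out via
 * __bdev_writeseg() and unplug the queue so the I/O actually starts.
 */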
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
        struct logfs_super *super = logfs_super(sb);
        int head;

        BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

        if (len == 0) {
                /* This can happen when the object fits perfectly into a
                 * segment, the segment gets written out by sync and is
                 * subsequently closed.
                 */
                return;
        }
        head = ofs & (PAGE_SIZE - 1);
        if (head) {
                ofs -= head;
                len += head;
        }
        len = PAGE_ALIGN(len);
        __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
        generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
}

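/*
 * Write completion handler for do_erase().  Unlike writeseg_end_io()
 * there are no per-page references or writeback bits to clean up,
 * since every bio_vec points at the shared erase page.
 */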
static void erase_end_io(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);

        BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
        BUG_ON(err);
        BUG_ON(bio->bi_vcnt == 0);
        bio_put(bio);
        if (atomic_dec_and_test(&super->s_pending_writes))
                wake_up(&wq);
}

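/*
 * "Erase" a byte range by overwriting it with the contents of the
 * shared erase page (super->s_erase_page).  Mirrors __bdev_writeseg(),
 * including the chunking into bios of at most max_pages pages.
 */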
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct bio *bio;
        struct request_queue *q = bdev_get_queue(sb->s_bdev);
        unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
        int i;

        if (max_pages > BIO_MAX_PAGES)
                max_pages = BIO_MAX_PAGES;
        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);

        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
                        bio->bi_idx = 0;
                        bio->bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
                        bio->bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = erase_end_io;
                        atomic_inc(&super->s_pending_writes);
                        submit_bio(WRITE, bio);

                        ofs += i * PAGE_SIZE;
                        index += i;
                        nr_pages -= i;
                        i = 0;

                        bio = bio_alloc(GFP_NOFS, max_pages);
                        BUG_ON(!bio);
                }
                bio->bi_io_vec[i].bv_page = super->s_erase_page;
                bio->bi_io_vec[i].bv_len = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;
        }
        bio->bi_vcnt = nr_pages;
        bio->bi_idx = 0;
        bio->bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
        bio->bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = erase_end_io;
        atomic_inc(&super->s_pending_writes);
        submit_bio(WRITE, bio);
        return 0;
}

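/*
 * Erase callback for block devices.  A block device cannot truly be
 * erased, so the range is only overwritten with the erase page when
 * the caller insists on it (ensure_write), as the journal does.
 */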
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
                int ensure_write)
{
        struct logfs_super *super = logfs_super(sb);

        BUG_ON(to & (PAGE_SIZE - 1));
        BUG_ON(len & (PAGE_SIZE - 1));

        if (super->s_flags & LOGFS_SB_FLAG_RO)
                return -EROFS;

        if (ensure_write) {
                /*
                 * The object store doesn't care whether erases happen or not.
                 * But for the journal they are required.  Otherwise a scan
                 * can find an old commit entry and assume it is the current
                 * one, travelling back in time.
                 */
                do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
        }

        return 0;
}

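/* Wait until all writes submitted by writeseg/erase have completed. */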
static void bdev_sync(struct super_block *sb)
{
        struct logfs_super *super = logfs_super(sb);

        wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = bdev_readpage;

        *ofs = 0;
        return read_cache_page(mapping, 0, filler, sb);
}

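/*
 * The last superblock candidate sits in the last full 4KiB block of
 * the device; read it through the mapping inode's page cache.
 */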
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        filler_t *filler = bdev_readpage;
        u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
        pgoff_t index = pos >> PAGE_SHIFT;

        *ofs = pos;
        return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
        struct block_device *bdev = logfs_super(sb)->s_bdev;

        /* Nothing special to do for block devices. */
        return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct super_block *sb)
{
        close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
}

static const struct logfs_device_ops bd_devops = {
        .find_first_sb  = bdev_find_first_sb,
        .find_last_sb   = bdev_find_last_sb,
        .write_sb       = bdev_write_sb,
        .readpage       = bdev_readpage,
        .writeseg       = bdev_writeseg,
        .erase          = bdev_erase,
        .sync           = bdev_sync,
        .put_device     = bdev_put_device,
};

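/*
 * Mount entry point for block devices.  If the device turns out to be
 * an mtdblock device, hand over to the MTD backend so the flash is
 * driven natively; otherwise mount through bd_devops.
 */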
int logfs_get_sb_bdev(struct file_system_type *type, int flags,
                const char *devname, struct vfsmount *mnt)
{
        struct block_device *bdev;

        bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
                int mtdnr = MINOR(bdev->bd_dev);
                close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
                return logfs_get_sb_mtd(type, flags, mtdnr, mnt);
        }

        return logfs_get_sb_device(type, flags, NULL, bdev, &bd_devops, mnt);
}