Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/error.c | 1
-rw-r--r--  fs/9p/trans_sock.c | 3
-rw-r--r--  fs/9p/v9fs.c | 6
-rw-r--r--  fs/9p/vfs_file.c | 1
-rw-r--r--  fs/9p/vfs_inode.c | 10
-rw-r--r--  fs/Kconfig | 77
-rw-r--r--  fs/Makefile | 2
-rw-r--r--  fs/adfs/adfs.h | 1
-rw-r--r--  fs/affs/file.c | 18
-rw-r--r--  fs/affs/super.c | 16
-rw-r--r--  fs/afs/callback.c | 1
-rw-r--r--  fs/afs/file.c | 33
-rw-r--r--  fs/afs/inode.c | 2
-rw-r--r--  fs/afs/internal.h | 1
-rw-r--r--  fs/aio.c | 75
-rw-r--r--  fs/autofs/waitq.c | 6
-rw-r--r--  fs/autofs4/inode.c | 6
-rw-r--r--  fs/autofs4/waitq.c | 6
-rw-r--r--  fs/befs/attribute.c | 117
-rw-r--r--  fs/befs/linuxvfs.c | 20
-rw-r--r--  fs/binfmt_elf.c | 3
-rw-r--r--  fs/binfmt_elf_fdpic.c | 15
-rw-r--r--  fs/binfmt_misc.c | 2
-rw-r--r--  fs/buffer.c | 6
-rw-r--r--  fs/cifs/CHANGES | 2
-rw-r--r--  fs/cifs/asn1.c | 3
-rw-r--r--  fs/cifs/cifs_unicode.c | 5
-rw-r--r--  fs/cifs/cifsfs.c | 11
-rw-r--r--  fs/cifs/cifsproto.h | 2
-rw-r--r--  fs/cifs/cifssmb.c | 2
-rw-r--r--  fs/cifs/cn_cifs.h | 37
-rw-r--r--  fs/cifs/connect.c | 82
-rw-r--r--  fs/cifs/inode.c | 2
-rw-r--r--  fs/cifs/link.c | 23
-rw-r--r--  fs/cifs/misc.c | 15
-rw-r--r--  fs/cifs/xattr.c | 15
-rw-r--r--  fs/compat_ioctl.c | 325
-rw-r--r--  fs/dcache.c | 10
-rw-r--r--  fs/devfs/base.c | 6
-rw-r--r--  fs/dquot.c | 19
-rw-r--r--  fs/exec.c | 20
-rw-r--r--  fs/ext2/CHANGES | 157
-rw-r--r--  fs/ext2/acl.c | 6
-rw-r--r--  fs/ext2/balloc.c | 73
-rw-r--r--  fs/ext2/ialloc.c | 40
-rw-r--r--  fs/ext2/super.c | 18
-rw-r--r--  fs/ext3/balloc.c | 73
-rw-r--r--  fs/ext3/ialloc.c | 41
-rw-r--r--  fs/ext3/inode.c | 4
-rw-r--r--  fs/ext3/super.c | 17
-rw-r--r--  fs/fat/inode.c | 11
-rw-r--r--  fs/file_table.c | 4
-rw-r--r--  fs/freevxfs/vxfs_bmap.c | 1
-rw-r--r--  fs/freevxfs/vxfs_extern.h | 4
-rw-r--r--  fs/freevxfs/vxfs_inode.c | 11
-rw-r--r--  fs/freevxfs/vxfs_olt.c | 1
-rw-r--r--  fs/fs-writeback.c | 7
-rw-r--r--  fs/fuse/dev.c | 7
-rw-r--r--  fs/fuse/dir.c | 172
-rw-r--r--  fs/fuse/file.c | 132
-rw-r--r--  fs/fuse/fuse_i.h | 17
-rw-r--r--  fs/hfs/hfs_fs.h | 1
-rw-r--r--  fs/hfs/inode.c | 1
-rw-r--r--  fs/hfsplus/bnode.c | 1
-rw-r--r--  fs/hfsplus/dir.c | 1
-rw-r--r--  fs/hfsplus/extents.c | 1
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 1
-rw-r--r--  fs/hfsplus/inode.c | 1
-rw-r--r--  fs/hfsplus/super.c | 1
-rw-r--r--  fs/hfsplus/wrapper.c | 1
-rw-r--r--  fs/hostfs/hostfs_kern.c | 4
-rw-r--r--  fs/hpfs/dnode.c | 8
-rw-r--r--  fs/hpfs/file.c | 7
-rw-r--r--  fs/hpfs/super.c | 10
-rw-r--r--  fs/hugetlbfs/inode.c | 2
-rw-r--r--  fs/inotify.c | 2
-rw-r--r--  fs/ioprio.c | 1
-rw-r--r--  fs/isofs/inode.c | 12
-rw-r--r--  fs/jbd/commit.c | 6
-rw-r--r--  fs/jbd/recovery.c | 4
-rw-r--r--  fs/jbd/transaction.c | 9
-rw-r--r--  fs/jffs/intrep.c | 18
-rw-r--r--  fs/jffs/jffs_fm.c | 1
-rw-r--r--  fs/jffs2/Makefile | 5
-rw-r--r--  fs/jffs2/TODO | 38
-rw-r--r--  fs/jffs2/background.c | 4
-rw-r--r--  fs/jffs2/build.c | 171
-rw-r--r--  fs/jffs2/compr.c | 40
-rw-r--r--  fs/jffs2/compr.h | 12
-rw-r--r--  fs/jffs2/compr_rtime.c | 32
-rw-r--r--  fs/jffs2/compr_rubin.c | 37
-rw-r--r--  fs/jffs2/compr_rubin.h | 6
-rw-r--r--  fs/jffs2/compr_zlib.c | 14
-rw-r--r--  fs/jffs2/comprtest.c | 30
-rw-r--r--  fs/jffs2/debug.c | 705
-rw-r--r--  fs/jffs2/debug.h | 279
-rw-r--r--  fs/jffs2/dir.c | 121
-rw-r--r--  fs/jffs2/erase.c | 37
-rw-r--r--  fs/jffs2/file.c | 29
-rw-r--r--  fs/jffs2/fs.c | 104
-rw-r--r--  fs/jffs2/gc.c | 156
-rw-r--r--  fs/jffs2/histo.h | 2
-rw-r--r--  fs/jffs2/histo_mips.h | 2
-rw-r--r--  fs/jffs2/ioctl.c | 6
-rw-r--r--  fs/jffs2/malloc.c | 84
-rw-r--r--  fs/jffs2/nodelist.c | 1226
-rw-r--r--  fs/jffs2/nodelist.h | 163
-rw-r--r--  fs/jffs2/nodemgmt.c | 471
-rw-r--r--  fs/jffs2/os-linux.h | 47
-rw-r--r--  fs/jffs2/read.c | 17
-rw-r--r--  fs/jffs2/readinode.c | 1153
-rw-r--r--  fs/jffs2/scan.c | 303
-rw-r--r--  fs/jffs2/summary.c | 730
-rw-r--r--  fs/jffs2/summary.h | 183
-rw-r--r--  fs/jffs2/super.c | 24
-rw-r--r--  fs/jffs2/symlink.c | 32
-rw-r--r--  fs/jffs2/wbuf.c | 215
-rw-r--r--  fs/jffs2/write.c | 157
-rw-r--r--  fs/jffs2/writev.c | 35
-rw-r--r--  fs/jfs/namei.c | 3
-rw-r--r--  fs/lockd/clntproc.c | 3
-rw-r--r--  fs/locks.c | 3
-rw-r--r--  fs/mbcache.c | 3
-rw-r--r--  fs/namei.c | 68
-rw-r--r--  fs/namespace.c | 699
-rw-r--r--  fs/ncpfs/ioctl.c | 34
-rw-r--r--  fs/nfs/delegation.c | 3
-rw-r--r--  fs/nfs/inode.c | 15
-rw-r--r--  fs/nfs/nfs4state.c | 9
-rw-r--r--  fs/nfs/unlink.c | 3
-rw-r--r--  fs/nfsd/export.c | 6
-rw-r--r--  fs/nfsd/nfs3xdr.c | 3
-rw-r--r--  fs/nfsd/nfs4xdr.c | 9
-rw-r--r--  fs/nfsd/nfscache.c | 3
-rw-r--r--  fs/nfsd/nfsctl.c | 98
-rw-r--r--  fs/nfsd/nfssvc.c | 80
-rw-r--r--  fs/nfsd/vfs.c | 9
-rw-r--r--  fs/open.c | 33
-rw-r--r--  fs/openpromfs/inode.c | 3
-rw-r--r--  fs/partitions/ibm.c | 15
-rw-r--r--  fs/partitions/ultrix.c | 1
-rw-r--r--  fs/pnode.c | 305
-rw-r--r--  fs/pnode.h | 37
-rw-r--r--  fs/proc/base.c | 62
-rw-r--r--  fs/proc/proc_devtree.c | 57
-rw-r--r--  fs/proc/task_mmu.c | 2
-rw-r--r--  fs/quota.c | 1
-rw-r--r--  fs/reiserfs/file.c | 4
-rw-r--r--  fs/reiserfs/hashes.c | 1
-rw-r--r--  fs/seq_file.c | 12
-rw-r--r--  fs/smbfs/request.c | 3
-rw-r--r--  fs/smbfs/symlink.c | 4
-rw-r--r--  fs/super.c | 3
-rw-r--r--  fs/udf/file.c | 2
-rw-r--r--  fs/udf/udf_sb.h | 3
-rw-r--r--  fs/ufs/super.c | 14
-rw-r--r--  fs/xattr.c | 9
-rw-r--r--  fs/xfs/linux-2.6/kmem.h | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 1
-rw-r--r--  fs/xfs/xfs.h | 7
-rw-r--r--  fs/xfs/xfs_dmapi.h | 1
161 files changed, 6520 insertions, 3681 deletions
diff --git a/fs/9p/error.c b/fs/9p/error.c
index fee5d19179c..834cb179e38 100644
--- a/fs/9p/error.c
+++ b/fs/9p/error.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/jhash.h>
+#include <linux/string.h>
#include "debug.h"
#include "error.h"
diff --git a/fs/9p/trans_sock.c b/fs/9p/trans_sock.c
index 01e26f0013a..a93c2bf94c3 100644
--- a/fs/9p/trans_sock.c
+++ b/fs/9p/trans_sock.c
@@ -269,8 +269,7 @@ static void v9fs_sock_close(struct v9fs_transport *trans)
dprintk(DEBUG_TRANS, "socket closed\n");
}
- if (ts)
- kfree(ts);
+ kfree(ts);
trans->priv = NULL;
}
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 82303f3bf76..418c3743fde 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -266,7 +266,7 @@ v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->remotename = __getname();
if (!v9ses->remotename) {
- putname(v9ses->name);
+ __putname(v9ses->name);
return -ENOMEM;
}
@@ -411,8 +411,8 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
if (v9ses->transport)
v9ses->transport->close(v9ses->transport);
- putname(v9ses->name);
- putname(v9ses->remotename);
+ __putname(v9ses->name);
+ __putname(v9ses->remotename);
}
/**
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index bbc3cc63854..89c849da850 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -32,7 +32,6 @@
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/inet.h>
-#include <linux/version.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <linux/idr.h>
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 2b696ae6655..be7288184fa 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1105,7 +1105,7 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer,
}
}
- putname(link);
+ __putname(link);
return retval;
}
@@ -1129,7 +1129,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
len = v9fs_readlink(dentry, link, strlen(link));
if (len < 0) {
- putname(link);
+ __putname(link);
link = ERR_PTR(len);
} else
link[len] = 0;
@@ -1152,7 +1152,7 @@ static void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void
dprintk(DEBUG_VFS, " %s %s\n", dentry->d_name.name, s);
if (!IS_ERR(s))
- putname(s);
+ __putname(s);
}
/**
@@ -1228,7 +1228,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
FreeMem:
kfree(mistat);
kfree(fcall);
- putname(symname);
+ __putname(symname);
return retval;
}
@@ -1319,7 +1319,7 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
FreeMem:
kfree(mistat);
kfree(fcall);
- putname(symname);
+ __putname(symname);
return retval;
}
diff --git a/fs/Kconfig b/fs/Kconfig
index 01a295232f7..d5255e627b5 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -898,6 +898,7 @@ config AFFS_FS
config HFS_FS
tristate "Apple Macintosh file system support (EXPERIMENTAL)"
depends on EXPERIMENTAL
+ select NLS
help
If you say Y here, you will be able to mount Macintosh-formatted
floppy disks and hard drive partitions with full read-write access.
@@ -1050,6 +1051,19 @@ config JFFS2_FS_WRITEBUFFER
- NOR flash with transparent ECC
- DataFlash
+config JFFS2_SUMMARY
+ bool "JFFS2 summary support (EXPERIMENTAL)"
+ depends on JFFS2_FS && EXPERIMENTAL
+ default n
+ help
+ This feature makes it possible to use summary information
+ for faster filesystem mount.
+
+ The summary information can be inserted into a filesystem image
+ by the utility 'sumtool'.
+
+ If unsure, say 'N'.
+
config JFFS2_COMPRESSION_OPTIONS
bool "Advanced compression options for JFFS2"
depends on JFFS2_FS
@@ -1071,10 +1085,10 @@ config JFFS2_ZLIB
default y
help
Zlib is designed to be a free, general-purpose, legally unencumbered,
- lossless data-compression library for use on virtually any computer
+ lossless data-compression library for use on virtually any computer
hardware and operating system. See <http://www.gzip.org/zlib/> for
further information.
-
+
Say 'Y' if unsure.
config JFFS2_RTIME
@@ -1096,7 +1110,7 @@ choice
default JFFS2_CMODE_PRIORITY
depends on JFFS2_FS
help
- You can set here the default compression mode of JFFS2 from
+ You can set here the default compression mode of JFFS2 from
the available compression modes. Don't touch if unsure.
config JFFS2_CMODE_NONE
@@ -1107,13 +1121,13 @@ config JFFS2_CMODE_NONE
config JFFS2_CMODE_PRIORITY
bool "priority"
help
- Tries the compressors in a predefinied order and chooses the first
+ Tries the compressors in a predefinied order and chooses the first
successful one.
config JFFS2_CMODE_SIZE
bool "size (EXPERIMENTAL)"
help
- Tries all compressors and chooses the one which has the smallest
+ Tries all compressors and chooses the one which has the smallest
result.
endchoice
@@ -1587,9 +1601,10 @@ config CIFS
PC operating systems. The CIFS protocol is fully supported by
file servers such as Windows 2000 (including Windows 2003, NT 4
and Windows XP) as well by Samba (which provides excellent CIFS
- server support for Linux and many other operating systems). Currently
- you must use the smbfs client filesystem to access older SMB servers
- such as Windows 9x and OS/2.
+ server support for Linux and many other operating systems). Limited
+ support for Windows ME and similar servers is provided as well.
+ You must use the smbfs client filesystem to access older SMB servers
+ such as OS/2 and DOS.
The intent of the cifs module is to provide an advanced
network file system client for mounting to CIFS compliant servers,
@@ -1600,7 +1615,7 @@ config CIFS
cifs if running only a (Samba) server. It is possible to enable both
smbfs and cifs (e.g. if you are using CIFS for accessing Windows 2003
and Samba 3 servers, and smbfs for accessing old servers). If you need
- to mount to Samba or Windows 2003 servers from this machine, say Y.
+ to mount to Samba or Windows from this machine, say Y.
config CIFS_STATS
bool "CIFS statistics"
@@ -1609,8 +1624,22 @@ config CIFS_STATS
Enabling this option will cause statistics for each server share
mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
+config CIFS_STATS2
+ bool "CIFS extended statistics"
+ depends on CIFS_STATS
+ help
+ Enabling this option will allow more detailed statistics on SMB
+ request timing to be displayed in /proc/fs/cifs/DebugData and also
+ allow optional logging of slow responses to dmesg (depending on the
+ value of /proc/fs/cifs/cifsFYI, see fs/cifs/README for more details).
+ These additional statistics may have a minor effect on performance
+ and memory utilization.
+
+ Unless you are a developer or are doing network performance analysis
+ or tuning, say N.
+
config CIFS_XATTR
- bool "CIFS extended attributes (EXPERIMENTAL)"
+ bool "CIFS extended attributes"
depends on CIFS
help
Extended attributes are name:value pairs associated with inodes by
@@ -1622,11 +1651,11 @@ config CIFS_XATTR
prefaced by the user namespace prefix. The system namespace
(used by some filesystems to store ACLs) is not supported at
this time.
-
+
If unsure, say N.
config CIFS_POSIX
- bool "CIFS POSIX Extensions (EXPERIMENTAL)"
+ bool "CIFS POSIX Extensions"
depends on CIFS_XATTR
help
Enabling this option will cause the cifs client to attempt to
@@ -1639,10 +1668,28 @@ config CIFS_POSIX
config CIFS_EXPERIMENTAL
bool "CIFS Experimental Features (EXPERIMENTAL)"
- depends on CIFS
+ depends on CIFS && EXPERIMENTAL
+ help
+ Enables cifs features under testing. These features are
+ experimental and currently include support for writepages
+ (multipage writebehind performance improvements) and directory
+ change notification ie fcntl(F_DNOTIFY) as well as some security
+ improvements. Some also depend on setting at runtime the
+ pseudo-file /proc/fs/cifs/Experimental (which is disabled by
+ default). See the file fs/cifs/README for more details.
+
+ If unsure, say N.
+
+config CIFS_UPCALL
+ bool "CIFS Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)"
+ depends on CIFS_EXPERIMENTAL
+ select CONNECTOR
help
- Enables cifs features under testing. These features
- are highly experimental. If unsure, say N.
+ Enables an upcall mechanism for CIFS which will be used to contact
+ userspace helper utilities to provide SPNEGO packaged Kerberos
+ tickets which are needed to mount to certain secure servers
+ (for which more secure Kerberos authentication is required). If
+ unsure, say N.
config NCP_FS
tristate "NCP file system support (to mount NetWare volumes)"
diff --git a/fs/Makefile b/fs/Makefile
index 1972da18627..4c265575907 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -10,7 +10,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \
ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \
attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \
seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \
- ioprio.o
+ ioprio.o pnode.o
obj-$(CONFIG_INOTIFY) += inotify.o
obj-$(CONFIG_EPOLL) += eventpoll.o
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index fd528433de4..f6cd01352cc 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -12,7 +12,6 @@
#define ADFS_NDA_PUBLIC_READ (1 << 5)
#define ADFS_NDA_PUBLIC_WRITE (1 << 6)
-#include <linux/version.h>
#include "dir_f.h"
struct buffer_head;
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 6744924b690..f72fb776ecd 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -22,14 +22,13 @@ static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
-static ssize_t affs_file_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos);
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);
struct file_operations affs_file_operations = {
.llseek = generic_file_llseek,
.read = generic_file_read,
- .write = affs_file_write,
+ .write = generic_file_write,
.mmap = generic_file_mmap,
.open = affs_file_open,
.release = affs_file_release,
@@ -473,21 +472,6 @@ affs_getemptyblk_ino(struct inode *inode, int block)
return ERR_PTR(err);
}
-static ssize_t
-affs_file_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- ssize_t retval;
-
- retval = generic_file_write (file, buf, count, ppos);
- if (retval >0) {
- struct inode *inode = file->f_dentry->d_inode;
- inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
- mark_inode_dirty(inode);
- }
- return retval;
-}
-
static int
affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
{
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 9c3080716c9..aaec015a16e 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -35,8 +35,7 @@ affs_put_super(struct super_block *sb)
mark_buffer_dirty(sbi->s_root_bh);
}
- if (sbi->s_prefix)
- kfree(sbi->s_prefix);
+ kfree(sbi->s_prefix);
affs_free_bitmap(sb);
affs_brelse(sbi->s_root_bh);
kfree(sbi);
@@ -198,10 +197,9 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
*mount_opts |= SF_MUFS;
break;
case Opt_prefix:
- if (*prefix) { /* Free any previous prefix */
- kfree(*prefix);
- *prefix = NULL;
- }
+ /* Free any previous prefix */
+ kfree(*prefix);
+ *prefix = NULL;
*prefix = match_strdup(&args[0]);
if (!*prefix)
return 0;
@@ -462,11 +460,9 @@ got_root:
out_error:
if (root_inode)
iput(root_inode);
- if (sbi->s_bitmap)
- kfree(sbi->s_bitmap);
+ kfree(sbi->s_bitmap);
affs_brelse(root_bh);
- if (sbi->s_prefix)
- kfree(sbi->s_prefix);
+ kfree(sbi->s_prefix);
kfree(sbi);
sb->s_fs_info = NULL;
return -EINVAL;
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 2fd62f89ae0..9cb206e9d4b 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -19,6 +19,7 @@
#include "server.h"
#include "vnode.h"
#include "internal.h"
+#include "cmservice.h"
/*****************************************************************************/
/*
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 4975c9c193d..150b1922792 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -31,24 +31,10 @@ static int afs_file_readpage(struct file *file, struct page *page);
static int afs_file_invalidatepage(struct page *page, unsigned long offset);
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
-static ssize_t afs_file_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off);
-
struct inode_operations afs_file_inode_operations = {
.getattr = afs_inode_getattr,
};
-struct file_operations afs_file_file_operations = {
- .read = generic_file_read,
- .write = afs_file_write,
- .mmap = generic_file_mmap,
-#if 0
- .open = afs_file_open,
- .release = afs_file_release,
- .fsync = afs_file_fsync,
-#endif
-};
-
struct address_space_operations afs_fs_aops = {
.readpage = afs_file_readpage,
.sync_page = block_sync_page,
@@ -59,22 +45,6 @@ struct address_space_operations afs_fs_aops = {
/*****************************************************************************/
/*
- * AFS file write
- */
-static ssize_t afs_file_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off)
-{
- struct afs_vnode *vnode;
-
- vnode = AFS_FS_I(file->f_dentry->d_inode);
- if (vnode->flags & AFS_VNODE_DELETED)
- return -ESTALE;
-
- return -EIO;
-} /* end afs_file_write() */
-
-/*****************************************************************************/
-/*
* deal with notification that a page was read from the cache
*/
#ifdef AFS_CACHING_SUPPORT
@@ -295,8 +265,7 @@ static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
set_page_private(page, 0);
ClearPagePrivate(page);
- if (pageio)
- kfree(pageio);
+ kfree(pageio);
}
_leave(" = 0");
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index c476fde33fb..4ebb30a50ed 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -49,7 +49,7 @@ static int afs_inode_map_status(struct afs_vnode *vnode)
case AFS_FTYPE_FILE:
inode->i_mode = S_IFREG | vnode->status.mode;
inode->i_op = &afs_file_inode_operations;
- inode->i_fop = &afs_file_file_operations;
+ inode->i_fop = &generic_ro_fops;
break;
case AFS_FTYPE_DIR:
inode->i_mode = S_IFDIR | vnode->status.mode;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index f09860b45c1..ab8f87c6631 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -71,7 +71,6 @@ extern struct file_operations afs_dir_file_operations;
*/
extern struct address_space_operations afs_fs_aops;
extern struct inode_operations afs_file_inode_operations;
-extern struct file_operations afs_file_file_operations;
#ifdef AFS_CACHING_SUPPORT
extern int afs_cache_get_page_cookie(struct page *page,
diff --git a/fs/aio.c b/fs/aio.c
index edfca5b7553..5a28b69ad22 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -42,8 +42,9 @@
#endif
/*------ sysctl variables----*/
-atomic_t aio_nr = ATOMIC_INIT(0); /* current system wide number of aio requests */
-unsigned aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
+static DEFINE_SPINLOCK(aio_nr_lock);
+unsigned long aio_nr; /* current system wide number of aio requests */
+unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
static kmem_cache_t *kiocb_cachep;
@@ -208,7 +209,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
return ERR_PTR(-EINVAL);
}
- if (nr_events > aio_max_nr)
+ if ((unsigned long)nr_events > aio_max_nr)
return ERR_PTR(-EAGAIN);
ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
@@ -233,8 +234,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
goto out_freectx;
/* limit the number of system wide aios */
- atomic_add(ctx->max_reqs, &aio_nr); /* undone by __put_ioctx */
- if (unlikely(atomic_read(&aio_nr) > aio_max_nr))
+ spin_lock(&aio_nr_lock);
+ if (aio_nr + ctx->max_reqs > aio_max_nr ||
+ aio_nr + ctx->max_reqs < aio_nr)
+ ctx->max_reqs = 0;
+ else
+ aio_nr += ctx->max_reqs;
+ spin_unlock(&aio_nr_lock);
+ if (ctx->max_reqs == 0)
goto out_cleanup;
/* now link into global list. kludge. FIXME */
@@ -248,8 +255,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
return ctx;
out_cleanup:
- atomic_sub(ctx->max_reqs, &aio_nr);
- ctx->max_reqs = 0; /* prevent __put_ioctx from sub'ing aio_nr */
__put_ioctx(ctx);
return ERR_PTR(-EAGAIN);
@@ -374,7 +379,12 @@ void fastcall __put_ioctx(struct kioctx *ctx)
pr_debug("__put_ioctx: freeing %p\n", ctx);
kmem_cache_free(kioctx_cachep, ctx);
- atomic_sub(nr_events, &aio_nr);
+ if (nr_events) {
+ spin_lock(&aio_nr_lock);
+ BUG_ON(aio_nr - nr_events > aio_nr);
+ aio_nr -= nr_events;
+ spin_unlock(&aio_nr_lock);
+ }
}
/* aio_get_req
@@ -447,6 +457,8 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
+ assert_spin_locked(&ctx->ctx_lock);
+
if (req->ki_dtor)
req->ki_dtor(req);
kmem_cache_free(kiocb_cachep, req);
@@ -488,6 +500,8 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
dprintk(KERN_DEBUG "aio_put(%p): f_count=%d\n",
req, atomic_read(&req->ki_filp->f_count));
+ assert_spin_locked(&ctx->ctx_lock);
+
req->ki_users --;
if (unlikely(req->ki_users < 0))
BUG();
@@ -609,14 +623,13 @@ static void unuse_mm(struct mm_struct *mm)
* the kiocb (to tell the caller to activate the work
* queue to process it), or 0, if it found that it was
* already queued.
- *
- * Should be called with the spin lock iocb->ki_ctx->ctx_lock
- * held
*/
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
struct kioctx *ctx = iocb->ki_ctx;
+ assert_spin_locked(&ctx->ctx_lock);
+
if (list_empty(&iocb->ki_run_list)) {
list_add_tail(&iocb->ki_run_list,
&ctx->run_list);
@@ -761,13 +774,15 @@ out:
* Process all pending retries queued on the ioctx
* run list.
* Assumes it is operating within the aio issuer's mm
- * context. Expects to be called with ctx->ctx_lock held
+ * context.
*/
static int __aio_run_iocbs(struct kioctx *ctx)
{
struct kiocb *iocb;
LIST_HEAD(run_list);
+ assert_spin_locked(&ctx->ctx_lock);
+
list_splice_init(&ctx->run_list, &run_list);
while (!list_empty(&run_list)) {
iocb = list_entry(run_list.next, struct kiocb,
@@ -927,28 +942,19 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
unsigned long tail;
int ret;
- /* Special case handling for sync iocbs: events go directly
- * into the iocb for fast handling. Note that this will not
- * work if we allow sync kiocbs to be cancelled. in which
- * case the usage count checks will have to move under ctx_lock
- * for all cases.
+ /*
+ * Special case handling for sync iocbs:
+ * - events go directly into the iocb for fast handling
+ * - the sync task with the iocb in its stack holds the single iocb
+ * ref, no other paths have a way to get another ref
+ * - the sync task helpfully left a reference to itself in the iocb
*/
if (is_sync_kiocb(iocb)) {
- int ret;
-
+ BUG_ON(iocb->ki_users != 1);
iocb->ki_user_data = res;
- if (iocb->ki_users == 1) {
- iocb->ki_users = 0;
- ret = 1;
- } else {
- spin_lock_irq(&ctx->ctx_lock);
- iocb->ki_users--;
- ret = (0 == iocb->ki_users);
- spin_unlock_irq(&ctx->ctx_lock);
- }
- /* sync iocbs put the task here for us */
+ iocb->ki_users = 0;
wake_up_process(iocb->ki_obj.tsk);
- return ret;
+ return 1;
}
info = &ctx->ring_info;
@@ -1258,8 +1264,9 @@ asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
goto out;
ret = -EINVAL;
- if (unlikely(ctx || (int)nr_events <= 0)) {
- pr_debug("EINVAL: io_setup: ctx or nr_events > max\n");
+ if (unlikely(ctx || nr_events == 0)) {
+ pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+ ctx, nr_events);
goto out;
}
@@ -1602,12 +1609,14 @@ asmlinkage long sys_io_submit(aio_context_t ctx_id, long nr,
/* lookup_kiocb
* Finds a given iocb for cancellation.
- * MUST be called with ctx->ctx_lock held.
*/
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
u32 key)
{
struct list_head *pos;
+
+ assert_spin_locked(&ctx->ctx_lock);
+
/* TODO: use a hash or array, this sucks. */
list_for_each(pos, &ctx->active_reqs) {
struct kiocb *kiocb = list_kiocb(pos);
diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c
index 1fcaa156854..633f628005b 100644
--- a/fs/autofs/waitq.c
+++ b/fs/autofs/waitq.c
@@ -150,10 +150,8 @@ int autofs_wait(struct autofs_sb_info *sbi, struct qstr *name)
if ( sbi->catatonic ) {
/* We might have slept, so check again for catatonic mode */
wq->status = -ENOENT;
- if ( wq->name ) {
- kfree(wq->name);
- wq->name = NULL;
- }
+ kfree(wq->name);
+ wq->name = NULL;
}
if ( wq->name ) {
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 0a3c05d1016..818b37be515 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -22,10 +22,8 @@
static void ino_lnkfree(struct autofs_info *ino)
{
- if (ino->u.symlink) {
- kfree(ino->u.symlink);
- ino->u.symlink = NULL;
- }
+ kfree(ino->u.symlink);
+ ino->u.symlink = NULL;
}
struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 3df86285a1c..394ff36ef8f 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -243,10 +243,8 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
if ( sbi->catatonic ) {
/* We might have slept, so check again for catatonic mode */
wq->status = -ENOENT;
- if ( wq->name ) {
- kfree(wq->name);
- wq->name = NULL;
- }
+ kfree(wq->name);
+ wq->name = NULL;
}
if ( wq->name ) {
diff --git a/fs/befs/attribute.c b/fs/befs/attribute.c
deleted file mode 100644
index e329d727053..00000000000
--- a/fs/befs/attribute.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * linux/fs/befs/attribute.c
- *
- * Copyright (C) 2002 Will Dyson <will_dyson@pobox.com>
- *
- * Many thanks to Dominic Giampaolo, author of "Practical File System
- * Design with the Be File System", for such a helpful book.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-
-#include "befs.h"
-#include "endian.h"
-
-#define SD_DATA(sd)\
- (void*)((char*)sd + sizeof(*sd) + (sd->name_size - sizeof(sd->name)))
-
-#define SD_NEXT(sd)\
- (befs_small_data*)((char*)sd + sizeof(*sd) + (sd->name_size - \
- sizeof(sd->name) + sd->data_size))
-
-int
-list_small_data(struct super_block *sb, befs_inode * inode, filldir_t filldir);
-
-befs_small_data *
-find_small_data(struct super_block *sb, befs_inode * inode,
- const char *name);
-int
-read_small_data(struct super_block *sb, befs_inode * inode,
- befs_small_data * sdata, void *buf, size_t bufsize);
-
-/**
- *
- *
- *
- *
- *
- */
-befs_small_data *
-find_small_data(struct super_block *sb, befs_inode * inode, const char *name)
-{
- befs_small_data *sdata = inode->small_data;
-
- while (sdata->type != 0) {
- if (strcmp(name, sdata->name) != 0) {
- return sdata;
- }
- sdata = SD_NEXT(sdata);
- }
- return NULL;
-}
-
-/**
- *
- *
- *
- *
- *
- */
-int
-read_small_data(struct super_block *sb, befs_inode * inode,
- const char *name, void *buf, size_t bufsize)
-{
- befs_small_data *sdata;
-
- sdata = find_small_data(sb, inode, name);
- if (sdata == NULL)
- return BEFS_ERR;
- else if (sdata->data_size > bufsize)
- return BEFS_ERR;
-
- memcpy(buf, SD_DATA(sdata), sdata->data_size);
-
- return BEFS_OK;
-}
-
-/**
- *
- *
- *
- *
- *
- */
-int
-list_small_data(struct super_block *sb, befs_inode * inode)
-{
-
-}
-
-/**
- *
- *
- *
- *
- *
- */
-int
-list_attr(struct super_block *sb, befs_inode * inode)
-{
-
-}
-
-/**
- *
- *
- *
- *
- *
- */
-int
-read_attr(struct super_block *sb, befs_inode * inode)
-{
-
-}
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index e0a6025f1d0..2d365cb8eec 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -73,12 +73,6 @@ static struct inode_operations befs_dir_inode_operations = {
.lookup = befs_lookup,
};
-static struct file_operations befs_file_operations = {
- .llseek = default_llseek,
- .read = generic_file_read,
- .mmap = generic_file_readonly_mmap,
-};
-
static struct address_space_operations befs_aops = {
.readpage = befs_readpage,
.sync_page = block_sync_page,
@@ -398,7 +392,7 @@ befs_read_inode(struct inode *inode)
inode->i_mapping->a_ops = &befs_aops;
if (S_ISREG(inode->i_mode)) {
- inode->i_fop = &befs_file_operations;
+ inode->i_fop = &generic_ro_fops;
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &befs_dir_inode_operations;
inode->i_fop = &befs_dir_operations;
@@ -731,20 +725,16 @@ parse_options(char *options, befs_mount_options * opts)
static void
befs_put_super(struct super_block *sb)
{
- if (BEFS_SB(sb)->mount_opts.iocharset) {
- kfree(BEFS_SB(sb)->mount_opts.iocharset);
- BEFS_SB(sb)->mount_opts.iocharset = NULL;
- }
+ kfree(BEFS_SB(sb)->mount_opts.iocharset);
+ BEFS_SB(sb)->mount_opts.iocharset = NULL;
if (BEFS_SB(sb)->nls) {
unload_nls(BEFS_SB(sb)->nls);
BEFS_SB(sb)->nls = NULL;
}
- if (sb->s_fs_info) {
- kfree(sb->s_fs_info);
- sb->s_fs_info = NULL;
- }
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
return;
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 6fa6adc4097..f36f2210204 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1006,8 +1006,7 @@ out_free_dentry:
if (interpreter)
fput(interpreter);
out_free_interp:
- if (elf_interpreter)
- kfree(elf_interpreter);
+ kfree(elf_interpreter);
out_free_file:
sys_close(elf_exec_fileno);
out_free_fh:
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index dda87c4c82a..e0344f69c79 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -411,16 +411,11 @@ error:
allow_write_access(interpreter);
fput(interpreter);
}
- if (interpreter_name)
- kfree(interpreter_name);
- if (exec_params.phdrs)
- kfree(exec_params.phdrs);
- if (exec_params.loadmap)
- kfree(exec_params.loadmap);
- if (interp_params.phdrs)
- kfree(interp_params.phdrs);
- if (interp_params.loadmap)
- kfree(interp_params.loadmap);
+ kfree(interpreter_name);
+ kfree(exec_params.phdrs);
+ kfree(exec_params.loadmap);
+ kfree(interp_params.phdrs);
+ kfree(interp_params.loadmap);
return retval;
/* unrecoverable error - kill the process */
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 8ae0db6cd69..2568eb41cb3 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -150,7 +150,7 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
/* if the binary is not readable than enforce mm->dumpable=0
regardless of the interpreter's permissions */
- if (permission(bprm->file->f_dentry->d_inode, MAY_READ, NULL))
+ if (file_permission(bprm->file, MAY_READ))
bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
allow_write_access(bprm->file);
diff --git a/fs/buffer.c b/fs/buffer.c
index 35fa34977e8..5287be18633 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -396,7 +396,7 @@ asmlinkage long sys_fdatasync(unsigned int fd)
* private_lock is contended then so is mapping->tree_lock).
*/
static struct buffer_head *
-__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
+__find_get_block_slow(struct block_device *bdev, sector_t block)
{
struct inode *bd_inode = bdev->bd_inode;
struct address_space *bd_mapping = bd_inode->i_mapping;
@@ -1438,7 +1438,7 @@ __find_get_block(struct block_device *bdev, sector_t block, int size)
struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
if (bh == NULL) {
- bh = __find_get_block_slow(bdev, block, size);
+ bh = __find_get_block_slow(bdev, block);
if (bh)
bh_lru_install(bh);
}
@@ -1705,7 +1705,7 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
might_sleep();
- old_bh = __find_get_block_slow(bdev, block, 0);
+ old_bh = __find_get_block_slow(bdev, block);
if (old_bh) {
clear_buffer_dirty(old_bh);
wait_on_buffer(old_bh);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 5bab24f5905..eab3750cf30 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -2,7 +2,7 @@ Version 1.39
------------
Defer close of a file handle slightly if pending writes depend on that file handle
(this reduces the EBADF bad file handle errors that can be logged under heavy
-stress on writes).
+stress on writes). Modify cifs Kconfig options to expose CONFIG_CIFS_STATS2
Version 1.38
------------
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 98539e2afe8..086ae8f4a20 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -553,8 +553,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
*(oid + 3)));
rc = compare_oid(oid, oidlen, NTLMSSP_OID,
NTLMSSP_OID_LEN);
- if(oid)
- kfree(oid);
+ kfree(oid);
if (rc)
use_ntlmssp = TRUE;
}
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 99a096d3f84..4e12053f080 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -74,10 +74,11 @@ cifs_strtoUCS(wchar_t * to, const char *from, int len,
cERROR(1,
("cifs_strtoUCS: char2uni returned %d",
charlen));
- to[i] = cpu_to_le16(0x003f); /* a question mark */
+ /* A question mark */
+ to[i] = (wchar_t)cpu_to_le16(0x003f);
charlen = 1;
} else
- to[i] = cpu_to_le16(to[i]);
+ to[i] = (wchar_t)cpu_to_le16(to[i]);
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 877095a1192..682b0235ad9 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -405,6 +405,7 @@ static struct quotactl_ops cifs_quotactl_ops = {
};
#endif
+#ifdef CONFIG_CIFS_EXPERIMENTAL
static void cifs_umount_begin(struct super_block * sblock)
{
struct cifs_sb_info *cifs_sb;
@@ -422,16 +423,18 @@ static void cifs_umount_begin(struct super_block * sblock)
tcon->tidStatus = CifsExiting;
up(&tcon->tconSem);
+ /* cancel_brl_requests(tcon); */
+ /* cancel_notify_requests(tcon); */
if(tcon->ses && tcon->ses->server)
{
- cERROR(1,("wake up tasks now - umount begin not complete"));
+ cFYI(1,("wake up tasks now - umount begin not complete"));
wake_up_all(&tcon->ses->server->request_q);
}
/* BB FIXME - finish add checks for tidStatus BB */
return;
}
-
+#endif
static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
@@ -450,7 +453,9 @@ struct super_operations cifs_super_ops = {
unless later we add lazy close of inodes or unless the kernel forgets to call
us with the same number of releases (closes) as opens */
.show_options = cifs_show_options,
-/* .umount_begin = cifs_umount_begin, */ /* BB finish in the future */
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+ .umount_begin = cifs_umount_begin,
+#endif
.remount_fs = cifs_remount,
};
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index d301149b1bb..1b73f4f4c5c 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -242,11 +242,11 @@ extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
const int netfid, const unsigned int count,
const __u64 offset, unsigned int *nbytes,
struct kvec *iov, const int nvec, const int long_op);
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName, __u64 * inode_number,
const struct nls_table *nls_codepage,
int remap_special_chars);
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
extern int cifs_convertUCSpath(char *target, const __le16 *source, int maxlen,
const struct nls_table * codepage);
extern int cifsConvertToUCS(__le16 * target, const char *source, int maxlen,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 9312bfc5668..a53c596e108 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -2959,7 +2959,6 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle
return rc;
}
-#ifdef CONFIG_CIFS_EXPERIMENTAL
int
CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
@@ -3053,7 +3052,6 @@ GetInodeNumOut:
goto GetInodeNumberRetry;
return rc;
}
-#endif /* CIFS_EXPERIMENTAL */
int
CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
diff --git a/fs/cifs/cn_cifs.h b/fs/cifs/cn_cifs.h
new file mode 100644
index 00000000000..ea59ccac2eb
--- /dev/null
+++ b/fs/cifs/cn_cifs.h
@@ -0,0 +1,37 @@
+/*
+ * fs/cifs/cn_cifs.h
+ *
+ * Copyright (c) International Business Machines Corp., 2002
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _CN_CIFS_H
+#define _CN_CIFS_H
+#ifdef CONFIG_CIFS_UPCALL
+#include <linux/types.h>
+#include <linux/connector.h>
+
+struct cifs_upcall {
+ char signature[4]; /* CIFS */
+ enum command {
+ CIFS_GET_IP = 0x00000001, /* get ip address for hostname */
+ CIFS_GET_SECBLOB = 0x00000002, /* get SPNEGO wrapped blob */
+ } command;
+ /* union cifs upcall data follows */
+};
+#endif /* CIFS_UPCALL */
+#endif /* _CN_CIFS_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d74367a08d5..2cb620716bc 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -42,6 +42,7 @@
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
+#include "cn_cifs.h"
#define CIFS_PORT 445
#define RFC1001_PORT 139
@@ -1265,8 +1266,7 @@ connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
the helper that resolves tcp names, mount to it, try to
tcon to it unmount it if fail */
- if(referrals)
- kfree(referrals);
+ kfree(referrals);
return rc;
}
@@ -1535,10 +1535,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
memset(&volume_info,0,sizeof(struct smb_vol));
if (cifs_parse_mount_options(mount_data, devname, &volume_info)) {
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return -EINVAL;
}
@@ -1551,10 +1549,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
cifserror("No username specified ");
/* In userspace mount helper we can get user name from alternate
locations such as env variables and files on disk */
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return -EINVAL;
}
@@ -1573,10 +1569,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if(rc <= 0) {
/* we failed translating address */
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return -EINVAL;
}
@@ -1587,19 +1581,15 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
} else if (volume_info.UNCip){
/* BB using ip addr as server name connect to the DFS root below */
cERROR(1,("Connecting to DFS root not implemented yet"));
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return -EINVAL;
} else /* which servers DFS root would we conect to */ {
cERROR(1,
("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified "));
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return -EINVAL;
}
@@ -1612,10 +1602,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
cifs_sb->local_nls = load_nls(volume_info.iocharset);
if(cifs_sb->local_nls == NULL) {
cERROR(1,("CIFS mount error: iocharset %s not found",volume_info.iocharset));
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return -ELIBACC;
}
@@ -1630,10 +1618,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
&sin_server6.sin6_addr,
volume_info.username, &srvTcp);
else {
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return -EINVAL;
}
@@ -1654,10 +1640,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
("Error connecting to IPv4 socket. Aborting operation"));
if(csocket != NULL)
sock_release(csocket);
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return rc;
}
@@ -1666,10 +1650,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if (srvTcp == NULL) {
rc = -ENOMEM;
sock_release(csocket);
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return rc;
} else {
@@ -1692,10 +1674,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if(rc < 0) {
rc = -ENOMEM;
sock_release(csocket);
- if(volume_info.UNC)
- kfree(volume_info.UNC);
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.UNC);
+ kfree(volume_info.password);
FreeXid(xid);
return rc;
}
@@ -1710,8 +1690,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if (existingCifsSes) {
pSesInfo = existingCifsSes;
cFYI(1, ("Existing smb sess found "));
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.password);
/* volume_info.UNC freed at end of function */
} else if (!rc) {
cFYI(1, ("Existing smb sess not found "));
@@ -1741,8 +1720,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if(!rc)
atomic_inc(&srvTcp->socketUseCount);
} else
- if(volume_info.password)
- kfree(volume_info.password);
+ kfree(volume_info.password);
}
/* search for existing tcon to this server share */
@@ -1821,8 +1799,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
"", cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if(volume_info.UNC)
- kfree(volume_info.UNC);
+ kfree(volume_info.UNC);
FreeXid(xid);
return -ENODEV;
} else {
@@ -1925,8 +1902,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
(in which case it is not needed anymore) but when new sesion is created
the password ptr is put in the new session structure (in which case the
password will be freed at unmount time) */
- if(volume_info.UNC)
- kfree(volume_info.UNC);
+ kfree(volume_info.UNC);
FreeXid(xid);
return rc;
}
@@ -3283,8 +3259,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
if ((bcc_ptr + (2 * length)) -
pByteArea(smb_buffer_response) <=
BCC(smb_buffer_response)) {
- if(tcon->nativeFileSystem)
- kfree(tcon->nativeFileSystem);
+ kfree(tcon->nativeFileSystem);
tcon->nativeFileSystem =
kzalloc(length + 2, GFP_KERNEL);
cifs_strfromUCS_le(tcon->nativeFileSystem,
@@ -3301,8 +3276,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
if ((bcc_ptr + length) -
pByteArea(smb_buffer_response) <=
BCC(smb_buffer_response)) {
- if(tcon->nativeFileSystem)
- kfree(tcon->nativeFileSystem);
+ kfree(tcon->nativeFileSystem);
tcon->nativeFileSystem =
kzalloc(length + 1, GFP_KERNEL);
strncpy(tcon->nativeFileSystem, bcc_ptr,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 912d401600f..923d071163b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -283,7 +283,6 @@ int cifs_get_inode_info(struct inode **pinode,
there Windows server or network appliances for which
IndexNumber field is not guaranteed unique? */
-#ifdef CONFIG_CIFS_EXPERIMENTAL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM){
int rc1 = 0;
__u64 inode_num;
@@ -299,7 +298,6 @@ int cifs_get_inode_info(struct inode **pinode,
} else /* do we need cast or hash to ino? */
(*pinode)->i_ino = inode_num;
} /* else ino incremented to unique num in new_inode*/
-#endif /* CIFS_EXPERIMENTAL */
insert_inode_hash(*pinode);
}
inode = *pinode;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index b43e071fe11..0f99aae3316 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -84,10 +84,8 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
cifsInode->time = 0; /* will force revalidate to go get info when needed */
cifs_hl_exit:
- if (fromName)
- kfree(fromName);
- if (toName)
- kfree(toName);
+ kfree(fromName);
+ kfree(toName);
FreeXid(xid);
return rc;
}
@@ -206,8 +204,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
}
}
- if (full_path)
- kfree(full_path);
+ kfree(full_path);
FreeXid(xid);
return rc;
}
@@ -253,8 +250,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
len = buflen;
tmpbuffer = kmalloc(len,GFP_KERNEL);
if(tmpbuffer == NULL) {
- if (full_path)
- kfree(full_path);
+ kfree(full_path);
FreeXid(xid);
return -ENOMEM;
}
@@ -303,8 +299,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
strncpy(tmpbuffer, referrals, len-1);
}
}
- if(referrals)
- kfree(referrals);
+ kfree(referrals);
kfree(tmp_path);
}
/* BB add code like else decode referrals then memcpy to
@@ -323,12 +318,8 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
rc));
}
- if (tmpbuffer) {
- kfree(tmpbuffer);
- }
- if (full_path) {
- kfree(full_path);
- }
+ kfree(tmpbuffer);
+ kfree(full_path);
FreeXid(xid);
return rc;
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index eba1de917f2..34a06692e4f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -98,14 +98,10 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
atomic_dec(&sesInfoAllocCount);
list_del(&buf_to_free->cifsSessionList);
write_unlock(&GlobalSMBSeslock);
- if (buf_to_free->serverOS)
- kfree(buf_to_free->serverOS);
- if (buf_to_free->serverDomain)
- kfree(buf_to_free->serverDomain);
- if (buf_to_free->serverNOS)
- kfree(buf_to_free->serverNOS);
- if (buf_to_free->password)
- kfree(buf_to_free->password);
+ kfree(buf_to_free->serverOS);
+ kfree(buf_to_free->serverDomain);
+ kfree(buf_to_free->serverNOS);
+ kfree(buf_to_free->password);
kfree(buf_to_free);
}
@@ -144,8 +140,7 @@ tconInfoFree(struct cifsTconInfo *buf_to_free)
atomic_dec(&tconInfoAllocCount);
list_del(&buf_to_free->cifsConnectionList);
write_unlock(&GlobalSMBSeslock);
- if (buf_to_free->nativeFileSystem)
- kfree(buf_to_free->nativeFileSystem);
+ kfree(buf_to_free->nativeFileSystem);
kfree(buf_to_free);
}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index c1e02eff1d2..f375f87c7db 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -87,8 +87,7 @@ int cifs_removexattr(struct dentry * direntry, const char * ea_name)
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
}
remove_ea_exit:
- if (full_path)
- kfree(full_path);
+ kfree(full_path);
FreeXid(xid);
#endif
return rc;
@@ -132,8 +131,7 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name,
returns as xattrs */
if(value_size > MAX_EA_VALUE_SIZE) {
cFYI(1,("size of EA value too large"));
- if(full_path)
- kfree(full_path);
+ kfree(full_path);
FreeXid(xid);
return -EOPNOTSUPP;
}
@@ -195,8 +193,7 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name,
}
set_ea_exit:
- if (full_path)
- kfree(full_path);
+ kfree(full_path);
FreeXid(xid);
#endif
return rc;
@@ -298,8 +295,7 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
rc = -EOPNOTSUPP;
get_ea_exit:
- if (full_path)
- kfree(full_path);
+ kfree(full_path);
FreeXid(xid);
#endif
return rc;
@@ -345,8 +341,7 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (full_path)
- kfree(full_path);
+ kfree(full_path);
FreeXid(xid);
#endif
return rc;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 43dbcb0b21e..991c00de5c4 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -49,6 +49,8 @@
#include <linux/vt_kern.h>
#include <linux/fb.h>
#include <linux/ext2_fs.h>
+#include <linux/ext3_jbd.h>
+#include <linux/ext3_fs.h>
#include <linux/videodev.h>
#include <linux/netdevice.h>
#include <linux/raw.h>
@@ -121,6 +123,11 @@
#include <linux/hiddev.h>
+#include <linux/dvb/audio.h>
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/frontend.h>
+#include <linux/dvb/video.h>
+
#undef INCLUDES
#endif
@@ -129,6 +136,15 @@
/* Aiee. Someone does not find a difference between int and long */
#define EXT2_IOC32_GETFLAGS _IOR('f', 1, int)
#define EXT2_IOC32_SETFLAGS _IOW('f', 2, int)
+#define EXT3_IOC32_GETVERSION _IOR('f', 3, int)
+#define EXT3_IOC32_SETVERSION _IOR('f', 4, int)
+#define EXT3_IOC32_GETRSVSZ _IOR('f', 5, int)
+#define EXT3_IOC32_SETRSVSZ _IOW('f', 6, int)
+#define EXT3_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
+#ifdef CONFIG_JBD_DEBUG
+#define EXT3_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int)
+#endif
+
#define EXT2_IOC32_GETVERSION _IOR('v', 1, int)
#define EXT2_IOC32_SETVERSION _IOW('v', 2, int)
@@ -175,6 +191,22 @@ static int do_ext2_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
}
+static int do_ext3_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ /* These are just misnamed, they actually get/put from/to user an int */
+ switch (cmd) {
+ case EXT3_IOC32_GETVERSION: cmd = EXT3_IOC_GETVERSION; break;
+ case EXT3_IOC32_SETVERSION: cmd = EXT3_IOC_SETVERSION; break;
+ case EXT3_IOC32_GETRSVSZ: cmd = EXT3_IOC_GETRSVSZ; break;
+ case EXT3_IOC32_SETRSVSZ: cmd = EXT3_IOC_SETRSVSZ; break;
+ case EXT3_IOC32_GROUP_EXTEND: cmd = EXT3_IOC_GROUP_EXTEND; break;
+#ifdef CONFIG_JBD_DEBUG
+ case EXT3_IOC32_WAIT_FOR_READONLY: cmd = EXT3_IOC_WAIT_FOR_READONLY; break;
+#endif
+ }
+ return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
+}
+
struct video_tuner32 {
compat_int_t tuner;
char name[32];
@@ -413,6 +445,128 @@ out:
return err;
}
+struct compat_dmx_event {
+ dmx_event_t event;
+ compat_time_t timeStamp;
+ union
+ {
+ dmx_scrambling_status_t scrambling;
+ } u;
+};
+
+static int do_dmx_get_event(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct dmx_event kevent;
+ mm_segment_t old_fs = get_fs();
+ int err;
+
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long) &kevent);
+ set_fs(old_fs);
+
+ if (!err) {
+ struct compat_dmx_event __user *up = compat_ptr(arg);
+
+ err = put_user(kevent.event, &up->event);
+ err |= put_user(kevent.timeStamp, &up->timeStamp);
+ err |= put_user(kevent.u.scrambling, &up->u.scrambling);
+ if (err)
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+struct compat_video_event {
+ int32_t type;
+ compat_time_t timestamp;
+ union {
+ video_size_t size;
+ unsigned int frame_rate;
+ } u;
+};
+
+static int do_video_get_event(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct video_event kevent;
+ mm_segment_t old_fs = get_fs();
+ int err;
+
+ set_fs(KERNEL_DS);
+ err = sys_ioctl(fd, cmd, (unsigned long) &kevent);
+ set_fs(old_fs);
+
+ if (!err) {
+ struct compat_video_event __user *up = compat_ptr(arg);
+
+ err = put_user(kevent.type, &up->type);
+ err |= put_user(kevent.timestamp, &up->timestamp);
+ err |= put_user(kevent.u.size.w, &up->u.size.w);
+ err |= put_user(kevent.u.size.h, &up->u.size.h);
+ err |= put_user(kevent.u.size.aspect_ratio,
+ &up->u.size.aspect_ratio);
+ if (err)
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+struct compat_video_still_picture {
+ compat_uptr_t iFrame;
+ int32_t size;
+};
+
+static int do_video_stillpicture(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct compat_video_still_picture __user *up;
+ struct video_still_picture __user *up_native;
+ compat_uptr_t fp;
+ int32_t size;
+ int err;
+
+ up = (struct compat_video_still_picture __user *) arg;
+ err = get_user(fp, &up->iFrame);
+ err |= get_user(size, &up->size);
+ if (err)
+ return -EFAULT;
+
+ up_native =
+ compat_alloc_user_space(sizeof(struct video_still_picture));
+
+ put_user(compat_ptr(fp), &up_native->iFrame);
+ put_user(size, &up_native->size);
+
+ err = sys_ioctl(fd, cmd, (unsigned long) up_native);
+
+ return err;
+}
+
+struct compat_video_spu_palette {
+ int length;
+ compat_uptr_t palette;
+};
+
+static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct compat_video_spu_palette __user *up;
+ struct video_spu_palette __user *up_native;
+ compat_uptr_t palp;
+ int length, err;
+
+ up = (struct compat_video_spu_palette __user *) arg;
+ err = get_user(palp, &up->palette);
+ err |= get_user(length, &up->length);
+
+ up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
+ put_user(compat_ptr(palp), &up_native->palette);
+ put_user(length, &up_native->length);
+
+ err = sys_ioctl(fd, cmd, (unsigned long) up_native);
+
+ return err;
+}
+
#ifdef CONFIG_NET
static int do_siocgstamp(unsigned int fd, unsigned int cmd, unsigned long arg)
{
@@ -840,146 +994,6 @@ static int hdio_getgeo(unsigned int fd, unsigned int cmd, unsigned long arg)
return err ? -EFAULT : 0;
}
-struct fb_fix_screeninfo32 {
- char id[16];
- compat_caddr_t smem_start;
- u32 smem_len;
- u32 type;
- u32 type_aux;
- u32 visual;
- u16 xpanstep;
- u16 ypanstep;
- u16 ywrapstep;
- u32 line_length;
- compat_caddr_t mmio_start;
- u32 mmio_len;
- u32 accel;
- u16 reserved[3];
-};
-
-struct fb_cmap32 {
- u32 start;
- u32 len;
- compat_caddr_t red;
- compat_caddr_t green;
- compat_caddr_t blue;
- compat_caddr_t transp;
-};
-
-static int fb_getput_cmap(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct fb_cmap_user __user *cmap;
- struct fb_cmap32 __user *cmap32;
- __u32 data;
- int err;
-
- cmap = compat_alloc_user_space(sizeof(*cmap));
- cmap32 = compat_ptr(arg);
-
- if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
- return -EFAULT;
-
- if (get_user(data, &cmap32->red) ||
- put_user(compat_ptr(data), &cmap->red) ||
- get_user(data, &cmap32->green) ||
- put_user(compat_ptr(data), &cmap->green) ||
- get_user(data, &cmap32->blue) ||
- put_user(compat_ptr(data), &cmap->blue) ||
- get_user(data, &cmap32->transp) ||
- put_user(compat_ptr(data), &cmap->transp))
- return -EFAULT;
-
- err = sys_ioctl(fd, cmd, (unsigned long) cmap);
-
- if (!err) {
- if (copy_in_user(&cmap32->start,
- &cmap->start,
- 2 * sizeof(__u32)))
- err = -EFAULT;
- }
- return err;
-}
-
-static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
- struct fb_fix_screeninfo32 __user *fix32)
-{
- __u32 data;
- int err;
-
- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
-
- data = (__u32) (unsigned long) fix->smem_start;
- err |= put_user(data, &fix32->smem_start);
-
- err |= put_user(fix->smem_len, &fix32->smem_len);
- err |= put_user(fix->type, &fix32->type);
- err |= put_user(fix->type_aux, &fix32->type_aux);
- err |= put_user(fix->visual, &fix32->visual);
- err |= put_user(fix->xpanstep, &fix32->xpanstep);
- err |= put_user(fix->ypanstep, &fix32->ypanstep);
- err |= put_user(fix->ywrapstep, &fix32->ywrapstep);
- err |= put_user(fix->line_length, &fix32->line_length);
-
- data = (__u32) (unsigned long) fix->mmio_start;
- err |= put_user(data, &fix32->mmio_start);
-
- err |= put_user(fix->mmio_len, &fix32->mmio_len);
- err |= put_user(fix->accel, &fix32->accel);
- err |= copy_to_user(fix32->reserved, fix->reserved,
- sizeof(fix->reserved));
-
- return err;
-}
-
-static int fb_get_fscreeninfo(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- mm_segment_t old_fs;
- struct fb_fix_screeninfo fix;
- struct fb_fix_screeninfo32 __user *fix32;
- int err;
-
- fix32 = compat_ptr(arg);
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_ioctl(fd, cmd, (unsigned long) &fix);
- set_fs(old_fs);
-
- if (!err)
- err = do_fscreeninfo_to_user(&fix, fix32);
-
- return err;
-}
-
-static int fb_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- int err;
-
- switch (cmd) {
- case FBIOGET_FSCREENINFO:
- err = fb_get_fscreeninfo(fd,cmd, arg);
- break;
-
- case FBIOGETCMAP:
- case FBIOPUTCMAP:
- err = fb_getput_cmap(fd, cmd, arg);
- break;
-
- default:
- do {
- static int count;
- if (++count <= 20)
- printk("%s: Unknown fb ioctl cmd fd(%d) "
- "cmd(%08x) arg(%08lx)\n",
- __FUNCTION__, fd, cmd, arg);
- } while(0);
- err = -ENOSYS;
- break;
- };
-
- return err;
-}
-
static int hdio_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
@@ -2235,7 +2249,8 @@ static int fd_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
if (err)
err = -EFAULT;
-out: if (karg) kfree(karg);
+out:
+ kfree(karg);
return err;
}
@@ -2952,10 +2967,7 @@ HANDLE_IOCTL(BLKGETSIZE, w_long)
HANDLE_IOCTL(0x1260, broken_blkgetsize)
HANDLE_IOCTL(BLKFRAGET, w_long)
HANDLE_IOCTL(BLKSECTGET, w_long)
-HANDLE_IOCTL(FBIOGET_FSCREENINFO, fb_ioctl_trans)
HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
-HANDLE_IOCTL(FBIOGETCMAP, fb_ioctl_trans)
-HANDLE_IOCTL(FBIOPUTCMAP, fb_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_UNMASKINTR, hdio_ioctl_trans)
HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans)
@@ -2996,6 +3008,15 @@ HANDLE_IOCTL(EXT2_IOC32_GETFLAGS, do_ext2_ioctl)
HANDLE_IOCTL(EXT2_IOC32_SETFLAGS, do_ext2_ioctl)
HANDLE_IOCTL(EXT2_IOC32_GETVERSION, do_ext2_ioctl)
HANDLE_IOCTL(EXT2_IOC32_SETVERSION, do_ext2_ioctl)
+HANDLE_IOCTL(EXT3_IOC32_GETVERSION, do_ext3_ioctl)
+HANDLE_IOCTL(EXT3_IOC32_SETVERSION, do_ext3_ioctl)
+HANDLE_IOCTL(EXT3_IOC32_GETRSVSZ, do_ext3_ioctl)
+HANDLE_IOCTL(EXT3_IOC32_SETRSVSZ, do_ext3_ioctl)
+HANDLE_IOCTL(EXT3_IOC32_GROUP_EXTEND, do_ext3_ioctl)
+COMPATIBLE_IOCTL(EXT3_IOC_GROUP_ADD)
+#ifdef CONFIG_JBD_DEBUG
+HANDLE_IOCTL(EXT3_IOC32_WAIT_FOR_READONLY, do_ext3_ioctl)
+#endif
HANDLE_IOCTL(VIDIOCGTUNER32, do_video_ioctl)
HANDLE_IOCTL(VIDIOCSTUNER32, do_video_ioctl)
HANDLE_IOCTL(VIDIOCGWIN32, do_video_ioctl)
@@ -3050,6 +3071,16 @@ HANDLE_IOCTL(TIOCSSERIAL, serial_struct_ioctl)
COMPATIBLE_IOCTL(TIOCGLTC)
COMPATIBLE_IOCTL(TIOCSLTC)
#endif
+#ifdef TIOCSTART
+/*
+ * For these two we have definitions in ioctls.h and/or termios.h on
+ * some architectures but no actual implementation. Some applications
+ * like bash call them if they are defined in the headers, so we provide
+ * entries here to avoid syslog message spew.
+ */
+COMPATIBLE_IOCTL(TIOCSTART)
+COMPATIBLE_IOCTL(TIOCSTOP)
+#endif
/* Usbdevfs */
HANDLE_IOCTL(USBDEVFS_CONTROL32, do_usbdevfs_control)
HANDLE_IOCTL(USBDEVFS_BULK32, do_usbdevfs_bulk)
@@ -3086,5 +3117,11 @@ HANDLE_IOCTL(NCP_IOC_GETPRIVATEDATA_32, do_ncp_getprivatedata)
HANDLE_IOCTL(NCP_IOC_SETPRIVATEDATA_32, do_ncp_setprivatedata)
#endif
+/* dvb */
+HANDLE_IOCTL(DMX_GET_EVENT, do_dmx_get_event)
+HANDLE_IOCTL(VIDEO_GET_EVENT, do_video_get_event)
+HANDLE_IOCTL(VIDEO_STILLPICTURE, do_video_stillpicture)
+HANDLE_IOCTL(VIDEO_SET_SPU_PALETTE, do_video_set_spu_palette)
+
#undef DECLARES
#endif
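
The do_video_set_spu_palette tail at the top of this hunk (and the removed fb translators) follow the usual compat ioctl pattern: read the 32-bit layout, rebuild the native layout in user-accessible memory via compat_alloc_user_space(), widen compat_caddr_t members with compat_ptr(), and re-enter sys_ioctl(). A minimal sketch of that pattern, using a hypothetical "struct foo" that is not part of this patch:

struct foo {
	void __user	*buf;
	u32		len;
};

struct foo32 {
	compat_caddr_t	buf;
	u32		len;
};

static int do_foo_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct foo32 __user *up = compat_ptr(arg);
	struct foo __user *up_native;
	compat_caddr_t buf;
	u32 len;

	/* Pull the 32-bit fields out of the userspace argument */
	if (get_user(buf, &up->buf) || get_user(len, &up->len))
		return -EFAULT;

	/* Rebuild the native layout in compat-accessible user space */
	up_native = compat_alloc_user_space(sizeof(struct foo));
	if (put_user(compat_ptr(buf), &up_native->buf) ||
	    put_user(len, &up_native->len))
		return -EFAULT;

	/* Hand the rewritten argument to the native ioctl path */
	return sys_ioctl(fd, cmd, (unsigned long) up_native);
}
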
diff --git a/fs/dcache.c b/fs/dcache.c
index e90512ed35a..17e43913868 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -644,7 +644,7 @@ void shrink_dcache_parent(struct dentry * parent)
*
* Prune the dentries that are anonymous
*
- * parsing d_hash list does not hlist_for_each_rcu() as it
+ * parsing the d_hash list does not use hlist_for_each_entry_rcu() as it is
* done under dcache_lock.
*
*/
@@ -1043,15 +1043,13 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
struct hlist_head *head = d_hash(parent,hash);
struct dentry *found = NULL;
struct hlist_node *node;
+ struct dentry *dentry;
rcu_read_lock();
- hlist_for_each_rcu(node, head) {
- struct dentry *dentry;
+ hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
struct qstr *qstr;
- dentry = hlist_entry(node, struct dentry, d_hash);
-
if (dentry->d_name.hash != hash)
continue;
if (dentry->d_parent != parent)
@@ -1123,7 +1121,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
spin_lock(&dcache_lock);
base = d_hash(dparent, dentry->d_name.hash);
hlist_for_each(lhp,base) {
- /* hlist_for_each_rcu() not required for d_hash list
+ /* hlist_for_each_entry_rcu() not required for d_hash list
* as it is parsed under dcache_lock
*/
if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
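
The conversion above swaps the open-coded node walk for the entry variant of the RCU hlist iterator, which resolves the containing structure itself. A minimal sketch using the same four-argument macro form as this patch, with a hypothetical "struct item" rather than anything from the kernel source:

struct item {
	int			key;
	struct hlist_node	node;
};

static struct item *find_item_rcu(struct hlist_head *head, int key)
{
	struct hlist_node *pos;
	struct item *it;

	rcu_read_lock();
	/*
	 * Replaces the old pairing of
	 *	hlist_for_each_rcu(pos, head)
	 * with a manual hlist_entry(pos, struct item, node):
	 * the _entry_ form hands back the container directly.
	 */
	hlist_for_each_entry_rcu(it, pos, head, node) {
		if (it->key == key) {
			rcu_read_unlock();
			return it;
		}
	}
	rcu_read_unlock();
	return NULL;
}
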
diff --git a/fs/devfs/base.c b/fs/devfs/base.c
index 8b679b67e5e..1274422a538 100644
--- a/fs/devfs/base.c
+++ b/fs/devfs/base.c
@@ -2738,10 +2738,8 @@ static int devfsd_close(struct inode *inode, struct file *file)
entry = fs_info->devfsd_first_event;
fs_info->devfsd_first_event = NULL;
fs_info->devfsd_last_event = NULL;
- if (fs_info->devfsd_info) {
- kfree(fs_info->devfsd_info);
- fs_info->devfsd_info = NULL;
- }
+ kfree(fs_info->devfsd_info);
+ fs_info->devfsd_info = NULL;
spin_unlock(&fs_info->devfsd_buffer_lock);
fs_info->devfsd_pgrp = 0;
fs_info->devfsd_task = NULL;
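
This hunk, like several later ones (ext2/acl.c, hostfs, hpfs, isofs, jbd, jffs), drops the NULL test before kfree() because kfree(NULL) is defined to be a no-op. A tiny illustration with a hypothetical structure, not taken from the patch:

struct example_state {
	void	*buf;
};

static void example_release(struct example_state *st)
{
	/*
	 * Before:
	 *	if (st->buf) {
	 *		kfree(st->buf);
	 *		st->buf = NULL;
	 *	}
	 * kfree() already ignores a NULL pointer, so the guard is redundant.
	 */
	kfree(st->buf);
	st->buf = NULL;
}
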
diff --git a/fs/dquot.c b/fs/dquot.c
index ea7644227a6..05b60283c9c 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -77,6 +77,7 @@
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/buffer_head.h>
+#include <linux/quotaops.h>
#include <asm/uaccess.h>
@@ -1320,13 +1321,11 @@ int vfs_quota_off(struct super_block *sb, int type)
int cnt;
struct quota_info *dqopt = sb_dqopt(sb);
struct inode *toputinode[MAXQUOTAS];
- struct vfsmount *toputmnt[MAXQUOTAS];
/* We need to serialize quota_off() for device */
down(&dqopt->dqonoff_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
toputinode[cnt] = NULL;
- toputmnt[cnt] = NULL;
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_enabled(sb, cnt))
@@ -1347,9 +1346,7 @@ int vfs_quota_off(struct super_block *sb, int type)
put_quota_format(dqopt->info[cnt].dqi_format);
toputinode[cnt] = dqopt->files[cnt];
- toputmnt[cnt] = dqopt->mnt[cnt];
dqopt->files[cnt] = NULL;
- dqopt->mnt[cnt] = NULL;
dqopt->info[cnt].dqi_flags = 0;
dqopt->info[cnt].dqi_igrace = 0;
dqopt->info[cnt].dqi_bgrace = 0;
@@ -1357,10 +1354,7 @@ int vfs_quota_off(struct super_block *sb, int type)
}
up(&dqopt->dqonoff_sem);
/* Sync the superblock so that buffers with quota data are written to
- * disk (and so userspace sees correct data afterwards).
- * The reference to vfsmnt we are still holding protects us from
- * umount (we don't have it only when quotas are turned on/off for
- * journal replay but in that case we are guarded by the fs anyway). */
+ * disk (and so userspace sees correct data afterwards). */
if (sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, 1);
sync_blockdev(sb->s_bdev);
@@ -1384,10 +1378,6 @@ int vfs_quota_off(struct super_block *sb, int type)
iput(toputinode[cnt]);
}
up(&dqopt->dqonoff_sem);
- /* We don't hold the reference when we turned on quotas
- * just for the journal replay... */
- if (toputmnt[cnt])
- mntput(toputmnt[cnt]);
}
if (sb->s_bdev)
invalidate_bdev(sb->s_bdev, 0);
@@ -1502,11 +1492,8 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
/* Quota file not on the same filesystem? */
if (nd.mnt->mnt_sb != sb)
error = -EXDEV;
- else {
+ else
error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id);
- if (!error)
- sb_dqopt(sb)->mnt[type] = mntget(nd.mnt);
- }
out_path:
path_release(&nd);
return error;
diff --git a/fs/exec.c b/fs/exec.c
index 10d493fea7c..c466fec5de2 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -48,6 +48,7 @@
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/acct.h>
+#include <linux/cn_proc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -134,7 +135,7 @@ asmlinkage long sys_uselib(const char __user * library)
if (!S_ISREG(nd.dentry->d_inode->i_mode))
goto exit;
- error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
+ error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
if (error)
goto exit;
@@ -494,7 +495,7 @@ struct file *open_exec(const char *name)
file = ERR_PTR(-EACCES);
if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
S_ISREG(inode->i_mode)) {
- int err = permission(inode, MAY_EXEC, &nd);
+ int err = vfs_permission(&nd, MAY_EXEC);
if (!err && !(inode->i_mode & 0111))
err = -EACCES;
file = ERR_PTR(err);
@@ -589,6 +590,7 @@ static inline int de_thread(struct task_struct *tsk)
struct signal_struct *sig = tsk->signal;
struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
+ struct task_struct *leader = NULL;
int count;
/*
@@ -664,7 +666,7 @@ static inline int de_thread(struct task_struct *tsk)
* and to assume its PID:
*/
if (!thread_group_leader(current)) {
- struct task_struct *leader = current->group_leader, *parent;
+ struct task_struct *parent;
struct dentry *proc_dentry1, *proc_dentry2;
unsigned long exit_state, ptrace;
@@ -673,6 +675,7 @@ static inline int de_thread(struct task_struct *tsk)
* It should already be zombie at this point, most
* of the time.
*/
+ leader = current->group_leader;
while (leader->exit_state != EXIT_ZOMBIE)
yield();
@@ -732,7 +735,6 @@ static inline int de_thread(struct task_struct *tsk)
proc_pid_flush(proc_dentry2);
BUG_ON(exit_state != EXIT_ZOMBIE);
- release_task(leader);
}
/*
@@ -742,8 +744,11 @@ static inline int de_thread(struct task_struct *tsk)
sig->flags = 0;
no_thread_group:
- BUG_ON(atomic_read(&sig->count) != 1);
exit_itimers(sig);
+ if (leader)
+ release_task(leader);
+
+ BUG_ON(atomic_read(&sig->count) != 1);
if (atomic_read(&oldsighand->count) == 1) {
/*
@@ -891,7 +896,7 @@ int flush_old_exec(struct linux_binprm * bprm)
flush_thread();
if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
- permission(bprm->file->f_dentry->d_inode,MAY_READ, NULL) ||
+ file_permission(bprm->file, MAY_READ) ||
(bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
suid_keys(current);
current->mm->dumpable = suid_dumpable;
@@ -1096,6 +1101,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
fput(bprm->file);
bprm->file = NULL;
current->did_exec = 1;
+ proc_exec_connector(current);
return retval;
}
read_lock(&binfmt_lock);
@@ -1509,7 +1515,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
goto close_fail;
if (!file->f_op->write)
goto close_fail;
- if (do_truncate(file->f_dentry, 0) != 0)
+ if (do_truncate(file->f_dentry, 0, file) != 0)
goto close_fail;
retval = binfmt->core_dump(signr, regs, file);
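
The exec.c hunks above replace direct permission() calls with the vfs_permission() and file_permission() wrappers, which take a nameidata or an open file respectively and derive the inode themselves. A short sketch of how the two helpers are meant to be called; the function and its arguments are illustrative only:

static int check_exec_target(struct nameidata *nd, struct file *filp)
{
	int err;

	/* Path-based check: the helper uses nd->dentry->d_inode internally */
	err = vfs_permission(nd, MAY_READ | MAY_EXEC);
	if (err)
		return err;

	/* File-based check: the helper uses filp->f_dentry->d_inode */
	return file_permission(filp, MAY_READ);
}
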
diff --git a/fs/ext2/CHANGES b/fs/ext2/CHANGES
deleted file mode 100644
index aa5aaf0e591..00000000000
--- a/fs/ext2/CHANGES
+++ /dev/null
@@ -1,157 +0,0 @@
-Changes from version 0.5a to version 0.5b
-=========================================
- - Now that we have sysctl(), the immutable flag cannot be changed when
- the system is running at security level > 0.
- - Some cleanups in the code.
- - More consistency checks on directories.
- - The ext2.diff patch from Tom May <ftom@netcom.com> has been
- integrated. This patch replaces expensive "/" and "%" with
- cheap ">>" and "&" where possible.
-
-Changes from version 0.5 to version 0.5a
-========================================
- - Zero the partial block following the end of the file when a file
- is truncated.
- - Dates updated in the copyright.
- - More checks when the filesystem is mounted: the count of blocks,
- fragments, and inodes per group is checked against the block size.
- - The buffers used by the error routines are now static variables, to
- avoid using space on the kernel stack, as requested by Linus.
- - Some cleanups in the error messages (some versions of syslog contain
- a bug which truncates an error message if it contains '\n').
- - Check that no data can be written to a file past the 2GB limit.
- - The famous readdir() bug has been fixed by Stephen Tweedie.
- - Added a revision level in the superblock.
- - Full support for O_SYNC flag of the open system call.
- - New mount options: `resuid=#uid' and `resgid=#gid'. `resuid' causes
- ext2fs to consider user #uid like root for the reserved blocks.
- `resgid' acts the same way with group #gid. New fields in the
- superblock contain default values for resuid and resgid and can
- be modified by tune2fs.
- Idea comes from Rene Cougnenc <cougnenc@renux.frmug.fr.net>.
- - New mount options: `bsddf' and `minixdf'. `bsddf' causes ext2fs
- to remove the blocks used for FS structures from the total block
- count in statfs. With `minixdf', ext2fs mimics Minix behavior
- in statfs (i.e. it returns the total number of blocks on the
- partition). This is intended to make bde happy :-)
- - New file attributes:
- - Immutable files cannot be modified. Data cannot be written to
- these files. They cannot be removed, renamed and new links cannot
- be created. Even root cannot modify the files. He has to remove
- the immutable attribute first.
- - Append-only files: can only be written in append-mode when writing.
- They cannot be removed, renamed and new links cannot be created.
- Note: files may only be added to an append-only directory.
- - No-dump files: the attribute is not used by the kernel. My port
- of dump uses it to avoid backing up files which are not important.
- - New check in ext2_check_dir_entry: the inode number is checked.
- - Support for big file systems: the copy of the FS descriptor is now
- dynamically allocated (previous versions used a fixed size array).
- This allows to mount 2GB+ FS.
- - Reorganization of the ext2_inode structure to allow other operating
- systems to create specific fields if they use ext2fs as their native
- file system. Currently, ext2fs is only implemented in Linux but
- will soon be part of Gnu Hurd and of Masix.
-
-Changes from version 0.4b to version 0.5
-========================================
- - New superblock fields: s_lastcheck and s_checkinterval added
- by Uwe Ohse <uwe@tirka.gun.de> to implement timedependent checks
- of the file system
- - Real random numbers for secure rm added by Pierre del Perugia
- <delperug@gla.ecoledoc.ibp.fr>
- - The mount warnings related to the state of a fs are not printed
- if the fs is mounted read-only, idea by Nick Holloway
- <alfie@dcs.warwick.ac.uk>
-
-Changes from version 0.4a to version 0.4b
-=========================================
- - Copyrights changed to include the name of my laboratory.
- - Clean up of balloc.c and ialloc.c.
- - More consistency checks.
- - Block preallocation added by Stephen Tweedie.
- - Direct reads of directories disallowed.
- - Readahead implemented in readdir by Stephen Tweedie.
- - Bugs in block and inodes allocation fixed.
- - Readahead implemented in ext2_find_entry by Chip Salzenberg.
- - New mount options:
- `check=none|normal|strict'
- `debug'
- `errors=continue|remount-ro|panic'
- `grpid', `bsdgroups'
- `nocheck'
- `nogrpid', `sysvgroups'
- - truncate() now tries to deallocate contiguous blocks in a single call
- to ext2_free_blocks().
- - lots of cosmetic changes.
-
-Changes from version 0.4 to version 0.4a
-========================================
- - the `sync' option support is now complete. Version 0.4 was not
- supporting it when truncating a file. I have tested the synchronous
- writes and they work but they make the system very slow :-( I have
- to work again on this to make it faster.
- - when detecting an error on a mounted filesystem, version 0.4 used
- to try to write a flag in the super block even if the filesystem had
- been mounted read-only. This is fixed.
- - the `sb=#' option now causes the kernel code to use the filesystem
- descriptors located at block #+1. Version 0.4 used the superblock
- backup located at block # but used the main copy of the descriptors.
- - a new file attribute `S' is supported. This attribute causes
- synchronous writes but is applied to a file not to the entire file
- system (thanks to Michael Kraehe <kraehe@bakunin.north.de> for
- suggesting it).
- - the directory cache is inhibited by default. The cache management
- code seems to be buggy and I have to look at it carefully before
- using it again.
- - deleting a file with the `s' attribute (secure deletion) causes its
- blocks to be overwritten with random values not with zeros (thanks to
- Michael A. Griffith <grif@cs.ucr.edu> for suggesting it).
- - lots of cosmetic changes have been made.
-
-Changes from version 0.3 to version 0.4
-=======================================
- - Three new mount options are supported: `check', `sync' and `sb=#'.
- `check' tells the kernel code to make more consistency checks
- when the file system is mounted. Currently, the kernel code checks
- that the blocks and inodes bitmaps are consistent with the free
- blocks and inodes counts. More checks will be added in future
- releases.
- `sync' tells the kernel code to use synchronous writes when updating
- an inode, a bitmap, a directory entry or an indirect block. This
- can make the file system much slower but can be a big win for files
- recovery in case of a crash (and we can now say to the BSD folks
- that Linux also supports synchronous updates :-).
- `sb=#' tells the kernel code to use an alternate super block instead
- of its master copy. `#' is the number of the block (counted in
- 1024 bytes blocks) which contains the alternate super block.
- An ext2 file system typically contains backups of the super block
- at blocks 8193, 16385, and so on.
- - I have change the meaning of the valid flag used by e2fsck. it
- now contains the state of the file system. If the kernel code
- detects an inconsistency while the file system is mounted, it flags
- it as erroneous and e2fsck will detect that on next run.
- - The super block now contains a mount counter. This counter is
- incremented each time the file system is mounted read/write. When
- this counter becomes bigger than a maximal mount counts (also stored
- in the super block), e2fsck checks the file system, even if it had
- been unmounted cleanly, and resets this counter to 0.
- - File attributes are now supported. One can associate a set of
- attributes to a file. Three attributes are defined:
- `c': the file is marked for automatic compression,
- `s': the file is marked for secure deletion: when the file is
- deleted, its blocks are zeroed and written back to the disk,
- `u': the file is marked for undeletion: when the file is deleted,
- its contents are saved to allow a future undeletion.
- Currently, only the `s' attribute is implemented in the kernel
- code. Support for the other attributes will be added in a future
- release.
- - a few bugs related to times updates have been fixed by Bruce
- Evans and me.
- - a bug related to the links count of deleted inodes has been fixed.
- Previous versions used to keep the links count set to 1 when a file
- was deleted. The new version now sets links_count to 0 when deleting
- the last link.
- - a race condition when deallocating an inode has been fixed by
- Stephen Tweedie.
-
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 213148c36eb..6af2f413029 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -194,8 +194,7 @@ ext2_get_acl(struct inode *inode, int type)
acl = NULL;
else
acl = ERR_PTR(retval);
- if (value)
- kfree(value);
+ kfree(value);
if (!IS_ERR(acl)) {
switch(type) {
@@ -262,8 +261,7 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
error = ext2_xattr_set(inode, name_index, "", value, size, 0);
- if (value)
- kfree(value);
+ kfree(value);
if (!error) {
switch(type) {
case ACL_TYPE_ACCESS:
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 6591abef64d..bb690806649 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -624,76 +624,3 @@ unsigned long ext2_bg_num_gdb(struct super_block *sb, int group)
return EXT2_SB(sb)->s_gdb_count;
}
-#ifdef CONFIG_EXT2_CHECK
-/* Called at mount-time, super-block is locked */
-void ext2_check_blocks_bitmap (struct super_block * sb)
-{
- struct buffer_head *bitmap_bh = NULL;
- struct ext2_super_block * es;
- unsigned long desc_count, bitmap_count, x, j;
- unsigned long desc_blocks;
- struct ext2_group_desc * desc;
- int i;
-
- es = EXT2_SB(sb)->s_es;
- desc_count = 0;
- bitmap_count = 0;
- desc = NULL;
- for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
- desc = ext2_get_group_desc (sb, i, NULL);
- if (!desc)
- continue;
- desc_count += le16_to_cpu(desc->bg_free_blocks_count);
- brelse(bitmap_bh);
- bitmap_bh = read_block_bitmap(sb, i);
- if (!bitmap_bh)
- continue;
-
- if (ext2_bg_has_super(sb, i) &&
- !ext2_test_bit(0, bitmap_bh->b_data))
- ext2_error(sb, __FUNCTION__,
- "Superblock in group %d is marked free", i);
-
- desc_blocks = ext2_bg_num_gdb(sb, i);
- for (j = 0; j < desc_blocks; j++)
- if (!ext2_test_bit(j + 1, bitmap_bh->b_data))
- ext2_error(sb, __FUNCTION__,
- "Descriptor block #%ld in group "
- "%d is marked free", j, i);
-
- if (!block_in_use(le32_to_cpu(desc->bg_block_bitmap),
- sb, bitmap_bh->b_data))
- ext2_error(sb, "ext2_check_blocks_bitmap",
- "Block bitmap for group %d is marked free",
- i);
-
- if (!block_in_use(le32_to_cpu(desc->bg_inode_bitmap),
- sb, bitmap_bh->b_data))
- ext2_error(sb, "ext2_check_blocks_bitmap",
- "Inode bitmap for group %d is marked free",
- i);
-
- for (j = 0; j < EXT2_SB(sb)->s_itb_per_group; j++)
- if (!block_in_use(le32_to_cpu(desc->bg_inode_table) + j,
- sb, bitmap_bh->b_data))
- ext2_error (sb, "ext2_check_blocks_bitmap",
- "Block #%ld of the inode table in "
- "group %d is marked free", j, i);
-
- x = ext2_count_free(bitmap_bh, sb->s_blocksize);
- if (le16_to_cpu(desc->bg_free_blocks_count) != x)
- ext2_error (sb, "ext2_check_blocks_bitmap",
- "Wrong free blocks count for group %d, "
- "stored = %d, counted = %lu", i,
- le16_to_cpu(desc->bg_free_blocks_count), x);
- bitmap_count += x;
- }
- if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count)
- ext2_error (sb, "ext2_check_blocks_bitmap",
- "Wrong free blocks count in super block, "
- "stored = %lu, counted = %lu",
- (unsigned long)le32_to_cpu(es->s_free_blocks_count),
- bitmap_count);
- brelse(bitmap_bh);
-}
-#endif
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index e2d6208633a..74714af4ae6 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -700,43 +700,3 @@ unsigned long ext2_count_dirs (struct super_block * sb)
return count;
}
-#ifdef CONFIG_EXT2_CHECK
-/* Called at mount-time, super-block is locked */
-void ext2_check_inodes_bitmap (struct super_block * sb)
-{
- struct ext2_super_block * es = EXT2_SB(sb)->s_es;
- unsigned long desc_count = 0, bitmap_count = 0;
- struct buffer_head *bitmap_bh = NULL;
- int i;
-
- for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
- struct ext2_group_desc *desc;
- unsigned x;
-
- desc = ext2_get_group_desc(sb, i, NULL);
- if (!desc)
- continue;
- desc_count += le16_to_cpu(desc->bg_free_inodes_count);
- brelse(bitmap_bh);
- bitmap_bh = read_inode_bitmap(sb, i);
- if (!bitmap_bh)
- continue;
-
- x = ext2_count_free(bitmap_bh, EXT2_INODES_PER_GROUP(sb) / 8);
- if (le16_to_cpu(desc->bg_free_inodes_count) != x)
- ext2_error (sb, "ext2_check_inodes_bitmap",
- "Wrong free inodes count in group %d, "
- "stored = %d, counted = %lu", i,
- le16_to_cpu(desc->bg_free_inodes_count), x);
- bitmap_count += x;
- }
- brelse(bitmap_bh);
- if (percpu_counter_read(&EXT2_SB(sb)->s_freeinodes_counter) !=
- bitmap_count)
- ext2_error(sb, "ext2_check_inodes_bitmap",
- "Wrong free inodes count in super block, "
- "stored = %lu, counted = %lu",
- (unsigned long)le32_to_cpu(es->s_free_inodes_count),
- bitmap_count);
-}
-#endif
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 3c0c7c6a5b4..522fa70dd8e 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -281,7 +281,7 @@ static unsigned long get_sb_block(void **data)
enum {
Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
- Opt_err_ro, Opt_nouid32, Opt_check, Opt_nocheck, Opt_debug,
+ Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
Opt_usrquota, Opt_grpquota
@@ -303,7 +303,6 @@ static match_table_t tokens = {
{Opt_nouid32, "nouid32"},
{Opt_nocheck, "check=none"},
{Opt_nocheck, "nocheck"},
- {Opt_check, "check"},
{Opt_debug, "debug"},
{Opt_oldalloc, "oldalloc"},
{Opt_orlov, "orlov"},
@@ -376,13 +375,6 @@ static int parse_options (char * options,
case Opt_nouid32:
set_opt (sbi->s_mount_opt, NO_UID32);
break;
- case Opt_check:
-#ifdef CONFIG_EXT2_CHECK
- set_opt (sbi->s_mount_opt, CHECK);
-#else
- printk("EXT2 Check option not supported\n");
-#endif
- break;
case Opt_nocheck:
clear_opt (sbi->s_mount_opt, CHECK);
break;
@@ -503,12 +495,6 @@ static int ext2_setup_super (struct super_block * sb,
EXT2_BLOCKS_PER_GROUP(sb),
EXT2_INODES_PER_GROUP(sb),
sbi->s_mount_opt);
-#ifdef CONFIG_EXT2_CHECK
- if (test_opt (sb, CHECK)) {
- ext2_check_blocks_bitmap (sb);
- ext2_check_inodes_bitmap (sb);
- }
-#endif
return res;
}
@@ -895,7 +881,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
}
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_warning(sb, __FUNCTION__,
- "mounting ext3 filesystem as ext2\n");
+ "mounting ext3 filesystem as ext2");
ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
percpu_counter_mod(&sbi->s_freeblocks_counter,
ext2_count_free_blocks(sb));
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 7992d21e0e0..ae1148c24c5 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1517,76 +1517,3 @@ unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
return EXT3_SB(sb)->s_gdb_count;
}
-#ifdef CONFIG_EXT3_CHECK
-/* Called at mount-time, super-block is locked */
-void ext3_check_blocks_bitmap (struct super_block * sb)
-{
- struct ext3_super_block *es;
- unsigned long desc_count, bitmap_count, x, j;
- unsigned long desc_blocks;
- struct buffer_head *bitmap_bh = NULL;
- struct ext3_group_desc *gdp;
- int i;
-
- es = EXT3_SB(sb)->s_es;
- desc_count = 0;
- bitmap_count = 0;
- gdp = NULL;
- for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
- gdp = ext3_get_group_desc (sb, i, NULL);
- if (!gdp)
- continue;
- desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
- brelse(bitmap_bh);
- bitmap_bh = read_block_bitmap(sb, i);
- if (bitmap_bh == NULL)
- continue;
-
- if (ext3_bg_has_super(sb, i) &&
- !ext3_test_bit(0, bitmap_bh->b_data))
- ext3_error(sb, __FUNCTION__,
- "Superblock in group %d is marked free", i);
-
- desc_blocks = ext3_bg_num_gdb(sb, i);
- for (j = 0; j < desc_blocks; j++)
- if (!ext3_test_bit(j + 1, bitmap_bh->b_data))
- ext3_error(sb, __FUNCTION__,
- "Descriptor block #%ld in group "
- "%d is marked free", j, i);
-
- if (!block_in_use (le32_to_cpu(gdp->bg_block_bitmap),
- sb, bitmap_bh->b_data))
- ext3_error (sb, "ext3_check_blocks_bitmap",
- "Block bitmap for group %d is marked free",
- i);
-
- if (!block_in_use (le32_to_cpu(gdp->bg_inode_bitmap),
- sb, bitmap_bh->b_data))
- ext3_error (sb, "ext3_check_blocks_bitmap",
- "Inode bitmap for group %d is marked free",
- i);
-
- for (j = 0; j < EXT3_SB(sb)->s_itb_per_group; j++)
- if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j,
- sb, bitmap_bh->b_data))
- ext3_error (sb, "ext3_check_blocks_bitmap",
- "Block #%d of the inode table in "
- "group %d is marked free", j, i);
-
- x = ext3_count_free(bitmap_bh, sb->s_blocksize);
- if (le16_to_cpu(gdp->bg_free_blocks_count) != x)
- ext3_error (sb, "ext3_check_blocks_bitmap",
- "Wrong free blocks count for group %d, "
- "stored = %d, counted = %lu", i,
- le16_to_cpu(gdp->bg_free_blocks_count), x);
- bitmap_count += x;
- }
- brelse(bitmap_bh);
- if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count)
- ext3_error (sb, "ext3_check_blocks_bitmap",
- "Wrong free blocks count in super block, "
- "stored = %lu, counted = %lu",
- (unsigned long)le32_to_cpu(es->s_free_blocks_count),
- bitmap_count);
-}
-#endif
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index df3f517c54a..9e4a2437621 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -756,44 +756,3 @@ unsigned long ext3_count_dirs (struct super_block * sb)
return count;
}
-#ifdef CONFIG_EXT3_CHECK
-/* Called at mount-time, super-block is locked */
-void ext3_check_inodes_bitmap (struct super_block * sb)
-{
- struct ext3_super_block * es;
- unsigned long desc_count, bitmap_count, x;
- struct buffer_head *bitmap_bh = NULL;
- struct ext3_group_desc * gdp;
- int i;
-
- es = EXT3_SB(sb)->s_es;
- desc_count = 0;
- bitmap_count = 0;
- gdp = NULL;
- for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
- gdp = ext3_get_group_desc (sb, i, NULL);
- if (!gdp)
- continue;
- desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
- brelse(bitmap_bh);
- bitmap_bh = read_inode_bitmap(sb, i);
- if (!bitmap_bh)
- continue;
-
- x = ext3_count_free(bitmap_bh, EXT3_INODES_PER_GROUP(sb) / 8);
- if (le16_to_cpu(gdp->bg_free_inodes_count) != x)
- ext3_error (sb, "ext3_check_inodes_bitmap",
- "Wrong free inodes count in group %d, "
- "stored = %d, counted = %lu", i,
- le16_to_cpu(gdp->bg_free_inodes_count), x);
- bitmap_count += x;
- }
- brelse(bitmap_bh);
- if (le32_to_cpu(es->s_free_inodes_count) != bitmap_count)
- ext3_error (sb, "ext3_check_inodes_bitmap",
- "Wrong free inodes count in super block, "
- "stored = %lu, counted = %lu",
- (unsigned long)le32_to_cpu(es->s_free_inodes_count),
- bitmap_count);
-}
-#endif
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 5d9b00e2883..8824e84f8a5 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1384,8 +1384,10 @@ static int ext3_journalled_writepage(struct page *page,
ClearPageChecked(page);
ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
ext3_get_block);
- if (ret != 0)
+ if (ret != 0) {
+ ext3_journal_stop(handle);
goto out_unlock;
+ }
ret = walk_page_buffers(handle, page_buffers(page), 0,
PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index f594989ccb7..4e6730622d9 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -625,7 +625,7 @@ static struct export_operations ext3_export_ops = {
enum {
Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
- Opt_nouid32, Opt_check, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
+ Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh,
Opt_commit, Opt_journal_update, Opt_journal_inum,
@@ -652,7 +652,6 @@ static match_table_t tokens = {
{Opt_nouid32, "nouid32"},
{Opt_nocheck, "nocheck"},
{Opt_nocheck, "check=none"},
- {Opt_check, "check"},
{Opt_debug, "debug"},
{Opt_oldalloc, "oldalloc"},
{Opt_orlov, "orlov"},
@@ -773,14 +772,6 @@ static int parse_options (char * options, struct super_block *sb,
case Opt_nouid32:
set_opt (sbi->s_mount_opt, NO_UID32);
break;
- case Opt_check:
-#ifdef CONFIG_EXT3_CHECK
- set_opt (sbi->s_mount_opt, CHECK);
-#else
- printk(KERN_ERR
- "EXT3 Check option not supported\n");
-#endif
- break;
case Opt_nocheck:
clear_opt (sbi->s_mount_opt, CHECK);
break;
@@ -1115,12 +1106,6 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
} else {
printk("internal journal\n");
}
-#ifdef CONFIG_EXT3_CHECK
- if (test_opt (sb, CHECK)) {
- ext3_check_blocks_bitmap (sb);
- ext3_check_inodes_bitmap (sb);
- }
-#endif
return res;
}
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index e2effe2dc9b..a0f9b9fe130 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -846,7 +846,7 @@ static match_table_t vfat_tokens = {
{Opt_err, NULL}
};
-static int parse_options(char *options, int is_vfat, int *debug,
+static int parse_options(char *options, int is_vfat, int silent, int *debug,
struct fat_mount_options *opts)
{
char *p;
@@ -1008,8 +1008,11 @@ static int parse_options(char *options, int is_vfat, int *debug,
break;
/* unknown option */
default:
- printk(KERN_ERR "FAT: Unrecognized mount option \"%s\" "
- "or missing value\n", p);
+ if (!silent) {
+ printk(KERN_ERR
+ "FAT: Unrecognized mount option \"%s\" "
+ "or missing value\n", p);
+ }
return -EINVAL;
}
}
@@ -1091,7 +1094,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
sb->s_export_op = &fat_export_ops;
sbi->dir_ops = fs_dir_inode_ops;
- error = parse_options(data, isvfat, &debug, &sbi->options);
+ error = parse_options(data, isvfat, silent, &debug, &sbi->options);
if (error)
goto out_fail;
diff --git a/fs/file_table.c b/fs/file_table.c
index 4dc20554654..c3a5e2fd663 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(filp_count_lock);
* context and must be fully threaded - use a local spinlock
* to protect files_stat.nr_files
*/
-void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags)
+void filp_ctor(void *objp, struct kmem_cache *cachep, unsigned long cflags)
{
if ((cflags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {
@@ -46,7 +46,7 @@ void filp_ctor(void * objp, struct kmem_cache_s *cachep, unsigned long cflags)
}
}
-void filp_dtor(void * objp, struct kmem_cache_s *cachep, unsigned long dflags)
+void filp_dtor(void *objp, struct kmem_cache *cachep, unsigned long dflags)
{
unsigned long flags;
spin_lock_irqsave(&filp_count_lock, flags);
diff --git a/fs/freevxfs/vxfs_bmap.c b/fs/freevxfs/vxfs_bmap.c
index d3f6b2835bc..2d71128bd8d 100644
--- a/fs/freevxfs/vxfs_bmap.c
+++ b/fs/freevxfs/vxfs_bmap.c
@@ -36,6 +36,7 @@
#include "vxfs.h"
#include "vxfs_inode.h"
+#include "vxfs_extern.h"
#ifdef DIAGNOSTIC
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
index d8be917f979..927acf70c59 100644
--- a/fs/freevxfs/vxfs_extern.h
+++ b/fs/freevxfs/vxfs_extern.h
@@ -38,7 +38,7 @@
*/
-struct kmem_cache_s;
+struct kmem_cache;
struct super_block;
struct vxfs_inode_info;
struct inode;
@@ -51,7 +51,7 @@ extern daddr_t vxfs_bmap1(struct inode *, long);
extern int vxfs_read_fshead(struct super_block *);
/* vxfs_inode.c */
-extern struct kmem_cache_s *vxfs_inode_cachep;
+extern struct kmem_cache *vxfs_inode_cachep;
extern void vxfs_dumpi(struct vxfs_inode_info *, ino_t);
extern struct inode * vxfs_get_fake_inode(struct super_block *,
struct vxfs_inode_info *);
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 9672d2facff..f544aae9169 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -46,15 +46,6 @@ extern struct address_space_operations vxfs_immed_aops;
extern struct inode_operations vxfs_immed_symlink_iops;
-static struct file_operations vxfs_file_operations = {
- .open = generic_file_open,
- .llseek = generic_file_llseek,
- .read = generic_file_read,
- .mmap = generic_file_mmap,
- .sendfile = generic_file_sendfile,
-};
-
-
kmem_cache_t *vxfs_inode_cachep;
@@ -318,7 +309,7 @@ vxfs_read_inode(struct inode *ip)
aops = &vxfs_aops;
if (S_ISREG(ip->i_mode)) {
- ip->i_fop = &vxfs_file_operations;
+ ip->i_fop = &generic_ro_fops;
ip->i_mapping->a_ops = aops;
} else if (S_ISDIR(ip->i_mode)) {
ip->i_op = &vxfs_dir_inode_ops;
diff --git a/fs/freevxfs/vxfs_olt.c b/fs/freevxfs/vxfs_olt.c
index 133476201d8..76a0708ae97 100644
--- a/fs/freevxfs/vxfs_olt.c
+++ b/fs/freevxfs/vxfs_olt.c
@@ -36,6 +36,7 @@
#include "vxfs.h"
#include "vxfs_olt.h"
+#include "vxfs_extern.h"
static inline void
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c27f8d4098b..785c7213a54 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -562,7 +562,7 @@ int write_inode_now(struct inode *inode, int sync)
};
if (!mapping_cap_writeback_dirty(inode->i_mapping))
- return 0;
+ wbc.nr_to_write = 0;
might_sleep();
spin_lock(&inode_lock);
@@ -606,7 +606,7 @@ EXPORT_SYMBOL(sync_inode);
* O_SYNC flag set, to flush dirty writes to disk.
*
* @what is a bitmask, specifying which part of the inode's data should be
- * written and waited upon:
+ * written and waited upon.
*
* OSYNC_DATA: i_mapping's dirty data
* OSYNC_METADATA: the buffers at i_mapping->private_list
@@ -672,8 +672,9 @@ int writeback_acquire(struct backing_dev_info *bdi)
/**
* writeback_in_progress: determine whether there is writeback in progress
- * against a backing device.
* @bdi: the device's backing_dev_info structure.
+ *
+ * Determine whether there is writeback in progress against a backing device.
*/
int writeback_in_progress(struct backing_dev_info *bdi)
{
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a6f90a6c754..8f873e621f4 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -184,6 +184,13 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
fuse_putback_request() */
for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
up(&fc->outstanding_sem);
+ } else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
+ /* Special case for failed iget in CREATE */
+ u64 nodeid = req->in.h.nodeid;
+ __fuse_get_request(req);
+ fuse_reset_request(req);
+ fuse_send_forget(fc, req, nodeid, 1);
+ putback = 0;
}
if (putback)
fuse_putback_request(fc, req);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 70dba721aca..c045cc70c74 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -13,6 +13,7 @@
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/namei.h>
+#include <linux/mount.h>
static inline unsigned long time_to_jiffies(unsigned long sec,
unsigned long nsec)
@@ -134,6 +135,101 @@ static void fuse_invalidate_entry(struct dentry *entry)
entry->d_time = jiffies - 1;
}
+static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
+ struct nameidata *nd)
+{
+ int err;
+ struct inode *inode;
+ struct fuse_conn *fc = get_fuse_conn(dir);
+ struct fuse_req *req;
+ struct fuse_open_in inarg;
+ struct fuse_open_out outopen;
+ struct fuse_entry_out outentry;
+ struct fuse_inode *fi;
+ struct fuse_file *ff;
+ struct file *file;
+ int flags = nd->intent.open.flags - 1;
+
+ err = -ENOSYS;
+ if (fc->no_create)
+ goto out;
+
+ err = -ENAMETOOLONG;
+ if (entry->d_name.len > FUSE_NAME_MAX)
+ goto out;
+
+ err = -EINTR;
+ req = fuse_get_request(fc);
+ if (!req)
+ goto out;
+
+ ff = fuse_file_alloc();
+ if (!ff)
+ goto out_put_request;
+
+ flags &= ~O_NOCTTY;
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.flags = flags;
+ inarg.mode = mode;
+ req->in.h.opcode = FUSE_CREATE;
+ req->in.h.nodeid = get_node_id(dir);
+ req->inode = dir;
+ req->in.numargs = 2;
+ req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].value = &inarg;
+ req->in.args[1].size = entry->d_name.len + 1;
+ req->in.args[1].value = entry->d_name.name;
+ req->out.numargs = 2;
+ req->out.args[0].size = sizeof(outentry);
+ req->out.args[0].value = &outentry;
+ req->out.args[1].size = sizeof(outopen);
+ req->out.args[1].value = &outopen;
+ request_send(fc, req);
+ err = req->out.h.error;
+ if (err) {
+ if (err == -ENOSYS)
+ fc->no_create = 1;
+ goto out_free_ff;
+ }
+
+ err = -EIO;
+ if (!S_ISREG(outentry.attr.mode))
+ goto out_free_ff;
+
+ inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
+ &outentry.attr);
+ err = -ENOMEM;
+ if (!inode) {
+ flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
+ ff->fh = outopen.fh;
+ fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0);
+ goto out_put_request;
+ }
+ fuse_put_request(fc, req);
+ entry->d_time = time_to_jiffies(outentry.entry_valid,
+ outentry.entry_valid_nsec);
+ fi = get_fuse_inode(inode);
+ fi->i_time = time_to_jiffies(outentry.attr_valid,
+ outentry.attr_valid_nsec);
+
+ d_instantiate(entry, inode);
+ file = lookup_instantiate_filp(nd, entry, generic_file_open);
+ if (IS_ERR(file)) {
+ ff->fh = outopen.fh;
+ fuse_send_release(fc, ff, outentry.nodeid, inode, flags, 0);
+ return PTR_ERR(file);
+ }
+ fuse_finish_open(inode, file, ff, &outopen);
+ return 0;
+
+ out_free_ff:
+ fuse_file_free(ff);
+ out_put_request:
+ fuse_put_request(fc, req);
+ out:
+ return err;
+}
+
static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
struct inode *dir, struct dentry *entry,
int mode)
@@ -208,6 +304,12 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
static int fuse_create(struct inode *dir, struct dentry *entry, int mode,
struct nameidata *nd)
{
+ if (nd && (nd->flags & LOOKUP_CREATE)) {
+ int err = fuse_create_open(dir, entry, mode, nd);
+ if (err != -ENOSYS)
+ return err;
+ /* Fall back on mknod */
+ }
return fuse_mknod(dir, entry, mode, 0);
}
@@ -461,6 +563,38 @@ static int fuse_revalidate(struct dentry *entry)
return fuse_do_getattr(inode);
}
+static int fuse_access(struct inode *inode, int mask)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_req *req;
+ struct fuse_access_in inarg;
+ int err;
+
+ if (fc->no_access)
+ return 0;
+
+ req = fuse_get_request(fc);
+ if (!req)
+ return -EINTR;
+
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.mask = mask;
+ req->in.h.opcode = FUSE_ACCESS;
+ req->in.h.nodeid = get_node_id(inode);
+ req->inode = inode;
+ req->in.numargs = 1;
+ req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].value = &inarg;
+ request_send(fc, req);
+ err = req->out.h.error;
+ fuse_put_request(fc, req);
+ if (err == -ENOSYS) {
+ fc->no_access = 1;
+ err = 0;
+ }
+ return err;
+}
+
static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd)
{
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -491,11 +625,11 @@ static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd)
return err;
} else {
int mode = inode->i_mode;
- if ((mask & MAY_WRITE) && IS_RDONLY(inode) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- return -EROFS;
if ((mask & MAY_EXEC) && !S_ISDIR(mode) && !(mode & S_IXUGO))
return -EACCES;
+
+ if (nd && (nd->flags & LOOKUP_ACCESS))
+ return fuse_access(inode, mask);
return 0;
}
}
@@ -629,29 +763,29 @@ static int fuse_dir_fsync(struct file *file, struct dentry *de, int datasync)
return file ? fuse_fsync_common(file, de, datasync, 1) : 0;
}
-static unsigned iattr_to_fattr(struct iattr *iattr, struct fuse_attr *fattr)
+static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
{
unsigned ivalid = iattr->ia_valid;
- unsigned fvalid = 0;
-
- memset(fattr, 0, sizeof(*fattr));
if (ivalid & ATTR_MODE)
- fvalid |= FATTR_MODE, fattr->mode = iattr->ia_mode;
+ arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode;
if (ivalid & ATTR_UID)
- fvalid |= FATTR_UID, fattr->uid = iattr->ia_uid;
+ arg->valid |= FATTR_UID, arg->uid = iattr->ia_uid;
if (ivalid & ATTR_GID)
- fvalid |= FATTR_GID, fattr->gid = iattr->ia_gid;
+ arg->valid |= FATTR_GID, arg->gid = iattr->ia_gid;
if (ivalid & ATTR_SIZE)
- fvalid |= FATTR_SIZE, fattr->size = iattr->ia_size;
+ arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size;
/* You can only _set_ these together (they may change by themselves) */
if ((ivalid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) {
- fvalid |= FATTR_ATIME | FATTR_MTIME;
- fattr->atime = iattr->ia_atime.tv_sec;
- fattr->mtime = iattr->ia_mtime.tv_sec;
+ arg->valid |= FATTR_ATIME | FATTR_MTIME;
+ arg->atime = iattr->ia_atime.tv_sec;
+ arg->mtime = iattr->ia_mtime.tv_sec;
+ }
+ if (ivalid & ATTR_FILE) {
+ struct fuse_file *ff = iattr->ia_file->private_data;
+ arg->valid |= FATTR_FH;
+ arg->fh = ff->fh;
}
-
- return fvalid;
}
static int fuse_setattr(struct dentry *entry, struct iattr *attr)
@@ -686,7 +820,7 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
return -EINTR;
memset(&inarg, 0, sizeof(inarg));
- inarg.valid = iattr_to_fattr(attr, &inarg.attr);
+ iattr_to_fattr(attr, &inarg);
req->in.h.opcode = FUSE_SETATTR;
req->in.h.nodeid = get_node_id(inode);
req->inode = inode;
@@ -735,7 +869,9 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
struct nameidata *nd)
{
struct inode *inode;
- int err = fuse_lookup_iget(dir, entry, &inode);
+ int err;
+
+ err = fuse_lookup_iget(dir, entry, &inode);
if (err)
return ERR_PTR(err);
if (inode && S_ISDIR(inode->i_mode)) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 657ab11c173..2ca86141d13 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -14,11 +14,69 @@
static struct file_operations fuse_direct_io_file_operations;
-int fuse_open_common(struct inode *inode, struct file *file, int isdir)
+static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
+ struct fuse_open_out *outargp)
{
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req;
struct fuse_open_in inarg;
+ struct fuse_req *req;
+ int err;
+
+ req = fuse_get_request(fc);
+ if (!req)
+ return -EINTR;
+
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
+ req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
+ req->in.h.nodeid = get_node_id(inode);
+ req->inode = inode;
+ req->in.numargs = 1;
+ req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].value = &inarg;
+ req->out.numargs = 1;
+ req->out.args[0].size = sizeof(*outargp);
+ req->out.args[0].value = outargp;
+ request_send(fc, req);
+ err = req->out.h.error;
+ fuse_put_request(fc, req);
+
+ return err;
+}
+
+struct fuse_file *fuse_file_alloc(void)
+{
+ struct fuse_file *ff;
+ ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
+ if (ff) {
+ ff->release_req = fuse_request_alloc();
+ if (!ff->release_req) {
+ kfree(ff);
+ ff = NULL;
+ }
+ }
+ return ff;
+}
+
+void fuse_file_free(struct fuse_file *ff)
+{
+ fuse_request_free(ff->release_req);
+ kfree(ff);
+}
+
+void fuse_finish_open(struct inode *inode, struct file *file,
+ struct fuse_file *ff, struct fuse_open_out *outarg)
+{
+ if (outarg->open_flags & FOPEN_DIRECT_IO)
+ file->f_op = &fuse_direct_io_file_operations;
+ if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
+ invalidate_inode_pages(inode->i_mapping);
+ ff->fh = outarg->fh;
+ file->private_data = ff;
+}
+
+int fuse_open_common(struct inode *inode, struct file *file, int isdir)
+{
struct fuse_open_out outarg;
struct fuse_file *ff;
int err;
@@ -34,73 +92,53 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
/* If opening the root node, no lookup has been performed on
it, so the attributes must be refreshed */
if (get_node_id(inode) == FUSE_ROOT_ID) {
- int err = fuse_do_getattr(inode);
+ err = fuse_do_getattr(inode);
if (err)
return err;
}
- req = fuse_get_request(fc);
- if (!req)
- return -EINTR;
-
- err = -ENOMEM;
- ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
+ ff = fuse_file_alloc();
if (!ff)
- goto out_put_request;
+ return -ENOMEM;
- ff->release_req = fuse_request_alloc();
- if (!ff->release_req) {
- kfree(ff);
- goto out_put_request;
- }
-
- memset(&inarg, 0, sizeof(inarg));
- inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
- req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
- req->in.h.nodeid = get_node_id(inode);
- req->inode = inode;
- req->in.numargs = 1;
- req->in.args[0].size = sizeof(inarg);
- req->in.args[0].value = &inarg;
- req->out.numargs = 1;
- req->out.args[0].size = sizeof(outarg);
- req->out.args[0].value = &outarg;
- request_send(fc, req);
- err = req->out.h.error;
- if (err) {
- fuse_request_free(ff->release_req);
- kfree(ff);
- } else {
- if (!isdir && (outarg.open_flags & FOPEN_DIRECT_IO))
- file->f_op = &fuse_direct_io_file_operations;
- if (!(outarg.open_flags & FOPEN_KEEP_CACHE))
- invalidate_inode_pages(inode->i_mapping);
- ff->fh = outarg.fh;
- file->private_data = ff;
+ err = fuse_send_open(inode, file, isdir, &outarg);
+ if (err)
+ fuse_file_free(ff);
+ else {
+ if (isdir)
+ outarg.open_flags &= ~FOPEN_DIRECT_IO;
+ fuse_finish_open(inode, file, ff, &outarg);
}
- out_put_request:
- fuse_put_request(fc, req);
return err;
}
-int fuse_release_common(struct inode *inode, struct file *file, int isdir)
+void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
+ u64 nodeid, struct inode *inode, int flags, int isdir)
{
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_file *ff = file->private_data;
- struct fuse_req *req = ff->release_req;
+ struct fuse_req * req = ff->release_req;
struct fuse_release_in *inarg = &req->misc.release_in;
inarg->fh = ff->fh;
- inarg->flags = file->f_flags & ~O_EXCL;
+ inarg->flags = flags;
req->in.h.opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
- req->in.h.nodeid = get_node_id(inode);
+ req->in.h.nodeid = nodeid;
req->inode = inode;
req->in.numargs = 1;
req->in.args[0].size = sizeof(struct fuse_release_in);
req->in.args[0].value = inarg;
request_send_background(fc, req);
kfree(ff);
+}
+
+int fuse_release_common(struct inode *inode, struct file *file, int isdir)
+{
+ struct fuse_file *ff = file->private_data;
+ if (ff) {
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ u64 nodeid = get_node_id(inode);
+ fuse_send_release(fc, ff, nodeid, inode, file->f_flags, isdir);
+ }
/* Return value is ignored by VFS */
return 0;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5cb456f572c..0ea5301f86b 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -266,6 +266,12 @@ struct fuse_conn {
/** Is removexattr not implemented by fs? */
unsigned no_removexattr : 1;
+ /** Is access not implemented by fs? */
+ unsigned no_access : 1;
+
+ /** Is create not implemented by fs? */
+ unsigned no_create : 1;
+
/** Backing dev info */
struct backing_dev_info bdi;
};
@@ -337,6 +343,17 @@ size_t fuse_send_read_common(struct fuse_req *req, struct file *file,
*/
int fuse_open_common(struct inode *inode, struct file *file, int isdir);
+struct fuse_file *fuse_file_alloc(void);
+void fuse_file_free(struct fuse_file *ff);
+void fuse_finish_open(struct inode *inode, struct file *file,
+ struct fuse_file *ff, struct fuse_open_out *outarg);
+
+/**
+ * Send a RELEASE request
+ */
+void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
+ u64 nodeid, struct inode *inode, int flags, int isdir);
+
/**
* Send RELEASE or RELEASEDIR request
*/
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index aae019aadf8..cc5dcd52e23 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -9,7 +9,6 @@
#ifndef _LINUX_HFS_FS_H
#define _LINUX_HFS_FS_H
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/buffer_head.h>
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 3f680c5675b..d499393a8ae 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -12,7 +12,6 @@
*/
#include <linux/pagemap.h>
-#include <linux/version.h>
#include <linux/mpage.h>
#include "hfs_fs.h"
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index b85abc6e6f8..930cd9212de 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -13,7 +13,6 @@
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>
-#include <linux/version.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 7bda76667a4..50c8f44b6c6 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -13,7 +13,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/random.h>
-#include <linux/version.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index e7235ca79a9..e3ff56a0301 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -11,7 +11,6 @@
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
-#include <linux/version.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 2bc0cdd30e5..c60e5635498 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -11,7 +11,6 @@
#define _LINUX_HFSPLUS_FS_H
#include <linux/fs.h>
-#include <linux/version.h>
#include <linux/buffer_head.h>
#include "hfsplus_raw.h"
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index f205773ddfb..fc98583cf04 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -11,7 +11,6 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
-#include <linux/version.h>
#include <linux/mpage.h>
#include "hfsplus_fs.h"
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 452fc1fdbd3..0ce1c455ae5 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -14,7 +14,6 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/vfs.h>
#include <linux/nls.h>
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 0c51d6338b0..95455e83923 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -12,7 +12,6 @@
#include <linux/blkdev.h>
#include <linux/cdrom.h>
#include <linux/genhd.h>
-#include <linux/version.h>
#include <asm/unaligned.h>
#include "hfsplus_fs.h"
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index dd711310626..4684eb7d48c 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -8,7 +8,6 @@
#include <linux/stddef.h>
#include <linux/fs.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -294,8 +293,7 @@ static void hostfs_delete_inode(struct inode *inode)
static void hostfs_destroy_inode(struct inode *inode)
{
- if(HOSTFS_I(inode)->host_filename)
- kfree(HOSTFS_I(inode)->host_filename);
+ kfree(HOSTFS_I(inode)->host_filename);
/*XXX: This should not happen, probably. The check is here for
* additional safety.*/
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 1d21307730a..229ff2fb180 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -244,12 +244,12 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
go_up:
if (namelen >= 256) {
hpfs_error(i->i_sb, "hpfs_add_to_dnode: namelen == %d", namelen);
- if (nd) kfree(nd);
+ kfree(nd);
kfree(nname);
return 1;
}
if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) {
- if (nd) kfree(nd);
+ kfree(nd);
kfree(nname);
return 1;
}
@@ -257,7 +257,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
if (hpfs_sb(i->i_sb)->sb_chk)
if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_to_dnode")) {
hpfs_brelse4(&qbh);
- if (nd) kfree(nd);
+ kfree(nd);
kfree(nname);
return 1;
}
@@ -270,7 +270,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
for_all_poss(i, hpfs_pos_subst, 5, t + 1);
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
- if (nd) kfree(nd);
+ kfree(nd);
kfree(nname);
return 0;
}
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index ab144dabd87..7c995ac4081 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -114,11 +114,8 @@ static ssize_t hpfs_file_write(struct file *file, const char __user *buf,
ssize_t retval;
retval = generic_file_write(file, buf, count, ppos);
- if (retval > 0) {
- struct inode *inode = file->f_dentry->d_inode;
- inode->i_mtime = CURRENT_TIME_SEC;
- hpfs_i(inode)->i_dirty = 1;
- }
+ if (retval > 0)
+ hpfs_i(file->f_dentry->d_inode)->i_dirty = 1;
return retval;
}
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 8eefa6366db..63e88d7e2c3 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -75,7 +75,7 @@ void hpfs_error(struct super_block *s, char *m,...)
} else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n");
else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n");
} else printk("\n");
- if (buf) kfree(buf);
+ kfree(buf);
hpfs_sb(s)->sb_was_error = 1;
}
@@ -102,8 +102,8 @@ int hpfs_stop_cycles(struct super_block *s, int key, int *c1, int *c2,
static void hpfs_put_super(struct super_block *s)
{
struct hpfs_sb_info *sbi = hpfs_sb(s);
- if (sbi->sb_cp_table) kfree(sbi->sb_cp_table);
- if (sbi->sb_bmp_dir) kfree(sbi->sb_bmp_dir);
+ kfree(sbi->sb_cp_table);
+ kfree(sbi->sb_bmp_dir);
unmark_dirty(s);
s->s_fs_info = NULL;
kfree(sbi);
@@ -654,8 +654,8 @@ bail3: brelse(bh1);
bail2: brelse(bh0);
bail1:
bail0:
- if (sbi->sb_bmp_dir) kfree(sbi->sb_bmp_dir);
- if (sbi->sb_cp_table) kfree(sbi->sb_cp_table);
+ kfree(sbi->sb_bmp_dir);
+ kfree(sbi->sb_cp_table);
s->s_fs_info = NULL;
kfree(sbi);
return -EINVAL;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e026c807e6b..64983ab5558 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -63,7 +63,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
*
* Result is in bytes to be compatible with is_hugepage_mem_enough()
*/
-unsigned long
+static unsigned long
huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma)
{
int i;
diff --git a/fs/inotify.c b/fs/inotify.c
index 9fbaebfdf40..bf7ce1d2412 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -372,7 +372,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd)
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
- error = permission(nd->dentry->d_inode, MAY_READ, NULL);
+ error = vfs_permission(nd, MAY_READ);
if (error)
path_release(nd);
return error;
diff --git a/fs/ioprio.c b/fs/ioprio.c
index d1c1f2b2c9d..4bf1c6365a1 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
+#include <linux/syscalls.h>
static int set_task_ioprio(struct task_struct *task, int ioprio)
{
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 1652de1b6cb..298f08be22d 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -855,8 +855,7 @@ root_found:
if (opt.check == 'r') table++;
s->s_root->d_op = &isofs_dentry_ops[table];
- if (opt.iocharset)
- kfree(opt.iocharset);
+ kfree(opt.iocharset);
return 0;
@@ -895,8 +894,7 @@ out_unknown_format:
out_freebh:
brelse(bh);
out_freesbi:
- if (opt.iocharset)
- kfree(opt.iocharset);
+ kfree(opt.iocharset);
kfree(sbi);
s->s_fs_info = NULL;
return -EINVAL;
@@ -1164,8 +1162,7 @@ out_nomem:
out_noread:
printk(KERN_INFO "ISOFS: unable to read i-node block %lu\n", block);
- if (tmpde)
- kfree(tmpde);
+ kfree(tmpde);
return -EIO;
out_toomany:
@@ -1334,8 +1331,7 @@ static void isofs_read_inode(struct inode *inode)
init_special_inode(inode, inode->i_mode, inode->i_rdev);
out:
- if (tmpde)
- kfree(tmpde);
+ kfree(tmpde);
if (bh)
brelse(bh);
return;
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 2a3e310f79e..002ad2bbc76 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -261,10 +261,8 @@ void journal_commit_transaction(journal_t *journal)
struct buffer_head *bh = jh2bh(jh);
jbd_lock_bh_state(bh);
- if (jh->b_committed_data) {
- kfree(jh->b_committed_data);
- jh->b_committed_data = NULL;
- }
+ kfree(jh->b_committed_data);
+ jh->b_committed_data = NULL;
jbd_unlock_bh_state(bh);
}
journal_refile_buffer(journal, jh);
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 103c34e4fb2..80d7f53fd0a 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -210,7 +210,7 @@ do { \
} while (0)
/**
- * int journal_recover(journal_t *journal) - recovers a on-disk journal
+ * journal_recover - recovers an on-disk journal
* @journal: the journal to recover
*
* The primary function for recovering the log contents when mounting a
@@ -266,7 +266,7 @@ int journal_recover(journal_t *journal)
}
/**
- * int journal_skip_recovery() - Start journal and wipe exiting records
+ * journal_skip_recovery - Start journal and wipe existing records
* @journal: journal to startup
*
* Locate any valid recovery information from the journal and set up the
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 13cb05bf604..429f4b263cf 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -227,8 +227,7 @@ repeat_locked:
spin_unlock(&transaction->t_handle_lock);
spin_unlock(&journal->j_state_lock);
out:
- if (new_transaction)
- kfree(new_transaction);
+ kfree(new_transaction);
return ret;
}
@@ -725,8 +724,7 @@ done:
journal_cancel_revoke(handle, jh);
out:
- if (frozen_buffer)
- kfree(frozen_buffer);
+ kfree(frozen_buffer);
JBUFFER_TRACE(jh, "exit");
return error;
@@ -905,8 +903,7 @@ repeat:
jbd_unlock_bh_state(bh);
out:
journal_put_journal_head(jh);
- if (committed_data)
- kfree(committed_data);
+ kfree(committed_data);
return err;
}
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index 27f199e94cf..b2e95421d93 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -462,7 +462,7 @@ jffs_checksum_flash(struct mtd_info *mtd, loff_t start, int size, __u32 *result)
}
/* Free read buffer */
- kfree (read_buf);
+ kfree(read_buf);
/* Return result */
D3(printk("checksum result: 0x%08x\n", sum));
@@ -1011,12 +1011,12 @@ jffs_scan_flash(struct jffs_control *c)
offset , fmc->sector_size);
flash_safe_release(fmc->mtd);
- kfree (read_buf);
+ kfree(read_buf);
return -1; /* bad, bad, bad! */
}
flash_safe_release(fmc->mtd);
- kfree (read_buf);
+ kfree(read_buf);
return -EAGAIN; /* erased offending sector. Try mount one more time please. */
}
@@ -1112,7 +1112,7 @@ jffs_scan_flash(struct jffs_control *c)
if (!node) {
if (!(node = jffs_alloc_node())) {
/* Free read buffer */
- kfree (read_buf);
+ kfree(read_buf);
/* Release the flash device */
flash_safe_release(fmc->mtd);
@@ -1269,7 +1269,7 @@ jffs_scan_flash(struct jffs_control *c)
DJM(no_jffs_node--);
/* Free read buffer */
- kfree (read_buf);
+ kfree(read_buf);
/* Release the flash device */
flash_safe_release(fmc->mtd);
@@ -1296,7 +1296,7 @@ jffs_scan_flash(struct jffs_control *c)
flash_safe_release(fmc->flash_part);
/* Free read buffer */
- kfree (read_buf);
+ kfree(read_buf);
return -ENOMEM;
}
@@ -1324,7 +1324,7 @@ jffs_scan_flash(struct jffs_control *c)
jffs_build_end(fmc);
/* Free read buffer */
- kfree (read_buf);
+ kfree(read_buf);
if(!num_free_space){
printk(KERN_WARNING "jffs_scan_flash(): Did not find even a single "
@@ -1747,9 +1747,7 @@ jffs_find_child(struct jffs_file *dir, const char *name, int len)
}
printk("jffs_find_child(): Didn't find the file \"%s\".\n",
(copy ? copy : ""));
- if (copy) {
- kfree(copy);
- }
+ kfree(copy);
});
return f;
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c
index 053e3a98a27..6da13b309bd 100644
--- a/fs/jffs/jffs_fm.c
+++ b/fs/jffs/jffs_fm.c
@@ -20,6 +20,7 @@
#include <linux/blkdev.h>
#include <linux/jffs.h>
#include "jffs_fm.h"
+#include "intrep.h"
#if defined(JFFS_MARK_OBSOLETE) && JFFS_MARK_OBSOLETE
static int jffs_mark_obsolete(struct jffs_fmcontrol *fmc, __u32 fm_offset);
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile
index f1afe681ecd..77dc5561a04 100644
--- a/fs/jffs2/Makefile
+++ b/fs/jffs2/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for the Linux Journalling Flash File System v2 (JFFS2)
#
-# $Id: Makefile.common,v 1.9 2005/02/09 09:23:53 pavlov Exp $
+# $Id: Makefile.common,v 1.11 2005/09/07 08:34:53 havasi Exp $
#
obj-$(CONFIG_JFFS2_FS) += jffs2.o
@@ -9,9 +9,10 @@ obj-$(CONFIG_JFFS2_FS) += jffs2.o
jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o
jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o
jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o
-jffs2-y += super.o
+jffs2-y += super.o debug.o
jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o
jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o
jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
+jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o
diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO
index 2bff82fd221..d0e23b26fa5 100644
--- a/fs/jffs2/TODO
+++ b/fs/jffs2/TODO
@@ -1,5 +1,11 @@
-$Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $
+$Id: TODO,v 1.18 2005/09/22 11:24:56 dedekind Exp $
+ - support asynchronous operation -- add a per-fs 'reserved_space' count,
+ let each outstanding write reserve the _maximum_ amount of physical
+ space it could take. Let GC flush the outstanding writes because the
+ reservations will necessarily be pessimistic. With this we could even
+ do shared writable mmap, if we can have a fs hook for do_wp_page() to
+ make the reservation.
- disable compression in commit_write()?
- fine-tune the allocation / GC thresholds
- chattr support - turning on/off and tuning compression per-inode
@@ -11,26 +17,15 @@ $Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $
- test, test, test
- NAND flash support:
- - flush_wbuf using GC to fill it, don't just pad.
- - Deal with write errors. Data don't get lost - we just have to write
- the affected node(s) out again somewhere else.
- - make fsync flush only if actually required
- - make sys_sync() work.
- - reboot notifier
- - timed flush of old wbuf
- - fix magical second arg of jffs2_flush_wbuf(). Split into two or more functions instead.
-
+ - almost done :)
+ - use bad block check instead of the hardwired byte check
- Optimisations:
- - Stop GC from decompressing and immediately recompressing nodes which could
- just be copied intact. (We now keep track of REF_PRISTINE flag. Easy now.)
- - Furthermore, in the case where it could be copied intact we don't even need
- to call iget() for it -- if we use (raw_node_raw->flash_offset & 2) as a flag
- to show a node can be copied intact and it's _not_ in icache, we could just do
- it, fix up the next_in_ino list and move on. We would need a way to find out
- _whether_ it's in icache though -- if it's in icache we also need to do the
- fragment lists, etc. P'raps a flag or pointer in the jffs2_inode_cache could
- help. (We have half of this now.)
+ - Split writes so they go to two separate blocks rather than just c->nextblock.
+ By writing _new_ nodes to one block, and garbage-collected REF_PRISTINE
+ nodes to a different one, we can separate clean nodes from those which
+ are likely to become dirty, and end up with blocks which are each far
+ closer to 100% or 0% clean, hence speeding up later GC progress dramatically.
- Stop keeping name in-core with struct jffs2_full_dirent. If we keep the hash in
the full dirent, we only need to go to the flash in lookup() when we think we've
got a match, and in readdir().
@@ -38,3 +33,8 @@ $Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $
- Remove totlen from jffs2_raw_node_ref? Need to have totlen passed into
jffs2_mark_node_obsolete(). Can all callers work it out?
- Remove size from jffs2_raw_node_frag.
+
+dedekind:
+1. __jffs2_flush_wbuf() has a strange 'pad' parameter. Eliminate.
+2. get_sb()->build_fs()->scan() path... Why does get_sb() remove scan()'s crap in
+ case of failure? scan() does not clean everything. Fix.
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 8210ac16a36..7b77a954112 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -51,7 +51,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid));
wait_for_completion(&c->gc_thread_start);
}
-
+
return ret;
}
@@ -101,7 +101,7 @@ static int jffs2_garbage_collect_thread(void *_c)
cond_resched();
- /* Put_super will send a SIGKILL and then wait on the sem.
+ /* Put_super will send a SIGKILL and then wait on the sem.
*/
while (signal_pending(current)) {
siginfo_t info;
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 97dc39796e2..fff108bb118 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: build.c,v 1.71 2005/07/12 16:37:08 dedekind Exp $
+ * $Id: build.c,v 1.85 2005/11/07 11:14:38 gleixner Exp $
*
*/
@@ -18,7 +18,8 @@
#include <linux/mtd/mtd.h>
#include "nodelist.h"
-static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **);
+static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
+ struct jffs2_inode_cache *, struct jffs2_full_dirent **);
static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c)
@@ -46,11 +47,12 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
ic = next_inode(&i, ic, (c)))
-static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+ struct jffs2_inode_cache *ic)
{
struct jffs2_full_dirent *fd;
- D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino));
+ dbg_fsbuild("building directory inode #%u\n", ic->ino);
/* For each child, increase nlink */
for(fd = ic->scan_dents; fd; fd = fd->next) {
@@ -58,26 +60,23 @@ static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2
if (!fd->ino)
continue;
- /* XXX: Can get high latency here with huge directories */
+ /* we can get high latency here with huge directories */
child_ic = jffs2_get_ino_cache(c, fd->ino);
if (!child_ic) {
- printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
+ dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
fd->name, fd->ino, ic->ino);
jffs2_mark_node_obsolete(c, fd->raw);
continue;
}
if (child_ic->nlink++ && fd->type == DT_DIR) {
- printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
- if (fd->ino == 1 && ic->ino == 1) {
- printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
- printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
- }
- /* What do we do about it? */
+ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
+ fd->name, fd->ino, ic->ino);
+ /* TODO: What do we do about it? */
}
- D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
- /* Can't free them. We might need them in pass 2 */
+ dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
+ /* Can't free scan_dents so far. We might need them in pass 2 */
}
}
@@ -94,6 +93,8 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
struct jffs2_full_dirent *fd;
struct jffs2_full_dirent *dead_fds = NULL;
+ dbg_fsbuild("build FS data structures\n");
+
/* First, scan the medium and build all the inode caches with
lists of physical nodes */
@@ -103,60 +104,54 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
if (ret)
goto exit;
- D1(printk(KERN_DEBUG "Scanned flash completely\n"));
- D2(jffs2_dump_block_lists(c));
+ dbg_fsbuild("scanned flash completely\n");
+ jffs2_dbg_dump_block_lists_nolock(c);
+ dbg_fsbuild("pass 1 starting\n");
c->flags |= JFFS2_SB_FLAG_BUILDING;
/* Now scan the directory tree, increasing nlink according to every dirent found. */
for_each_inode(i, c, ic) {
- D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino));
-
- D1(BUG_ON(ic->ino > c->highest_ino));
-
if (ic->scan_dents) {
jffs2_build_inode_pass1(c, ic);
cond_resched();
}
}
- D1(printk(KERN_DEBUG "Pass 1 complete\n"));
+ dbg_fsbuild("pass 1 complete\n");
/* Next, scan for inodes with nlink == 0 and remove them. If
they were directories, then decrement the nlink of their
children too, and repeat the scan. As that's going to be
a fairly uncommon occurrence, it's not so evil to do it this
way. Recursion bad. */
- D1(printk(KERN_DEBUG "Pass 2 starting\n"));
+ dbg_fsbuild("pass 2 starting\n");
for_each_inode(i, c, ic) {
- D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
if (ic->nlink)
continue;
-
+
jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
cond_resched();
- }
+ }
- D1(printk(KERN_DEBUG "Pass 2a starting\n"));
+ dbg_fsbuild("pass 2a starting\n");
while (dead_fds) {
fd = dead_fds;
dead_fds = fd->next;
ic = jffs2_get_ino_cache(c, fd->ino);
- D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic));
if (ic)
jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
jffs2_free_full_dirent(fd);
}
- D1(printk(KERN_DEBUG "Pass 2 complete\n"));
-
+ dbg_fsbuild("pass 2a complete\n");
+ dbg_fsbuild("freeing temporary data structures\n");
+
/* Finally, we can scan again and free the dirent structs */
for_each_inode(i, c, ic) {
- D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));
-
while(ic->scan_dents) {
fd = ic->scan_dents;
ic->scan_dents = fd->next;
@@ -166,9 +161,8 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
cond_resched();
}
c->flags &= ~JFFS2_SB_FLAG_BUILDING;
-
- D1(printk(KERN_DEBUG "Pass 3 complete\n"));
- D2(jffs2_dump_block_lists(c));
+
+ dbg_fsbuild("FS build complete\n");
/* Rotate the lists by some number to ensure wear levelling */
jffs2_rotate_lists(c);
@@ -189,24 +183,26 @@ exit:
return ret;
}
-static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds)
+static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
+ struct jffs2_inode_cache *ic,
+ struct jffs2_full_dirent **dead_fds)
{
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
- D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));
-
+ dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);
+
raw = ic->nodes;
while (raw != (void *)ic) {
struct jffs2_raw_node_ref *next = raw->next_in_ino;
- D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw)));
+ dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
jffs2_mark_node_obsolete(c, raw);
raw = next;
}
if (ic->scan_dents) {
int whinged = 0;
- D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino));
+ dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);
while(ic->scan_dents) {
struct jffs2_inode_cache *child_ic;
@@ -216,45 +212,43 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jf
if (!fd->ino) {
/* It's a deletion dirent. Ignore it */
- D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name));
+ dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
jffs2_free_full_dirent(fd);
continue;
}
- if (!whinged) {
+ if (!whinged)
whinged = 1;
- printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino);
- }
- D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n",
- fd->name, fd->ino));
-
+ dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);
+
child_ic = jffs2_get_ino_cache(c, fd->ino);
if (!child_ic) {
- printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino);
+ dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
+ fd->name, fd->ino);
jffs2_free_full_dirent(fd);
continue;
}
- /* Reduce nlink of the child. If it's now zero, stick it on the
+ /* Reduce nlink of the child. If it's now zero, stick it on the
dead_fds list to be cleaned up later. Else just free the fd */
child_ic->nlink--;
-
+
if (!child_ic->nlink) {
- D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n",
- fd->ino, fd->name));
+ dbg_fsbuild("inode #%u (\"%s\") has now got zero nlink, adding to dead_fds list.\n",
+ fd->ino, fd->name);
fd->next = *dead_fds;
*dead_fds = fd;
} else {
- D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
- fd->ino, fd->name, child_ic->nlink));
+ dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
+ fd->ino, fd->name, child_ic->nlink);
jffs2_free_full_dirent(fd);
}
}
}
/*
- We don't delete the inocache from the hash list and free it yet.
+ We don't delete the inocache from the hash list and free it yet.
The erase code will do that, when all the nodes are completely gone.
*/
}
@@ -268,7 +262,7 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
because there's not enough free space... */
c->resv_blocks_deletion = 2;
- /* Be conservative about how much space we need before we allow writes.
+ /* Be conservative about how much space we need before we allow writes.
On top of that which is required for deletia, require an extra 2%
of the medium to be available, for overhead caused by nodes being
split across blocks, etc. */
@@ -283,7 +277,7 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
- /* When do we allow garbage collection to merge nodes to make
+ /* When do we allow garbage collection to merge nodes to make
long-term progress at the expense of short-term space exhaustion? */
c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
@@ -295,45 +289,45 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
trying to GC to make more space. It'll be a fruitless task */
c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
- D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
- c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks));
- D1(printk(KERN_DEBUG "Blocks required to allow deletion: %d (%d KiB)\n",
- c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024));
- D1(printk(KERN_DEBUG "Blocks required to allow writes: %d (%d KiB)\n",
- c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024));
- D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n",
- c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024));
- D1(printk(KERN_DEBUG "Blocks required to allow GC merges: %d (%d KiB)\n",
- c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024));
- D1(printk(KERN_DEBUG "Blocks required to GC bad blocks: %d (%d KiB)\n",
- c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024));
- D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n",
- c->nospc_dirty_size));
-}
+ dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
+ c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
+ dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
+ c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
+ dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
+ c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
+ dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
+ c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
+ dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n",
+ c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
+ dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n",
+ c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
+ dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
+ c->nospc_dirty_size);
+}
int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
+ int ret;
int i;
+ int size;
c->free_size = c->flash_size;
c->nr_blocks = c->flash_size / c->sector_size;
- if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
- c->blocks = vmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks);
+ size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
+#ifndef __ECOS
+ if (jffs2_blocks_use_vmalloc(c))
+ c->blocks = vmalloc(size);
else
- c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL);
+#endif
+ c->blocks = kmalloc(size, GFP_KERNEL);
if (!c->blocks)
return -ENOMEM;
+
+ memset(c->blocks, 0, size);
for (i=0; i<c->nr_blocks; i++) {
INIT_LIST_HEAD(&c->blocks[i].list);
c->blocks[i].offset = i * c->sector_size;
c->blocks[i].free_size = c->sector_size;
- c->blocks[i].dirty_size = 0;
- c->blocks[i].wasted_size = 0;
- c->blocks[i].unchecked_size = 0;
- c->blocks[i].used_size = 0;
- c->blocks[i].first_node = NULL;
- c->blocks[i].last_node = NULL;
- c->blocks[i].bad_count = 0;
}
INIT_LIST_HEAD(&c->clean_list);
@@ -348,16 +342,23 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
INIT_LIST_HEAD(&c->bad_list);
INIT_LIST_HEAD(&c->bad_used_list);
c->highest_ino = 1;
+ c->summary = NULL;
+
+ ret = jffs2_sum_init(c);
+ if (ret)
+ return ret;
if (jffs2_build_filesystem(c)) {
- D1(printk(KERN_DEBUG "build_fs failed\n"));
+ dbg_fsbuild("build_fs failed\n");
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
- if (c->mtd->flags & MTD_NO_VIRTBLOCKS) {
+#ifndef __ECOS
+ if (jffs2_blocks_use_vmalloc(c))
vfree(c->blocks);
- } else {
+ else
+#endif
kfree(c->blocks);
- }
+
return -EIO;
}
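
The jffs2_calc_trigger_levels() hunk above converts the D1(printk()) reporting to dbg_fsbuild() while keeping the existing reservation policy: two blocks reserved for deletion, roughly an extra 2% of the medium before ordinary writes are allowed, a GC trigger one block above the write threshold, and so on. A small standalone sketch of that arithmetic for a hypothetical 16 MiB flash with 64 KiB eraseblocks follows; the 2%-to-blocks conversion is an assumption, so the kernel's exact integer rounding may differ:

/*
 * Illustrative recomputation of the JFFS2 reservation thresholds discussed
 * in jffs2_calc_trigger_levels() above, for a made-up 16 MiB / 64 KiB
 * geometry. The figures come from the comments in the hunk; treat the
 * output as illustrative, not as the kernel's exact numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned flash_size  = 16 * 1024 * 1024;   /* hypothetical medium size */
	unsigned sector_size = 64 * 1024;          /* eraseblock size */
	unsigned nr_blocks   = flash_size / sector_size;

	unsigned resv_deletion  = 2;                               /* always keep 2 blocks for deletion */
	unsigned resv_write     = resv_deletion +
	                          (flash_size / 50) / sector_size; /* deletion reserve + ~2% of the medium */
	unsigned resv_gctrigger = resv_write + 1;                  /* wake the GC thread one block earlier */
	unsigned resv_gcmerge   = resv_deletion + 1;               /* allow node-merging GC earlier still */
	unsigned nospc_dirty    = sector_size + flash_size / 100;  /* min dirty space worth GCing near ENOSPC */

	printf("%u blocks of %u KiB\n", nr_blocks, sector_size / 1024);
	printf("resv_blocks_deletion  = %u\n", resv_deletion);
	printf("resv_blocks_write     = %u\n", resv_write);
	printf("resv_blocks_gctrigger = %u\n", resv_gctrigger);
	printf("resv_blocks_gcmerge   = %u\n", resv_gcmerge);
	printf("nospc_dirty_size      = %u bytes\n", nospc_dirty);
	return 0;
}
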
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index af922a9618a..e7944e665b9 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -9,7 +9,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: compr.c,v 1.42 2004/08/07 21:56:08 dwmw2 Exp $
+ * $Id: compr.c,v 1.46 2005/11/07 11:14:38 gleixner Exp $
*
*/
@@ -36,16 +36,16 @@ static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_co
* data.
*
* Returns: Lower byte to be stored with data indicating compression type used.
- * Zero is used to show that the data could not be compressed - the
+ * Zero is used to show that the data could not be compressed - the
* compressed version was actually larger than the original.
* Upper byte will be used later. (soon)
*
* If the cdata buffer isn't large enough to hold all the uncompressed data,
- * jffs2_compress should compress as much as will fit, and should set
+ * jffs2_compress should compress as much as will fit, and should set
* *datalen accordingly to show the amount of data which were compressed.
*/
uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- unsigned char *data_in, unsigned char **cpage_out,
+ unsigned char *data_in, unsigned char **cpage_out,
uint32_t *datalen, uint32_t *cdatalen)
{
int ret = JFFS2_COMPR_NONE;
@@ -164,7 +164,7 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
}
int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- uint16_t comprtype, unsigned char *cdata_in,
+ uint16_t comprtype, unsigned char *cdata_in,
unsigned char *data_out, uint32_t cdatalen, uint32_t datalen)
{
struct jffs2_compressor *this;
@@ -298,7 +298,7 @@ char *jffs2_stats(void)
act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n");
act_buf += sprintf(act_buf,"%10s ","none");
- act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks,
+ act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks,
none_stat_compr_size, none_stat_decompr_blocks);
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
@@ -307,8 +307,8 @@ char *jffs2_stats(void)
act_buf += sprintf(act_buf,"- ");
else
act_buf += sprintf(act_buf,"+ ");
- act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks,
- this->stat_compr_new_size, this->stat_compr_orig_size,
+ act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks,
+ this->stat_compr_new_size, this->stat_compr_orig_size,
this->stat_decompr_blocks);
act_buf += sprintf(act_buf,"\n");
}
@@ -317,7 +317,7 @@ char *jffs2_stats(void)
return buf;
}
-char *jffs2_get_compression_mode_name(void)
+char *jffs2_get_compression_mode_name(void)
{
switch (jffs2_compression_mode) {
case JFFS2_COMPR_MODE_NONE:
@@ -330,7 +330,7 @@ char *jffs2_get_compression_mode_name(void)
return "unkown";
}
-int jffs2_set_compression_mode_name(const char *name)
+int jffs2_set_compression_mode_name(const char *name)
{
if (!strcmp("none",name)) {
jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
@@ -355,7 +355,7 @@ static int jffs2_compressor_Xable(const char *name, int disabled)
if (!strcmp(this->name, name)) {
this->disabled = disabled;
spin_unlock(&jffs2_compressor_list_lock);
- return 0;
+ return 0;
}
}
spin_unlock(&jffs2_compressor_list_lock);
@@ -385,7 +385,7 @@ int jffs2_set_compressor_priority(const char *name, int priority)
}
}
spin_unlock(&jffs2_compressor_list_lock);
- printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);
+ printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);
return 1;
reinsert:
/* list is sorted in the order of priority, so if
@@ -412,7 +412,7 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
kfree(comprbuf);
}
-int jffs2_compressors_init(void)
+int jffs2_compressors_init(void)
{
/* Registering compressors */
#ifdef CONFIG_JFFS2_ZLIB
@@ -425,12 +425,6 @@ int jffs2_compressors_init(void)
jffs2_rubinmips_init();
jffs2_dynrubin_init();
#endif
-#ifdef CONFIG_JFFS2_LZARI
- jffs2_lzari_init();
-#endif
-#ifdef CONFIG_JFFS2_LZO
- jffs2_lzo_init();
-#endif
/* Setting default compression mode */
#ifdef CONFIG_JFFS2_CMODE_NONE
jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
@@ -446,15 +440,9 @@ int jffs2_compressors_init(void)
return 0;
}
-int jffs2_compressors_exit(void)
+int jffs2_compressors_exit(void)
{
/* Unregistering compressors */
-#ifdef CONFIG_JFFS2_LZO
- jffs2_lzo_exit();
-#endif
-#ifdef CONFIG_JFFS2_LZARI
- jffs2_lzari_exit();
-#endif
#ifdef CONFIG_JFFS2_RUBIN
jffs2_dynrubin_exit();
jffs2_rubinmips_exit();
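
The comment block in the compr.c hunk above spells out the jffs2_compress() contract: a return value of zero means the data could not usefully be compressed, because the compressed form would have been at least as large as the original. The userspace sketch below mirrors that accept-or-store-raw decision with ordinary zlib's compress2() (link with -lz); the enum values and function names are illustrative stand-ins, not the kernel API:

/*
 * Userspace sketch of the "only keep the compressed copy if it is actually
 * smaller" contract documented for jffs2_compress() above. Uses plain zlib
 * rather than the kernel compressor framework.
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

enum { STORE_RAW = 0, STORE_ZLIB = 6 };	/* stand-ins for compression-type bytes */

static int choose_compression(const unsigned char *in, size_t inlen,
			      unsigned char *out, size_t *outlen)
{
	uLongf destlen = inlen ? inlen - 1 : 0;	/* must beat the raw size */

	if (destlen &&
	    compress2(out, &destlen, in, inlen, Z_BEST_COMPRESSION) == Z_OK &&
	    destlen < inlen) {
		*outlen = destlen;
		return STORE_ZLIB;
	}
	/* Incompressible (or grew): store the data verbatim instead. */
	memcpy(out, in, inlen);
	*outlen = inlen;
	return STORE_RAW;
}

int main(void)
{
	const char *text = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
	unsigned char out[256];
	size_t outlen;
	int type = choose_compression((const unsigned char *)text,
				      strlen(text), out, &outlen);

	printf("stored %zu bytes as type %d (0 means raw)\n", outlen, type);
	return 0;
}
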
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index 89ceeed201e..a77e830d85c 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -4,10 +4,10 @@
* Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
* University of Szeged, Hungary
*
- * For licensing information, see the file 'LICENCE' in the
+ * For licensing information, see the file 'LICENCE' in the
* jffs2 directory.
*
- * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $
+ * $Id: compr.h,v 1.9 2005/11/07 11:14:38 gleixner Exp $
*
*/
@@ -103,13 +103,5 @@ void jffs2_rtime_exit(void);
int jffs2_zlib_init(void);
void jffs2_zlib_exit(void);
#endif
-#ifdef CONFIG_JFFS2_LZARI
-int jffs2_lzari_init(void);
-void jffs2_lzari_exit(void);
-#endif
-#ifdef CONFIG_JFFS2_LZO
-int jffs2_lzo_init(void);
-void jffs2_lzo_exit(void);
-#endif
#endif /* __JFFS2_COMPR_H__ */
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 39312941866..2eb1b7428d1 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -24,8 +24,8 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/jffs2.h>
+#include <linux/string.h>
+#include <linux/jffs2.h>
#include "compr.h"
/* _compress returns the compressed size, -1 if bigger */
@@ -38,19 +38,19 @@ static int jffs2_rtime_compress(unsigned char *data_in,
int outpos = 0;
int pos=0;
- memset(positions,0,sizeof(positions));
-
+ memset(positions,0,sizeof(positions));
+
while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
int backpos, runlen=0;
unsigned char value;
-
+
value = data_in[pos];
cpage_out[outpos++] = data_in[pos++];
-
+
backpos = positions[value];
positions[value]=pos;
-
+
while ((backpos < pos) && (pos < (*sourcelen)) &&
(data_in[pos]==data_in[backpos++]) && (runlen<255)) {
pos++;
@@ -63,12 +63,12 @@ static int jffs2_rtime_compress(unsigned char *data_in,
/* We failed */
return -1;
}
-
+
/* Tell the caller how much we managed to compress, and how much space it took */
*sourcelen = pos;
*dstlen = outpos;
return 0;
-}
+}
static int jffs2_rtime_decompress(unsigned char *data_in,
@@ -79,19 +79,19 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
short positions[256];
int outpos = 0;
int pos=0;
-
- memset(positions,0,sizeof(positions));
-
+
+ memset(positions,0,sizeof(positions));
+
while (outpos<destlen) {
unsigned char value;
int backoffs;
int repeat;
-
+
value = data_in[pos++];
cpage_out[outpos++] = value; /* first the verbatim copied byte */
repeat = data_in[pos++];
backoffs = positions[value];
-
+
positions[value]=outpos;
if (repeat) {
if (backoffs + repeat >= outpos) {
@@ -101,12 +101,12 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
}
} else {
memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);
- outpos+=repeat;
+ outpos+=repeat;
}
}
}
return 0;
-}
+}
static struct jffs2_compressor jffs2_rtime_comp = {
.priority = JFFS2_RTIME_PRIORITY,
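
The compr_rtime.c hunks above are whitespace-only, but the surrounding context shows the algorithm: each output pair is a literal byte followed by the length of the run that repeats the data seen after the previous occurrence of that byte value. Below is a simplified, self-contained userspace round-trip of that idea; it is a sketch of the scheme, not a byte-for-byte copy of the kernel functions (parts of which fall outside the hunks shown):

/*
 * Simplified rtime-style compressor/decompressor. Output is a sequence of
 * (literal byte, run length) pairs, where the run replays the data that
 * followed the previous occurrence of that byte value. The output buffer
 * for compression must hold up to 2 * inlen bytes.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static size_t rtime_compress(const uint8_t *in, size_t inlen, uint8_t *out)
{
	size_t positions[256] = { 0 };
	size_t pos = 0, outpos = 0;

	while (pos < inlen) {
		uint8_t value = in[pos];
		size_t backpos, runlen = 0;

		out[outpos++] = in[pos++];          /* the verbatim byte */
		backpos = positions[value];
		positions[value] = pos;

		while (backpos < pos && pos < inlen &&
		       in[pos] == in[backpos] && runlen < 255) {
			pos++;
			backpos++;
			runlen++;
		}
		out[outpos++] = (uint8_t)runlen;    /* how far the old data repeated */
	}
	return outpos;
}

static size_t rtime_decompress(const uint8_t *in, size_t inlen, uint8_t *out)
{
	size_t positions[256] = { 0 };
	size_t pos = 0, outpos = 0;

	while (pos < inlen) {                       /* input is always whole pairs */
		uint8_t value  = in[pos++];
		uint8_t repeat = in[pos++];
		size_t backoffs = positions[value];

		out[outpos++] = value;              /* first the verbatim copied byte */
		positions[value] = outpos;
		while (repeat--)                    /* byte-by-byte handles overlap */
			out[outpos++] = out[backoffs++];
	}
	return outpos;
}

int main(void)
{
	const uint8_t src[] = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
			      "abcabcabcabcabcabcabcabcabcabcabcabcabc";
	size_t srclen = sizeof(src) - 1;
	uint8_t packed[2 * sizeof(src)];
	uint8_t unpacked[sizeof(src)];
	size_t packedlen   = rtime_compress(src, srclen, packed);
	size_t unpackedlen = rtime_decompress(packed, packedlen, unpacked);

	printf("%zu -> %zu -> %zu bytes, round trip %s\n",
	       srclen, packedlen, unpackedlen,
	       (unpackedlen == srclen && !memcmp(src, unpacked, srclen)) ?
	       "ok" : "FAILED");
	return 0;
}
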
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index 09422388fb9..e792e675d62 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -11,7 +11,6 @@
*
*/
-
#include <linux/string.h>
#include <linux/types.h>
#include <linux/jffs2.h>
@@ -20,7 +19,7 @@
#include "compr.h"
static void init_rubin(struct rubin_state *rs, int div, int *bits)
-{
+{
int c;
rs->q = 0;
@@ -40,7 +39,7 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol)
while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
rs->bit_number++;
-
+
ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
if (ret)
return ret;
@@ -68,7 +67,7 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol)
static void end_rubin(struct rubin_state *rs)
-{
+{
int i;
@@ -82,7 +81,7 @@ static void end_rubin(struct rubin_state *rs)
static void init_decode(struct rubin_state *rs, int div, int *bits)
{
- init_rubin(rs, div, bits);
+ init_rubin(rs, div, bits);
 /* except lower */
rs->rec_q = 0;
@@ -188,7 +187,7 @@ static int in_byte(struct rubin_state *rs)
-static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
+static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen)
{
int outpos = 0;
@@ -198,31 +197,31 @@ static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32);
init_rubin(&rs, bit_divider, bits);
-
+
while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos]))
pos++;
-
+
end_rubin(&rs);
if (outpos > pos) {
/* We failed */
return -1;
}
-
- /* Tell the caller how much we managed to compress,
+
+ /* Tell the caller how much we managed to compress,
* and how much space it took */
-
+
outpos = (pushedbits(&rs.pp)+7)/8;
-
+
if (outpos >= pos)
return -1; /* We didn't actually compress */
*sourcelen = pos;
*dstlen = outpos;
return 0;
-}
+}
#if 0
/* _compress returns the compressed size, -1 if bigger */
-int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out,
+int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out,
uint32_t *sourcelen, uint32_t *dstlen, void *model)
{
return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
@@ -277,7 +276,7 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
}
ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen);
- if (ret)
+ if (ret)
return ret;
/* Add back the 8 bytes we took for the probabilities */
@@ -293,19 +292,19 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
return 0;
}
-static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in,
+static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in,
unsigned char *page_out, uint32_t srclen, uint32_t destlen)
{
int outpos = 0;
struct rubin_state rs;
-
+
init_pushpull(&rs.pp, cdata_in, srclen, 0, 0);
init_decode(&rs, bit_divider, bits);
-
+
while (outpos < destlen) {
page_out[outpos++] = in_byte(&rs);
}
-}
+}
static int jffs2_rubinmips_decompress(unsigned char *data_in,
diff --git a/fs/jffs2/compr_rubin.h b/fs/jffs2/compr_rubin.h
index cf51e34f657..bf1a9345162 100644
--- a/fs/jffs2/compr_rubin.h
+++ b/fs/jffs2/compr_rubin.h
@@ -1,7 +1,7 @@
/* Rubin encoder/decoder header */
/* work started at : aug 3, 1994 */
/* last modification : aug 15, 1994 */
-/* $Id: compr_rubin.h,v 1.6 2002/01/25 01:49:26 dwmw2 Exp $ */
+/* $Id: compr_rubin.h,v 1.7 2005/11/07 11:14:38 gleixner Exp $ */
#include "pushpull.h"
@@ -11,8 +11,8 @@
struct rubin_state {
- unsigned long p;
- unsigned long q;
+ unsigned long p;
+ unsigned long q;
unsigned long rec_q;
long bit_number;
struct pushpull pp;
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 83f7e0788fd..4db8be8e90c 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: compr_zlib.c,v 1.31 2005/05/20 19:30:06 gleixner Exp $
+ * $Id: compr_zlib.c,v 1.32 2005/11/07 11:14:38 gleixner Exp $
*
*/
@@ -24,11 +24,11 @@
#include "nodelist.h"
#include "compr.h"
- /* Plan: call deflate() with avail_in == *sourcelen,
- avail_out = *dstlen - 12 and flush == Z_FINISH.
+ /* Plan: call deflate() with avail_in == *sourcelen,
+ avail_out = *dstlen - 12 and flush == Z_FINISH.
If it doesn't manage to finish, call it again with
avail_in == 0 and avail_out set to the remaining 12
- bytes for it to clean up.
+ bytes for it to clean up.
Q: Is 12 bytes sufficient?
*/
#define STREAM_END_SPACE 12
@@ -89,7 +89,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
def_strm.next_in = data_in;
def_strm.total_in = 0;
-
+
def_strm.next_out = cpage_out;
def_strm.total_out = 0;
@@ -99,7 +99,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
def_strm.avail_in, def_strm.avail_out));
ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
- D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
+ D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out));
if (ret != Z_OK) {
D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
@@ -150,7 +150,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
inf_strm.next_in = data_in;
inf_strm.avail_in = srclen;
inf_strm.total_in = 0;
-
+
inf_strm.next_out = cpage_out;
inf_strm.avail_out = destlen;
inf_strm.total_out = 0;
diff --git a/fs/jffs2/comprtest.c b/fs/jffs2/comprtest.c
index cf51f091d0e..f0fb8be7740 100644
--- a/fs/jffs2/comprtest.c
+++ b/fs/jffs2/comprtest.c
@@ -1,4 +1,4 @@
-/* $Id: comprtest.c,v 1.5 2002/01/03 15:20:44 dwmw2 Exp $ */
+/* $Id: comprtest.c,v 1.6 2005/11/07 11:14:38 gleixner Exp $ */
#include <linux/kernel.h>
#include <linux/string.h>
@@ -265,9 +265,9 @@ static unsigned char testdata[TESTDATA_LEN] = {
static unsigned char comprbuf[TESTDATA_LEN];
static unsigned char decomprbuf[TESTDATA_LEN];
-int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in,
+int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in,
unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
-unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out,
+unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out,
uint32_t *datalen, uint32_t *cdatalen);
int init_module(void ) {
@@ -276,10 +276,10 @@ int init_module(void ) {
int ret;
printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- testdata[0],testdata[1],testdata[2],testdata[3],
- testdata[4],testdata[5],testdata[6],testdata[7],
- testdata[8],testdata[9],testdata[10],testdata[11],
- testdata[12],testdata[13],testdata[14],testdata[15]);
+ testdata[0],testdata[1],testdata[2],testdata[3],
+ testdata[4],testdata[5],testdata[6],testdata[7],
+ testdata[8],testdata[9],testdata[10],testdata[11],
+ testdata[12],testdata[13],testdata[14],testdata[15]);
d = TESTDATA_LEN;
c = TESTDATA_LEN;
comprtype = jffs2_compress(testdata, comprbuf, &d, &c);
@@ -287,18 +287,18 @@ int init_module(void ) {
printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n",
comprtype, c, d);
printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3],
- comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7],
- comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11],
- comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]);
+ comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3],
+ comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7],
+ comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11],
+ comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]);
ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d);
printk("jffs2_decompress returned %d\n", ret);
printk("Decompressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3],
- decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7],
- decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11],
- decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]);
+ decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3],
+ decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7],
+ decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11],
+ decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]);
if (memcmp(decomprbuf, testdata, d))
printk("Compression and decompression corrupted data\n");
else
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
new file mode 100644
index 00000000000..1fe17de713e
--- /dev/null
+++ b/fs/jffs2/debug.c
@@ -0,0 +1,705 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: debug.c,v 1.12 2005/11/07 11:14:39 gleixner Exp $
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/jffs2.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+#include "debug.h"
+
+#ifdef JFFS2_DBG_SANITY_CHECKS
+
+void
+__jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ if (unlikely(jeb && jeb->used_size + jeb->dirty_size +
+ jeb->free_size + jeb->wasted_size +
+ jeb->unchecked_size != c->sector_size)) {
+ JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset);
+ JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n",
+ jeb->free_size, jeb->dirty_size, jeb->used_size,
+ jeb->wasted_size, jeb->unchecked_size, c->sector_size);
+ BUG();
+ }
+
+ if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size
+ + c->wasted_size + c->unchecked_size != c->flash_size)) {
+ JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n");
+ JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n",
+ c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size,
+ c->wasted_size, c->unchecked_size, c->flash_size);
+ BUG();
+ }
+}
+
+void
+__jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ spin_lock(&c->erase_completion_lock);
+ jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+#endif /* JFFS2_DBG_SANITY_CHECKS */
+
+#ifdef JFFS2_DBG_PARANOIA_CHECKS
+/*
+ * Check the fragtree.
+ */
+void
+__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f)
+{
+ down(&f->sem);
+ __jffs2_dbg_fragtree_paranoia_check_nolock(f);
+ up(&f->sem);
+}
+
+void
+__jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f)
+{
+ struct jffs2_node_frag *frag;
+ int bitched = 0;
+
+ for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
+ struct jffs2_full_dnode *fn = frag->node;
+
+ if (!fn || !fn->raw)
+ continue;
+
+ if (ref_flags(fn->raw) == REF_PRISTINE) {
+ if (fn->frags > 1) {
+ JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n",
+ ref_offset(fn->raw), fn->frags);
+ bitched = 1;
+ }
+
+ /* A hole node which isn't multi-page should be garbage-collected
+ and merged anyway, so we just check for the frag size here,
+ rather than mucking around with actually reading the node
+ and checking the compression type, which is the real way
+ to tell a hole node. */
+ if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag)
+ && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
+ JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n",
+ ref_offset(fn->raw));
+ bitched = 1;
+ }
+
+ if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag)
+ && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
+ JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n",
+ ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
+ bitched = 1;
+ }
+ }
+ }
+
+ if (bitched) {
+ JFFS2_ERROR("fragtree is corrupted.\n");
+ __jffs2_dbg_dump_fragtree_nolock(f);
+ BUG();
+ }
+}
+
+/*
+ * Check if the flash contains all 0xFF before we start writing.
+ */
+void
+__jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
+ uint32_t ofs, int len)
+{
+ size_t retlen;
+ int ret, i;
+ unsigned char *buf;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
+ if (ret || (retlen != len)) {
+ JFFS2_WARNING("read %d bytes failed or short. ret %d, retlen %zd.\n",
+ len, ret, retlen);
+ kfree(buf);
+ return;
+ }
+
+ ret = 0;
+ for (i = 0; i < len; i++)
+ if (buf[i] != 0xff)
+ ret = 1;
+
+ if (ret) {
+ JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n",
+ ofs, ofs + i);
+ __jffs2_dbg_dump_buffer(buf, len, ofs);
+ kfree(buf);
+ BUG();
+ }
+
+ kfree(buf);
+}
+
+/*
+ * Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'.
+ */
+void
+__jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ spin_lock(&c->erase_completion_lock);
+ __jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+void
+__jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ uint32_t my_used_size = 0;
+ uint32_t my_unchecked_size = 0;
+ uint32_t my_dirty_size = 0;
+ struct jffs2_raw_node_ref *ref2 = jeb->first_node;
+
+ while (ref2) {
+ uint32_t totlen = ref_totlen(c, jeb, ref2);
+
+ if (ref2->flash_offset < jeb->offset ||
+ ref2->flash_offset > jeb->offset + c->sector_size) {
+ JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n",
+ ref_offset(ref2), jeb->offset);
+ goto error;
+
+ }
+ if (ref_flags(ref2) == REF_UNCHECKED)
+ my_unchecked_size += totlen;
+ else if (!ref_obsolete(ref2))
+ my_used_size += totlen;
+ else
+ my_dirty_size += totlen;
+
+ if ((!ref2->next_phys) != (ref2 == jeb->last_node)) {
+ JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next_phys at %#08x (mem %p), last_node is at %#08x (mem %p).\n",
+ ref_offset(ref2), ref2, ref_offset(ref2->next_phys), ref2->next_phys,
+ ref_offset(jeb->last_node), jeb->last_node);
+ goto error;
+ }
+ ref2 = ref2->next_phys;
+ }
+
+ if (my_used_size != jeb->used_size) {
+ JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n",
+ my_used_size, jeb->used_size);
+ goto error;
+ }
+
+ if (my_unchecked_size != jeb->unchecked_size) {
+ JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n",
+ my_unchecked_size, jeb->unchecked_size);
+ goto error;
+ }
+
+#if 0
+ /* This should work when we implement ref->__totlen elimination */
+ if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) {
+ JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n",
+ my_dirty_size, jeb->dirty_size + jeb->wasted_size);
+ goto error;
+ }
+
+ if (jeb->free_size == 0
+ && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) {
+ JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n",
+ my_used_size + my_unchecked_size + my_dirty_size,
+ c->sector_size);
+ goto error;
+ }
+#endif
+
+ return;
+
+error:
+ __jffs2_dbg_dump_node_refs_nolock(c, jeb);
+ __jffs2_dbg_dump_jeb_nolock(jeb);
+ __jffs2_dbg_dump_block_lists_nolock(c);
+ BUG();
+
+}
+#endif /* JFFS2_DBG_PARANOIA_CHECKS */
+
+#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
+/*
+ * Dump the node_refs of the 'jeb' JFFS2 eraseblock.
+ */
+void
+__jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ spin_lock(&c->erase_completion_lock);
+ __jffs2_dbg_dump_node_refs_nolock(c, jeb);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+void
+__jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ struct jffs2_raw_node_ref *ref;
+ int i = 0;
+
+ printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset);
+ if (!jeb->first_node) {
+ printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset);
+ return;
+ }
+
+ printk(JFFS2_DBG);
+ for (ref = jeb->first_node; ; ref = ref->next_phys) {
+ printk("%#08x(%#x)", ref_offset(ref), ref->__totlen);
+ if (ref->next_phys)
+ printk("->");
+ else
+ break;
+ if (++i == 4) {
+ i = 0;
+ printk("\n" JFFS2_DBG);
+ }
+ }
+ printk("\n");
+}
+
+/*
+ * Dump an eraseblock's space accounting.
+ */
+void
+__jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ spin_lock(&c->erase_completion_lock);
+ __jffs2_dbg_dump_jeb_nolock(jeb);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+void
+__jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb)
+{
+ if (!jeb)
+ return;
+
+ printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n",
+ jeb->offset);
+
+ printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size);
+ printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size);
+ printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size);
+ printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size);
+ printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size);
+}
+
+void
+__jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c)
+{
+ spin_lock(&c->erase_completion_lock);
+ __jffs2_dbg_dump_block_lists_nolock(c);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+void
+__jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c)
+{
+ printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n");
+
+ printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size);
+ printk(JFFS2_DBG "used_size: %#08x\n", c->used_size);
+ printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size);
+ printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size);
+ printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size);
+ printk(JFFS2_DBG "free_size: %#08x\n", c->free_size);
+ printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size);
+ printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size);
+ printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size);
+ printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n",
+ c->sector_size * c->resv_blocks_write);
+
+ if (c->nextblock)
+ printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ c->nextblock->offset, c->nextblock->used_size,
+ c->nextblock->dirty_size, c->nextblock->wasted_size,
+ c->nextblock->unchecked_size, c->nextblock->free_size);
+ else
+ printk(JFFS2_DBG "nextblock: NULL\n");
+
+ if (c->gcblock)
+ printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size,
+ c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
+ else
+ printk(JFFS2_DBG "gcblock: NULL\n");
+
+ if (list_empty(&c->clean_list)) {
+ printk(JFFS2_DBG "clean_list: empty\n");
+ } else {
+ struct list_head *this;
+ int numblocks = 0;
+ uint32_t dirty = 0;
+
+ list_for_each(this, &c->clean_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+ numblocks ++;
+ dirty += jeb->wasted_size;
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+
+ printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n",
+ numblocks, dirty, dirty / numblocks);
+ }
+
+ if (list_empty(&c->very_dirty_list)) {
+ printk(JFFS2_DBG "very_dirty_list: empty\n");
+ } else {
+ struct list_head *this;
+ int numblocks = 0;
+ uint32_t dirty = 0;
+
+ list_for_each(this, &c->very_dirty_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ numblocks ++;
+ dirty += jeb->dirty_size;
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+
+ printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
+ numblocks, dirty, dirty / numblocks);
+ }
+
+ if (list_empty(&c->dirty_list)) {
+ printk(JFFS2_DBG "dirty_list: empty\n");
+ } else {
+ struct list_head *this;
+ int numblocks = 0;
+ uint32_t dirty = 0;
+
+ list_for_each(this, &c->dirty_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ numblocks ++;
+ dirty += jeb->dirty_size;
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+
+ printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n",
+ numblocks, dirty, dirty / numblocks);
+ }
+
+ if (list_empty(&c->erasable_list)) {
+ printk(JFFS2_DBG "erasable_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->erasable_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->erasing_list)) {
+ printk(JFFS2_DBG "erasing_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->erasing_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->erase_pending_list)) {
+ printk(JFFS2_DBG "erase_pending_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->erase_pending_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->erasable_pending_wbuf_list)) {
+ printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->erasable_pending_wbuf_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->free_list)) {
+ printk(JFFS2_DBG "free_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->free_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->bad_list)) {
+ printk(JFFS2_DBG "bad_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->bad_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->bad_used_list)) {
+ printk(JFFS2_DBG "bad_used_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->bad_used_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+}
+
+void
+__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f)
+{
+ down(&f->sem);
+ jffs2_dbg_dump_fragtree_nolock(f);
+ up(&f->sem);
+}
+
+void
+__jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f)
+{
+ struct jffs2_node_frag *this = frag_first(&f->fragtree);
+ uint32_t lastofs = 0;
+ int buggy = 0;
+
+ printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino);
+ while(this) {
+ if (this->node)
+ printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n",
+ this->ofs, this->ofs+this->size, ref_offset(this->node->raw),
+ ref_flags(this->node->raw), this, frag_left(this), frag_right(this),
+ frag_parent(this));
+ else
+ printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). left (%p), right (%p), parent (%p)\n",
+ this->ofs, this->ofs+this->size, this, frag_left(this),
+ frag_right(this), frag_parent(this));
+ if (this->ofs != lastofs)
+ buggy = 1;
+ lastofs = this->ofs + this->size;
+ this = frag_next(this);
+ }
+
+ if (f->metadata)
+ printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw));
+
+ if (buggy) {
+ JFFS2_ERROR("frag tree got a hole in it.\n");
+ BUG();
+ }
+}
+
+#define JFFS2_BUFDUMP_BYTES_PER_LINE 32
+void
+__jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs)
+{
+ int skip;
+ int i;
+
+ printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n",
+ offs, offs + len, len);
+ i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE;
+ offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1);
+
+ if (skip != 0)
+ printk(JFFS2_DBG "%#08x: ", offs);
+
+ while (skip--)
+ printk(" ");
+
+ while (i < len) {
+ if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len - 1) {
+ if (i != 0)
+ printk("\n");
+ offs += JFFS2_BUFDUMP_BYTES_PER_LINE;
+ printk(JFFS2_DBG "%#08x: ", offs);
+ }
+
+ printk("%02x ", buf[i]);
+
+ i += 1;
+ }
+
+ printk("\n");
+}
+
+/*
+ * Dump a JFFS2 node.
+ */
+void
+__jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs)
+{
+ union jffs2_node_union node;
+ int len = sizeof(union jffs2_node_union);
+ size_t retlen;
+ uint32_t crc;
+ int ret;
+
+ printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs);
+
+ ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node);
+ if (ret || (retlen != len)) {
+ JFFS2_ERROR("read %d bytes failed or short. ret %d, retlen %zd.\n",
+ len, ret, retlen);
+ return;
+ }
+
+ printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic));
+ printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype));
+ printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen));
+ printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc));
+
+ crc = crc32(0, &node.u, sizeof(node.u) - 4);
+ if (crc != je32_to_cpu(node.u.hdr_crc)) {
+ JFFS2_ERROR("wrong common header CRC.\n");
+ return;
+ }
+
+ if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK &&
+ je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK)
+ {
+ JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n",
+ je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK);
+ return;
+ }
+
+ switch(je16_to_cpu(node.u.nodetype)) {
+
+ case JFFS2_NODETYPE_INODE:
+
+ printk(JFFS2_DBG "the node is inode node\n");
+ printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino));
+ printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version));
+ printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m);
+ printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid));
+ printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid));
+ printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize));
+ printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime));
+ printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime));
+ printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime));
+ printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset));
+ printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize));
+ printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize));
+ printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr);
+ printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr);
+ printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags));
+ printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc));
+ printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc));
+
+ crc = crc32(0, &node.i, sizeof(node.i) - 8);
+ if (crc != je32_to_cpu(node.i.node_crc)) {
+ JFFS2_ERROR("wrong node header CRC.\n");
+ return;
+ }
+ break;
+
+ case JFFS2_NODETYPE_DIRENT:
+
+ printk(JFFS2_DBG "the node is dirent node\n");
+ printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino));
+ printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version));
+ printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino));
+ printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime));
+ printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize);
+ printk(JFFS2_DBG "type:\t%#02x\n", node.d.type);
+ printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc));
+ printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc));
+
+ node.d.name[node.d.nsize] = '\0';
+ printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name);
+
+ crc = crc32(0, &node.d, sizeof(node.d) - 8);
+ if (crc != je32_to_cpu(node.d.node_crc)) {
+ JFFS2_ERROR("wrong node header CRC.\n");
+ return;
+ }
+ break;
+
+ default:
+ printk(JFFS2_DBG "node type is unknown\n");
+ break;
+ }
+}
+#endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */
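
An aside on the node dump above: __jffs2_dbg_dump_node() validates the common header with crc32(0, &node.u, sizeof(node.u) - 4), i.e. the CRC covers magic, nodetype and totlen but not hdr_crc itself. Below is a minimal userspace sketch of that layout; the struct name and the asserts are invented for the illustration, the real definition is struct jffs2_unknown_node in the JFFS2 headers.

/* Illustration only -- not part of this patch.  Shows why the header
 * CRC in __jffs2_dbg_dump_node() is taken over sizeof(node.u) - 4
 * bytes: hdr_crc is the last 4 bytes of the 12-byte common header. */
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

struct example_unknown_node {
	uint16_t magic;		/* JFFS2_MAGIC_BITMASK (0x1985) */
	uint16_t nodetype;	/* JFFS2_NODETYPE_INODE, _DIRENT, ... */
	uint32_t totlen;	/* total node length, data included */
	uint32_t hdr_crc;	/* CRC32 of the three fields above */
};

int main(void)
{
	/* hdr_crc sits right after the 8 bytes it protects. */
	assert(sizeof(struct example_unknown_node) == 12);
	assert(offsetof(struct example_unknown_node, hdr_crc) == 8);
	return 0;
}
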
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h
new file mode 100644
index 00000000000..f193d43a8a5
--- /dev/null
+++ b/fs/jffs2/debug.h
@@ -0,0 +1,279 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: debug.h,v 1.21 2005/11/07 11:14:39 gleixner Exp $
+ *
+ */
+#ifndef _JFFS2_DEBUG_H_
+#define _JFFS2_DEBUG_H_
+
+#include <linux/config.h>
+
+#ifndef CONFIG_JFFS2_FS_DEBUG
+#define CONFIG_JFFS2_FS_DEBUG 0
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 0
+/* Enable "paranoia" checks and dumps */
+#define JFFS2_DBG_PARANOIA_CHECKS
+#define JFFS2_DBG_DUMPS
+
+/*
+ * By defining/undefining the macros below one may select debugging messages
+ * for specific JFFS2 subsystems.
+ */
+#define JFFS2_DBG_READINODE_MESSAGES
+#define JFFS2_DBG_FRAGTREE_MESSAGES
+#define JFFS2_DBG_DENTLIST_MESSAGES
+#define JFFS2_DBG_NODEREF_MESSAGES
+#define JFFS2_DBG_INOCACHE_MESSAGES
+#define JFFS2_DBG_SUMMARY_MESSAGES
+#define JFFS2_DBG_FSBUILD_MESSAGES
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 1
+#define JFFS2_DBG_FRAGTREE2_MESSAGES
+#define JFFS2_DBG_MEMALLOC_MESSAGES
+#endif
+
+/* Sanity checks are supposed to be light-weight and enabled by default */
+#define JFFS2_DBG_SANITY_CHECKS
+
+/*
+ * Dx() are mainly used for debugging messages; they must go away and be
+ * superseded by the nicer dbg_xxx() macros...
+ */
+#if CONFIG_JFFS2_FS_DEBUG > 0
+#define D1(x) x
+#else
+#define D1(x)
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 1
+#define D2(x) x
+#else
+#define D2(x)
+#endif
+
+/* The prefixes of JFFS2 messages */
+#define JFFS2_DBG_PREFIX "[JFFS2 DBG]"
+#define JFFS2_ERR_PREFIX "JFFS2 error:"
+#define JFFS2_WARN_PREFIX "JFFS2 warning:"
+#define JFFS2_NOTICE_PREFIX "JFFS2 notice:"
+
+#define JFFS2_ERR KERN_ERR
+#define JFFS2_WARN KERN_WARNING
+#define JFFS2_NOT KERN_NOTICE
+#define JFFS2_DBG KERN_DEBUG
+
+#define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX
+#define JFFS2_ERR_MSG_PREFIX JFFS2_ERR JFFS2_ERR_PREFIX
+#define JFFS2_WARN_MSG_PREFIX JFFS2_WARN JFFS2_WARN_PREFIX
+#define JFFS2_NOTICE_MSG_PREFIX JFFS2_NOT JFFS2_NOTICE_PREFIX
+
+/* JFFS2 message macros */
+#define JFFS2_ERROR(fmt, ...) \
+ do { \
+ printk(JFFS2_ERR_MSG_PREFIX \
+ " (%d) %s: " fmt, current->pid, \
+ __FUNCTION__, ##__VA_ARGS__); \
+ } while(0)
+
+#define JFFS2_WARNING(fmt, ...) \
+ do { \
+ printk(JFFS2_WARN_MSG_PREFIX \
+ " (%d) %s: " fmt, current->pid, \
+ __FUNCTION__, ##__VA_ARGS__); \
+ } while(0)
+
+#define JFFS2_NOTICE(fmt, ...) \
+ do { \
+ printk(JFFS2_NOTICE_MSG_PREFIX \
+ " (%d) %s: " fmt, current->pid, \
+ __FUNCTION__, ##__VA_ARGS__); \
+ } while(0)
+
+#define JFFS2_DEBUG(fmt, ...) \
+ do { \
+ printk(JFFS2_DBG_MSG_PREFIX \
+ " (%d) %s: " fmt, current->pid, \
+ __FUNCTION__, ##__VA_ARGS__); \
+ } while(0)
+
+/*
+ * We split our debugging messages into several parts, depending on the JFFS2
+ * subsystem the message belongs to.
+ */
+/* Read inode debugging messages */
+#ifdef JFFS2_DBG_READINODE_MESSAGES
+#define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_readinode(fmt, ...)
+#endif
+
+/* Fragtree build debugging messages */
+#ifdef JFFS2_DBG_FRAGTREE_MESSAGES
+#define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_fragtree(fmt, ...)
+#endif
+#ifdef JFFS2_DBG_FRAGTREE2_MESSAGES
+#define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_fragtree2(fmt, ...)
+#endif
+
+/* Directory entry list manipulation debugging messages */
+#ifdef JFFS2_DBG_DENTLIST_MESSAGES
+#define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_dentlist(fmt, ...)
+#endif
+
+/* Print the messages about manipulating node_refs */
+#ifdef JFFS2_DBG_NODEREF_MESSAGES
+#define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_noderef(fmt, ...)
+#endif
+
+/* Manipulations with the list of inodes (JFFS2 inocache) */
+#ifdef JFFS2_DBG_INOCACHE_MESSAGES
+#define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_inocache(fmt, ...)
+#endif
+
+/* Summary debugging messages */
+#ifdef JFFS2_DBG_SUMMARY_MESSAGES
+#define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_summary(fmt, ...)
+#endif
+
+/* File system build messages */
+#ifdef JFFS2_DBG_FSBUILD_MESSAGES
+#define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_fsbuild(fmt, ...)
+#endif
+
+/* Watch the object allocations */
+#ifdef JFFS2_DBG_MEMALLOC_MESSAGES
+#define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_memalloc(fmt, ...)
+#endif
+
+
+/* "Sanity" checks */
+void
+__jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+
+/* "Paranoia" checks */
+void
+__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f);
+void
+__jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f);
+void
+__jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
+ uint32_t ofs, int len);
+
+/* "Dump" functions */
+void
+__jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c);
+void
+__jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c);
+void
+__jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f);
+void
+__jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f);
+void
+__jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs);
+void
+__jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs);
+
+#ifdef JFFS2_DBG_PARANOIA_CHECKS
+#define jffs2_dbg_fragtree_paranoia_check(f) \
+ __jffs2_dbg_fragtree_paranoia_check(f)
+#define jffs2_dbg_fragtree_paranoia_check_nolock(f) \
+ __jffs2_dbg_fragtree_paranoia_check_nolock(f)
+#define jffs2_dbg_acct_paranoia_check(c, jeb) \
+ __jffs2_dbg_acct_paranoia_check(c,jeb)
+#define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) \
+ __jffs2_dbg_acct_paranoia_check_nolock(c,jeb)
+#define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) \
+ __jffs2_dbg_prewrite_paranoia_check(c, ofs, len)
+#else
+#define jffs2_dbg_fragtree_paranoia_check(f)
+#define jffs2_dbg_fragtree_paranoia_check_nolock(f)
+#define jffs2_dbg_acct_paranoia_check(c, jeb)
+#define jffs2_dbg_acct_paranoia_check_nolock(c, jeb)
+#define jffs2_dbg_prewrite_paranoia_check(c, ofs, len)
+#endif /* !JFFS2_DBG_PARANOIA_CHECKS */
+
+#ifdef JFFS2_DBG_DUMPS
+#define jffs2_dbg_dump_jeb(c, jeb) \
+ __jffs2_dbg_dump_jeb(c, jeb);
+#define jffs2_dbg_dump_jeb_nolock(jeb) \
+ __jffs2_dbg_dump_jeb_nolock(jeb);
+#define jffs2_dbg_dump_block_lists(c) \
+ __jffs2_dbg_dump_block_lists(c)
+#define jffs2_dbg_dump_block_lists_nolock(c) \
+ __jffs2_dbg_dump_block_lists_nolock(c)
+#define jffs2_dbg_dump_fragtree(f) \
+ __jffs2_dbg_dump_fragtree(f);
+#define jffs2_dbg_dump_fragtree_nolock(f) \
+ __jffs2_dbg_dump_fragtree_nolock(f);
+#define jffs2_dbg_dump_buffer(buf, len, offs) \
+ __jffs2_dbg_dump_buffer(buf, len, offs);
+#define jffs2_dbg_dump_node(c, ofs) \
+ __jffs2_dbg_dump_node(c, ofs);
+#else
+#define jffs2_dbg_dump_jeb(c, jeb)
+#define jffs2_dbg_dump_jeb_nolock(jeb)
+#define jffs2_dbg_dump_block_lists(c)
+#define jffs2_dbg_dump_block_lists_nolock(c)
+#define jffs2_dbg_dump_fragtree(f)
+#define jffs2_dbg_dump_fragtree_nolock(f)
+#define jffs2_dbg_dump_buffer(buf, len, offs)
+#define jffs2_dbg_dump_node(c, ofs)
+#endif /* !JFFS2_DBG_DUMPS */
+
+#ifdef JFFS2_DBG_SANITY_CHECKS
+#define jffs2_dbg_acct_sanity_check(c, jeb) \
+ __jffs2_dbg_acct_sanity_check(c, jeb)
+#define jffs2_dbg_acct_sanity_check_nolock(c, jeb) \
+ __jffs2_dbg_acct_sanity_check_nolock(c, jeb)
+#else
+#define jffs2_dbg_acct_sanity_check(c, jeb)
+#define jffs2_dbg_acct_sanity_check_nolock(c, jeb)
+#endif /* !JFFS2_DBG_SANITY_CHECKS */
+
+#endif /* _JFFS2_DEBUG_H_ */
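
A standalone sketch of the compile-out pattern the per-subsystem dbg_xxx() macros above rely on; the flag and the call site below are invented for the example, only the mechanism mirrors debug.h. When a subsystem's message flag is undefined, the variadic macro expands to nothing, so the message and its arguments cost nothing at run time.

/* Illustration only -- mirrors the dbg_xxx() scheme from debug.h.
 * ##__VA_ARGS__ is the GNU extension the kernel macros also use, so
 * build with gcc.  __func__ stands in for the kernel's __FUNCTION__. */
#include <stdio.h>

#define EXAMPLE_READINODE_MESSAGES	/* comment out to compile the call away */

#ifdef EXAMPLE_READINODE_MESSAGES
#define dbg_readinode(fmt, ...) \
	printf("[JFFS2 DBG] %s: " fmt, __func__, ##__VA_ARGS__)
#else
#define dbg_readinode(fmt, ...)
#endif

int main(void)
{
	unsigned int ino = 42;

	dbg_readinode("read inode #%u\n", ino);	/* printed only when enabled */
	return 0;
}
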
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 3ca0d25eef1..a7bf9cb2567 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: dir.c,v 1.86 2005/07/06 12:13:09 dwmw2 Exp $
+ * $Id: dir.c,v 1.90 2005/11/07 11:14:39 gleixner Exp $
*
*/
@@ -64,7 +64,7 @@ struct inode_operations jffs2_dir_inode_operations =
/* We keep the dirent list sorted in increasing order of name hash,
- and we use the same hash function as the dentries. Makes this
+ and we use the same hash function as the dentries. Makes this
nice and simple
*/
static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
@@ -85,7 +85,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
/* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) {
- if (fd_list->nhash == target->d_name.hash &&
+ if (fd_list->nhash == target->d_name.hash &&
(!fd || fd_list->version > fd->version) &&
strlen(fd_list->name) == target->d_name.len &&
!strncmp(fd_list->name, target->d_name.name, target->d_name.len)) {
@@ -147,7 +147,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
curofs++;
/* First loop: curofs = 2; offset = 2 */
if (curofs < offset) {
- D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
+ D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
fd->name, fd->ino, fd->type, curofs, offset));
continue;
}
@@ -182,7 +182,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
ri = jffs2_alloc_raw_inode();
if (!ri)
return -ENOMEM;
-
+
c = JFFS2_SB_INFO(dir_i->i_sb);
D1(printk(KERN_DEBUG "jffs2_create()\n"));
@@ -203,7 +203,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
f = JFFS2_INODE_INFO(inode);
dir_f = JFFS2_INODE_INFO(dir_i);
- ret = jffs2_do_create(c, dir_f, f, ri,
+ ret = jffs2_do_create(c, dir_f, f, ri,
dentry->d_name.name, dentry->d_name.len);
if (ret) {
@@ -232,11 +232,14 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode);
int ret;
+ uint32_t now = get_seconds();
- ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
- dentry->d_name.len, dead_f);
+ ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
+ dentry->d_name.len, dead_f, now);
if (dead_f->inocache)
dentry->d_inode->i_nlink = dead_f->inocache->nlink;
+ if (!ret)
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
return ret;
}
/***********************************************************************/
@@ -249,6 +252,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
int ret;
uint8_t type;
+ uint32_t now;
/* Don't let people make hard links to bad inodes. */
if (!f->inocache)
@@ -261,13 +265,15 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
if (!type) type = DT_REG;
- ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len);
+ now = get_seconds();
+ ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now);
if (!ret) {
down(&f->sem);
old_dentry->d_inode->i_nlink = ++f->inocache->nlink;
up(&f->sem);
d_instantiate(dentry, old_dentry->d_inode);
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
atomic_inc(&old_dentry->d_inode->i_count);
}
return ret;
@@ -297,14 +303,15 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
if (!ri)
return -ENOMEM;
-
+
c = JFFS2_SB_INFO(dir_i->i_sb);
-
- /* Try to reserve enough space for both node and dirent.
- * Just the node will do for now, though
+
+ /* Try to reserve enough space for both node and dirent.
+ * Just the node will do for now, though
*/
namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
@@ -331,7 +338,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
ri->compr = JFFS2_COMPR_NONE;
ri->data_crc = cpu_to_je32(crc32(0, target, targetlen));
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
-
+
fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL);
jffs2_free_raw_inode(ri);
@@ -344,9 +351,9 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
return PTR_ERR(fn);
}
- /* We use f->dents field to store the target path. */
- f->dents = kmalloc(targetlen + 1, GFP_KERNEL);
- if (!f->dents) {
+ /* We use f->target field to store the target path. */
+ f->target = kmalloc(targetlen + 1, GFP_KERNEL);
+ if (!f->target) {
printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
up(&f->sem);
jffs2_complete_reservation(c);
@@ -354,17 +361,18 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
return -ENOMEM;
}
- memcpy(f->dents, target, targetlen + 1);
- D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->dents));
+ memcpy(f->target, target, targetlen + 1);
+ D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target));
- /* No data here. Only a metadata node, which will be
+ /* No data here. Only a metadata node, which will be
obsoleted by the first data write
*/
f->metadata = fn;
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
/* Eep. */
jffs2_clear_inode(inode);
@@ -399,7 +407,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
if (IS_ERR(fd)) {
- /* dirent failed to write. Delete the inode normally
+ /* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
@@ -442,14 +450,15 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
ri = jffs2_alloc_raw_inode();
if (!ri)
return -ENOMEM;
-
+
c = JFFS2_SB_INFO(dir_i->i_sb);
- /* Try to reserve enough space for both node and dirent.
- * Just the node will do for now, though
+ /* Try to reserve enough space for both node and dirent.
+ * Just the node will do for now, though
*/
namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL,
+ JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
@@ -473,7 +482,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
ri->data_crc = cpu_to_je32(0);
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
-
+
fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
jffs2_free_raw_inode(ri);
@@ -485,20 +494,21 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
jffs2_clear_inode(inode);
return PTR_ERR(fn);
}
- /* No data here. Only a metadata node, which will be
+ /* No data here. Only a metadata node, which will be
obsoleted by the first data write
*/
f->metadata = fn;
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
/* Eep. */
jffs2_clear_inode(inode);
return ret;
}
-
+
rd = jffs2_alloc_raw_dirent();
if (!rd) {
/* Argh. Now we treat it like a normal delete */
@@ -525,9 +535,9 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
-
+
if (IS_ERR(fd)) {
- /* dirent failed to write. Delete the inode normally
+ /* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
@@ -589,19 +599,20 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
ri = jffs2_alloc_raw_inode();
if (!ri)
return -ENOMEM;
-
+
c = JFFS2_SB_INFO(dir_i->i_sb);
-
+
if (S_ISBLK(mode) || S_ISCHR(mode)) {
dev = cpu_to_je16(old_encode_dev(rdev));
devlen = sizeof(dev);
}
-
- /* Try to reserve enough space for both node and dirent.
- * Just the node will do for now, though
+
+ /* Try to reserve enough space for both node and dirent.
+ * Just the node will do for now, though
*/
namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
@@ -627,7 +638,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
ri->compr = JFFS2_COMPR_NONE;
ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen));
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
-
+
fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL);
jffs2_free_raw_inode(ri);
@@ -639,14 +650,15 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
jffs2_clear_inode(inode);
return PTR_ERR(fn);
}
- /* No data here. Only a metadata node, which will be
+ /* No data here. Only a metadata node, which will be
obsoleted by the first data write
*/
f->metadata = fn;
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
/* Eep. */
jffs2_clear_inode(inode);
@@ -682,9 +694,9 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
-
+
if (IS_ERR(fd)) {
- /* dirent failed to write. Delete the inode normally
+ /* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
@@ -716,8 +728,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
struct jffs2_inode_info *victim_f = NULL;
uint8_t type;
+ uint32_t now;
- /* The VFS will check for us and prevent trying to rename a
+ /* The VFS will check for us and prevent trying to rename a
* file over a directory and vice versa, but if it's a directory,
* the VFS can't check whether the victim is empty. The filesystem
* needs to do that for itself.
@@ -739,19 +752,20 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
}
/* XXX: We probably ought to alloc enough space for
- both nodes at the same time. Writing the new link,
+ both nodes at the same time. Writing the new link,
then getting -ENOSPC, is quite bad :)
*/
/* Make a hard link */
-
+
/* XXX: This is ugly */
type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
if (!type) type = DT_REG;
- ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i),
+ now = get_seconds();
+ ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i),
old_dentry->d_inode->i_ino, type,
- new_dentry->d_name.name, new_dentry->d_name.len);
+ new_dentry->d_name.name, new_dentry->d_name.len, now);
if (ret)
return ret;
@@ -768,14 +782,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
}
}
- /* If it was a directory we moved, and there was no victim,
+ /* If it was a directory we moved, and there was no victim,
increase i_nlink on its new parent */
if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f)
new_dir_i->i_nlink++;
/* Unlink the original */
- ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
- old_dentry->d_name.name, old_dentry->d_name.len, NULL);
+ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+ old_dentry->d_name.name, old_dentry->d_name.len, NULL, now);
/* We don't touch inode->i_nlink */
@@ -792,12 +806,15 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
/* Might as well let the VFS know */
d_instantiate(new_dentry, old_dentry->d_inode);
atomic_inc(&old_dentry->d_inode->i_count);
+ new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
return ret;
}
if (S_ISDIR(old_dentry->d_inode->i_mode))
old_dir_i->i_nlink--;
+ new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
+
return 0;
}
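
The dir.c hunks above follow one pattern: a single get_seconds() value is captured as now, handed down to jffs2_do_link()/jffs2_do_unlink() for the on-flash dirent, and then reused to stamp the directory inode's mtime/ctime via ITIME(). A small userspace sketch of why the timestamp is captured once; every name below is invented for the illustration, and time() merely stands in for the kernel's get_seconds().

/* Illustration only: capture the time once and use it for both the
 * on-medium record and the in-core inode, so the two cannot land on
 * different sides of a second boundary. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

struct example_dirent { uint32_t mctime; };		/* what goes to flash */
struct example_inode  { uint32_t mtime, ctime; };	/* what stat() reports */

int main(void)
{
	struct example_dirent fd;
	struct example_inode dir;
	uint32_t now = (uint32_t)time(NULL);	/* stands in for get_seconds() */

	fd.mctime = now;
	dir.mtime = dir.ctime = now;

	printf("dirent mctime %u, dir mtime %u\n",
	       (unsigned)fd.mctime, (unsigned)dir.mtime);
	return 0;
}
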
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 787d84ac2bc..dad68fdffe9 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: erase.c,v 1.80 2005/07/14 19:46:24 joern Exp $
+ * $Id: erase.c,v 1.85 2005/09/20 14:53:15 dedekind Exp $
*
*/
@@ -24,7 +24,7 @@ struct erase_priv_struct {
struct jffs2_eraseblock *jeb;
struct jffs2_sb_info *c;
};
-
+
#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
#endif
@@ -48,7 +48,8 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
#else /* Linux */
struct erase_info *instr;
- D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#x (range %#x-%#x)\n", jeb->offset, jeb->offset, jeb->offset + c->sector_size));
+ D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n",
+ jeb->offset, jeb->offset, jeb->offset + c->sector_size));
instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
if (!instr) {
printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
@@ -70,7 +71,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
instr->callback = jffs2_erase_callback;
instr->priv = (unsigned long)(&instr[1]);
instr->fail_addr = 0xffffffff;
-
+
((struct erase_priv_struct *)instr->priv)->jeb = jeb;
((struct erase_priv_struct *)instr->priv)->c = c;
@@ -95,7 +96,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
return;
}
- if (ret == -EROFS)
+ if (ret == -EROFS)
printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
else
printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);
@@ -196,7 +197,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
c->nr_erasing_blocks--;
spin_unlock(&c->erase_completion_lock);
wake_up(&c->erase_wait);
-}
+}
#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *instr)
@@ -208,7 +209,7 @@ static void jffs2_erase_callback(struct erase_info *instr)
jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
} else {
jffs2_erase_succeeded(priv->c, priv->jeb);
- }
+ }
kfree(instr);
}
#endif /* !__ECOS */
@@ -226,13 +227,13 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
/* Walk the inode's list once, removing any nodes from this eraseblock */
while (1) {
if (!(*prev)->next_in_ino) {
- /* We're looking at the jffs2_inode_cache, which is
+ /* We're looking at the jffs2_inode_cache, which is
at the end of the linked list. Stash it and continue
from the beginning of the list */
ic = (struct jffs2_inode_cache *)(*prev);
prev = &ic->nodes;
continue;
- }
+ }
if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
/* It's in the block we're erasing */
@@ -266,7 +267,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);
this = ic->nodes;
-
+
while(this) {
printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this));
if (++i == 5) {
@@ -289,7 +290,7 @@ static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_erase
while(jeb->first_node) {
ref = jeb->first_node;
jeb->first_node = ref->next_phys;
-
+
/* Remove from the inode-list */
if (ref->next_in_ino)
jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
@@ -306,7 +307,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
uint32_t ofs;
size_t retlen;
int ret = -EIO;
-
+
ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!ebuf) {
printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
@@ -360,7 +361,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
case -EIO: goto filebad;
}
- /* Write the erase complete marker */
+ /* Write the erase complete marker */
D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
bad_offset = jeb->offset;
@@ -398,7 +399,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
vecs[0].iov_base = (unsigned char *) &marker;
vecs[0].iov_len = sizeof(marker);
ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
-
+
if (ret || retlen != sizeof(marker)) {
if (ret)
printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
@@ -415,9 +416,9 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
marker_ref->next_phys = NULL;
marker_ref->flash_offset = jeb->offset | REF_NORMAL;
marker_ref->__totlen = c->cleanmarker_size;
-
+
jeb->first_node = jeb->last_node = marker_ref;
-
+
jeb->free_size = c->sector_size - c->cleanmarker_size;
jeb->used_size = c->cleanmarker_size;
jeb->dirty_size = 0;
@@ -429,8 +430,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
c->free_size += jeb->free_size;
c->used_size += jeb->used_size;
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_acct_sanity_check_nolock(c,jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
list_add_tail(&jeb->list, &c->free_list);
c->nr_erasing_blocks--;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 8279bf0133f..935f273dc57 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: file.c,v 1.102 2005/07/06 12:13:09 dwmw2 Exp $
+ * $Id: file.c,v 1.104 2005/10/18 23:29:35 tpoynor Exp $
*
*/
@@ -34,8 +34,8 @@ int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync)
/* Trigger GC to flush any pending writes for this inode */
jffs2_flush_wbuf_gc(c, inode->i_ino);
-
- return 0;
+
+ return 0;
}
struct file_operations jffs2_file_operations =
@@ -107,7 +107,7 @@ static int jffs2_readpage (struct file *filp, struct page *pg)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
int ret;
-
+
down(&f->sem);
ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
up(&f->sem);
@@ -130,11 +130,12 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg,
struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
uint32_t phys_ofs, alloc_len;
-
+
D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
(unsigned int)inode->i_size, pageofs));
- ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret)
return ret;
@@ -159,7 +160,7 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg,
ri.compr = JFFS2_COMPR_ZERO;
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
ri.data_crc = cpu_to_je32(0);
-
+
fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
if (IS_ERR(fn)) {
@@ -186,7 +187,7 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg,
inode->i_size = pageofs;
up(&f->sem);
}
-
+
/* Read in the page if it wasn't already present, unless it's a whole page */
if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
down(&f->sem);
@@ -217,7 +218,7 @@ static int jffs2_commit_write (struct file *filp, struct page *pg,
if (!start && end == PAGE_CACHE_SIZE) {
/* We need to avoid deadlock with page_cache_read() in
jffs2_garbage_collect_pass(). So we have to mark the
- page up to date, to prevent page_cache_read() from
+ page up to date, to prevent page_cache_read() from
trying to re-lock it. */
SetPageUptodate(pg);
}
@@ -251,7 +252,7 @@ static int jffs2_commit_write (struct file *filp, struct page *pg,
/* There was an error writing. */
SetPageError(pg);
}
-
+
/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
if (writtenlen < (start&3))
writtenlen = 0;
@@ -262,7 +263,7 @@ static int jffs2_commit_write (struct file *filp, struct page *pg,
if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
inode->i_blocks = (inode->i_size + 511) >> 9;
-
+
inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
}
}
@@ -271,13 +272,13 @@ static int jffs2_commit_write (struct file *filp, struct page *pg,
if (start+writtenlen < end) {
/* generic_file_write has written more to the page cache than we've
- actually written to the medium. Mark the page !Uptodate so that
+ actually written to the medium. Mark the page !Uptodate so that
it gets reread */
D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n"));
SetPageError(pg);
ClearPageUptodate(pg);
}
- D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",writtenlen?writtenlen:ret));
- return writtenlen?writtenlen:ret;
+ D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",start+writtenlen==end?0:ret));
+ return start+writtenlen==end?0:ret;
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 5687c3f4200..543420665c5 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: fs.c,v 1.56 2005/07/06 12:13:09 dwmw2 Exp $
+ * $Id: fs.c,v 1.66 2005/09/27 13:17:29 dedekind Exp $
*
*/
@@ -40,7 +40,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
int ret;
D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
ret = inode_change_ok(inode, iattr);
- if (ret)
+ if (ret)
return ret;
/* Special cases - we don't want more than one data node
@@ -73,8 +73,9 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
kfree(mdata);
return -ENOMEM;
}
-
- ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+
+ ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
if (S_ISLNK(inode->i_mode & S_IFMT))
@@ -83,7 +84,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
}
down(&f->sem);
ivalid = iattr->ia_valid;
-
+
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
@@ -99,7 +100,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
if (iattr->ia_mode & S_ISGID &&
!in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
- else
+ else
ri->mode = cpu_to_jemode(iattr->ia_mode);
else
ri->mode = cpu_to_jemode(inode->i_mode);
@@ -128,7 +129,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL);
if (S_ISLNK(inode->i_mode))
kfree(mdata);
-
+
if (IS_ERR(new_metadata)) {
jffs2_complete_reservation(c);
jffs2_free_raw_inode(ri);
@@ -147,7 +148,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
old_metadata = f->metadata;
if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
- jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size);
+ jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);
if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
jffs2_add_full_dnode_to_inode(c, f, new_metadata);
@@ -166,7 +167,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
jffs2_complete_reservation(c);
/* We have to do the vmtruncate() without f->sem held, since
- some pages may be locked and waiting for it in readpage().
+ some pages may be locked and waiting for it in readpage().
We are protected from a simultaneous write() extending i_size
back past iattr->ia_size, because do_truncate() holds the
generic inode semaphore. */
@@ -194,31 +195,27 @@ int jffs2_statfs(struct super_block *sb, struct kstatfs *buf)
buf->f_namelen = JFFS2_MAX_NAME_LEN;
spin_lock(&c->erase_completion_lock);
-
avail = c->dirty_size + c->free_size;
if (avail > c->sector_size * c->resv_blocks_write)
avail -= c->sector_size * c->resv_blocks_write;
else
avail = 0;
+ spin_unlock(&c->erase_completion_lock);
buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
- D2(jffs2_dump_block_lists(c));
-
- spin_unlock(&c->erase_completion_lock);
-
return 0;
}
void jffs2_clear_inode (struct inode *inode)
{
- /* We can forget about this inode for now - drop all
+ /* We can forget about this inode for now - drop all
* the nodelists associated with it, etc.
*/
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
-
+
D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
jffs2_do_clear_inode(c, f);
@@ -237,7 +234,7 @@ void jffs2_read_inode (struct inode *inode)
c = JFFS2_SB_INFO(inode->i_sb);
jffs2_init_inode_info(f);
-
+
ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
if (ret) {
@@ -257,14 +254,14 @@ void jffs2_read_inode (struct inode *inode)
inode->i_blksize = PAGE_SIZE;
inode->i_blocks = (inode->i_size + 511) >> 9;
-
+
switch (inode->i_mode & S_IFMT) {
jint16_t rdev;
case S_IFLNK:
inode->i_op = &jffs2_symlink_inode_operations;
break;
-
+
case S_IFDIR:
{
struct jffs2_full_dirent *fd;
@@ -301,7 +298,7 @@ void jffs2_read_inode (struct inode *inode)
jffs2_do_clear_inode(c, f);
make_bad_inode(inode);
return;
- }
+ }
case S_IFSOCK:
case S_IFIFO:
@@ -357,11 +354,11 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
down(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
up(&c->alloc_sem);
- }
+ }
if (!(*flags & MS_RDONLY))
jffs2_start_garbage_collect_thread(c);
-
+
*flags |= MS_NOATIME;
return 0;
@@ -395,9 +392,9 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
c = JFFS2_SB_INFO(sb);
-
+
inode = new_inode(sb);
-
+
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -461,40 +458,24 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
#endif
c->flash_size = c->mtd->size;
-
- /*
- * Check, if we have to concatenate physical blocks to larger virtual blocks
- * to reduce the memorysize for c->blocks. (kmalloc allows max. 128K allocation)
- */
- c->sector_size = c->mtd->erasesize;
+ c->sector_size = c->mtd->erasesize;
blocks = c->flash_size / c->sector_size;
- if (!(c->mtd->flags & MTD_NO_VIRTBLOCKS)) {
- while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024)) {
- blocks >>= 1;
- c->sector_size <<= 1;
- }
- }
/*
* Size alignment check
*/
if ((c->sector_size * blocks) != c->flash_size) {
- c->flash_size = c->sector_size * blocks;
+ c->flash_size = c->sector_size * blocks;
printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
c->flash_size / 1024);
}
- if (c->sector_size != c->mtd->erasesize)
- printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using virtual blocks size (%dKiB) instead\n",
- c->mtd->erasesize / 1024, c->sector_size / 1024);
-
if (c->flash_size < 5*c->sector_size) {
printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
return -EINVAL;
}
c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
- /* Joern -- stick alignment for weird 8-byte-page flash here */
/* NAND (or other bizarre) flash... do setup accordingly */
ret = jffs2_flash_setup(c);
@@ -517,7 +498,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
root_i = iget(sb, 1);
if (is_bad_inode(root_i)) {
D1(printk(KERN_WARNING "get root inode failed\n"));
- goto out_nodes;
+ goto out_root_i;
}
D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
@@ -535,10 +516,9 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
out_root_i:
iput(root_i);
- out_nodes:
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
- if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
+ if (jffs2_blocks_use_vmalloc(c))
vfree(c->blocks);
else
kfree(c->blocks);
@@ -563,16 +543,16 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic;
if (!nlink) {
/* The inode has zero nlink but its nodes weren't yet marked
- obsolete. This has to be because we're still waiting for
+ obsolete. This has to be because we're still waiting for
the final (close() and) iput() to happen.
- There's a possibility that the final iput() could have
+ There's a possibility that the final iput() could have
happened while we were contemplating. In order to ensure
that we don't cause a new read_inode() (which would fail)
for the inode in question, we use ilookup() in this case
instead of iget().
- The nlink can't _become_ zero at this point because we're
+ The nlink can't _become_ zero at this point because we're
holding the alloc_sem, and jffs2_do_unlink() would also
need that while decrementing nlink on any inode.
*/
@@ -619,19 +599,19 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
return JFFS2_INODE_INFO(inode);
}
-unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
- struct jffs2_inode_info *f,
+unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
+ struct jffs2_inode_info *f,
unsigned long offset,
unsigned long *priv)
{
struct inode *inode = OFNI_EDONI_2SFFJ(f);
struct page *pg;
- pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+ pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
(void *)jffs2_do_readpage_unlock, inode);
if (IS_ERR(pg))
return (void *)pg;
-
+
*priv = (unsigned long)pg;
return kmap(pg);
}
@@ -648,7 +628,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c,
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
int ret = 0;
-
+
if (jffs2_cleanmarker_oob(c)) {
/* NAND flash... do setup accordingly */
ret = jffs2_nand_flash_setup(c);
@@ -662,14 +642,21 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c) {
if (ret)
return ret;
}
-
+
/* and Dataflash */
if (jffs2_dataflash(c)) {
ret = jffs2_dataflash_setup(c);
if (ret)
return ret;
}
-
+
+ /* and Intel "Sibley" flash */
+ if (jffs2_nor_wbuf_flash(c)) {
+ ret = jffs2_nor_wbuf_flash_setup(c);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
@@ -683,9 +670,14 @@ void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
if (jffs2_nor_ecc(c)) {
jffs2_nor_ecc_flash_cleanup(c);
}
-
+
/* and DataFlash */
if (jffs2_dataflash(c)) {
jffs2_dataflash_cleanup(c);
}
+
+ /* and Intel "Sibley" flash */
+ if (jffs2_nor_wbuf_flash(c)) {
+ jffs2_nor_wbuf_flash_cleanup(c);
+ }
}
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 7086cd63450..f9ffece453a 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: gc.c,v 1.148 2005/04/09 10:47:00 dedekind Exp $
+ * $Id: gc.c,v 1.155 2005/11/07 11:14:39 gleixner Exp $
*
*/
@@ -21,14 +21,14 @@
#include "nodelist.h"
#include "compr.h"
-static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
+static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic,
struct jffs2_raw_node_ref *raw);
-static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
-static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
-static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
@@ -55,7 +55,7 @@ again:
D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
nextlist = &c->bad_used_list;
} else if (n < 50 && !list_empty(&c->erasable_list)) {
- /* Note that most of them will have gone directly to be erased.
+ /* Note that most of them will have gone directly to be erased.
So don't favour the erasable_list _too_ much. */
D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
nextlist = &c->erasable_list;
@@ -101,7 +101,7 @@ again:
printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
BUG();
}
-
+
/* Have we accidentally picked a clean block with wasted space ? */
if (ret->wasted_size) {
D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
@@ -111,7 +111,6 @@ again:
ret->wasted_size = 0;
}
- D2(jffs2_dump_block_lists(c));
return ret;
}
@@ -137,12 +136,12 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
/* We can't start doing GC yet. We haven't finished checking
the node CRCs etc. Do it now. */
-
+
/* checked_ino is protected by the alloc_sem */
if (c->checked_ino > c->highest_ino) {
printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
c->unchecked_size);
- D2(jffs2_dump_block_lists(c));
+ jffs2_dbg_dump_block_lists_nolock(c);
spin_unlock(&c->erase_completion_lock);
BUG();
}
@@ -179,7 +178,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
case INO_STATE_READING:
/* We need to wait for it to finish, lest we move on
- and trigger the BUG() above while we haven't yet
+ and trigger the BUG() above while we haven't yet
finished checking all its nodes */
D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
up(&c->alloc_sem);
@@ -229,13 +228,13 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
}
raw = jeb->gc_node;
-
+
while(ref_obsolete(raw)) {
D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
raw = raw->next_phys;
if (unlikely(!raw)) {
printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
- printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
+ printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
jeb->gc_node = raw;
spin_unlock(&c->erase_completion_lock);
@@ -260,7 +259,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
ic = jffs2_raw_ref_to_ic(raw);
/* We need to hold the inocache. Either the erase_completion_lock or
- the inocache_lock are sufficient; we trade down since the inocache_lock
+ the inocache_lock are sufficient; we trade down since the inocache_lock
causes less contention. */
spin_lock(&c->inocache_lock);
@@ -279,14 +278,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
switch(ic->state) {
case INO_STATE_CHECKEDABSENT:
- /* It's been checked, but it's not currently in-core.
+ /* It's been checked, but it's not currently in-core.
We can just copy any pristine nodes, but have
to prevent anyone else from doing read_inode() while
we're at it, so we set the state accordingly */
if (ref_flags(raw) == REF_PRISTINE)
ic->state = INO_STATE_GC;
else {
- D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
+ D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
ic->ino));
}
break;
@@ -299,8 +298,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
case INO_STATE_CHECKING:
case INO_STATE_GC:
/* Should never happen. We should have finished checking
- by the time we actually start doing any GC, and since
- we're holding the alloc_sem, no other garbage collection
+ by the time we actually start doing any GC, and since
+ we're holding the alloc_sem, no other garbage collection
can happen.
*/
printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
@@ -320,21 +319,21 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
ic->ino, ic->state));
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
- /* And because we dropped the alloc_sem we must start again from the
+ /* And because we dropped the alloc_sem we must start again from the
beginning. Ponder chance of livelock here -- we're returning success
without actually making any progress.
- Q: What are the chances that the inode is back in INO_STATE_READING
+ Q: What are the chances that the inode is back in INO_STATE_READING
again by the time we next enter this function? And that this happens
enough times to cause a real delay?
- A: Small enough that I don't care :)
+ A: Small enough that I don't care :)
*/
return 0;
}
/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
- node intact, and we don't have to muck about with the fragtree etc.
+ node intact, and we don't have to muck about with the fragtree etc.
because we know it's not in-core. If it _was_ in-core, we go through
all the iget() crap anyway */
@@ -454,7 +453,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
if (!ret) {
/* Urgh. Return it sensibly. */
frag->node->raw = f->inocache->nodes;
- }
+ }
if (ret != -EBADFD)
goto upnout;
}
@@ -468,7 +467,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
}
goto upnout;
}
-
+
/* Wasn't a dnode. Try dirent */
for (fd = f->dents; fd; fd=fd->next) {
if (fd->raw == raw)
@@ -485,7 +484,8 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
if (ref_obsolete(raw)) {
printk(KERN_WARNING "But it's obsolete so we don't mind too much\n");
} else {
- ret = -EIO;
+ jffs2_dbg_dump_node(c, ref_offset(raw));
+ BUG();
}
}
upnout:
@@ -494,7 +494,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
return ret;
}
-static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
+static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic,
struct jffs2_raw_node_ref *raw)
{
@@ -513,8 +513,11 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
/* Ask for a small amount of space (or the totlen if smaller) because we
don't want to force wastage of the end of a block if splitting would
work. */
- ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN,
- rawlen), &phys_ofs, &alloclen);
+ ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) +
+ JFFS2_MIN_DATA_LEN, rawlen), &phys_ofs, &alloclen, rawlen);
+ /* this is not the exact summary size;
+ it is only an upper estimate */
+
if (ret)
return ret;
@@ -577,7 +580,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
}
break;
default:
- printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
+ printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
ref_offset(raw), je16_to_cpu(node->u.nodetype));
goto bail;
}
@@ -618,17 +621,19 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
retried = 1;
D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n"));
-
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
- ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy);
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
+
+ ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy, rawlen);
+ /* this is not the exact summary size;
+ it is only an upper estimate */
if (!ret) {
D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
@@ -664,7 +669,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
goto out_node;
}
-static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
struct jffs2_full_dnode *new_fn;
@@ -679,7 +684,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
S_ISCHR(JFFS2_F_I_MODE(f)) ) {
/* For these, we don't actually need to read the old node */
/* FIXME: for minor or major > 255. */
- dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) |
+ dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) |
JFFS2_F_I_RDEV_MIN(f)));
mdata = (char *)&dev;
mdatalen = sizeof(dev);
@@ -700,14 +705,15 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen));
}
-
- ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen);
+
+ ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen,
+ JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
sizeof(ri)+ mdatalen, ret);
goto out;
}
-
+
last_frag = frag_last(&f->fragtree);
if (last_frag)
/* Fetch the inode length from the fragtree rather then
@@ -715,7 +721,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
ilen = last_frag->ofs + last_frag->size;
else
ilen = JFFS2_F_I_SIZE(f);
-
+
memset(&ri, 0, sizeof(ri));
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
@@ -754,7 +760,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
return ret;
}
-static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
struct jffs2_full_dirent *new_fd;
@@ -771,12 +777,18 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er
rd.pino = cpu_to_je32(f->inocache->ino);
rd.version = cpu_to_je32(++f->highest_version);
rd.ino = cpu_to_je32(fd->ino);
- rd.mctime = cpu_to_je32(max(JFFS2_F_I_MTIME(f), JFFS2_F_I_CTIME(f)));
+ /* If the times on this inode were set by explicit utime() they can be different,
+ so refrain from splatting them. */
+ if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f))
+ rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+ else
+ rd.mctime = cpu_to_je32(0);
rd.type = fd->type;
rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));
-
- ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen);
+
+ ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen,
+ JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
if (ret) {
printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
sizeof(rd)+rd.nsize, ret);
@@ -792,7 +804,7 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er
return 0;
}
-static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
struct jffs2_full_dirent **fdp = &f->dents;
@@ -831,7 +843,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
if (ref_totlen(c, NULL, raw) != rawlen)
continue;
- /* Doesn't matter if there's one in the same erase block. We're going to
+ /* Doesn't matter if there's one in the same erase block. We're going to
delete it too at the same time. */
if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
continue;
@@ -883,6 +895,9 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
kfree(rd);
}
+ /* FIXME: If we're deleting a dirent which contains the current mtime and ctime,
+ we should update the metadata node with those times accordingly */
+
/* No need for it any more. Just mark it obsolete and remove it from the list */
while (*fdp) {
if ((*fdp) == fd) {
@@ -912,13 +927,13 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
f->inocache->ino, start, end));
-
+
memset(&ri, 0, sizeof(ri));
if(fn->frags > 1) {
size_t readlen;
uint32_t crc;
- /* It's partially obsoleted by a later write. So we have to
+ /* It's partially obsoleted by a later write. So we have to
write it out again with the _same_ version as before */
ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
if (readlen != sizeof(ri) || ret) {
@@ -940,16 +955,16 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
crc = crc32(0, &ri, sizeof(ri)-8);
if (crc != je32_to_cpu(ri.node_crc)) {
printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
- ref_offset(fn->raw),
+ ref_offset(fn->raw),
je32_to_cpu(ri.node_crc), crc);
/* FIXME: We could possibly deal with this by writing new holes for each frag */
- printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
+ printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
start, end, f->inocache->ino);
goto fill;
}
if (ri.compr != JFFS2_COMPR_ZERO) {
printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw));
- printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
+ printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
start, end, f->inocache->ino);
goto fill;
}
@@ -967,7 +982,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
ri.csize = cpu_to_je32(0);
ri.compr = JFFS2_COMPR_ZERO;
}
-
+
frag = frag_last(&f->fragtree);
if (frag)
		/* Fetch the inode length from the fragtree rather than
@@ -986,7 +1001,8 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
ri.data_crc = cpu_to_je32(0);
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
- ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen);
+ ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen,
+ JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
sizeof(ri), ret);
@@ -1008,10 +1024,10 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
return 0;
}
- /*
+ /*
* We should only get here in the case where the node we are
* replacing had more than one frag, so we kept the same version
- * number as before. (Except in case of error -- see 'goto fill;'
+ * number as before. (Except in case of error -- see 'goto fill;'
* above.)
*/
D1(if(unlikely(fn->frags <= 1)) {
@@ -1023,7 +1039,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
mark_ref_normal(new_fn->raw);
- for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
+ for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
frag; frag = frag_next(frag)) {
if (frag->ofs > fn->size + fn->ofs)
break;
@@ -1041,10 +1057,10 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n");
BUG();
}
-
+
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
-
+
return 0;
}
@@ -1054,12 +1070,12 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
{
struct jffs2_full_dnode *new_fn;
struct jffs2_raw_inode ri;
- uint32_t alloclen, phys_ofs, offset, orig_end, orig_start;
+ uint32_t alloclen, phys_ofs, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
unsigned long pg;
unsigned char *pg_ptr;
-
+
memset(&ri, 0, sizeof(ri));
D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
@@ -1071,8 +1087,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
/* Attempt to do some merging. But only expand to cover logically
adjacent frags if the block containing them is already considered
- to be dirty. Otherwise we end up with GC just going round in
- circles dirtying the nodes it already wrote out, especially
+ to be dirty. Otherwise we end up with GC just going round in
+ circles dirtying the nodes it already wrote out, especially
on NAND where we have small eraseblocks and hence a much higher
chance of nodes having to be split to cross boundaries. */
@@ -1106,7 +1122,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
break;
} else {
- /* OK, it's a frag which extends to the beginning of the page. Does it live
+ /* OK, it's a frag which extends to the beginning of the page. Does it live
in a block which is still considered clean? If so, don't obsolete it.
If not, cover it anyway. */
@@ -1156,7 +1172,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
break;
} else {
- /* OK, it's a frag which extends to the beginning of the page. Does it live
+ /* OK, it's a frag which extends to the beginning of the page. Does it live
in a block which is still considered clean? If so, don't obsolete it.
If not, cover it anyway. */
@@ -1183,14 +1199,14 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
break;
}
}
- D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
+ D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
orig_start, orig_end, start, end));
D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
BUG_ON(end < orig_end);
BUG_ON(start > orig_start);
}
-
+
/* First, use readpage() to read the appropriate page into the page cache */
/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
* triggered garbage collection in the first place?
@@ -1211,7 +1227,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
uint32_t cdatalen;
uint16_t comprtype = JFFS2_COMPR_NONE;
- ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen);
+ ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs,
+ &alloclen, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
@@ -1246,7 +1263,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
ri.usercompr = (comprtype >> 8) & 0xff;
ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
-
+
new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC);
jffs2_free_comprbuf(comprbuf, writebuf);
@@ -1268,4 +1285,3 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
jffs2_gc_release_page(c, pg_ptr, &pg);
return ret;
}
-
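Throughout the gc.c hunks, jffs2_reserve_space_gc() gains a fifth argument: a worst-case size hint for the summary record of the node about to be written, so the reservation also leaves headroom for an erase-block summary entry. A minimal sketch of what such size hints could expand to, assuming per-nodetype on-flash summary record structs (the struct names and the header they live in are assumptions, not shown in these hunks):

	/* Illustrative only -- presumed to live in the JFFS2 summary header */
	#define JFFS2_SUMMARY_INODE_SIZE	(sizeof(struct jffs2_sum_inode_flash))
	#define JFFS2_SUMMARY_DIRENT_SIZE(x)	(sizeof(struct jffs2_sum_dirent_flash) + (x))

Under that reading, the dirent GC path above reserves sizeof(rd) + rd.nsize bytes of data space plus JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize) of summary headroom, while the inode and hole paths use the fixed JFFS2_SUMMARY_INODE_SIZE.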
diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h
index 84f184f0836..22a93a08210 100644
--- a/fs/jffs2/histo.h
+++ b/fs/jffs2/histo.h
@@ -1,3 +1,3 @@
/* This file provides the bit-probabilities for the input file */
-#define BIT_DIVIDER 629
+#define BIT_DIVIDER 629
static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */
diff --git a/fs/jffs2/histo_mips.h b/fs/jffs2/histo_mips.h
index 9a443268d88..fa3dac19a10 100644
--- a/fs/jffs2/histo_mips.h
+++ b/fs/jffs2/histo_mips.h
@@ -1,2 +1,2 @@
-#define BIT_DIVIDER_MIPS 1043
+#define BIT_DIVIDER_MIPS 1043
static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */
diff --git a/fs/jffs2/ioctl.c b/fs/jffs2/ioctl.c
index 238c7992064..69099835de1 100644
--- a/fs/jffs2/ioctl.c
+++ b/fs/jffs2/ioctl.c
@@ -7,17 +7,17 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: ioctl.c,v 1.9 2004/11/16 20:36:11 dwmw2 Exp $
+ * $Id: ioctl.c,v 1.10 2005/11/07 11:14:40 gleixner Exp $
*
*/
#include <linux/fs.h>
-int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
/* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which
will include compression support etc. */
return -ENOTTY;
}
-
+
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 5abb431c2a0..036cbd11c00 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: malloc.c,v 1.28 2004/11/16 20:36:11 dwmw2 Exp $
+ * $Id: malloc.c,v 1.31 2005/11/07 11:14:40 gleixner Exp $
*
*/
@@ -17,15 +17,6 @@
#include <linux/jffs2.h>
#include "nodelist.h"
-#if 0
-#define JFFS2_SLAB_POISON SLAB_POISON
-#else
-#define JFFS2_SLAB_POISON 0
-#endif
-
-// replace this by #define D3 (x) x for cache debugging
-#define D3(x)
-
/* These are initialised to NULL in the kernel startup code.
If you're porting to other operating systems, beware */
static kmem_cache_t *full_dnode_slab;
@@ -38,45 +29,45 @@ static kmem_cache_t *inode_cache_slab;
int __init jffs2_create_slab_caches(void)
{
- full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
+ full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
sizeof(struct jffs2_full_dnode),
- 0, JFFS2_SLAB_POISON, NULL, NULL);
+ 0, 0, NULL, NULL);
if (!full_dnode_slab)
goto err;
raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
sizeof(struct jffs2_raw_dirent),
- 0, JFFS2_SLAB_POISON, NULL, NULL);
+ 0, 0, NULL, NULL);
if (!raw_dirent_slab)
goto err;
raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
sizeof(struct jffs2_raw_inode),
- 0, JFFS2_SLAB_POISON, NULL, NULL);
+ 0, 0, NULL, NULL);
if (!raw_inode_slab)
goto err;
tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
sizeof(struct jffs2_tmp_dnode_info),
- 0, JFFS2_SLAB_POISON, NULL, NULL);
+ 0, 0, NULL, NULL);
if (!tmp_dnode_info_slab)
goto err;
raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref",
sizeof(struct jffs2_raw_node_ref),
- 0, JFFS2_SLAB_POISON, NULL, NULL);
+ 0, 0, NULL, NULL);
if (!raw_node_ref_slab)
goto err;
node_frag_slab = kmem_cache_create("jffs2_node_frag",
sizeof(struct jffs2_node_frag),
- 0, JFFS2_SLAB_POISON, NULL, NULL);
+ 0, 0, NULL, NULL);
if (!node_frag_slab)
goto err;
inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
sizeof(struct jffs2_inode_cache),
- 0, JFFS2_SLAB_POISON, NULL, NULL);
+ 0, 0, NULL, NULL);
if (inode_cache_slab)
return 0;
err:
@@ -104,102 +95,113 @@ void jffs2_destroy_slab_caches(void)
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
- return kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
+ struct jffs2_full_dirent *ret;
+ ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
+ return ret;
}
void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
+ dbg_memalloc("%p\n", x);
kfree(x);
}
struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
- struct jffs2_full_dnode *ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
- D3 (printk (KERN_DEBUG "alloc_full_dnode at %p\n", ret));
+ struct jffs2_full_dnode *ret;
+ ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
return ret;
}
void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
- D3 (printk (KERN_DEBUG "free full_dnode at %p\n", x));
+ dbg_memalloc("%p\n", x);
kmem_cache_free(full_dnode_slab, x);
}
struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
- struct jffs2_raw_dirent *ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
- D3 (printk (KERN_DEBUG "alloc_raw_dirent\n", ret));
+ struct jffs2_raw_dirent *ret;
+ ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
return ret;
}
void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
- D3 (printk (KERN_DEBUG "free_raw_dirent at %p\n", x));
+ dbg_memalloc("%p\n", x);
kmem_cache_free(raw_dirent_slab, x);
}
struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
- struct jffs2_raw_inode *ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
- D3 (printk (KERN_DEBUG "alloc_raw_inode at %p\n", ret));
+ struct jffs2_raw_inode *ret;
+ ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
return ret;
}
void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
- D3 (printk (KERN_DEBUG "free_raw_inode at %p\n", x));
+ dbg_memalloc("%p\n", x);
kmem_cache_free(raw_inode_slab, x);
}
struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
- struct jffs2_tmp_dnode_info *ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
- D3 (printk (KERN_DEBUG "alloc_tmp_dnode_info at %p\n", ret));
+ struct jffs2_tmp_dnode_info *ret;
+ ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n",
+ ret);
return ret;
}
void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
- D3 (printk (KERN_DEBUG "free_tmp_dnode_info at %p\n", x));
+ dbg_memalloc("%p\n", x);
kmem_cache_free(tmp_dnode_info_slab, x);
}
struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void)
{
- struct jffs2_raw_node_ref *ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
- D3 (printk (KERN_DEBUG "alloc_raw_node_ref at %p\n", ret));
+ struct jffs2_raw_node_ref *ret;
+ ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
return ret;
}
void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x)
{
- D3 (printk (KERN_DEBUG "free_raw_node_ref at %p\n", x));
+ dbg_memalloc("%p\n", x);
kmem_cache_free(raw_node_ref_slab, x);
}
struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
- struct jffs2_node_frag *ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
- D3 (printk (KERN_DEBUG "alloc_node_frag at %p\n", ret));
+ struct jffs2_node_frag *ret;
+ ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
return ret;
}
void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
- D3 (printk (KERN_DEBUG "free_node_frag at %p\n", x));
+ dbg_memalloc("%p\n", x);
kmem_cache_free(node_frag_slab, x);
}
struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
- struct jffs2_inode_cache *ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
- D3 (printk(KERN_DEBUG "Allocated inocache at %p\n", ret));
+ struct jffs2_inode_cache *ret;
+ ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
return ret;
}
void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
- D3 (printk(KERN_DEBUG "Freeing inocache at %p\n", x));
+ dbg_memalloc("%p\n", x);
kmem_cache_free(inode_cache_slab, x);
}
-
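The malloc.c hunk replaces the file-local D3() debug macro with dbg_memalloc() from the shared JFFS2 debug header. A minimal sketch of how such a macro is typically gated at compile time, assuming a JFFS2_DBG_MEMALLOC_MESSAGES switch (both the switch name and the print format are assumptions, not shown in this hunk):

	/* Sketch only -- the real definition belongs to the JFFS2 debug header */
	#ifdef JFFS2_DBG_MEMALLOC_MESSAGES
	#define dbg_memalloc(fmt, ...)						\
		printk(KERN_DEBUG "[JFFS2 DBG] %s: " fmt, __func__, ##__VA_ARGS__)
	#else
	#define dbg_memalloc(fmt, ...)
	#endif

With the switch disabled the calls compile to nothing, so the allocation wrappers keep their tracing hooks without any cost in production builds.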
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 4991c348f6e..c79eebb8ab3 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: nodelist.c,v 1.98 2005/07/10 15:15:32 dedekind Exp $
+ * $Id: nodelist.c,v 1.115 2005/11/07 11:14:40 gleixner Exp $
*
*/
@@ -24,469 +24,832 @@
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
{
struct jffs2_full_dirent **prev = list;
- D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list));
+
+ dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino);
while ((*prev) && (*prev)->nhash <= new->nhash) {
if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
/* Duplicate. Free one */
if (new->version < (*prev)->version) {
- D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n"));
- D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino));
+				dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n",
+ (*prev)->name, (*prev)->ino);
jffs2_mark_node_obsolete(c, new->raw);
jffs2_free_full_dirent(new);
} else {
- D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino));
+				dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n",
+ (*prev)->name, (*prev)->ino);
new->next = (*prev)->next;
jffs2_mark_node_obsolete(c, ((*prev)->raw));
jffs2_free_full_dirent(*prev);
*prev = new;
}
- goto out;
+ return;
}
prev = &((*prev)->next);
}
new->next = *prev;
*prev = new;
+}
+
+void jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size)
+{
+ struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);
+
+ dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size);
+
+ /* We know frag->ofs <= size. That's what lookup does for us */
+ if (frag && frag->ofs != size) {
+ if (frag->ofs+frag->size > size) {
+ frag->size = size - frag->ofs;
+ }
+ frag = frag_next(frag);
+ }
+ while (frag && frag->ofs >= size) {
+ struct jffs2_node_frag *next = frag_next(frag);
+
+ frag_erase(frag, list);
+ jffs2_obsolete_node_frag(c, frag);
+ frag = next;
+ }
- out:
- D2(while(*list) {
- printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u\n", (*list)->name, (*list)->nhash, (*list)->ino);
- list = &(*list)->next;
- });
+ if (size == 0)
+ return;
+
+ /*
+ * If the last fragment starts at the RAM page boundary, it is
+ * REF_PRISTINE irrespective of its size.
+ */
+ frag = frag_last(list);
+ if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
+ dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
+ frag->ofs, frag->ofs + frag->size);
+ frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
+ }
}
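+
+/*
+ * (Illustrative sketch) the expected consumer of this helper is the truncate
+ * path, e.g. on a shrinking setattr:
+ *
+ *	jffs2_truncate_fragtree(c, &f->fragtree, inode->i_size);
+ *
+ * The call site named here is an assumption; it is not part of this hunk.
+ */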
-/*
- * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in
- * order of increasing version.
- */
-static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
+void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
{
- struct rb_node **p = &list->rb_node;
- struct rb_node * parent = NULL;
- struct jffs2_tmp_dnode_info *this;
-
- while (*p) {
- parent = *p;
- this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
-
- /* There may actually be a collision here, but it doesn't
- actually matter. As long as the two nodes with the same
- version are together, it's all fine. */
- if (tn->version < this->version)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
+ if (this->node) {
+ this->node->frags--;
+ if (!this->node->frags) {
+ /* The node has no valid frags left. It's totally obsoleted */
+ dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n",
+ ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size);
+ jffs2_mark_node_obsolete(c, this->node->raw);
+ jffs2_free_full_dnode(this->node);
+ } else {
+ dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n",
+ ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags);
+ mark_ref_normal(this->node->raw);
+ }
- rb_link_node(&tn->rb, parent, p);
- rb_insert_color(&tn->rb, list);
+ }
+ jffs2_free_node_frag(this);
}
-static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
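+/*
+ * Note (illustrative): this helper only links 'newfrag' into place below
+ * 'base'; every caller is expected to follow it with rb_insert_color() to
+ * rebalance the tree, as the call sites in this file do.
+ */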
+static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
{
- struct rb_node *this;
- struct jffs2_tmp_dnode_info *tn;
+ struct rb_node *parent = &base->rb;
+ struct rb_node **link = &parent;
- this = list->rb_node;
+ dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size);
- /* Now at bottom of tree */
- while (this) {
- if (this->rb_left)
- this = this->rb_left;
- else if (this->rb_right)
- this = this->rb_right;
+ while (*link) {
+ parent = *link;
+ base = rb_entry(parent, struct jffs2_node_frag, rb);
+
+ if (newfrag->ofs > base->ofs)
+ link = &base->rb.rb_right;
+ else if (newfrag->ofs < base->ofs)
+ link = &base->rb.rb_left;
else {
- tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
- jffs2_free_full_dnode(tn->fn);
- jffs2_free_tmp_dnode_info(tn);
-
- this = this->rb_parent;
- if (!this)
- break;
-
- if (this->rb_left == &tn->rb)
- this->rb_left = NULL;
- else if (this->rb_right == &tn->rb)
- this->rb_right = NULL;
- else BUG();
+ JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
+ BUG();
}
}
- list->rb_node = NULL;
+
+ rb_link_node(&newfrag->rb, &base->rb, link);
}
-static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
+/*
+ * Allocates and initializes a new fragment.
+ */
+static inline struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size)
{
- struct jffs2_full_dirent *next;
-
- while (fd) {
- next = fd->next;
- jffs2_free_full_dirent(fd);
- fd = next;
+ struct jffs2_node_frag *newfrag;
+
+ newfrag = jffs2_alloc_node_frag();
+ if (likely(newfrag)) {
+ newfrag->ofs = ofs;
+ newfrag->size = size;
+ newfrag->node = fn;
+ } else {
+ JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n");
}
+
+ return newfrag;
}
-/* Returns first valid node after 'ref'. May return 'ref' */
-static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
+/*
+ * Called when no overlapping fragment exists. Inserts a hole before the new
+ * fragment and inserts the new fragment into the fragtree.
+ */
+static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root,
+ struct jffs2_node_frag *newfrag,
+ struct jffs2_node_frag *this, uint32_t lastend)
{
- while (ref && ref->next_in_ino) {
- if (!ref_obsolete(ref))
- return ref;
- D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)));
- ref = ref->next_in_ino;
+ if (lastend < newfrag->node->ofs) {
+ /* put a hole in before the new fragment */
+ struct jffs2_node_frag *holefrag;
+
+ holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
+ if (unlikely(!holefrag)) {
+ jffs2_free_node_frag(newfrag);
+ return -ENOMEM;
+ }
+
+ if (this) {
+ /* By definition, the 'this' node has no right-hand child,
+ because there are no frags with offset greater than it.
+ So that's where we want to put the hole */
+ dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n",
+ holefrag->ofs, holefrag->ofs + holefrag->size);
+ rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right);
+ } else {
+ dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n",
+ holefrag->ofs, holefrag->ofs + holefrag->size);
+ rb_link_node(&holefrag->rb, NULL, &root->rb_node);
+ }
+ rb_insert_color(&holefrag->rb, root);
+ this = holefrag;
+ }
+
+ if (this) {
+ /* By definition, the 'this' node has no right-hand child,
+ because there are no frags with offset greater than it.
+		   So that's where we want to put the new fragment */
+ dbg_fragtree2("add the new node at the right\n");
+ rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
+ } else {
+ dbg_fragtree2("insert the new node at the root of the tree\n");
+ rb_link_node(&newfrag->rb, NULL, &root->rb_node);
}
- return NULL;
+ rb_insert_color(&newfrag->rb, root);
+
+ return 0;
}
-/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
- with this ino, returning the former in order of version */
+/* Doesn't set inode->i_size */
+static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag)
+{
+ struct jffs2_node_frag *this;
+ uint32_t lastend;
+
+	/* Skip all the nodes which end before this one starts */
+ this = jffs2_lookup_node_frag(root, newfrag->node->ofs);
+
+ if (this) {
+ dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n",
+ this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this);
+ lastend = this->ofs + this->size;
+ } else {
+ dbg_fragtree2("lookup gave no frag\n");
+ lastend = 0;
+ }
+
+ /* See if we ran off the end of the fragtree */
+ if (lastend <= newfrag->ofs) {
+ /* We did */
+
+ /* Check if 'this' node was on the same page as the new node.
+ If so, both 'this' and the new node get marked REF_NORMAL so
+ the GC can take a look.
+ */
+ if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
+ if (this->node)
+ mark_ref_normal(this->node->raw);
+ mark_ref_normal(newfrag->node->raw);
+ }
+
+ return no_overlapping_node(c, root, newfrag, this, lastend);
+ }
-int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- struct rb_root *tnp, struct jffs2_full_dirent **fdp,
- uint32_t *highest_version, uint32_t *latest_mctime,
- uint32_t *mctime_ver)
+ if (this->node)
+ dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n",
+ this->ofs, this->ofs + this->size,
+ ref_offset(this->node->raw), ref_flags(this->node->raw));
+ else
+ dbg_fragtree2("dealing with hole frag %u-%u.\n",
+ this->ofs, this->ofs + this->size);
+
+ /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes,
+ * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs
+ */
+ if (newfrag->ofs > this->ofs) {
+ /* This node isn't completely obsoleted. The start of it remains valid */
+
+ /* Mark the new node and the partially covered node REF_NORMAL -- let
+ the GC take a look at them */
+ mark_ref_normal(newfrag->node->raw);
+ if (this->node)
+ mark_ref_normal(this->node->raw);
+
+ if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
+ /* The new node splits 'this' frag into two */
+ struct jffs2_node_frag *newfrag2;
+
+ if (this->node)
+ dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n",
+ this->ofs, this->ofs+this->size, ref_offset(this->node->raw));
+ else
+ dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n",
+ this->ofs, this->ofs+this->size);
+
+ /* New second frag pointing to this's node */
+ newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
+ this->ofs + this->size - newfrag->ofs - newfrag->size);
+ if (unlikely(!newfrag2))
+ return -ENOMEM;
+ if (this->node)
+ this->node->frags++;
+
+ /* Adjust size of original 'this' */
+ this->size = newfrag->ofs - this->ofs;
+
+ /* Now, we know there's no node with offset
+ greater than this->ofs but smaller than
+ newfrag2->ofs or newfrag->ofs, for obvious
+ reasons. So we can do a tree insert from
+ 'this' to insert newfrag, and a tree insert
+ from newfrag to insert newfrag2. */
+ jffs2_fragtree_insert(newfrag, this);
+ rb_insert_color(&newfrag->rb, root);
+
+ jffs2_fragtree_insert(newfrag2, newfrag);
+ rb_insert_color(&newfrag2->rb, root);
+
+ return 0;
+ }
+ /* New node just reduces 'this' frag in size, doesn't split it */
+ this->size = newfrag->ofs - this->ofs;
+
+ /* Again, we know it lives down here in the tree */
+ jffs2_fragtree_insert(newfrag, this);
+ rb_insert_color(&newfrag->rb, root);
+ } else {
+ /* New frag starts at the same point as 'this' used to. Replace
+ it in the tree without doing a delete and insertion */
+		dbg_fragtree2("inserting newfrag (*%p),%d-%d before 'this' (*%p),%d-%d\n",
+ newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size);
+
+ rb_replace_node(&this->rb, &newfrag->rb, root);
+
+ if (newfrag->ofs + newfrag->size >= this->ofs+this->size) {
+ dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size);
+ jffs2_obsolete_node_frag(c, this);
+ } else {
+ this->ofs += newfrag->size;
+ this->size -= newfrag->size;
+
+ jffs2_fragtree_insert(this, newfrag);
+ rb_insert_color(&this->rb, root);
+ return 0;
+ }
+ }
+ /* OK, now we have newfrag added in the correct place in the tree, but
+ frag_next(newfrag) may be a fragment which is overlapped by it
+ */
+ while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
+ /* 'this' frag is obsoleted completely. */
+ dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n",
+ this, this->ofs, this->ofs+this->size);
+ rb_erase(&this->rb, root);
+ jffs2_obsolete_node_frag(c, this);
+ }
+ /* Now we're pointing at the first frag which isn't totally obsoleted by
+ the new frag */
+
+ if (!this || newfrag->ofs + newfrag->size == this->ofs)
+ return 0;
+
+ /* Still some overlap but we don't need to move it in the tree */
+ this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
+ this->ofs = newfrag->ofs + newfrag->size;
+
+ /* And mark them REF_NORMAL so the GC takes a look at them */
+ if (this->node)
+ mark_ref_normal(this->node->raw);
+ mark_ref_normal(newfrag->node->raw);
+
+ return 0;
+}
+
+/*
+ * Given an inode, probably with existing tree of fragments, add the new node
+ * to the fragment tree.
+ */
+int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
- struct jffs2_raw_node_ref *ref, *valid_ref;
- struct jffs2_tmp_dnode_info *tn;
- struct rb_root ret_tn = RB_ROOT;
- struct jffs2_full_dirent *fd, *ret_fd = NULL;
- union jffs2_node_union node;
- size_t retlen;
- int err;
-
- *mctime_ver = 0;
-
- D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino));
+ int ret;
+ struct jffs2_node_frag *newfrag;
- spin_lock(&c->erase_completion_lock);
+ if (unlikely(!fn->size))
+ return 0;
- valid_ref = jffs2_first_valid_node(f->inocache->nodes);
+ newfrag = new_fragment(fn, fn->ofs, fn->size);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+ newfrag->node->frags = 1;
- if (!valid_ref && (f->inocache->ino != 1))
- printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino);
+ dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n",
+ fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag);
- while (valid_ref) {
- /* We can hold a pointer to a non-obsolete node without the spinlock,
- but _obsolete_ nodes may disappear at any time, if the block
- they're in gets erased. So if we mark 'ref' obsolete while we're
- not holding the lock, it can go away immediately. For that reason,
- we find the next valid node first, before processing 'ref'.
- */
- ref = valid_ref;
- valid_ref = jffs2_first_valid_node(ref->next_in_ino);
- spin_unlock(&c->erase_completion_lock);
+ ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag);
+ if (unlikely(ret))
+ return ret;
- cond_resched();
+ /* If we now share a page with other nodes, mark either previous
+ or next node REF_NORMAL, as appropriate. */
+ if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
+ struct jffs2_node_frag *prev = frag_prev(newfrag);
+
+ mark_ref_normal(fn->raw);
+ /* If we don't start at zero there's _always_ a previous */
+ if (prev->node)
+ mark_ref_normal(prev->node->raw);
+ }
+
+ if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
+ struct jffs2_node_frag *next = frag_next(newfrag);
+
+ if (next) {
+ mark_ref_normal(fn->raw);
+ if (next->node)
+ mark_ref_normal(next->node->raw);
+ }
+ }
+ jffs2_dbg_fragtree_paranoia_check_nolock(f);
+
+ return 0;
+}
+
+/*
+ * Check the data CRC of the node.
+ *
+ * Returns: 0 if the data CRC is correct;
+ * 1 - if incorrect;
+ *          error code if an error occurred.
+ */
+static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
+{
+ struct jffs2_raw_node_ref *ref = tn->fn->raw;
+ int err = 0, pointed = 0;
+ struct jffs2_eraseblock *jeb;
+ unsigned char *buffer;
+ uint32_t crc, ofs, retlen, len;
+
+ BUG_ON(tn->csize == 0);
+
+ if (!jffs2_is_writebuffered(c))
+ goto adj_acc;
+
+ /* Calculate how many bytes were already checked */
+ ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
+ len = ofs % c->wbuf_pagesize;
+ if (likely(len))
+ len = c->wbuf_pagesize - len;
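+	/* Worked example (illustrative): with wbuf_pagesize == 512 and the raw
+	 * inode at 0x1f0, the data starts at ofs = 0x1f0 + sizeof(struct
+	 * jffs2_raw_inode) (68 bytes) = 0x234, so ofs % 512 == 0x34 and the
+	 * first 512 - 0x34 = 0x1cc data bytes are already covered by
+	 * tn->partial_crc; only the remainder needs to be read back and
+	 * CRC-checked below. */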
+
+ if (len >= tn->csize) {
+ dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
+ ref_offset(ref), tn->csize, ofs);
+ goto adj_acc;
+ }
+
+ ofs += len;
+ len = tn->csize - len;
+
+ dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
+ ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);
+
+#ifndef __ECOS
+	/* TODO: instead, encapsulate the point() stuff in jffs2_flash_read(),
+	 * adding a jffs2_flash_read_end() interface. */
+ if (c->mtd->point) {
+ err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
+ if (!err && retlen < tn->csize) {
+ JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize);
+ c->mtd->unpoint(c->mtd, buffer, ofs, len);
+ } else if (err)
+ JFFS2_WARNING("MTD point failed: error code %d.\n", err);
+ else
+			pointed = 1; /* successfully pointed to device */
+ }
+#endif
+
+ if (!pointed) {
+ buffer = kmalloc(len, GFP_KERNEL);
+ if (unlikely(!buffer))
+ return -ENOMEM;
- /* FIXME: point() */
- err = jffs2_flash_read(c, (ref_offset(ref)),
- min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
- &retlen, (void *)&node);
+		/* TODO: this is a very frequent pattern, make it a separate
+ * routine */
+ err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
if (err) {
- printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
+ JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
goto free_out;
}
-
- /* Check we've managed to read at least the common node header */
- if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) {
- printk(KERN_WARNING "short read in get_inode_nodes()\n");
+ if (retlen != len) {
+ JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len);
err = -EIO;
goto free_out;
}
-
- switch (je16_to_cpu(node.u.nodetype)) {
- case JFFS2_NODETYPE_DIRENT:
- D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref)));
- if (ref_flags(ref) == REF_UNCHECKED) {
- printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref));
- BUG();
- }
- if (retlen < sizeof(node.d)) {
- printk(KERN_WARNING "short read in get_inode_nodes()\n");
- err = -EIO;
- goto free_out;
- }
- /* sanity check */
- if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) {
- printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n",
- ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen));
- jffs2_mark_node_obsolete(c, ref);
- spin_lock(&c->erase_completion_lock);
- continue;
- }
- if (je32_to_cpu(node.d.version) > *highest_version)
- *highest_version = je32_to_cpu(node.d.version);
- if (ref_obsolete(ref)) {
- /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
- printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n",
- ref_offset(ref));
- BUG();
- }
-
- fd = jffs2_alloc_full_dirent(node.d.nsize+1);
- if (!fd) {
- err = -ENOMEM;
- goto free_out;
- }
- fd->raw = ref;
- fd->version = je32_to_cpu(node.d.version);
- fd->ino = je32_to_cpu(node.d.ino);
- fd->type = node.d.type;
-
- /* Pick out the mctime of the latest dirent */
- if(fd->version > *mctime_ver) {
- *mctime_ver = fd->version;
- *latest_mctime = je32_to_cpu(node.d.mctime);
- }
+ }
- /* memcpy as much of the name as possible from the raw
- dirent we've already read from the flash
- */
- if (retlen > sizeof(struct jffs2_raw_dirent))
- memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent))));
-
- /* Do we need to copy any more of the name directly
- from the flash?
- */
- if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) {
- /* FIXME: point() */
- int already = retlen - sizeof(struct jffs2_raw_dirent);
-
- err = jffs2_flash_read(c, (ref_offset(ref)) + retlen,
- node.d.nsize - already, &retlen, &fd->name[already]);
- if (!err && retlen != node.d.nsize - already)
- err = -EIO;
-
- if (err) {
- printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err);
- jffs2_free_full_dirent(fd);
- goto free_out;
- }
- }
- fd->nhash = full_name_hash(fd->name, node.d.nsize);
- fd->next = NULL;
- fd->name[node.d.nsize] = '\0';
- /* Wheee. We now have a complete jffs2_full_dirent structure, with
- the name in it and everything. Link it into the list
- */
- D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino));
- jffs2_add_fd_to_list(c, fd, &ret_fd);
- break;
-
- case JFFS2_NODETYPE_INODE:
- D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref)));
- if (retlen < sizeof(node.i)) {
- printk(KERN_WARNING "read too short for dnode\n");
- err = -EIO;
- goto free_out;
- }
- if (je32_to_cpu(node.i.version) > *highest_version)
- *highest_version = je32_to_cpu(node.i.version);
- D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version));
-
- if (ref_obsolete(ref)) {
- /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
- printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n",
- ref_offset(ref));
- BUG();
- }
+ /* Continue calculating CRC */
+ crc = crc32(tn->partial_crc, buffer, len);
+ if(!pointed)
+ kfree(buffer);
+#ifndef __ECOS
+ else
+ c->mtd->unpoint(c->mtd, buffer, ofs, len);
+#endif
- /* If we've never checked the CRCs on this node, check them now. */
- if (ref_flags(ref) == REF_UNCHECKED) {
- uint32_t crc, len;
- struct jffs2_eraseblock *jeb;
-
- crc = crc32(0, &node, sizeof(node.i)-8);
- if (crc != je32_to_cpu(node.i.node_crc)) {
- printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ref_offset(ref), je32_to_cpu(node.i.node_crc), crc);
- jffs2_mark_node_obsolete(c, ref);
- spin_lock(&c->erase_completion_lock);
- continue;
- }
-
- /* sanity checks */
- if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) ||
- PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) {
- printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino %d, version %d, isize %d, csize %d, dsize %d \n",
- ref_offset(ref), je32_to_cpu(node.i.totlen), je32_to_cpu(node.i.ino),
- je32_to_cpu(node.i.version), je32_to_cpu(node.i.isize),
- je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize));
- jffs2_mark_node_obsolete(c, ref);
- spin_lock(&c->erase_completion_lock);
- continue;
- }
+ if (crc != tn->data_crc) {
+ JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
+ ofs, tn->data_crc, crc);
+ return 1;
+ }
- if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) {
- unsigned char *buf=NULL;
- uint32_t pointed = 0;
-#ifndef __ECOS
- if (c->mtd->point) {
- err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
- &retlen, &buf);
- if (!err && retlen < je32_to_cpu(node.i.csize)) {
- D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
- c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
- } else if (err){
- D1(printk(KERN_DEBUG "MTD point failed %d\n", err));
- } else
- pointed = 1; /* succefully pointed to device */
- }
-#endif
- if(!pointed){
- buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
- &retlen, buf);
- if (!err && retlen != je32_to_cpu(node.i.csize))
- err = -EIO;
- if (err) {
- kfree(buf);
- return err;
- }
- }
- crc = crc32(0, buf, je32_to_cpu(node.i.csize));
- if(!pointed)
- kfree(buf);
+adj_acc:
+ jeb = &c->blocks[ref->flash_offset / c->sector_size];
+ len = ref_totlen(c, jeb, ref);
+
+ /*
+ * Mark the node as having been checked and fix the
+ * accounting accordingly.
+ */
+ spin_lock(&c->erase_completion_lock);
+ jeb->used_size += len;
+ jeb->unchecked_size -= len;
+ c->used_size += len;
+ c->unchecked_size -= len;
+ spin_unlock(&c->erase_completion_lock);
+
+ return 0;
+
+free_out:
+ if(!pointed)
+ kfree(buffer);
#ifndef __ECOS
- else
- c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
+ else
+ c->mtd->unpoint(c->mtd, buffer, ofs, len);
#endif
+ return err;
+}
- if (crc != je32_to_cpu(node.i.data_crc)) {
- printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ref_offset(ref), je32_to_cpu(node.i.data_crc), crc);
- jffs2_mark_node_obsolete(c, ref);
- spin_lock(&c->erase_completion_lock);
- continue;
- }
-
- }
+/*
+ * Helper function for jffs2_add_older_frag_to_fragtree().
+ *
+ * Checks the node's data CRC if the node is still REF_UNCHECKED.
+ */
+static inline int check_node(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn)
+{
+ int ret;
- /* Mark the node as having been checked and fix the accounting accordingly */
- spin_lock(&c->erase_completion_lock);
- jeb = &c->blocks[ref->flash_offset / c->sector_size];
- len = ref_totlen(c, jeb, ref);
-
- jeb->used_size += len;
- jeb->unchecked_size -= len;
- c->used_size += len;
- c->unchecked_size -= len;
-
- /* If node covers at least a whole page, or if it starts at the
- beginning of a page and runs to the end of the file, or if
- it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
-
- If it's actually overlapped, it'll get made NORMAL (or OBSOLETE)
- when the overlapping node(s) get added to the tree anyway.
- */
- if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) ||
- ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) &&
- (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) == je32_to_cpu(node.i.isize)))) {
- D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref)));
- ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
- } else {
- D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref)));
- ref->flash_offset = ref_offset(ref) | REF_NORMAL;
- }
- spin_unlock(&c->erase_completion_lock);
+ BUG_ON(ref_obsolete(tn->fn->raw));
+
+ /* We only check the data CRC of unchecked nodes */
+ if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
+ return 0;
+
+ dbg_fragtree2("check node %#04x-%#04x, phys offs %#08x.\n",
+ tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));
+
+ ret = check_node_data(c, tn);
+ if (unlikely(ret < 0)) {
+ JFFS2_ERROR("check_node_data() returned error: %d.\n",
+ ret);
+ } else if (unlikely(ret > 0)) {
+ dbg_fragtree2("CRC error, mark it obsolete.\n");
+ jffs2_mark_node_obsolete(c, tn->fn->raw);
+ }
+
+ return ret;
+}
+
+/*
+ * Helper function for jffs2_add_older_frag_to_fragtree().
+ *
+ * Called when the new fragment that is being inserted
+ * splits a hole fragment.
+ */
+static int split_hole(struct jffs2_sb_info *c, struct rb_root *root,
+ struct jffs2_node_frag *newfrag, struct jffs2_node_frag *hole)
+{
+ dbg_fragtree2("fragment %#04x-%#04x splits the hole %#04x-%#04x\n",
+ newfrag->ofs, newfrag->ofs + newfrag->size, hole->ofs, hole->ofs + hole->size);
+
+ if (hole->ofs == newfrag->ofs) {
+ /*
+ * Well, the new fragment actually starts at the same offset as
+ * the hole.
+ */
+ if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) {
+ /*
+ * We replace the overlapped left part of the hole by
+ * the new node.
+ */
+
+ dbg_fragtree2("insert fragment %#04x-%#04x and cut the left part of the hole\n",
+ newfrag->ofs, newfrag->ofs + newfrag->size);
+ rb_replace_node(&hole->rb, &newfrag->rb, root);
+
+ hole->ofs += newfrag->size;
+ hole->size -= newfrag->size;
+
+ /*
+ * We know that 'hole' should be the right hand
+ * fragment.
+ */
+ jffs2_fragtree_insert(hole, newfrag);
+ rb_insert_color(&hole->rb, root);
+ } else {
+ /*
+ * Ah, the new fragment is of the same size as the hole.
+			 * Replace the hole with it.
+ */
+ dbg_fragtree2("insert fragment %#04x-%#04x and overwrite hole\n",
+ newfrag->ofs, newfrag->ofs + newfrag->size);
+ rb_replace_node(&hole->rb, &newfrag->rb, root);
+ jffs2_free_node_frag(hole);
+ }
+ } else {
+		/* The new fragment leaves some hole space at the left */
+
+ struct jffs2_node_frag * newfrag2 = NULL;
+
+ if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) {
+			/* The new frag also leaves some space at the right */
+ newfrag2 = new_fragment(NULL, newfrag->ofs +
+ newfrag->size, hole->ofs + hole->size
+ - newfrag->ofs - newfrag->size);
+ if (unlikely(!newfrag2)) {
+ jffs2_free_node_frag(newfrag);
+ return -ENOMEM;
}
+ }
+
+ hole->size = newfrag->ofs - hole->ofs;
+		dbg_fragtree2("left the hole %#04x-%#04x at the left and insert fragment %#04x-%#04x\n",
+ hole->ofs, hole->ofs + hole->size, newfrag->ofs, newfrag->ofs + newfrag->size);
+
+ jffs2_fragtree_insert(newfrag, hole);
+ rb_insert_color(&newfrag->rb, root);
+
+ if (newfrag2) {
+ dbg_fragtree2("left the hole %#04x-%#04x at the right\n",
+ newfrag2->ofs, newfrag2->ofs + newfrag2->size);
+ jffs2_fragtree_insert(newfrag2, newfrag);
+ rb_insert_color(&newfrag2->rb, root);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function is used when we build the inode. It expects the nodes to be
+ * passed in decreasing version order. The whole point of this is to improve
+ * inode checking on NAND: we check a node's data CRC only when it is not
+ * obsoleted. Previously, the add_frag_to_fragtree() function was used, nodes
+ * were passed to it in increasing version order, and the CRCs of all nodes
+ * were checked.
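+ * For example (illustrative): if data nodes with versions 5, 4 and 3 all
+ * cover the same range, only the version-5 node ends up in the fragtree and
+ * the two older ones are obsoleted without their data CRCs ever being read.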
+ *
+ * Note: tn->fn->size shouldn't be zero.
+ *
+ * Returns 0 if the node was inserted
+ * 1 if it wasn't inserted (since it is obsolete)
+ *	    < 0 if an error occurred
+ */
+int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_tmp_dnode_info *tn)
+{
+ struct jffs2_node_frag *this, *newfrag;
+ uint32_t lastend;
+ struct jffs2_full_dnode *fn = tn->fn;
+ struct rb_root *root = &f->fragtree;
+ uint32_t fn_size = fn->size, fn_ofs = fn->ofs;
+ int err, checked = 0;
+ int ref_flag;
+
+ dbg_fragtree("insert fragment %#04x-%#04x, ver %u\n", fn_ofs, fn_ofs + fn_size, tn->version);
+
+	/* Skip all the nodes which end before this one starts */
+ this = jffs2_lookup_node_frag(root, fn_ofs);
+ if (this)
+ dbg_fragtree2("'this' found %#04x-%#04x (%s)\n", this->ofs, this->ofs + this->size, this->node ? "data" : "hole");
+
+ if (this)
+ lastend = this->ofs + this->size;
+ else
+ lastend = 0;
+
+ /* Detect the preliminary type of node */
+ if (fn->size >= PAGE_CACHE_SIZE)
+ ref_flag = REF_PRISTINE;
+ else
+ ref_flag = REF_NORMAL;
+
+ /* See if we ran off the end of the root */
+ if (lastend <= fn_ofs) {
+ /* We did */
+
+ /*
+ * We are going to insert the new node into the
+ * fragment tree, so check it.
+ */
+ err = check_node(c, f, tn);
+ if (err != 0)
+ return err;
+
+ fn->frags = 1;
+
+ newfrag = new_fragment(fn, fn_ofs, fn_size);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+
+ err = no_overlapping_node(c, root, newfrag, this, lastend);
+ if (unlikely(err != 0)) {
+ jffs2_free_node_frag(newfrag);
+ return err;
+ }
+
+ goto out_ok;
+ }
- tn = jffs2_alloc_tmp_dnode_info();
- if (!tn) {
- D1(printk(KERN_DEBUG "alloc tn failed\n"));
- err = -ENOMEM;
- goto free_out;
+ fn->frags = 0;
+
+ while (1) {
+ /*
+ * Here we have:
+ * fn_ofs < this->ofs + this->size && fn_ofs >= this->ofs.
+ *
+		 * Remember, 'this' has a higher version; any non-hole node
+		 * which is already in the fragtree is newer than the newly
+		 * inserted one.
+ */
+ if (!this->node) {
+ /*
+ * 'this' is the hole fragment, so at least the
+ * beginning of the new fragment is valid.
+ */
+
+ /*
+ * We are going to insert the new node into the
+ * fragment tree, so check it.
+ */
+ if (!checked) {
+ err = check_node(c, f, tn);
+ if (unlikely(err != 0))
+ return err;
+ checked = 1;
}
- tn->fn = jffs2_alloc_full_dnode();
- if (!tn->fn) {
- D1(printk(KERN_DEBUG "alloc fn failed\n"));
- err = -ENOMEM;
- jffs2_free_tmp_dnode_info(tn);
- goto free_out;
+ if (this->ofs + this->size >= fn_ofs + fn_size) {
+				/* We split the hole into two parts */
+
+ fn->frags += 1;
+ newfrag = new_fragment(fn, fn_ofs, fn_size);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+
+ err = split_hole(c, root, newfrag, this);
+ if (unlikely(err))
+ return err;
+ goto out_ok;
}
- tn->version = je32_to_cpu(node.i.version);
- tn->fn->ofs = je32_to_cpu(node.i.offset);
- /* There was a bug where we wrote hole nodes out with
- csize/dsize swapped. Deal with it */
- if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize))
- tn->fn->size = je32_to_cpu(node.i.csize);
- else // normal case...
- tn->fn->size = je32_to_cpu(node.i.dsize);
- tn->fn->raw = ref;
- D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n",
- ref_offset(ref), je32_to_cpu(node.i.version),
- je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize)));
- jffs2_add_tn_to_tree(tn, &ret_tn);
- break;
-
- default:
- if (ref_flags(ref) == REF_UNCHECKED) {
- struct jffs2_eraseblock *jeb;
- uint32_t len;
-
- printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n",
- je16_to_cpu(node.u.nodetype), ref_offset(ref));
-
- /* Mark the node as having been checked and fix the accounting accordingly */
- spin_lock(&c->erase_completion_lock);
- jeb = &c->blocks[ref->flash_offset / c->sector_size];
- len = ref_totlen(c, jeb, ref);
-
- jeb->used_size += len;
- jeb->unchecked_size -= len;
- c->used_size += len;
- c->unchecked_size -= len;
-
- mark_ref_normal(ref);
- spin_unlock(&c->erase_completion_lock);
+
+ /*
+ * The beginning of the new fragment is valid since it
+ * overlaps the hole node.
+ */
+
+ ref_flag = REF_NORMAL;
+
+ fn->frags += 1;
+ newfrag = new_fragment(fn, fn_ofs,
+ this->ofs + this->size - fn_ofs);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+
+ if (fn_ofs == this->ofs) {
+ /*
+ * The new node starts at the same offset as
+				 * the hole and supersedes it.
+ */
+ dbg_fragtree2("add the new fragment instead of hole %#04x-%#04x, refcnt %d\n",
+ fn_ofs, fn_ofs + this->ofs + this->size - fn_ofs, fn->frags);
+
+ rb_replace_node(&this->rb, &newfrag->rb, root);
+ jffs2_free_node_frag(this);
+ } else {
+ /*
+ * The hole becomes shorter as its right part
+				 * is superseded by the new fragment.
+ */
+ dbg_fragtree2("reduce size of hole %#04x-%#04x to %#04x-%#04x\n",
+ this->ofs, this->ofs + this->size, this->ofs, this->ofs + this->size - newfrag->size);
+
+ dbg_fragtree2("add new fragment %#04x-%#04x, refcnt %d\n", fn_ofs,
+ fn_ofs + this->ofs + this->size - fn_ofs, fn->frags);
+
+ this->size -= newfrag->size;
+ jffs2_fragtree_insert(newfrag, this);
+ rb_insert_color(&newfrag->rb, root);
}
- node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype));
- if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) {
- /* Hmmm. This should have been caught at scan time. */
- printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n",
- ref_offset(ref));
- printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n",
- je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen),
- je32_to_cpu(node.u.hdr_crc));
- jffs2_mark_node_obsolete(c, ref);
- } else switch(je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) {
- case JFFS2_FEATURE_INCOMPAT:
- printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
- /* EEP */
- BUG();
- break;
- case JFFS2_FEATURE_ROCOMPAT:
- printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
- if (!(c->flags & JFFS2_SB_FLAG_RO))
- BUG();
- break;
- case JFFS2_FEATURE_RWCOMPAT_COPY:
- printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
- break;
- case JFFS2_FEATURE_RWCOMPAT_DELETE:
- printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
- jffs2_mark_node_obsolete(c, ref);
- break;
+
+ fn_ofs += newfrag->size;
+ fn_size -= newfrag->size;
+ this = rb_entry(rb_next(&newfrag->rb),
+ struct jffs2_node_frag, rb);
+
+ dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n",
+ this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)");
+ }
+
+ /*
+ * 'This' node is not the hole so it obsoletes the new fragment
+ * either fully or partially.
+ */
+ if (this->ofs + this->size >= fn_ofs + fn_size) {
+ /* The new node is obsolete, drop it */
+ if (fn->frags == 0) {
+ dbg_fragtree2("%#04x-%#04x is obsolete, mark it obsolete\n", fn_ofs, fn_ofs + fn_size);
+ ref_flag = REF_OBSOLETE;
}
+ goto out_ok;
+ } else {
+ struct jffs2_node_frag *new_this;
+
+ /* 'This' node obsoletes the beginning of the new node */
+ dbg_fragtree2("the beginning %#04x-%#04x is obsolete\n", fn_ofs, this->ofs + this->size);
+
+ ref_flag = REF_NORMAL;
+
+ fn_size -= this->ofs + this->size - fn_ofs;
+ fn_ofs = this->ofs + this->size;
+ dbg_fragtree2("now considering %#04x-%#04x\n", fn_ofs, fn_ofs + fn_size);
+
+ new_this = rb_entry(rb_next(&this->rb), struct jffs2_node_frag, rb);
+ if (!new_this) {
+ /*
+ * There is no next fragment. Add the rest of
+ * the new node as the right-hand child.
+ */
+ if (!checked) {
+ err = check_node(c, f, tn);
+ if (unlikely(err != 0))
+ return err;
+ checked = 1;
+ }
+ fn->frags += 1;
+ newfrag = new_fragment(fn, fn_ofs, fn_size);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+
+ dbg_fragtree2("there are no more fragments, insert %#04x-%#04x\n",
+ newfrag->ofs, newfrag->ofs + newfrag->size);
+ rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
+ rb_insert_color(&newfrag->rb, root);
+ goto out_ok;
+ } else {
+ this = new_this;
+ dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n",
+ this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)");
+ }
}
- spin_lock(&c->erase_completion_lock);
+ }
+
+out_ok:
+ BUG_ON(fn->size < PAGE_CACHE_SIZE && ref_flag == REF_PRISTINE);
+ if (ref_flag == REF_OBSOLETE) {
+ dbg_fragtree2("the node is obsolete now\n");
+ /* jffs2_mark_node_obsolete() will adjust space accounting */
+ jffs2_mark_node_obsolete(c, fn->raw);
+ return 1;
}
+
+ dbg_fragtree2("the node is \"%s\" now\n", ref_flag == REF_NORMAL ? "REF_NORMAL" : "REF_PRISTINE");
+
+ /* Space accounting was adjusted at check_node_data() */
+ spin_lock(&c->erase_completion_lock);
+ fn->raw->flash_offset = ref_offset(fn->raw) | ref_flag;
spin_unlock(&c->erase_completion_lock);
- *tnp = ret_tn;
- *fdp = ret_fd;
return 0;
-
- free_out:
- jffs2_free_tmp_dnode_info_list(&ret_tn);
- jffs2_free_full_dirent_list(ret_fd);
- return err;
}
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
@@ -499,24 +862,21 @@ void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache
/* During mount, this needs no locking. During normal operation, its
callers want to do other stuff while still holding the inocache_lock.
- Rather than introducing special case get_ino_cache functions or
+ Rather than introducing special case get_ino_cache functions or
callbacks, we just let the caller do the locking itself. */
-
+
struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
struct jffs2_inode_cache *ret;
- D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino));
-
ret = c->inocache_list[ino % INOCACHE_HASHSIZE];
while (ret && ret->ino < ino) {
ret = ret->next;
}
-
+
if (ret && ret->ino != ino)
ret = NULL;
- D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino));
return ret;
}
@@ -528,7 +888,7 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new
if (!new->ino)
new->ino = ++c->highest_ino;
- D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino));
+ dbg_inocache("add %p (ino #%u)\n", new, new->ino);
prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];
@@ -544,11 +904,12 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new
void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
{
struct jffs2_inode_cache **prev;
- D1(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino));
+
+ dbg_inocache("del %p (ino #%u)\n", old, old->ino);
spin_lock(&c->inocache_lock);
-
+
prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];
-
+
while ((*prev) && (*prev)->ino < old->ino) {
prev = &(*prev)->next;
}
@@ -558,7 +919,7 @@ void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
/* Free it now unless it's in READING or CLEARING state, which
are the transitions upon read_inode() and clear_inode(). The
- rest of the time we know nobody else is looking at it, and
+ rest of the time we know nobody else is looking at it, and
if it's held by read_inode() or clear_inode() they'll free it
for themselves. */
if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING)
@@ -571,7 +932,7 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c)
{
int i;
struct jffs2_inode_cache *this, *next;
-
+
for (i=0; i<INOCACHE_HASHSIZE; i++) {
this = c->inocache_list[i];
while (this) {
@@ -598,38 +959,30 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
c->blocks[i].first_node = c->blocks[i].last_node = NULL;
}
}
-
+
struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
{
- /* The common case in lookup is that there will be a node
+ /* The common case in lookup is that there will be a node
which precisely matches. So we go looking for that first */
struct rb_node *next;
struct jffs2_node_frag *prev = NULL;
struct jffs2_node_frag *frag = NULL;
- D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset));
+ dbg_fragtree2("root %p, offset %d\n", fragtree, offset);
next = fragtree->rb_node;
while(next) {
frag = rb_entry(next, struct jffs2_node_frag, rb);
- D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n",
- frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right));
if (frag->ofs + frag->size <= offset) {
- D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n",
- frag->ofs, frag->ofs+frag->size));
/* Remember the closest smaller match on the way down */
if (!prev || frag->ofs > prev->ofs)
prev = frag;
next = frag->rb.rb_right;
} else if (frag->ofs > offset) {
- D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n",
- frag->ofs, frag->ofs+frag->size));
next = frag->rb.rb_left;
} else {
- D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n",
- frag->ofs, frag->ofs+frag->size));
return frag;
}
}
@@ -638,11 +991,11 @@ struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_
and return the closest smaller one */
if (prev)
- D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n",
- prev->ofs, prev->ofs+prev->size));
- else
- D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n"));
-
+ dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n",
+ prev->ofs, prev->ofs+prev->size);
+ else
+ dbg_fragtree2("returning NULL, empty fragtree\n");
+
return prev;
}
@@ -656,39 +1009,32 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
if (!root->rb_node)
return;
- frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));
+ dbg_fragtree("killing\n");
+ frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));
while(frag) {
if (frag->rb.rb_left) {
- D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n",
- frag, frag->ofs, frag->ofs+frag->size));
frag = frag_left(frag);
continue;
}
if (frag->rb.rb_right) {
- D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n",
- frag, frag->ofs, frag->ofs+frag->size));
frag = frag_right(frag);
continue;
}
- D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n",
- frag->ofs, frag->ofs+frag->size, frag->node,
- frag->node?frag->node->frags:0));
-
if (frag->node && !(--frag->node->frags)) {
- /* Not a hole, and it's the final remaining frag
+ /* Not a hole, and it's the final remaining frag
of this node. Free the node */
if (c)
jffs2_mark_node_obsolete(c, frag->node->raw);
-
+
jffs2_free_full_dnode(frag->node);
}
parent = frag_parent(frag);
if (parent) {
if (frag_left(parent) == frag)
parent->rb.rb_left = NULL;
- else
+ else
parent->rb.rb_right = NULL;
}
@@ -698,29 +1044,3 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
cond_resched();
}
}
-
-void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
-{
- struct rb_node *parent = &base->rb;
- struct rb_node **link = &parent;
-
- D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag,
- newfrag->ofs, newfrag->ofs+newfrag->size, base));
-
- while (*link) {
- parent = *link;
- base = rb_entry(parent, struct jffs2_node_frag, rb);
-
- D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs));
- if (newfrag->ofs > base->ofs)
- link = &base->rb.rb_right;
- else if (newfrag->ofs < base->ofs)
- link = &base->rb.rb_left;
- else {
- printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
- BUG();
- }
- }
-
- rb_link_node(&newfrag->rb, &base->rb, link);
-}
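
The lookup retained above in jffs2_lookup_node_frag() walks the red-black tree and, when no fragment covers the requested offset exactly, falls back to the closest fragment that starts below it. A minimal userspace sketch of that same policy over a sorted array follows; the names and the array-based layout are illustrative only, not the kernel's rb-tree code:

#include <stdio.h>
#include <stdint.h>

struct frag { uint32_t ofs, size; };

/* Return the fragment containing 'offset', or the closest fragment that
 * ends at or before it, or NULL if nothing starts below 'offset'. */
static const struct frag *lookup_frag(const struct frag *v, int n, uint32_t offset)
{
        const struct frag *prev = NULL;
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;
                const struct frag *f = &v[mid];

                if (f->ofs + f->size <= offset) {
                        prev = f;          /* remember the closest smaller match */
                        lo = mid + 1;      /* go right */
                } else if (f->ofs > offset) {
                        hi = mid - 1;      /* go left */
                } else {
                        return f;          /* exact covering fragment */
                }
        }
        return prev;
}

int main(void)
{
        struct frag tree[] = { { 0, 0x100 }, { 0x100, 0x80 }, { 0x200, 0x40 } };
        const struct frag *f = lookup_frag(tree, 3, 0x180);

        if (f)
                printf("closest frag: %#x-%#x\n", f->ofs, f->ofs + f->size);
        return 0;
}
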
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index b34c397909e..23a67bb3052 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: nodelist.h,v 1.131 2005/07/05 21:03:07 dwmw2 Exp $
+ * $Id: nodelist.h,v 1.140 2005/09/07 08:34:54 havasi Exp $
*
*/
@@ -20,30 +20,15 @@
#include <linux/jffs2.h>
#include <linux/jffs2_fs_sb.h>
#include <linux/jffs2_fs_i.h>
+#include "summary.h"
#ifdef __ECOS
#include "os-ecos.h"
#else
-#include <linux/mtd/compatmac.h> /* For min/max in older kernels */
+#include <linux/mtd/compatmac.h> /* For compatibility with older kernels */
#include "os-linux.h"
#endif
-#ifndef CONFIG_JFFS2_FS_DEBUG
-#define CONFIG_JFFS2_FS_DEBUG 1
-#endif
-
-#if CONFIG_JFFS2_FS_DEBUG > 0
-#define D1(x) x
-#else
-#define D1(x)
-#endif
-
-#if CONFIG_JFFS2_FS_DEBUG > 1
-#define D2(x) x
-#else
-#define D2(x)
-#endif
-
#define JFFS2_NATIVE_ENDIAN
/* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from
@@ -73,14 +58,17 @@
#define je16_to_cpu(x) (le16_to_cpu(x.v16))
#define je32_to_cpu(x) (le32_to_cpu(x.v32))
#define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m)))
-#else
+#else
#error wibble
#endif
+/* The minimal node header size */
+#define JFFS2_MIN_NODE_HEADER sizeof(struct jffs2_raw_dirent)
+
/*
This is all we need to keep in-core for each raw node during normal
operation. As and when we do read_inode on a particular inode, we can
- scan the nodes which are listed for it and build up a proper map of
+ scan the nodes which are listed for it and build up a proper map of
which nodes are currently valid. JFFSv1 always used to keep that whole
map in core for each inode.
*/
@@ -97,7 +85,7 @@ struct jffs2_raw_node_ref
/* flash_offset & 3 always has to be zero, because nodes are
always aligned at 4 bytes. So we have a couple of extra bits
- to play with, which indicate the node's status; see below: */
+ to play with, which indicate the node's status; see below: */
#define REF_UNCHECKED 0 /* We haven't yet checked the CRC or built its inode */
#define REF_OBSOLETE 1 /* Obsolete, can be completely ignored */
#define REF_PRISTINE 2 /* Completely clean. GC without looking */
@@ -110,7 +98,7 @@ struct jffs2_raw_node_ref
/* For each inode in the filesystem, we need to keep a record of
nlink, because it would be a PITA to scan the whole directory tree
at read_inode() time to calculate it, and to keep sufficient information
- in the raw_node_ref (basically both parent and child inode number for
+ in the raw_node_ref (basically both parent and child inode number for
dirent nodes) would take more space than this does. We also keep
a pointer to the first physical node which is part of this inode, too.
*/
@@ -140,7 +128,7 @@ struct jffs2_inode_cache {
#define INOCACHE_HASHSIZE 128
/*
- Larger representation of a raw node, kept in-core only when the
+ Larger representation of a raw node, kept in-core only when the
struct inode for this particular ino is instantiated.
*/
@@ -150,11 +138,11 @@ struct jffs2_full_dnode
uint32_t ofs; /* The offset to which the data of this node belongs */
uint32_t size;
uint32_t frags; /* Number of fragments which currently refer
- to this node. When this reaches zero,
+ to this node. When this reaches zero,
the node is obsolete. */
};
-/*
+/*
Even larger representation of a raw node, kept in-core only while
we're actually building up the original map of which nodes go where,
in read_inode()
@@ -164,7 +152,10 @@ struct jffs2_tmp_dnode_info
struct rb_node rb;
struct jffs2_full_dnode *fn;
uint32_t version;
-};
+ uint32_t data_crc;
+ uint32_t partial_crc;
+ uint32_t csize;
+};
struct jffs2_full_dirent
{
@@ -178,7 +169,7 @@ struct jffs2_full_dirent
};
/*
- Fragments - used to build a map of which raw node to obtain
+ Fragments - used to build a map of which raw node to obtain
data from for each part of the ino
*/
struct jffs2_node_frag
@@ -207,86 +198,18 @@ struct jffs2_eraseblock
struct jffs2_raw_node_ref *gc_node; /* Next node to be garbage collected */
};
-#define ACCT_SANITY_CHECK(c, jeb) do { \
- struct jffs2_eraseblock *___j = jeb; \
- if ((___j) && ___j->used_size + ___j->dirty_size + ___j->free_size + ___j->wasted_size + ___j->unchecked_size != c->sector_size) { \
- printk(KERN_NOTICE "Eeep. Space accounting for block at 0x%08x is screwed\n", ___j->offset); \
- printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + wasted %08x + unchecked %08x != total %08x\n", \
- ___j->free_size, ___j->dirty_size, ___j->used_size, ___j->wasted_size, ___j->unchecked_size, c->sector_size); \
- BUG(); \
- } \
- if (c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + c->wasted_size + c->unchecked_size != c->flash_size) { \
- printk(KERN_NOTICE "Eeep. Space accounting superblock info is screwed\n"); \
- printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + erasing %08x + bad %08x + wasted %08x + unchecked %08x != total %08x\n", \
- c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->wasted_size, c->unchecked_size, c->flash_size); \
- BUG(); \
- } \
-} while(0)
-
-static inline void paranoia_failed_dump(struct jffs2_eraseblock *jeb)
+static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c)
{
- struct jffs2_raw_node_ref *ref;
- int i=0;
-
- printk(KERN_NOTICE);
- for (ref = jeb->first_node; ref; ref = ref->next_phys) {
- printk("%08x->", ref_offset(ref));
- if (++i == 8) {
- i = 0;
- printk("\n" KERN_NOTICE);
- }
- }
- printk("\n");
+ return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024);
}
-
-#define ACCT_PARANOIA_CHECK(jeb) do { \
- uint32_t my_used_size = 0; \
- uint32_t my_unchecked_size = 0; \
- struct jffs2_raw_node_ref *ref2 = jeb->first_node; \
- while (ref2) { \
- if (unlikely(ref2->flash_offset < jeb->offset || \
- ref2->flash_offset > jeb->offset + c->sector_size)) { \
- printk(KERN_NOTICE "Node %08x shouldn't be in block at %08x!\n", \
- ref_offset(ref2), jeb->offset); \
- paranoia_failed_dump(jeb); \
- BUG(); \
- } \
- if (ref_flags(ref2) == REF_UNCHECKED) \
- my_unchecked_size += ref_totlen(c, jeb, ref2); \
- else if (!ref_obsolete(ref2)) \
- my_used_size += ref_totlen(c, jeb, ref2); \
- if (unlikely((!ref2->next_phys) != (ref2 == jeb->last_node))) { \
- if (!ref2->next_phys) \
- printk("ref for node at %p (phys %08x) has next_phys->%p (----), last_node->%p (phys %08x)\n", \
- ref2, ref_offset(ref2), ref2->next_phys, \
- jeb->last_node, ref_offset(jeb->last_node)); \
- else \
- printk("ref for node at %p (phys %08x) has next_phys->%p (%08x), last_node->%p (phys %08x)\n", \
- ref2, ref_offset(ref2), ref2->next_phys, ref_offset(ref2->next_phys), \
- jeb->last_node, ref_offset(jeb->last_node)); \
- paranoia_failed_dump(jeb); \
- BUG(); \
- } \
- ref2 = ref2->next_phys; \
- } \
- if (my_used_size != jeb->used_size) { \
- printk(KERN_NOTICE "Calculated used size %08x != stored used size %08x\n", my_used_size, jeb->used_size); \
- BUG(); \
- } \
- if (my_unchecked_size != jeb->unchecked_size) { \
- printk(KERN_NOTICE "Calculated unchecked size %08x != stored unchecked size %08x\n", my_unchecked_size, jeb->unchecked_size); \
- BUG(); \
- } \
- } while(0)
-
/* Calculate totlen from surrounding nodes or eraseblock */
static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
struct jffs2_eraseblock *jeb,
struct jffs2_raw_node_ref *ref)
{
uint32_t ref_end;
-
+
if (ref->next_phys)
ref_end = ref_offset(ref->next_phys);
else {
@@ -306,11 +229,13 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
{
uint32_t ret;
- D1(if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
+#if CONFIG_JFFS2_FS_DEBUG > 0
+ if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref));
BUG();
- })
+ }
+#endif
#if 1
ret = ref->__totlen;
@@ -323,14 +248,13 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
ret, ref->__totlen);
if (!jeb)
jeb = &c->blocks[ref->flash_offset / c->sector_size];
- paranoia_failed_dump(jeb);
+ jffs2_dbg_dump_node_refs_nolock(c, jeb);
BUG();
}
#endif
return ret;
}
-
#define ALLOC_NORMAL 0 /* Normal allocation */
#define ALLOC_DELETION 1 /* Deletion node. Best to allow it */
#define ALLOC_GC 2 /* Space requested for GC. Give it or die */
@@ -340,7 +264,7 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
#define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2))
/* check if dirty space is more than 255 Byte */
-#define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
+#define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
#define PAD(x) (((x)+3)&~3)
@@ -384,12 +308,7 @@ static inline struct jffs2_node_frag *frag_last(struct rb_root *root)
#define frag_erase(frag, list) rb_erase(&frag->rb, list);
/* nodelist.c */
-D2(void jffs2_print_frag_list(struct jffs2_inode_info *f));
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list);
-int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- struct rb_root *tnp, struct jffs2_full_dirent **fdp,
- uint32_t *highest_version, uint32_t *latest_mctime,
- uint32_t *mctime_ver);
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state);
struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new);
@@ -398,19 +317,23 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c);
void jffs2_free_raw_node_refs(struct jffs2_sb_info *c);
struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset);
void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete);
-void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base);
struct rb_node *rb_next(struct rb_node *);
struct rb_node *rb_prev(struct rb_node *);
void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
+void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this);
+int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
+void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
+int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn);
/* nodemgmt.c */
int jffs2_thread_should_wake(struct jffs2_sb_info *c);
-int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio);
-int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+ uint32_t *len, int prio, uint32_t sumsize);
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+ uint32_t *len, uint32_t sumsize);
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new);
void jffs2_complete_reservation(struct jffs2_sb_info *c);
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw);
-void jffs2_dump_block_lists(struct jffs2_sb_info *c);
/* write.c */
int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri);
@@ -418,17 +341,15 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint
struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode);
struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode);
int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- struct jffs2_raw_inode *ri, unsigned char *buf,
+ struct jffs2_raw_inode *ri, unsigned char *buf,
uint32_t offset, uint32_t writelen, uint32_t *retlen);
int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen);
-int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f);
-int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen);
+int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f, uint32_t time);
+int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time);
/* readinode.c */
-void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
-int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
-int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
uint32_t ino, struct jffs2_raw_inode *latest_node);
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
@@ -468,6 +389,10 @@ char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
/* scan.c */
int jffs2_scan_medium(struct jffs2_sb_info *c);
void jffs2_rotate_lists(struct jffs2_sb_info *c);
+int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
+ uint32_t ofs, uint32_t len);
+struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
+int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
/* build.c */
int jffs2_do_mount_fs(struct jffs2_sb_info *c);
@@ -483,4 +408,6 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc
int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
#endif
+#include "debug.h"
+
#endif /* __JFFS2_NODELIST_H__ */
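
nodelist.h keeps relying on the trick its comment describes: nodes are always 4-byte aligned, so the two low bits of flash_offset are free to carry the REF_* state that the readinode changes above set and test. Below is a small self-contained sketch of that packing; the helper bodies are a guess at the idea, not copies of the real ref_offset()/ref_flags() macros:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

#define REF_UNCHECKED 0
#define REF_OBSOLETE  1
#define REF_PRISTINE  2
#define REF_NORMAL    3

static inline uint32_t ref_offset(uint32_t flash_offset)
{
        return flash_offset & ~3u;        /* strip the status bits */
}

static inline uint32_t ref_flags(uint32_t flash_offset)
{
        return flash_offset & 3u;         /* low two bits hold the state */
}

int main(void)
{
        uint32_t ofs = 0x00041200;        /* node offsets are 4-byte aligned */
        uint32_t packed;

        assert((ofs & 3) == 0);
        packed = ofs | REF_PRISTINE;

        printf("offset %#x, state %u\n", ref_offset(packed), ref_flags(packed));
        return 0;
}
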
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index c1d8b5ed9ab..49127a1f045 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: nodemgmt.c,v 1.122 2005/05/06 09:30:27 dedekind Exp $
+ * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $
*
*/
@@ -17,6 +17,7 @@
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
+#include "debug.h"
/**
* jffs2_reserve_space - request physical space to write nodes to flash
@@ -38,9 +39,11 @@
* for the requested allocation.
*/
-static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ uint32_t *ofs, uint32_t *len, uint32_t sumsize);
-int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+ uint32_t *len, int prio, uint32_t sumsize)
{
int ret = -EAGAIN;
int blocksneeded = c->resv_blocks_write;
@@ -85,12 +88,12 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs
up(&c->alloc_sem);
return -ENOSPC;
}
-
+
/* Calc possibly available space. Possibly available means that we
* don't know, if unchecked size contains obsoleted nodes, which could give us some
* more usable space. This will affect the sum only once, as gc first finishes checking
* of nodes.
- + Return -ENOSPC, if the maximum possibly available space is less or equal than
+ * Return -ENOSPC if the maximum possibly available space is less than or equal to
* blocksneeded * sector_size.
* This blocks endless gc looping on a filesystem, which is nearly full, even if
* the check above passes.
@@ -115,7 +118,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs
c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
spin_unlock(&c->erase_completion_lock);
-
+
ret = jffs2_garbage_collect_pass(c);
if (ret)
return ret;
@@ -129,7 +132,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs
spin_lock(&c->erase_completion_lock);
}
- ret = jffs2_do_reserve_space(c, minsize, ofs, len);
+ ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
}
@@ -140,7 +143,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs
return ret;
}
-int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+ uint32_t *len, uint32_t sumsize)
{
int ret = -EAGAIN;
minsize = PAD(minsize);
@@ -149,7 +153,7 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *
spin_lock(&c->erase_completion_lock);
while(ret == -EAGAIN) {
- ret = jffs2_do_reserve_space(c, minsize, ofs, len);
+ ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
}
@@ -158,105 +162,185 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *
return ret;
}
-/* Called with alloc sem _and_ erase_completion_lock */
-static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
+
+/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */
+
+static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
- struct jffs2_eraseblock *jeb = c->nextblock;
-
- restart:
- if (jeb && minsize > jeb->free_size) {
- /* Skip the end of this block and file it as having some dirty space */
- /* If there's a pending write to it, flush now */
- if (jffs2_wbuf_dirty(c)) {
+
+ /* Check, if we have a dirty block now, or if it was dirty already */
+ if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
+ c->dirty_size += jeb->wasted_size;
+ c->wasted_size -= jeb->wasted_size;
+ jeb->dirty_size += jeb->wasted_size;
+ jeb->wasted_size = 0;
+ if (VERYDIRTY(c, jeb->dirty_size)) {
+ D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ list_add_tail(&jeb->list, &c->very_dirty_list);
+ } else {
+ D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ list_add_tail(&jeb->list, &c->dirty_list);
+ }
+ } else {
+ D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ list_add_tail(&jeb->list, &c->clean_list);
+ }
+ c->nextblock = NULL;
+
+}
+
+/* Select a new jeb for nextblock */
+
+static int jffs2_find_nextblock(struct jffs2_sb_info *c)
+{
+ struct list_head *next;
+
+ /* Take the next block off the 'free' list */
+
+ if (list_empty(&c->free_list)) {
+
+ if (!c->nr_erasing_blocks &&
+ !list_empty(&c->erasable_list)) {
+ struct jffs2_eraseblock *ejeb;
+
+ ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
+ list_del(&ejeb->list);
+ list_add_tail(&ejeb->list, &c->erase_pending_list);
+ c->nr_erasing_blocks++;
+ jffs2_erase_pending_trigger(c);
+ D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
+ ejeb->offset));
+ }
+
+ if (!c->nr_erasing_blocks &&
+ !list_empty(&c->erasable_pending_wbuf_list)) {
+ D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
+ /* c->nextblock is NULL, no update to c->nextblock allowed */
spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
- jeb = c->nextblock;
- goto restart;
+ /* Have another go. It'll be on the erasable_list now */
+ return -EAGAIN;
}
- c->wasted_size += jeb->free_size;
- c->free_size -= jeb->free_size;
- jeb->wasted_size += jeb->free_size;
- jeb->free_size = 0;
-
- /* Check, if we have a dirty block now, or if it was dirty already */
- if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
- c->dirty_size += jeb->wasted_size;
- c->wasted_size -= jeb->wasted_size;
- jeb->dirty_size += jeb->wasted_size;
- jeb->wasted_size = 0;
- if (VERYDIRTY(c, jeb->dirty_size)) {
- D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
- list_add_tail(&jeb->list, &c->very_dirty_list);
- } else {
- D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
- list_add_tail(&jeb->list, &c->dirty_list);
- }
- } else {
- D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
- list_add_tail(&jeb->list, &c->clean_list);
+
+ if (!c->nr_erasing_blocks) {
+ /* Ouch. We're in GC, or we wouldn't have got here.
+ And there's no space left. At all. */
+ printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
+ c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
+ list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
+ return -ENOSPC;
}
- c->nextblock = jeb = NULL;
+
+ spin_unlock(&c->erase_completion_lock);
+ /* Don't wait for it; just erase one right now */
+ jffs2_erase_pending_blocks(c, 1);
+ spin_lock(&c->erase_completion_lock);
+
+ /* An erase may have failed, decreasing the
+ amount of free space available. So we must
+ restart from the beginning */
+ return -EAGAIN;
}
-
- if (!jeb) {
- struct list_head *next;
- /* Take the next block off the 'free' list */
- if (list_empty(&c->free_list)) {
+ next = c->free_list.next;
+ list_del(next);
+ c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
+ c->nr_free_blocks--;
- if (!c->nr_erasing_blocks &&
- !list_empty(&c->erasable_list)) {
- struct jffs2_eraseblock *ejeb;
+ jffs2_sum_reset_collected(c->summary); /* reset collected summary */
- ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
- list_del(&ejeb->list);
- list_add_tail(&ejeb->list, &c->erase_pending_list);
- c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
- D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
- ejeb->offset));
+ D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
+
+ return 0;
+}
+
+/* Called with alloc sem _and_ erase_completion_lock */
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
+{
+ struct jffs2_eraseblock *jeb = c->nextblock;
+ uint32_t reserved_size; /* for summary information at the end of the jeb */
+ int ret;
+
+ restart:
+ reserved_size = 0;
+
+ if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
+ /* NOSUM_SIZE means not to generate summary */
+
+ if (jeb) {
+ reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
+ dbg_summary("minsize=%d , jeb->free=%d ,"
+ "summary->size=%d , sumsize=%d\n",
+ minsize, jeb->free_size,
+ c->summary->sum_size, sumsize);
+ }
+
+ /* Is there enough space for writing out the current node, or do we have to
+ write out the summary information now, close this jeb and select a new nextblock? */
+ if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
+ JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {
+
+ /* Has summary been disabled for this jeb? */
+ if (jffs2_sum_is_disabled(c->summary)) {
+ sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
+ goto restart;
}
- if (!c->nr_erasing_blocks &&
- !list_empty(&c->erasable_pending_wbuf_list)) {
- D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
- /* c->nextblock is NULL, no update to c->nextblock allowed */
+ /* Writing out the collected summary information */
+ dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
+ ret = jffs2_sum_write_sumnode(c);
+
+ if (ret)
+ return ret;
+
+ if (jffs2_sum_is_disabled(c->summary)) {
+ /* jffs2_write_sumnode() couldn't write out the summary information.
+ Disable summary for this jeb and free the collected information.
+ */
+ sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
+ goto restart;
+ }
+
+ jffs2_close_nextblock(c, jeb);
+ jeb = NULL;
+ /* always keep a valid value in reserved_size */
+ reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
+ }
+ } else {
+ if (jeb && minsize > jeb->free_size) {
+ /* Skip the end of this block and file it as having some dirty space */
+ /* If there's a pending write to it, flush now */
+
+ if (jffs2_wbuf_dirty(c)) {
spin_unlock(&c->erase_completion_lock);
+ D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
jffs2_flush_wbuf_pad(c);
spin_lock(&c->erase_completion_lock);
- /* Have another go. It'll be on the erasable_list now */
- return -EAGAIN;
+ jeb = c->nextblock;
+ goto restart;
}
- if (!c->nr_erasing_blocks) {
- /* Ouch. We're in GC, or we wouldn't have got here.
- And there's no space left. At all. */
- printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
- c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
- list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
- return -ENOSPC;
- }
-
- spin_unlock(&c->erase_completion_lock);
- /* Don't wait for it; just erase one right now */
- jffs2_erase_pending_blocks(c, 1);
- spin_lock(&c->erase_completion_lock);
+ c->wasted_size += jeb->free_size;
+ c->free_size -= jeb->free_size;
+ jeb->wasted_size += jeb->free_size;
+ jeb->free_size = 0;
- /* An erase may have failed, decreasing the
- amount of free space available. So we must
- restart from the beginning */
- return -EAGAIN;
+ jffs2_close_nextblock(c, jeb);
+ jeb = NULL;
}
+ }
+
+ if (!jeb) {
- next = c->free_list.next;
- list_del(next);
- c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
- c->nr_free_blocks--;
+ ret = jffs2_find_nextblock(c);
+ if (ret)
+ return ret;
+
+ jeb = c->nextblock;
if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
@@ -266,13 +350,13 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui
/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
enough space */
*ofs = jeb->offset + (c->sector_size - jeb->free_size);
- *len = jeb->free_size;
+ *len = jeb->free_size - reserved_size;
if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
!jeb->first_node->next_in_ino) {
- /* Only node in it beforehand was a CLEANMARKER node (we think).
+ /* Only node in it beforehand was a CLEANMARKER node (we think).
So mark it obsolete now that there's going to be another node
- in the block. This will reduce used_size to zero but We've
+ in the block. This will reduce used_size to zero but we've
already set c->nextblock so that jffs2_mark_node_obsolete()
won't try to refile it to the dirty_list.
*/
@@ -292,12 +376,12 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui
* @len: length of this physical node
* @dirty: dirty flag for new node
*
- * Should only be used to report nodes for which space has been allocated
+ * Should only be used to report nodes for which space has been allocated
* by jffs2_reserve_space.
*
* Must be called with the alloc_sem held.
*/
-
+
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
struct jffs2_eraseblock *jeb;
@@ -349,8 +433,8 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r
list_add_tail(&jeb->list, &c->clean_list);
c->nextblock = NULL;
}
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_acct_sanity_check_nolock(c,jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
spin_unlock(&c->erase_completion_lock);
@@ -404,8 +488,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
!(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
- /* Hm. This may confuse static lock analysis. If any of the above
- three conditions is false, we're going to return from this
+ /* Hm. This may confuse static lock analysis. If any of the above
+ three conditions is false, we're going to return from this
function without actually obliterating any nodes or freeing
any jffs2_raw_node_refs. So we don't need to stop erases from
happening, or protect against people holding an obsolete
@@ -430,7 +514,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
BUG();
})
- D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
+ D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
jeb->used_size -= ref_totlen(c, jeb, ref);
c->used_size -= ref_totlen(c, jeb, ref);
}
@@ -462,18 +546,17 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
D1(printk(KERN_DEBUG "Wasting\n"));
addedsize = 0;
jeb->wasted_size += ref_totlen(c, jeb, ref);
- c->wasted_size += ref_totlen(c, jeb, ref);
+ c->wasted_size += ref_totlen(c, jeb, ref);
}
ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
-
- ACCT_SANITY_CHECK(c, jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
if (c->flags & JFFS2_SB_FLAG_SCANNING) {
/* Flash scanning is in progress. Don't muck about with the block
lists because they're not ready yet, and don't actually
- obliterate nodes that look obsolete. If they weren't
+ obliterate nodes that look obsolete. If they weren't
marked obsolete on the flash at the time they _became_
obsolete, there was probably a reason for that. */
spin_unlock(&c->erase_completion_lock);
@@ -507,7 +590,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
immediately reused, and we spread the load a bit. */
D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
list_add_tail(&jeb->list, &c->erasable_list);
- }
+ }
}
D1(printk(KERN_DEBUG "Done OK\n"));
} else if (jeb == c->gcblock) {
@@ -525,8 +608,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
list_add_tail(&jeb->list, &c->very_dirty_list);
} else {
D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
- }
+ jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ }
spin_unlock(&c->erase_completion_lock);
@@ -573,11 +656,11 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
/* Nodes which have been marked obsolete no longer need to be
associated with any inode. Remove them from the per-inode list.
-
- Note we can't do this for NAND at the moment because we need
+
+ Note we can't do this for NAND at the moment because we need
obsolete dirent nodes to stay on the lists, because of the
horridness in jffs2_garbage_collect_deletion_dirent(). Also
- because we delete the inocache, and on NAND we need that to
+ because we delete the inocache, and on NAND we need that to
stay around until all the nodes are actually erased, in order
to stop us from giving the same inode number to another newly
created inode. */
@@ -606,7 +689,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
if (ref->next_phys && ref_obsolete(ref->next_phys) &&
!ref->next_phys->next_in_ino) {
struct jffs2_raw_node_ref *n = ref->next_phys;
-
+
spin_lock(&c->erase_completion_lock);
ref->__totlen += n->__totlen;
@@ -620,7 +703,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
jffs2_free_raw_node_ref(n);
}
-
+
/* Also merge with the previous node in the list, if there is one
and that one is obsolete */
if (ref != jeb->first_node ) {
@@ -630,7 +713,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
while (p->next_phys != ref)
p = p->next_phys;
-
+
if (ref_obsolete(p) && !ref->next_in_ino) {
p->__totlen += ref->__totlen;
if (jeb->last_node == ref) {
@@ -649,164 +732,6 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
up(&c->erase_free_sem);
}
-#if CONFIG_JFFS2_FS_DEBUG >= 2
-void jffs2_dump_block_lists(struct jffs2_sb_info *c)
-{
-
-
- printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
- printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
- printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
- printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
- printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
- printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
- printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
- printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
- printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
- printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
- printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write);
-
- if (c->nextblock) {
- printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
- } else {
- printk(KERN_DEBUG "nextblock: NULL\n");
- }
- if (c->gcblock) {
- printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
- } else {
- printk(KERN_DEBUG "gcblock: NULL\n");
- }
- if (list_empty(&c->clean_list)) {
- printk(KERN_DEBUG "clean_list: empty\n");
- } else {
- struct list_head *this;
- int numblocks = 0;
- uint32_t dirty = 0;
-
- list_for_each(this, &c->clean_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- numblocks ++;
- dirty += jeb->wasted_size;
- printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
- }
- if (list_empty(&c->very_dirty_list)) {
- printk(KERN_DEBUG "very_dirty_list: empty\n");
- } else {
- struct list_head *this;
- int numblocks = 0;
- uint32_t dirty = 0;
-
- list_for_each(this, &c->very_dirty_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- numblocks ++;
- dirty += jeb->dirty_size;
- printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
- numblocks, dirty, dirty / numblocks);
- }
- if (list_empty(&c->dirty_list)) {
- printk(KERN_DEBUG "dirty_list: empty\n");
- } else {
- struct list_head *this;
- int numblocks = 0;
- uint32_t dirty = 0;
-
- list_for_each(this, &c->dirty_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- numblocks ++;
- dirty += jeb->dirty_size;
- printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
- numblocks, dirty, dirty / numblocks);
- }
- if (list_empty(&c->erasable_list)) {
- printk(KERN_DEBUG "erasable_list: empty\n");
- } else {
- struct list_head *this;
-
- list_for_each(this, &c->erasable_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- }
- if (list_empty(&c->erasing_list)) {
- printk(KERN_DEBUG "erasing_list: empty\n");
- } else {
- struct list_head *this;
-
- list_for_each(this, &c->erasing_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- }
- if (list_empty(&c->erase_pending_list)) {
- printk(KERN_DEBUG "erase_pending_list: empty\n");
- } else {
- struct list_head *this;
-
- list_for_each(this, &c->erase_pending_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- }
- if (list_empty(&c->erasable_pending_wbuf_list)) {
- printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
- } else {
- struct list_head *this;
-
- list_for_each(this, &c->erasable_pending_wbuf_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- }
- if (list_empty(&c->free_list)) {
- printk(KERN_DEBUG "free_list: empty\n");
- } else {
- struct list_head *this;
-
- list_for_each(this, &c->free_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- }
- if (list_empty(&c->bad_list)) {
- printk(KERN_DEBUG "bad_list: empty\n");
- } else {
- struct list_head *this;
-
- list_for_each(this, &c->bad_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- }
- if (list_empty(&c->bad_used_list)) {
- printk(KERN_DEBUG "bad_used_list: empty\n");
- } else {
- struct list_head *this;
-
- list_for_each(this, &c->bad_used_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
- jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
- }
- }
-}
-#endif /* CONFIG_JFFS2_FS_DEBUG */
-
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
int ret = 0;
@@ -828,11 +753,11 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
*/
dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
- if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
- (dirty > c->nospc_dirty_size))
+ if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
+ (dirty > c->nospc_dirty_size))
ret = 1;
- D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
+ D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
return ret;
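
The reworked jffs2_do_reserve_space() above holds back room at the end of each eraseblock for the summary node before handing out *len. Here is a hedged sketch of just that arithmetic, with PAD as defined in nodelist.h; the frame-size constant is a placeholder, not the real JFFS2_SUMMARY_FRAME_SIZE value:

#include <stdio.h>
#include <stdint.h>

#define PAD(x) (((x) + 3) & ~3u)
#define SUM_FRAME_SIZE 12u   /* placeholder for JFFS2_SUMMARY_FRAME_SIZE */

/* Return 1 if a node of 'minsize' bytes still fits in a block with
 * 'free_size' bytes left, once room for the summary collected so far
 * plus this node's summary record ('sumsize') is reserved at the end. */
static int node_fits(uint32_t free_size, uint32_t minsize,
                     uint32_t collected_sum, uint32_t sumsize)
{
        uint32_t reserved = PAD(collected_sum + sumsize + SUM_FRAME_SIZE);

        return PAD(minsize) + reserved <= free_size;
}

int main(void)
{
        /* 300 bytes of node data, 40 bytes of summary collected so far,
         * and this node adds a 20-byte summary record */
        printf("fits with 1024 bytes free: %d\n", node_fits(1024, 300, 40, 20));
        printf("fits with  360 bytes free: %d\n", node_fits(360, 300, 40, 20));
        return 0;
}
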
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index d900c8929b0..59e7a393200 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: os-linux.h,v 1.58 2005/07/12 02:34:35 tpoynor Exp $
+ * $Id: os-linux.h,v 1.64 2005/09/30 13:59:13 dedekind Exp $
*
*/
@@ -57,6 +57,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
f->fragtree = RB_ROOT;
f->metadata = NULL;
f->dents = NULL;
+ f->target = NULL;
f->flags = 0;
f->usercompr = 0;
}
@@ -64,17 +65,24 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+#define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) )
#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
-#define SECTOR_ADDR(x) ( ((unsigned long)(x) & ~(c->sector_size-1)) )
+
+
+#ifdef CONFIG_JFFS2_SUMMARY
+#define jffs2_can_mark_obsolete(c) (0)
+#else
#define jffs2_can_mark_obsolete(c) (1)
+#endif
+
#define jffs2_is_writebuffered(c) (0)
#define jffs2_cleanmarker_oob(c) (0)
#define jffs2_write_nand_cleanmarker(c,jeb) (-EIO)
-#define jffs2_flash_write(c, ofs, len, retlen, buf) ((c)->mtd->write((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf)
#define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf))
-#define jffs2_flush_wbuf_pad(c) ({ (void)(c), 0; })
-#define jffs2_flush_wbuf_gc(c, i) ({ (void)(c), (void) i, 0; })
+#define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; })
+#define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; })
#define jffs2_write_nand_badblock(c,jeb,bad_offset) (1)
#define jffs2_nand_flash_setup(c) (0)
#define jffs2_nand_flash_cleanup(c) do {} while(0)
@@ -84,16 +92,26 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
#define jffs2_wbuf_process NULL
#define jffs2_nor_ecc(c) (0)
#define jffs2_dataflash(c) (0)
+#define jffs2_nor_wbuf_flash(c) (0)
#define jffs2_nor_ecc_flash_setup(c) (0)
#define jffs2_nor_ecc_flash_cleanup(c) do {} while (0)
#define jffs2_dataflash_setup(c) (0)
#define jffs2_dataflash_cleanup(c) do {} while (0)
+#define jffs2_nor_wbuf_flash_setup(c) (0)
+#define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0)
#else /* NAND and/or ECC'd NOR support present */
#define jffs2_is_writebuffered(c) (c->wbuf != NULL)
-#define SECTOR_ADDR(x) ( ((unsigned long)(x) / (unsigned long)(c->sector_size)) * c->sector_size )
-#define jffs2_can_mark_obsolete(c) ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & MTD_ECC)) || c->mtd->type == MTD_RAM)
+
+#ifdef CONFIG_JFFS2_SUMMARY
+#define jffs2_can_mark_obsolete(c) (0)
+#else
+#define jffs2_can_mark_obsolete(c) \
+ ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & (MTD_ECC|MTD_PROGRAM_REGIONS))) || \
+ c->mtd->type == MTD_RAM)
+#endif
+
#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf))
@@ -123,6 +141,10 @@ void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c);
int jffs2_dataflash_setup(struct jffs2_sb_info *c);
void jffs2_dataflash_cleanup(struct jffs2_sb_info *c);
+#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_PROGRAM_REGIONS))
+int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
+void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
+
#endif /* WRITEBUFFER */
/* erase.c */
@@ -169,20 +191,21 @@ void jffs2_gc_release_inode(struct jffs2_sb_info *c,
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
int inum, int nlink);
-unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
- struct jffs2_inode_info *f,
+unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
+ struct jffs2_inode_info *f,
unsigned long offset,
unsigned long *priv);
void jffs2_gc_release_page(struct jffs2_sb_info *c,
unsigned char *pg,
unsigned long *priv);
void jffs2_flash_cleanup(struct jffs2_sb_info *c);
-
+
/* writev.c */
-int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
+int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen);
-
+int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
+ size_t *retlen, const u_char *buf);
#endif /* __JFFS2_OS_LINUX_H__ */
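
The os-linux.h hunk above unifies SECTOR_ADDR into a divide-and-multiply form, which stays correct when the sector size is not a power of two, unlike the old mask-based variant it replaces. A minimal illustration with made-up numbers:

#include <stdio.h>
#include <stdint.h>

/* Round 'addr' down to the start of its eraseblock. The divide/multiply
 * form works for any sector size; the mask form is only valid when
 * sector_size is a power of two. */
static uint32_t sector_addr(uint32_t addr, uint32_t sector_size)
{
        return (addr / sector_size) * sector_size;
}

int main(void)
{
        uint32_t sector_size = 66 * 1024;   /* not a power of two */
        uint32_t addr = 200000;

        printf("divide/multiply:   %#x\n", sector_addr(addr, sector_size));
        printf("mask (wrong here): %#x\n", addr & ~(sector_size - 1));
        return 0;
}
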
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
index c7f9068907c..f3b86da833b 100644
--- a/fs/jffs2/read.c
+++ b/fs/jffs2/read.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: read.c,v 1.39 2005/03/01 10:34:03 dedekind Exp $
+ * $Id: read.c,v 1.42 2005/11/07 11:14:41 gleixner Exp $
*
*/
@@ -43,7 +43,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
}
if (readlen != sizeof(*ri)) {
jffs2_free_raw_inode(ri);
- printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
+ printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
ref_offset(fd->raw), sizeof(*ri), readlen);
return -EIO;
}
@@ -61,7 +61,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
}
/* There was a bug where we wrote hole nodes out with csize/dsize
swapped. Deal with it */
- if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) &&
+ if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) &&
je32_to_cpu(ri->csize)) {
ri->dsize = ri->csize;
ri->csize = cpu_to_je32(0);
@@ -74,7 +74,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
goto out_ri;
});
-
+
if (ri->compr == JFFS2_COMPR_ZERO) {
memset(buf, 0, len);
goto out_ri;
@@ -82,8 +82,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
/* Cases:
Reading whole node and it's uncompressed - read directly to buffer provided, check CRC.
- Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided
- Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy
+ Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided
+ Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy
Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy
*/
if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) {
@@ -129,7 +129,7 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc));
if (ri->compr != JFFS2_COMPR_NONE) {
D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n",
- je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf));
+ je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf));
ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
if (ret) {
printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret);
@@ -174,7 +174,6 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
if (frag) {
D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
holesize = min(holesize, frag->ofs - offset);
- D2(jffs2_print_frag_list(f));
}
D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
memset(buf, 0, holesize);
@@ -192,7 +191,7 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
} else {
uint32_t readlen;
uint32_t fragofs; /* offset within the frag to start reading */
-
+
fragofs = offset - frag->ofs;
readlen = min(frag->size - fragofs, end - offset);
D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n",
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 5b2a83599d7..5f0652df5d4 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -7,11 +7,12 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: readinode.c,v 1.125 2005/07/10 13:13:55 dedekind Exp $
+ * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $
*
*/
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/crc32.h>
@@ -20,502 +21,631 @@
#include <linux/compiler.h>
#include "nodelist.h"
-static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag);
-
-#if CONFIG_JFFS2_FS_DEBUG >= 2
-static void jffs2_print_fragtree(struct rb_root *list, int permitbug)
+/*
+ * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in
+ * order of increasing version.
+ */
+static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
{
- struct jffs2_node_frag *this = frag_first(list);
- uint32_t lastofs = 0;
- int buggy = 0;
-
- while(this) {
- if (this->node)
- printk(KERN_DEBUG "frag %04x-%04x: 0x%08x(%d) on flash (*%p). left (%p), right (%p), parent (%p)\n",
- this->ofs, this->ofs+this->size, ref_offset(this->node->raw), ref_flags(this->node->raw),
- this, frag_left(this), frag_right(this), frag_parent(this));
- else
- printk(KERN_DEBUG "frag %04x-%04x: hole (*%p). left (%p} right (%p), parent (%p)\n", this->ofs,
- this->ofs+this->size, this, frag_left(this), frag_right(this), frag_parent(this));
- if (this->ofs != lastofs)
- buggy = 1;
- lastofs = this->ofs+this->size;
- this = frag_next(this);
+ struct rb_node **p = &list->rb_node;
+ struct rb_node * parent = NULL;
+ struct jffs2_tmp_dnode_info *this;
+
+ while (*p) {
+ parent = *p;
+ this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
+
+ /* There may be a collision here, but it doesn't
+ actually matter. As long as the two nodes with the same
+ version are together, it's all fine. */
+ if (tn->version > this->version)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
}
- if (buggy && !permitbug) {
- printk(KERN_CRIT "Frag tree got a hole in it\n");
- BUG();
+
+ rb_link_node(&tn->rb, parent, p);
+ rb_insert_color(&tn->rb, list);
+}
+
+static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
+{
+ struct rb_node *this;
+ struct jffs2_tmp_dnode_info *tn;
+
+ this = list->rb_node;
+
+ /* Now at bottom of tree */
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
+ jffs2_free_full_dnode(tn->fn);
+ jffs2_free_tmp_dnode_info(tn);
+
+ this = this->rb_parent;
+ if (!this)
+ break;
+
+ if (this->rb_left == &tn->rb)
+ this->rb_left = NULL;
+ else if (this->rb_right == &tn->rb)
+ this->rb_right = NULL;
+ else BUG();
+ }
}
+ list->rb_node = NULL;
}
-void jffs2_print_frag_list(struct jffs2_inode_info *f)
+static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
- jffs2_print_fragtree(&f->fragtree, 0);
+ struct jffs2_full_dirent *next;
- if (f->metadata) {
- printk(KERN_DEBUG "metadata at 0x%08x\n", ref_offset(f->metadata->raw));
+ while (fd) {
+ next = fd->next;
+ jffs2_free_full_dirent(fd);
+ fd = next;
}
}
-#endif
-#if CONFIG_JFFS2_FS_DEBUG >= 1
-static int jffs2_sanitycheck_fragtree(struct jffs2_inode_info *f)
+/* Returns first valid node after 'ref'. May return 'ref' */
+static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
- struct jffs2_node_frag *frag;
- int bitched = 0;
-
- for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
+ while (ref && ref->next_in_ino) {
+ if (!ref_obsolete(ref))
+ return ref;
+ dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
+ ref = ref->next_in_ino;
+ }
+ return NULL;
+}
- struct jffs2_full_dnode *fn = frag->node;
- if (!fn || !fn->raw)
- continue;
+/*
+ * Helper function for jffs2_get_inode_nodes().
+ * It is called every time a directory entry node is found.
+ *
+ * Returns: 0 on success;
+ * 1 if the node should be marked obsolete;
+ * negative error code on failure.
+ */
+static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
+ struct jffs2_raw_dirent *rd, uint32_t read, struct jffs2_full_dirent **fdp,
+ uint32_t *latest_mctime, uint32_t *mctime_ver)
+{
+ struct jffs2_full_dirent *fd;
+
+ /* The direntry nodes are checked during the flash scanning */
+ BUG_ON(ref_flags(ref) == REF_UNCHECKED);
+ /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
+ BUG_ON(ref_obsolete(ref));
+
+ /* Sanity check */
+ if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
+ JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
+ ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
+ return 1;
+ }
- if (ref_flags(fn->raw) == REF_PRISTINE) {
+ fd = jffs2_alloc_full_dirent(rd->nsize + 1);
+ if (unlikely(!fd))
+ return -ENOMEM;
- if (fn->frags > 1) {
- printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2\n", ref_offset(fn->raw), fn->frags);
- bitched = 1;
- }
- /* A hole node which isn't multi-page should be garbage-collected
- and merged anyway, so we just check for the frag size here,
- rather than mucking around with actually reading the node
- and checking the compression type, which is the real way
- to tell a hole node. */
- if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
- printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2\n",
- ref_offset(fn->raw));
- bitched = 1;
- }
+ fd->raw = ref;
+ fd->version = je32_to_cpu(rd->version);
+ fd->ino = je32_to_cpu(rd->ino);
+ fd->type = rd->type;
- if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
- printk(KERN_WARNING "REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2\n",
- ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
- bitched = 1;
- }
- }
+ /* Pick out the mctime of the latest dirent */
+ if(fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) {
+ *mctime_ver = fd->version;
+ *latest_mctime = je32_to_cpu(rd->mctime);
}
-
- if (bitched) {
- struct jffs2_node_frag *thisfrag;
-
- printk(KERN_WARNING "Inode is #%u\n", f->inocache->ino);
- thisfrag = frag_first(&f->fragtree);
- while (thisfrag) {
- if (!thisfrag->node) {
- printk("Frag @0x%x-0x%x; node-less hole\n",
- thisfrag->ofs, thisfrag->size + thisfrag->ofs);
- } else if (!thisfrag->node->raw) {
- printk("Frag @0x%x-0x%x; raw-less hole\n",
- thisfrag->ofs, thisfrag->size + thisfrag->ofs);
- } else {
- printk("Frag @0x%x-0x%x; raw at 0x%08x(%d) (0x%x-0x%x)\n",
- thisfrag->ofs, thisfrag->size + thisfrag->ofs,
- ref_offset(thisfrag->node->raw), ref_flags(thisfrag->node->raw),
- thisfrag->node->ofs, thisfrag->node->ofs+thisfrag->node->size);
- }
- thisfrag = frag_next(thisfrag);
- }
- }
- return bitched;
-}
-#endif /* D1 */
-static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
-{
- if (this->node) {
- this->node->frags--;
- if (!this->node->frags) {
- /* The node has no valid frags left. It's totally obsoleted */
- D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n",
- ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size));
- jffs2_mark_node_obsolete(c, this->node->raw);
- jffs2_free_full_dnode(this->node);
- } else {
- D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n",
- ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size,
- this->node->frags));
- mark_ref_normal(this->node->raw);
+ /*
+ * Copy as much of the name as possible from the raw
+ * dirent we've already read from the flash.
+ */
+ if (read > sizeof(*rd))
+ memcpy(&fd->name[0], &rd->name[0],
+ min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
+
+ /* Do we need to copy any more of the name directly from the flash? */
+ if (rd->nsize + sizeof(*rd) > read) {
+ /* FIXME: point() */
+ int err;
+ int already = read - sizeof(*rd);
+
+ err = jffs2_flash_read(c, (ref_offset(ref)) + read,
+ rd->nsize - already, &read, &fd->name[already]);
+ if (unlikely(read != rd->nsize - already) && likely(!err))
+ return -EIO;
+
+ if (unlikely(err)) {
+ JFFS2_ERROR("read remainder of name: error %d\n", err);
+ jffs2_free_full_dirent(fd);
+ return -EIO;
}
-
}
- jffs2_free_node_frag(this);
+
+ fd->nhash = full_name_hash(fd->name, rd->nsize);
+ fd->next = NULL;
+ fd->name[rd->nsize] = '\0';
+
+ /*
+ * Wheee. We now have a complete jffs2_full_dirent structure, with
+ * the name in it and everything. Link it into the list
+ */
+ jffs2_add_fd_to_list(c, fd, fdp);
+
+ return 0;
}
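A note on the name handling in read_direntry() above: only the part of the name that arrived together with the header is copied from the buffer, and the remainder is fetched from flash. The split is plain arithmetic; the standalone sketch below uses invented sizes and a memcpy standing in for jffs2_flash_read(), so it illustrates the bookkeeping only, not the real on-flash dirent layout.

/* Sketch of splitting a name between "already read with the header" and
 * "still on flash". Sizes are invented; flash is simulated with a buffer. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_LEN 16u   /* stand-in for sizeof(struct jffs2_raw_dirent) */

int main(void)
{
    /* Simulated flash image: a 16-byte header followed by the name. */
    unsigned char flash[64];
    memset(flash, 0, sizeof(flash));
    memcpy(flash + HDR_LEN, "longfilename.txt", 16);

    uint32_t nsize = 16;          /* name length recorded in the header */
    uint32_t read = 24;           /* only 24 bytes were read with the header */
    char name[32];

    /* Part of the name that was already read along with the header. */
    uint32_t already = read - HDR_LEN;
    if (already > nsize)
        already = nsize;
    memcpy(name, flash + HDR_LEN, already);

    /* The rest still has to come from flash (simulated here). */
    if (nsize > already)
        memcpy(name + already, flash + HDR_LEN + already, nsize - already);

    name[nsize] = '\0';
    printf("already=%u, fetched %u more, name=\"%s\"\n",
           already, nsize - already, name);
    return 0;
}

Running it prints already=8, fetched 8 more, which matches a 24-byte initial read against the assumed 16-byte header.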
-/* Given an inode, probably with existing list of fragments, add the new node
- * to the fragment list.
+/*
+ * Helper function for jffs2_get_inode_nodes().
+ * It is called every time an inode node is found.
+ *
+ * Returns: 0 on success;
+ * 1 if the node should be marked obsolete;
+ * negative error code on failure.
*/
-int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
+static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
+ struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen,
+ uint32_t *latest_mctime, uint32_t *mctime_ver)
{
- int ret;
- struct jffs2_node_frag *newfrag;
-
- D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn));
+ struct jffs2_tmp_dnode_info *tn;
+ uint32_t len, csize;
+ int ret = 1;
- if (unlikely(!fn->size))
- return 0;
+ /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
+ BUG_ON(ref_obsolete(ref));
- newfrag = jffs2_alloc_node_frag();
- if (unlikely(!newfrag))
+ tn = jffs2_alloc_tmp_dnode_info();
+ if (!tn) {
+ JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn));
return -ENOMEM;
+ }
- D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n",
- fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag));
-
- newfrag->ofs = fn->ofs;
- newfrag->size = fn->size;
- newfrag->node = fn;
- newfrag->node->frags = 1;
+ tn->partial_crc = 0;
+ csize = je32_to_cpu(rd->csize);
- ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag);
- if (ret)
- return ret;
+ /* If we've never checked the CRCs on this node, check them now */
+ if (ref_flags(ref) == REF_UNCHECKED) {
+ uint32_t crc;
- /* If we now share a page with other nodes, mark either previous
- or next node REF_NORMAL, as appropriate. */
- if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
- struct jffs2_node_frag *prev = frag_prev(newfrag);
+ crc = crc32(0, rd, sizeof(*rd) - 8);
+ if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
+ JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
+ ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
+ goto free_out;
+ }
- mark_ref_normal(fn->raw);
- /* If we don't start at zero there's _always_ a previous */
- if (prev->node)
- mark_ref_normal(prev->node->raw);
- }
+ /* Sanity checks */
+ if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
+ unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
+ JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
+ jffs2_dbg_dump_node(c, ref_offset(ref));
+ goto free_out;
+ }
- if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
- struct jffs2_node_frag *next = frag_next(newfrag);
-
- if (next) {
- mark_ref_normal(fn->raw);
- if (next->node)
- mark_ref_normal(next->node->raw);
+ if (jffs2_is_writebuffered(c) && csize != 0) {
+ /* At this point we are supposed to check the data CRC
+ * of our unchecked node. But thus far, we do not
+ * know whether the node is valid or obsolete. To
+ * figure this out, we need to walk all the nodes of
+ * the inode and build the inode fragtree. We don't
+ * want to spend time checking data of nodes which may
+ * later be found to be obsolete. So we put off the full
+ * data CRC checking until we have read all the inode
+ * nodes and have started building the fragtree.
+ *
+ * The fragtree is being built starting with nodes
+ * having the highest version number, so we'll be able
+ * to detect whether a node is valid (i.e., it is not
+ * overlapped by a node with higher version) or not.
+ * And we'll be able to check only those nodes, which
+ * are not obsolete.
+ *
+ * Of course, this optimization only makes sense in case
+ * of NAND flashes (or other flashes whith
+ * !jffs2_can_mark_obsolete()), since on NOR flashes
+ * nodes are marked obsolete physically.
+ *
+ * Since NAND flashes (or other flashes with
+ * jffs2_is_writebuffered(c)) are anyway read by
+ * fractions of c->wbuf_pagesize, and we have just read
+ * the node header, it is likely that the starting part
+ * of the node data is also read when we read the
+ * header. So we don't mind checking the CRC of the
+ * starting part of the data of the node now, and check
+ * the second part later (in jffs2_check_node_data()).
+ * Of course, we will not need to re-read and re-check
+ * the NAND page which we have just read. This is why we
+ * read the whole NAND page at jffs2_get_inode_nodes(),
+ * while we needed only the node header.
+ */
+ unsigned char *buf;
+
+ /* 'buf' will point to the start of data */
+ buf = (unsigned char *)rd + sizeof(*rd);
+ /* len will be the read data length */
+ len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
+ tn->partial_crc = crc32(0, buf, len);
+
+ dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);
+
+ /* If we actually calculated the whole data CRC
+ * and it is wrong, drop the node. */
+ if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
+ JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
+ ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
+ goto free_out;
+ }
+
+ } else if (csize == 0) {
+ /*
+ * We checked the header CRC. If the node has no data, adjust
+ * the space accounting now. For other nodes this will be done
+ * later either when the node is marked obsolete or when its
+ * data is checked.
+ */
+ struct jffs2_eraseblock *jeb;
+
+ dbg_readinode("the node has no data.\n");
+ jeb = &c->blocks[ref->flash_offset / c->sector_size];
+ len = ref_totlen(c, jeb, ref);
+
+ spin_lock(&c->erase_completion_lock);
+ jeb->used_size += len;
+ jeb->unchecked_size -= len;
+ c->used_size += len;
+ c->unchecked_size -= len;
+ ref->flash_offset = ref_offset(ref) | REF_NORMAL;
+ spin_unlock(&c->erase_completion_lock);
}
}
- D2(if (jffs2_sanitycheck_fragtree(f)) {
- printk(KERN_WARNING "Just added node %04x-%04x @0x%08x on flash, newfrag *%p\n",
- fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag);
- return 0;
- })
- D2(jffs2_print_frag_list(f));
+
+ tn->fn = jffs2_alloc_full_dnode();
+ if (!tn->fn) {
+ JFFS2_ERROR("alloc fn failed\n");
+ ret = -ENOMEM;
+ goto free_out;
+ }
+
+ tn->version = je32_to_cpu(rd->version);
+ tn->fn->ofs = je32_to_cpu(rd->offset);
+ tn->data_crc = je32_to_cpu(rd->data_crc);
+ tn->csize = csize;
+ tn->fn->raw = ref;
+
+ /* There was a bug where we wrote hole nodes out with
+ csize/dsize swapped. Deal with it */
+ if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
+ tn->fn->size = csize;
+ else // normal case...
+ tn->fn->size = je32_to_cpu(rd->dsize);
+
+ dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
+ ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
+
+ jffs2_add_tn_to_tree(tn, tnp);
+
return 0;
+
+free_out:
+ jffs2_free_tmp_dnode_info(tn);
+ return ret;
}
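The long comment in read_dnode() above explains why, on write-buffered flash, only the part of the data that was read along with the header gets CRC-checked immediately, with the rest deferred until the node is known to be worth checking. A rough userspace sketch of that decision follows; it relies on zlib's crc32() and entirely made-up header and payload sizes, so it illustrates the arithmetic only, not the JFFS2 node format.

/* Minimal sketch of the "partial data CRC now, finish it later" idea.
 * Assumptions: zlib provides crc32(); hdr_len, rdlen, csize and the expected
 * CRC are illustrative values, not JFFS2 on-flash quantities. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

/* Returns 1 if the node must be dropped now, 0 if it survives (possibly
 * with only a partial CRC computed, to be completed later). */
static int partial_crc_check(const unsigned char *buf, uint32_t rdlen,
                             uint32_t hdr_len, uint32_t csize,
                             uint32_t expected_crc, uint32_t *partial_crc)
{
    const unsigned char *data = buf + hdr_len;  /* payload starts after the header */
    uint32_t len = rdlen - hdr_len;             /* payload bytes already in the buffer */

    if (len > csize)
        len = csize;                            /* never sum past the payload */

    *partial_crc = crc32(0, data, len);

    /* If the whole payload was in the buffer, we can decide right away. */
    if (len >= csize && *partial_crc != expected_crc)
        return 1;                               /* wrong data CRC: drop the node */

    return 0;                                   /* keep it; finish the CRC later */
}

int main(void)
{
    unsigned char buf[64];
    uint32_t partial;
    int drop;

    memset(buf, 0, sizeof(buf));
    memcpy(buf + 16, "hello", 5);               /* pretend the header is 16 bytes */
    uint32_t good = crc32(0, buf + 16, 5);

    drop = partial_crc_check(buf, 21, 16, 5, good, &partial);
    printf("covered payload, good CRC -> drop=%d\n", drop);

    drop = partial_crc_check(buf, 21, 16, 5, good ^ 1u, &partial);
    printf("covered payload, bad CRC  -> drop=%d\n", drop);

    drop = partial_crc_check(buf, 19, 16, 5, good ^ 1u, &partial);
    printf("partly read payload       -> drop=%d, partial=0x%08x\n",
           drop, (unsigned)partial);
    return 0;
}

The third call shows the deferred case: the buffer covered only part of the payload, so the node is kept and the partial CRC is carried forward.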
-/* Doesn't set inode->i_size */
-static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag)
+/*
+ * Helper function for jffs2_get_inode_nodes().
+ * It is called every time an unknown node is found.
+ *
+ * Returns: 0 on success;
+ * 1 if the node should be marked obsolete;
+ * negative error code on failure.
+ */
+static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
- struct jffs2_node_frag *this;
- uint32_t lastend;
+ /* We don't mark unknown nodes as REF_UNCHECKED */
+ BUG_ON(ref_flags(ref) == REF_UNCHECKED);
- /* Skip all the nodes which are completed before this one starts */
- this = jffs2_lookup_node_frag(list, newfrag->node->ofs);
+ un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
- if (this) {
- D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n",
- this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this));
- lastend = this->ofs + this->size;
+ if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) {
+ /* Hmmm. This should have been caught at scan time. */
+ JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref));
+ jffs2_dbg_dump_node(c, ref_offset(ref));
+ return 1;
} else {
- D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n"));
- lastend = 0;
- }
-
- /* See if we ran off the end of the list */
- if (lastend <= newfrag->ofs) {
- /* We did */
-
- /* Check if 'this' node was on the same page as the new node.
- If so, both 'this' and the new node get marked REF_NORMAL so
- the GC can take a look.
- */
- if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
- if (this->node)
- mark_ref_normal(this->node->raw);
- mark_ref_normal(newfrag->node->raw);
- }
+ switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
- if (lastend < newfrag->node->ofs) {
- /* ... and we need to put a hole in before the new node */
- struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag();
- if (!holefrag) {
- jffs2_free_node_frag(newfrag);
- return -ENOMEM;
- }
- holefrag->ofs = lastend;
- holefrag->size = newfrag->node->ofs - lastend;
- holefrag->node = NULL;
- if (this) {
- /* By definition, the 'this' node has no right-hand child,
- because there are no frags with offset greater than it.
- So that's where we want to put the hole */
- D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this));
- rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right);
- } else {
- D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag));
- rb_link_node(&holefrag->rb, NULL, &list->rb_node);
- }
- rb_insert_color(&holefrag->rb, list);
- this = holefrag;
- }
- if (this) {
- /* By definition, the 'this' node has no right-hand child,
- because there are no frags with offset greater than it.
- So that's where we want to put the hole */
- D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this));
- rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
- } else {
- D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag));
- rb_link_node(&newfrag->rb, NULL, &list->rb_node);
+ case JFFS2_FEATURE_INCOMPAT:
+ JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ /* EEP */
+ BUG();
+ break;
+
+ case JFFS2_FEATURE_ROCOMPAT:
+ JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
+ break;
+
+ case JFFS2_FEATURE_RWCOMPAT_COPY:
+ JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ break;
+
+ case JFFS2_FEATURE_RWCOMPAT_DELETE:
+ JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ return 1;
}
- rb_insert_color(&newfrag->rb, list);
- return 0;
}
- D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n",
- this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this));
+ return 0;
+}
- /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes,
- * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs
- */
- if (newfrag->ofs > this->ofs) {
- /* This node isn't completely obsoleted. The start of it remains valid */
-
- /* Mark the new node and the partially covered node REF_NORMAL -- let
- the GC take a look at them */
- mark_ref_normal(newfrag->node->raw);
- if (this->node)
- mark_ref_normal(this->node->raw);
-
- if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
- /* The new node splits 'this' frag into two */
- struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag();
- if (!newfrag2) {
- jffs2_free_node_frag(newfrag);
- return -ENOMEM;
- }
- D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size);
- if (this->node)
- printk("phys 0x%08x\n", ref_offset(this->node->raw));
- else
- printk("hole\n");
- )
-
- /* New second frag pointing to this's node */
- newfrag2->ofs = newfrag->ofs + newfrag->size;
- newfrag2->size = (this->ofs+this->size) - newfrag2->ofs;
- newfrag2->node = this->node;
- if (this->node)
- this->node->frags++;
-
- /* Adjust size of original 'this' */
- this->size = newfrag->ofs - this->ofs;
-
- /* Now, we know there's no node with offset
- greater than this->ofs but smaller than
- newfrag2->ofs or newfrag->ofs, for obvious
- reasons. So we can do a tree insert from
- 'this' to insert newfrag, and a tree insert
- from newfrag to insert newfrag2. */
- jffs2_fragtree_insert(newfrag, this);
- rb_insert_color(&newfrag->rb, list);
-
- jffs2_fragtree_insert(newfrag2, newfrag);
- rb_insert_color(&newfrag2->rb, list);
-
- return 0;
- }
- /* New node just reduces 'this' frag in size, doesn't split it */
- this->size = newfrag->ofs - this->ofs;
+/*
+ * Helper function for jffs2_get_inode_nodes().
+ * The function detects whether more data should be read, and reads it if so.
+ *
+ * Returns: 0 on success;
+ * negative error code on failure.
+ */
+static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
+ int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart)
+{
+ int right_len, err, len;
+ size_t retlen;
+ uint32_t offs;
- /* Again, we know it lives down here in the tree */
- jffs2_fragtree_insert(newfrag, this);
- rb_insert_color(&newfrag->rb, list);
- } else {
- /* New frag starts at the same point as 'this' used to. Replace
- it in the tree without doing a delete and insertion */
- D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n",
- newfrag, newfrag->ofs, newfrag->ofs+newfrag->size,
- this, this->ofs, this->ofs+this->size));
-
- rb_replace_node(&this->rb, &newfrag->rb, list);
-
- if (newfrag->ofs + newfrag->size >= this->ofs+this->size) {
- D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size));
- jffs2_obsolete_node_frag(c, this);
- } else {
- this->ofs += newfrag->size;
- this->size -= newfrag->size;
+ if (jffs2_is_writebuffered(c)) {
+ right_len = c->wbuf_pagesize - (bufstart - buf);
+ if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize)
+ right_len += c->wbuf_pagesize;
+ } else
+ right_len = right_size;
- jffs2_fragtree_insert(this, newfrag);
- rb_insert_color(&this->rb, list);
- return 0;
- }
+ if (*rdlen == right_len)
+ return 0;
+
+ /* We need to read more data */
+ offs = ref_offset(ref) + *rdlen;
+ if (jffs2_is_writebuffered(c)) {
+ bufstart = buf + c->wbuf_pagesize;
+ len = c->wbuf_pagesize;
+ } else {
+ bufstart = buf + *rdlen;
+ len = right_size - *rdlen;
}
- /* OK, now we have newfrag added in the correct place in the tree, but
- frag_next(newfrag) may be a fragment which is overlapped by it
- */
- while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
- /* 'this' frag is obsoleted completely. */
- D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size));
- rb_erase(&this->rb, list);
- jffs2_obsolete_node_frag(c, this);
+
+ dbg_readinode("read more %d bytes\n", len);
+
+ err = jffs2_flash_read(c, offs, len, &retlen, bufstart);
+ if (err) {
+ JFFS2_ERROR("can not read %d bytes from 0x%08x, "
+ "error code: %d.\n", len, offs, err);
+ return err;
}
- /* Now we're pointing at the first frag which isn't totally obsoleted by
- the new frag */
- if (!this || newfrag->ofs + newfrag->size == this->ofs) {
- return 0;
+ if (retlen < len) {
+ JFFS2_ERROR("short read at %#08x: %d instead of %d.\n",
+ offs, retlen, len);
+ return -EIO;
}
- /* Still some overlap but we don't need to move it in the tree */
- this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
- this->ofs = newfrag->ofs + newfrag->size;
- /* And mark them REF_NORMAL so the GC takes a look at them */
- if (this->node)
- mark_ref_normal(this->node->raw);
- mark_ref_normal(newfrag->node->raw);
+ *rdlen = right_len;
return 0;
}
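read_more() above and the buffer setup in jffs2_get_inode_nodes() below both translate "this node header needs N bytes" into "read one wbuf page, or two if the header crosses a page boundary, starting at the page the node lives in". The sketch below repeats that arithmetic with an assumed 512-byte write-buffer page and invented node offsets; it is not meant to mirror any particular flash geometry.

/* Sketch of the wbuf-aligned read-length computation used above.
 * The page size and node offsets are illustrative values only. */
#include <stdint.h>
#include <stdio.h>

/* How many bytes must be read so that 'need' bytes of the node starting at
 * flash offset 'ofs' are covered, when reads are done in whole wbuf pages. */
static uint32_t covered_len(uint32_t ofs, uint32_t need, uint32_t wbuf_pagesize)
{
    uint32_t into_page = ofs % wbuf_pagesize;   /* where the node starts within its page */
    uint32_t len = wbuf_pagesize - into_page;   /* rest of the first page */

    if (need + into_page > wbuf_pagesize)
        len += wbuf_pagesize;                   /* header spans into the next page */

    return len;
}

int main(void)
{
    uint32_t pg = 512;                          /* assumed write-buffer page size */

    /* Node starts early in a page: one page covers a 12-byte header. */
    printf("ofs=0x1010 need=12 -> read %u bytes\n", covered_len(0x1010, 12, pg));

    /* Node starts 8 bytes before the page end: the header crosses the
       border, so two pages are read. */
    printf("ofs=0x11f8 need=12 -> read %u bytes\n", covered_len(0x11f8, 12, pg));
    return 0;
}

With those inputs it reads 496 bytes for the first node and 520 bytes (the 8-byte tail of one page plus a whole second page) for the one that straddles a border.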
-void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size)
+/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
+ with this ino, returning the former in order of version */
+static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct rb_root *tnp, struct jffs2_full_dirent **fdp,
+ uint32_t *highest_version, uint32_t *latest_mctime,
+ uint32_t *mctime_ver)
{
- struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);
+ struct jffs2_raw_node_ref *ref, *valid_ref;
+ struct rb_root ret_tn = RB_ROOT;
+ struct jffs2_full_dirent *ret_fd = NULL;
+ unsigned char *buf = NULL;
+ union jffs2_node_union *node;
+ size_t retlen;
+ int len, err;
+
+ *mctime_ver = 0;
+
+ dbg_readinode("ino #%u\n", f->inocache->ino);
+
+ if (jffs2_is_writebuffered(c)) {
+ /*
+ * If we have the write buffer, we assume the minimal I/O unit
+ * is c->wbuf_pagesize. We implement some optimizations in this
+ * case, and we need a temporary buffer of size
+ * 2*c->wbuf_pagesize bytes (see comments in read_dnode()).
+ * Basically, we want to read not only the node header, but the
+ * whole wbuf (NAND page in case of NAND) or 2, if the node
+ * header overlaps the border between the 2 wbufs.
+ */
+ len = 2*c->wbuf_pagesize;
+ } else {
+ /*
+ * When there is no write buffer, the size of the temporary
+ * buffer is the size of the largest node header.
+ */
+ len = sizeof(union jffs2_node_union);
+ }
- D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size));
+ /* FIXME: in case of NOR and available ->point() this
+ * needs to be fixed. */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- /* We know frag->ofs <= size. That's what lookup does for us */
- if (frag && frag->ofs != size) {
- if (frag->ofs+frag->size >= size) {
- D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size));
- frag->size = size - frag->ofs;
+ spin_lock(&c->erase_completion_lock);
+ valid_ref = jffs2_first_valid_node(f->inocache->nodes);
+ if (!valid_ref && f->inocache->ino != 1)
+ JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
+ while (valid_ref) {
+ unsigned char *bufstart;
+
+ /* We can hold a pointer to a non-obsolete node without the spinlock,
+ but _obsolete_ nodes may disappear at any time, if the block
+ they're in gets erased. So if we mark 'ref' obsolete while we're
+ not holding the lock, it can go away immediately. For that reason,
+ we find the next valid node first, before processing 'ref'.
+ */
+ ref = valid_ref;
+ valid_ref = jffs2_first_valid_node(ref->next_in_ino);
+ spin_unlock(&c->erase_completion_lock);
+
+ cond_resched();
+
+ /*
+ * At this point we don't know the type of the node we're going
+ * to read, so we do not know the size of its header. In order
+ * to minimize the amount of flash IO we assume the node has
+ * size = JFFS2_MIN_NODE_HEADER.
+ */
+ if (jffs2_is_writebuffered(c)) {
+ /*
+ * We treat 'buf' as 2 adjacent wbufs. We want to
+ * adjust bufstart so that it points to the
+ * beginning of the node within this wbuf.
+ */
+ bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize);
+ /* We will read either one wbuf or 2 wbufs. */
+ len = c->wbuf_pagesize - (bufstart - buf);
+ if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) {
+ /* The header spans the border of the first wbuf */
+ len += c->wbuf_pagesize;
+ }
+ } else {
+ bufstart = buf;
+ len = JFFS2_MIN_NODE_HEADER;
}
- frag = frag_next(frag);
- }
- while (frag && frag->ofs >= size) {
- struct jffs2_node_frag *next = frag_next(frag);
- D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size));
- frag_erase(frag, list);
- jffs2_obsolete_node_frag(c, frag);
- frag = next;
- }
-}
+ dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));
-/* Scan the list of all nodes present for this ino, build map of versions, etc. */
+ /* FIXME: point() */
+ err = jffs2_flash_read(c, ref_offset(ref), len,
+ &retlen, bufstart);
+ if (err) {
+ JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
+ goto free_out;
+ }
-static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
- struct jffs2_inode_info *f,
- struct jffs2_raw_inode *latest_node);
+ if (retlen < len) {
+ JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len);
+ err = -EIO;
+ goto free_out;
+ }
-int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- uint32_t ino, struct jffs2_raw_inode *latest_node)
-{
- D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n"));
+ node = (union jffs2_node_union *)bufstart;
- retry_inocache:
- spin_lock(&c->inocache_lock);
- f->inocache = jffs2_get_ino_cache(c, ino);
+ switch (je16_to_cpu(node->u.nodetype)) {
- D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache));
+ case JFFS2_NODETYPE_DIRENT:
+
+ if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
+ err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart);
+ if (unlikely(err))
+ goto free_out;
+ }
+
+ err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
+ if (err == 1) {
+ jffs2_mark_node_obsolete(c, ref);
+ break;
+ } else if (unlikely(err))
+ goto free_out;
+
+ if (je32_to_cpu(node->d.version) > *highest_version)
+ *highest_version = je32_to_cpu(node->d.version);
- if (f->inocache) {
- /* Check its state. We may need to wait before we can use it */
- switch(f->inocache->state) {
- case INO_STATE_UNCHECKED:
- case INO_STATE_CHECKEDABSENT:
- f->inocache->state = INO_STATE_READING;
break;
-
- case INO_STATE_CHECKING:
- case INO_STATE_GC:
- /* If it's in either of these states, we need
- to wait for whoever's got it to finish and
- put it back. */
- D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n",
- ino, f->inocache->state));
- sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
- goto retry_inocache;
- case INO_STATE_READING:
- case INO_STATE_PRESENT:
- /* Eep. This should never happen. It can
- happen if Linux calls read_inode() again
- before clear_inode() has finished though. */
- printk(KERN_WARNING "Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
- /* Fail. That's probably better than allowing it to succeed */
- f->inocache = NULL;
+ case JFFS2_NODETYPE_INODE:
+
+ if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
+ err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart);
+ if (unlikely(err))
+ goto free_out;
+ }
+
+ err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
+ if (err == 1) {
+ jffs2_mark_node_obsolete(c, ref);
+ break;
+ } else if (unlikely(err))
+ goto free_out;
+
+ if (je32_to_cpu(node->i.version) > *highest_version)
+ *highest_version = je32_to_cpu(node->i.version);
+
break;
default:
- BUG();
- }
- }
- spin_unlock(&c->inocache_lock);
+ if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
+ err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart);
+ if (unlikely(err))
+ goto free_out;
+ }
+
+ err = read_unknown(c, ref, &node->u);
+ if (err == 1) {
+ jffs2_mark_node_obsolete(c, ref);
+ break;
+ } else if (unlikely(err))
+ goto free_out;
- if (!f->inocache && ino == 1) {
- /* Special case - no root inode on medium */
- f->inocache = jffs2_alloc_inode_cache();
- if (!f->inocache) {
- printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n");
- return -ENOMEM;
}
- D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n"));
- memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
- f->inocache->ino = f->inocache->nlink = 1;
- f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
- f->inocache->state = INO_STATE_READING;
- jffs2_add_ino_cache(c, f->inocache);
- }
- if (!f->inocache) {
- printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino);
- return -ENOENT;
+ spin_lock(&c->erase_completion_lock);
}
- return jffs2_do_read_inode_internal(c, f, latest_node);
-}
-
-int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
-{
- struct jffs2_raw_inode n;
- struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
- int ret;
-
- if (!f)
- return -ENOMEM;
+ spin_unlock(&c->erase_completion_lock);
+ *tnp = ret_tn;
+ *fdp = ret_fd;
+ kfree(buf);
- memset(f, 0, sizeof(*f));
- init_MUTEX_LOCKED(&f->sem);
- f->inocache = ic;
+ dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
+ f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
+ return 0;
- ret = jffs2_do_read_inode_internal(c, f, &n);
- if (!ret) {
- up(&f->sem);
- jffs2_do_clear_inode(c, f);
- }
- kfree (f);
- return ret;
+ free_out:
+ jffs2_free_tmp_dnode_info_list(&ret_tn);
+ jffs2_free_full_dirent_list(ret_fd);
+ kfree(buf);
+ return err;
}
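One detail worth spelling out from the loop in jffs2_get_inode_nodes(): the next valid node is looked up before the current one is processed, because processing may obsolete the current node, and an obsolete node can vanish once the lock is dropped. Reduced to a plain singly linked list in userspace, the same "remember next, then touch current" pattern looks roughly like this; the list, the odd/even rule and the free() are stand-ins, not JFFS2 structures.

/* Userspace sketch of fetching the next element before processing the
 * current one; here processing may free the current node. */
#include <stdio.h>
#include <stdlib.h>

struct node {
    int value;
    struct node *next;
};

/* Pretend-processing: odd values are "obsoleted", i.e. freed immediately. */
static void process(struct node *n)
{
    if (n->value & 1) {
        printf("obsoleting %d\n", n->value);
        free(n);                    /* after this, n must not be touched again */
    } else {
        printf("keeping %d\n", n->value);
    }
}

int main(void)
{
    struct node *head = NULL, *n, *next;

    for (int i = 5; i >= 1; i--) {  /* build the list 1..5 */
        n = malloc(sizeof(*n));
        if (!n)
            return 1;
        n->value = i;
        n->next = head;
        head = n;
    }

    /* Safe walk: remember 'next' first, then hand 'n' to process(), which
       is free to destroy it. Reading n->next afterwards would be a bug. */
    for (n = head; n; n = next) {
        next = n->next;
        process(n);
    }

    /* The even-valued nodes were kept above and are leaked here for brevity. */
    return 0;
}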
-static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
+static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
struct jffs2_inode_info *f,
struct jffs2_raw_inode *latest_node)
{
- struct jffs2_tmp_dnode_info *tn = NULL;
+ struct jffs2_tmp_dnode_info *tn;
struct rb_root tn_list;
struct rb_node *rb, *repl_rb;
struct jffs2_full_dirent *fd_list;
- struct jffs2_full_dnode *fn = NULL;
+ struct jffs2_full_dnode *fn, *first_fn = NULL;
uint32_t crc;
uint32_t latest_mctime, mctime_ver;
- uint32_t mdata_ver = 0;
size_t retlen;
int ret;
- D1(printk(KERN_DEBUG "jffs2_do_read_inode_internal(): ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink));
+ dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);
/* Grab all nodes relevant to this ino */
ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);
if (ret) {
- printk(KERN_CRIT "jffs2_get_inode_nodes() for ino %u returned %d\n", f->inocache->ino, ret);
+ JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
if (f->inocache->state == INO_STATE_READING)
jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
return ret;
@@ -525,42 +655,33 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
rb = rb_first(&tn_list);
while (rb) {
+ cond_resched();
tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
fn = tn->fn;
-
- if (f->metadata) {
- if (likely(tn->version >= mdata_ver)) {
- D1(printk(KERN_DEBUG "Obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw)));
- jffs2_mark_node_obsolete(c, f->metadata->raw);
- jffs2_free_full_dnode(f->metadata);
- f->metadata = NULL;
-
- mdata_ver = 0;
- } else {
- /* This should never happen. */
- printk(KERN_WARNING "Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n",
- ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw));
- jffs2_mark_node_obsolete(c, fn->raw);
- jffs2_free_full_dnode(fn);
- /* Fill in latest_node from the metadata, not this one we're about to free... */
- fn = f->metadata;
- goto next_tn;
- }
- }
+ ret = 1;
+ dbg_readinode("consider node ver %u, phys offset "
+ "%#08x(%d), range %u-%u.\n", tn->version,
+ ref_offset(fn->raw), ref_flags(fn->raw),
+ fn->ofs, fn->ofs + fn->size);
if (fn->size) {
- jffs2_add_full_dnode_to_inode(c, f, fn);
- } else {
- /* Zero-sized node at end of version list. Just a metadata update */
- D1(printk(KERN_DEBUG "metadata @%08x: ver %d\n", ref_offset(fn->raw), tn->version));
+ ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
+ /* TODO: the error code isn't checked, check it */
+ jffs2_dbg_fragtree_paranoia_check_nolock(f);
+ BUG_ON(ret < 0);
+ if (!first_fn && ret == 0)
+ first_fn = fn;
+ } else if (!first_fn) {
+ first_fn = fn;
f->metadata = fn;
- mdata_ver = tn->version;
- }
- next_tn:
+ ret = 0; /* Prevent freeing the metadata update node */
+ } else
+ jffs2_mark_node_obsolete(c, fn->raw);
+
BUG_ON(rb->rb_left);
if (rb->rb_parent && rb->rb_parent->rb_left == rb) {
/* We were then left-hand child of our parent. We need
- to move our own right-hand child into our place. */
+ * to move our own right-hand child into our place. */
repl_rb = rb->rb_right;
if (repl_rb)
repl_rb->rb_parent = rb->rb_parent;
@@ -570,7 +691,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
rb = rb_next(rb);
/* Remove the spent tn from the tree; don't bother rebalancing
- but put our right-hand child in our own place. */
+ * but put our right-hand child in our own place. */
if (tn->rb.rb_parent) {
if (tn->rb.rb_parent->rb_left == &tn->rb)
tn->rb.rb_parent->rb_left = repl_rb;
@@ -581,19 +702,27 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
tn->rb.rb_right->rb_parent = NULL;
jffs2_free_tmp_dnode_info(tn);
+ if (ret) {
+ dbg_readinode("delete dnode %u-%u.\n",
+ fn->ofs, fn->ofs + fn->size);
+ jffs2_free_full_dnode(fn);
+ }
}
- D1(jffs2_sanitycheck_fragtree(f));
+ jffs2_dbg_fragtree_paranoia_check_nolock(f);
- if (!fn) {
+ BUG_ON(first_fn && ref_obsolete(first_fn->raw));
+
+ fn = first_fn;
+ if (unlikely(!first_fn)) {
/* No data nodes for this inode. */
if (f->inocache->ino != 1) {
- printk(KERN_WARNING "jffs2_do_read_inode(): No data nodes found for ino #%u\n", f->inocache->ino);
+ JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
if (!fd_list) {
if (f->inocache->state == INO_STATE_READING)
jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
return -EIO;
}
- printk(KERN_WARNING "jffs2_do_read_inode(): But it has children so we fake some modes for it\n");
+ JFFS2_NOTICE("but it has children so we fake some modes for it\n");
}
latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
latest_node->version = cpu_to_je32(0);
@@ -608,8 +737,8 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
if (ret || retlen != sizeof(*latest_node)) {
- printk(KERN_NOTICE "MTD read in jffs2_do_read_inode() failed: Returned %d, %zd of %zd bytes read\n",
- ret, retlen, sizeof(*latest_node));
+ JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
+ ret, retlen, sizeof(*latest_node));
/* FIXME: If this fails, there seems to be a memory leak. Find it. */
up(&f->sem);
jffs2_do_clear_inode(c, f);
@@ -618,7 +747,8 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
crc = crc32(0, latest_node, sizeof(*latest_node)-8);
if (crc != je32_to_cpu(latest_node->node_crc)) {
- printk(KERN_NOTICE "CRC failed for read_inode of inode %u at physical location 0x%x\n", f->inocache->ino, ref_offset(fn->raw));
+ JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
+ f->inocache->ino, ref_offset(fn->raw));
up(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
@@ -633,10 +763,10 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
}
break;
-
+
case S_IFREG:
/* If it was a regular file, truncate it to the latest node's isize */
- jffs2_truncate_fraglist(c, &f->fragtree, je32_to_cpu(latest_node->isize));
+ jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
break;
case S_IFLNK:
@@ -649,37 +779,33 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
if (f->inocache->state != INO_STATE_CHECKING) {
/* Symlink's inode data is the target path. Read it and
- * keep in RAM to facilitate quick follow symlink operation.
- * We use f->dents field to store the target path, which
- * is somewhat ugly. */
- f->dents = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
- if (!f->dents) {
- printk(KERN_WARNING "Can't allocate %d bytes of memory "
- "for the symlink target path cache\n",
- je32_to_cpu(latest_node->csize));
+ * keep in RAM to facilitate quick follow symlink
+ * operation. */
+ f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
+ if (!f->target) {
+ JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
up(&f->sem);
jffs2_do_clear_inode(c, f);
return -ENOMEM;
}
-
+
ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
- je32_to_cpu(latest_node->csize), &retlen, (char *)f->dents);
-
+ je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
+
if (ret || retlen != je32_to_cpu(latest_node->csize)) {
if (retlen != je32_to_cpu(latest_node->csize))
ret = -EIO;
- kfree(f->dents);
- f->dents = NULL;
+ kfree(f->target);
+ f->target = NULL;
up(&f->sem);
jffs2_do_clear_inode(c, f);
return -ret;
}
- ((char *)f->dents)[je32_to_cpu(latest_node->csize)] = '\0';
- D1(printk(KERN_DEBUG "jffs2_do_read_inode(): symlink's target '%s' cached\n",
- (char *)f->dents));
+ f->target[je32_to_cpu(latest_node->csize)] = '\0';
+ dbg_readinode("symlink's target '%s' cached\n", f->target);
}
-
+
/* fall through... */
case S_IFBLK:
@@ -687,14 +813,14 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
/* Certain inode types should have only one data node, and it's
kept as the metadata node */
if (f->metadata) {
- printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o had metadata node\n",
+ JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
f->inocache->ino, jemode_to_cpu(latest_node->mode));
up(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
}
if (!frag_first(&f->fragtree)) {
- printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o has no fragments\n",
+ JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
f->inocache->ino, jemode_to_cpu(latest_node->mode));
up(&f->sem);
jffs2_do_clear_inode(c, f);
@@ -702,7 +828,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
}
/* ASSERT: f->fraglist != NULL */
if (frag_next(frag_first(&f->fragtree))) {
- printk(KERN_WARNING "Argh. Special inode #%u with mode 0x%x had more than one node\n",
+ JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
f->inocache->ino, jemode_to_cpu(latest_node->mode));
/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
up(&f->sem);
@@ -721,6 +847,93 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
return 0;
}
+/* Scan the list of all nodes present for this ino, build map of versions, etc. */
+int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ uint32_t ino, struct jffs2_raw_inode *latest_node)
+{
+ dbg_readinode("read inode #%u\n", ino);
+
+ retry_inocache:
+ spin_lock(&c->inocache_lock);
+ f->inocache = jffs2_get_ino_cache(c, ino);
+
+ if (f->inocache) {
+ /* Check its state. We may need to wait before we can use it */
+ switch(f->inocache->state) {
+ case INO_STATE_UNCHECKED:
+ case INO_STATE_CHECKEDABSENT:
+ f->inocache->state = INO_STATE_READING;
+ break;
+
+ case INO_STATE_CHECKING:
+ case INO_STATE_GC:
+ /* If it's in either of these states, we need
+ to wait for whoever's got it to finish and
+ put it back. */
+ dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
+ sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+ goto retry_inocache;
+
+ case INO_STATE_READING:
+ case INO_STATE_PRESENT:
+ /* Eep. This should never happen. It can
+ happen if Linux calls read_inode() again
+ before clear_inode() has finished though. */
+ JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
+ /* Fail. That's probably better than allowing it to succeed */
+ f->inocache = NULL;
+ break;
+
+ default:
+ BUG();
+ }
+ }
+ spin_unlock(&c->inocache_lock);
+
+ if (!f->inocache && ino == 1) {
+ /* Special case - no root inode on medium */
+ f->inocache = jffs2_alloc_inode_cache();
+ if (!f->inocache) {
+ JFFS2_ERROR("cannot allocate inocache for root inode\n");
+ return -ENOMEM;
+ }
+ dbg_readinode("creating inocache for root inode\n");
+ memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
+ f->inocache->ino = f->inocache->nlink = 1;
+ f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
+ f->inocache->state = INO_STATE_READING;
+ jffs2_add_ino_cache(c, f->inocache);
+ }
+ if (!f->inocache) {
+ JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
+ return -ENOENT;
+ }
+
+ return jffs2_do_read_inode_internal(c, f, latest_node);
+}
+
+int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+ struct jffs2_raw_inode n;
+ struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
+ int ret;
+
+ if (!f)
+ return -ENOMEM;
+
+ memset(f, 0, sizeof(*f));
+ init_MUTEX_LOCKED(&f->sem);
+ f->inocache = ic;
+
+ ret = jffs2_do_read_inode_internal(c, f, &n);
+ if (!ret) {
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ }
+ kfree (f);
+ return ret;
+}
+
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
struct jffs2_full_dirent *fd, *fds;
@@ -740,20 +953,16 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
- /* For symlink inodes we us f->dents to store the target path name */
- if (S_ISLNK(OFNI_EDONI_2SFFJ(f)->i_mode)) {
- if (f->dents) {
- kfree(f->dents);
- f->dents = NULL;
- }
- } else {
- fds = f->dents;
+ if (f->target) {
+ kfree(f->target);
+ f->target = NULL;
+ }
- while(fds) {
- fd = fds;
- fds = fd->next;
- jffs2_free_full_dirent(fd);
- }
+ fds = f->dents;
+ while(fds) {
+ fd = fds;
+ fds = fd->next;
+ jffs2_free_full_dirent(fd);
}
if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index b63160f83ba..3e51dd1da8a 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: scan.c,v 1.119 2005/02/17 17:51:13 dedekind Exp $
+ * $Id: scan.c,v 1.125 2005/09/30 13:59:13 dedekind Exp $
*
*/
#include <linux/kernel.h>
@@ -18,22 +18,11 @@
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"
+#include "summary.h"
+#include "debug.h"
#define DEFAULT_EMPTY_SCAN_SIZE 1024
-#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
- c->free_size -= _x; c->dirty_size += _x; \
- jeb->free_size -= _x ; jeb->dirty_size += _x; \
- }while(0)
-#define USED_SPACE(x) do { typeof(x) _x = (x); \
- c->free_size -= _x; c->used_size += _x; \
- jeb->free_size -= _x ; jeb->used_size += _x; \
- }while(0)
-#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
- c->free_size -= _x; c->unchecked_size += _x; \
- jeb->free_size -= _x ; jeb->unchecked_size += _x; \
- }while(0)
-
#define noisy_printk(noise, args...) do { \
if (*(noise)) { \
printk(KERN_NOTICE args); \
@@ -47,23 +36,16 @@
static uint32_t pseudo_random;
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- unsigned char *buf, uint32_t buf_size);
+ unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);
-/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
+/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
* Returning an error will abort the mount - bad checksums etc. should just mark the space
* as dirty.
*/
-static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct jffs2_raw_inode *ri, uint32_t ofs);
+static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct jffs2_raw_dirent *rd, uint32_t ofs);
-
-#define BLK_STATE_ALLFF 0
-#define BLK_STATE_CLEAN 1
-#define BLK_STATE_PARTDIRTY 2
-#define BLK_STATE_CLEANMARKER 3
-#define BLK_STATE_ALLDIRTY 4
-#define BLK_STATE_BADBLOCK 5
+ struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);
static inline int min_free(struct jffs2_sb_info *c)
{
@@ -89,6 +71,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
uint32_t empty_blocks = 0, bad_blocks = 0;
unsigned char *flashbuf = NULL;
uint32_t buf_size = 0;
+ struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
#ifndef __ECOS
size_t pointlen;
@@ -122,21 +105,34 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
return -ENOMEM;
}
+ if (jffs2_sum_active()) {
+ s = kmalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
+ if (!s) {
+ JFFS2_WARNING("Can't allocate memory for summary\n");
+ return -ENOMEM;
+ }
+ memset(s, 0, sizeof(struct jffs2_summary));
+ }
+
for (i=0; i<c->nr_blocks; i++) {
struct jffs2_eraseblock *jeb = &c->blocks[i];
- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size);
+ /* reset summary info for next eraseblock scan */
+ jffs2_sum_reset_collected(s);
+
+ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+ buf_size, s);
if (ret < 0)
goto out;
- ACCT_PARANOIA_CHECK(jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
/* Now decide which list to put it on */
switch(ret) {
case BLK_STATE_ALLFF:
- /*
- * Empty block. Since we can't be sure it
+ /*
+ * Empty block. Since we can't be sure it
* was entirely erased, we just queue it for erase
* again. It will be marked as such when the erase
* is complete. Meanwhile we still count it as empty
@@ -162,18 +158,18 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
break;
case BLK_STATE_CLEAN:
- /* Full (or almost full) of clean data. Clean list */
- list_add(&jeb->list, &c->clean_list);
+ /* Full (or almost full) of clean data. Clean list */
+ list_add(&jeb->list, &c->clean_list);
break;
case BLK_STATE_PARTDIRTY:
- /* Some data, but not full. Dirty list. */
- /* We want to remember the block with most free space
- and stick it in the 'nextblock' position to start writing to it. */
- if (jeb->free_size > min_free(c) &&
- (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
- /* Better candidate for the next writes to go to */
- if (c->nextblock) {
+ /* Some data, but not full. Dirty list. */
+ /* We want to remember the block with most free space
+ and stick it in the 'nextblock' position to start writing to it. */
+ if (jeb->free_size > min_free(c) &&
+ (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
+ /* Better candidate for the next writes to go to */
+ if (c->nextblock) {
c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
c->free_size -= c->nextblock->free_size;
@@ -184,9 +180,14 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
} else {
list_add(&c->nextblock->list, &c->dirty_list);
}
+ /* deleting summary information of the old nextblock */
+ jffs2_sum_reset_collected(c->summary);
}
- c->nextblock = jeb;
- } else {
+ /* update collected summary information for the current nextblock */
+ jffs2_sum_move_collected(c, s);
+ D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
+ c->nextblock = jeb;
+ } else {
jeb->dirty_size += jeb->free_size + jeb->wasted_size;
c->dirty_size += jeb->free_size + jeb->wasted_size;
c->free_size -= jeb->free_size;
@@ -197,30 +198,33 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
} else {
list_add(&jeb->list, &c->dirty_list);
}
- }
+ }
break;
case BLK_STATE_ALLDIRTY:
/* Nothing valid - not even a clean marker. Needs erasing. */
- /* For now we just put it on the erasing list. We'll start the erases later */
+ /* For now we just put it on the erasing list. We'll start the erases later */
D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
- list_add(&jeb->list, &c->erase_pending_list);
+ list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
break;
-
+
case BLK_STATE_BADBLOCK:
D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
- list_add(&jeb->list, &c->bad_list);
+ list_add(&jeb->list, &c->bad_list);
c->bad_size += c->sector_size;
c->free_size -= c->sector_size;
bad_blocks++;
break;
default:
printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
- BUG();
+ BUG();
}
}
-
+
+ if (jffs2_sum_active() && s)
+ kfree(s);
+
/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
if (c->nextblock && (c->nextblock->dirty_size)) {
c->nextblock->wasted_size += c->nextblock->dirty_size;
@@ -229,12 +233,12 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
c->nextblock->dirty_size = 0;
}
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
- if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) {
- /* If we're going to start writing into a block which already
+ if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
+ /* If we're going to start writing into a block which already
contains data, and the end of the data isn't page-aligned,
skip a little and align it. */
- uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1);
+ uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;
D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
skip));
@@ -246,7 +250,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
}
#endif
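The hunk above swaps free_size & (c->wbuf_pagesize-1) for free_size % c->wbuf_pagesize. The two expressions agree only when the page size is a power of two, which this change suggests can no longer be assumed; the short check below makes the difference visible (528 is just an example of a non-power-of-two write size, not a statement about any specific device).

/* The bitmask trick only equals the remainder for power-of-two sizes. */
#include <stdio.h>

int main(void)
{
    unsigned int free_size = 1000;

    unsigned int pow2 = 512;    /* power of two: mask and modulo agree */
    printf("%%=%u  &=%u\n", free_size % pow2, free_size & (pow2 - 1));

    unsigned int odd = 528;     /* not a power of two: they differ */
    printf("%%=%u  &=%u\n", free_size % odd, free_size & (odd - 1));
    return 0;
}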
if (c->nr_erasing_blocks) {
- if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
+ if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
ret = -EIO;
@@ -259,13 +263,13 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
if (buf_size)
kfree(flashbuf);
#ifndef __ECOS
- else
+ else
c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
return ret;
}
-static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf,
+int jffs2_fill_scan_buf (struct jffs2_sb_info *c, void *buf,
uint32_t ofs, uint32_t len)
{
int ret;
@@ -280,20 +284,39 @@ static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf,
D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
return -EIO;
}
- D2(printk(KERN_DEBUG "Read 0x%x bytes from 0x%08x into buf\n", len, ofs));
- D2(printk(KERN_DEBUG "000: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]));
return 0;
}
+int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
+ && (!jeb->first_node || !jeb->first_node->next_phys) )
+ return BLK_STATE_CLEANMARKER;
+
+ /* move blocks with max 4 byte dirty space to cleanlist */
+ else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
+ c->dirty_size -= jeb->dirty_size;
+ c->wasted_size += jeb->dirty_size;
+ jeb->wasted_size += jeb->dirty_size;
+ jeb->dirty_size = 0;
+ return BLK_STATE_CLEAN;
+ } else if (jeb->used_size || jeb->unchecked_size)
+ return BLK_STATE_PARTDIRTY;
+ else
+ return BLK_STATE_ALLDIRTY;
+}
+
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- unsigned char *buf, uint32_t buf_size) {
+ unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
struct jffs2_unknown_node *node;
struct jffs2_unknown_node crcnode;
+ struct jffs2_sum_marker *sm;
uint32_t ofs, prevofs;
uint32_t hdr_crc, buf_ofs, buf_len;
int err;
int noise = 0;
+
+
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
int cleanmarkerfound = 0;
#endif
@@ -319,17 +342,53 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
}
}
#endif
+
+ if (jffs2_sum_active()) {
+ sm = kmalloc(sizeof(struct jffs2_sum_marker), GFP_KERNEL);
+ if (!sm) {
+ return -ENOMEM;
+ }
+
+ err = jffs2_fill_scan_buf(c, (unsigned char *) sm, jeb->offset + c->sector_size -
+ sizeof(struct jffs2_sum_marker), sizeof(struct jffs2_sum_marker));
+ if (err) {
+ kfree(sm);
+ return err;
+ }
+
+ if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC ) {
+ err = jffs2_sum_scan_sumnode(c, jeb, je32_to_cpu(sm->offset), &pseudo_random);
+ if (err) {
+ kfree(sm);
+ return err;
+ }
+ }
+
+ kfree(sm);
+
+ ofs = jeb->offset;
+ prevofs = jeb->offset - 1;
+ }
+
buf_ofs = jeb->offset;
if (!buf_size) {
buf_len = c->sector_size;
+
+ if (jffs2_sum_active()) {
+ /* must reread because of summary test */
+ err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
+ if (err)
+ return err;
+ }
+
} else {
buf_len = EMPTY_SCAN_SIZE(c->sector_size);
err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
if (err)
return err;
}
-
+
/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
ofs = 0;
@@ -367,10 +426,12 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
noise = 10;
-scan_more:
+ dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);
+
+scan_more:
while(ofs < jeb->offset + c->sector_size) {
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
cond_resched();
@@ -432,7 +493,7 @@ scan_more:
/* If we're only checking the beginning of a block with a cleanmarker,
bail now */
- if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
+ if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) {
D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
return BLK_STATE_CLEANMARKER;
@@ -441,7 +502,7 @@ scan_more:
/* See how much more there is to read in this eraseblock... */
buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
if (!buf_len) {
- /* No more to read. Break out of main loop without marking
+ /* No more to read. Break out of main loop without marking
this range of empty space as dirty (because it's not) */
D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
empty_start));
@@ -476,8 +537,8 @@ scan_more:
}
if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
/* OK. We're out of possibilities. Whinge and move on */
- noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
- JFFS2_MAGIC_BITMASK, ofs,
+ noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
+ JFFS2_MAGIC_BITMASK, ofs,
je16_to_cpu(node->magic));
DIRTY_SPACE(4);
ofs += 4;
@@ -492,7 +553,7 @@ scan_more:
if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
ofs, je16_to_cpu(node->magic),
- je16_to_cpu(node->nodetype),
+ je16_to_cpu(node->nodetype),
je32_to_cpu(node->totlen),
je32_to_cpu(node->hdr_crc),
hdr_crc);
@@ -501,7 +562,7 @@ scan_more:
continue;
}
- if (ofs + je32_to_cpu(node->totlen) >
+ if (ofs + je32_to_cpu(node->totlen) >
jeb->offset + c->sector_size) {
/* Eep. Node goes over the end of the erase block. */
printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
@@ -532,11 +593,11 @@ scan_more:
buf_ofs = ofs;
node = (void *)buf;
}
- err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs);
+ err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
if (err) return err;
ofs += PAD(je32_to_cpu(node->totlen));
break;
-
+
case JFFS2_NODETYPE_DIRENT:
if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
@@ -548,7 +609,7 @@ scan_more:
buf_ofs = ofs;
node = (void *)buf;
}
- err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs);
+ err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
if (err) return err;
ofs += PAD(je32_to_cpu(node->totlen));
break;
@@ -556,7 +617,7 @@ scan_more:
case JFFS2_NODETYPE_CLEANMARKER:
D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
- printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
+ printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
ofs += PAD(sizeof(struct jffs2_unknown_node));
@@ -575,13 +636,15 @@ scan_more:
marker_ref->flash_offset = ofs | REF_NORMAL;
marker_ref->__totlen = c->cleanmarker_size;
jeb->first_node = jeb->last_node = marker_ref;
-
+
USED_SPACE(PAD(c->cleanmarker_size));
ofs += PAD(c->cleanmarker_size);
}
break;
case JFFS2_NODETYPE_PADDING:
+ if (jffs2_sum_active())
+ jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
ofs += PAD(je32_to_cpu(node->totlen));
break;
@@ -616,8 +679,15 @@ scan_more:
}
}
+ if (jffs2_sum_active()) {
+ if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
+ dbg_summary("There is not enough space for "
+ "summary information, disabling for this jeb!\n");
+ jffs2_sum_disable_collecting(s);
+ }
+ }
- D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset,
+ D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset,
jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));
/* mark_node_obsolete can add to wasted !! */
@@ -628,24 +698,10 @@ scan_more:
jeb->wasted_size = 0;
}
- if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
- && (!jeb->first_node || !jeb->first_node->next_phys) )
- return BLK_STATE_CLEANMARKER;
-
- /* move blocks with max 4 byte dirty space to cleanlist */
- else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
- c->dirty_size -= jeb->dirty_size;
- c->wasted_size += jeb->dirty_size;
- jeb->wasted_size += jeb->dirty_size;
- jeb->dirty_size = 0;
- return BLK_STATE_CLEAN;
- } else if (jeb->used_size || jeb->unchecked_size)
- return BLK_STATE_PARTDIRTY;
- else
- return BLK_STATE_ALLDIRTY;
+ return jffs2_scan_classify_jeb(c, jeb);
}
-static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
+struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
struct jffs2_inode_cache *ic;
@@ -671,8 +727,8 @@ static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info
return ic;
}
-static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct jffs2_raw_inode *ri, uint32_t ofs)
+static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
{
struct jffs2_raw_node_ref *raw;
struct jffs2_inode_cache *ic;
@@ -681,11 +737,11 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));
/* We do very little here now. Just check the ino# to which we should attribute
- this node; we can do all the CRC checking etc. later. There's a tradeoff here --
+ this node; we can do all the CRC checking etc. later. There's a tradeoff here --
we used to scan the flash once only, reading everything we want from it into
memory, then building all our in-core data structures and freeing the extra
information. Now we allow the first part of the mount to complete a lot quicker,
- but we have to go _back_ to the flash in order to finish the CRC checking, etc.
+ but we have to go _back_ to the flash in order to finish the CRC checking, etc.
Which means that the _full_ amount of time to get to proper write mode with GC
operational may actually be _longer_ than before. Sucks to be me. */
@@ -731,7 +787,7 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
jeb->last_node->next_phys = raw;
jeb->last_node = raw;
- D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
+ D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
je32_to_cpu(ri->offset),
je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));
@@ -739,11 +795,16 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
pseudo_random += je32_to_cpu(ri->version);
UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
+
+ if (jffs2_sum_active()) {
+ jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
+ }
+
return 0;
}
-static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct jffs2_raw_dirent *rd, uint32_t ofs)
+static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
{
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
@@ -776,7 +837,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
crc = crc32(0, fd->name, rd->nsize);
if (crc != je32_to_cpu(rd->name_crc)) {
printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- ofs, je32_to_cpu(rd->name_crc), crc);
+ ofs, je32_to_cpu(rd->name_crc), crc);
D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
jffs2_free_full_dirent(fd);
/* FIXME: Why do we believe totlen? */
@@ -796,7 +857,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
jffs2_free_raw_node_ref(raw);
return -ENOMEM;
}
-
+
raw->__totlen = PAD(je32_to_cpu(rd->totlen));
raw->flash_offset = ofs | REF_PRISTINE;
raw->next_phys = NULL;
@@ -817,6 +878,10 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
+ if (jffs2_sum_active()) {
+ jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
+ }
+
return 0;
}
@@ -852,76 +917,34 @@ void jffs2_rotate_lists(struct jffs2_sb_info *c)
x = count_list(&c->clean_list);
if (x) {
rotateby = pseudo_random % x;
- D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby));
-
rotate_list((&c->clean_list), rotateby);
-
- D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n",
- list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset));
- } else {
- D1(printk(KERN_DEBUG "Not rotating empty clean_list\n"));
}
x = count_list(&c->very_dirty_list);
if (x) {
rotateby = pseudo_random % x;
- D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby));
-
rotate_list((&c->very_dirty_list), rotateby);
-
- D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n",
- list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset));
- } else {
- D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n"));
}
x = count_list(&c->dirty_list);
if (x) {
rotateby = pseudo_random % x;
- D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby));
-
rotate_list((&c->dirty_list), rotateby);
-
- D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n",
- list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset));
- } else {
- D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n"));
}
x = count_list(&c->erasable_list);
if (x) {
rotateby = pseudo_random % x;
- D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby));
-
rotate_list((&c->erasable_list), rotateby);
-
- D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n",
- list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset));
- } else {
- D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n"));
}
if (c->nr_erasing_blocks) {
rotateby = pseudo_random % c->nr_erasing_blocks;
- D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby));
-
rotate_list((&c->erase_pending_list), rotateby);
-
- D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n",
- list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset));
- } else {
- D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n"));
}
if (c->nr_free_blocks) {
rotateby = pseudo_random % c->nr_free_blocks;
- D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby));
-
rotate_list((&c->free_list), rotateby);
-
- D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n",
- list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset));
- } else {
- D1(printk(KERN_DEBUG "Not rotating empty free_list\n"));
}
}
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
new file mode 100644
index 00000000000..fb9cec61fcf
--- /dev/null
+++ b/fs/jffs2/summary.c
@@ -0,0 +1,730 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ * Zoltan Sogor <weth@inf.u-szeged.hu>,
+ * Patrik Kluba <pajko@halom.u-szeged.hu>,
+ * University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: summary.c,v 1.4 2005/09/26 11:37:21 havasi Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/compiler.h>
+#include <linux/vmalloc.h>
+#include "nodelist.h"
+#include "debug.h"
+
+int jffs2_sum_init(struct jffs2_sb_info *c)
+{
+ c->summary = kmalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
+
+ if (!c->summary) {
+ JFFS2_WARNING("Can't allocate memory for summary information!\n");
+ return -ENOMEM;
+ }
+
+ memset(c->summary, 0, sizeof(struct jffs2_summary));
+
+ c->summary->sum_buf = vmalloc(c->sector_size);
+
+ if (!c->summary->sum_buf) {
+ JFFS2_WARNING("Can't allocate buffer for writing out summary information!\n");
+ kfree(c->summary);
+ return -ENOMEM;
+ }
+
+ dbg_summary("returned succesfully\n");
+
+ return 0;
+}
+
+void jffs2_sum_exit(struct jffs2_sb_info *c)
+{
+ dbg_summary("called\n");
+
+ jffs2_sum_disable_collecting(c->summary);
+
+ vfree(c->summary->sum_buf);
+ c->summary->sum_buf = NULL;
+
+ kfree(c->summary);
+ c->summary = NULL;
+}
+
+static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item)
+{
+ if (!s->sum_list_head)
+ s->sum_list_head = (union jffs2_sum_mem *) item;
+ if (s->sum_list_tail)
+ s->sum_list_tail->u.next = (union jffs2_sum_mem *) item;
+ s->sum_list_tail = (union jffs2_sum_mem *) item;
+
+ switch (je16_to_cpu(item->u.nodetype)) {
+ case JFFS2_NODETYPE_INODE:
+ s->sum_size += JFFS2_SUMMARY_INODE_SIZE;
+ s->sum_num++;
+ dbg_summary("inode (%u) added to summary\n",
+ je32_to_cpu(item->i.inode));
+ break;
+ case JFFS2_NODETYPE_DIRENT:
+ s->sum_size += JFFS2_SUMMARY_DIRENT_SIZE(item->d.nsize);
+ s->sum_num++;
+ dbg_summary("dirent (%u) added to summary\n",
+ je32_to_cpu(item->d.ino));
+ break;
+ default:
+ JFFS2_WARNING("UNKNOWN node type %u\n",
+ je16_to_cpu(item->u.nodetype));
+ return 1;
+ }
+ return 0;
+}
+
+
+/* The following 3 functions are called from scan.c to collect summary information for a jeb that has not been closed yet */
+
+int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size)
+{
+ dbg_summary("called with %u\n", size);
+ s->sum_padded += size;
+ return 0;
+}
+
+int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri,
+ uint32_t ofs)
+{
+ struct jffs2_sum_inode_mem *temp = kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);
+
+ if (!temp)
+ return -ENOMEM;
+
+ temp->nodetype = ri->nodetype;
+ temp->inode = ri->ino;
+ temp->version = ri->version;
+ temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */
+ temp->totlen = ri->totlen;
+ temp->next = NULL;
+
+ return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
+}
+
+int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd,
+ uint32_t ofs)
+{
+ struct jffs2_sum_dirent_mem *temp =
+ kmalloc(sizeof(struct jffs2_sum_dirent_mem) + rd->nsize, GFP_KERNEL);
+
+ if (!temp)
+ return -ENOMEM;
+
+ temp->nodetype = rd->nodetype;
+ temp->totlen = rd->totlen;
+ temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */
+ temp->pino = rd->pino;
+ temp->version = rd->version;
+ temp->ino = rd->ino;
+ temp->nsize = rd->nsize;
+ temp->type = rd->type;
+ temp->next = NULL;
+
+ memcpy(temp->name, rd->name, rd->nsize);
+
+ return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
+}
+
+/* Cleanup every collected summary information */
+
+static void jffs2_sum_clean_collected(struct jffs2_summary *s)
+{
+ union jffs2_sum_mem *temp;
+
+ if (!s->sum_list_head) {
+ dbg_summary("already empty\n");
+ }
+ while (s->sum_list_head) {
+ temp = s->sum_list_head;
+ s->sum_list_head = s->sum_list_head->u.next;
+ kfree(temp);
+ }
+ s->sum_list_tail = NULL;
+ s->sum_padded = 0;
+ s->sum_num = 0;
+}
+
+void jffs2_sum_reset_collected(struct jffs2_summary *s)
+{
+ dbg_summary("called\n");
+ jffs2_sum_clean_collected(s);
+ s->sum_size = 0;
+}
+
+void jffs2_sum_disable_collecting(struct jffs2_summary *s)
+{
+ dbg_summary("called\n");
+ jffs2_sum_clean_collected(s);
+ s->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
+}
+
+int jffs2_sum_is_disabled(struct jffs2_summary *s)
+{
+ return (s->sum_size == JFFS2_SUMMARY_NOSUM_SIZE);
+}
+
+/* Move the collected summary information into sb (called from scan.c) */
+
+void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s)
+{
+ dbg_summary("oldsize=0x%x oldnum=%u => newsize=0x%x newnum=%u\n",
+ c->summary->sum_size, c->summary->sum_num,
+ s->sum_size, s->sum_num);
+
+ c->summary->sum_size = s->sum_size;
+ c->summary->sum_num = s->sum_num;
+ c->summary->sum_padded = s->sum_padded;
+ c->summary->sum_list_head = s->sum_list_head;
+ c->summary->sum_list_tail = s->sum_list_tail;
+
+ s->sum_list_head = s->sum_list_tail = NULL;
+}
+
+/* Called from wbuf.c to collect info about nodes that have just been written */
+
+int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
+ unsigned long count, uint32_t ofs)
+{
+ union jffs2_node_union *node;
+ struct jffs2_eraseblock *jeb;
+
+ node = invecs[0].iov_base;
+ jeb = &c->blocks[ofs / c->sector_size];
+ ofs -= jeb->offset;
+
+ switch (je16_to_cpu(node->u.nodetype)) {
+ case JFFS2_NODETYPE_INODE: {
+ struct jffs2_sum_inode_mem *temp =
+ kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);
+
+ if (!temp)
+ goto no_mem;
+
+ temp->nodetype = node->i.nodetype;
+ temp->inode = node->i.ino;
+ temp->version = node->i.version;
+ temp->offset = cpu_to_je32(ofs);
+ temp->totlen = node->i.totlen;
+ temp->next = NULL;
+
+ return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
+ }
+
+ case JFFS2_NODETYPE_DIRENT: {
+ struct jffs2_sum_dirent_mem *temp =
+ kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL);
+
+ if (!temp)
+ goto no_mem;
+
+ temp->nodetype = node->d.nodetype;
+ temp->totlen = node->d.totlen;
+ temp->offset = cpu_to_je32(ofs);
+ temp->pino = node->d.pino;
+ temp->version = node->d.version;
+ temp->ino = node->d.ino;
+ temp->nsize = node->d.nsize;
+ temp->type = node->d.type;
+ temp->next = NULL;
+
+ switch (count) {
+ case 1:
+ memcpy(temp->name,node->d.name,node->d.nsize);
+ break;
+
+ case 2:
+ memcpy(temp->name,invecs[1].iov_base,node->d.nsize);
+ break;
+
+ default:
+ BUG(); /* impossible count value */
+ break;
+ }
+
+ return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
+ }
+
+ case JFFS2_NODETYPE_PADDING:
+ dbg_summary("node PADDING\n");
+ c->summary->sum_padded += je32_to_cpu(node->u.totlen);
+ break;
+
+ case JFFS2_NODETYPE_CLEANMARKER:
+ dbg_summary("node CLEANMARKER\n");
+ break;
+
+ case JFFS2_NODETYPE_SUMMARY:
+ dbg_summary("node SUMMARY\n");
+ break;
+
+ default:
+ /* If you implement a new node type you should also implement
+ summary support for it or disable summary.
+ */
+ BUG();
+ break;
+ }
+
+ return 0;
+
+no_mem:
+ JFFS2_WARNING("MEMORY ALLOCATION ERROR!");
+ return -ENOMEM;
+}
+
+
+/* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */
+
+static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_summary *summary, uint32_t *pseudo_random)
+{
+ struct jffs2_raw_node_ref *raw;
+ struct jffs2_inode_cache *ic;
+ struct jffs2_full_dirent *fd;
+ void *sp;
+ int i, ino;
+
+ sp = summary->sum;
+
+ for (i=0; i<je32_to_cpu(summary->sum_num); i++) {
+ dbg_summary("processing summary index %d\n", i);
+
+ switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) {
+ case JFFS2_NODETYPE_INODE: {
+ struct jffs2_sum_inode_flash *spi;
+ spi = sp;
+
+ ino = je32_to_cpu(spi->inode);
+
+ dbg_summary("Inode at 0x%08x\n",
+ jeb->offset + je32_to_cpu(spi->offset));
+
+ raw = jffs2_alloc_raw_node_ref();
+ if (!raw) {
+ JFFS2_NOTICE("allocation of node reference failed\n");
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ ic = jffs2_scan_make_ino_cache(c, ino);
+ if (!ic) {
+ JFFS2_NOTICE("scan_make_ino_cache failed\n");
+ jffs2_free_raw_node_ref(raw);
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ raw->flash_offset = (jeb->offset + je32_to_cpu(spi->offset)) | REF_UNCHECKED;
+ raw->__totlen = PAD(je32_to_cpu(spi->totlen));
+ raw->next_phys = NULL;
+ raw->next_in_ino = ic->nodes;
+
+ ic->nodes = raw;
+ if (!jeb->first_node)
+ jeb->first_node = raw;
+ if (jeb->last_node)
+ jeb->last_node->next_phys = raw;
+ jeb->last_node = raw;
+ *pseudo_random += je32_to_cpu(spi->version);
+
+ UNCHECKED_SPACE(PAD(je32_to_cpu(spi->totlen)));
+
+ sp += JFFS2_SUMMARY_INODE_SIZE;
+
+ break;
+ }
+
+ case JFFS2_NODETYPE_DIRENT: {
+ struct jffs2_sum_dirent_flash *spd;
+ spd = sp;
+
+ dbg_summary("Dirent at 0x%08x\n",
+ jeb->offset + je32_to_cpu(spd->offset));
+
+ fd = jffs2_alloc_full_dirent(spd->nsize+1);
+ if (!fd) {
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ memcpy(&fd->name, spd->name, spd->nsize);
+ fd->name[spd->nsize] = 0;
+
+ raw = jffs2_alloc_raw_node_ref();
+ if (!raw) {
+ jffs2_free_full_dirent(fd);
+ JFFS2_NOTICE("allocation of node reference failed\n");
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino));
+ if (!ic) {
+ jffs2_free_full_dirent(fd);
+ jffs2_free_raw_node_ref(raw);
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ raw->__totlen = PAD(je32_to_cpu(spd->totlen));
+ raw->flash_offset = (jeb->offset + je32_to_cpu(spd->offset)) | REF_PRISTINE;
+ raw->next_phys = NULL;
+ raw->next_in_ino = ic->nodes;
+ ic->nodes = raw;
+ if (!jeb->first_node)
+ jeb->first_node = raw;
+ if (jeb->last_node)
+ jeb->last_node->next_phys = raw;
+ jeb->last_node = raw;
+
+ fd->raw = raw;
+ fd->next = NULL;
+ fd->version = je32_to_cpu(spd->version);
+ fd->ino = je32_to_cpu(spd->ino);
+ fd->nhash = full_name_hash(fd->name, spd->nsize);
+ fd->type = spd->type;
+ USED_SPACE(PAD(je32_to_cpu(spd->totlen)));
+ jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
+
+ *pseudo_random += je32_to_cpu(spd->version);
+
+ sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize);
+
+ break;
+ }
+
+ default : {
+ JFFS2_WARNING("Unsupported node type found in summary! Exiting...");
+ kfree(summary);
+ return -EIO;
+ }
+ }
+ }
+
+ kfree(summary);
+ return 0;
+}
+
+/* Process the summary node - called from jffs2_scan_eraseblock() */
+
+int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ uint32_t ofs, uint32_t *pseudo_random)
+{
+ struct jffs2_unknown_node crcnode;
+ struct jffs2_raw_node_ref *cache_ref;
+ struct jffs2_raw_summary *summary;
+ int ret, sumsize;
+ uint32_t crc;
+
+ sumsize = c->sector_size - ofs;
+ ofs += jeb->offset;
+
+ dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n",
+ jeb->offset, ofs, sumsize);
+
+ summary = kmalloc(sumsize, GFP_KERNEL);
+
+ if (!summary) {
+ return -ENOMEM;
+ }
+
+ ret = jffs2_fill_scan_buf(c, (unsigned char *)summary, ofs, sumsize);
+
+ if (ret) {
+ kfree(summary);
+ return ret;
+ }
+
+ /* OK, now check for node validity and CRC */
+ crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ crcnode.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
+ crcnode.totlen = summary->totlen;
+ crc = crc32(0, &crcnode, sizeof(crcnode)-4);
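+ /* sizeof(crcnode) - 4 leaves the hdr_crc field itself out of the CRC,
+  * matching the convention used when the node header was written out. */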
+
+ if (je32_to_cpu(summary->hdr_crc) != crc) {
+ dbg_summary("Summary node header is corrupt (bad CRC or "
+ "no summary at all)\n");
+ goto crc_err;
+ }
+
+ if (je32_to_cpu(summary->totlen) != sumsize) {
+ dbg_summary("Summary node is corrupt (wrong erasesize?)\n");
+ goto crc_err;
+ }
+
+ crc = crc32(0, summary, sizeof(struct jffs2_raw_summary)-8);
+
+ if (je32_to_cpu(summary->node_crc) != crc) {
+ dbg_summary("Summary node is corrupt (bad CRC)\n");
+ goto crc_err;
+ }
+
+ crc = crc32(0, summary->sum, sumsize - sizeof(struct jffs2_raw_summary));
+
+ if (je32_to_cpu(summary->sum_crc) != crc) {
+ dbg_summary("Summary node data is corrupt (bad CRC)\n");
+ goto crc_err;
+ }
+
+ if ( je32_to_cpu(summary->cln_mkr) ) {
+
+ dbg_summary("Summary : CLEANMARKER node \n");
+
+ if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) {
+ dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n",
+ je32_to_cpu(summary->cln_mkr), c->cleanmarker_size);
+ UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr)));
+ } else if (jeb->first_node) {
+ dbg_summary("CLEANMARKER node not first node in block "
+ "(0x%08x)\n", jeb->offset);
+ UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr)));
+ } else {
+ struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
+
+ if (!marker_ref) {
+ JFFS2_NOTICE("Failed to allocate node ref for clean marker\n");
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ marker_ref->next_in_ino = NULL;
+ marker_ref->next_phys = NULL;
+ marker_ref->flash_offset = jeb->offset | REF_NORMAL;
+ marker_ref->__totlen = je32_to_cpu(summary->cln_mkr);
+ jeb->first_node = jeb->last_node = marker_ref;
+
+ USED_SPACE( PAD(je32_to_cpu(summary->cln_mkr)) );
+ }
+ }
+
+ if (je32_to_cpu(summary->padded)) {
+ DIRTY_SPACE(je32_to_cpu(summary->padded));
+ }
+
+ ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random);
+ if (ret)
+ return ret;
+
+ /* for PARANOIA_CHECK */
+ cache_ref = jffs2_alloc_raw_node_ref();
+
+ if (!cache_ref) {
+ JFFS2_NOTICE("Failed to allocate node ref for cache\n");
+ return -ENOMEM;
+ }
+
+ cache_ref->next_in_ino = NULL;
+ cache_ref->next_phys = NULL;
+ cache_ref->flash_offset = ofs | REF_NORMAL;
+ cache_ref->__totlen = sumsize;
+
+ if (!jeb->first_node)
+ jeb->first_node = cache_ref;
+ if (jeb->last_node)
+ jeb->last_node->next_phys = cache_ref;
+ jeb->last_node = cache_ref;
+
+ USED_SPACE(sumsize);
+
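+ /* Once the summary node is accounted for, the block is programmed right up
+  * to its end, so any space still counted as free is unusable; fold it into
+  * wasted_size before classifying the block. */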
+ jeb->wasted_size += jeb->free_size;
+ c->wasted_size += jeb->free_size;
+ c->free_size -= jeb->free_size;
+ jeb->free_size = 0;
+
+ return jffs2_scan_classify_jeb(c, jeb);
+
+crc_err:
+ JFFS2_WARNING("Summary node crc error, skipping summary information.\n");
+
+ kfree(summary);
+ return 0;
+}
+
+/* Write summary data to flash - helper function for jffs2_sum_write_sumnode() */
+
+static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ uint32_t infosize, uint32_t datasize, int padsize)
+{
+ struct jffs2_raw_summary isum;
+ union jffs2_sum_mem *temp;
+ struct jffs2_sum_marker *sm;
+ struct kvec vecs[2];
+ void *wpage;
+ int ret;
+ size_t retlen;
+
+ memset(c->summary->sum_buf, 0xff, datasize);
+ memset(&isum, 0, sizeof(isum));
+
+ isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
+ isum.totlen = cpu_to_je32(infosize);
+ isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4));
+ isum.padded = cpu_to_je32(c->summary->sum_padded);
+ isum.cln_mkr = cpu_to_je32(c->cleanmarker_size);
+ isum.sum_num = cpu_to_je32(c->summary->sum_num);
+ wpage = c->summary->sum_buf;
+
+ while (c->summary->sum_num) {
+
+ switch (je16_to_cpu(c->summary->sum_list_head->u.nodetype)) {
+ case JFFS2_NODETYPE_INODE: {
+ struct jffs2_sum_inode_flash *sino_ptr = wpage;
+
+ sino_ptr->nodetype = c->summary->sum_list_head->i.nodetype;
+ sino_ptr->inode = c->summary->sum_list_head->i.inode;
+ sino_ptr->version = c->summary->sum_list_head->i.version;
+ sino_ptr->offset = c->summary->sum_list_head->i.offset;
+ sino_ptr->totlen = c->summary->sum_list_head->i.totlen;
+
+ wpage += JFFS2_SUMMARY_INODE_SIZE;
+
+ break;
+ }
+
+ case JFFS2_NODETYPE_DIRENT: {
+ struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage;
+
+ sdrnt_ptr->nodetype = c->summary->sum_list_head->d.nodetype;
+ sdrnt_ptr->totlen = c->summary->sum_list_head->d.totlen;
+ sdrnt_ptr->offset = c->summary->sum_list_head->d.offset;
+ sdrnt_ptr->pino = c->summary->sum_list_head->d.pino;
+ sdrnt_ptr->version = c->summary->sum_list_head->d.version;
+ sdrnt_ptr->ino = c->summary->sum_list_head->d.ino;
+ sdrnt_ptr->nsize = c->summary->sum_list_head->d.nsize;
+ sdrnt_ptr->type = c->summary->sum_list_head->d.type;
+
+ memcpy(sdrnt_ptr->name, c->summary->sum_list_head->d.name,
+ c->summary->sum_list_head->d.nsize);
+
+ wpage += JFFS2_SUMMARY_DIRENT_SIZE(c->summary->sum_list_head->d.nsize);
+
+ break;
+ }
+
+ default : {
+ BUG(); /* unknown node in summary information */
+ }
+ }
+
+ temp = c->summary->sum_list_head;
+ c->summary->sum_list_head = c->summary->sum_list_head->u.next;
+ kfree(temp);
+
+ c->summary->sum_num--;
+ }
+
+ jffs2_sum_reset_collected(c->summary);
+
+ wpage += padsize;
+
+ sm = wpage;
+ sm->offset = cpu_to_je32(c->sector_size - jeb->free_size);
+ sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC);
+
+ isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize));
+ isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8));
+
+ vecs[0].iov_base = &isum;
+ vecs[0].iov_len = sizeof(isum);
+ vecs[1].iov_base = c->summary->sum_buf;
+ vecs[1].iov_len = datasize;
+
+ dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n",
+ jeb->offset + c->sector_size - jeb->free_size);
+
+ spin_unlock(&c->erase_completion_lock);
+ ret = jffs2_flash_writev(c, vecs, 2, jeb->offset + c->sector_size -
+ jeb->free_size, &retlen, 0);
+ spin_lock(&c->erase_completion_lock);
+
+
+ if (ret || (retlen != infosize)) {
+ JFFS2_WARNING("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
+ infosize, jeb->offset + c->sector_size - jeb->free_size, ret, retlen);
+
+ c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
+ WASTED_SPACE(infosize);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Write out summary information - called from jffs2_do_reserve_space */
+
+int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
+{
+ struct jffs2_raw_node_ref *summary_ref;
+ int datasize, infosize, padsize, ret;
+ struct jffs2_eraseblock *jeb;
+
+ dbg_summary("called\n");
+
+ jeb = c->nextblock;
+
+ if (!c->summary->sum_num || !c->summary->sum_list_head) {
+ JFFS2_WARNING("Empty summary info!!!\n");
+ BUG();
+ }
+
+ datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker);
+ infosize = sizeof(struct jffs2_raw_summary) + datasize;
+ padsize = jeb->free_size - infosize;
+ infosize += padsize;
+ datasize += padsize;
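+ /* After folding padsize in, infosize equals the block's remaining free_size,
+  * i.e. the summary node is sized so that it ends exactly at the end of the
+  * eraseblock, where the scan code expects to find the marker. */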
+
+ /* Is there enough space for summary? */
+ if (padsize < 0) {
+ /* don't try to write out summary for this jeb */
+ jffs2_sum_disable_collecting(c->summary);
+
+ JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize);
+ return 0;
+ }
+
+ ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);
+ if (ret)
+ return 0; /* can't write out summary, block is marked as NOSUM_SIZE */
+
+ /* for ACCT_PARANOIA_CHECK */
+ spin_unlock(&c->erase_completion_lock);
+ summary_ref = jffs2_alloc_raw_node_ref();
+ spin_lock(&c->erase_completion_lock);
+
+ if (!summary_ref) {
+ JFFS2_NOTICE("Failed to allocate node ref for summary\n");
+ return -ENOMEM;
+ }
+
+ summary_ref->next_in_ino = NULL;
+ summary_ref->next_phys = NULL;
+ summary_ref->flash_offset = (jeb->offset + c->sector_size - jeb->free_size) | REF_NORMAL;
+ summary_ref->__totlen = infosize;
+
+ if (!jeb->first_node)
+ jeb->first_node = summary_ref;
+ if (jeb->last_node)
+ jeb->last_node->next_phys = summary_ref;
+ jeb->last_node = summary_ref;
+
+ USED_SPACE(infosize);
+
+ return 0;
+}
diff --git a/fs/jffs2/summary.h b/fs/jffs2/summary.h
new file mode 100644
index 00000000000..b7a678be170
--- /dev/null
+++ b/fs/jffs2/summary.h
@@ -0,0 +1,183 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ * Zoltan Sogor <weth@inf.u-szeged.hu>,
+ * Patrik Kluba <pajko@halom.u-szeged.hu>,
+ * University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: summary.h,v 1.2 2005/09/26 11:37:21 havasi Exp $
+ *
+ */
+
+#ifndef JFFS2_SUMMARY_H
+#define JFFS2_SUMMARY_H
+
+#include <linux/uio.h>
+#include <linux/jffs2.h>
+
+#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
+ c->free_size -= _x; c->dirty_size += _x; \
+ jeb->free_size -= _x ; jeb->dirty_size += _x; \
+ }while(0)
+#define USED_SPACE(x) do { typeof(x) _x = (x); \
+ c->free_size -= _x; c->used_size += _x; \
+ jeb->free_size -= _x ; jeb->used_size += _x; \
+ }while(0)
+#define WASTED_SPACE(x) do { typeof(x) _x = (x); \
+ c->free_size -= _x; c->wasted_size += _x; \
+ jeb->free_size -= _x ; jeb->wasted_size += _x; \
+ }while(0)
+#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
+ c->free_size -= _x; c->unchecked_size += _x; \
+ jeb->free_size -= _x ; jeb->unchecked_size += _x; \
+ }while(0)
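+/* Each helper above adjusts the filesystem-wide counters in 'c' and the
+ * per-eraseblock counters in 'jeb' together, keeping the two views of
+ * free/dirty/used/unchecked space consistent. */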
+
+#define BLK_STATE_ALLFF 0
+#define BLK_STATE_CLEAN 1
+#define BLK_STATE_PARTDIRTY 2
+#define BLK_STATE_CLEANMARKER 3
+#define BLK_STATE_ALLDIRTY 4
+#define BLK_STATE_BADBLOCK 5
+
+#define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff
+#define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash))
+#define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x))
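+/* With the packed on-flash layouts below, JFFS2_SUMMARY_INODE_SIZE works out
+ * to 18 bytes per inode record and JFFS2_SUMMARY_DIRENT_SIZE(nsize) to
+ * 24 + nsize bytes per dirent record. */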
+
+/* Summary structures used on flash */
+
+struct jffs2_sum_unknown_flash
+{
+ jint16_t nodetype; /* node type */
+};
+
+struct jffs2_sum_inode_flash
+{
+ jint16_t nodetype; /* node type */
+ jint32_t inode; /* inode number */
+ jint32_t version; /* inode version */
+ jint32_t offset; /* offset on jeb */
+ jint32_t totlen; /* record length */
+} __attribute__((packed));
+
+struct jffs2_sum_dirent_flash
+{
+ jint16_t nodetype; /* == JFFS2_NODETYPE_DIRENT */
+ jint32_t totlen; /* record length */
+ jint32_t offset; /* offset on jeb */
+ jint32_t pino; /* parent inode */
+ jint32_t version; /* dirent version */
+ jint32_t ino; /* == zero for unlink */
+ uint8_t nsize; /* dirent name size */
+ uint8_t type; /* dirent type */
+ uint8_t name[0]; /* dirent name */
+} __attribute__((packed));
+
+union jffs2_sum_flash
+{
+ struct jffs2_sum_unknown_flash u;
+ struct jffs2_sum_inode_flash i;
+ struct jffs2_sum_dirent_flash d;
+};
+
+/* Summary structures used in the memory */
+
+struct jffs2_sum_unknown_mem
+{
+ union jffs2_sum_mem *next;
+ jint16_t nodetype; /* node type */
+};
+
+struct jffs2_sum_inode_mem
+{
+ union jffs2_sum_mem *next;
+ jint16_t nodetype; /* node type */
+ jint32_t inode; /* inode number */
+ jint32_t version; /* inode version */
+ jint32_t offset; /* offset on jeb */
+ jint32_t totlen; /* record length */
+} __attribute__((packed));
+
+struct jffs2_sum_dirent_mem
+{
+ union jffs2_sum_mem *next;
+ jint16_t nodetype; /* == JFFS2_NODETYPE_DIRENT */
+ jint32_t totlen; /* record length */
+ jint32_t offset; /* offset on jeb */
+ jint32_t pino; /* parent inode */
+ jint32_t version; /* dirent version */
+ jint32_t ino; /* == zero for unlink */
+ uint8_t nsize; /* dirent name size */
+ uint8_t type; /* dirent type */
+ uint8_t name[0]; /* dirent name */
+} __attribute__((packed));
+
+union jffs2_sum_mem
+{
+ struct jffs2_sum_unknown_mem u;
+ struct jffs2_sum_inode_mem i;
+ struct jffs2_sum_dirent_mem d;
+};
+
+/* Summary related information stored in superblock */
+
+struct jffs2_summary
+{
+ uint32_t sum_size; /* collected summary information for nextblock */
+ uint32_t sum_num;
+ uint32_t sum_padded;
+ union jffs2_sum_mem *sum_list_head;
+ union jffs2_sum_mem *sum_list_tail;
+
+ jint32_t *sum_buf; /* buffer for writing out summary */
+};
+
+/* Summary marker is stored at the end of every summarized erase block */
+
+struct jffs2_sum_marker
+{
+ jint32_t offset; /* offset of the summary node in the jeb */
+ jint32_t magic; /* == JFFS2_SUM_MAGIC */
+};
+
+#define JFFS2_SUMMARY_FRAME_SIZE (sizeof(struct jffs2_raw_summary) + sizeof(struct jffs2_sum_marker))
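+/* i.e. the fixed overhead of writing a summary: the jffs2_raw_summary header
+ * plus the 8-byte jffs2_sum_marker at the very end of the block. */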
+
+#ifdef CONFIG_JFFS2_SUMMARY /* SUMMARY SUPPORT ENABLED */
+
+#define jffs2_sum_active() (1)
+int jffs2_sum_init(struct jffs2_sb_info *c);
+void jffs2_sum_exit(struct jffs2_sb_info *c);
+void jffs2_sum_disable_collecting(struct jffs2_summary *s);
+int jffs2_sum_is_disabled(struct jffs2_summary *s);
+void jffs2_sum_reset_collected(struct jffs2_summary *s);
+void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s);
+int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
+ unsigned long count, uint32_t to);
+int jffs2_sum_write_sumnode(struct jffs2_sb_info *c);
+int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size);
+int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs);
+int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs);
+int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ uint32_t ofs, uint32_t *pseudo_random);
+
+#else /* SUMMARY DISABLED */
+
+#define jffs2_sum_active() (0)
+#define jffs2_sum_init(a) (0)
+#define jffs2_sum_exit(a)
+#define jffs2_sum_disable_collecting(a)
+#define jffs2_sum_is_disabled(a) (0)
+#define jffs2_sum_reset_collected(a)
+#define jffs2_sum_add_kvec(a,b,c,d) (0)
+#define jffs2_sum_move_collected(a,b)
+#define jffs2_sum_write_sumnode(a) (0)
+#define jffs2_sum_add_padding_mem(a,b)
+#define jffs2_sum_add_inode_mem(a,b,c)
+#define jffs2_sum_add_dirent_mem(a,b,c)
+#define jffs2_sum_scan_sumnode(a,b,c,d) (0)
+
+#endif /* CONFIG_JFFS2_SUMMARY */
+
+#endif /* JFFS2_SUMMARY_H */
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index aaf9475cfb6..9e0b5458d9c 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: super.c,v 1.107 2005/07/12 16:37:08 dedekind Exp $
+ * $Id: super.c,v 1.110 2005/11/07 11:14:42 gleixner Exp $
*
*/
@@ -62,7 +62,7 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
down(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
- up(&c->alloc_sem);
+ up(&c->alloc_sem);
return 0;
}
@@ -112,7 +112,7 @@ static int jffs2_sb_set(struct super_block *sb, void *data)
}
static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
- int flags, const char *dev_name,
+ int flags, const char *dev_name,
void *data, struct mtd_info *mtd)
{
struct super_block *sb;
@@ -172,7 +172,7 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
}
static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type,
- int flags, const char *dev_name,
+ int flags, const char *dev_name,
void *data, int mtdnr)
{
struct mtd_info *mtd;
@@ -201,7 +201,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
/* The preferred way of mounting in future; especially when
CONFIG_BLK_DEV is implemented - we specify the underlying
- MTD device by number or by name, so that we don't require
+ MTD device by number or by name, so that we don't require
block device support to be present in the kernel. */
/* FIXME: How to do the root fs this way? */
@@ -225,7 +225,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
} else if (isdigit(dev_name[3])) {
/* Mount by MTD device number name */
char *endptr;
-
+
mtdnr = simple_strtoul(dev_name+3, &endptr, 0);
if (!*endptr) {
/* It was a valid number */
@@ -235,7 +235,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
}
}
- /* Try the old way - the hack where we allowed users to mount
+ /* Try the old way - the hack where we allowed users to mount
/dev/mtdblock$(n) but didn't actually _use_ the blkdev */
err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
@@ -282,9 +282,12 @@ static void jffs2_put_super (struct super_block *sb)
down(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
up(&c->alloc_sem);
+
+ jffs2_sum_exit(c);
+
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
- if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
+ if (jffs2_blocks_use_vmalloc(c))
vfree(c->blocks);
else
kfree(c->blocks);
@@ -321,6 +324,9 @@ static int __init init_jffs2_fs(void)
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
" (NAND)"
#endif
+#ifdef CONFIG_JFFS2_SUMMARY
+ " (SUMMARY) "
+#endif
" (C) 2001-2003 Red Hat, Inc.\n");
jffs2_inode_cachep = kmem_cache_create("jffs2_i",
@@ -370,5 +376,5 @@ module_exit(exit_jffs2_fs);
MODULE_DESCRIPTION("The Journalling Flash File System, v2");
MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for
+MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for
// the sake of this tag. It's Free Software.
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index 82ef484f5e1..d55754fe892 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: symlink.c,v 1.16 2005/03/01 10:50:48 dedekind Exp $
+ * $Id: symlink.c,v 1.19 2005/11/07 11:14:42 gleixner Exp $
*
*/
@@ -21,7 +21,7 @@
static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
struct inode_operations jffs2_symlink_inode_operations =
-{
+{
.readlink = generic_readlink,
.follow_link = jffs2_follow_link,
.setattr = jffs2_setattr
@@ -30,35 +30,33 @@ struct inode_operations jffs2_symlink_inode_operations =
static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
- char *p = (char *)f->dents;
-
+ char *p = (char *)f->target;
+
/*
* We don't acquire the f->sem mutex here since the only data we
- * use is f->dents which in case of the symlink inode points to the
- * symlink's target path.
+ * use is f->target.
*
- * 1. If we are here the inode has already built and f->dents has
+ * 1. If we are here the inode has already built and f->target has
* to point to the target path.
- * 2. Nobody uses f->dents (if the inode is symlink's inode). The
- * exception is inode freeing function which frees f->dents. But
+ * 2. Nobody uses f->target (if the inode is symlink's inode). The
+ * exception is inode freeing function which frees f->target. But
* it can't be called while we are here and before VFS has
- * stopped using our f->dents string which we provide by means of
+ * stopped using our f->target string which we provide by means of
* nd_set_link() call.
*/
-
+
if (!p) {
printk(KERN_ERR "jffs2_follow_link(): can't find symlink taerget\n");
p = ERR_PTR(-EIO);
- } else {
- D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->dents));
}
+ D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target));
nd_set_link(nd, p);
-
+
/*
- * We unlock the f->sem mutex but VFS will use the f->dents string. This is safe
- * since the only way that may cause f->dents to be changed is iput() operation.
- * But VFS will not use f->dents after iput() has been called.
+ * We will unlock the f->sem mutex but VFS will use the f->target string. This is safe
+ * since the only way that may cause f->target to be changed is iput() operation.
+ * But VFS will not use f->target after iput() has been called.
*/
return NULL;
}
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 316133c626b..4cebf0e57c4 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -9,7 +9,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: wbuf.c,v 1.92 2005/04/05 12:51:54 dedekind Exp $
+ * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
*
*/
@@ -30,12 +30,12 @@
static unsigned char *brokenbuf;
#endif
+#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
+#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
+
/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES 2
-/* two seconds timeout for timed wbuf-flushing */
-#define WBUF_FLUSH_TIMEOUT 2 * HZ
-
struct jffs2_inodirty {
uint32_t ino;
struct jffs2_inodirty *next;
@@ -139,7 +139,6 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
{
D1(printk("About to refile bad block at %08x\n", jeb->offset));
- D2(jffs2_dump_block_lists(c));
/* File the existing block on the bad_used_list.... */
if (c->nextblock == jeb)
c->nextblock = NULL;
@@ -156,7 +155,6 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
c->nr_erasing_blocks++;
jffs2_erase_pending_trigger(c);
}
- D2(jffs2_dump_block_lists(c));
/* Adjust its size counts accordingly */
c->wasted_size += jeb->free_size;
@@ -164,8 +162,9 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
jeb->wasted_size += jeb->free_size;
jeb->free_size = 0;
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_dump_block_lists_nolock(c);
+ jffs2_dbg_acct_sanity_check_nolock(c,jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
/* Recover from failure to write wbuf. Recover the nodes up to the
@@ -189,7 +188,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
/* Find the first node to be recovered, by skipping over every
node which ends before the wbuf starts, or which is obsolete. */
first_raw = &jeb->first_node;
- while (*first_raw &&
+ while (*first_raw &&
(ref_obsolete(*first_raw) ||
(ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
@@ -238,7 +237,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
else
ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
-
+
if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
/* ECC recovered */
ret = 0;
@@ -266,7 +265,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
/* ... and get an allocation of space from a shiny new block instead */
- ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len);
+ ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE);
if (ret) {
printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
kfree(buf);
@@ -275,15 +274,15 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
if (end-start >= c->wbuf_pagesize) {
/* Need to do another write immediately, but it's possible
that this is just because the wbuf itself is completely
- full, and there's nothing earlier read back from the
- flash. Hence 'buf' isn't necessarily what we're writing
+ full, and there's nothing earlier read back from the
+ flash. Hence 'buf' isn't necessarily what we're writing
from. */
unsigned char *rewrite_buf = buf?:c->wbuf;
uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
towrite, ofs));
-
+
#ifdef BREAKMEHEADER
static int breakme;
if (breakme++ == 20) {
@@ -327,8 +326,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
c->wbuf_ofs = ofs + towrite;
memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
- if (buf)
- kfree(buf);
+ kfree(buf);
} else {
/* OK, now we're left with the dregs in whichever buffer we're using */
if (buf) {
@@ -392,11 +390,11 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
else
jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
- ACCT_SANITY_CHECK(c,new_jeb);
- D1(ACCT_PARANOIA_CHECK(new_jeb));
+ jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
spin_unlock(&c->erase_completion_lock);
@@ -435,15 +433,15 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
this happens, if we have a change to a new block,
or if fsync forces us to flush the writebuffer.
if we have a switch to next page, we will not have
- enough remaining space for this.
+ enough remaining space for this.
*/
- if (pad && !jffs2_dataflash(c)) {
+ if (pad ) {
c->wbuf_len = PAD(c->wbuf_len);
/* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
with 8 byte page size */
memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
-
+
if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -454,7 +452,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
}
/* else jffs2_flash_writev has actually filled in the rest of the
buffer for us, and will deal with the node refs etc. later. */
-
+
#ifdef BREAKME
static int breakme;
if (breakme++ == 20) {
@@ -463,9 +461,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
&retlen, brokenbuf, NULL, c->oobinfo);
ret = -EIO;
- } else
+ } else
#endif
-
+
if (jffs2_cleanmarker_oob(c))
ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
else
@@ -488,7 +486,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
spin_lock(&c->erase_completion_lock);
/* Adjust free size of the block if we padded. */
- if (pad && !jffs2_dataflash(c)) {
+ if (pad) {
struct jffs2_eraseblock *jeb;
jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
@@ -496,7 +494,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
(jeb==c->nextblock)?"next":"", jeb->offset));
- /* wbuf_pagesize - wbuf_len is the amount of space that's to be
+ /* wbuf_pagesize - wbuf_len is the amount of space that's to be
padded. If there is less free space in the block than that,
something screwed up */
if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
@@ -524,9 +522,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
return 0;
}
-/* Trigger garbage collection to flush the write-buffer.
+/* Trigger garbage collection to flush the write-buffer.
If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
- outstanding. If ino arg non-zero, do it only if a write for the
+ outstanding. If ino arg non-zero, do it only if a write for the
given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
@@ -605,15 +603,6 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
return ret;
}
-
-#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
-#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
-#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
-#else
-#define PAGE_DIV(x) ( (x) & (~(c->wbuf_pagesize - 1)) )
-#define PAGE_MOD(x) ( (x) & (c->wbuf_pagesize - 1) )
-#endif
-
int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
{
struct kvec outvecs[3];
@@ -630,13 +619,13 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
/* If not NAND flash, don't bother */
if (!jffs2_is_writebuffered(c))
return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
-
+
down_write(&c->wbuf_sem);
/* If wbuf_ofs is not initialized, set it to target address */
if (c->wbuf_ofs == 0xFFFFFFFF) {
c->wbuf_ofs = PAGE_DIV(to);
- c->wbuf_len = PAGE_MOD(to);
+ c->wbuf_len = PAGE_MOD(to);
memset(c->wbuf,0xff,c->wbuf_pagesize);
}
@@ -650,10 +639,10 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
memset(c->wbuf,0xff,c->wbuf_pagesize);
}
}
-
- /* Sanity checks on target address.
- It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
- and it's permitted to write at the beginning of a new
+
+ /* Sanity checks on target address.
+ It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
+ and it's permitted to write at the beginning of a new
erase block. Anything else, and you die.
New block starts at xxx000c (0-b = block header)
*/
@@ -671,8 +660,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
}
/* set pointer to new block */
c->wbuf_ofs = PAGE_DIV(to);
- c->wbuf_len = PAGE_MOD(to);
- }
+ c->wbuf_len = PAGE_MOD(to);
+ }
if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
/* We're not writing immediately after the writebuffer. Bad. */
@@ -692,21 +681,21 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
invec = 0;
outvec = 0;
- /* Fill writebuffer first, if already in use */
+ /* Fill writebuffer first, if already in use */
if (c->wbuf_len) {
uint32_t invec_ofs = 0;
- /* adjust alignment offset */
+ /* adjust alignment offset */
if (c->wbuf_len != PAGE_MOD(to)) {
c->wbuf_len = PAGE_MOD(to);
/* take care of alignment to next page */
if (!c->wbuf_len)
c->wbuf_len = c->wbuf_pagesize;
}
-
+
while(c->wbuf_len < c->wbuf_pagesize) {
uint32_t thislen;
-
+
if (invec == count)
goto alldone;
@@ -714,17 +703,17 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
if (thislen >= invecs[invec].iov_len)
thislen = invecs[invec].iov_len;
-
+
invec_ofs = thislen;
memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
c->wbuf_len += thislen;
donelen += thislen;
/* Get next invec, if actual did not fill the buffer */
- if (c->wbuf_len < c->wbuf_pagesize)
+ if (c->wbuf_len < c->wbuf_pagesize)
invec++;
- }
-
+ }
+
/* write buffer is full, flush buffer */
ret = __jffs2_flush_wbuf(c, NOPAD);
if (ret) {
@@ -783,10 +772,10 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
/* We did cross a page boundary, so we write some now */
if (jffs2_cleanmarker_oob(c))
- ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
+ ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
else
ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
-
+
if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
/* At this point we have no problem,
c->wbuf is empty. However refile nextblock to avoid
@@ -803,7 +792,7 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
spin_unlock(&c->erase_completion_lock);
goto exit;
}
-
+
donelen += wbuf_retlen;
c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
@@ -837,11 +826,17 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
alldone:
*retlen = donelen;
+ if (jffs2_sum_active()) {
+ int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
+ if (res)
+ return res;
+ }
+
if (c->wbuf_len && ino)
jffs2_wbuf_dirties_inode(c, ino);
ret = 0;
-
+
exit:
up_write(&c->wbuf_sem);
return ret;
@@ -856,7 +851,7 @@ int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *r
struct kvec vecs[1];
if (!jffs2_is_writebuffered(c))
- return c->mtd->write(c->mtd, ofs, len, retlen, buf);
+ return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
vecs[0].iov_base = (unsigned char *) buf;
vecs[0].iov_len = len;
@@ -884,18 +879,18 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
if ( (ret == -EBADMSG) && (*retlen == len) ) {
printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
len, ofs);
- /*
- * We have the raw data without ECC correction in the buffer, maybe
+ /*
+ * We have the raw data without ECC correction in the buffer, maybe
* we are lucky and all data or parts are correct. We check the node.
* If data are corrupted node check will sort it out.
* We keep this block, it will fail on write or erase and the we
* mark it bad. Or should we do that now? But we should give him a chance.
- * Maybe we had a system crash or power loss before the ecc write or
+ * Maybe we had a system crash or power loss before the ecc write or
* a erase was completed.
* So we return success. :)
*/
ret = 0;
- }
+ }
/* if no writebuffer available or write buffer empty, return */
if (!c->wbuf_pagesize || !c->wbuf_len)
@@ -910,16 +905,16 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
if (owbf > c->wbuf_len) /* is read beyond write buffer ? */
goto exit;
lwbf = c->wbuf_len - owbf; /* number of bytes to copy */
- if (lwbf > len)
+ if (lwbf > len)
lwbf = len;
- } else {
+ } else {
orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
if (orbf > len) /* is write beyond write buffer ? */
goto exit;
lwbf = len - orbf; /* number of bytes to copy */
- if (lwbf > c->wbuf_len)
+ if (lwbf > c->wbuf_len)
lwbf = c->wbuf_len;
- }
+ }
if (lwbf > 0)
memcpy(buf+orbf,c->wbuf+owbf,lwbf);
@@ -947,7 +942,7 @@ int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
return -ENOMEM;
}
- /*
+ /*
* if mode = 0, we scan for a total empty oob area, else we have
* to take care of the cleanmarker in the first page of the block
*/
@@ -956,41 +951,41 @@ int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
goto out;
}
-
+
if (retlen < len) {
D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
"(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
ret = -EIO;
goto out;
}
-
+
/* Special check for first page */
for(i = 0; i < oob_size ; i++) {
/* Yeah, we know about the cleanmarker. */
- if (mode && i >= c->fsdata_pos &&
+ if (mode && i >= c->fsdata_pos &&
i < c->fsdata_pos + c->fsdata_len)
continue;
if (buf[i] != 0xFF) {
D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
- buf[page+i], page+i, jeb->offset));
- ret = 1;
+ buf[i], i, jeb->offset));
+ ret = 1;
goto out;
}
}
- /* we know, we are aligned :) */
+ /* we know, we are aligned :) */
for (page = oob_size; page < len; page += sizeof(long)) {
unsigned long dat = *(unsigned long *)(&buf[page]);
if(dat != -1) {
- ret = 1;
+ ret = 1;
goto out;
}
}
out:
- kfree(buf);
-
+ kfree(buf);
+
return ret;
}
@@ -1072,7 +1067,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc
n.totlen = cpu_to_je32(8);
ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
-
+
if (ret) {
D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
return ret;
@@ -1084,7 +1079,7 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc
return 0;
}
-/*
+/*
* On NAND we try to mark this block bad. If the block was erased more
* than MAX_ERASE_FAILURES we mark it finaly bad.
* Don't care about failures. This block remains on the erase-pending
@@ -1105,7 +1100,7 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
ret = c->mtd->block_markbad(c->mtd, bad_offset);
-
+
if (ret) {
D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
return ret;
@@ -1129,7 +1124,7 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
/* Do this only, if we have an oob buffer */
if (!c->mtd->oobsize)
return 0;
-
+
/* Cleanmarker is out-of-band, so inline size zero */
c->cleanmarker_size = 0;
@@ -1155,7 +1150,7 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
c->badblock_pos = 15;
break;
-
+
default:
D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n"));
return -EINVAL;
@@ -1172,7 +1167,7 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
init_rwsem(&c->wbuf_sem);
c->wbuf_pagesize = c->mtd->oobblock;
c->wbuf_ofs = 0xFFFFFFFF;
-
+
c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
if (!c->wbuf)
return -ENOMEM;
@@ -1198,17 +1193,41 @@ void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
c->cleanmarker_size = 0; /* No cleanmarkers needed */
-
+
/* Initialize write buffer */
init_rwsem(&c->wbuf_sem);
- c->wbuf_pagesize = c->sector_size;
- c->wbuf_ofs = 0xFFFFFFFF;
+
+ c->wbuf_pagesize = c->mtd->erasesize;
+
+	/* Find a suitable c->sector_size
+	 * - Not too many sectors
+	 * - Sectors have to be at least 4 KiB + some bytes
+	 * - All known dataflashes have erase sizes of 528 or 1056 bytes
+	 * - We take at least 8 eraseblocks and want to have at least 8 KiB size
+	 * - The concatenation should be a power of 2
+	 */
+
+ c->sector_size = 8 * c->mtd->erasesize;
+
+ while (c->sector_size < 8192) {
+ c->sector_size *= 2;
+ }
+
+ /* It may be necessary to adjust the flash size */
+ c->flash_size = c->mtd->size;
+
+ if ((c->flash_size % c->sector_size) != 0) {
+ c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
+ printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
+ };
+
+ c->wbuf_ofs = 0xFFFFFFFF;
c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
if (!c->wbuf)
return -ENOMEM;
- printk(KERN_INFO "JFFS2 write-buffering enabled (%i)\n", c->wbuf_pagesize);
+ printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
return 0;
}
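
The geometry calculation above is compact, so the standalone program below reproduces it to make the resulting numbers easy to check for a given part. The erase sizes follow the 528/1056-byte DataFlash pages mentioned in the comment; the flash sizes are plausible figures chosen purely for illustration.

#include <stdio.h>

/* Reproduce the virtual-eraseblock sizing used for DataFlash above:
 * start with 8 physical erase blocks, double until at least 8 KiB,
 * then trim the usable flash size to a whole number of such blocks. */
static void dataflash_geometry(unsigned erasesize, unsigned flash_size)
{
        unsigned sector_size = 8 * erasesize;

        while (sector_size < 8192)
                sector_size *= 2;

        if (flash_size % sector_size)
                flash_size = (flash_size / sector_size) * sector_size;

        printf("erasesize %u -> sector_size %u, usable flash %u\n",
               erasesize, sector_size, flash_size);
}

int main(void)
{
        dataflash_geometry(528,  4325376);  /* 8192 pages of 528 bytes  */
        dataflash_geometry(1056, 8650752);  /* 8192 pages of 1056 bytes */
        dataflash_geometry(528,  4200000);  /* odd size, shows the trim */
        return 0;
}
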
@@ -1236,3 +1255,23 @@ int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
kfree(c->wbuf);
}
+
+int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
+ /* Cleanmarker currently occupies a whole programming region */
+ c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd);
+
+ /* Initialize write buffer */
+ init_rwsem(&c->wbuf_sem);
+ c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd);
+ c->wbuf_ofs = 0xFFFFFFFF;
+
+ c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+ if (!c->wbuf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
+ kfree(c->wbuf);
+}
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 69100615d9a..1342f0158e9 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: write.c,v 1.92 2005/04/13 13:22:35 dwmw2 Exp $
+ * $Id: write.c,v 1.97 2005/11/07 11:14:42 gleixner Exp $
*
*/
@@ -54,35 +54,7 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint
return 0;
}
-#if CONFIG_JFFS2_FS_DEBUG > 0
-static void writecheck(struct jffs2_sb_info *c, uint32_t ofs)
-{
- unsigned char buf[16];
- size_t retlen;
- int ret, i;
-
- ret = jffs2_flash_read(c, ofs, 16, &retlen, buf);
- if (ret || (retlen != 16)) {
- D1(printk(KERN_DEBUG "read failed or short in writecheck(). ret %d, retlen %zd\n", ret, retlen));
- return;
- }
- ret = 0;
- for (i=0; i<16; i++) {
- if (buf[i] != 0xff)
- ret = 1;
- }
- if (ret) {
- printk(KERN_WARNING "ARGH. About to write node to 0x%08x on flash, but there are data already there:\n", ofs);
- printk(KERN_WARNING "0x%08x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- ofs,
- buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7],
- buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]);
- }
-}
-#endif
-
-
-/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it,
+/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it,
write it to the flash, link it into the existing inode/fragment list */
struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode)
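
The open-coded writecheck() removed above is replaced by jffs2_dbg_prewrite_paranoia_check(), which (per the call site further down) covers the whole intended write length rather than a fixed 16 bytes. Its implementation lives in the new debug.c and is not part of this hunk; the helper below is only a guess at the core of such a check, applied to a buffer that has already been read back from flash.

#include <stddef.h>

/* Check that a to-be-written flash region still reads as erased (all 0xFF). */
static int region_is_erased(const unsigned char *buf, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                if (buf[i] != 0xff)
                        return 0;       /* dirty: something was already written here */
        return 1;
}
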
@@ -106,7 +78,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
vecs[1].iov_base = (unsigned char *)data;
vecs[1].iov_len = datalen;
- D1(writecheck(c, flash_ofs));
+ jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen);
@@ -114,7 +86,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
raw = jffs2_alloc_raw_node_ref();
if (!raw)
return ERR_PTR(-ENOMEM);
-
+
fn = jffs2_alloc_full_dnode();
if (!fn) {
jffs2_free_raw_node_ref(raw);
@@ -138,7 +110,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) {
BUG_ON(!retried);
D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, "
- "highest version %d -> updating dnode\n",
+ "highest version %d -> updating dnode\n",
je32_to_cpu(ri->version), f->highest_version));
ri->version = cpu_to_je32(++f->highest_version);
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
@@ -148,7 +120,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
(alloc_mode==ALLOC_GC)?0:f->inocache->ino);
if (ret || (retlen != sizeof(*ri) + datalen)) {
- printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
+ printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
sizeof(*ri)+datalen, flash_ofs, ret, retlen);
/* Mark the space as dirtied */
@@ -156,10 +128,10 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
/* Doesn't belong to any inode */
raw->next_in_ino = NULL;
- /* Don't change raw->size to match retlen. We may have
+ /* Don't change raw->size to match retlen. We may have
written the node header already, and only the data will
seem corrupted, in which case the scan would skip over
- any node we write before the original intended end of
+ any node we write before the original intended end of
this node */
raw->flash_offset |= REF_OBSOLETE;
jffs2_add_physical_node_ref(c, raw);
@@ -176,26 +148,28 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
retried = 1;
D1(printk(KERN_DEBUG "Retrying failed write.\n"));
-
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
if (alloc_mode == ALLOC_GC) {
- ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, &dummy);
+ ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs,
+ &dummy, JFFS2_SUMMARY_INODE_SIZE);
} else {
/* Locking pain */
up(&f->sem);
jffs2_complete_reservation(c);
-
- ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, &dummy, alloc_mode);
+
+ ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs,
+ &dummy, alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
down(&f->sem);
}
if (!ret) {
D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
@@ -207,9 +181,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
return ERR_PTR(ret?ret:-EIO);
}
/* Mark the space used */
- /* If node covers at least a whole page, or if it starts at the
- beginning of a page and runs to the end of the file, or if
- it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
+ /* If node covers at least a whole page, or if it starts at the
+ beginning of a page and runs to the end of the file, or if
+ it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
*/
if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
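
The REF_PRISTINE rule in the comment can be written out as a small predicate. The condition in this hunk is cut off by the diff context, so the sketch below only models the two cases the comment spells out (whole-page node, or page-aligned node running to end of file); the hole-node case is not modelled separately, and PAGE_CACHE_SIZE is replaced by a local constant.

#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u          /* stand-in for PAGE_CACHE_SIZE */

/* 1 = mark REF_PRISTINE, 0 = mark REF_NORMAL, per the comment above. */
static int node_is_pristine(uint32_t offset, uint32_t dsize, uint32_t isize)
{
        if (dsize >= SKETCH_PAGE_SIZE)
                return 1;
        if ((offset & (SKETCH_PAGE_SIZE - 1)) == 0 && offset + dsize == isize)
                return 1;
        return 0;
}
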
@@ -227,12 +201,12 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
spin_unlock(&c->erase_completion_lock);
D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
- flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize),
+ flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize),
je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)));
if (retried) {
- ACCT_SANITY_CHECK(c,NULL);
+ jffs2_dbg_acct_sanity_check(c,NULL);
}
return fn;
@@ -247,10 +221,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
int retried = 0;
int ret;
- D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
+ D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
je32_to_cpu(rd->name_crc)));
- D1(writecheck(c, flash_ofs));
D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n");
@@ -262,7 +235,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
vecs[0].iov_len = sizeof(*rd);
vecs[1].iov_base = (unsigned char *)name;
vecs[1].iov_len = namelen;
-
+
+ jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
+
raw = jffs2_alloc_raw_node_ref();
if (!raw)
@@ -301,7 +276,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
(alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
if (ret || (retlen != sizeof(*rd) + namelen)) {
- printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
+ printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
sizeof(*rd)+namelen, flash_ofs, ret, retlen);
/* Mark the space as dirtied */
if (retlen) {
@@ -322,24 +297,26 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
D1(printk(KERN_DEBUG "Retrying failed write.\n"));
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
if (alloc_mode == ALLOC_GC) {
- ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, &dummy);
+ ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs,
+ &dummy, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
} else {
/* Locking pain */
up(&f->sem);
jffs2_complete_reservation(c);
-
- ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, &dummy, alloc_mode);
+
+ ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs,
+ &dummy, alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
down(&f->sem);
}
if (!ret) {
D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
- ACCT_SANITY_CHECK(c,jeb);
- D1(ACCT_PARANOIA_CHECK(jeb));
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
goto retry;
}
D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
@@ -359,7 +336,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
spin_unlock(&c->erase_completion_lock);
if (retried) {
- ACCT_SANITY_CHECK(c,NULL);
+ jffs2_dbg_acct_sanity_check(c,NULL);
}
return fd;
@@ -369,7 +346,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
we don't have to go digging in struct inode or its equivalent. It should set:
mode, uid, gid, (starting)isize, atime, ctime, mtime */
int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
- struct jffs2_raw_inode *ri, unsigned char *buf,
+ struct jffs2_raw_inode *ri, unsigned char *buf,
uint32_t offset, uint32_t writelen, uint32_t *retlen)
{
int ret = 0;
@@ -377,7 +354,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n",
f->inocache->ino, offset, writelen));
-
+
while(writelen) {
struct jffs2_full_dnode *fn;
unsigned char *comprbuf = NULL;
@@ -389,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
retry:
D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset));
- ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs,
+ &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
break;
@@ -473,10 +451,11 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
uint32_t alloclen, phys_ofs;
int ret;
- /* Try to reserve enough space for both node and dirent.
- * Just the node will do for now, though
+ /* Try to reserve enough space for both node and dirent.
+ * Just the node will do for now, though
*/
- ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL,
+ JFFS2_SUMMARY_INODE_SIZE);
D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
if (ret) {
up(&f->sem);
@@ -498,15 +477,16 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
jffs2_complete_reservation(c);
return PTR_ERR(fn);
}
- /* No data here. Only a metadata node, which will be
+ /* No data here. Only a metadata node, which will be
obsoleted by the first data write
*/
f->metadata = fn;
up(&f->sem);
jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
-
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+
if (ret) {
/* Eep. */
D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n"));
@@ -539,9 +519,9 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
jffs2_free_raw_dirent(rd);
-
+
if (IS_ERR(fd)) {
- /* dirent failed to write. Delete the inode normally
+ /* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
jffs2_complete_reservation(c);
up(&dir_f->sem);
@@ -560,14 +540,15 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
- const char *name, int namelen, struct jffs2_inode_info *dead_f)
+ const char *name, int namelen, struct jffs2_inode_info *dead_f,
+ uint32_t time)
{
struct jffs2_raw_dirent *rd;
struct jffs2_full_dirent *fd;
uint32_t alloclen, phys_ofs;
int ret;
- if (1 /* alternative branch needs testing */ ||
+ if (1 /* alternative branch needs testing */ ||
!jffs2_can_mark_obsolete(c)) {
/* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */
@@ -575,7 +556,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
if (!rd)
return -ENOMEM;
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_DELETION);
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
jffs2_free_raw_dirent(rd);
return ret;
@@ -588,18 +570,18 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
-
+
rd->pino = cpu_to_je32(dir_f->inocache->ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(0);
- rd->mctime = cpu_to_je32(get_seconds());
+ rd->mctime = cpu_to_je32(time);
rd->nsize = namelen;
rd->type = DT_UNKNOWN;
rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION);
-
+
jffs2_free_raw_dirent(rd);
if (IS_ERR(fd)) {
@@ -618,7 +600,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
down(&dir_f->sem);
while ((*prev) && (*prev)->nhash <= nhash) {
- if ((*prev)->nhash == nhash &&
+ if ((*prev)->nhash == nhash &&
!memcmp((*prev)->name, name, namelen) &&
!(*prev)->name[namelen]) {
struct jffs2_full_dirent *this = *prev;
@@ -639,7 +621,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
/* dead_f is NULL if this was a rename not a real unlink */
/* Also catch the !f->inocache case, where there was a dirent
pointing to an inode which didn't exist. */
- if (dead_f && dead_f->inocache) {
+ if (dead_f && dead_f->inocache) {
down(&dead_f->sem);
@@ -647,9 +629,9 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
while (dead_f->dents) {
/* There can be only deleted ones */
fd = dead_f->dents;
-
+
dead_f->dents = fd->next;
-
+
if (fd->ino) {
printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
dead_f->inocache->ino, fd->name, fd->ino);
@@ -673,7 +655,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
}
-int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen)
+int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time)
{
struct jffs2_raw_dirent *rd;
struct jffs2_full_dirent *fd;
@@ -684,12 +666,13 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
if (!rd)
return -ENOMEM;
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
if (ret) {
jffs2_free_raw_dirent(rd);
return ret;
}
-
+
down(&dir_f->sem);
/* Build a deletion node */
@@ -701,7 +684,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
rd->pino = cpu_to_je32(dir_f->inocache->ino);
rd->version = cpu_to_je32(++dir_f->highest_version);
rd->ino = cpu_to_je32(ino);
- rd->mctime = cpu_to_je32(get_seconds());
+ rd->mctime = cpu_to_je32(time);
rd->nsize = namelen;
rd->type = type;
@@ -710,7 +693,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
-
+
jffs2_free_raw_dirent(rd);
if (IS_ERR(fd)) {
diff --git a/fs/jffs2/writev.c b/fs/jffs2/writev.c
index f079f838856..c638ae1008d 100644
--- a/fs/jffs2/writev.c
+++ b/fs/jffs2/writev.c
@@ -7,7 +7,7 @@
*
* For licensing information, see the file 'LICENCE' in this directory.
*
- * $Id: writev.c,v 1.6 2004/11/16 20:36:12 dwmw2 Exp $
+ * $Id: writev.c,v 1.8 2005/09/09 15:11:58 havasi Exp $
*
*/
@@ -42,9 +42,40 @@ static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs,
int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
+ if (!jffs2_is_writebuffered(c)) {
+ if (jffs2_sum_active()) {
+ int res;
+ res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to);
+ if (res) {
+ return res;
+ }
+ }
+ }
+
if (c->mtd->writev)
return c->mtd->writev(c->mtd, vecs, count, to, retlen);
- else
+ else {
return mtd_fake_writev(c->mtd, vecs, count, to, retlen);
+ }
}
+int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ int ret;
+ ret = c->mtd->write(c->mtd, ofs, len, retlen, buf);
+
+ if (jffs2_sum_active()) {
+ struct kvec vecs[1];
+ int res;
+
+ vecs[0].iov_base = (unsigned char *) buf;
+ vecs[0].iov_len = len;
+
+ res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs);
+ if (res) {
+ return res;
+ }
+ }
+ return ret;
+}
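
Both write paths hand the flash layer a two-element vector: element 0 is the fixed-size node header and element 1 the variable payload, and with summary support enabled the same vector is now also fed to jffs2_sum_add_kvec() so the summary collector knows what landed where. The userspace program below mirrors that gather-write shape with POSIX writev(); the header layout and the constants in it are purely illustrative.

#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

/* Made-up fixed-size header, standing in for struct jffs2_raw_inode/dirent. */
struct fake_node_header {
        unsigned short magic;
        unsigned short nodetype;
        unsigned int   totlen;
};

int main(void)
{
        const char payload[] = "payload bytes";
        struct fake_node_header hdr = { 0x1985, 0xe002, 0 };
        struct iovec vecs[2];
        ssize_t written;

        hdr.totlen = sizeof(hdr) + sizeof(payload);

        vecs[0].iov_base = &hdr;                /* header first ... */
        vecs[0].iov_len  = sizeof(hdr);
        vecs[1].iov_base = (void *)payload;     /* ... then the data */
        vecs[1].iov_len  = sizeof(payload);

        written = writev(STDOUT_FILENO, vecs, 2);
        if (written < 0)
                perror("writev");
        else
                fprintf(stderr, "wrote %zd of %u bytes\n", written, hdr.totlen);
        return 0;
}
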
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 1abe7343f92..4abbe860430 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -827,6 +827,7 @@ static int jfs_link(struct dentry *old_dentry,
/* update object inode */
ip->i_nlink++; /* for new link */
ip->i_ctime = CURRENT_TIME;
+ dir->i_ctime = dir->i_mtime = CURRENT_TIME;
mark_inode_dirty(dir);
atomic_inc(&ip->i_count);
@@ -1024,6 +1025,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
insert_inode_hash(ip);
mark_inode_dirty(ip);
+ dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+ mark_inode_dirty(dip);
/*
* commit update of parent directory and link object
*/
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 87332f30141..c5a33648e9f 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -112,8 +112,7 @@ static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_
}
}
spin_unlock(&host->h_lock);
- if (new != NULL)
- kfree(new);
+ kfree(new);
return res;
}
diff --git a/fs/locks.c b/fs/locks.c
index a1e8b224801..250ef53d25e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1105,7 +1105,6 @@ static void time_out_leases(struct inode *inode)
before = &fl->fl_next;
continue;
}
- printk(KERN_INFO "lease broken - owner pid = %d\n", fl->fl_pid);
lease_modify(before, fl->fl_type & ~F_INPROGRESS);
if (fl == *before) /* lease_modify may have freed fl */
before = &fl->fl_next;
@@ -1430,7 +1429,7 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
lock_kernel();
error = __setlease(filp, arg, &flp);
- if (error)
+ if (error || arg == F_UNLCK)
goto out_unlock;
error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 298997f1747..0f1e4530670 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -301,8 +301,7 @@ fail:
if (cache) {
while (--m >= 0)
kfree(cache->c_indexes_hash[m]);
- if (cache->c_block_hash)
- kfree(cache->c_block_hash);
+ kfree(cache->c_block_hash);
kfree(cache);
}
return NULL;
diff --git a/fs/namei.c b/fs/namei.c
index c5769c4fcab..6dbbd42d8b9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -256,6 +256,38 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
return security_inode_permission(inode, mask, nd);
}
+/**
+ * vfs_permission - check for access rights to a given path
+ * @nd: lookup result that describes the path
+ * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
+ *
+ * Used to check for read/write/execute permissions on a path.
+ * We use "fsuid" for this, letting us set arbitrary permissions
+ * for filesystem access without changing the "normal" uids which
+ * are used for other things.
+ */
+int vfs_permission(struct nameidata *nd, int mask)
+{
+ return permission(nd->dentry->d_inode, mask, nd);
+}
+
+/**
+ * file_permission - check for additional access rights to a given file
+ * @file: file to check access rights for
+ * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
+ *
+ * Used to check for read/write/execute permissions on an already opened
+ * file.
+ *
+ * Note:
+ * Do not use this function in new code. All access checks should
+ * be done using vfs_permission().
+ */
+int file_permission(struct file *file, int mask)
+{
+ return permission(file->f_dentry->d_inode, mask, NULL);
+}
+
/*
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
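
A minimal sketch of how the vfs_permission() helper added above would be called from kernel code that has just resolved a path. The surrounding path_lookup()/path_release() calls are the era's standard lookup API, the function itself is invented for illustration, and it assumes the new prototype is visible through the usual headers.

#include <linux/fs.h>
#include <linux/namei.h>

static int example_may_read(const char *pathname)
{
        struct nameidata nd;
        int err;

        err = path_lookup(pathname, LOOKUP_FOLLOW, &nd);
        if (err)
                return err;

        /* Same check the open path now makes through vfs_permission(). */
        err = vfs_permission(&nd, MAY_READ);

        path_release(&nd);
        return err;
}
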
@@ -765,9 +797,8 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
nd->flags |= LOOKUP_CONTINUE;
err = exec_permission_lite(inode, nd);
- if (err == -EAGAIN) {
- err = permission(inode, MAY_EXEC, nd);
- }
+ if (err == -EAGAIN)
+ err = vfs_permission(nd, MAY_EXEC);
if (err)
break;
@@ -1109,8 +1140,9 @@ int path_lookup_open(const char *name, unsigned int lookup_flags,
* @open_flags: open intent flags
* @create_mode: create intent flags
*/
-int path_lookup_create(const char *name, unsigned int lookup_flags,
- struct nameidata *nd, int open_flags, int create_mode)
+static int path_lookup_create(const char *name, unsigned int lookup_flags,
+ struct nameidata *nd, int open_flags,
+ int create_mode)
{
return __path_lookup_intent_open(name, lookup_flags|LOOKUP_CREATE, nd,
open_flags, create_mode);
@@ -1173,9 +1205,9 @@ out:
return dentry;
}
-struct dentry * lookup_hash(struct qstr *name, struct dentry * base)
+struct dentry * lookup_hash(struct nameidata *nd)
{
- return __lookup_hash(name, base, NULL);
+ return __lookup_hash(&nd->last, nd->dentry, nd);
}
/* SMP-safe */
@@ -1199,7 +1231,7 @@ struct dentry * lookup_one_len(const char * name, struct dentry * base, int len)
}
this.hash = end_name_hash(hash);
- return lookup_hash(&this, base);
+ return __lookup_hash(&this, base, NULL);
access:
return ERR_PTR(-EACCES);
}
@@ -1407,7 +1439,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
if (S_ISDIR(inode->i_mode) && (flag & FMODE_WRITE))
return -EISDIR;
- error = permission(inode, acc_mode, nd);
+ error = vfs_permission(nd, acc_mode);
if (error)
return error;
@@ -1459,7 +1491,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
if (!error) {
DQUOT_INIT(inode);
- error = do_truncate(dentry, 0);
+ error = do_truncate(dentry, 0, NULL);
}
put_write_access(inode);
if (error)
@@ -1532,7 +1564,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
dir = nd->dentry;
nd->flags &= ~LOOKUP_PARENT;
down(&dir->d_inode->i_sem);
- path.dentry = __lookup_hash(&nd->last, nd->dentry, nd);
+ path.dentry = lookup_hash(nd);
path.mnt = nd->mnt;
do_last:
@@ -1634,7 +1666,7 @@ do_link:
}
dir = nd->dentry;
down(&dir->d_inode->i_sem);
- path.dentry = __lookup_hash(&nd->last, nd->dentry, nd);
+ path.dentry = lookup_hash(nd);
path.mnt = nd->mnt;
__putname(nd->last.name);
goto do_last;
@@ -1666,7 +1698,7 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
/*
* Do the final lookup.
*/
- dentry = lookup_hash(&nd->last, nd->dentry);
+ dentry = lookup_hash(nd);
if (IS_ERR(dentry))
goto fail;
@@ -1901,7 +1933,7 @@ asmlinkage long sys_rmdir(const char __user * pathname)
goto exit1;
}
down(&nd.dentry->d_inode->i_sem);
- dentry = lookup_hash(&nd.last, nd.dentry);
+ dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
error = vfs_rmdir(nd.dentry->d_inode, dentry);
@@ -1970,7 +2002,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
if (nd.last_type != LAST_NORM)
goto exit1;
down(&nd.dentry->d_inode->i_sem);
- dentry = lookup_hash(&nd.last, nd.dentry);
+ dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
@@ -2313,7 +2345,7 @@ static inline int do_rename(const char * oldname, const char * newname)
trap = lock_rename(new_dir, old_dir);
- old_dentry = lookup_hash(&oldnd.last, old_dir);
+ old_dentry = lookup_hash(&oldnd);
error = PTR_ERR(old_dentry);
if (IS_ERR(old_dentry))
goto exit3;
@@ -2333,7 +2365,7 @@ static inline int do_rename(const char * oldname, const char * newname)
error = -EINVAL;
if (old_dentry == trap)
goto exit4;
- new_dentry = lookup_hash(&newnd.last, new_dir);
+ new_dentry = lookup_hash(&newnd);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto exit4;
@@ -2536,6 +2568,8 @@ EXPORT_SYMBOL(path_lookup);
EXPORT_SYMBOL(path_release);
EXPORT_SYMBOL(path_walk);
EXPORT_SYMBOL(permission);
+EXPORT_SYMBOL(vfs_permission);
+EXPORT_SYMBOL(file_permission);
EXPORT_SYMBOL(unlock_rename);
EXPORT_SYMBOL(vfs_create);
EXPORT_SYMBOL(vfs_follow_link);
diff --git a/fs/namespace.c b/fs/namespace.c
index 2fa9fdf7d6f..2019899f2ab 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -24,6 +24,7 @@
#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
+#include "pnode.h"
extern int __init init_rootfs(void);
@@ -37,33 +38,39 @@ static inline int sysfs_init(void)
#endif
/* spinlock for vfsmount related operations, inplace of dcache_lock */
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
+
+static int event;
static struct list_head *mount_hashtable;
static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache;
+static kmem_cache_t *mnt_cache;
+static struct rw_semaphore namespace_sem;
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
- unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
- tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
+ unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
+ tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
tmp = tmp + (tmp >> hash_bits);
return tmp & hash_mask;
}
struct vfsmount *alloc_vfsmnt(const char *name)
{
- struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
+ struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
if (mnt) {
memset(mnt, 0, sizeof(struct vfsmount));
- atomic_set(&mnt->mnt_count,1);
+ atomic_set(&mnt->mnt_count, 1);
INIT_LIST_HEAD(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
INIT_LIST_HEAD(&mnt->mnt_mounts);
INIT_LIST_HEAD(&mnt->mnt_list);
INIT_LIST_HEAD(&mnt->mnt_expire);
+ INIT_LIST_HEAD(&mnt->mnt_share);
+ INIT_LIST_HEAD(&mnt->mnt_slave_list);
+ INIT_LIST_HEAD(&mnt->mnt_slave);
if (name) {
- int size = strlen(name)+1;
+ int size = strlen(name) + 1;
char *newname = kmalloc(size, GFP_KERNEL);
if (newname) {
memcpy(newname, name, size);
@@ -81,36 +88,65 @@ void free_vfsmnt(struct vfsmount *mnt)
}
/*
- * Now, lookup_mnt increments the ref count before returning
- * the vfsmount struct.
+ * Find the first or last mount at @dentry on vfsmount @mnt depending on
+ * @dir. If @dir is set, return the first mount; otherwise return the last mount.
*/
-struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
+ int dir)
{
- struct list_head * head = mount_hashtable + hash(mnt, dentry);
- struct list_head * tmp = head;
+ struct list_head *head = mount_hashtable + hash(mnt, dentry);
+ struct list_head *tmp = head;
struct vfsmount *p, *found = NULL;
- spin_lock(&vfsmount_lock);
for (;;) {
- tmp = tmp->next;
+ tmp = dir ? tmp->next : tmp->prev;
p = NULL;
if (tmp == head)
break;
p = list_entry(tmp, struct vfsmount, mnt_hash);
if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
- found = mntget(p);
+ found = p;
break;
}
}
- spin_unlock(&vfsmount_lock);
return found;
}
+/*
+ * lookup_mnt increments the ref count before returning
+ * the vfsmount struct.
+ */
+struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+{
+ struct vfsmount *child_mnt;
+ spin_lock(&vfsmount_lock);
+ if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
+ mntget(child_mnt);
+ spin_unlock(&vfsmount_lock);
+ return child_mnt;
+}
+
static inline int check_mnt(struct vfsmount *mnt)
{
return mnt->mnt_namespace == current->namespace;
}
+static void touch_namespace(struct namespace *ns)
+{
+ if (ns) {
+ ns->event = ++event;
+ wake_up_interruptible(&ns->poll);
+ }
+}
+
+static void __touch_namespace(struct namespace *ns)
+{
+ if (ns && ns->event != event) {
+ ns->event = event;
+ wake_up_interruptible(&ns->poll);
+ }
+}
+
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
old_nd->dentry = mnt->mnt_mountpoint;
@@ -122,13 +158,43 @@ static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
old_nd->dentry->d_mounted--;
}
+void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
+ struct vfsmount *child_mnt)
+{
+ child_mnt->mnt_parent = mntget(mnt);
+ child_mnt->mnt_mountpoint = dget(dentry);
+ dentry->d_mounted++;
+}
+
static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
- mnt->mnt_parent = mntget(nd->mnt);
- mnt->mnt_mountpoint = dget(nd->dentry);
- list_add(&mnt->mnt_hash, mount_hashtable+hash(nd->mnt, nd->dentry));
+ mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
+ list_add_tail(&mnt->mnt_hash, mount_hashtable +
+ hash(nd->mnt, nd->dentry));
list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
- nd->dentry->d_mounted++;
+}
+
+/*
+ * the caller must hold vfsmount_lock
+ */
+static void commit_tree(struct vfsmount *mnt)
+{
+ struct vfsmount *parent = mnt->mnt_parent;
+ struct vfsmount *m;
+ LIST_HEAD(head);
+ struct namespace *n = parent->mnt_namespace;
+
+ BUG_ON(parent == mnt);
+
+ list_add_tail(&head, &mnt->mnt_list);
+ list_for_each_entry(m, &head, mnt_list)
+ m->mnt_namespace = n;
+ list_splice(&head, n->list.prev);
+
+ list_add_tail(&mnt->mnt_hash, mount_hashtable +
+ hash(parent, mnt->mnt_mountpoint));
+ list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+ touch_namespace(n);
}
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
@@ -147,8 +213,18 @@ static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
return list_entry(next, struct vfsmount, mnt_child);
}
-static struct vfsmount *
-clone_mnt(struct vfsmount *old, struct dentry *root)
+static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
+{
+ struct list_head *prev = p->mnt_mounts.prev;
+ while (prev != &p->mnt_mounts) {
+ p = list_entry(prev, struct vfsmount, mnt_child);
+ prev = p->mnt_mounts.prev;
+ }
+ return p;
+}
+
+static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
+ int flag)
{
struct super_block *sb = old->mnt_sb;
struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
@@ -160,19 +236,34 @@ clone_mnt(struct vfsmount *old, struct dentry *root)
mnt->mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
- mnt->mnt_namespace = current->namespace;
+
+ if (flag & CL_SLAVE) {
+ list_add(&mnt->mnt_slave, &old->mnt_slave_list);
+ mnt->mnt_master = old;
+ CLEAR_MNT_SHARED(mnt);
+ } else {
+ if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
+ list_add(&mnt->mnt_share, &old->mnt_share);
+ if (IS_MNT_SLAVE(old))
+ list_add(&mnt->mnt_slave, &old->mnt_slave);
+ mnt->mnt_master = old->mnt_master;
+ }
+ if (flag & CL_MAKE_SHARED)
+ set_mnt_shared(mnt);
/* stick the duplicate mount on the same expiry list
* as the original if that was on one */
- spin_lock(&vfsmount_lock);
- if (!list_empty(&old->mnt_expire))
- list_add(&mnt->mnt_expire, &old->mnt_expire);
- spin_unlock(&vfsmount_lock);
+ if (flag & CL_EXPIRE) {
+ spin_lock(&vfsmount_lock);
+ if (!list_empty(&old->mnt_expire))
+ list_add(&mnt->mnt_expire, &old->mnt_expire);
+ spin_unlock(&vfsmount_lock);
+ }
}
return mnt;
}
-void __mntput(struct vfsmount *mnt)
+static inline void __mntput(struct vfsmount *mnt)
{
struct super_block *sb = mnt->mnt_sb;
dput(mnt->mnt_root);
@@ -180,7 +271,46 @@ void __mntput(struct vfsmount *mnt)
deactivate_super(sb);
}
-EXPORT_SYMBOL(__mntput);
+void mntput_no_expire(struct vfsmount *mnt)
+{
+repeat:
+ if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
+ if (likely(!mnt->mnt_pinned)) {
+ spin_unlock(&vfsmount_lock);
+ __mntput(mnt);
+ return;
+ }
+ atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
+ mnt->mnt_pinned = 0;
+ spin_unlock(&vfsmount_lock);
+ acct_auto_close_mnt(mnt);
+ security_sb_umount_close(mnt);
+ goto repeat;
+ }
+}
+
+EXPORT_SYMBOL(mntput_no_expire);
+
+void mnt_pin(struct vfsmount *mnt)
+{
+ spin_lock(&vfsmount_lock);
+ mnt->mnt_pinned++;
+ spin_unlock(&vfsmount_lock);
+}
+
+EXPORT_SYMBOL(mnt_pin);
+
+void mnt_unpin(struct vfsmount *mnt)
+{
+ spin_lock(&vfsmount_lock);
+ if (mnt->mnt_pinned) {
+ atomic_inc(&mnt->mnt_count);
+ mnt->mnt_pinned--;
+ }
+ spin_unlock(&vfsmount_lock);
+}
+
+EXPORT_SYMBOL(mnt_unpin);
/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
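
mnt_pin()/mnt_unpin() and mntput_no_expire() form a small protocol for code that keeps a long-lived struct file open on a mount: the holder pins the mount, and when the last ordinary reference goes away mntput_no_expire() notices the pinned references and asks the holder to let go through acct_auto_close_mnt() and the security hook. The sketch below is a rough guess at that usage shape, loosely modelled on the process-accounting code these hooks point at; every name in it is hypothetical and error handling is omitted.

#include <linux/fs.h>
#include <linux/mount.h>

static struct file *sketch_file;        /* our long-lived handle */

static void sketch_attach(struct file *file)
{
        sketch_file = file;
        mnt_pin(file->f_vfsmnt);        /* mark our reference as pinned */
}

static void sketch_detach(void)
{
        if (!sketch_file)
                return;
        mnt_unpin(sketch_file->f_vfsmnt);       /* hand the reference back */
        filp_close(sketch_file, NULL);
        sketch_file = NULL;
}
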
@@ -189,7 +319,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
struct list_head *p;
loff_t l = *pos;
- down_read(&n->sem);
+ down_read(&namespace_sem);
list_for_each(p, &n->list)
if (!l--)
return list_entry(p, struct vfsmount, mnt_list);
@@ -201,13 +331,12 @@ static void *m_next(struct seq_file *m, void *v, loff_t *pos)
struct namespace *n = m->private;
struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
(*pos)++;
- return p==&n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
+ return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}
static void m_stop(struct seq_file *m, void *v)
{
- struct namespace *n = m->private;
- up_read(&n->sem);
+ up_read(&namespace_sem);
}
static inline void mangle(struct seq_file *m, const char *s)
@@ -275,35 +404,14 @@ struct seq_operations mounts_op = {
*/
int may_umount_tree(struct vfsmount *mnt)
{
- struct list_head *next;
- struct vfsmount *this_parent = mnt;
- int actual_refs;
- int minimum_refs;
+ int actual_refs = 0;
+ int minimum_refs = 0;
+ struct vfsmount *p;
spin_lock(&vfsmount_lock);
- actual_refs = atomic_read(&mnt->mnt_count);
- minimum_refs = 2;
-repeat:
- next = this_parent->mnt_mounts.next;
-resume:
- while (next != &this_parent->mnt_mounts) {
- struct vfsmount *p = list_entry(next, struct vfsmount, mnt_child);
-
- next = next->next;
-
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
actual_refs += atomic_read(&p->mnt_count);
minimum_refs += 2;
-
- if (!list_empty(&p->mnt_mounts)) {
- this_parent = p;
- goto repeat;
- }
- }
-
- if (this_parent != mnt) {
- next = this_parent->mnt_child.next;
- this_parent = this_parent->mnt_parent;
- goto resume;
}
spin_unlock(&vfsmount_lock);
@@ -330,45 +438,67 @@ EXPORT_SYMBOL(may_umount_tree);
*/
int may_umount(struct vfsmount *mnt)
{
- if (atomic_read(&mnt->mnt_count) > 2)
- return -EBUSY;
- return 0;
+ int ret = 0;
+ spin_lock(&vfsmount_lock);
+ if (propagate_mount_busy(mnt, 2))
+ ret = -EBUSY;
+ spin_unlock(&vfsmount_lock);
+ return ret;
}
EXPORT_SYMBOL(may_umount);
-static void umount_tree(struct vfsmount *mnt)
+void release_mounts(struct list_head *head)
+{
+ struct vfsmount *mnt;
+ while(!list_empty(head)) {
+ mnt = list_entry(head->next, struct vfsmount, mnt_hash);
+ list_del_init(&mnt->mnt_hash);
+ if (mnt->mnt_parent != mnt) {
+ struct dentry *dentry;
+ struct vfsmount *m;
+ spin_lock(&vfsmount_lock);
+ dentry = mnt->mnt_mountpoint;
+ m = mnt->mnt_parent;
+ mnt->mnt_mountpoint = mnt->mnt_root;
+ mnt->mnt_parent = mnt;
+ spin_unlock(&vfsmount_lock);
+ dput(dentry);
+ mntput(m);
+ }
+ mntput(mnt);
+ }
+}
+
+void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
struct vfsmount *p;
- LIST_HEAD(kill);
for (p = mnt; p; p = next_mnt(p, mnt)) {
- list_del(&p->mnt_list);
- list_add(&p->mnt_list, &kill);
- p->mnt_namespace = NULL;
+ list_del(&p->mnt_hash);
+ list_add(&p->mnt_hash, kill);
}
- while (!list_empty(&kill)) {
- mnt = list_entry(kill.next, struct vfsmount, mnt_list);
- list_del_init(&mnt->mnt_list);
- list_del_init(&mnt->mnt_expire);
- if (mnt->mnt_parent == mnt) {
- spin_unlock(&vfsmount_lock);
- } else {
- struct nameidata old_nd;
- detach_mnt(mnt, &old_nd);
- spin_unlock(&vfsmount_lock);
- path_release(&old_nd);
- }
- mntput(mnt);
- spin_lock(&vfsmount_lock);
+ if (propagate)
+ propagate_umount(kill);
+
+ list_for_each_entry(p, kill, mnt_hash) {
+ list_del_init(&p->mnt_expire);
+ list_del_init(&p->mnt_list);
+ __touch_namespace(p->mnt_namespace);
+ p->mnt_namespace = NULL;
+ list_del_init(&p->mnt_child);
+ if (p->mnt_parent != p)
+ mnt->mnt_mountpoint->d_mounted--;
+ change_mnt_propagation(p, MS_PRIVATE);
}
}
static int do_umount(struct vfsmount *mnt, int flags)
{
- struct super_block * sb = mnt->mnt_sb;
+ struct super_block *sb = mnt->mnt_sb;
int retval;
+ LIST_HEAD(umount_list);
retval = security_sb_umount(mnt, flags);
if (retval)
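
umount_tree() now only unhashes its victims onto a caller-supplied list under vfsmount_lock, and release_mounts() drops the dentry and mount references only after the spinlock has been released, because dput()/mntput() may sleep. The standalone sketch below shows the same "collect under the lock, release outside it" shape with a toy list and a pthread mutex standing in for the spinlock; none of it is kernel code.

#include <pthread.h>
#include <stdlib.h>

struct victim {
        struct victim *next;
        void *payload;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct victim *live_list;

/* Phase 1: detach everything while holding the lock (cheap, non-blocking). */
static struct victim *collect_victims(void)
{
        struct victim *kill;

        pthread_mutex_lock(&list_lock);
        kill = live_list;               /* this toy version takes the whole list */
        live_list = NULL;
        pthread_mutex_unlock(&list_lock);
        return kill;
}

/* Phase 2: the potentially slow cleanup, with no lock held. */
static void release_victims(struct victim *kill)
{
        while (kill) {
                struct victim *next = kill->next;
                free(kill->payload);    /* stands in for dput()/mntput() */
                free(kill);
                kill = next;
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++) {
                struct victim *v = malloc(sizeof(*v));
                v->payload = malloc(16);
                pthread_mutex_lock(&list_lock);
                v->next = live_list;
                live_list = v;
                pthread_mutex_unlock(&list_lock);
        }
        release_victims(collect_victims());
        return 0;
}
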
@@ -403,7 +533,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
*/
lock_kernel();
- if( (flags&MNT_FORCE) && sb->s_op->umount_begin)
+ if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
sb->s_op->umount_begin(sb);
unlock_kernel();
@@ -432,29 +562,21 @@ static int do_umount(struct vfsmount *mnt, int flags)
return retval;
}
- down_write(&current->namespace->sem);
+ down_write(&namespace_sem);
spin_lock(&vfsmount_lock);
+ event++;
- if (atomic_read(&sb->s_active) == 1) {
- /* last instance - try to be smart */
- spin_unlock(&vfsmount_lock);
- lock_kernel();
- DQUOT_OFF(sb);
- acct_auto_close(sb);
- unlock_kernel();
- security_sb_umount_close(mnt);
- spin_lock(&vfsmount_lock);
- }
retval = -EBUSY;
- if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
+ if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
if (!list_empty(&mnt->mnt_list))
- umount_tree(mnt);
+ umount_tree(mnt, 1, &umount_list);
retval = 0;
}
spin_unlock(&vfsmount_lock);
if (retval)
security_sb_umount_busy(mnt);
- up_write(&current->namespace->sem);
+ up_write(&namespace_sem);
+ release_mounts(&umount_list);
return retval;
}
@@ -494,12 +616,11 @@ out:
#ifdef __ARCH_WANT_SYS_OLDUMOUNT
/*
- * The 2.0 compatible umount. No flags.
+ * The 2.0 compatible umount. No flags.
*/
-
asmlinkage long sys_oldumount(char __user * name)
{
- return sys_umount(name,0);
+ return sys_umount(name, 0);
}
#endif
@@ -516,14 +637,13 @@ static int mount_is_safe(struct nameidata *nd)
if (current->uid != nd->dentry->d_inode->i_uid)
return -EPERM;
}
- if (permission(nd->dentry->d_inode, MAY_WRITE, nd))
+ if (vfs_permission(nd, MAY_WRITE))
return -EPERM;
return 0;
#endif
}
-static int
-lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
+static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
while (1) {
if (d == dentry)
@@ -534,12 +654,16 @@ lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
}
}
-static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
+struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
+ int flag)
{
struct vfsmount *res, *p, *q, *r, *s;
struct nameidata nd;
- res = q = clone_mnt(mnt, dentry);
+ if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
+ return NULL;
+
+ res = q = clone_mnt(mnt, dentry, flag);
if (!q)
goto Enomem;
q->mnt_mountpoint = mnt->mnt_mountpoint;
@@ -550,6 +674,10 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
continue;
for (s = r; s; s = next_mnt(s, r)) {
+ if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
+ s = skip_mnt_tree(s);
+ continue;
+ }
while (p != s->mnt_parent) {
p = p->mnt_parent;
q = q->mnt_parent;
@@ -557,7 +685,7 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
p = s;
nd.mnt = q;
nd.dentry = p->mnt_mountpoint;
- q = clone_mnt(p, p->mnt_root);
+ q = clone_mnt(p, p->mnt_root, flag);
if (!q)
goto Enomem;
spin_lock(&vfsmount_lock);
@@ -567,15 +695,114 @@ static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
}
}
return res;
- Enomem:
+Enomem:
if (res) {
+ LIST_HEAD(umount_list);
spin_lock(&vfsmount_lock);
- umount_tree(res);
+ umount_tree(res, 0, &umount_list);
spin_unlock(&vfsmount_lock);
+ release_mounts(&umount_list);
}
return NULL;
}
+/*
+ * @source_mnt : mount tree to be attached
+ * @nd : the place the mount tree @source_mnt is attached to
+ * @parent_nd : if non-null, detach the source_mnt from its parent and
+ * store the parent mount and mountpoint dentry.
+ * (done when source_mnt is moved)
+ *
+ * NOTE: the tables below explain the semantics when a source mount
+ * of a given type is attached to a destination mount of a given type.
+ * ---------------------------------------------------------------------------
+ * | BIND MOUNT OPERATION |
+ * |**************************************************************************
+ * | source-->| shared | private | slave | unbindable |
+ * | dest | | | | |
+ * | | | | | | |
+ * | v | | | | |
+ * |**************************************************************************
+ * | shared | shared (++) | shared (+) | shared(+++)| invalid |
+ * | | | | | |
+ * |non-shared| shared (+) | private | slave (*) | invalid |
+ * ***************************************************************************
+ * A bind operation clones the source mount and mounts the clone on the
+ * destination mount.
+ *
+ * (++) the cloned mount is propagated to all the mounts in the propagation
+ * tree of the destination mount and the cloned mount is added to
+ * the peer group of the source mount.
+ * (+) the cloned mount is created under the destination mount and is marked
+ * as shared. The cloned mount is added to the peer group of the source
+ * mount.
+ * (+++) the mount is propagated to all the mounts in the propagation tree
+ * of the destination mount and the cloned mount is made slave
+ * of the same master as that of the source mount. The cloned mount
+ * is marked as 'shared and slave'.
+ * (*) the cloned mount is made a slave of the same master as that of the
+ * source mount.
+ *
+ * ---------------------------------------------------------------------------
+ * | MOVE MOUNT OPERATION |
+ * |**************************************************************************
+ * | source-->| shared | private | slave | unbindable |
+ * | dest | | | | |
+ * | | | | | | |
+ * | v | | | | |
+ * |**************************************************************************
+ * | shared | shared (+) | shared (+) | shared(+++) | invalid |
+ * | | | | | |
+ * |non-shared| shared (+*) | private | slave (*) | unbindable |
+ * ***************************************************************************
+ *
+ * (+) the mount is moved to the destination. And is then propagated to
+ * all the mounts in the propagation tree of the destination mount.
+ * (+*) the mount is moved to the destination.
+ * (+++) the mount is moved to the destination and is then propagated to
+ * all the mounts belonging to the destination mount's propagation tree.
+ * the mount is marked as 'shared and slave'.
+ * (*) the mount continues to be a slave at the new location.
+ *
+ * If the source mount is a tree, the operations explained above are
+ * applied to each mount in the tree.
+ * Must be called without spinlocks held, since this function can sleep
+ * in allocations.
+ */
+static int attach_recursive_mnt(struct vfsmount *source_mnt,
+ struct nameidata *nd, struct nameidata *parent_nd)
+{
+ LIST_HEAD(tree_list);
+ struct vfsmount *dest_mnt = nd->mnt;
+ struct dentry *dest_dentry = nd->dentry;
+ struct vfsmount *child, *p;
+
+ if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
+ return -EINVAL;
+
+ if (IS_MNT_SHARED(dest_mnt)) {
+ for (p = source_mnt; p; p = next_mnt(p, source_mnt))
+ set_mnt_shared(p);
+ }
+
+ spin_lock(&vfsmount_lock);
+ if (parent_nd) {
+ detach_mnt(source_mnt, parent_nd);
+ attach_mnt(source_mnt, nd);
+ touch_namespace(current->namespace);
+ } else {
+ mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
+ commit_tree(source_mnt);
+ }
+
+ list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
+ list_del_init(&child->mnt_hash);
+ commit_tree(child);
+ }
+ spin_unlock(&vfsmount_lock);
+ return 0;
+}
+
static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
int err;
@@ -596,17 +823,8 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
goto out_unlock;
err = -ENOENT;
- spin_lock(&vfsmount_lock);
- if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) {
- struct list_head head;
-
- attach_mnt(mnt, nd);
- list_add_tail(&head, &mnt->mnt_list);
- list_splice(&head, current->namespace->list.prev);
- mntget(mnt);
- err = 0;
- }
- spin_unlock(&vfsmount_lock);
+ if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
+ err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
up(&nd->dentry->d_inode->i_sem);
if (!err)
@@ -615,6 +833,27 @@ out_unlock:
}
/*
+ * recursively change the type of the mountpoint.
+ */
+static int do_change_type(struct nameidata *nd, int flag)
+{
+ struct vfsmount *m, *mnt = nd->mnt;
+ int recurse = flag & MS_REC;
+ int type = flag & ~MS_REC;
+
+ if (nd->dentry != nd->mnt->mnt_root)
+ return -EINVAL;
+
+ down_write(&namespace_sem);
+ spin_lock(&vfsmount_lock);
+ for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
+ change_mnt_propagation(m, type);
+ spin_unlock(&vfsmount_lock);
+ up_write(&namespace_sem);
+ return 0;
+}
+
+/*
* do loopback mount.
*/
static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
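
do_change_type() above is what the new MS_SHARED/MS_PRIVATE/MS_SLAVE/MS_UNBINDABLE flags reach: from userspace the propagation type of an existing mount point is changed with a plain mount(2) call carrying one of those flags, optionally OR'ed with MS_REC for the whole subtree. A small illustrative program follows; the #ifndef fallbacks are only needed where the libc headers predate these flags, and the numeric values match the kernel's definitions.

#include <stdio.h>
#include <string.h>
#include <sys/mount.h>

#ifndef MS_UNBINDABLE
#define MS_UNBINDABLE   (1 << 17)
#endif
#ifndef MS_PRIVATE
#define MS_PRIVATE      (1 << 18)
#endif
#ifndef MS_SLAVE
#define MS_SLAVE        (1 << 19)
#endif
#ifndef MS_SHARED
#define MS_SHARED       (1 << 20)
#endif

int main(int argc, char **argv)
{
        unsigned long flags;

        if (argc != 3) {
                fprintf(stderr, "usage: %s shared|private|slave|unbindable <mountpoint>\n",
                        argv[0]);
                return 1;
        }

        if (!strcmp(argv[1], "shared"))
                flags = MS_SHARED;
        else if (!strcmp(argv[1], "private"))
                flags = MS_PRIVATE;
        else if (!strcmp(argv[1], "slave"))
                flags = MS_SLAVE;
        else if (!strcmp(argv[1], "unbindable"))
                flags = MS_UNBINDABLE;
        else {
                fprintf(stderr, "unknown propagation type %s\n", argv[1]);
                return 1;
        }

        /* MS_REC could be OR'ed in to change the whole subtree. */
        if (mount(NULL, argv[2], NULL, flags, NULL) < 0) {
                perror("mount");
                return 1;
        }
        return 0;
}
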
@@ -630,32 +869,34 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
if (err)
return err;
- down_write(&current->namespace->sem);
+ down_write(&namespace_sem);
err = -EINVAL;
- if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) {
- err = -ENOMEM;
- if (recurse)
- mnt = copy_tree(old_nd.mnt, old_nd.dentry);
- else
- mnt = clone_mnt(old_nd.mnt, old_nd.dentry);
- }
+ if (IS_MNT_UNBINDABLE(old_nd.mnt))
+ goto out;
- if (mnt) {
- /* stop bind mounts from expiring */
+ if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
+ goto out;
+
+ err = -ENOMEM;
+ if (recurse)
+ mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
+ else
+ mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);
+
+ if (!mnt)
+ goto out;
+
+ err = graft_tree(mnt, nd);
+ if (err) {
+ LIST_HEAD(umount_list);
spin_lock(&vfsmount_lock);
- list_del_init(&mnt->mnt_expire);
+ umount_tree(mnt, 0, &umount_list);
spin_unlock(&vfsmount_lock);
-
- err = graft_tree(mnt, nd);
- if (err) {
- spin_lock(&vfsmount_lock);
- umount_tree(mnt);
- spin_unlock(&vfsmount_lock);
- } else
- mntput(mnt);
+ release_mounts(&umount_list);
}
- up_write(&current->namespace->sem);
+out:
+ up_write(&namespace_sem);
path_release(&old_nd);
return err;
}
@@ -665,12 +906,11 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
* If you've mounted a non-root directory somewhere and want to do remount
* on it - tough luck.
*/
-
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
void *data)
{
int err;
- struct super_block * sb = nd->mnt->mnt_sb;
+ struct super_block *sb = nd->mnt->mnt_sb;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -684,13 +924,23 @@ static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
down_write(&sb->s_umount);
err = do_remount_sb(sb, flags, data, 0);
if (!err)
- nd->mnt->mnt_flags=mnt_flags;
+ nd->mnt->mnt_flags = mnt_flags;
up_write(&sb->s_umount);
if (!err)
security_sb_post_remount(nd->mnt, flags, data);
return err;
}
+static inline int tree_contains_unbindable(struct vfsmount *mnt)
+{
+ struct vfsmount *p;
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
+ if (IS_MNT_UNBINDABLE(p))
+ return 1;
+ }
+ return 0;
+}
+
static int do_move_mount(struct nameidata *nd, char *old_name)
{
struct nameidata old_nd, parent_nd;
@@ -704,8 +954,8 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
if (err)
return err;
- down_write(&current->namespace->sem);
- while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+ down_write(&namespace_sem);
+ while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
;
err = -EINVAL;
if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
@@ -716,39 +966,47 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
if (IS_DEADDIR(nd->dentry->d_inode))
goto out1;
- spin_lock(&vfsmount_lock);
if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
- goto out2;
+ goto out1;
err = -EINVAL;
if (old_nd.dentry != old_nd.mnt->mnt_root)
- goto out2;
+ goto out1;
if (old_nd.mnt == old_nd.mnt->mnt_parent)
- goto out2;
+ goto out1;
if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
S_ISDIR(old_nd.dentry->d_inode->i_mode))
- goto out2;
-
+ goto out1;
+ /*
+ * Don't move a mount residing in a shared parent.
+ */
+ if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
+ goto out1;
+ /*
+ * Don't move a mount tree containing unbindable mounts to a destination
+ * mount which is shared.
+ */
+ if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
+ goto out1;
err = -ELOOP;
- for (p = nd->mnt; p->mnt_parent!=p; p = p->mnt_parent)
+ for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
if (p == old_nd.mnt)
- goto out2;
- err = 0;
+ goto out1;
- detach_mnt(old_nd.mnt, &parent_nd);
- attach_mnt(old_nd.mnt, nd);
+ if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
+ goto out1;
+ spin_lock(&vfsmount_lock);
 	/* if the mount is moved, it should no longer expire
 	 * automatically */
list_del_init(&old_nd.mnt->mnt_expire);
-out2:
spin_unlock(&vfsmount_lock);
out1:
up(&nd->dentry->d_inode->i_sem);
out:
- up_write(&current->namespace->sem);
+ up_write(&namespace_sem);
if (!err)
path_release(&parent_nd);
path_release(&old_nd);
@@ -787,9 +1045,9 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
{
int err;
- down_write(&current->namespace->sem);
+ down_write(&namespace_sem);
/* Something was mounted here while we slept */
- while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+ while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
;
err = -EINVAL;
if (!check_mnt(nd->mnt))
@@ -806,25 +1064,28 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
goto unlock;
newmnt->mnt_flags = mnt_flags;
- newmnt->mnt_namespace = current->namespace;
- err = graft_tree(newmnt, nd);
+ if ((err = graft_tree(newmnt, nd)))
+ goto unlock;
- if (err == 0 && fslist) {
+ if (fslist) {
/* add to the specified expiration list */
spin_lock(&vfsmount_lock);
list_add_tail(&newmnt->mnt_expire, fslist);
spin_unlock(&vfsmount_lock);
}
+ up_write(&namespace_sem);
+ return 0;
unlock:
- up_write(&current->namespace->sem);
+ up_write(&namespace_sem);
mntput(newmnt);
return err;
}
EXPORT_SYMBOL_GPL(do_add_mount);
-static void expire_mount(struct vfsmount *mnt, struct list_head *mounts)
+static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
+ struct list_head *umounts)
{
spin_lock(&vfsmount_lock);
@@ -841,27 +1102,13 @@ static void expire_mount(struct vfsmount *mnt, struct list_head *mounts)
* Check that it is still dead: the count should now be 2 - as
* contributed by the vfsmount parent and the mntget above
*/
- if (atomic_read(&mnt->mnt_count) == 2) {
- struct nameidata old_nd;
-
+ if (!propagate_mount_busy(mnt, 2)) {
/* delete from the namespace */
+ touch_namespace(mnt->mnt_namespace);
list_del_init(&mnt->mnt_list);
mnt->mnt_namespace = NULL;
- detach_mnt(mnt, &old_nd);
+ umount_tree(mnt, 1, umounts);
spin_unlock(&vfsmount_lock);
- path_release(&old_nd);
-
- /*
- * Now lay it to rest if this was the last ref on the superblock
- */
- if (atomic_read(&mnt->mnt_sb->s_active) == 1) {
- /* last instance - try to be smart */
- lock_kernel();
- DQUOT_OFF(mnt->mnt_sb);
- acct_auto_close(mnt->mnt_sb);
- unlock_kernel();
- }
- mntput(mnt);
} else {
/*
* Someone brought it back to life whilst we didn't have any
@@ -910,6 +1157,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
* - dispose of the corpse
*/
while (!list_empty(&graveyard)) {
+ LIST_HEAD(umounts);
mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
list_del_init(&mnt->mnt_expire);
@@ -921,13 +1169,12 @@ void mark_mounts_for_expiry(struct list_head *mounts)
get_namespace(namespace);
spin_unlock(&vfsmount_lock);
- down_write(&namespace->sem);
- expire_mount(mnt, mounts);
- up_write(&namespace->sem);
-
+ down_write(&namespace_sem);
+ expire_mount(mnt, mounts, &umounts);
+ up_write(&namespace_sem);
+ release_mounts(&umounts);
mntput(mnt);
put_namespace(namespace);
-
spin_lock(&vfsmount_lock);
}
@@ -942,8 +1189,8 @@ EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
* Note that this function differs from copy_from_user() in that it will oops
* on bad values of `to', rather than returning a short copy.
*/
-static long
-exact_copy_from_user(void *to, const void __user *from, unsigned long n)
+static long exact_copy_from_user(void *to, const void __user * from,
+ unsigned long n)
{
char *t = to;
const char __user *f = from;
@@ -964,12 +1211,12 @@ exact_copy_from_user(void *to, const void __user *from, unsigned long n)
return n;
}
-int copy_mount_options(const void __user *data, unsigned long *where)
+int copy_mount_options(const void __user * data, unsigned long *where)
{
int i;
unsigned long page;
unsigned long size;
-
+
*where = 0;
if (!data)
return 0;
@@ -988,7 +1235,7 @@ int copy_mount_options(const void __user *data, unsigned long *where)
i = size - exact_copy_from_user((void *)page, data, size);
if (!i) {
- free_page(page);
+ free_page(page);
return -EFAULT;
}
if (i != PAGE_SIZE)
@@ -1011,7 +1258,7 @@ int copy_mount_options(const void __user *data, unsigned long *where)
* Therefore, if this magic number is present, it carries no information
* and must be discarded.
*/
-long do_mount(char * dev_name, char * dir_name, char *type_page,
+long do_mount(char *dev_name, char *dir_name, char *type_page,
unsigned long flags, void *data_page)
{
struct nameidata nd;
@@ -1039,7 +1286,7 @@ long do_mount(char * dev_name, char * dir_name, char *type_page,
mnt_flags |= MNT_NODEV;
if (flags & MS_NOEXEC)
mnt_flags |= MNT_NOEXEC;
- flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_ACTIVE);
+ flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE);
/* ... and get the mountpoint */
retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
@@ -1055,6 +1302,8 @@ long do_mount(char * dev_name, char * dir_name, char *type_page,
data_page);
else if (flags & MS_BIND)
retval = do_loopback(&nd, dev_name, flags & MS_REC);
+ else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+ retval = do_change_type(&nd, flags);
else if (flags & MS_MOVE)
retval = do_move_mount(&nd, dev_name);
else
@@ -1091,14 +1340,16 @@ int copy_namespace(int flags, struct task_struct *tsk)
goto out;
atomic_set(&new_ns->count, 1);
- init_rwsem(&new_ns->sem);
INIT_LIST_HEAD(&new_ns->list);
+ init_waitqueue_head(&new_ns->poll);
+ new_ns->event = 0;
- down_write(&tsk->namespace->sem);
+ down_write(&namespace_sem);
/* First pass: copy the tree topology */
- new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root);
+ new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
+ CL_COPY_ALL | CL_EXPIRE);
if (!new_ns->root) {
- up_write(&tsk->namespace->sem);
+ up_write(&namespace_sem);
kfree(new_ns);
goto out;
}
@@ -1132,7 +1383,7 @@ int copy_namespace(int flags, struct task_struct *tsk)
p = next_mnt(p, namespace->root);
q = next_mnt(q, new_ns->root);
}
- up_write(&tsk->namespace->sem);
+ up_write(&namespace_sem);
tsk->namespace = new_ns;
@@ -1161,7 +1412,7 @@ asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
unsigned long dev_page;
char *dir_page;
- retval = copy_mount_options (type, &type_page);
+ retval = copy_mount_options(type, &type_page);
if (retval < 0)
return retval;
@@ -1170,17 +1421,17 @@ asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
if (IS_ERR(dir_page))
goto out1;
- retval = copy_mount_options (dev_name, &dev_page);
+ retval = copy_mount_options(dev_name, &dev_page);
if (retval < 0)
goto out2;
- retval = copy_mount_options (data, &data_page);
+ retval = copy_mount_options(data, &data_page);
if (retval < 0)
goto out3;
lock_kernel();
- retval = do_mount((char*)dev_page, dir_page, (char*)type_page,
- flags, (void*)data_page);
+ retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
+ flags, (void *)data_page);
unlock_kernel();
free_page(data_page);
@@ -1249,9 +1500,11 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
if (fs) {
atomic_inc(&fs->count);
task_unlock(p);
- if (fs->root==old_nd->dentry&&fs->rootmnt==old_nd->mnt)
+ if (fs->root == old_nd->dentry
+ && fs->rootmnt == old_nd->mnt)
set_fs_root(fs, new_nd->mnt, new_nd->dentry);
- if (fs->pwd==old_nd->dentry&&fs->pwdmnt==old_nd->mnt)
+ if (fs->pwd == old_nd->dentry
+ && fs->pwdmnt == old_nd->mnt)
set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
put_fs_struct(fs);
} else
@@ -1281,8 +1534,8 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
* though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
* first.
*/
-
-asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *put_old)
+asmlinkage long sys_pivot_root(const char __user *new_root,
+			       const char __user *put_old)
{
struct vfsmount *tmp;
struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
@@ -1293,14 +1546,15 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p
lock_kernel();
- error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);
+ error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+ &new_nd);
if (error)
goto out0;
error = -EINVAL;
if (!check_mnt(new_nd.mnt))
goto out1;
- error = __user_walk(put_old, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd);
+ error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
if (error)
goto out1;
@@ -1314,9 +1568,13 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p
user_nd.mnt = mntget(current->fs->rootmnt);
user_nd.dentry = dget(current->fs->root);
read_unlock(&current->fs->lock);
- down_write(&current->namespace->sem);
+ down_write(&namespace_sem);
down(&old_nd.dentry->d_inode->i_sem);
error = -EINVAL;
+ if (IS_MNT_SHARED(old_nd.mnt) ||
+ IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
+ IS_MNT_SHARED(user_nd.mnt->mnt_parent))
+ goto out2;
if (!check_mnt(user_nd.mnt))
goto out2;
error = -ENOENT;
@@ -1356,6 +1614,7 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p
detach_mnt(user_nd.mnt, &root_parent);
attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */
attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
+ touch_namespace(current->namespace);
spin_unlock(&vfsmount_lock);
chroot_fs_refs(&user_nd, &new_nd);
security_sb_post_pivotroot(&user_nd, &new_nd);
@@ -1364,7 +1623,7 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p
path_release(&parent_nd);
out2:
up(&old_nd.dentry->d_inode->i_sem);
- up_write(&current->namespace->sem);
+ up_write(&namespace_sem);
path_release(&user_nd);
path_release(&old_nd);
out1:
@@ -1391,7 +1650,8 @@ static void __init init_mount_tree(void)
panic("Can't allocate initial namespace");
atomic_set(&namespace->count, 1);
INIT_LIST_HEAD(&namespace->list);
- init_rwsem(&namespace->sem);
+ init_waitqueue_head(&namespace->poll);
+ namespace->event = 0;
list_add(&mnt->mnt_list, &namespace->list);
namespace->root = mnt;
mnt->mnt_namespace = namespace;
@@ -1414,11 +1674,12 @@ void __init mnt_init(unsigned long mempages)
unsigned int nr_hash;
int i;
+ init_rwsem(&namespace_sem);
+
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
- mount_hashtable = (struct list_head *)
- __get_free_page(GFP_ATOMIC);
+ mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
if (!mount_hashtable)
panic("Failed to allocate mount hash table\n");
@@ -1440,7 +1701,7 @@ void __init mnt_init(unsigned long mempages)
* from the number of bits we can fit.
*/
nr_hash = 1UL << hash_bits;
- hash_mask = nr_hash-1;
+ hash_mask = nr_hash - 1;
printk("Mount-cache hash table entries: %d\n", nr_hash);
@@ -1460,12 +1721,14 @@ void __init mnt_init(unsigned long mempages)
void __put_namespace(struct namespace *namespace)
{
struct vfsmount *root = namespace->root;
+ LIST_HEAD(umount_list);
namespace->root = NULL;
spin_unlock(&vfsmount_lock);
- down_write(&namespace->sem);
+ down_write(&namespace_sem);
spin_lock(&vfsmount_lock);
- umount_tree(root);
+ umount_tree(root, 0, &umount_list);
spin_unlock(&vfsmount_lock);
- up_write(&namespace->sem);
+ up_write(&namespace_sem);
+ release_mounts(&umount_list);
kfree(namespace);
}
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 88df79356a1..fd3efdca5ae 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -30,11 +30,13 @@
#define NCP_PACKET_SIZE_INTERNAL 65536
static int
-ncp_get_fs_info(struct ncp_server* server, struct inode* inode, struct ncp_fs_info __user *arg)
+ncp_get_fs_info(struct ncp_server *server, struct file *file,
+ struct ncp_fs_info __user *arg)
{
+ struct inode *inode = file->f_dentry->d_inode;
struct ncp_fs_info info;
- if ((permission(inode, MAY_WRITE, NULL) != 0)
+ if ((file_permission(file, MAY_WRITE) != 0)
&& (current->uid != server->m.mounted_uid)) {
return -EACCES;
}
@@ -58,11 +60,13 @@ ncp_get_fs_info(struct ncp_server* server, struct inode* inode, struct ncp_fs_in
}
static int
-ncp_get_fs_info_v2(struct ncp_server* server, struct inode* inode, struct ncp_fs_info_v2 __user * arg)
+ncp_get_fs_info_v2(struct ncp_server *server, struct file *file,
+		   struct ncp_fs_info_v2 __user *arg)
{
+ struct inode *inode = file->f_dentry->d_inode;
struct ncp_fs_info_v2 info2;
- if ((permission(inode, MAY_WRITE, NULL) != 0)
+ if ((file_permission(file, MAY_WRITE) != 0)
&& (current->uid != server->m.mounted_uid)) {
return -EACCES;
}
@@ -190,7 +194,7 @@ int ncp_ioctl(struct inode *inode, struct file *filp,
switch (cmd) {
case NCP_IOC_NCPREQUEST:
- if ((permission(inode, MAY_WRITE, NULL) != 0)
+ if ((file_permission(filp, MAY_WRITE) != 0)
&& (current->uid != server->m.mounted_uid)) {
return -EACCES;
}
@@ -245,16 +249,16 @@ int ncp_ioctl(struct inode *inode, struct file *filp,
return ncp_conn_logged_in(inode->i_sb);
case NCP_IOC_GET_FS_INFO:
- return ncp_get_fs_info(server, inode, argp);
+ return ncp_get_fs_info(server, filp, argp);
case NCP_IOC_GET_FS_INFO_V2:
- return ncp_get_fs_info_v2(server, inode, argp);
+ return ncp_get_fs_info_v2(server, filp, argp);
case NCP_IOC_GETMOUNTUID2:
{
unsigned long tmp = server->m.mounted_uid;
- if ( (permission(inode, MAY_READ, NULL) != 0)
+ if ((file_permission(filp, MAY_READ) != 0)
&& (current->uid != server->m.mounted_uid))
{
return -EACCES;
@@ -268,7 +272,7 @@ int ncp_ioctl(struct inode *inode, struct file *filp,
{
struct ncp_setroot_ioctl sr;
- if ( (permission(inode, MAY_READ, NULL) != 0)
+ if ((file_permission(filp, MAY_READ) != 0)
&& (current->uid != server->m.mounted_uid))
{
return -EACCES;
@@ -343,7 +347,7 @@ int ncp_ioctl(struct inode *inode, struct file *filp,
#ifdef CONFIG_NCPFS_PACKET_SIGNING
case NCP_IOC_SIGN_INIT:
- if ((permission(inode, MAY_WRITE, NULL) != 0)
+ if ((file_permission(filp, MAY_WRITE) != 0)
&& (current->uid != server->m.mounted_uid))
{
return -EACCES;
@@ -366,7 +370,7 @@ int ncp_ioctl(struct inode *inode, struct file *filp,
return 0;
case NCP_IOC_SIGN_WANTED:
- if ( (permission(inode, MAY_READ, NULL) != 0)
+ if ((file_permission(filp, MAY_READ) != 0)
&& (current->uid != server->m.mounted_uid))
{
return -EACCES;
@@ -379,7 +383,7 @@ int ncp_ioctl(struct inode *inode, struct file *filp,
{
int newstate;
- if ( (permission(inode, MAY_WRITE, NULL) != 0)
+ if ((file_permission(filp, MAY_WRITE) != 0)
&& (current->uid != server->m.mounted_uid))
{
return -EACCES;
@@ -400,7 +404,7 @@ int ncp_ioctl(struct inode *inode, struct file *filp,
#ifdef CONFIG_NCPFS_IOCTL_LOCKING
case NCP_IOC_LOCKUNLOCK:
- if ( (permission(inode, MAY_WRITE, NULL) != 0)
+ if ((file_permission(filp, MAY_WRITE) != 0)
&& (current->uid != server->m.mounted_uid))
{
return -EACCES;
@@ -605,7 +609,7 @@ outrel:
#endif /* CONFIG_NCPFS_NLS */
case NCP_IOC_SETDENTRYTTL:
- if ((permission(inode, MAY_WRITE, NULL) != 0) &&
+ if ((file_permission(filp, MAY_WRITE) != 0) &&
(current->uid != server->m.mounted_uid))
return -EACCES;
{
@@ -635,7 +639,7 @@ outrel:
so we have this out of switch */
if (cmd == NCP_IOC_GETMOUNTUID) {
__kernel_uid_t uid = 0;
- if ((permission(inode, MAY_READ, NULL) != 0)
+ if ((file_permission(filp, MAY_READ) != 0)
&& (current->uid != server->m.mounted_uid)) {
return -EACCES;
}
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 3976c177a7d..618a327027b 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -149,8 +149,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
}
}
spin_unlock(&clp->cl_lock);
- if (delegation != NULL)
- kfree(delegation);
+ kfree(delegation);
return status;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 24d2fbf549b..6391d896421 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1688,8 +1688,7 @@ static void nfs_kill_super(struct super_block *s)
rpciod_down(); /* release rpciod */
- if (server->hostname != NULL)
- kfree(server->hostname);
+ kfree(server->hostname);
kfree(server);
}
@@ -1908,8 +1907,7 @@ nfs_copy_user_string(char *dst, struct nfs_string *src, int maxlen)
return ERR_PTR(-ENOMEM);
}
if (copy_from_user(dst, src->data, maxlen)) {
- if (p != NULL)
- kfree(p);
+ kfree(p);
return ERR_PTR(-EFAULT);
}
dst[maxlen] = '\0';
@@ -2000,10 +1998,8 @@ static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
out_err:
s = (struct super_block *)p;
out_free:
- if (server->mnt_path)
- kfree(server->mnt_path);
- if (server->hostname)
- kfree(server->hostname);
+ kfree(server->mnt_path);
+ kfree(server->hostname);
kfree(server);
return s;
}
@@ -2023,8 +2019,7 @@ static void nfs4_kill_super(struct super_block *sb)
destroy_nfsv4_state(server);
- if (server->hostname != NULL)
- kfree(server->hostname);
+ kfree(server->hostname);
kfree(server);
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 52a26baa114..0675f3215e0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -69,10 +69,8 @@ init_nfsv4_state(struct nfs_server *server)
void
destroy_nfsv4_state(struct nfs_server *server)
{
- if (server->mnt_path) {
- kfree(server->mnt_path);
- server->mnt_path = NULL;
- }
+ kfree(server->mnt_path);
+ server->mnt_path = NULL;
if (server->nfs4_state) {
nfs4_put_client(server->nfs4_state);
server->nfs4_state = NULL;
@@ -311,8 +309,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
new = NULL;
}
spin_unlock(&clp->cl_lock);
- if (new)
- kfree(new);
+ kfree(new);
if (sp != NULL)
return sp;
put_rpccred(cred);
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index f732541a333..d639d172d56 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -52,8 +52,7 @@ nfs_put_unlinkdata(struct nfs_unlinkdata *data)
{
if (--data->count == 0) {
nfs_detach_unlinkdata(data);
- if (data->name.name != NULL)
- kfree(data->name.name);
+ kfree(data->name.name);
kfree(data);
}
}
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 057aff74550..417ec02df44 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -190,8 +190,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
out:
if (dom)
auth_domain_put(dom);
- if (buf)
- kfree(buf);
+ kfree(buf);
return err;
}
@@ -428,8 +427,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
path_release(&nd);
if (dom)
auth_domain_put(dom);
- if (buf)
- kfree(buf);
+ kfree(buf);
return err;
}
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index e0e134d6bab..9147b8524d0 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -366,7 +366,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
len = args->len = ntohl(*p++);
hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
- if (rqstp->rq_arg.len < len + hdr)
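+	/*
+	 * Check the length without overflowing: len comes straight from
+	 * the client's request, so "len + hdr" could wrap.
+	 */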
+ if (rqstp->rq_arg.len < hdr ||
+ rqstp->rq_arg.len - hdr < len)
return 0;
args->vec[0].iov_base = (void*)p;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 4c414635023..dcd67318694 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -151,8 +151,7 @@ static u32 *read_buf(struct nfsd4_compoundargs *argp, int nbytes)
if (nbytes <= sizeof(argp->tmp))
p = argp->tmp;
else {
- if (argp->tmpp)
- kfree(argp->tmpp);
+ kfree(argp->tmpp);
p = argp->tmpp = kmalloc(nbytes, GFP_KERNEL);
if (!p)
return NULL;
@@ -2476,10 +2475,8 @@ void nfsd4_release_compoundargs(struct nfsd4_compoundargs *args)
kfree(args->ops);
args->ops = args->iops;
}
- if (args->tmpp) {
- kfree(args->tmpp);
- args->tmpp = NULL;
- }
+ kfree(args->tmpp);
+ args->tmpp = NULL;
while (args->to_free) {
struct tmpbuf *tb = args->to_free;
args->to_free = tb->next;
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 119e4d4495b..d852ebb538e 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -93,8 +93,7 @@ nfsd_cache_shutdown(void)
cache_disabled = 1;
- if (hash_list)
- kfree (hash_list);
+ kfree (hash_list);
hash_list = NULL;
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 841c562991e..a0871b3efeb 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -23,6 +23,7 @@
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/init.h>
+#include <linux/string.h>
#include <linux/nfs.h>
#include <linux/nfsd_idmap.h>
@@ -35,6 +36,8 @@
#include <asm/uaccess.h>
+unsigned int nfsd_versbits = ~0;
+
/*
* We have a single directory with 9 nodes in it.
*/
@@ -50,8 +53,15 @@ enum {
NFSD_List,
NFSD_Fh,
NFSD_Threads,
+ NFSD_Versions,
+ /*
+ * The below MUST come last. Otherwise we leave a hole in nfsd_files[]
+ * with !CONFIG_NFSD_V4 and simple_fill_super() goes oops
+ */
+#ifdef CONFIG_NFSD_V4
NFSD_Leasetime,
NFSD_RecoveryDir,
+#endif
};
/*
@@ -66,8 +76,11 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size);
static ssize_t write_getfs(struct file *file, char *buf, size_t size);
static ssize_t write_filehandle(struct file *file, char *buf, size_t size);
static ssize_t write_threads(struct file *file, char *buf, size_t size);
+static ssize_t write_versions(struct file *file, char *buf, size_t size);
+#ifdef CONFIG_NFSD_V4
static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
+#endif
static ssize_t (*write_op[])(struct file *, char *, size_t) = {
[NFSD_Svc] = write_svc,
@@ -79,8 +92,11 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
[NFSD_Getfs] = write_getfs,
[NFSD_Fh] = write_filehandle,
[NFSD_Threads] = write_threads,
+ [NFSD_Versions] = write_versions,
+#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = write_leasetime,
[NFSD_RecoveryDir] = write_recoverydir,
+#endif
};
static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
@@ -104,9 +120,23 @@ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *bu
return rv;
}
+static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
+{
+	if (!file->private_data) {
+ /* An attempt to read a transaction file without writing
+ * causes a 0-byte write so that the file can return
+ * state information
+ */
+ ssize_t rv = nfsctl_transaction_write(file, buf, 0, pos);
+ if (rv < 0)
+ return rv;
+ }
+ return simple_transaction_read(file, buf, size, pos);
+}
+
static struct file_operations transaction_ops = {
.write = nfsctl_transaction_write,
- .read = simple_transaction_read,
+ .read = nfsctl_transaction_read,
.release = simple_transaction_release,
};
@@ -329,6 +359,70 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
return strlen(buf);
}
+static ssize_t write_versions(struct file *file, char *buf, size_t size)
+{
+ /*
+ * Format:
+ * [-/+]vers [-/+]vers ...
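+	 *
+	 * e.g. writing "+3 -4" enables NFSv3 and disables NFSv4 for the
+	 * next nfsd start, and a subsequent read reports the full state,
+	 * e.g. "+2 +3 -4".  Changes are refused with -EBUSY while nfsd
+	 * is running.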
+ */
+ char *mesg = buf;
+ char *vers, sign;
+ int len, num;
+ ssize_t tlen = 0;
+ char *sep;
+
+	if (size > 0) {
+ if (nfsd_serv)
+ return -EBUSY;
+ if (buf[size-1] != '\n')
+ return -EINVAL;
+ buf[size-1] = 0;
+
+ vers = mesg;
+ len = qword_get(&mesg, vers, size);
+		if (len <= 0)
+			return -EINVAL;
+ do {
+ sign = *vers;
+ if (sign == '+' || sign == '-')
+ num = simple_strtol((vers+1), NULL, 0);
+ else
+ num = simple_strtol(vers, NULL, 0);
+			switch (num) {
+ case 2:
+ case 3:
+ case 4:
+ if (sign != '-')
+ NFSCTL_VERSET(nfsd_versbits, num);
+ else
+ NFSCTL_VERUNSET(nfsd_versbits, num);
+ break;
+ default:
+ return -EINVAL;
+ }
+ vers += len + 1;
+ tlen += len;
+ } while ((len = qword_get(&mesg, vers, size)) > 0);
+ /* If all get turned off, turn them back on, as
+ * having no versions is BAD
+ */
+		if ((nfsd_versbits & NFSCTL_VERALL) == 0)
+ nfsd_versbits = NFSCTL_VERALL;
+ }
+ /* Now write current state into reply buffer */
+ len = 0;
+ sep = "";
+	for (num = 2; num <= 4; num++)
+ if (NFSCTL_VERISSET(NFSCTL_VERALL, num)) {
+ len += sprintf(buf+len, "%s%c%d", sep,
+ NFSCTL_VERISSET(nfsd_versbits, num)?'+':'-',
+ num);
+ sep = " ";
+ }
+ len += sprintf(buf+len, "\n");
+ return len;
+}
+
+#ifdef CONFIG_NFSD_V4
extern time_t nfs4_leasetime(void);
static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
@@ -370,6 +464,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
status = nfs4_reset_recoverydir(recdir);
return strlen(buf);
}
+#endif
/*----------------------------------------------------------------------------*/
/*
@@ -389,6 +484,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_List] = {"exports", &exports_operations, S_IRUGO},
[NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR},
+ [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 1697539a717..89ed0469686 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -30,6 +30,7 @@
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/stats.h>
#include <linux/nfsd/cache.h>
+#include <linux/nfsd/syscall.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
@@ -52,7 +53,7 @@
extern struct svc_program nfsd_program;
static void nfsd(struct svc_rqst *rqstp);
struct timeval nfssvc_boot;
-static struct svc_serv *nfsd_serv;
+struct svc_serv *nfsd_serv;
static atomic_t nfsd_busy;
static unsigned long nfsd_last_call;
static DEFINE_SPINLOCK(nfsd_call_lock);
@@ -63,6 +64,31 @@ struct nfsd_list {
};
static struct list_head nfsd_list = LIST_HEAD_INIT(nfsd_list);
+static struct svc_version * nfsd_version[] = {
+ [2] = &nfsd_version2,
+#if defined(CONFIG_NFSD_V3)
+ [3] = &nfsd_version3,
+#endif
+#if defined(CONFIG_NFSD_V4)
+ [4] = &nfsd_version4,
+#endif
+};
+
+#define NFSD_MINVERS 2
+#define NFSD_NRVERS (sizeof(nfsd_version)/sizeof(nfsd_version[0]))
+static struct svc_version *nfsd_versions[NFSD_NRVERS];
+
+struct svc_program nfsd_program = {
+ .pg_prog = NFS_PROGRAM, /* program number */
+ .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */
+ .pg_vers = nfsd_versions, /* version table */
+ .pg_name = "nfsd", /* program name */
+ .pg_class = "nfsd", /* authentication class */
+	.pg_stats		= &nfsd_svcstats,	/* rpc statistics */
+ .pg_authenticate = &svc_set_client, /* export authentication */
+
+};
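+
+/*
+ * Note: .pg_vers points at nfsd_versions[], which starts out all-NULL
+ * and is filled in from nfsd_versbits when the service is created in
+ * nfsd_svc() below; the static nfsd_version[] table only lists the
+ * versions that were compiled in.
+ */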
+
/*
* Maximum number of nfsd processes
*/
@@ -80,11 +106,12 @@ int
nfsd_svc(unsigned short port, int nrservs)
{
int error;
- int none_left;
+ int none_left, found_one, i;
struct list_head *victim;
lock_kernel();
- dprintk("nfsd: creating service\n");
+ dprintk("nfsd: creating service: vers 0x%x\n",
+ nfsd_versbits);
error = -EINVAL;
if (nrservs <= 0)
nrservs = 0;
@@ -99,6 +126,27 @@ nfsd_svc(unsigned short port, int nrservs)
if (error<0)
goto out;
if (!nfsd_serv) {
+ /*
+		 * Use nfsd_versbits to define which
+		 * versions will be advertised.
+		 * If nfsd_versbits doesn't list any version,
+		 * export them all.
+ */
+ found_one = 0;
+
+ for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
+ if (NFSCTL_VERISSET(nfsd_versbits, i)) {
+ nfsd_program.pg_vers[i] = nfsd_version[i];
+ found_one = 1;
+ } else
+ nfsd_program.pg_vers[i] = NULL;
+ }
+
+ if (!found_one) {
+ for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
+ nfsd_program.pg_vers[i] = nfsd_version[i];
+ }
+
atomic_set(&nfsd_busy, 0);
error = -ENOMEM;
nfsd_serv = svc_create(&nfsd_program, NFSD_BUFSIZE);
@@ -379,6 +427,7 @@ static struct svc_program nfsd_acl_program = {
.pg_name = "nfsd",
.pg_class = "nfsd",
.pg_stats = &nfsd_acl_svcstats,
+ .pg_authenticate = &svc_set_client,
};
static struct svc_stat nfsd_acl_svcstats = {
@@ -389,28 +438,3 @@ static struct svc_stat nfsd_acl_svcstats = {
#else
#define nfsd_acl_program_p NULL
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
-
-extern struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
-
-static struct svc_version * nfsd_version[] = {
- [2] = &nfsd_version2,
-#if defined(CONFIG_NFSD_V3)
- [3] = &nfsd_version3,
-#endif
-#if defined(CONFIG_NFSD_V4)
- [4] = &nfsd_version4,
-#endif
-};
-
-#define NFSD_NRVERS (sizeof(nfsd_version)/sizeof(nfsd_version[0]))
-struct svc_program nfsd_program = {
- .pg_next = nfsd_acl_program_p,
- .pg_prog = NFS_PROGRAM, /* program number */
- .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */
- .pg_vers = nfsd_version, /* version table */
- .pg_name = "nfsd", /* program name */
- .pg_class = "nfsd", /* authentication class */
- .pg_stats = &nfsd_svcstats, /* version table */
- .pg_authenticate = &svc_set_client, /* export authentication */
-
-};
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 4f2cd3d2756..af7c3c3074b 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -254,12 +254,19 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
/* Get inode */
err = fh_verify(rqstp, fhp, ftype, accmode);
- if (err || !iap->ia_valid)
+ if (err)
goto out;
dentry = fhp->fh_dentry;
inode = dentry->d_inode;
+ /* Ignore any mode updates on symlinks */
+ if (S_ISLNK(inode->i_mode))
+ iap->ia_valid &= ~ATTR_MODE;
+
+ if (!iap->ia_valid)
+ goto out;
+
/* NFSv2 does not differentiate between "set-[ac]time-to-now"
* which only requires access, and "set-[ac]time-to-X" which
* requires ownership.
diff --git a/fs/open.c b/fs/open.c
index 8d06ec911fd..f53a5b9ffb7 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -194,7 +194,7 @@ out:
return error;
}
-int do_truncate(struct dentry *dentry, loff_t length)
+int do_truncate(struct dentry *dentry, loff_t length, struct file *filp)
{
int err;
struct iattr newattrs;
@@ -205,6 +205,10 @@ int do_truncate(struct dentry *dentry, loff_t length)
newattrs.ia_size = length;
newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+ if (filp) {
+ newattrs.ia_file = filp;
+ newattrs.ia_valid |= ATTR_FILE;
+ }
down(&dentry->d_inode->i_sem);
err = notify_change(dentry, &newattrs);
@@ -236,7 +240,7 @@ static inline long do_sys_truncate(const char __user * path, loff_t length)
if (!S_ISREG(inode->i_mode))
goto dput_and_out;
- error = permission(inode,MAY_WRITE,&nd);
+ error = vfs_permission(&nd, MAY_WRITE);
if (error)
goto dput_and_out;
@@ -262,7 +266,7 @@ static inline long do_sys_truncate(const char __user * path, loff_t length)
error = locks_verify_truncate(inode, NULL, length);
if (!error) {
DQUOT_INIT(inode);
- error = do_truncate(nd.dentry, length);
+ error = do_truncate(nd.dentry, length, NULL);
}
put_write_access(inode);
@@ -314,7 +318,7 @@ static inline long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
error = locks_verify_truncate(inode, file, length);
if (!error)
- error = do_truncate(dentry, length);
+ error = do_truncate(dentry, length, file);
out_putf:
fput(file);
out:
@@ -390,7 +394,7 @@ asmlinkage long sys_utime(char __user * filename, struct utimbuf __user * times)
goto dput_and_out;
if (current->fsuid != inode->i_uid &&
- (error = permission(inode,MAY_WRITE,&nd)) != 0)
+ (error = vfs_permission(&nd, MAY_WRITE)) != 0)
goto dput_and_out;
}
down(&inode->i_sem);
@@ -443,7 +447,7 @@ long do_utimes(char __user * filename, struct timeval * times)
goto dput_and_out;
if (current->fsuid != inode->i_uid &&
- (error = permission(inode,MAY_WRITE,&nd)) != 0)
+ (error = vfs_permission(&nd, MAY_WRITE)) != 0)
goto dput_and_out;
}
down(&inode->i_sem);
@@ -502,7 +506,7 @@ asmlinkage long sys_access(const char __user * filename, int mode)
res = __user_walk(filename, LOOKUP_FOLLOW|LOOKUP_ACCESS, &nd);
if (!res) {
- res = permission(nd.dentry->d_inode, mode, &nd);
+ res = vfs_permission(&nd, mode);
/* SuS v2 requires we report a read only fs too */
if(!res && (mode & S_IWOTH) && IS_RDONLY(nd.dentry->d_inode)
&& !special_file(nd.dentry->d_inode->i_mode))
@@ -526,7 +530,7 @@ asmlinkage long sys_chdir(const char __user * filename)
if (error)
goto out;
- error = permission(nd.dentry->d_inode,MAY_EXEC,&nd);
+ error = vfs_permission(&nd, MAY_EXEC);
if (error)
goto dput_and_out;
@@ -559,7 +563,7 @@ asmlinkage long sys_fchdir(unsigned int fd)
if (!S_ISDIR(inode->i_mode))
goto out_putf;
- error = permission(inode, MAY_EXEC, NULL);
+ error = file_permission(file, MAY_EXEC);
if (!error)
set_fs_pwd(current->fs, mnt, dentry);
out_putf:
@@ -577,7 +581,7 @@ asmlinkage long sys_chroot(const char __user * filename)
if (error)
goto out;
- error = permission(nd.dentry->d_inode,MAY_EXEC,&nd);
+ error = vfs_permission(&nd, MAY_EXEC);
if (error)
goto dput_and_out;
@@ -887,6 +891,10 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags)
return filp;
}
+/*
+ * dentry_open() will have done dput(dentry) and mntput(mnt) if it returns an
+ * error.
+ */
struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
{
int error;
@@ -894,8 +902,11 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags)
error = -ENFILE;
f = get_empty_filp();
- if (f == NULL)
+ if (f == NULL) {
+ dput(dentry);
+ mntput(mnt);
return ERR_PTR(error);
+ }
return __dentry_open(dentry, mnt, flags, f, NULL);
}
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 1be11ce96b0..aeb0106890e 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -1088,8 +1088,7 @@ static void __exit exit_openprom_fs(void)
unregister_filesystem(&openprom_fs_type);
free_pages ((unsigned long)nodes, alloced);
for (i = 0; i < aliases_nodes; i++)
- if (alias_names [i])
- kfree (alias_names [i]);
+ kfree (alias_names [i]);
nodes = NULL;
}
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index d59dcbf2bd4..6327bcb2d73 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -29,7 +29,7 @@
* cyl-cyl-head-head structure
*/
static inline int
-cchh2blk (cchh_t *ptr, struct hd_geometry *geo) {
+cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) {
return ptr->cc * geo->heads * geo->sectors +
ptr->hh * geo->sectors;
}
@@ -40,7 +40,7 @@ cchh2blk (cchh_t *ptr, struct hd_geometry *geo) {
* cyl-cyl-head-head-block structure
*/
static inline int
-cchhb2blk (cchhb_t *ptr, struct hd_geometry *geo) {
+cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
return ptr->cc * geo->heads * geo->sectors +
ptr->hh * geo->sectors +
ptr->b;
@@ -56,7 +56,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
struct hd_geometry *geo;
char type[5] = {0,};
char name[7] = {0,};
- volume_label_t *vlabel;
+ struct vtoc_volume_label *vlabel;
unsigned char *data;
Sector sect;
@@ -64,7 +64,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
goto out_noinfo;
if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL)
goto out_nogeo;
- if ((vlabel = kmalloc(sizeof(volume_label_t), GFP_KERNEL)) == NULL)
+ if ((vlabel = kmalloc(sizeof(struct vtoc_volume_label),
+ GFP_KERNEL)) == NULL)
goto out_novlab;
if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 ||
@@ -86,7 +87,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
strncpy(name, data + 8, 6);
else
strncpy(name, data + 4, 6);
- memcpy (vlabel, data, sizeof(volume_label_t));
+ memcpy (vlabel, data, sizeof(struct vtoc_volume_label));
put_dev_sector(sect);
EBCASC(type, 4);
@@ -129,9 +130,9 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
counter = 0;
while ((data = read_dev_sector(bdev, blk*(blocksize/512),
&sect)) != NULL) {
- format1_label_t f1;
+ struct vtoc_format1_label f1;
- memcpy(&f1, data, sizeof(format1_label_t));
+ memcpy(&f1, data, sizeof(struct vtoc_format1_label));
put_dev_sector(sect);
/* skip FMT4 / FMT5 / FMT7 labels */
diff --git a/fs/partitions/ultrix.c b/fs/partitions/ultrix.c
index 8a8d4d9db31..ec852c11dce 100644
--- a/fs/partitions/ultrix.c
+++ b/fs/partitions/ultrix.c
@@ -7,6 +7,7 @@
*/
#include "check.h"
+#include "ultrix.h"
int ultrix_partition(struct parsed_partitions *state, struct block_device *bdev)
{
diff --git a/fs/pnode.c b/fs/pnode.c
new file mode 100644
index 00000000000..aeeec8ba8dd
--- /dev/null
+++ b/fs/pnode.c
@@ -0,0 +1,305 @@
+/*
+ * linux/fs/pnode.c
+ *
+ * (C) Copyright IBM Corporation 2005.
+ * Released under GPL v2.
+ * Author : Ram Pai (linuxram@us.ibm.com)
+ *
+ */
+#include <linux/namespace.h>
+#include <linux/mount.h>
+#include <linux/fs.h>
+#include "pnode.h"
+
+/* return the next shared peer mount of @p */
+static inline struct vfsmount *next_peer(struct vfsmount *p)
+{
+ return list_entry(p->mnt_share.next, struct vfsmount, mnt_share);
+}
+
+static inline struct vfsmount *first_slave(struct vfsmount *p)
+{
+ return list_entry(p->mnt_slave_list.next, struct vfsmount, mnt_slave);
+}
+
+static inline struct vfsmount *next_slave(struct vfsmount *p)
+{
+ return list_entry(p->mnt_slave.next, struct vfsmount, mnt_slave);
+}
+
+static int do_make_slave(struct vfsmount *mnt)
+{
+ struct vfsmount *peer_mnt = mnt, *master = mnt->mnt_master;
+ struct vfsmount *slave_mnt;
+
+ /*
+	 * slave 'mnt' to a peer mount that has the
+	 * same root dentry. If none is available, then
+	 * slave it to anything that is available.
+ */
+ while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
+ peer_mnt->mnt_root != mnt->mnt_root) ;
+
+ if (peer_mnt == mnt) {
+ peer_mnt = next_peer(mnt);
+ if (peer_mnt == mnt)
+ peer_mnt = NULL;
+ }
+ list_del_init(&mnt->mnt_share);
+
+ if (peer_mnt)
+ master = peer_mnt;
+
+ if (master) {
+ list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
+ slave_mnt->mnt_master = master;
+ list_del(&mnt->mnt_slave);
+ list_add(&mnt->mnt_slave, &master->mnt_slave_list);
+ list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
+ INIT_LIST_HEAD(&mnt->mnt_slave_list);
+ } else {
+ struct list_head *p = &mnt->mnt_slave_list;
+ while (!list_empty(p)) {
+ slave_mnt = list_entry(p->next,
+ struct vfsmount, mnt_slave);
+ list_del_init(&slave_mnt->mnt_slave);
+ slave_mnt->mnt_master = NULL;
+ }
+ }
+ mnt->mnt_master = master;
+ CLEAR_MNT_SHARED(mnt);
+ INIT_LIST_HEAD(&mnt->mnt_slave_list);
+ return 0;
+}
+
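+/*
+ * Set the propagation type of @mnt.  Reached via do_change_type()
+ * when mount(2) is called with one of MS_SHARED, MS_PRIVATE, MS_SLAVE
+ * or MS_UNBINDABLE, e.g. "mount --make-shared /mnt" with a mount(8)
+ * new enough to know about shared subtrees.
+ */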
+void change_mnt_propagation(struct vfsmount *mnt, int type)
+{
+ if (type == MS_SHARED) {
+ set_mnt_shared(mnt);
+ return;
+ }
+ do_make_slave(mnt);
+ if (type != MS_SLAVE) {
+ list_del_init(&mnt->mnt_slave);
+ mnt->mnt_master = NULL;
+ if (type == MS_UNBINDABLE)
+ mnt->mnt_flags |= MNT_UNBINDABLE;
+ }
+}
+
+/*
+ * get the next mount in the propagation tree.
+ * @m: the mount seen last
+ * @origin: the original mount from where the tree walk initiated
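+ *
+ * The walk is roughly depth-first: a mount's slaves (and their peers
+ * and slaves in turn) are visited before moving on to the next peer,
+ * and NULL is returned once the walk comes back around to @origin.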
+ */
+static struct vfsmount *propagation_next(struct vfsmount *m,
+ struct vfsmount *origin)
+{
+ /* are there any slaves of this mount? */
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ return first_slave(m);
+
+ while (1) {
+ struct vfsmount *next;
+ struct vfsmount *master = m->mnt_master;
+
+		if (master == origin->mnt_master) {
+ next = next_peer(m);
+ return ((next == origin) ? NULL : next);
+ } else if (m->mnt_slave.next != &master->mnt_slave_list)
+ return next_slave(m);
+
+ /* back at master */
+ m = master;
+ }
+}
+
+/*
+ * return the source mount to be used for cloning
+ *
+ * @dest the current destination mount
+ * @last_dest the last seen destination mount
+ * @last_src the last seen source mount
+ * @type return CL_SLAVE if the new mount has to be
+ * cloned as a slave.
+ */
+static struct vfsmount *get_source(struct vfsmount *dest,
+ struct vfsmount *last_dest,
+ struct vfsmount *last_src,
+ int *type)
+{
+ struct vfsmount *p_last_src = NULL;
+ struct vfsmount *p_last_dest = NULL;
+	*type = CL_PROPAGATION;
+
+ if (IS_MNT_SHARED(dest))
+ *type |= CL_MAKE_SHARED;
+
+ while (last_dest != dest->mnt_master) {
+ p_last_dest = last_dest;
+ p_last_src = last_src;
+ last_dest = last_dest->mnt_master;
+ last_src = last_src->mnt_master;
+ }
+
+ if (p_last_dest) {
+ do {
+ p_last_dest = next_peer(p_last_dest);
+ } while (IS_MNT_NEW(p_last_dest));
+ }
+
+ if (dest != p_last_dest) {
+ *type |= CL_SLAVE;
+ return last_src;
+ } else
+ return p_last_src;
+}
+
+/*
+ * mount 'source_mnt' under the destination 'dest_mnt' at
+ * dentry 'dest_dentry', and propagate that mount to
+ * all the peer and slave mounts of 'dest_mnt'.
+ * Link all the new mounts into a propagation tree headed at
+ * source_mnt. Also link all the new mounts using ->mnt_list
+ * headed at source_mnt's ->mnt_list
+ *
+ * @dest_mnt: destination mount.
+ * @dest_dentry: destination dentry.
+ * @source_mnt: source mount.
+ * @tree_list : list of heads of trees to be attached.
+ */
+int propagate_mnt(struct vfsmount *dest_mnt, struct dentry *dest_dentry,
+ struct vfsmount *source_mnt, struct list_head *tree_list)
+{
+ struct vfsmount *m, *child;
+ int ret = 0;
+ struct vfsmount *prev_dest_mnt = dest_mnt;
+ struct vfsmount *prev_src_mnt = source_mnt;
+ LIST_HEAD(tmp_list);
+ LIST_HEAD(umount_list);
+
+ for (m = propagation_next(dest_mnt, dest_mnt); m;
+ m = propagation_next(m, dest_mnt)) {
+ int type;
+ struct vfsmount *source;
+
+ if (IS_MNT_NEW(m))
+ continue;
+
+ source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
+
+ if (!(child = copy_tree(source, source->mnt_root, type))) {
+ ret = -ENOMEM;
+ list_splice(tree_list, tmp_list.prev);
+ goto out;
+ }
+
+ if (is_subdir(dest_dentry, m->mnt_root)) {
+ mnt_set_mountpoint(m, dest_dentry, child);
+ list_add_tail(&child->mnt_hash, tree_list);
+ } else {
+ /*
+ * This can happen if the parent mount was bind mounted
+ * on some subdirectory of a shared/slave mount.
+ */
+ list_add_tail(&child->mnt_hash, &tmp_list);
+ }
+ prev_dest_mnt = m;
+ prev_src_mnt = child;
+ }
+out:
+ spin_lock(&vfsmount_lock);
+ while (!list_empty(&tmp_list)) {
+ child = list_entry(tmp_list.next, struct vfsmount, mnt_hash);
+ list_del_init(&child->mnt_hash);
+ umount_tree(child, 0, &umount_list);
+ }
+ spin_unlock(&vfsmount_lock);
+ release_mounts(&umount_list);
+ return ret;
+}
+
+/*
+ * return true if the refcount is greater than count
+ */
+static inline int do_refcount_check(struct vfsmount *mnt, int count)
+{
+ int mycount = atomic_read(&mnt->mnt_count);
+ return (mycount > count);
+}
+
+/*
+ * check if the mount 'mnt' can be unmounted successfully.
+ * @mnt: the mount to be checked for unmount
+ * NOTE: unmounting 'mnt' would naturally propagate to all
+ * other mounts its parent propagates to.
+ * Check if any of these mounts that **do not have submounts**
+ * have more references than 'refcnt'. If so return busy.
+ */
+int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
+{
+ struct vfsmount *m, *child;
+ struct vfsmount *parent = mnt->mnt_parent;
+ int ret = 0;
+
+ if (mnt == parent)
+ return do_refcount_check(mnt, refcnt);
+
+ /*
+ * quickly check if the current mount can be unmounted.
+ * If not, we don't have to go checking for all other
+ * mounts
+ */
+ if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
+ return 1;
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ child = __lookup_mnt(m, mnt->mnt_mountpoint, 0);
+ if (child && list_empty(&child->mnt_mounts) &&
+ (ret = do_refcount_check(child, 1)))
+ break;
+ }
+ return ret;
+}
+
+/*
+ * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
+ * parent propagates to.
+ */
+static void __propagate_umount(struct vfsmount *mnt)
+{
+ struct vfsmount *parent = mnt->mnt_parent;
+ struct vfsmount *m;
+
+ BUG_ON(parent == mnt);
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+
+ struct vfsmount *child = __lookup_mnt(m,
+ mnt->mnt_mountpoint, 0);
+ /*
+		 * umount the child only if the child has no
+		 * children of its own
+ */
+ if (child && list_empty(&child->mnt_mounts)) {
+ list_del(&child->mnt_hash);
+ list_add_tail(&child->mnt_hash, &mnt->mnt_hash);
+ }
+ }
+}
+
+/*
+ * collect all mounts that receive propagation from the mounts in @list,
+ * and return these additional mounts in the same list.
+ * @list: the list of mounts to be unmounted.
+ */
+int propagate_umount(struct list_head *list)
+{
+ struct vfsmount *mnt;
+
+ list_for_each_entry(mnt, list, mnt_hash)
+ __propagate_umount(mnt);
+ return 0;
+}
diff --git a/fs/pnode.h b/fs/pnode.h
new file mode 100644
index 00000000000..020e1bb60fd
--- /dev/null
+++ b/fs/pnode.h
@@ -0,0 +1,37 @@
+/*
+ * linux/fs/pnode.h
+ *
+ * (C) Copyright IBM Corporation 2005.
+ * Released under GPL v2.
+ *
+ */
+#ifndef _LINUX_PNODE_H
+#define _LINUX_PNODE_H
+
+#include <linux/list.h>
+#include <linux/mount.h>
+
+#define IS_MNT_SHARED(mnt) (mnt->mnt_flags & MNT_SHARED)
+#define IS_MNT_SLAVE(mnt) (mnt->mnt_master)
+#define IS_MNT_NEW(mnt) (!mnt->mnt_namespace)
+#define CLEAR_MNT_SHARED(mnt) (mnt->mnt_flags &= ~MNT_SHARED)
+#define IS_MNT_UNBINDABLE(mnt) (mnt->mnt_flags & MNT_UNBINDABLE)
+
+#define CL_EXPIRE 0x01
+#define CL_SLAVE 0x02
+#define CL_COPY_ALL 0x04
+#define CL_MAKE_SHARED 0x08
+#define CL_PROPAGATION 0x10
+
+static inline void set_mnt_shared(struct vfsmount *mnt)
+{
+ mnt->mnt_flags &= ~MNT_PNODE_MASK;
+ mnt->mnt_flags |= MNT_SHARED;
+}
+
+void change_mnt_propagation(struct vfsmount *, int);
+int propagate_mnt(struct vfsmount *, struct dentry *, struct vfsmount *,
+ struct list_head *);
+int propagate_umount(struct list_head *);
+int propagate_mount_busy(struct vfsmount *, int);
+#endif /* _LINUX_PNODE_H */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a170450aadb..634355e1698 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -70,6 +70,7 @@
#include <linux/seccomp.h>
#include <linux/cpuset.h>
#include <linux/audit.h>
+#include <linux/poll.h>
#include "internal.h"
/*
@@ -660,26 +661,38 @@ static struct file_operations proc_smaps_operations = {
#endif
extern struct seq_operations mounts_op;
+struct proc_mounts {
+ struct seq_file m;
+ int event;
+};
+
static int mounts_open(struct inode *inode, struct file *file)
{
struct task_struct *task = proc_task(inode);
- int ret = seq_open(file, &mounts_op);
+ struct namespace *namespace;
+ struct proc_mounts *p;
+ int ret = -EINVAL;
- if (!ret) {
- struct seq_file *m = file->private_data;
- struct namespace *namespace;
- task_lock(task);
- namespace = task->namespace;
- if (namespace)
- get_namespace(namespace);
- task_unlock(task);
-
- if (namespace)
- m->private = namespace;
- else {
- seq_release(inode, file);
- ret = -EINVAL;
+ task_lock(task);
+ namespace = task->namespace;
+ if (namespace)
+ get_namespace(namespace);
+ task_unlock(task);
+
+ if (namespace) {
+ ret = -ENOMEM;
+ p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
+ if (p) {
+ file->private_data = &p->m;
+ ret = seq_open(file, &mounts_op);
+ if (!ret) {
+ p->m.private = namespace;
+ p->event = namespace->event;
+ return 0;
+ }
+ kfree(p);
}
+ put_namespace(namespace);
}
return ret;
}
@@ -692,11 +705,30 @@ static int mounts_release(struct inode *inode, struct file *file)
return seq_release(inode, file);
}
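+
+/*
+ * /proc/<pid>/mounts is pollable: whenever the mount tree of this
+ * namespace changes (touch_namespace() in namespace.c), ns->event is
+ * bumped and waiters on ns->poll are woken; POLLERR is then reported
+ * here so that pollers re-read the file.
+ */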
+static unsigned mounts_poll(struct file *file, poll_table *wait)
+{
+ struct proc_mounts *p = file->private_data;
+ struct namespace *ns = p->m.private;
+ unsigned res = 0;
+
+ poll_wait(file, &ns->poll, wait);
+
+ spin_lock(&vfsmount_lock);
+ if (p->event != ns->event) {
+ p->event = ns->event;
+ res = POLLERR;
+ }
+ spin_unlock(&vfsmount_lock);
+
+ return res;
+}
+
static struct file_operations proc_mounts_operations = {
.open = mounts_open,
.read = seq_read,
.llseek = seq_lseek,
.release = mounts_release,
+ .poll = mounts_poll,
};
#define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 6fd57f15419..fb117b74809 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -49,6 +49,39 @@ static int property_read_proc(char *page, char **start, off_t off,
*/
/*
+ * Add a property to a node
+ */
+static struct proc_dir_entry *
+__proc_device_tree_add_prop(struct proc_dir_entry *de, struct property *pp)
+{
+ struct proc_dir_entry *ent;
+
+ /*
+ * Unfortunately proc_register puts each new entry
+ * at the beginning of the list. So we rearrange them.
+ */
+ ent = create_proc_read_entry(pp->name,
+ strncmp(pp->name, "security-", 9)
+ ? S_IRUGO : S_IRUSR, de,
+ property_read_proc, pp);
+ if (ent == NULL)
+ return NULL;
+
+ if (!strncmp(pp->name, "security-", 9))
+ ent->size = 0; /* don't leak number of password chars */
+ else
+ ent->size = pp->length;
+
+ return ent;
+}
+
+
+void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop)
+{
+ __proc_device_tree_add_prop(pde, prop);
+}
+
+/*
* Process a node, adding entries for its children and its properties.
*/
void proc_device_tree_add_node(struct device_node *np,
@@ -57,11 +90,9 @@ void proc_device_tree_add_node(struct device_node *np,
struct property *pp;
struct proc_dir_entry *ent;
struct device_node *child;
- struct proc_dir_entry *list = NULL, **lastp;
const char *p;
set_node_proc_entry(np, de);
- lastp = &list;
for (child = NULL; (child = of_get_next_child(np, child));) {
p = strrchr(child->full_name, '/');
if (!p)
@@ -71,9 +102,6 @@ void proc_device_tree_add_node(struct device_node *np,
ent = proc_mkdir(p, de);
if (ent == 0)
break;
- *lastp = ent;
- ent->next = NULL;
- lastp = &ent->next;
proc_device_tree_add_node(child, ent);
}
of_node_put(child);
@@ -84,7 +112,7 @@ void proc_device_tree_add_node(struct device_node *np,
* properties are quite unimportant for us though, thus we
* simply "skip" them here, but we do have to check.
*/
- for (ent = list; ent != NULL; ent = ent->next)
+ for (ent = de->subdir; ent != NULL; ent = ent->next)
if (!strcmp(ent->name, pp->name))
break;
if (ent != NULL) {
@@ -94,25 +122,10 @@ void proc_device_tree_add_node(struct device_node *np,
continue;
}
- /*
- * Unfortunately proc_register puts each new entry
- * at the beginning of the list. So we rearrange them.
- */
- ent = create_proc_read_entry(pp->name,
- strncmp(pp->name, "security-", 9)
- ? S_IRUGO : S_IRUSR, de,
- property_read_proc, pp);
+ ent = __proc_device_tree_add_prop(de, pp);
if (ent == 0)
break;
- if (!strncmp(pp->name, "security-", 9))
- ent->size = 0; /* don't leak number of password chars */
- else
- ent->size = pp->length;
- ent->next = NULL;
- *lastp = ent;
- lastp = &ent->next;
}
- de->subdir = list;
}
/*
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index d2fa42006d8..9ab97cef0da 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -195,7 +195,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
static int show_map(struct seq_file *m, void *v)
{
- return show_map_internal(m, v, 0);
+ return show_map_internal(m, v, NULL);
}
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
diff --git a/fs/quota.c b/fs/quota.c
index 1df7832b4e0..612e04db4b9 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -15,6 +15,7 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
+#include <linux/quotaops.h>
/* Check validity of generic quotactl commands */
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index c20babd6216..7892a865b58 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -251,12 +251,12 @@ static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handl
blocks_to_allocate,
blocks_to_allocate);
if (res != CARRY_ON) {
- res = -ENOSPC;
+ res = res == QUOTA_EXCEEDED ? -EDQUOT : -ENOSPC;
pathrelse(&path);
goto error_exit;
}
} else {
- res = -ENOSPC;
+ res = res == QUOTA_EXCEEDED ? -EDQUOT : -ENOSPC;
pathrelse(&path);
goto error_exit;
}
diff --git a/fs/reiserfs/hashes.c b/fs/reiserfs/hashes.c
index 37c1306eb9b..a3ec238fd9e 100644
--- a/fs/reiserfs/hashes.c
+++ b/fs/reiserfs/hashes.c
@@ -19,6 +19,7 @@
//
#include <linux/kernel.h>
+#include <linux/reiserfs_fs.h>
#include <asm/types.h>
#include <asm/bug.h>
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 38ef913767f..7c40570b71d 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -28,13 +28,17 @@
*/
int seq_open(struct file *file, struct seq_operations *op)
{
- struct seq_file *p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
+ struct seq_file *p = file->private_data;
+
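+	/*
+	 * file->private_data may already point at a seq_file embedded in
+	 * a larger structure, set up by the caller before calling
+	 * seq_open() (as mounts_open() now does with struct proc_mounts);
+	 * only allocate a bare seq_file when it does not.
+	 */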
+ if (!p) {
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ file->private_data = p;
+ }
memset(p, 0, sizeof(*p));
sema_init(&p->sem, 1);
p->op = op;
- file->private_data = p;
/*
* Wrappers around seq_open(e.g. swaps_open) need to be
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c
index 2d85dd7415b..a0f296d9928 100644
--- a/fs/smbfs/request.c
+++ b/fs/smbfs/request.c
@@ -786,8 +786,7 @@ int smb_request_recv(struct smb_sb_info *server)
/* We should never be called with any of these states */
case SMB_RECV_END:
case SMB_RECV_REQUEST:
- server->rstate = SMB_RECV_END;
- break;
+ BUG();
}
if (result < 0) {
diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
index 0c64bc3a012..cdc53c4fb38 100644
--- a/fs/smbfs/symlink.c
+++ b/fs/smbfs/symlink.c
@@ -45,7 +45,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
int len = smb_proc_read_link(server_from_dentry(dentry),
dentry, link, PATH_MAX - 1);
if (len < 0) {
- putname(link);
+ __putname(link);
link = ERR_PTR(len);
} else {
link[len] = 0;
@@ -59,7 +59,7 @@ static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
char *s = nd_get_link(nd);
if (!IS_ERR(s))
- putname(s);
+ __putname(s);
}
struct inode_operations smb_link_inode_operations =
diff --git a/fs/super.c b/fs/super.c
index f60155ec778..6689dded3c8 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -171,6 +171,7 @@ void deactivate_super(struct super_block *s)
if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
s->s_count -= S_BIAS-1;
spin_unlock(&sb_lock);
+ DQUOT_OFF(s);
down_write(&s->s_umount);
fs->kill_sb(s);
put_filesystem(fs);
@@ -474,8 +475,6 @@ rescan:
return NULL;
}
-EXPORT_SYMBOL(user_get_super);
-
asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf)
{
struct super_block *s;
diff --git a/fs/udf/file.c b/fs/udf/file.c
index bb40d63f328..01f520c71dc 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -186,7 +186,7 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
{
int result = -EINVAL;
- if ( permission(inode, MAY_READ, NULL) != 0 )
+	if (file_permission(filp, MAY_READ) != 0)
{
udf_debug("no permission to access inode %lu\n",
inode->i_ino);
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 0e54922daa0..663669810be 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -39,8 +39,7 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb)
{\
if (UDF_SB(X))\
{\
- if (UDF_SB_PARTMAPS(X))\
- kfree(UDF_SB_PARTMAPS(X));\
+ kfree(UDF_SB_PARTMAPS(X));\
UDF_SB_PARTMAPS(X) = NULL;\
}\
}
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index f036d694ba5..54828ebcf1b 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -472,13 +472,14 @@ static int ufs_read_cylinder_structures (struct super_block *sb) {
return 1;
failed:
- if (base) kfree (base);
+ kfree (base);
if (sbi->s_ucg) {
for (i = 0; i < uspi->s_ncg; i++)
- if (sbi->s_ucg[i]) brelse (sbi->s_ucg[i]);
+ if (sbi->s_ucg[i])
+ brelse (sbi->s_ucg[i]);
kfree (sbi->s_ucg);
for (i = 0; i < UFS_MAX_GROUP_LOADED; i++)
- if (sbi->s_ucpi[i]) kfree (sbi->s_ucpi[i]);
+ kfree (sbi->s_ucpi[i]);
}
UFSD(("EXIT (FAILED)\n"))
return 0;
@@ -981,9 +982,10 @@ magic_found:
dalloc_failed:
iput(inode);
failed:
- if (ubh) ubh_brelse_uspi (uspi);
- if (uspi) kfree (uspi);
- if (sbi) kfree(sbi);
+ if (ubh)
+ ubh_brelse_uspi (uspi);
+ kfree (uspi);
+ kfree(sbi);
sb->s_fs_info = NULL;
UFSD(("EXIT (FAILED)\n"))
return -EINVAL;
diff --git a/fs/xattr.c b/fs/xattr.c
index f6e00c0e114..a9db2255799 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -74,8 +74,7 @@ setxattr(struct dentry *d, char __user *name, void __user *value,
}
out:
up(&d->d_inode->i_sem);
- if (kvalue)
- kfree(kvalue);
+ kfree(kvalue);
return error;
}
@@ -173,8 +172,7 @@ getxattr(struct dentry *d, char __user *name, void __user *value, size_t size)
error = -E2BIG;
}
out:
- if (kvalue)
- kfree(kvalue);
+ kfree(kvalue);
return error;
}
@@ -259,8 +257,7 @@ listxattr(struct dentry *d, char __user *list, size_t size)
error = -E2BIG;
}
out:
- if (klist)
- kfree(klist);
+ kfree(klist);
return error;
}
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 8f82c1a20dc..c64a29cdfff 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -30,8 +30,8 @@
#define KM_NOFS 0x0004u
#define KM_MAYFAIL 0x0008u
-#define kmem_zone kmem_cache_s
-#define kmem_zone_t kmem_cache_t
+#define kmem_zone kmem_cache
+#define kmem_zone_t struct kmem_cache
typedef unsigned long xfs_pflags_t;
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 44fed10af0d..d8e21ba0ccc 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -72,7 +72,6 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
-#include <linux/version.h>
#include <linux/sort.h>
#include <asm/page.h>
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 99b50d2bda9..1a48dbb902a 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -17,12 +17,5 @@
*/
#ifndef __XFS_H__
#define __XFS_H__
-
-#include <linux/version.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <linux-2.6/xfs_linux.h>
-#else
-#include <linux-2.4/xfs_linux.h>
-#endif
-
#endif /* __XFS_H__ */
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
index 5a5c7a63e80..864bf695568 100644
--- a/fs/xfs/xfs_dmapi.h
+++ b/fs/xfs/xfs_dmapi.h
@@ -18,6 +18,7 @@
#ifndef __XFS_DMAPI_H__
#define __XFS_DMAPI_H__
+#include <linux/version.h>
/* Values used to define the on-disk version of dm_attrname_t. All
* on-disk attribute names start with the 8-byte string "SGI_DMI_".
*