Diffstat (limited to 'fs')
-rw-r--r--  fs/bio.c | 9
-rw-r--r--  fs/cifs/CHANGES | 3
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifspdu.h | 6
-rw-r--r--  fs/cifs/cifsproto.h | 7
-rw-r--r--  fs/cifs/cifssmb.c | 360
-rw-r--r--  fs/cifs/connect.c | 8
-rw-r--r--  fs/cifs/inode.c | 3
-rw-r--r--  fs/cifs/misc.c | 2
-rw-r--r--  fs/cifs/xattr.c | 8
-rw-r--r--  fs/dlm/ast.c | 74
-rw-r--r--  fs/dlm/ast.h | 4
-rw-r--r--  fs/dlm/debug_fs.c | 2
-rw-r--r--  fs/dlm/dlm_internal.h | 10
-rw-r--r--  fs/dlm/lock.c | 120
-rw-r--r--  fs/dlm/lockspace.c | 14
-rw-r--r--  fs/dlm/user.c | 10
-rw-r--r--  fs/dlm/user.h | 4
-rw-r--r--  fs/ext4/ext4.h | 2
-rw-r--r--  fs/file.c | 2
-rw-r--r--  fs/fuse/dev.c | 30
-rw-r--r--  fs/gfs2/aops.c | 4
-rw-r--r--  fs/gfs2/glock.c | 75
-rw-r--r--  fs/gfs2/glock.h | 7
-rw-r--r--  fs/gfs2/glops.c | 16
-rw-r--r--  fs/gfs2/incore.h | 5
-rw-r--r--  fs/gfs2/inode.c | 6
-rw-r--r--  fs/gfs2/lock_dlm.c | 5
-rw-r--r--  fs/gfs2/lops.c | 4
-rw-r--r--  fs/gfs2/main.c | 28
-rw-r--r--  fs/gfs2/meta_io.c | 46
-rw-r--r--  fs/gfs2/meta_io.h | 12
-rw-r--r--  fs/gfs2/ops_fstype.c | 4
-rw-r--r--  fs/gfs2/super.c | 27
-rw-r--r--  fs/gfs2/sys.c | 2
-rw-r--r--  fs/gfs2/util.c | 1
-rw-r--r--  fs/gfs2/util.h | 1
-rw-r--r--  fs/nfs/Kconfig | 3
-rw-r--r--  fs/nfs/iostat.h | 4
-rw-r--r--  fs/nilfs2/dat.c | 3
-rw-r--r--  fs/nilfs2/ioctl.c | 66
-rw-r--r--  fs/nilfs2/recovery.c | 41
-rw-r--r--  fs/nilfs2/segbuf.c | 18
-rw-r--r--  fs/nilfs2/segbuf.h | 5
-rw-r--r--  fs/nilfs2/segment.c | 120
-rw-r--r--  fs/nilfs2/segment.h | 2
-rw-r--r--  fs/nilfs2/super.c | 15
-rw-r--r--  fs/nilfs2/the_nilfs.c | 38
-rw-r--r--  fs/nilfs2/the_nilfs.h | 3
-rw-r--r--  fs/ocfs2/Makefile | 1
-rw-r--r--  fs/ocfs2/alloc.c | 5
-rw-r--r--  fs/ocfs2/aops.c | 5
-rw-r--r--  fs/ocfs2/cluster/masklog.c | 1
-rw-r--r--  fs/ocfs2/cluster/masklog.h | 7
-rw-r--r--  fs/ocfs2/dir.c | 2
-rw-r--r--  fs/ocfs2/dlm/Makefile | 3
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 2
-rw-r--r--  fs/ocfs2/dlmfs/Makefile | 5
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c (renamed from fs/ocfs2/dlm/dlmfs.c) | 127
-rw-r--r--  fs/ocfs2/dlmfs/dlmfsver.c (renamed from fs/ocfs2/dlm/dlmfsver.c) | 0
-rw-r--r--  fs/ocfs2/dlmfs/dlmfsver.h (renamed from fs/ocfs2/dlm/dlmfsver.h) | 0
-rw-r--r--  fs/ocfs2/dlmfs/userdlm.c (renamed from fs/ocfs2/dlm/userdlm.c) | 308
-rw-r--r--  fs/ocfs2/dlmfs/userdlm.h (renamed from fs/ocfs2/dlm/userdlm.h) | 16
-rw-r--r--  fs/ocfs2/dlmglue.c | 284
-rw-r--r--  fs/ocfs2/file.c | 13
-rw-r--r--  fs/ocfs2/ioctl.h | 6
-rw-r--r--  fs/ocfs2/localalloc.c | 2
-rw-r--r--  fs/ocfs2/ocfs2.h | 32
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 57
-rw-r--r--  fs/ocfs2/ocfs2_ioctl.h | 79
-rw-r--r--  fs/ocfs2/ocfs2_lockingver.h | 2
-rw-r--r--  fs/ocfs2/refcounttree.c | 6
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 37
-rw-r--r--  fs/ocfs2/stack_user.c | 49
-rw-r--r--  fs/ocfs2/stackglue.c | 98
-rw-r--r--  fs/ocfs2/stackglue.h | 95
-rw-r--r--  fs/ocfs2/suballoc.c | 171
-rw-r--r--  fs/ocfs2/suballoc.h | 1
-rw-r--r--  fs/ocfs2/super.c | 10
-rw-r--r--  fs/ocfs2/xattr.c | 2182
-rw-r--r--  fs/partitions/check.c | 7
-rw-r--r--  fs/proc/array.c | 2
-rw-r--r--  fs/proc/base.c | 6
-rw-r--r--  fs/proc/kmsg.c | 14
-rw-r--r--  fs/proc/proc_devtree.c | 7
-rw-r--r--  fs/seq_file.c | 130
-rw-r--r--  fs/xfs/Makefile | 2
-rw-r--r--  fs/xfs/linux-2.6/kmem.c | 56
-rw-r--r--  fs/xfs/linux-2.6/kmem.h | 21
-rw-r--r--  fs/xfs/linux-2.6/xfs_acl.c | 11
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 320
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 52
-rw-r--r--  fs/xfs/linux-2.6/xfs_fs_subr.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 21
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.h | 12
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 62
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.h | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 169
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 186
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.h | 81
-rw-r--r--  fs/xfs/linux-2.6/xfs_xattr.c | 27
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 47
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 99
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.h | 4
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 40
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 4
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c | 49
-rw-r--r--  fs/xfs/xfs_acl.h | 4
-rw-r--r--  fs/xfs/xfs_ag.h | 16
-rw-r--r--  fs/xfs/xfs_alloc.c | 96
-rw-r--r--  fs/xfs/xfs_alloc_btree.c | 9
-rw-r--r--  fs/xfs/xfs_attr.c | 52
-rw-r--r--  fs/xfs/xfs_attr.h | 3
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 30
-rw-r--r--  fs/xfs/xfs_attr_sf.h | 2
-rw-r--r--  fs/xfs/xfs_bmap.c | 17
-rw-r--r--  fs/xfs/xfs_bmap_btree.c | 2
-rw-r--r--  fs/xfs/xfs_bmap_btree.h | 1
-rw-r--r--  fs/xfs/xfs_btree.c | 4
-rw-r--r--  fs/xfs/xfs_buf_item.c | 72
-rw-r--r--  fs/xfs/xfs_da_btree.c | 4
-rw-r--r--  fs/xfs/xfs_da_btree.h | 5
-rw-r--r--  fs/xfs/xfs_dfrag.c | 43
-rw-r--r--  fs/xfs/xfs_dfrag.h | 3
-rw-r--r--  fs/xfs/xfs_dir2.c | 8
-rw-r--r--  fs/xfs/xfs_dir2.h | 4
-rw-r--r--  fs/xfs/xfs_dir2_block.c | 9
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 2
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 2
-rw-r--r--  fs/xfs/xfs_dir2_node.h | 2
-rw-r--r--  fs/xfs/xfs_dir2_sf.c | 2
-rw-r--r--  fs/xfs/xfs_extfree_item.c | 4
-rw-r--r--  fs/xfs/xfs_filestream.c | 42
-rw-r--r--  fs/xfs/xfs_filestream.h | 28
-rw-r--r--  fs/xfs/xfs_fsops.c | 42
-rw-r--r--  fs/xfs/xfs_ialloc.c | 62
-rw-r--r--  fs/xfs/xfs_iget.c | 10
-rw-r--r--  fs/xfs/xfs_inode.c | 126
-rw-r--r--  fs/xfs/xfs_inode.h | 11
-rw-r--r--  fs/xfs/xfs_inode_item.c | 129
-rw-r--r--  fs/xfs/xfs_inode_item.h | 6
-rw-r--r--  fs/xfs/xfs_itable.c | 12
-rw-r--r--  fs/xfs/xfs_log.c | 383
-rw-r--r--  fs/xfs/xfs_log.h | 19
-rw-r--r--  fs/xfs/xfs_log_priv.h | 5
-rw-r--r--  fs/xfs/xfs_log_recover.c | 222
-rw-r--r--  fs/xfs/xfs_log_recover.h | 23
-rw-r--r--  fs/xfs/xfs_mount.c | 181
-rw-r--r--  fs/xfs/xfs_mount.h | 29
-rw-r--r--  fs/xfs/xfs_mru_cache.c | 2
-rw-r--r--  fs/xfs/xfs_mru_cache.h | 1
-rw-r--r--  fs/xfs/xfs_quota.h | 9
-rw-r--r--  fs/xfs/xfs_rw.c | 155
-rw-r--r--  fs/xfs/xfs_rw.h | 4
-rw-r--r--  fs/xfs/xfs_trans.c | 7
-rw-r--r--  fs/xfs/xfs_trans.h | 3
-rw-r--r--  fs/xfs/xfs_trans_ail.c | 34
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 27
-rw-r--r--  fs/xfs/xfs_types.h | 4
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 33
-rw-r--r--  fs/xfs/xfs_vnodeops.h | 10
165 files changed, 4511 insertions, 3796 deletions
diff --git a/fs/bio.c b/fs/bio.c
index 88094afc29e..dc17afd672e 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -507,10 +507,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
int nr_pages;
nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (nr_pages > queue_max_phys_segments(q))
- nr_pages = queue_max_phys_segments(q);
- if (nr_pages > queue_max_hw_segments(q))
- nr_pages = queue_max_hw_segments(q);
+ if (nr_pages > queue_max_segments(q))
+ nr_pages = queue_max_segments(q);
return nr_pages;
}
@@ -575,8 +573,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
* make this too complex.
*/
- while (bio->bi_phys_segments >= queue_max_phys_segments(q)
- || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
+ while (bio->bi_phys_segments >= queue_max_segments(q)) {
if (retried_segments)
return 0;
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 49503d2edc7..bc0025cdd1c 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,6 +1,7 @@
Version 1.62
------------
-Add sockopt=TCP_NODELAY mount option.
+Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
+to more strictly handle corrupt frames.
Version 1.61
------------
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ed751bb657d..a1c817eb291 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -205,7 +205,7 @@ struct cifsUidInfo {
struct cifsSesInfo {
struct list_head smb_ses_list;
struct list_head tcon_list;
- struct semaphore sesSem;
+ struct mutex session_mutex;
#if 0
struct cifsUidInfo *uidInfo; /* pointer to user info */
#endif
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 3877737f96a..14d036d8db1 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -415,10 +415,10 @@ struct smb_hdr {
__u8 WordCount;
} __attribute__((packed));
/* given a pointer to an smb_hdr retrieve the value of byte count */
-#define BCC(smb_var) (*(__u16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount)))
-#define BCC_LE(smb_var) (*(__le16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount)))
+#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
+#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
/* given a pointer to an smb_hdr retrieve the pointer to the byte area */
-#define pByteArea(smb_var) ((unsigned char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount) + 2)
+#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2)
/*
* Computer Name Length (since Netbios name was length 16 with last byte 0x20)
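The cifspdu.h hunk above only wraps the macro parameter in parentheses, but that is what lets the hardened EA parsing added to cifssmb.c below pass the expression &pSMBr->hdr to pByteArea() and BCC(); with the old, unparenthesised definition that expansion would not even compile. A minimal standalone sketch of the issue, not taken from the patch (hdr, rsp and WORDS_* are simplified stand-in names, not CIFS code):

#include <stdio.h>

struct hdr { unsigned char WordCount; };
struct rsp { struct hdr hdr; };

/* Unparenthesised, like the old BCC(): WORDS_BAD(&r.hdr) would expand to
 * 2 * &r.hdr->WordCount, which parses as &((r.hdr)->WordCount) and fails to
 * compile because r.hdr is a struct, not a pointer. */
#define WORDS_BAD(h)	(2 * h->WordCount)

/* Parenthesised, like the patched BCC(): safe for any argument expression. */
#define WORDS_GOOD(h)	(2 * (h)->WordCount)

int main(void)
{
	struct rsp r = { .hdr = { .WordCount = 10 } };

	printf("%d\n", WORDS_GOOD(&r.hdr));	/* prints 20 */
	return 0;
}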
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 5646727e33f..88e2bc44ac5 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -363,13 +363,10 @@ extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
__u32 filter, struct file *file, int multishot,
const struct nls_table *nls_codepage);
extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, char *EAData,
+ const unsigned char *searchName,
+ const unsigned char *ea_name, char *EAData,
size_t bufsize, const struct nls_table *nls_codepage,
int remap_special_chars);
-extern ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, const unsigned char *ea_name,
- unsigned char *ea_value, size_t buf_size,
- const struct nls_table *nls_codepage, int remap_special_chars);
extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
const char *fileName, const char *ea_name,
const void *ea_value, const __u16 ea_value_len,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 941441d3e38..9d17df3e076 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -170,19 +170,19 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
* need to prevent multiple threads trying to simultaneously
* reconnect the same SMB session
*/
- down(&ses->sesSem);
+ mutex_lock(&ses->session_mutex);
if (ses->need_reconnect)
rc = cifs_setup_session(0, ses, nls_codepage);
/* do we need to reconnect tcon? */
if (rc || !tcon->need_reconnect) {
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
goto out;
}
mark_open_files_invalid(tcon);
rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
cFYI(1, ("reconnect tcon rc = %d", rc));
if (rc)
@@ -700,13 +700,13 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
if (!ses || !ses->server)
return -EIO;
- down(&ses->sesSem);
+ mutex_lock(&ses->session_mutex);
if (ses->need_reconnect)
goto session_already_dead; /* no need to send SMBlogoff if uid
already closed due to reconnect */
rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
if (rc) {
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
return rc;
}
@@ -721,7 +721,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
pSMB->AndXCommand = 0xFF;
rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
session_already_dead:
- up(&ses->sesSem);
+ mutex_unlock(&ses->session_mutex);
/* if session dead then we do not need to do ulogoff,
since server closed smb session, no sense reporting
@@ -5269,22 +5269,34 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
cifs_buf_release(pSMB);
return rc;
}
+
#ifdef CONFIG_CIFS_XATTR
+/*
+ * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common
+ * function used by listxattr and getxattr type calls. When ea_name is set,
+ * it looks for that attribute name and stuffs that value into the EAData
+ * buffer. When ea_name is NULL, it stuffs a list of attribute names into the
+ * buffer. In both cases, the return value is either the length of the
+ * resulting data or a negative error code. If EAData is a NULL pointer then
+ * the data isn't copied to it, but the length is returned.
+ */
ssize_t
CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName,
- char *EAData, size_t buf_size,
- const struct nls_table *nls_codepage, int remap)
+ const unsigned char *searchName, const unsigned char *ea_name,
+ char *EAData, size_t buf_size,
+ const struct nls_table *nls_codepage, int remap)
{
/* BB assumes one setup word */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
- int name_len;
+ int list_len;
+ struct fealist *ea_response_data;
struct fea *temp_fea;
char *temp_ptr;
- __u16 params, byte_count;
+ char *end_of_smb;
+ __u16 params, byte_count, data_offset;
cFYI(1, ("In Query All EAs path %s", searchName));
QAllEAsRetry:
@@ -5294,22 +5306,22 @@ QAllEAsRetry:
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
+ list_len =
cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
PATH_MAX, nls_codepage, remap);
- name_len++; /* trailing null */
- name_len *= 2;
+ list_len++; /* trailing null */
+ list_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ list_len = strnlen(searchName, PATH_MAX);
+ list_len++; /* trailing null */
+ strncpy(pSMB->FileName, searchName, list_len);
}
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
+ params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxDataCount = cpu_to_le16(4000);
+ pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -5334,237 +5346,117 @@ QAllEAsRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Send error in QueryAllEAs = %d", rc));
- } else { /* decode response */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ goto QAllEAsOut;
+ }
- /* BB also check enough total bytes returned */
- /* BB we need to improve the validity checking
- of these trans2 responses */
- if (rc || (pSMBr->ByteCount < 4))
- rc = -EIO; /* bad smb */
- /* else if (pFindData){
- memcpy((char *) pFindData,
- (char *) &pSMBr->hdr.Protocol +
- data_offset, kl);
- }*/ else {
- /* check that length of list is not more than bcc */
- /* check that each entry does not go beyond length
- of list */
- /* check that each element of each entry does not
- go beyond end of list */
- __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- struct fealist *ea_response_data;
- rc = 0;
- /* validate_trans2_offsets() */
- /* BB check if start of smb + data_offset > &bcc+ bcc */
- ea_response_data = (struct fealist *)
- (((char *) &pSMBr->hdr.Protocol) +
- data_offset);
- name_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1, ("ea length %d", name_len));
- if (name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1, ("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- while (name_len > 0) {
- __u16 value_len;
- name_len -= 4;
- temp_ptr += 4;
- rc += temp_fea->name_len;
- /* account for prefix user. and trailing null */
- rc = rc + 5 + 1;
- if (rc < (int)buf_size) {
- memcpy(EAData, "user.", 5);
- EAData += 5;
- memcpy(EAData, temp_ptr,
- temp_fea->name_len);
- EAData += temp_fea->name_len;
- /* null terminate name */
- *EAData = 0;
- EAData = EAData + 1;
- } else if (buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- value_len =
- le16_to_cpu(temp_fea->value_len);
- name_len -= value_len;
- temp_ptr += value_len;
- /* BB check that temp_ptr is still
- within the SMB BB*/
-
- /* no trailing null to account for
- in value len */
- /* go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
- }
- }
- }
+
+ /* BB also check enough total bytes returned */
+ /* BB we need to improve the validity checking
+ of these trans2 responses */
+
+ rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ if (rc || (pSMBr->ByteCount < 4)) {
+ rc = -EIO; /* bad smb */
+ goto QAllEAsOut;
}
- cifs_buf_release(pSMB);
- if (rc == -EAGAIN)
- goto QAllEAsRetry;
- return (ssize_t)rc;
-}
+ /* check that length of list is not more than bcc */
+ /* check that each entry does not go beyond length
+ of list */
+ /* check that each element of each entry does not
+ go beyond end of list */
+ /* validate_trans2_offsets() */
+ /* BB check if start of smb + data_offset > &bcc+ bcc */
-ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon,
- const unsigned char *searchName, const unsigned char *ea_name,
- unsigned char *ea_value, size_t buf_size,
- const struct nls_table *nls_codepage, int remap)
-{
- TRANSACTION2_QPI_REQ *pSMB = NULL;
- TRANSACTION2_QPI_RSP *pSMBr = NULL;
- int rc = 0;
- int bytes_returned;
- int name_len;
- struct fea *temp_fea;
- char *temp_ptr;
- __u16 params, byte_count;
+ data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+ ea_response_data = (struct fealist *)
+ (((char *) &pSMBr->hdr.Protocol) + data_offset);
- cFYI(1, ("In Query EA path %s", searchName));
-QEARetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
+ list_len = le32_to_cpu(ea_response_data->list_len);
+ cFYI(1, ("ea length %d", list_len));
+ if (list_len <= 8) {
+ cFYI(1, ("empty EA list returned from server"));
+ goto QAllEAsOut;
+ }
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
- PATH_MAX, nls_codepage, remap);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ /* make sure list_len doesn't go past end of SMB */
+ end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
+ if ((char *)ea_response_data + list_len > end_of_smb) {
+ cFYI(1, ("EA list appears to go beyond SMB"));
+ rc = -EIO;
+ goto QAllEAsOut;
}
- params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
- pSMB->TotalDataCount = 0;
- pSMB->MaxParameterCount = cpu_to_le16(2);
- /* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxDataCount = cpu_to_le16(4000);
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = cpu_to_le16(offsetof(
- struct smb_com_transaction2_qpi_req, InformationLevel) - 4);
- pSMB->DataCount = 0;
- pSMB->DataOffset = 0;
- pSMB->SetupCount = 1;
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION);
- byte_count = params + 1 /* pad */ ;
- pSMB->TotalParameterCount = cpu_to_le16(params);
- pSMB->ParameterCount = pSMB->TotalParameterCount;
- pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS);
- pSMB->Reserved4 = 0;
- pSMB->hdr.smb_buf_length += byte_count;
- pSMB->ByteCount = cpu_to_le16(byte_count);
+ /* account for ea list len */
+ list_len -= 4;
+ temp_fea = ea_response_data->list;
+ temp_ptr = (char *)temp_fea;
+ while (list_len > 0) {
+ unsigned int name_len;
+ __u16 value_len;
+
+ list_len -= 4;
+ temp_ptr += 4;
+ /* make sure we can read name_len and value_len */
+ if (list_len < 0) {
+ cFYI(1, ("EA entry goes beyond length of list"));
+ rc = -EIO;
+ goto QAllEAsOut;
+ }
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
- cFYI(1, ("Send error in Query EA = %d", rc));
- } else { /* decode response */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ name_len = temp_fea->name_len;
+ value_len = le16_to_cpu(temp_fea->value_len);
+ list_len -= name_len + 1 + value_len;
+ if (list_len < 0) {
+ cFYI(1, ("EA entry goes beyond length of list"));
+ rc = -EIO;
+ goto QAllEAsOut;
+ }
- /* BB also check enough total bytes returned */
- /* BB we need to improve the validity checking
- of these trans2 responses */
- if (rc || (pSMBr->ByteCount < 4))
- rc = -EIO; /* bad smb */
- /* else if (pFindData){
- memcpy((char *) pFindData,
- (char *) &pSMBr->hdr.Protocol +
- data_offset, kl);
- }*/ else {
- /* check that length of list is not more than bcc */
- /* check that each entry does not go beyond length
- of list */
- /* check that each element of each entry does not
- go beyond end of list */
- __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- struct fealist *ea_response_data;
- rc = -ENODATA;
- /* validate_trans2_offsets() */
- /* BB check if start of smb + data_offset > &bcc+ bcc*/
- ea_response_data = (struct fealist *)
- (((char *) &pSMBr->hdr.Protocol) +
- data_offset);
- name_len = le32_to_cpu(ea_response_data->list_len);
- cFYI(1, ("ea length %d", name_len));
- if (name_len <= 8) {
- /* returned EA size zeroed at top of function */
- cFYI(1, ("empty EA list returned from server"));
- } else {
- /* account for ea list len */
- name_len -= 4;
- temp_fea = ea_response_data->list;
- temp_ptr = (char *)temp_fea;
- /* loop through checking if we have a matching
- name and then return the associated value */
- while (name_len > 0) {
- __u16 value_len;
- name_len -= 4;
- temp_ptr += 4;
- value_len =
- le16_to_cpu(temp_fea->value_len);
- /* BB validate that value_len falls within SMB,
- even though maximum for name_len is 255 */
- if (memcmp(temp_fea->name, ea_name,
- temp_fea->name_len) == 0) {
- /* found a match */
- rc = value_len;
- /* account for prefix user. and trailing null */
- if (rc <= (int)buf_size) {
- memcpy(ea_value,
- temp_fea->name+temp_fea->name_len+1,
- rc);
- /* ea values, unlike ea
- names, are not null
- terminated */
- } else if (buf_size == 0) {
- /* skip copy - calc size only */
- } else {
- /* stop before overrun buffer */
- rc = -ERANGE;
- }
- break;
- }
- name_len -= temp_fea->name_len;
- temp_ptr += temp_fea->name_len;
- /* account for trailing null */
- name_len--;
- temp_ptr++;
- name_len -= value_len;
- temp_ptr += value_len;
- /* No trailing null to account for in
- value_len. Go on to next EA */
- temp_fea = (struct fea *)temp_ptr;
+ if (ea_name) {
+ if (strncmp(ea_name, temp_ptr, name_len) == 0) {
+ temp_ptr += name_len + 1;
+ rc = value_len;
+ if (buf_size == 0)
+ goto QAllEAsOut;
+ if ((size_t)value_len > buf_size) {
+ rc = -ERANGE;
+ goto QAllEAsOut;
}
+ memcpy(EAData, temp_ptr, value_len);
+ goto QAllEAsOut;
+ }
+ } else {
+ /* account for prefix user. and trailing null */
+ rc += (5 + 1 + name_len);
+ if (rc < (int) buf_size) {
+ memcpy(EAData, "user.", 5);
+ EAData += 5;
+ memcpy(EAData, temp_ptr, name_len);
+ EAData += name_len;
+ /* null terminate name */
+ *EAData = 0;
+ ++EAData;
+ } else if (buf_size == 0) {
+ /* skip copy - calc size only */
+ } else {
+ /* stop before overrun buffer */
+ rc = -ERANGE;
+ break;
}
}
+ temp_ptr += name_len + 1 + value_len;
+ temp_fea = (struct fea *)temp_ptr;
}
+
+ /* didn't find the named attribute */
+ if (ea_name)
+ rc = -ENODATA;
+
+QAllEAsOut:
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
- goto QEARetry;
+ goto QAllEAsRetry;
return (ssize_t)rc;
}
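With CIFSSMBQueryEA removed, CIFSSMBQAllEAs now backs both the getxattr and listxattr paths; the fs/cifs/xattr.c hunks further down switch the callers over. Condensed from those hunks, the two call shapes look like this (comments added here for orientation only):

	/* getxattr: ea_name names one attribute, its value lands in ea_value */
	rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
			    buf_size, cifs_sb->local_nls,
			    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);

	/* listxattr: ea_name == NULL, a "user."-prefixed, NUL-separated list of
	   attribute names is written to data (or only the required length is
	   calculated when buf_size is 0) */
	rc = CIFSSMBQAllEAs(xid, pTcon, full_path, NULL, data,
			    buf_size, cifs_sb->local_nls,
			    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);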
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2e9e09ca0e3..45eb6cba793 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2388,13 +2388,13 @@ try_mount_again:
*/
cifs_put_tcp_session(srvTcp);
- down(&pSesInfo->sesSem);
+ mutex_lock(&pSesInfo->session_mutex);
if (pSesInfo->need_reconnect) {
cFYI(1, ("Session needs reconnect"));
rc = cifs_setup_session(xid, pSesInfo,
cifs_sb->local_nls);
}
- up(&pSesInfo->sesSem);
+ mutex_unlock(&pSesInfo->session_mutex);
} else if (!rc) {
cFYI(1, ("Existing smb sess not found"));
pSesInfo = sesInfoAlloc();
@@ -2437,12 +2437,12 @@ try_mount_again:
}
pSesInfo->linux_uid = volume_info->linux_uid;
pSesInfo->overrideSecFlg = volume_info->secFlg;
- down(&pSesInfo->sesSem);
+ mutex_lock(&pSesInfo->session_mutex);
/* BB FIXME need to pass vol->secFlgs BB */
rc = cifs_setup_session(xid, pSesInfo,
cifs_sb->local_nls);
- up(&pSesInfo->sesSem);
+ mutex_unlock(&pSesInfo->session_mutex);
}
/* search for existing tcon to this server share */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index e3fda978f48..8bdbc818164 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -111,6 +111,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
+ cifs_i->server_eof = fattr->cf_eof;
/*
* Can't safely change the file size here if the client is writing to
* it due to potential races.
@@ -366,7 +367,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
char ea_value[4];
__u32 mode;
- rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS",
+ rc = CIFSSMBQAllEAs(xid, cifs_sb->tcon, path, "SETFILEBITS",
ea_value, 4 /* size of buf */, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index d27d4ec6579..d1474996a81 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -79,7 +79,7 @@ sesInfoAlloc(void)
++ret_buf->ses_count;
INIT_LIST_HEAD(&ret_buf->smb_ses_list);
INIT_LIST_HEAD(&ret_buf->tcon_list);
- init_MUTEX(&ret_buf->sesSem);
+ mutex_init(&ret_buf->session_mutex);
}
return ret_buf;
}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index a75afa3dd9e..3e2ef0de120 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -244,7 +244,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
/* revalidate/getattr then populate from inode */
} /* BB add else when above is implemented */
ea_name += 5; /* skip past user. prefix */
- rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
@@ -252,7 +252,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
goto get_ea_exit;
ea_name += 4; /* skip past os2. prefix */
- rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
} else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
@@ -364,8 +364,8 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
/* if proc/fs/cifs/streamstoxattr is set then
search server for EAs or streams to
returns as xattrs */
- rc = CIFSSMBQAllEAs(xid, pTcon, full_path, data, buf_size,
- cifs_sb->local_nls,
+ rc = CIFSSMBQAllEAs(xid, pTcon, full_path, NULL, data,
+ buf_size, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index dc2ad6008b2..4314f0d48d8 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -33,10 +33,10 @@ void dlm_del_ast(struct dlm_lkb *lkb)
spin_unlock(&ast_queue_lock);
}
-void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode)
+void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode)
{
if (lkb->lkb_flags & DLM_IFL_USER) {
- dlm_user_add_ast(lkb, type, bastmode);
+ dlm_user_add_ast(lkb, type, mode);
return;
}
@@ -44,10 +44,21 @@ void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode)
if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
kref_get(&lkb->lkb_ref);
list_add_tail(&lkb->lkb_astqueue, &ast_queue);
+ lkb->lkb_ast_first = type;
}
+
+ /* sanity check, this should not happen */
+
+ if ((type == AST_COMP) && (lkb->lkb_ast_type & AST_COMP))
+ log_print("repeat cast %d castmode %d lock %x %s",
+ mode, lkb->lkb_castmode,
+ lkb->lkb_id, lkb->lkb_resource->res_name);
+
lkb->lkb_ast_type |= type;
- if (bastmode)
- lkb->lkb_bastmode = bastmode;
+ if (type == AST_BAST)
+ lkb->lkb_bastmode = mode;
+ else
+ lkb->lkb_castmode = mode;
spin_unlock(&ast_queue_lock);
set_bit(WAKE_ASTS, &astd_wakeflags);
@@ -59,9 +70,9 @@ static void process_asts(void)
struct dlm_ls *ls = NULL;
struct dlm_rsb *r = NULL;
struct dlm_lkb *lkb;
- void (*cast) (void *astparam);
- void (*bast) (void *astparam, int mode);
- int type = 0, bastmode;
+ void (*castfn) (void *astparam);
+ void (*bastfn) (void *astparam, int mode);
+ int type, first, bastmode, castmode, do_bast, do_cast, last_castmode;
repeat:
spin_lock(&ast_queue_lock);
@@ -75,17 +86,48 @@ repeat:
list_del(&lkb->lkb_astqueue);
type = lkb->lkb_ast_type;
lkb->lkb_ast_type = 0;
+ first = lkb->lkb_ast_first;
+ lkb->lkb_ast_first = 0;
bastmode = lkb->lkb_bastmode;
-
+ castmode = lkb->lkb_castmode;
+ castfn = lkb->lkb_astfn;
+ bastfn = lkb->lkb_bastfn;
spin_unlock(&ast_queue_lock);
- cast = lkb->lkb_astfn;
- bast = lkb->lkb_bastfn;
-
- if ((type & AST_COMP) && cast)
- cast(lkb->lkb_astparam);
- if ((type & AST_BAST) && bast)
- bast(lkb->lkb_astparam, bastmode);
+ do_cast = (type & AST_COMP) && castfn;
+ do_bast = (type & AST_BAST) && bastfn;
+
+ /* Skip a bast if its blocking mode is compatible with the
+ granted mode of the preceding cast. */
+
+ if (do_bast) {
+ if (first == AST_COMP)
+ last_castmode = castmode;
+ else
+ last_castmode = lkb->lkb_castmode_done;
+ if (dlm_modes_compat(bastmode, last_castmode))
+ do_bast = 0;
+ }
+
+ if (first == AST_COMP) {
+ if (do_cast)
+ castfn(lkb->lkb_astparam);
+ if (do_bast)
+ bastfn(lkb->lkb_astparam, bastmode);
+ } else if (first == AST_BAST) {
+ if (do_bast)
+ bastfn(lkb->lkb_astparam, bastmode);
+ if (do_cast)
+ castfn(lkb->lkb_astparam);
+ } else {
+ log_error(ls, "bad ast_first %d ast_type %d",
+ first, type);
+ }
+
+ if (do_cast)
+ lkb->lkb_castmode_done = castmode;
+ if (do_bast)
+ lkb->lkb_bastmode_done = bastmode;
/* this removes the reference added by dlm_add_ast
and may result in the lkb being freed */
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
index 1b5fc5f428f..bcb1aaba519 100644
--- a/fs/dlm/ast.h
+++ b/fs/dlm/ast.h
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -13,7 +13,7 @@
#ifndef __ASTD_DOT_H__
#define __ASTD_DOT_H__
-void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode);
+void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode);
void dlm_del_ast(struct dlm_lkb *lkb);
void dlm_astd_wake(void);
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 375a2359b3b..29d6139c35f 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -256,7 +256,7 @@ static int print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb,
lkb->lkb_status,
lkb->lkb_grmode,
lkb->lkb_rqmode,
- lkb->lkb_highbast,
+ lkb->lkb_bastmode,
rsb_lookup,
lkb->lkb_wait_type,
lkb->lkb_lvbseq,
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 826d3dc6e0a..f632b58cd22 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -232,11 +232,17 @@ struct dlm_lkb {
int8_t lkb_status; /* granted, waiting, convert */
int8_t lkb_rqmode; /* requested lock mode */
int8_t lkb_grmode; /* granted lock mode */
- int8_t lkb_bastmode; /* requested mode */
int8_t lkb_highbast; /* highest mode bast sent for */
+
int8_t lkb_wait_type; /* type of reply waiting for */
int8_t lkb_wait_count;
int8_t lkb_ast_type; /* type of ast queued for */
+ int8_t lkb_ast_first; /* type of first ast queued */
+
+ int8_t lkb_bastmode; /* req mode of queued bast */
+ int8_t lkb_castmode; /* gr mode of queued cast */
+ int8_t lkb_bastmode_done; /* last delivered bastmode */
+ int8_t lkb_castmode_done; /* last delivered castmode */
struct list_head lkb_idtbl_list; /* lockspace lkbtbl */
struct list_head lkb_statequeue; /* rsb g/c/w list */
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 9c0c1db1e10..46ffd3eeaaf 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1,7 +1,7 @@
/******************************************************************************
*******************************************************************************
**
-** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -307,7 +307,7 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
lkb->lkb_lksb->sb_status = rv;
lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
- dlm_add_ast(lkb, AST_COMP, 0);
+ dlm_add_ast(lkb, AST_COMP, lkb->lkb_grmode);
}
static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -320,10 +320,12 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
lkb->lkb_time_bast = ktime_get();
- if (is_master_copy(lkb))
+ if (is_master_copy(lkb)) {
+ lkb->lkb_bastmode = rqmode; /* printed by debugfs */
send_bast(r, lkb, rqmode);
- else
+ } else {
dlm_add_ast(lkb, AST_BAST, rqmode);
+ }
}
/*
@@ -2280,20 +2282,30 @@ static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
if (can_be_queued(lkb)) {
error = -EINPROGRESS;
add_lkb(r, lkb, DLM_LKSTS_WAITING);
- send_blocking_asts(r, lkb);
add_timeout(lkb);
goto out;
}
error = -EAGAIN;
- if (force_blocking_asts(lkb))
- send_blocking_asts_all(r, lkb);
queue_cast(r, lkb, -EAGAIN);
-
out:
return error;
}
+static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ int error)
+{
+ switch (error) {
+ case -EAGAIN:
+ if (force_blocking_asts(lkb))
+ send_blocking_asts_all(r, lkb);
+ break;
+ case -EINPROGRESS:
+ send_blocking_asts(r, lkb);
+ break;
+ }
+}
+
static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error = 0;
@@ -2304,7 +2316,6 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
if (can_be_granted(r, lkb, 1, &deadlk)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
- grant_pending_locks(r);
goto out;
}
@@ -2334,7 +2345,6 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
if (_can_be_granted(r, lkb, 1)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
- grant_pending_locks(r);
goto out;
}
/* else fall through and move to convert queue */
@@ -2344,28 +2354,47 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
error = -EINPROGRESS;
del_lkb(r, lkb);
add_lkb(r, lkb, DLM_LKSTS_CONVERT);
- send_blocking_asts(r, lkb);
add_timeout(lkb);
goto out;
}
error = -EAGAIN;
- if (force_blocking_asts(lkb))
- send_blocking_asts_all(r, lkb);
queue_cast(r, lkb, -EAGAIN);
-
out:
return error;
}
+static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ int error)
+{
+ switch (error) {
+ case 0:
+ grant_pending_locks(r);
+ /* grant_pending_locks also sends basts */
+ break;
+ case -EAGAIN:
+ if (force_blocking_asts(lkb))
+ send_blocking_asts_all(r, lkb);
+ break;
+ case -EINPROGRESS:
+ send_blocking_asts(r, lkb);
+ break;
+ }
+}
+
static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
remove_lock(r, lkb);
queue_cast(r, lkb, -DLM_EUNLOCK);
- grant_pending_locks(r);
return -DLM_EUNLOCK;
}
+static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ int error)
+{
+ grant_pending_locks(r);
+}
+
/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -2375,12 +2404,18 @@ static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
error = revert_lock(r, lkb);
if (error) {
queue_cast(r, lkb, -DLM_ECANCEL);
- grant_pending_locks(r);
return -DLM_ECANCEL;
}
return 0;
}
+static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ int error)
+{
+ if (error)
+ grant_pending_locks(r);
+}
+
/*
* Four stage 3 varieties:
* _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
@@ -2402,11 +2437,15 @@ static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
goto out;
}
- if (is_remote(r))
+ if (is_remote(r)) {
/* receive_request() calls do_request() on remote node */
error = send_request(r, lkb);
- else
+ } else {
error = do_request(r, lkb);
+ /* for remote locks the request_reply is sent
+ between do_request and do_request_effects */
+ do_request_effects(r, lkb, error);
+ }
out:
return error;
}
@@ -2417,11 +2456,15 @@ static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
- if (is_remote(r))
+ if (is_remote(r)) {
/* receive_convert() calls do_convert() on remote node */
error = send_convert(r, lkb);
- else
+ } else {
error = do_convert(r, lkb);
+ /* for remote locks the convert_reply is sent
+ between do_convert and do_convert_effects */
+ do_convert_effects(r, lkb, error);
+ }
return error;
}
@@ -2432,11 +2475,15 @@ static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
- if (is_remote(r))
+ if (is_remote(r)) {
/* receive_unlock() calls do_unlock() on remote node */
error = send_unlock(r, lkb);
- else
+ } else {
error = do_unlock(r, lkb);
+ /* for remote locks the unlock_reply is sent
+ between do_unlock and do_unlock_effects */
+ do_unlock_effects(r, lkb, error);
+ }
return error;
}
@@ -2447,11 +2494,15 @@ static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
- if (is_remote(r))
+ if (is_remote(r)) {
/* receive_cancel() calls do_cancel() on remote node */
error = send_cancel(r, lkb);
- else
+ } else {
error = do_cancel(r, lkb);
+ /* for remote locks the cancel_reply is sent
+ between do_cancel and do_cancel_effects */
+ do_cancel_effects(r, lkb, error);
+ }
return error;
}
@@ -3191,6 +3242,7 @@ static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
attach_lkb(r, lkb);
error = do_request(r, lkb);
send_request_reply(r, lkb, error);
+ do_request_effects(r, lkb, error);
unlock_rsb(r);
put_rsb(r);
@@ -3226,15 +3278,19 @@ static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
goto out;
receive_flags(lkb, ms);
+
error = receive_convert_args(ls, lkb, ms);
- if (error)
- goto out_reply;
+ if (error) {
+ send_convert_reply(r, lkb, error);
+ goto out;
+ }
+
reply = !down_conversion(lkb);
error = do_convert(r, lkb);
- out_reply:
if (reply)
send_convert_reply(r, lkb, error);
+ do_convert_effects(r, lkb, error);
out:
unlock_rsb(r);
put_rsb(r);
@@ -3266,13 +3322,16 @@ static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
goto out;
receive_flags(lkb, ms);
+
error = receive_unlock_args(ls, lkb, ms);
- if (error)
- goto out_reply;
+ if (error) {
+ send_unlock_reply(r, lkb, error);
+ goto out;
+ }
error = do_unlock(r, lkb);
- out_reply:
send_unlock_reply(r, lkb, error);
+ do_unlock_effects(r, lkb, error);
out:
unlock_rsb(r);
put_rsb(r);
@@ -3307,6 +3366,7 @@ static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
error = do_cancel(r, lkb);
send_cancel_reply(r, lkb, error);
+ do_cancel_effects(r, lkb, error);
out:
unlock_rsb(r);
put_rsb(r);
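Splitting do_request/do_convert/do_unlock/do_cancel from their *_effects halves changes the ordering on the master side for remote requests: the reply now goes out before any blocking callbacks are issued. Condensed from the receive_request() hunk above (comments added for orientation only):

	error = do_request(r, lkb);		/* grant/queue/-EAGAIN decision only */
	send_request_reply(r, lkb, error);	/* requester learns the result first */
	do_request_effects(r, lkb, error);	/* then basts go to the blocking holders */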
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index c010ecfc0d2..26a8bd40400 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -191,6 +191,18 @@ static int do_uevent(struct dlm_ls *ls, int in)
return error;
}
+static int dlm_uevent(struct kset *kset, struct kobject *kobj,
+ struct kobj_uevent_env *env)
+{
+ struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
+
+ add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
+ return 0;
+}
+
+static struct kset_uevent_ops dlm_uevent_ops = {
+ .uevent = dlm_uevent,
+};
int __init dlm_lockspace_init(void)
{
@@ -199,7 +211,7 @@ int __init dlm_lockspace_init(void)
INIT_LIST_HEAD(&lslist);
spin_lock_init(&lslist_lock);
- dlm_kset = kset_create_and_add("dlm", NULL, kernel_kobj);
+ dlm_kset = kset_create_and_add("dlm", &dlm_uevent_ops, kernel_kobj);
if (!dlm_kset) {
printk(KERN_WARNING "%s: can not create kset\n", __func__);
return -ENOMEM;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index e73a4bb572a..a4bfd31ac45 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -173,7 +173,7 @@ static int lkb_is_endoflife(struct dlm_lkb *lkb, int sb_status, int type)
/* we could possibly check if the cancel of an orphan has resulted in the lkb
being removed and then remove that lkb from the orphans list and free it */
-void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode)
+void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode)
{
struct dlm_ls *ls;
struct dlm_user_args *ua;
@@ -206,8 +206,10 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode)
ast_type = lkb->lkb_ast_type;
lkb->lkb_ast_type |= type;
- if (bastmode)
- lkb->lkb_bastmode = bastmode;
+ if (type == AST_BAST)
+ lkb->lkb_bastmode = mode;
+ else
+ lkb->lkb_castmode = mode;
if (!ast_type) {
kref_get(&lkb->lkb_ref);
diff --git a/fs/dlm/user.h b/fs/dlm/user.h
index 1c968649228..f196091dd7f 100644
--- a/fs/dlm/user.h
+++ b/fs/dlm/user.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -9,7 +9,7 @@
#ifndef __USER_DOT_H__
#define __USER_DOT_H__
-void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode);
+void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode);
int dlm_user_init(void);
void dlm_user_exit(void);
int dlm_device_deregister(struct dlm_ls *ls);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 874d169a193..4cedc91ec59 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1014,7 +1014,7 @@ struct ext4_sb_info {
atomic_t s_lock_busy;
/* locality groups */
- struct ext4_locality_group *s_locality_groups;
+ struct ext4_locality_group __percpu *s_locality_groups;
/* for write statistics */
unsigned long s_sectors_written_start;
diff --git a/fs/file.c b/fs/file.c
index 87e129030ab..38039af6766 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -478,7 +478,7 @@ repeat:
error = fd;
#if 1
/* Sanity check */
- if (rcu_dereference(fdt->fd[fd]) != NULL) {
+ if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
rcu_assign_pointer(fdt->fd[fd], NULL);
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 51d9e33d634..eb7e9423691 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -865,13 +865,10 @@ static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
down_read(&fc->killsb);
err = -ENOENT;
- if (!fc->sb)
- goto err_unlock;
-
- err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
- outarg.off, outarg.len);
-
-err_unlock:
+ if (fc->sb) {
+ err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
+ outarg.off, outarg.len);
+ }
up_read(&fc->killsb);
return err;
@@ -884,10 +881,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
- int err = -EINVAL;
- char buf[FUSE_NAME_MAX+1];
+ int err = -ENOMEM;
+ char *buf;
struct qstr name;
+ buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
+ err = -EINVAL;
if (size < sizeof(outarg))
goto err;
@@ -910,16 +912,14 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
down_read(&fc->killsb);
err = -ENOENT;
- if (!fc->sb)
- goto err_unlock;
-
- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
-
-err_unlock:
+ if (fc->sb)
+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
up_read(&fc->killsb);
+ kfree(buf);
return err;
err:
+ kfree(buf);
fuse_copy_finish(cs);
return err;
}
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 7b8da941526..0c1d0b82dcf 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1061,8 +1061,8 @@ out:
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
- struct inode *aspace = page->mapping->host;
- struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
+ struct address_space *mapping = page->mapping;
+ struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f4266332593..454d4b4eb36 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,7 +19,6 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
-#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
@@ -60,7 +59,6 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
-static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
@@ -154,12 +152,14 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
static void glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
- struct inode *aspace = gl->gl_aspace;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
+ struct kmem_cache *cachep = gfs2_glock_cachep;
- if (aspace)
- gfs2_aspace_put(aspace);
+ GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
- sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
+ if (mapping)
+ cachep = gfs2_glock_aspace_cachep;
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
}
/**
@@ -712,7 +712,6 @@ static void glock_work_func(struct work_struct *work)
finish_xmote(gl, gl->gl_reply);
drop_ref = 1;
}
- down_read(&gfs2_umount_flush_sem);
spin_lock(&gl->gl_spin);
if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
@@ -725,7 +724,6 @@ static void glock_work_func(struct work_struct *work)
}
run_queue(gl, 0);
spin_unlock(&gl->gl_spin);
- up_read(&gfs2_umount_flush_sem);
if (!delay ||
queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
@@ -750,10 +748,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops, int create,
struct gfs2_glock **glp)
{
+ struct super_block *s = sdp->sd_vfs;
struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
struct gfs2_glock *gl, *tmp;
unsigned int hash = gl_hash(sdp, &name);
- int error;
+ struct address_space *mapping;
read_lock(gl_lock_addr(hash));
gl = search_bucket(hash, sdp, &name);
@@ -765,7 +764,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (!create)
return -ENOENT;
- gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
+ if (glops->go_flags & GLOF_ASPACE)
+ gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
+ else
+ gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
if (!gl)
return -ENOMEM;
@@ -784,18 +786,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_sbd = sdp;
- gl->gl_aspace = NULL;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
INIT_WORK(&gl->gl_delete, delete_work_func);
- /* If this glock protects actual on-disk data or metadata blocks,
- create a VFS inode to manage the pages/buffers holding them. */
- if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
- gl->gl_aspace = gfs2_aspace_get(sdp);
- if (!gl->gl_aspace) {
- error = -ENOMEM;
- goto fail;
- }
+ mapping = gfs2_glock2aspace(gl);
+ if (mapping) {
+ mapping->a_ops = &gfs2_meta_aops;
+ mapping->host = s->s_bdev->bd_inode;
+ mapping->flags = 0;
+ mapping_set_gfp_mask(mapping, GFP_NOFS);
+ mapping->assoc_mapping = NULL;
+ mapping->backing_dev_info = s->s_bdi;
+ mapping->writeback_index = 0;
}
write_lock(gl_lock_addr(hash));
@@ -812,10 +814,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
*glp = gl;
return 0;
-
-fail:
- kmem_cache_free(gfs2_glock_cachep, gl);
- return error;
}
/**
@@ -1510,35 +1508,10 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
- unsigned long t;
unsigned int x;
- int cont;
- t = jiffies;
-
- for (;;) {
- cont = 0;
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- if (examine_bucket(clear_glock, sdp, x))
- cont = 1;
- }
-
- if (!cont)
- break;
-
- if (time_after_eq(jiffies,
- t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
- fs_warn(sdp, "Unmount seems to be stalled. "
- "Dumping lock state...\n");
- gfs2_dump_lockstate(sdp);
- t = jiffies;
- }
-
- down_write(&gfs2_umount_flush_sem);
- invalidate_inodes(sdp->sd_vfs);
- up_write(&gfs2_umount_flush_sem);
- msleep(10);
- }
+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+ examine_bucket(clear_glock, sdp, x);
flush_workqueue(glock_workqueue);
wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
gfs2_dump_lockstate(sdp);
@@ -1685,7 +1658,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
dtime *= 1000000/HZ; /* demote time in uSec */
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
dtime = 0;
- gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n",
+ gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
state2str(gl->gl_state),
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number,
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c0262faf472..2bda1911b15 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,6 +180,13 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
return gl->gl_state == LM_ST_SHARED;
}
+static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
+{
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ return (struct address_space *)(gl + 1);
+ return NULL;
+}
+
int gfs2_glock_get(struct gfs2_sbd *sdp,
u64 number, const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
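With GLOF_ASPACE, the metadata address space is no longer a separate VFS inode; it is co-allocated directly behind the glock in the gfs2_glock_aspace_cachep slab created in fs/gfs2/main.c below, which is why gfs2_glock2aspace() can simply return gl + 1. A sketch of the resulting object layout (diagram added for orientation, not patch content):

	/*
	 * One object from gfs2_glock_aspace_cachep
	 * (size = sizeof(struct gfs2_glock) + sizeof(struct address_space)):
	 *
	 *   +--------------------+----------------------+
	 *   | struct gfs2_glock  | struct address_space |
	 *   +--------------------+----------------------+
	 *   ^ gl                 ^ (struct address_space *)(gl + 1)
	 *
	 * gfs2_glock2aspace(gl) returns the trailing mapping only when
	 * gl->gl_ops->go_flags has GLOF_ASPACE set (inode and rgrp glocks),
	 * and NULL for all other glock types.
	 */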
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 78554acc060..38e3749d476 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -87,7 +87,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
static void rgrp_go_sync(struct gfs2_glock *gl)
{
- struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ struct address_space *metamapping = gfs2_glock2aspace(gl);
int error;
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
@@ -113,7 +113,7 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
BUG_ON(!(flags & DIO_METADATA));
gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
@@ -134,7 +134,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
static void inode_go_sync(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = gl->gl_object;
- struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ struct address_space *metamapping = gfs2_glock2aspace(gl);
int error;
if (ip && !S_ISREG(ip->i_inode.i_mode))
@@ -183,7 +183,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
if (flags & DIO_METADATA) {
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
truncate_inode_pages(mapping, 0);
if (ip) {
set_bit(GIF_INVALID, &ip->i_flags);
@@ -282,7 +282,8 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
{
- return !gl->gl_aspace->i_mapping->nrpages;
+ const struct address_space *mapping = (const struct address_space *)(gl + 1);
+ return !mapping->nrpages;
}
/**
@@ -387,8 +388,7 @@ static void iopen_go_callback(struct gfs2_glock *gl)
struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
- gl->gl_state == LM_ST_SHARED &&
- ip && test_bit(GIF_USER, &ip->i_flags)) {
+ gl->gl_state == LM_ST_SHARED && ip) {
gfs2_glock_hold(gl);
if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
gfs2_glock_put_nolock(gl);
@@ -407,6 +407,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
.go_min_hold_time = HZ / 5,
+ .go_flags = GLOF_ASPACE,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -418,6 +419,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_dump = gfs2_rgrp_dump,
.go_type = LM_TYPE_RGRP,
.go_min_hold_time = HZ / 5,
+ .go_flags = GLOF_ASPACE,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index bc0ad158e6b..b8025e51cab 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -162,6 +162,8 @@ struct gfs2_glock_operations {
void (*go_callback) (struct gfs2_glock *gl);
const int go_type;
const unsigned long go_min_hold_time;
+ const unsigned long go_flags;
+#define GLOF_ASPACE 1
};
enum {
@@ -225,7 +227,6 @@ struct gfs2_glock {
struct gfs2_sbd *gl_sbd;
- struct inode *gl_aspace;
struct list_head gl_ail_list;
atomic_t gl_ail_count;
struct delayed_work gl_work;
@@ -258,7 +259,6 @@ enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
GIF_SW_PAGED = 3,
- GIF_USER = 4, /* user inode, not metadata addr space */
};
@@ -451,7 +451,6 @@ struct gfs2_tune {
unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
unsigned int gt_new_files_jdata;
unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
- unsigned int gt_stall_secs; /* Detects trouble! */
unsigned int gt_complain_secs;
unsigned int gt_statfs_quantum;
unsigned int gt_statfs_slow;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6e220f4eee7..b1bf2694fb2 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -45,7 +45,7 @@ static int iget_test(struct inode *inode, void *opaque)
struct gfs2_inode *ip = GFS2_I(inode);
u64 *no_addr = opaque;
- if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags))
+ if (ip->i_no_addr == *no_addr)
return 1;
return 0;
@@ -58,7 +58,6 @@ static int iget_set(struct inode *inode, void *opaque)
inode->i_ino = (unsigned long)*no_addr;
ip->i_no_addr = *no_addr;
- set_bit(GIF_USER, &ip->i_flags);
return 0;
}
@@ -84,7 +83,7 @@ static int iget_skip_test(struct inode *inode, void *opaque)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_skip_data *data = opaque;
- if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)){
+ if (ip->i_no_addr == data->no_addr) {
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
data->skipped = 1;
return 0;
@@ -103,7 +102,6 @@ static int iget_skip_set(struct inode *inode, void *opaque)
return 1;
inode->i_ino = (unsigned long)(data->no_addr);
ip->i_no_addr = data->no_addr;
- set_bit(GIF_USER, &ip->i_flags);
return 0;
}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 0e5e0e7022e..569b46240f6 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -30,7 +30,10 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
- kmem_cache_free(gfs2_glock_cachep, gl);
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ kmem_cache_free(gfs2_glock_aspace_cachep, gl);
+ else
+ kmem_cache_free(gfs2_glock_cachep, gl);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
return;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index de97632ba32..adc260fbea9 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -528,9 +528,9 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
gfs2_pin(sdp, bd->bd_bh);
tr->tr_num_databuf_new++;
sdp->sd_log_num_databuf++;
- list_add(&le->le_list, &sdp->sd_log_le_databuf);
+ list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
} else {
- list_add(&le->le_list, &sdp->sd_log_le_ordered);
+ list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
}
out:
gfs2_log_unlock(sdp);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 5b31f7741a8..a88fadc704b 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -52,6 +52,22 @@ static void gfs2_init_glock_once(void *foo)
atomic_set(&gl->gl_ail_count, 0);
}
+static void gfs2_init_gl_aspace_once(void *foo)
+{
+ struct gfs2_glock *gl = foo;
+ struct address_space *mapping = (struct address_space *)(gl + 1);
+
+ gfs2_init_glock_once(gl);
+ memset(mapping, 0, sizeof(*mapping));
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+ spin_lock_init(&mapping->tree_lock);
+ spin_lock_init(&mapping->i_mmap_lock);
+ INIT_LIST_HEAD(&mapping->private_list);
+ spin_lock_init(&mapping->private_lock);
+ INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+ INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+}
+
/**
* init_gfs2_fs - Register GFS2 as a filesystem
*
@@ -78,6 +94,14 @@ static int __init init_gfs2_fs(void)
if (!gfs2_glock_cachep)
goto fail;
+ gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock (aspace)",
+ sizeof(struct gfs2_glock) +
+ sizeof(struct address_space),
+ 0, 0, gfs2_init_gl_aspace_once);
+
+ if (!gfs2_glock_aspace_cachep)
+ goto fail;
+
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
0, SLAB_RECLAIM_ACCOUNT|
@@ -144,6 +168,9 @@ fail:
if (gfs2_inode_cachep)
kmem_cache_destroy(gfs2_inode_cachep);
+ if (gfs2_glock_aspace_cachep)
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
+
if (gfs2_glock_cachep)
kmem_cache_destroy(gfs2_glock_cachep);
@@ -169,6 +196,7 @@ static void __exit exit_gfs2_fs(void)
kmem_cache_destroy(gfs2_rgrpd_cachep);
kmem_cache_destroy(gfs2_bufdata_cachep);
kmem_cache_destroy(gfs2_inode_cachep);
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
kmem_cache_destroy(gfs2_glock_cachep);
gfs2_sys_uninit();
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 6f68a5f18eb..0bb12c80937 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -93,49 +93,13 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
return err;
}
-static const struct address_space_operations aspace_aops = {
+const struct address_space_operations gfs2_meta_aops = {
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
.sync_page = block_sync_page,
};
/**
- * gfs2_aspace_get - Create and initialize a struct inode structure
- * @sdp: the filesystem the aspace is in
- *
- * Right now a struct inode is just a struct inode. Maybe Linux
- * will supply a more lightweight address space construct (that works)
- * in the future.
- *
- * Make sure pages/buffers in this aspace aren't in high memory.
- *
- * Returns: the aspace
- */
-
-struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
-{
- struct inode *aspace;
- struct gfs2_inode *ip;
-
- aspace = new_inode(sdp->sd_vfs);
- if (aspace) {
- mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
- aspace->i_mapping->a_ops = &aspace_aops;
- aspace->i_size = MAX_LFS_FILESIZE;
- ip = GFS2_I(aspace);
- clear_bit(GIF_USER, &ip->i_flags);
- insert_inode_hash(aspace);
- }
- return aspace;
-}
-
-void gfs2_aspace_put(struct inode *aspace)
-{
- remove_inode_hash(aspace);
- iput(aspace);
-}
-
-/**
* gfs2_meta_sync - Sync all buffers associated with a glock
* @gl: The glock
*
@@ -143,7 +107,7 @@ void gfs2_aspace_put(struct inode *aspace)
void gfs2_meta_sync(struct gfs2_glock *gl)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
int error;
filemap_fdatawrite(mapping);
@@ -164,7 +128,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
struct gfs2_sbd *sdp = gl->gl_sbd;
struct page *page;
struct buffer_head *bh;
@@ -344,8 +308,10 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
{
- struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host);
+ struct address_space *mapping = bh->b_page->mapping;
+ struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct gfs2_bufdata *bd = bh->b_private;
+
if (test_clear_buffer_pinned(bh)) {
list_del_init(&bd->bd_le.le_list);
if (meta) {
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index de270c2f9b6..6a1d9ba1641 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -37,8 +37,16 @@ static inline void gfs2_buffer_copy_tail(struct buffer_head *to_bh,
0, from_head - to_head);
}
-struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp);
-void gfs2_aspace_put(struct inode *aspace);
+extern const struct address_space_operations gfs2_meta_aops;
+
+static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
+{
+ struct inode *inode = mapping->host;
+ if (mapping->a_ops == &gfs2_meta_aops)
+ return (((struct gfs2_glock *)mapping) - 1)->gl_sbd;
+ else
+ return inode->i_sb->s_fs_info;
+}
void gfs2_meta_sync(struct gfs2_glock *gl);
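
gfs2_glock2aspace() is introduced in glock.h in this series (not shown in the hunks above). Given the slab layout set up in main.c, where the address space is co-allocated immediately behind the glock, it presumably reduces to the inverse of the pointer arithmetic used by gfs2_mapping2sbd() above; a sketch under that assumption:

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	/* Only glock types flagged GLOF_ASPACE come from
	 * gfs2_glock_aspace_cachep, which places a struct address_space
	 * directly after the glock itself. */
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		return (struct address_space *)(gl + 1);
	return NULL;
}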
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index a86ed638156..a054b526dc0 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -65,7 +65,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_quota_scale_den = 1;
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = 1 << 18;
- gt->gt_stall_secs = 600;
gt->gt_complain_secs = 10;
}
@@ -1241,10 +1240,9 @@ fail_sb:
fail_locking:
init_locking(sdp, &mount_gh, UNDO);
fail_lm:
+ invalidate_inodes(sb);
gfs2_gl_hash_clear(sdp);
gfs2_lm_unmount(sdp);
- while (invalidate_inodes(sb))
- yield();
fail_sys:
gfs2_sys_fs_del(sdp);
fail:
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index b9dd3da22c0..e5e22629da6 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -722,8 +722,7 @@ static int gfs2_write_inode(struct inode *inode, int sync)
int ret = 0;
/* Check this is a "normal" inode, etc */
- if (!test_bit(GIF_USER, &ip->i_flags) ||
- (current->flags & PF_MEMALLOC))
+ if (current->flags & PF_MEMALLOC)
return 0;
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret)
@@ -860,6 +859,7 @@ restart:
gfs2_clear_rgrpd(sdp);
gfs2_jindex_free(sdp);
/* Take apart glock structures and buffer lists */
+ invalidate_inodes(sdp->sd_vfs);
gfs2_gl_hash_clear(sdp);
/* Unmount the locking protocol */
gfs2_lm_unmount(sdp);
@@ -1194,7 +1194,7 @@ static void gfs2_drop_inode(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
- if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
+ if (inode->i_nlink) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
clear_nlink(inode);
@@ -1212,18 +1212,12 @@ static void gfs2_clear_inode(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
- /* This tells us its a "real" inode and not one which only
- * serves to contain an address space (see rgrp.c, meta_io.c)
- * which therefore doesn't have its own glocks.
- */
- if (test_bit(GIF_USER, &ip->i_flags)) {
- ip->i_gl->gl_object = NULL;
- gfs2_glock_put(ip->i_gl);
- ip->i_gl = NULL;
- if (ip->i_iopen_gh.gh_gl) {
- ip->i_iopen_gh.gh_gl->gl_object = NULL;
- gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- }
+ ip->i_gl->gl_object = NULL;
+ gfs2_glock_put(ip->i_gl);
+ ip->i_gl = NULL;
+ if (ip->i_iopen_gh.gh_gl) {
+ ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
}
}
@@ -1358,9 +1352,6 @@ static void gfs2_delete_inode(struct inode *inode)
struct gfs2_holder gh;
int error;
- if (!test_bit(GIF_USER, &ip->i_flags))
- goto out;
-
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (unlikely(error)) {
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 0dc34621f6a..a0db1c94317 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -478,7 +478,6 @@ TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(quota_simul_sync, 1);
-TUNE_ATTR(stall_secs, 1);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
@@ -491,7 +490,6 @@ static struct attribute *tune_attrs[] = {
&tune_attr_complain_secs.attr,
&tune_attr_statfs_slow.attr,
&tune_attr_quota_simul_sync.attr,
- &tune_attr_stall_secs.attr,
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f6a7efa34eb..226f2bfbf16 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -21,6 +21,7 @@
#include "util.h"
struct kmem_cache *gfs2_glock_cachep __read_mostly;
+struct kmem_cache *gfs2_glock_aspace_cachep __read_mostly;
struct kmem_cache *gfs2_inode_cachep __read_mostly;
struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 33e96b0ce9a..b432e04600d 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -145,6 +145,7 @@ gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__);
extern struct kmem_cache *gfs2_glock_cachep;
+extern struct kmem_cache *gfs2_glock_aspace_cachep;
extern struct kmem_cache *gfs2_inode_cachep;
extern struct kmem_cache *gfs2_bufdata_cachep;
extern struct kmem_cache *gfs2_rgrpd_cachep;
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 59e5673b459..a43d07e7b92 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -95,8 +95,7 @@ config ROOT_NFS
Most people say N here.
config NFS_FSCACHE
- bool "Provide NFS client caching support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "Provide NFS client caching support"
depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y
help
Say Y here if you want NFS data to be cached locally on disc through
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index 46d779abafd..1d8d5c813b0 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -57,12 +57,12 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
}
#endif
-static inline struct nfs_iostats *nfs_alloc_iostats(void)
+static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
{
return alloc_percpu(struct nfs_iostats);
}
-static inline void nfs_free_iostats(struct nfs_iostats *stats)
+static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)
{
if (stats != NULL)
free_percpu(stats);
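
The __percpu annotation only affects sparse checking; the generated code for alloc_percpu()/free_percpu() is unchanged. A minimal sketch of how such an annotated pointer is consumed elsewhere, with the counter field name assumed for illustration rather than copied from iostat.h:

static inline void nfs_inc_stats_sketch(struct nfs_iostats __percpu *stats,
					int counter)
{
	/* per_cpu_ptr() accepts the __percpu-annotated pointer without a
	 * sparse warning and yields this CPU's private instance. */
	struct nfs_iostats *p = per_cpu_ptr(stats, get_cpu());

	p->events[counter]++;	/* 'events' assumed to be one of the counter arrays */
	put_cpu();
}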
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 187dd07ba86..9d1e5de91af 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -388,8 +388,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
ret = -ENOENT;
goto out;
}
- if (blocknrp != NULL)
- *blocknrp = blocknr;
+ *blocknrp = blocknr;
out:
kunmap_atomic(kaddr, KM_USER0);
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index d6b2b83de36..313d0a21da4 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -26,6 +26,7 @@
#include <linux/capability.h> /* capable() */
#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */
#include <linux/vmalloc.h>
+#include <linux/mount.h> /* mnt_want_write(), mnt_drop_write() */
#include <linux/nilfs2_fs.h>
#include "nilfs.h"
#include "segment.h"
@@ -107,20 +108,28 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ ret = -EFAULT;
if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
- return -EFAULT;
+ goto out;
mutex_lock(&nilfs->ns_mount_mutex);
+
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_change_cpmode(
cpfile, cpmode.cm_cno, cpmode.cm_mode);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
- mutex_unlock(&nilfs->ns_mount_mutex);
- return ret;
- }
- nilfs_transaction_commit(inode->i_sb); /* never fails */
+ else
+ nilfs_transaction_commit(inode->i_sb); /* never fails */
+
mutex_unlock(&nilfs->ns_mount_mutex);
+out:
+ mnt_drop_write(filp->f_path.mnt);
return ret;
}
@@ -135,16 +144,23 @@ nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ ret = -EFAULT;
if (copy_from_user(&cno, argp, sizeof(cno)))
- return -EFAULT;
+ goto out;
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_delete_checkpoint(cpfile, cno);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 0))
nilfs_transaction_abort(inode->i_sb);
- return ret;
- }
- nilfs_transaction_commit(inode->i_sb); /* never fails */
+ else
+ nilfs_transaction_commit(inode->i_sb); /* never fails */
+out:
+ mnt_drop_write(filp->f_path.mnt);
return ret;
}
@@ -496,12 +512,19 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ ret = mnt_want_write(filp->f_path.mnt);
+ if (ret)
+ return ret;
+
+ ret = -EFAULT;
if (copy_from_user(argv, argp, sizeof(argv)))
- return -EFAULT;
+ goto out;
+ ret = -EINVAL;
nsegs = argv[4].v_nmembs;
if (argv[4].v_size != argsz[4])
- return -EINVAL;
+ goto out;
+
/*
* argv[4] points to segment numbers this ioctl cleans. We
* use kmalloc() for its buffer because memory used for the
@@ -509,9 +532,10 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
*/
kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base,
nsegs * sizeof(__u64));
- if (IS_ERR(kbufs[4]))
- return PTR_ERR(kbufs[4]);
-
+ if (IS_ERR(kbufs[4])) {
+ ret = PTR_ERR(kbufs[4]);
+ goto out;
+ }
nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
for (n = 0; n < 4; n++) {
@@ -563,10 +587,12 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
nilfs_remove_all_gcinode(nilfs);
clear_nilfs_gc_running(nilfs);
- out_free:
+out_free:
while (--n >= 0)
vfree(kbufs[n]);
kfree(kbufs[4]);
+out:
+ mnt_drop_write(filp->f_path.mnt);
return ret;
}
@@ -575,13 +601,17 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
{
__u64 cno;
int ret;
+ struct the_nilfs *nilfs;
ret = nilfs_construct_segment(inode->i_sb);
if (ret < 0)
return ret;
if (argp != NULL) {
- cno = NILFS_SB(inode->i_sb)->s_nilfs->ns_cno - 1;
+ nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
+ down_read(&nilfs->ns_segctor_sem);
+ cno = nilfs->ns_cno - 1;
+ up_read(&nilfs->ns_segctor_sem);
if (copy_to_user(argp, &cno, sizeof(cno)))
return -EFAULT;
}
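
All three state-changing ioctls above now follow the same shape: take a write reference on the mount before copying anything in, funnel every failure through a label that drops the reference, and only then return. Reduced to its skeleton (the function and argument names here are illustrative, not from the patch):

static int nilfs_ioctl_modify_sketch(struct inode *inode, struct file *filp,
				     void __user *argp)
{
	__u64 arg;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write(filp->f_path.mnt);	/* pairs with mnt_drop_write() */
	if (ret)
		return ret;

	ret = -EFAULT;
	if (copy_from_user(&arg, argp, sizeof(arg)))
		goto out;

	/* ... begin transaction, do the work using 'arg', commit/abort ... */
	ret = 0;
out:
	mnt_drop_write(filp->f_path.mnt);
	return ret;
}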
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index c9c96c7825d..017bedc761a 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -39,7 +39,6 @@ enum {
NILFS_SEG_FAIL_IO,
NILFS_SEG_FAIL_MAGIC,
NILFS_SEG_FAIL_SEQ,
- NILFS_SEG_FAIL_CHECKSUM_SEGSUM,
NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
NILFS_SEG_FAIL_CHECKSUM_FULL,
NILFS_SEG_FAIL_CONSISTENCY,
@@ -71,10 +70,6 @@ static int nilfs_warn_segment_error(int err)
printk(KERN_WARNING
"NILFS warning: Sequence number mismatch\n");
break;
- case NILFS_SEG_FAIL_CHECKSUM_SEGSUM:
- printk(KERN_WARNING
- "NILFS warning: Checksum error in segment summary\n");
- break;
case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
printk(KERN_WARNING
"NILFS warning: Checksum error in super root\n");
@@ -206,19 +201,15 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
* @pseg_start: start disk block number of partial segment
* @seg_seq: sequence number requested
* @ssi: pointer to nilfs_segsum_info struct to store information
- * @full_check: full check flag
- * (0: only checks segment summary CRC, 1: data CRC)
*/
static int
load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
- u64 seg_seq, struct nilfs_segsum_info *ssi,
- int full_check)
+ u64 seg_seq, struct nilfs_segsum_info *ssi)
{
struct buffer_head *bh_sum;
struct nilfs_segment_summary *sum;
- unsigned long offset, nblock;
- u64 check_bytes;
- u32 crc, crc_sum;
+ unsigned long nblock;
+ u32 crc;
int ret = NILFS_SEG_FAIL_IO;
bh_sum = sb_bread(sbi->s_super, pseg_start);
@@ -237,34 +228,24 @@ load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
ret = NILFS_SEG_FAIL_SEQ;
goto failed;
}
- if (full_check) {
- offset = sizeof(sum->ss_datasum);
- check_bytes =
- ((u64)ssi->nblocks << sbi->s_super->s_blocksize_bits);
- nblock = ssi->nblocks;
- crc_sum = le32_to_cpu(sum->ss_datasum);
- ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
- } else { /* only checks segment summary */
- offset = sizeof(sum->ss_datasum) + sizeof(sum->ss_sumsum);
- check_bytes = ssi->sumbytes;
- nblock = ssi->nsumblk;
- crc_sum = le32_to_cpu(sum->ss_sumsum);
- ret = NILFS_SEG_FAIL_CHECKSUM_SEGSUM;
- }
+ nblock = ssi->nblocks;
if (unlikely(nblock == 0 ||
nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
/* This limits the number of blocks read in the CRC check */
ret = NILFS_SEG_FAIL_CONSISTENCY;
goto failed;
}
- if (calc_crc_cont(sbi, bh_sum, &crc, offset, check_bytes,
+ if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum),
+ ((u64)nblock << sbi->s_super->s_blocksize_bits),
pseg_start, nblock)) {
ret = NILFS_SEG_FAIL_IO;
goto failed;
}
- if (crc == crc_sum)
+ if (crc == le32_to_cpu(sum->ss_datasum))
ret = 0;
+ else
+ ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
failed:
brelse(bh_sum);
out:
@@ -598,7 +579,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
- ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
+ ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
if (ret) {
if (ret == NILFS_SEG_FAIL_IO) {
err = -EIO;
@@ -821,7 +802,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
for (;;) {
/* Load segment summary */
- ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
+ ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
if (ret) {
if (ret == NILFS_SEG_FAIL_IO)
goto failed;
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 645c78656aa..ab56fe44e37 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -40,6 +40,11 @@ struct nilfs_write_info {
};
+static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
+ struct the_nilfs *nilfs);
+static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
+
+
static struct kmem_cache *nilfs_segbuf_cachep;
static void nilfs_segbuf_init_once(void *obj)
@@ -302,6 +307,19 @@ void nilfs_truncate_logs(struct list_head *logs,
}
}
+int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
+{
+ struct nilfs_segment_buffer *segbuf;
+ int ret = 0;
+
+ list_for_each_entry(segbuf, logs, sb_list) {
+ ret = nilfs_segbuf_write(segbuf, nilfs);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
int nilfs_wait_on_logs(struct list_head *logs)
{
struct nilfs_segment_buffer *segbuf;
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 6af1630fb40..94dfd3517bc 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -166,13 +166,10 @@ nilfs_segbuf_add_file_buffer(struct nilfs_segment_buffer *segbuf,
segbuf->sb_sum.nfileblk++;
}
-int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
- struct the_nilfs *nilfs);
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
-
void nilfs_clear_logs(struct list_head *logs);
void nilfs_truncate_logs(struct list_head *logs,
struct nilfs_segment_buffer *last);
+int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs);
int nilfs_wait_on_logs(struct list_head *logs);
static inline void nilfs_destroy_logs(struct list_head *logs)
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 105b508b47a..ada2f1b947a 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1764,14 +1764,9 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
- struct nilfs_segment_buffer *segbuf;
- int ret = 0;
+ int ret;
- list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
- ret = nilfs_segbuf_write(segbuf, nilfs);
- if (ret)
- break;
- }
+ ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
return ret;
}
@@ -1937,8 +1932,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
- struct nilfs_sb_info *sbi = sci->sc_sbi;
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
int update_sr = (sci->sc_super_root != NULL);
list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
@@ -2020,7 +2014,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
if (update_sr) {
nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
- sbi->s_super->s_dirt = 1;
+ set_nilfs_sb_dirty(nilfs);
clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
@@ -2425,43 +2419,43 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
return err;
}
-struct nilfs_segctor_req {
- int mode;
- __u32 seq_accepted;
- int sc_err; /* construction failure */
- int sb_err; /* super block writeback failure */
-};
-
#define FLUSH_FILE_BIT (0x1) /* data file only */
#define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */
-static void nilfs_segctor_accept(struct nilfs_sc_info *sci,
- struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_accept - record accepted sequence count of log-write requests
+ * @sci: segment constructor object
+ */
+static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
- req->sc_err = req->sb_err = 0;
spin_lock(&sci->sc_state_lock);
- req->seq_accepted = sci->sc_seq_request;
+ sci->sc_seq_accepted = sci->sc_seq_request;
spin_unlock(&sci->sc_state_lock);
if (sci->sc_timer)
del_timer_sync(sci->sc_timer);
}
-static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
- struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_notify - notify the result of a request to caller threads
+ * @sci: segment constructor object
+ * @mode: mode of log forming
+ * @err: error code to be notified
+ */
+static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
/* Clear requests (even when the construction failed) */
spin_lock(&sci->sc_state_lock);
- if (req->mode == SC_LSEG_SR) {
+ if (mode == SC_LSEG_SR) {
sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
- sci->sc_seq_done = req->seq_accepted;
- nilfs_segctor_wakeup(sci, req->sc_err ? : req->sb_err);
+ sci->sc_seq_done = sci->sc_seq_accepted;
+ nilfs_segctor_wakeup(sci, err);
sci->sc_flush_request = 0;
} else {
- if (req->mode == SC_FLUSH_FILE)
+ if (mode == SC_FLUSH_FILE)
sci->sc_flush_request &= ~FLUSH_FILE_BIT;
- else if (req->mode == SC_FLUSH_DAT)
+ else if (mode == SC_FLUSH_DAT)
sci->sc_flush_request &= ~FLUSH_DAT_BIT;
/* re-enable timer if checkpoint creation was not done */
@@ -2472,30 +2466,37 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci,
spin_unlock(&sci->sc_state_lock);
}
-static int nilfs_segctor_construct(struct nilfs_sc_info *sci,
- struct nilfs_segctor_req *req)
+/**
+ * nilfs_segctor_construct - form logs and write them to disk
+ * @sci: segment constructor object
+ * @mode: mode of log forming
+ */
+static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct the_nilfs *nilfs = sbi->s_nilfs;
int err = 0;
+ nilfs_segctor_accept(sci);
+
if (nilfs_discontinued(nilfs))
- req->mode = SC_LSEG_SR;
- if (!nilfs_segctor_confirm(sci)) {
- err = nilfs_segctor_do_construct(sci, req->mode);
- req->sc_err = err;
- }
+ mode = SC_LSEG_SR;
+ if (!nilfs_segctor_confirm(sci))
+ err = nilfs_segctor_do_construct(sci, mode);
+
if (likely(!err)) {
- if (req->mode != SC_FLUSH_DAT)
+ if (mode != SC_FLUSH_DAT)
atomic_set(&nilfs->ns_ndirtyblks, 0);
if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
nilfs_discontinued(nilfs)) {
down_write(&nilfs->ns_sem);
- req->sb_err = nilfs_commit_super(sbi,
- nilfs_altsb_need_update(nilfs));
+ err = nilfs_commit_super(
+ sbi, nilfs_altsb_need_update(nilfs));
up_write(&nilfs->ns_sem);
}
}
+
+ nilfs_segctor_notify(sci, mode, err);
return err;
}
@@ -2526,7 +2527,6 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
struct nilfs_sc_info *sci = NILFS_SC(sbi);
struct the_nilfs *nilfs = sbi->s_nilfs;
struct nilfs_transaction_info ti;
- struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
int err;
if (unlikely(!sci))
@@ -2547,10 +2547,8 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
for (;;) {
- nilfs_segctor_accept(sci, &req);
- err = nilfs_segctor_construct(sci, &req);
+ err = nilfs_segctor_construct(sci, SC_LSEG_SR);
nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
- nilfs_segctor_notify(sci, &req);
if (likely(!err))
break;
@@ -2560,6 +2558,16 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(sci->sc_interval);
}
+ if (nilfs_test_opt(sbi, DISCARD)) {
+ int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
+ sci->sc_nfreesegs);
+ if (ret) {
+ printk(KERN_WARNING
+ "NILFS warning: error %d on discard request, "
+ "turning discards off for the device\n", ret);
+ nilfs_clear_opt(sbi, DISCARD);
+ }
+ }
out_unlock:
sci->sc_freesegs = NULL;
@@ -2573,13 +2581,9 @@ static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct nilfs_transaction_info ti;
- struct nilfs_segctor_req req = { .mode = mode };
nilfs_transaction_lock(sbi, &ti, 0);
-
- nilfs_segctor_accept(sci, &req);
- nilfs_segctor_construct(sci, &req);
- nilfs_segctor_notify(sci, &req);
+ nilfs_segctor_construct(sci, mode);
/*
* Unclosed segment should be retried. We do this using sc_timer.
@@ -2635,6 +2639,7 @@ static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
static int nilfs_segctor_thread(void *arg)
{
struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
+ struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
struct timer_list timer;
int timeout = 0;
@@ -2680,7 +2685,6 @@ static int nilfs_segctor_thread(void *arg)
} else {
DEFINE_WAIT(wait);
int should_sleep = 1;
- struct the_nilfs *nilfs;
prepare_to_wait(&sci->sc_wait_daemon, &wait,
TASK_INTERRUPTIBLE);
@@ -2701,8 +2705,8 @@ static int nilfs_segctor_thread(void *arg)
finish_wait(&sci->sc_wait_daemon, &wait);
timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
time_after_eq(jiffies, sci->sc_timer->expires));
- nilfs = sci->sc_sbi->s_nilfs;
- if (sci->sc_super->s_dirt && nilfs_sb_need_update(nilfs))
+
+ if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
}
goto loop;
@@ -2797,12 +2801,9 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
do {
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct nilfs_transaction_info ti;
- struct nilfs_segctor_req req = { .mode = SC_LSEG_SR };
nilfs_transaction_lock(sbi, &ti, 0);
- nilfs_segctor_accept(sci, &req);
- ret = nilfs_segctor_construct(sci, &req);
- nilfs_segctor_notify(sci, &req);
+ ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
nilfs_transaction_unlock(sbi);
} while (ret && retrycount-- > 0);
@@ -2865,8 +2866,15 @@ int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
struct the_nilfs *nilfs = sbi->s_nilfs;
int err;
- /* Each field of nilfs_segctor is cleared through the initialization
- of super-block info */
+ if (NILFS_SC(sbi)) {
+ /*
+ * This happens if the filesystem was remounted
+ * read/write after nilfs_error degenerated it into a
+ * read-only mount.
+ */
+ nilfs_detach_segment_constructor(sbi);
+ }
+
sbi->s_sc_info = nilfs_segctor_new(sbi);
if (!sbi->s_sc_info)
return -ENOMEM;
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 3d3ab2f9864..3155e0c7f41 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -116,6 +116,7 @@ struct nilfs_segsum_pointer {
* @sc_wait_daemon: Daemon wait queue
* @sc_wait_task: Start/end wait queue to control segctord task
* @sc_seq_request: Request counter
+ * @sc_seq_accepted: Accepted request count
* @sc_seq_done: Completion counter
* @sc_sync: Request of explicit sync operation
* @sc_interval: Timeout value of background construction
@@ -169,6 +170,7 @@ struct nilfs_sc_info {
wait_queue_head_t sc_wait_task;
__u32 sc_seq_request;
+ __u32 sc_seq_accepted;
__u32 sc_seq_done;
int sc_sync;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 8173faee31e..92579cc4c93 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -96,9 +96,6 @@ void nilfs_error(struct super_block *sb, const char *function,
if (!(sb->s_flags & MS_RDONLY)) {
struct the_nilfs *nilfs = sbi->s_nilfs;
- if (!nilfs_test_opt(sbi, ERRORS_CONT))
- nilfs_detach_segment_constructor(sbi);
-
down_write(&nilfs->ns_sem);
if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
nilfs->ns_mount_state |= NILFS_ERROR_FS;
@@ -301,7 +298,7 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb)
memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
nilfs->ns_sbwtime[1] = t;
}
- sbi->s_super->s_dirt = 0;
+ clear_nilfs_sb_dirty(nilfs);
return nilfs_sync_super(sbi, dupsb);
}
@@ -345,7 +342,7 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
err = nilfs_construct_segment(sb);
down_write(&nilfs->ns_sem);
- if (sb->s_dirt)
+ if (nilfs_sb_dirty(nilfs))
nilfs_commit_super(sbi, 1);
up_write(&nilfs->ns_sem);
@@ -481,6 +478,8 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",order=strict");
if (nilfs_test_opt(sbi, NORECOVERY))
seq_printf(seq, ",norecovery");
+ if (nilfs_test_opt(sbi, DISCARD))
+ seq_printf(seq, ",discard");
return 0;
}
@@ -550,7 +549,7 @@ static const struct export_operations nilfs_export_ops = {
enum {
Opt_err_cont, Opt_err_panic, Opt_err_ro,
Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
- Opt_err,
+ Opt_discard, Opt_err,
};
static match_table_t tokens = {
@@ -561,6 +560,7 @@ static match_table_t tokens = {
{Opt_snapshot, "cp=%u"},
{Opt_order, "order=%s"},
{Opt_norecovery, "norecovery"},
+ {Opt_discard, "discard"},
{Opt_err, NULL}
};
@@ -614,6 +614,9 @@ static int parse_options(char *options, struct super_block *sb)
case Opt_norecovery:
nilfs_set_opt(sbi, NORECOVERY);
break;
+ case Opt_discard:
+ nilfs_set_opt(sbi, DISCARD);
+ break;
default:
printk(KERN_ERR
"NILFS: Unrecognized mount option \"%s\"\n", p);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 6241e1722ef..92733d5651d 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -646,6 +646,44 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
goto out;
}
+int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
+ size_t nsegs)
+{
+ sector_t seg_start, seg_end;
+ sector_t start = 0, nblocks = 0;
+ unsigned int sects_per_block;
+ __u64 *sn;
+ int ret = 0;
+
+ sects_per_block = (1 << nilfs->ns_blocksize_bits) /
+ bdev_logical_block_size(nilfs->ns_bdev);
+ for (sn = segnump; sn < segnump + nsegs; sn++) {
+ nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end);
+
+ if (!nblocks) {
+ start = seg_start;
+ nblocks = seg_end - seg_start + 1;
+ } else if (start + nblocks == seg_start) {
+ nblocks += seg_end - seg_start + 1;
+ } else {
+ ret = blkdev_issue_discard(nilfs->ns_bdev,
+ start * sects_per_block,
+ nblocks * sects_per_block,
+ GFP_NOFS,
+ DISCARD_FL_BARRIER);
+ if (ret < 0)
+ return ret;
+ nblocks = 0;
+ }
+ }
+ if (nblocks)
+ ret = blkdev_issue_discard(nilfs->ns_bdev,
+ start * sects_per_block,
+ nblocks * sects_per_block,
+ GFP_NOFS, DISCARD_FL_BARRIER);
+ return ret;
+}
+
int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
struct inode *dat = nilfs_dat_inode(nilfs);
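
nilfs_discard_segments() batches physically adjacent segments into a single request: it keeps extending (start, nblocks) while the next segment begins exactly where the previous run ended, issues blkdev_issue_discard() when a gap appears, and flushes the trailing run at the end. Assuming, purely for illustration, that the named segments are laid out back to back on disk and that 'nilfs' is in scope, a sketch of a caller and the resulting requests:

	/* Hypothetical example: segments 5 and 6 are contiguous, segment 9
	 * is not, so exactly two discard requests are issued: one covering
	 * segments 5-6, and one covering segment 9. */
	__u64 segnums[] = { 5, 6, 9 };
	int err = nilfs_discard_segments(nilfs, segnums, ARRAY_SIZE(segnums));
	if (err < 0)
		printk(KERN_WARNING "discard failed: %d\n", err);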
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 589786e3346..e9795f1724d 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -38,6 +38,7 @@ enum {
the latest checkpoint was loaded */
THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */
THE_NILFS_GC_RUNNING, /* gc process is running */
+ THE_NILFS_SB_DIRTY, /* super block is dirty */
};
/**
@@ -197,6 +198,7 @@ THE_NILFS_FNS(INIT, init)
THE_NILFS_FNS(LOADED, loaded)
THE_NILFS_FNS(DISCONTINUED, discontinued)
THE_NILFS_FNS(GC_RUNNING, gc_running)
+THE_NILFS_FNS(SB_DIRTY, sb_dirty)
/* Minimum interval of periodical update of superblocks (in seconds) */
#define NILFS_SB_FREQ 10
@@ -221,6 +223,7 @@ struct the_nilfs *find_or_create_nilfs(struct block_device *);
void put_nilfs(struct the_nilfs *);
int init_nilfs(struct the_nilfs *, struct nilfs_sb_info *, char *);
int load_nilfs(struct the_nilfs *, struct nilfs_sb_info *);
+int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t);
int nilfs_count_free_blocks(struct the_nilfs *, sector_t *);
struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *, int, __u64);
int nilfs_checkpoint_is_mounted(struct the_nilfs *, __u64, int);
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 600d2d2ade1..791c0886c06 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -46,6 +46,7 @@ ocfs2_stackglue-objs := stackglue.o
ocfs2_stack_o2cb-objs := stack_o2cb.o
ocfs2_stack_user-objs := stack_user.o
+obj-$(CONFIG_OCFS2_FS) += dlmfs/
# cluster/ is always needed when OCFS2_FS for masklog support
obj-$(CONFIG_OCFS2_FS) += cluster/
obj-$(CONFIG_OCFS2_FS_O2CB) += dlm/
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index d17bdc718f7..2bbe1ecc08c 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1050,7 +1050,8 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
eb->h_blkno = cpu_to_le64(first_blkno);
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
- eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
+ eb->h_suballoc_slot =
+ cpu_to_le16(meta_ac->ac_alloc_slot);
eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
eb->h_list.l_count =
cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
@@ -6037,7 +6038,7 @@ static void ocfs2_truncate_log_worker(struct work_struct *work)
if (status < 0)
mlog_errno(status);
else
- ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_steal_slots(osb);
mlog_exit(status);
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 7e9df11260f..4c2a6d282c4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -577,8 +577,9 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
goto bail;
}
- /* We should already CoW the refcounted extent. */
- BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
+ /* We should already CoW the refcounted extent in case of create. */
+ BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));
+
/*
* get_more_blocks() expects us to describe a hole by clearing
* the mapped bit on bh_result().
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 1cd2934de61..b39da877b12 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -112,6 +112,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
define_mask(XATTR),
define_mask(QUOTA),
define_mask(REFCOUNT),
+ define_mask(BASTS),
define_mask(ERROR),
define_mask(NOTICE),
define_mask(KTHREAD),
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 9b4d11726cf..3dfddbec32f 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -114,6 +114,7 @@
#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */
#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */
+#define ML_BASTS 0x0000001000000000ULL /* dlmglue asts and basts */
/* bits that are infrequently given and frequently matched in the high word */
#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */
@@ -194,9 +195,9 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
* previous token if args expands to nothing.
*/
#define __mlog_printk(level, fmt, args...) \
- printk(level "(%u,%lu):%s:%d " fmt, task_pid_nr(current), \
- __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ , \
- ##args)
+ printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
+ task_pid_nr(current), __mlog_cpu_guess, \
+ __PRETTY_FUNCTION__, __LINE__ , ##args)
#define mlog(mask, fmt, args...) do { \
u64 __m = MLOG_MASK_PREFIX | (mask); \
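
With current->comm added to the prefix, an mlog() line now identifies the task by name as well as pid and cpu. For example (task name, pid, cpu, function, and line number below are illustrative, not taken from a real log), aside from the KERN_* level a line that previously printed as

	(3029,0):ocfs2_some_function:123 ...

now prints as

	(umount.ocfs2,3029,0):ocfs2_some_function:123 ...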
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 28c3ec23879..765d66c7098 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2439,7 +2439,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
memset(dx_root, 0, osb->sb->s_blocksize);
strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
- dx_root->dr_suballoc_slot = cpu_to_le16(osb->slot_num);
+ dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation);
dx_root->dr_blkno = cpu_to_le64(dr_blkno);
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index 19036137570..dcebf0d920f 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,8 +1,7 @@
EXTRA_CFLAGS += -Ifs/ocfs2
-obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlmfs.o
+obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o
-ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 344bcf90cbf..b4f99de2caf 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -310,7 +310,7 @@ static int dlm_recovery_thread(void *data)
mlog(0, "dlm thread running for %s...\n", dlm->name);
while (!kthread_should_stop()) {
- if (dlm_joined(dlm)) {
+ if (dlm_domain_fully_joined(dlm)) {
status = dlm_do_recovery(dlm);
if (status == -EAGAIN) {
/* do not sleep, recheck immediately. */
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
new file mode 100644
index 00000000000..df69b4856d0
--- /dev/null
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -0,0 +1,5 @@
+EXTRA_CFLAGS += -Ifs/ocfs2
+
+obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
+
+ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 02bf17808bd..1b0de157a08 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -43,24 +43,17 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
+#include <linux/poll.h>
#include <asm/uaccess.h>
-
-#include "cluster/nodemanager.h"
-#include "cluster/heartbeat.h"
-#include "cluster/tcp.h"
-
-#include "dlmapi.h"
-
+#include "stackglue.h"
#include "userdlm.h"
-
#include "dlmfsver.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
-#include "ocfs2_lockingver.h"
static const struct super_operations dlmfs_ops;
static const struct file_operations dlmfs_file_operations;
@@ -71,15 +64,46 @@ static struct kmem_cache *dlmfs_inode_cache;
struct workqueue_struct *user_dlm_worker;
+
+
/*
- * This is the userdlmfs locking protocol version.
+ * These are the ABI capabilities of dlmfs.
+ *
+ * Over time, dlmfs has added some features that were not part of the
+ * initial ABI. Unfortunately, some of these features are not detectable
+ * via standard usage. For example, Linux's default poll always returns
+ * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
+ * added poll support. Instead, we provide this list of new capabilities.
+ *
+ * Capabilities is a read-only attribute. We do it as a module parameter
+ * so we can discover it whether dlmfs is built in, loaded, or even not
+ * loaded.
*
- * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ * The ABI features are local to this machine's dlmfs mount. This is
+ * distinct from the locking protocol, which is concerned with inter-node
+ * interaction.
+ *
+ * Capabilities:
+ * - bast : POLLIN against the file descriptor of a held lock
+ * signifies a bast fired on the lock.
*/
-static const struct dlm_protocol_version user_locking_protocol = {
- .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
- .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
-};
+#define DLMFS_CAPABILITIES "bast stackglue"
+extern int param_set_dlmfs_capabilities(const char *val,
+ struct kernel_param *kp)
+{
+ printk(KERN_ERR "%s: readonly parameter\n", kp->name);
+ return -EINVAL;
+}
+static int param_get_dlmfs_capabilities(char *buffer,
+ struct kernel_param *kp)
+{
+ return strlcpy(buffer, DLMFS_CAPABILITIES,
+ strlen(DLMFS_CAPABILITIES) + 1);
+}
+module_param_call(capabilities, param_set_dlmfs_capabilities,
+ param_get_dlmfs_capabilities, NULL, 0444);
+MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
+
/*
* decodes a set of open flags into a valid lock level and a set of flags.
@@ -179,13 +203,46 @@ static int dlmfs_file_release(struct inode *inode,
return 0;
}
+/*
+ * We do ->setattr() just to override size changes. Our size is the size
+ * of the LVB and nothing else.
+ */
+static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ int error;
+ struct inode *inode = dentry->d_inode;
+
+ attr->ia_valid &= ~ATTR_SIZE;
+ error = inode_change_ok(inode, attr);
+ if (!error)
+ error = inode_setattr(inode, attr);
+
+ return error;
+}
+
+static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
+{
+ int event = 0;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct dlmfs_inode_private *ip = DLMFS_I(inode);
+
+ poll_wait(file, &ip->ip_lockres.l_event, wait);
+
+ spin_lock(&ip->ip_lockres.l_lock);
+ if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
+ event = POLLIN | POLLRDNORM;
+ spin_unlock(&ip->ip_lockres.l_lock);
+
+ return event;
+}
+
static ssize_t dlmfs_file_read(struct file *filp,
char __user *buf,
size_t count,
loff_t *ppos)
{
int bytes_left;
- ssize_t readlen;
+ ssize_t readlen, got;
char *lvb_buf;
struct inode *inode = filp->f_path.dentry->d_inode;
@@ -211,9 +268,13 @@ static ssize_t dlmfs_file_read(struct file *filp,
if (!lvb_buf)
return -ENOMEM;
- user_dlm_read_lvb(inode, lvb_buf, readlen);
- bytes_left = __copy_to_user(buf, lvb_buf, readlen);
- readlen -= bytes_left;
+ got = user_dlm_read_lvb(inode, lvb_buf, readlen);
+ if (got) {
+ BUG_ON(got != readlen);
+ bytes_left = __copy_to_user(buf, lvb_buf, readlen);
+ readlen -= bytes_left;
+ } else
+ readlen = 0;
kfree(lvb_buf);
@@ -272,7 +333,7 @@ static void dlmfs_init_once(void *foo)
struct dlmfs_inode_private *ip =
(struct dlmfs_inode_private *) foo;
- ip->ip_dlm = NULL;
+ ip->ip_conn = NULL;
ip->ip_parent = NULL;
inode_init_once(&ip->ip_vfs_inode);
@@ -314,14 +375,14 @@ static void dlmfs_clear_inode(struct inode *inode)
goto clear_fields;
}
- mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm);
+ mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
/* we must be a directory. If required, let's unregister the
* dlm context now. */
- if (ip->ip_dlm)
- user_dlm_unregister_context(ip->ip_dlm);
+ if (ip->ip_conn)
+ user_dlm_unregister(ip->ip_conn);
clear_fields:
ip->ip_parent = NULL;
- ip->ip_dlm = NULL;
+ ip->ip_conn = NULL;
}
static struct backing_dev_info dlmfs_backing_dev_info = {
@@ -371,7 +432,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ip = DLMFS_I(inode);
- ip->ip_dlm = DLMFS_I(parent)->ip_dlm;
+ ip->ip_conn = DLMFS_I(parent)->ip_conn;
switch (mode & S_IFMT) {
default:
@@ -425,13 +486,12 @@ static int dlmfs_mkdir(struct inode * dir,
struct inode *inode = NULL;
struct qstr *domain = &dentry->d_name;
struct dlmfs_inode_private *ip;
- struct dlm_ctxt *dlm;
- struct dlm_protocol_version proto = user_locking_protocol;
+ struct ocfs2_cluster_connection *conn;
mlog(0, "mkdir %.*s\n", domain->len, domain->name);
/* verify that we have a proper domain */
- if (domain->len >= O2NM_MAX_NAME_LEN) {
+ if (domain->len >= GROUP_NAME_MAX) {
status = -EINVAL;
mlog(ML_ERROR, "invalid domain name for directory.\n");
goto bail;
@@ -446,14 +506,14 @@ static int dlmfs_mkdir(struct inode * dir,
ip = DLMFS_I(inode);
- dlm = user_dlm_register_context(domain, &proto);
- if (IS_ERR(dlm)) {
- status = PTR_ERR(dlm);
+ conn = user_dlm_register(domain);
+ if (IS_ERR(conn)) {
+ status = PTR_ERR(conn);
mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
status, domain->len, domain->name);
goto bail;
}
- ip->ip_dlm = dlm;
+ ip->ip_conn = conn;
inc_nlink(dir);
d_instantiate(dentry, inode);
@@ -549,6 +609,7 @@ static int dlmfs_fill_super(struct super_block * sb,
static const struct file_operations dlmfs_file_operations = {
.open = dlmfs_file_open,
.release = dlmfs_file_release,
+ .poll = dlmfs_file_poll,
.read = dlmfs_file_read,
.write = dlmfs_file_write,
};
@@ -576,6 +637,7 @@ static const struct super_operations dlmfs_ops = {
static const struct inode_operations dlmfs_file_inode_operations = {
.getattr = simple_getattr,
+ .setattr = dlmfs_file_setattr,
};
static int dlmfs_get_sb(struct file_system_type *fs_type,
@@ -620,6 +682,7 @@ static int __init init_dlmfs_fs(void)
}
cleanup_worker = 1;
+ user_dlm_set_locking_protocol();
status = register_filesystem(&dlmfs_fs_type);
bail:
if (status) {
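
From userspace, the two new pieces of dlmfs ABI surface roughly as follows (the mount point, domain, and lock names below are assumptions about a typical setup, not from the patch): the capabilities string is readable through the 0444 module parameter, and POLLIN on the descriptor of a held lock file signals that a blocking AST has fired. A minimal sketch:

#include <poll.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char caps[64];
	struct pollfd pfd;
	int fd, n;

	/* module_param_call(..., 0444) => readable via sysfs */
	fd = open("/sys/module/ocfs2_dlmfs/parameters/capabilities", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, caps, sizeof(caps) - 1);
		if (n > 0) {
			caps[n] = '\0';
			printf("dlmfs capabilities: %s\n", caps); /* e.g. "bast stackglue" */
		}
		close(fd);
	}

	/* Assumed: dlmfs mounted at /dlm; opening O_RDWR takes the lock EX. */
	pfd.fd = open("/dlm/mydomain/mylock", O_RDWR);
	pfd.events = POLLIN;
	if (pfd.fd >= 0 && poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		printf("bast fired: another node wants this lock\n");
	return 0;
}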
diff --git a/fs/ocfs2/dlm/dlmfsver.c b/fs/ocfs2/dlmfs/dlmfsver.c
index a733b3321f8..a733b3321f8 100644
--- a/fs/ocfs2/dlm/dlmfsver.c
+++ b/fs/ocfs2/dlmfs/dlmfsver.c
diff --git a/fs/ocfs2/dlm/dlmfsver.h b/fs/ocfs2/dlmfs/dlmfsver.h
index f35eadbed25..f35eadbed25 100644
--- a/fs/ocfs2/dlm/dlmfsver.h
+++ b/fs/ocfs2/dlmfs/dlmfsver.h
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
index 4cb1d3dae25..0499e3fb7bd 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -34,18 +34,19 @@
#include <linux/types.h>
#include <linux/crc32.h>
-
-#include "cluster/nodemanager.h"
-#include "cluster/heartbeat.h"
-#include "cluster/tcp.h"
-
-#include "dlmapi.h"
-
+#include "ocfs2_lockingver.h"
+#include "stackglue.h"
#include "userdlm.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
+
+static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+{
+ return container_of(lksb, struct user_lock_res, l_lksb);
+}
+
static inline int user_check_wait_flag(struct user_lock_res *lockres,
int flag)
{
@@ -73,15 +74,15 @@ static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
}
/* I heart container_of... */
-static inline struct dlm_ctxt *
-dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
+static inline struct ocfs2_cluster_connection *
+cluster_connection_from_user_lockres(struct user_lock_res *lockres)
{
struct dlmfs_inode_private *ip;
ip = container_of(lockres,
struct dlmfs_inode_private,
ip_lockres);
- return ip->ip_dlm;
+ return ip->ip_conn;
}
static struct inode *
@@ -103,9 +104,9 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
}
#define user_log_dlm_error(_func, _stat, _lockres) do { \
- mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
- "resource %.*s: %s\n", dlm_errname(_stat), _func, \
- _lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat)); \
+ mlog(ML_ERROR, "Dlm error %d while calling %s on " \
+ "resource %.*s\n", _stat, _func, \
+ _lockres->l_namelen, _lockres->l_name); \
} while (0)
/* WARNING: This function lives in a world where the only three lock
@@ -113,34 +114,35 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
* lock types are added. */
static inline int user_highest_compat_lock_level(int level)
{
- int new_level = LKM_EXMODE;
+ int new_level = DLM_LOCK_EX;
- if (level == LKM_EXMODE)
- new_level = LKM_NLMODE;
- else if (level == LKM_PRMODE)
- new_level = LKM_PRMODE;
+ if (level == DLM_LOCK_EX)
+ new_level = DLM_LOCK_NL;
+ else if (level == DLM_LOCK_PR)
+ new_level = DLM_LOCK_PR;
return new_level;
}
-static void user_ast(void *opaque)
+static void user_ast(struct ocfs2_dlm_lksb *lksb)
{
- struct user_lock_res *lockres = opaque;
- struct dlm_lockstatus *lksb;
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
+ int status;
- mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen,
- lockres->l_name);
+ mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_level,
+ lockres->l_requested);
spin_lock(&lockres->l_lock);
- lksb = &(lockres->l_lksb);
- if (lksb->status != DLM_NORMAL) {
+ status = ocfs2_dlm_lock_status(&lockres->l_lksb);
+ if (status) {
mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
- lksb->status, lockres->l_namelen, lockres->l_name);
+ status, lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
return;
}
- mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE,
+ mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
"Lockres %.*s, requested ivmode. flags 0x%x\n",
lockres->l_namelen, lockres->l_name, lockres->l_flags);
@@ -148,13 +150,13 @@ static void user_ast(void *opaque)
if (lockres->l_requested < lockres->l_level) {
if (lockres->l_requested <=
user_highest_compat_lock_level(lockres->l_blocking)) {
- lockres->l_blocking = LKM_NLMODE;
+ lockres->l_blocking = DLM_LOCK_NL;
lockres->l_flags &= ~USER_LOCK_BLOCKED;
}
}
lockres->l_level = lockres->l_requested;
- lockres->l_requested = LKM_IVMODE;
+ lockres->l_requested = DLM_LOCK_IV;
lockres->l_flags |= USER_LOCK_ATTACHED;
lockres->l_flags &= ~USER_LOCK_BUSY;
@@ -193,11 +195,11 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
return;
switch (lockres->l_blocking) {
- case LKM_EXMODE:
+ case DLM_LOCK_EX:
if (!lockres->l_ex_holders && !lockres->l_ro_holders)
queue = 1;
break;
- case LKM_PRMODE:
+ case DLM_LOCK_PR:
if (!lockres->l_ex_holders)
queue = 1;
break;
@@ -209,12 +211,12 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
__user_dlm_queue_lockres(lockres);
}
-static void user_bast(void *opaque, int level)
+static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
{
- struct user_lock_res *lockres = opaque;
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
- mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n",
- lockres->l_namelen, lockres->l_name, level);
+ mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",
+ lockres->l_namelen, lockres->l_name, level, lockres->l_level);
spin_lock(&lockres->l_lock);
lockres->l_flags |= USER_LOCK_BLOCKED;
@@ -227,15 +229,15 @@ static void user_bast(void *opaque, int level)
wake_up(&lockres->l_event);
}
-static void user_unlock_ast(void *opaque, enum dlm_status status)
+static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
{
- struct user_lock_res *lockres = opaque;
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
- mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen,
- lockres->l_name);
+ mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_flags);
- if (status != DLM_NORMAL && status != DLM_CANCELGRANT)
- mlog(ML_ERROR, "Dlm returns status %d\n", status);
+ if (status)
+ mlog(ML_ERROR, "dlm returns status %d\n", status);
spin_lock(&lockres->l_lock);
/* The teardown flag gets set early during the unlock process,
@@ -243,7 +245,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
* for a concurrent cancel. */
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
&& !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
- lockres->l_level = LKM_IVMODE;
+ lockres->l_level = DLM_LOCK_IV;
} else if (status == DLM_CANCELGRANT) {
/* We tried to cancel a convert request, but it was
* already granted. Don't clear the busy flag - the
@@ -254,7 +256,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status)
} else {
BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
/* Cancel succeeded, we want to re-queue */
- lockres->l_requested = LKM_IVMODE; /* cancel an
+ lockres->l_requested = DLM_LOCK_IV; /* cancel an
* upconvert
* request. */
lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
@@ -271,6 +273,21 @@ out_noclear:
wake_up(&lockres->l_event);
}
+/*
+ * This is the userdlmfs locking protocol version.
+ *
+ * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ */
+static struct ocfs2_locking_protocol user_dlm_lproto = {
+ .lp_max_version = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+ },
+ .lp_lock_ast = user_ast,
+ .lp_blocking_ast = user_bast,
+ .lp_unlock_ast = user_unlock_ast,
+};
+
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{
struct inode *inode;
@@ -283,10 +300,10 @@ static void user_dlm_unblock_lock(struct work_struct *work)
int new_level, status;
struct user_lock_res *lockres =
container_of(work, struct user_lock_res, l_work);
- struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
- mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
- lockres->l_name);
+ mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
spin_lock(&lockres->l_lock);
@@ -304,17 +321,23 @@ static void user_dlm_unblock_lock(struct work_struct *work)
* flag, and finally we might get another bast which re-queues
* us before our ast for the downconvert is called. */
if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",
+ lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",
+ lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
if (lockres->l_flags & USER_LOCK_BUSY) {
if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n",
+ lockres->l_namelen, lockres->l_name);
spin_unlock(&lockres->l_lock);
goto drop_ref;
}
@@ -322,32 +345,31 @@ static void user_dlm_unblock_lock(struct work_struct *work)
lockres->l_flags |= USER_LOCK_IN_CANCEL;
spin_unlock(&lockres->l_lock);
- status = dlmunlock(dlm,
- &lockres->l_lksb,
- LKM_CANCEL,
- user_unlock_ast,
- lockres);
- if (status != DLM_NORMAL)
- user_log_dlm_error("dlmunlock", status, lockres);
+ status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
+ DLM_LKF_CANCEL);
+ if (status)
+ user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
goto drop_ref;
}
/* If there are still incompat holders, we can exit safely
* without worrying about re-queueing this lock as that will
* happen on the last call to user_cluster_unlock. */
- if ((lockres->l_blocking == LKM_EXMODE)
+ if ((lockres->l_blocking == DLM_LOCK_EX)
&& (lockres->l_ex_holders || lockres->l_ro_holders)) {
spin_unlock(&lockres->l_lock);
- mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n",
- lockres->l_ro_holders, lockres->l_ex_holders);
+ mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n",
+ lockres->l_namelen, lockres->l_name,
+ lockres->l_ex_holders, lockres->l_ro_holders);
goto drop_ref;
}
- if ((lockres->l_blocking == LKM_PRMODE)
+ if ((lockres->l_blocking == DLM_LOCK_PR)
&& lockres->l_ex_holders) {
spin_unlock(&lockres->l_lock);
- mlog(0, "can't downconvert for pr: ex = %u\n",
- lockres->l_ex_holders);
+ mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n",
+ lockres->l_namelen, lockres->l_name,
+ lockres->l_ex_holders);
goto drop_ref;
}
@@ -355,22 +377,17 @@ static void user_dlm_unblock_lock(struct work_struct *work)
new_level = user_highest_compat_lock_level(lockres->l_blocking);
lockres->l_requested = new_level;
lockres->l_flags |= USER_LOCK_BUSY;
- mlog(0, "Downconvert lock from %d to %d\n",
- lockres->l_level, new_level);
+ mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
spin_unlock(&lockres->l_lock);
/* need lock downconvert request now... */
- status = dlmlock(dlm,
- new_level,
- &lockres->l_lksb,
- LKM_CONVERT|LKM_VALBLK,
- lockres->l_name,
- lockres->l_namelen,
- user_ast,
- lockres,
- user_bast);
- if (status != DLM_NORMAL) {
- user_log_dlm_error("dlmlock", status, lockres);
+ status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
+ DLM_LKF_CONVERT|DLM_LKF_VALBLK,
+ lockres->l_name,
+ lockres->l_namelen);
+ if (status) {
+ user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
user_recover_from_dlm_error(lockres);
}
@@ -382,10 +399,10 @@ static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
int level)
{
switch(level) {
- case LKM_EXMODE:
+ case DLM_LOCK_EX:
lockres->l_ex_holders++;
break;
- case LKM_PRMODE:
+ case DLM_LOCK_PR:
lockres->l_ro_holders++;
break;
default:
@@ -410,20 +427,19 @@ int user_dlm_cluster_lock(struct user_lock_res *lockres,
int lkm_flags)
{
int status, local_flags;
- struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
- if (level != LKM_EXMODE &&
- level != LKM_PRMODE) {
+ if (level != DLM_LOCK_EX &&
+ level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name);
status = -EINVAL;
goto bail;
}
- mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n",
- lockres->l_namelen, lockres->l_name,
- (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE",
- lkm_flags);
+ mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
+ lockres->l_namelen, lockres->l_name, level, lkm_flags);
again:
if (signal_pending(current)) {
@@ -457,35 +473,26 @@ again:
}
if (level > lockres->l_level) {
- local_flags = lkm_flags | LKM_VALBLK;
- if (lockres->l_level != LKM_IVMODE)
- local_flags |= LKM_CONVERT;
+ local_flags = lkm_flags | DLM_LKF_VALBLK;
+ if (lockres->l_level != DLM_LOCK_IV)
+ local_flags |= DLM_LKF_CONVERT;
lockres->l_requested = level;
lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
- BUG_ON(level == LKM_IVMODE);
- BUG_ON(level == LKM_NLMODE);
+ BUG_ON(level == DLM_LOCK_IV);
+ BUG_ON(level == DLM_LOCK_NL);
/* call dlm_lock to upgrade lock now */
- status = dlmlock(dlm,
- level,
- &lockres->l_lksb,
- local_flags,
- lockres->l_name,
- lockres->l_namelen,
- user_ast,
- lockres,
- user_bast);
- if (status != DLM_NORMAL) {
- if ((lkm_flags & LKM_NOQUEUE) &&
- (status == DLM_NOTQUEUED))
- status = -EAGAIN;
- else {
- user_log_dlm_error("dlmlock", status, lockres);
- status = -EINVAL;
- }
+ status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
+ local_flags, lockres->l_name,
+ lockres->l_namelen);
+ if (status) {
+ if ((lkm_flags & DLM_LKF_NOQUEUE) &&
+ (status != -EAGAIN))
+ user_log_dlm_error("ocfs2_dlm_lock",
+ status, lockres);
user_recover_from_dlm_error(lockres);
goto bail;
}
@@ -506,11 +513,11 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
int level)
{
switch(level) {
- case LKM_EXMODE:
+ case DLM_LOCK_EX:
BUG_ON(!lockres->l_ex_holders);
lockres->l_ex_holders--;
break;
- case LKM_PRMODE:
+ case DLM_LOCK_PR:
BUG_ON(!lockres->l_ro_holders);
lockres->l_ro_holders--;
break;
@@ -522,8 +529,8 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
void user_dlm_cluster_unlock(struct user_lock_res *lockres,
int level)
{
- if (level != LKM_EXMODE &&
- level != LKM_PRMODE) {
+ if (level != DLM_LOCK_EX &&
+ level != DLM_LOCK_PR) {
mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
lockres->l_namelen, lockres->l_name);
return;
@@ -540,33 +547,40 @@ void user_dlm_write_lvb(struct inode *inode,
unsigned int len)
{
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
- char *lvb = lockres->l_lksb.lvb;
+ char *lvb;
BUG_ON(len > DLM_LVB_LEN);
spin_lock(&lockres->l_lock);
- BUG_ON(lockres->l_level < LKM_EXMODE);
+ BUG_ON(lockres->l_level < DLM_LOCK_EX);
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
memcpy(lvb, val, len);
spin_unlock(&lockres->l_lock);
}
-void user_dlm_read_lvb(struct inode *inode,
- char *val,
- unsigned int len)
+ssize_t user_dlm_read_lvb(struct inode *inode,
+ char *val,
+ unsigned int len)
{
struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
- char *lvb = lockres->l_lksb.lvb;
+ char *lvb;
+ ssize_t ret = len;
BUG_ON(len > DLM_LVB_LEN);
spin_lock(&lockres->l_lock);
- BUG_ON(lockres->l_level < LKM_PRMODE);
- memcpy(val, lvb, len);
+ BUG_ON(lockres->l_level < DLM_LOCK_PR);
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ memcpy(val, lvb, len);
+ } else
+ ret = 0;
spin_unlock(&lockres->l_lock);
+ return ret;
}
void user_dlm_lock_res_init(struct user_lock_res *lockres,
@@ -576,9 +590,9 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres,
spin_lock_init(&lockres->l_lock);
init_waitqueue_head(&lockres->l_event);
- lockres->l_level = LKM_IVMODE;
- lockres->l_requested = LKM_IVMODE;
- lockres->l_blocking = LKM_IVMODE;
+ lockres->l_level = DLM_LOCK_IV;
+ lockres->l_requested = DLM_LOCK_IV;
+ lockres->l_blocking = DLM_LOCK_IV;
/* should have been checked before getting here. */
BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
@@ -592,9 +606,10 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres,
int user_dlm_destroy_lock(struct user_lock_res *lockres)
{
int status = -EBUSY;
- struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
- mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name);
+ mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
spin_lock(&lockres->l_lock);
if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
@@ -627,14 +642,9 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres)
lockres->l_flags |= USER_LOCK_BUSY;
spin_unlock(&lockres->l_lock);
- status = dlmunlock(dlm,
- &lockres->l_lksb,
- LKM_VALBLK,
- user_unlock_ast,
- lockres);
- if (status != DLM_NORMAL) {
- user_log_dlm_error("dlmunlock", status, lockres);
- status = -EINVAL;
+ status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
+ if (status) {
+ user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
goto bail;
}
@@ -645,32 +655,34 @@ bail:
return status;
}
-struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
- struct dlm_protocol_version *proto)
+static void user_dlm_recovery_handler_noop(int node_num,
+ void *recovery_data)
{
- struct dlm_ctxt *dlm;
- u32 dlm_key;
- char *domain;
-
- domain = kmalloc(name->len + 1, GFP_NOFS);
- if (!domain) {
- mlog_errno(-ENOMEM);
- return ERR_PTR(-ENOMEM);
- }
+ /* We ignore recovery events */
+ return;
+}
- dlm_key = crc32_le(0, name->name, name->len);
+void user_dlm_set_locking_protocol(void)
+{
+ ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
+}
- snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
+struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name)
+{
+ int rc;
+ struct ocfs2_cluster_connection *conn;
- dlm = dlm_register_domain(domain, dlm_key, proto);
- if (IS_ERR(dlm))
- mlog_errno(PTR_ERR(dlm));
+ rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
+ &user_dlm_lproto,
+ user_dlm_recovery_handler_noop,
+ NULL, &conn);
+ if (rc)
+ mlog_errno(rc);
- kfree(domain);
- return dlm;
+ return rc ? ERR_PTR(rc) : conn;
}
-void user_dlm_unregister_context(struct dlm_ctxt *dlm)
+void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
{
- dlm_unregister_domain(dlm);
+ ocfs2_cluster_disconnect(conn, 0);
}
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h
index 0c3cc03c61f..3b42d79531d 100644
--- a/fs/ocfs2/dlm/userdlm.h
+++ b/fs/ocfs2/dlmfs/userdlm.h
@@ -57,7 +57,7 @@ struct user_lock_res {
int l_level;
unsigned int l_ro_holders;
unsigned int l_ex_holders;
- struct dlm_lockstatus l_lksb;
+ struct ocfs2_dlm_lksb l_lksb;
int l_requested;
int l_blocking;
@@ -80,15 +80,15 @@ void user_dlm_cluster_unlock(struct user_lock_res *lockres,
void user_dlm_write_lvb(struct inode *inode,
const char *val,
unsigned int len);
-void user_dlm_read_lvb(struct inode *inode,
- char *val,
- unsigned int len);
-struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
- struct dlm_protocol_version *proto);
-void user_dlm_unregister_context(struct dlm_ctxt *dlm);
+ssize_t user_dlm_read_lvb(struct inode *inode,
+ char *val,
+ unsigned int len);
+struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name);
+void user_dlm_unregister(struct ocfs2_cluster_connection *conn);
+void user_dlm_set_locking_protocol(void);
struct dlmfs_inode_private {
- struct dlm_ctxt *ip_dlm;
+ struct ocfs2_cluster_connection *ip_conn;
struct user_lock_res ip_lockres; /* unused for directories. */
struct inode *ip_parent;
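
Note on the userdlm interface change above: user_dlm_read_lvb() now returns an ssize_t instead of void, and returns 0 when the cluster stack reports the value block as invalid (ocfs2_dlm_lvb_valid() fails); only a valid LVB is copied out. A minimal userspace sketch of that calling convention follows; the struct, field names, and caller below are hypothetical stand-ins, not the kernel types.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define DLM_LVB_LEN 64

struct fake_lockres {
	int  lvb_valid;              /* models ocfs2_dlm_lvb_valid() */
	char lvb[DLM_LVB_LEN];       /* models the buffer from ocfs2_dlm_lvb() */
};

/* Same contract as the reworked user_dlm_read_lvb(): copy len bytes and
 * return len, or return 0 without copying when the LVB is not valid. */
static ssize_t fake_read_lvb(struct fake_lockres *res, char *val, unsigned int len)
{
	if (!res->lvb_valid)
		return 0;
	memcpy(val, res->lvb, len);
	return len;
}

int main(void)
{
	struct fake_lockres res = { .lvb_valid = 1 };
	char buf[DLM_LVB_LEN];
	ssize_t got;

	strcpy(res.lvb, "example value block");
	got = fake_read_lvb(&res, buf, sizeof(buf));
	printf("read %zd bytes: %s\n", got, got ? buf : "(invalid LVB)");

	res.lvb_valid = 0;           /* e.g. the stack flagged the LVB invalid */
	got = fake_read_lvb(&res, buf, sizeof(buf));
	printf("read %zd bytes\n", got);
	return 0;
}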
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e044019cb3b..8298608d416 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -297,6 +297,11 @@ static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}
+static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+{
+ return container_of(lksb, struct ocfs2_lock_res, l_lksb);
+}
+
static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
BUG_ON(!ocfs2_is_inode_lock(lockres));
@@ -927,6 +932,10 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
lockres->l_blocking = level;
}
+ mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
+ lockres->l_name, level, lockres->l_level, lockres->l_blocking,
+ needs_downconvert);
+
if (needs_downconvert)
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
@@ -1040,18 +1049,17 @@ static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
return lockres->l_pending_gen;
}
-
-static void ocfs2_blocking_ast(void *opaque, int level)
+static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
{
- struct ocfs2_lock_res *lockres = opaque;
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
int needs_downconvert;
unsigned long flags;
BUG_ON(level <= DLM_LOCK_NL);
- mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
- lockres->l_name, level, lockres->l_level,
+ mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
+ "type %s\n", lockres->l_name, level, lockres->l_level,
ocfs2_lock_type_string(lockres->l_type));
/*
@@ -1072,9 +1080,9 @@ static void ocfs2_blocking_ast(void *opaque, int level)
ocfs2_wake_downconvert_thread(osb);
}
-static void ocfs2_locking_ast(void *opaque)
+static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
{
- struct ocfs2_lock_res *lockres = opaque;
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
unsigned long flags;
int status;
@@ -1095,6 +1103,10 @@ static void ocfs2_locking_ast(void *opaque)
return;
}
+ mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
+ "level %d => %d\n", lockres->l_name, lockres->l_action,
+ lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
+
switch(lockres->l_action) {
case OCFS2_AST_ATTACH:
ocfs2_generic_handle_attach_action(lockres);
@@ -1107,8 +1119,8 @@ static void ocfs2_locking_ast(void *opaque)
ocfs2_generic_handle_downconvert_action(lockres);
break;
default:
- mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
- "lockres flags = 0x%lx, unlock action: %u\n",
+ mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
+ "flags 0x%lx, unlock: %u\n",
lockres->l_name, lockres->l_action, lockres->l_flags,
lockres->l_unlock_action);
BUG();
@@ -1134,6 +1146,88 @@ out:
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
+static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
+{
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
+ unsigned long flags;
+
+ mlog_entry_void();
+
+ mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
+ lockres->l_name, lockres->l_unlock_action);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (error) {
+ mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
+ "unlock_action %d\n", error, lockres->l_name,
+ lockres->l_unlock_action);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ mlog_exit_void();
+ return;
+ }
+
+ switch(lockres->l_unlock_action) {
+ case OCFS2_UNLOCK_CANCEL_CONVERT:
+ mlog(0, "Cancel convert success for %s\n", lockres->l_name);
+ lockres->l_action = OCFS2_AST_INVALID;
+ /* Downconvert thread may have requeued this lock, we
+ * need to wake it. */
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+ ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
+ break;
+ case OCFS2_UNLOCK_DROP_LOCK:
+ lockres->l_level = DLM_LOCK_IV;
+ break;
+ default:
+ BUG();
+ }
+
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
+ wake_up(&lockres->l_event);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ mlog_exit_void();
+}
+
+/*
+ * This is the filesystem locking protocol. It provides the lock handling
+ * hooks for the underlying DLM. It has a maximum version number.
+ * The version number allows interoperability with systems running at
+ * the same major number and an equal or smaller minor number.
+ *
+ * Whenever the filesystem does new things with locks (adds or removes a
+ * lock, orders them differently, does different things underneath a lock),
+ * the version must be changed. The protocol is negotiated when joining
+ * the dlm domain. A node may join the domain if its major version is
+ * identical to all other nodes and its minor version is greater than
+ * or equal to all other nodes. When its minor version is greater than
+ * the other nodes, it will run at the minor version specified by the
+ * other nodes.
+ *
+ * If a locking change is made that will not be compatible with older
+ * versions, the major number must be increased and the minor version set
+ * to zero. If a change merely adds a behavior that can be disabled when
+ * speaking to older versions, the minor version must be increased. If a
+ * change adds a fully backwards compatible change (eg, LVB changes that
+ * are just ignored by older versions), the version does not need to be
+ * updated.
+ */
+static struct ocfs2_locking_protocol lproto = {
+ .lp_max_version = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+ },
+ .lp_lock_ast = ocfs2_locking_ast,
+ .lp_blocking_ast = ocfs2_blocking_ast,
+ .lp_unlock_ast = ocfs2_unlock_ast,
+};
+
+void ocfs2_set_locking_protocol(void)
+{
+ ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
+}
+
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
int convert)
{
@@ -1189,8 +1283,7 @@ static int ocfs2_lock_create(struct ocfs2_super *osb,
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
- OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -1412,7 +1505,7 @@ again:
BUG_ON(level == DLM_LOCK_IV);
BUG_ON(level == DLM_LOCK_NL);
- mlog(0, "lock %s, convert from %d to level = %d\n",
+ mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
lockres->l_name, lockres->l_level, level);
/* call dlm_lock to upgrade lock now */
@@ -1421,8 +1514,7 @@ again:
&lockres->l_lksb,
lkm_flags,
lockres->l_name,
- OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
@@ -1859,8 +1951,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock)
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
- lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
if (ret) {
if (!trylock || (ret != -EAGAIN)) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -2989,7 +3080,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
status = ocfs2_cluster_connect(osb->osb_cluster_stack,
osb->uuid_str,
strlen(osb->uuid_str),
- ocfs2_do_node_down, osb,
+ &lproto, ocfs2_do_node_down, osb,
&conn);
if (status) {
mlog_errno(status);
@@ -3056,50 +3147,6 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
mlog_exit_void();
}
-static void ocfs2_unlock_ast(void *opaque, int error)
-{
- struct ocfs2_lock_res *lockres = opaque;
- unsigned long flags;
-
- mlog_entry_void();
-
- mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
- lockres->l_unlock_action);
-
- spin_lock_irqsave(&lockres->l_lock, flags);
- if (error) {
- mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
- "unlock_action %d\n", error, lockres->l_name,
- lockres->l_unlock_action);
- spin_unlock_irqrestore(&lockres->l_lock, flags);
- mlog_exit_void();
- return;
- }
-
- switch(lockres->l_unlock_action) {
- case OCFS2_UNLOCK_CANCEL_CONVERT:
- mlog(0, "Cancel convert success for %s\n", lockres->l_name);
- lockres->l_action = OCFS2_AST_INVALID;
- /* Downconvert thread may have requeued this lock, we
- * need to wake it. */
- if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
- ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
- break;
- case OCFS2_UNLOCK_DROP_LOCK:
- lockres->l_level = DLM_LOCK_IV;
- break;
- default:
- BUG();
- }
-
- lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
- lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
- wake_up(&lockres->l_event);
- spin_unlock_irqrestore(&lockres->l_lock, flags);
-
- mlog_exit_void();
-}
-
static int ocfs2_drop_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
@@ -3167,8 +3214,7 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb,
mlog(0, "lock %s\n", lockres->l_name);
- ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags,
- lockres);
+ ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
@@ -3276,13 +3322,20 @@ static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
if (lockres->l_level <= new_level) {
- mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n",
- lockres->l_level, new_level);
+ mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
+ "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
+ "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
+ new_level, list_empty(&lockres->l_blocked_list),
+ list_empty(&lockres->l_mask_waiters), lockres->l_type,
+ lockres->l_flags, lockres->l_ro_holders,
+ lockres->l_ex_holders, lockres->l_action,
+ lockres->l_unlock_action, lockres->l_requested,
+ lockres->l_blocking, lockres->l_pending_gen);
BUG();
}
- mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
- lockres->l_name, new_level, lockres->l_blocking);
+ mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
+ lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
lockres->l_action = OCFS2_AST_DOWNCONVERT;
lockres->l_requested = new_level;
@@ -3301,6 +3354,9 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
mlog_entry_void();
+ mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
+ lockres->l_level, new_level);
+
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;
@@ -3309,8 +3365,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
- OCFS2_LOCK_ID_MAX_LEN - 1,
- lockres);
+ OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, generation, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
@@ -3331,14 +3386,12 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
assert_spin_locked(&lockres->l_lock);
mlog_entry_void();
- mlog(0, "lock %s\n", lockres->l_name);
if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
/* If we're already trying to cancel a lock conversion
* then just drop the spinlock and allow the caller to
* requeue this lock. */
-
- mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
+ mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
return 0;
}
@@ -3353,6 +3406,8 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
"lock %s, invalid flags: 0x%lx\n",
lockres->l_name, lockres->l_flags);
+ mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
+
return 1;
}
@@ -3362,16 +3417,15 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb,
int ret;
mlog_entry_void();
- mlog(0, "lock %s\n", lockres->l_name);
ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
- DLM_LKF_CANCEL, lockres);
+ DLM_LKF_CANCEL);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
ocfs2_recover_from_dlm_error(lockres, 0);
}
- mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
+ mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
mlog_exit(ret);
return ret;
@@ -3428,8 +3482,11 @@ recheck:
* at the same time they set OCFS2_DLM_BUSY. They must
* clear OCFS2_DLM_PENDING after dlm_lock() returns.
*/
- if (lockres->l_flags & OCFS2_LOCK_PENDING)
+ if (lockres->l_flags & OCFS2_LOCK_PENDING) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
+ lockres->l_name);
goto leave_requeue;
+ }
ctl->requeue = 1;
ret = ocfs2_prepare_cancel_convert(osb, lockres);
@@ -3461,6 +3518,7 @@ recheck:
*/
if (lockres->l_level == DLM_LOCK_NL) {
BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+ mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
lockres->l_blocking = DLM_LOCK_NL;
lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
spin_unlock_irqrestore(&lockres->l_lock, flags);
@@ -3470,28 +3528,41 @@ recheck:
/* if we're blocking an exclusive and we have *any* holders,
* then requeue. */
if ((lockres->l_blocking == DLM_LOCK_EX)
- && (lockres->l_ex_holders || lockres->l_ro_holders))
+ && (lockres->l_ex_holders || lockres->l_ro_holders)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
+ lockres->l_name, lockres->l_ex_holders,
+ lockres->l_ro_holders);
goto leave_requeue;
+ }
/* If it's a PR we're blocking, then only
* requeue if we've got any EX holders */
if (lockres->l_blocking == DLM_LOCK_PR &&
- lockres->l_ex_holders)
+ lockres->l_ex_holders) {
+ mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
+ lockres->l_name, lockres->l_ex_holders);
goto leave_requeue;
+ }
/*
* Can we get a lock in this state if the holder counts are
* zero? The meta data unblock code used to check this.
*/
if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
- && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
+ && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
+ lockres->l_name);
goto leave_requeue;
+ }
new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
if (lockres->l_ops->check_downconvert
- && !lockres->l_ops->check_downconvert(lockres, new_level))
+ && !lockres->l_ops->check_downconvert(lockres, new_level)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
+ lockres->l_name);
goto leave_requeue;
+ }
/* If we get here, then we know that there are no more
* incompatible holders (and anyone asking for an incompatible
@@ -3509,13 +3580,19 @@ recheck:
ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
- if (ctl->unblock_action == UNBLOCK_STOP_POST)
+ if (ctl->unblock_action == UNBLOCK_STOP_POST) {
+ mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
+ lockres->l_name);
goto leave;
+ }
spin_lock_irqsave(&lockres->l_lock, flags);
if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
/* If this changed underneath us, then we can't drop
* it just yet. */
+ mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
+ "Recheck\n", lockres->l_name, blocking,
+ lockres->l_blocking, level, lockres->l_level);
goto recheck;
}
@@ -3910,45 +3987,6 @@ void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
ocfs2_cluster_unlock(osb, lockres, level);
}
-/*
- * This is the filesystem locking protocol. It provides the lock handling
- * hooks for the underlying DLM. It has a maximum version number.
- * The version number allows interoperability with systems running at
- * the same major number and an equal or smaller minor number.
- *
- * Whenever the filesystem does new things with locks (adds or removes a
- * lock, orders them differently, does different things underneath a lock),
- * the version must be changed. The protocol is negotiated when joining
- * the dlm domain. A node may join the domain if its major version is
- * identical to all other nodes and its minor version is greater than
- * or equal to all other nodes. When its minor version is greater than
- * the other nodes, it will run at the minor version specified by the
- * other nodes.
- *
- * If a locking change is made that will not be compatible with older
- * versions, the major number must be increased and the minor version set
- * to zero. If a change merely adds a behavior that can be disabled when
- * speaking to older versions, the minor version must be increased. If a
- * change adds a fully backwards compatible change (eg, LVB changes that
- * are just ignored by older versions), the version does not need to be
- * updated.
- */
-static struct ocfs2_locking_protocol lproto = {
- .lp_max_version = {
- .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
- .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
- },
- .lp_lock_ast = ocfs2_locking_ast,
- .lp_blocking_ast = ocfs2_blocking_ast,
- .lp_unlock_ast = ocfs2_unlock_ast,
-};
-
-void ocfs2_set_locking_protocol(void)
-{
- ocfs2_stack_glue_set_locking_protocol(&lproto);
-}
-
-
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
@@ -3965,7 +4003,7 @@ static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
BUG_ON(!lockres);
BUG_ON(!lockres->l_ops);
- mlog(0, "lockres %s blocked.\n", lockres->l_name);
+ mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
/* Detect whether a lock has been marked as going away while
* the downconvert thread was processing other things. A lock can
@@ -3988,7 +4026,7 @@ unqueue:
} else
ocfs2_schedule_blocked_lock(osb, lockres);
- mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
+ mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
ctl.requeue ? "yes" : "no");
spin_unlock_irqrestore(&lockres->l_lock, flags);
@@ -4010,7 +4048,7 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
/* Do not schedule a lock for downconvert when it's on
* the way to destruction - any nodes wanting access
* to the resource will get it soon. */
- mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
+ mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
lockres->l_name, lockres->l_flags);
return;
}
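
The locking-protocol comment moved into dlmglue.c earlier in this file's hunks spells out the join rule: a node may join only if its major version is identical to the domain's and its minor version is at least as large; a joiner with a larger minor then runs at the domain's smaller minor. A standalone sketch of that rule, mirroring the two-field struct ocfs2_protocol_version (the helper name and sample values are illustrative, not kernel code):

#include <stdio.h>

struct proto_version {
	unsigned char pv_major;
	unsigned char pv_minor;
};

/* Join rule from the dlmglue.c comment: majors must match, the joiner's
 * minor must be >= the domain's, and the joiner then runs at the
 * domain's (smaller) minor. */
static int negotiate_join(const struct proto_version *domain,
			  struct proto_version *joiner)
{
	if (joiner->pv_major != domain->pv_major ||
	    joiner->pv_minor < domain->pv_minor)
		return -1;                        /* incompatible, refuse the join */
	joiner->pv_minor = domain->pv_minor;      /* run at the domain's minor */
	return 0;
}

int main(void)
{
	struct proto_version domain = { 1, 0 };
	struct proto_version joiner = { 1, 2 };

	if (negotiate_join(&domain, &joiner) == 0)
		printf("joined, running at %u.%u\n",
		       joiner.pv_major, joiner.pv_minor);
	else
		printf("join refused\n");
	return 0;
}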
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 558ce031242..5b52547d629 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -993,10 +993,9 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
}
if (size_change && attr->ia_size != i_size_read(inode)) {
- if (attr->ia_size > sb->s_maxbytes) {
- status = -EFBIG;
+ status = inode_newsize_ok(inode, attr->ia_size);
+ if (status)
goto bail_unlock;
- }
if (i_size_read(inode) > attr->ia_size) {
if (ocfs2_should_order_data(inode)) {
@@ -1836,6 +1835,8 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
&meta_level);
if (has_refcount)
*has_refcount = 1;
+ if (direct_io)
+ *direct_io = 0;
}
if (ret < 0) {
@@ -1859,10 +1860,6 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
break;
}
- if (has_refcount && *has_refcount == 1) {
- *direct_io = 0;
- break;
- }
/*
* Allowing concurrent direct writes means
* i_size changes wouldn't be synchronized, so
@@ -2043,7 +2040,7 @@ out_dio:
* async dio is going to do it in the future or an end_io after an
* error has already done it.
*/
- if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
+ if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
rw_level = -1;
have_alloc_sem = 0;
}
diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h
index cf9a5ee30fe..0cd5323bd3f 100644
--- a/fs/ocfs2/ioctl.h
+++ b/fs/ocfs2/ioctl.h
@@ -7,10 +7,10 @@
*
*/
-#ifndef OCFS2_IOCTL_H
-#define OCFS2_IOCTL_H
+#ifndef OCFS2_IOCTL_PROTO_H
+#define OCFS2_IOCTL_PROTO_H
long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg);
-#endif /* OCFS2_IOCTL_H */
+#endif /* OCFS2_IOCTL_PROTO_H */
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ac10f83edb9..ca992d91f51 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -476,7 +476,7 @@ out_mutex:
out:
if (!status)
- ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_steal_slots(osb);
mlog_exit(status);
return status;
}
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 740f448041e..1238b491db9 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -42,6 +42,7 @@
#include "ocfs2_fs.h"
#include "ocfs2_lockid.h"
+#include "ocfs2_ioctl.h"
/* For struct ocfs2_blockcheck_stats */
#include "blockcheck.h"
@@ -159,7 +160,7 @@ struct ocfs2_lock_res {
int l_level;
unsigned int l_ro_holders;
unsigned int l_ex_holders;
- union ocfs2_dlm_lksb l_lksb;
+ struct ocfs2_dlm_lksb l_lksb;
/* used from AST/BAST funcs. */
enum ocfs2_ast_action l_action;
@@ -305,7 +306,9 @@ struct ocfs2_super
u32 s_next_generation;
unsigned long osb_flags;
s16 s_inode_steal_slot;
+ s16 s_meta_steal_slot;
atomic_t s_num_inodes_stolen;
+ atomic_t s_num_meta_stolen;
unsigned long s_mount_opt;
unsigned int s_atime_quantum;
@@ -760,33 +763,6 @@ static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb,
return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
}
-static inline void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
-{
- spin_lock(&osb->osb_lock);
- osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
- spin_unlock(&osb->osb_lock);
- atomic_set(&osb->s_num_inodes_stolen, 0);
-}
-
-static inline void ocfs2_set_inode_steal_slot(struct ocfs2_super *osb,
- s16 slot)
-{
- spin_lock(&osb->osb_lock);
- osb->s_inode_steal_slot = slot;
- spin_unlock(&osb->osb_lock);
-}
-
-static inline s16 ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
-{
- s16 slot;
-
- spin_lock(&osb->osb_lock);
- slot = osb->s_inode_steal_slot;
- spin_unlock(&osb->osb_lock);
-
- return slot;
-}
-
#define ocfs2_set_bit ext2_set_bit
#define ocfs2_clear_bit ext2_clear_bit
#define ocfs2_test_bit ext2_test_bit
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 7638a38c32b..bb37218a797 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -254,63 +254,6 @@
* refcount tree */
/*
- * ioctl commands
- */
-#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
-#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long)
-#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int)
-#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int)
-
-/*
- * Space reservation / allocation / free ioctls and argument structure
- * are designed to be compatible with XFS.
- *
- * ALLOCSP* and FREESP* are not and will never be supported, but are
- * included here for completeness.
- */
-struct ocfs2_space_resv {
- __s16 l_type;
- __s16 l_whence;
- __s64 l_start;
- __s64 l_len; /* len == 0 means until end of file */
- __s32 l_sysid;
- __u32 l_pid;
- __s32 l_pad[4]; /* reserve area */
-};
-
-#define OCFS2_IOC_ALLOCSP _IOW ('X', 10, struct ocfs2_space_resv)
-#define OCFS2_IOC_FREESP _IOW ('X', 11, struct ocfs2_space_resv)
-#define OCFS2_IOC_RESVSP _IOW ('X', 40, struct ocfs2_space_resv)
-#define OCFS2_IOC_UNRESVSP _IOW ('X', 41, struct ocfs2_space_resv)
-#define OCFS2_IOC_ALLOCSP64 _IOW ('X', 36, struct ocfs2_space_resv)
-#define OCFS2_IOC_FREESP64 _IOW ('X', 37, struct ocfs2_space_resv)
-#define OCFS2_IOC_RESVSP64 _IOW ('X', 42, struct ocfs2_space_resv)
-#define OCFS2_IOC_UNRESVSP64 _IOW ('X', 43, struct ocfs2_space_resv)
-
-/* Used to pass group descriptor data when online resize is done */
-struct ocfs2_new_group_input {
- __u64 group; /* Group descriptor's blkno. */
- __u32 clusters; /* Total number of clusters in this group */
- __u32 frees; /* Total free clusters in this group */
- __u16 chain; /* Chain for this group */
- __u16 reserved1;
- __u32 reserved2;
-};
-
-#define OCFS2_IOC_GROUP_EXTEND _IOW('o', 1, int)
-#define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input)
-#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input)
-
-/* Used to pass 2 file names to reflink. */
-struct reflink_arguments {
- __u64 old_path;
- __u64 new_path;
- __u64 preserve;
-};
-#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments)
-
-
-/*
* Journal Flags (ocfs2_dinode.id1.journal1.i_flags)
*/
#define OCFS2_JOURNAL_DIRTY_FL (0x00000001) /* Journal needs recovery */
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
new file mode 100644
index 00000000000..2d3420af1a8
--- /dev/null
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -0,0 +1,79 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_ioctl.h
+ *
+ * Defines OCFS2 ioctls.
+ *
+ * Copyright (C) 2010 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OCFS2_IOCTL_H
+#define OCFS2_IOCTL_H
+
+/*
+ * ioctl commands
+ */
+#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
+#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long)
+#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int)
+#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int)
+
+/*
+ * Space reservation / allocation / free ioctls and argument structure
+ * are designed to be compatible with XFS.
+ *
+ * ALLOCSP* and FREESP* are not and will never be supported, but are
+ * included here for completeness.
+ */
+struct ocfs2_space_resv {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start;
+ __s64 l_len; /* len == 0 means until end of file */
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserve area */
+};
+
+#define OCFS2_IOC_ALLOCSP _IOW ('X', 10, struct ocfs2_space_resv)
+#define OCFS2_IOC_FREESP _IOW ('X', 11, struct ocfs2_space_resv)
+#define OCFS2_IOC_RESVSP _IOW ('X', 40, struct ocfs2_space_resv)
+#define OCFS2_IOC_UNRESVSP _IOW ('X', 41, struct ocfs2_space_resv)
+#define OCFS2_IOC_ALLOCSP64 _IOW ('X', 36, struct ocfs2_space_resv)
+#define OCFS2_IOC_FREESP64 _IOW ('X', 37, struct ocfs2_space_resv)
+#define OCFS2_IOC_RESVSP64 _IOW ('X', 42, struct ocfs2_space_resv)
+#define OCFS2_IOC_UNRESVSP64 _IOW ('X', 43, struct ocfs2_space_resv)
+
+/* Used to pass group descriptor data when online resize is done */
+struct ocfs2_new_group_input {
+ __u64 group; /* Group descriptor's blkno. */
+ __u32 clusters; /* Total number of clusters in this group */
+ __u32 frees; /* Total free clusters in this group */
+ __u16 chain; /* Chain for this group */
+ __u16 reserved1;
+ __u32 reserved2;
+};
+
+#define OCFS2_IOC_GROUP_EXTEND _IOW('o', 1, int)
+#define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input)
+#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input)
+
+/* Used to pass 2 file names to reflink. */
+struct reflink_arguments {
+ __u64 old_path;
+ __u64 new_path;
+ __u64 preserve;
+};
+#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments)
+
+#endif /* OCFS2_IOCTL_H */
diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h
index 82d5eeac0ff..2e45c8d2ea7 100644
--- a/fs/ocfs2/ocfs2_lockingver.h
+++ b/fs/ocfs2/ocfs2_lockingver.h
@@ -23,6 +23,8 @@
/*
* The protocol version for ocfs2 cluster locking. See dlmglue.c for
* more details.
+ *
+ * 1.0 - Initial locking version from ocfs2 1.4.
*/
#define OCFS2_LOCKING_PROTOCOL_MAJOR 1
#define OCFS2_LOCKING_PROTOCOL_MINOR 0
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 8ae65c9c020..fb6aa7acf54 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -626,7 +626,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
rb = (struct ocfs2_refcount_block *)new_bh->b_data;
memset(rb, 0, inode->i_sb->s_blocksize);
strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
- rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
+ rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
rb->rf_blkno = cpu_to_le64(first_blkno);
@@ -1330,7 +1330,7 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle,
memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
- new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+ new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_blkno = cpu_to_le64(blkno);
new_rb->rf_cpos = cpu_to_le32(0);
@@ -1576,7 +1576,7 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle,
new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
memset(new_rb, 0, sb->s_blocksize);
strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
- new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
+ new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
new_rb->rf_blkno = cpu_to_le64(blkno);
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index 3038c92af49..7020e1253ff 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -161,24 +161,23 @@ static int dlm_status_to_errno(enum dlm_status status)
static void o2dlm_lock_ast_wrapper(void *astarg)
{
- BUG_ON(o2cb_stack.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
- o2cb_stack.sp_proto->lp_lock_ast(astarg);
+ lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
}
static void o2dlm_blocking_ast_wrapper(void *astarg, int level)
{
- BUG_ON(o2cb_stack.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
- o2cb_stack.sp_proto->lp_blocking_ast(astarg, level);
+ lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
}
static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
{
+ struct ocfs2_dlm_lksb *lksb = astarg;
int error = dlm_status_to_errno(status);
- BUG_ON(o2cb_stack.sp_proto == NULL);
-
/*
* In o2dlm, you can get both the lock_ast() for the lock being
* granted and the unlock_ast() for the CANCEL failing. A
@@ -193,16 +192,15 @@ static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
if (status == DLM_CANCELGRANT)
return;
- o2cb_stack.sp_proto->lp_unlock_ast(astarg, error);
+ lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, error);
}
static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- void *astarg)
+ unsigned int namelen)
{
enum dlm_status status;
int o2dlm_mode = mode_to_o2dlm(mode);
@@ -211,28 +209,27 @@ static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn,
status = dlmlock(conn->cc_lockspace, o2dlm_mode, &lksb->lksb_o2dlm,
o2dlm_flags, name, namelen,
- o2dlm_lock_ast_wrapper, astarg,
+ o2dlm_lock_ast_wrapper, lksb,
o2dlm_blocking_ast_wrapper);
ret = dlm_status_to_errno(status);
return ret;
}
static int o2cb_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- void *astarg)
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags)
{
enum dlm_status status;
int o2dlm_flags = flags_to_o2dlm(flags);
int ret;
status = dlmunlock(conn->cc_lockspace, &lksb->lksb_o2dlm,
- o2dlm_flags, o2dlm_unlock_ast_wrapper, astarg);
+ o2dlm_flags, o2dlm_unlock_ast_wrapper, lksb);
ret = dlm_status_to_errno(status);
return ret;
}
-static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return dlm_status_to_errno(lksb->lksb_o2dlm.status);
}
@@ -242,17 +239,17 @@ static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
* contents, it will zero out the LVB. Thus the caller can always trust
* the contents.
*/
-static int o2cb_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+static int o2cb_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
return 1;
}
-static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+static void *o2cb_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
return (void *)(lksb->lksb_o2dlm.lvb);
}
-static void o2cb_dump_lksb(union ocfs2_dlm_lksb *lksb)
+static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
dlm_print_one_lock(lksb->lksb_o2dlm.lockid);
}
@@ -280,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
struct dlm_protocol_version fs_version;
BUG_ON(conn == NULL);
- BUG_ON(o2cb_stack.sp_proto == NULL);
+ BUG_ON(conn->cc_proto == NULL);
/* for now we only have one cluster/node, make sure we see it
* in the heartbeat universe */
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index da78a2a334f..5ae8812b286 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -25,7 +25,6 @@
#include <linux/reboot.h>
#include <asm/uaccess.h>
-#include "ocfs2.h" /* For struct ocfs2_lock_res */
#include "stackglue.h"
#include <linux/dlm_plock.h>
@@ -63,8 +62,8 @@
* negotiated by the client. The client negotiates based on the maximum
* version advertised in /sys/fs/ocfs2/max_locking_protocol. The major
* number from the "SETV" message must match
- * ocfs2_user_plugin.sp_proto->lp_max_version.pv_major, and the minor number
- * must be less than or equal to ...->lp_max_version.pv_minor.
+ * ocfs2_user_plugin.sp_max_proto.pv_major, and the minor number
+ * must be less than or equal to ...sp_max_proto.pv_minor.
*
* Once this information has been set, mounts will be allowed. From this
* point on, the "DOWN" message can be sent for node down notification.
@@ -401,7 +400,7 @@ static int ocfs2_control_do_setversion_msg(struct file *file,
char *ptr = NULL;
struct ocfs2_control_private *p = file->private_data;
struct ocfs2_protocol_version *max =
- &ocfs2_user_plugin.sp_proto->lp_max_version;
+ &ocfs2_user_plugin.sp_max_proto;
if (ocfs2_control_get_handshake_state(file) !=
OCFS2_CONTROL_HANDSHAKE_PROTOCOL)
@@ -664,18 +663,10 @@ static void ocfs2_control_exit(void)
-rc);
}
-static struct dlm_lksb *fsdlm_astarg_to_lksb(void *astarg)
-{
- struct ocfs2_lock_res *res = astarg;
- return &res->l_lksb.lksb_fsdlm;
-}
-
static void fsdlm_lock_ast_wrapper(void *astarg)
{
- struct dlm_lksb *lksb = fsdlm_astarg_to_lksb(astarg);
- int status = lksb->sb_status;
-
- BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
+ int status = lksb->lksb_fsdlm.sb_status;
/*
* For now we're punting on the issue of other non-standard errors
@@ -688,25 +679,24 @@ static void fsdlm_lock_ast_wrapper(void *astarg)
*/
if (status == -DLM_EUNLOCK || status == -DLM_ECANCEL)
- ocfs2_user_plugin.sp_proto->lp_unlock_ast(astarg, 0);
+ lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, 0);
else
- ocfs2_user_plugin.sp_proto->lp_lock_ast(astarg);
+ lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
}
static void fsdlm_blocking_ast_wrapper(void *astarg, int level)
{
- BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
+ struct ocfs2_dlm_lksb *lksb = astarg;
- ocfs2_user_plugin.sp_proto->lp_blocking_ast(astarg, level);
+ lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level);
}
static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- void *astarg)
+ unsigned int namelen)
{
int ret;
@@ -716,36 +706,35 @@ static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
ret = dlm_lock(conn->cc_lockspace, mode, &lksb->lksb_fsdlm,
flags|DLM_LKF_NODLCKWT, name, namelen, 0,
- fsdlm_lock_ast_wrapper, astarg,
+ fsdlm_lock_ast_wrapper, lksb,
fsdlm_blocking_ast_wrapper);
return ret;
}
static int user_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- void *astarg)
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags)
{
int ret;
ret = dlm_unlock(conn->cc_lockspace, lksb->lksb_fsdlm.sb_lkid,
- flags, &lksb->lksb_fsdlm, astarg);
+ flags, &lksb->lksb_fsdlm, lksb);
return ret;
}
-static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+static int user_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return lksb->lksb_fsdlm.sb_status;
}
-static int user_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+static int user_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
int invalid = lksb->lksb_fsdlm.sb_flags & DLM_SBF_VALNOTVALID;
return !invalid;
}
-static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+static void *user_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
if (!lksb->lksb_fsdlm.sb_lvbptr)
lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb +
@@ -753,7 +742,7 @@ static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
return (void *)(lksb->lksb_fsdlm.sb_lvbptr);
}
-static void user_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+static void user_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
}
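
One detail of the fsdlm wrappers above: fs/dlm delivers every completion through the single lock ast, so a finished unlock or cancel arrives there with sb_status set to -DLM_EUNLOCK or -DLM_ECANCEL, and fsdlm_lock_ast_wrapper() reroutes those to lp_unlock_ast(). A toy model of that routing; the status constants and names below are illustrative stand-ins, not the real fs/dlm values.

#include <stdio.h>

/* Stand-ins for -DLM_EUNLOCK / -DLM_ECANCEL; the real values live in
 * the fs/dlm headers. */
#define MODEL_EUNLOCK  (-1001)
#define MODEL_ECANCEL  (-1002)

struct lksb_model {
	int sb_status;
};

static void fs_lock_ast(struct lksb_model *lksb)
{
	printf("lock ast, status %d\n", lksb->sb_status);
}

static void fs_unlock_ast(struct lksb_model *lksb)
{
	printf("unlock ast, status %d\n", lksb->sb_status);
}

/* Models fsdlm_lock_ast_wrapper(): one completion callback, with unlock
 * and cancel completions rerouted by looking at the status. */
static void lock_ast_wrapper(struct lksb_model *lksb)
{
	if (lksb->sb_status == MODEL_EUNLOCK || lksb->sb_status == MODEL_ECANCEL)
		fs_unlock_ast(lksb);
	else
		fs_lock_ast(lksb);
}

int main(void)
{
	struct lksb_model granted = { 0 };
	struct lksb_model unlocked = { MODEL_EUNLOCK };

	lock_ast_wrapper(&granted);
	lock_ast_wrapper(&unlocked);
	return 0;
}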
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index f3df0baa9a4..39abf89697e 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -36,7 +36,7 @@
#define OCFS2_STACK_PLUGIN_USER "user"
#define OCFS2_MAX_HB_CTL_PATH 256
-static struct ocfs2_locking_protocol *lproto;
+static struct ocfs2_protocol_version locking_max_version;
static DEFINE_SPINLOCK(ocfs2_stack_lock);
static LIST_HEAD(ocfs2_stack_list);
static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1];
@@ -176,7 +176,7 @@ int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin)
spin_lock(&ocfs2_stack_lock);
if (!ocfs2_stack_lookup(plugin->sp_name)) {
plugin->sp_count = 0;
- plugin->sp_proto = lproto;
+ plugin->sp_max_proto = locking_max_version;
list_add(&plugin->sp_list, &ocfs2_stack_list);
printk(KERN_INFO "ocfs2: Registered cluster interface %s\n",
plugin->sp_name);
@@ -213,77 +213,76 @@ void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin)
}
EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister);
-void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto)
+void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto)
{
struct ocfs2_stack_plugin *p;
- BUG_ON(proto == NULL);
-
spin_lock(&ocfs2_stack_lock);
- BUG_ON(active_stack != NULL);
+ if (memcmp(max_proto, &locking_max_version,
+ sizeof(struct ocfs2_protocol_version))) {
+ BUG_ON(locking_max_version.pv_major != 0);
- lproto = proto;
- list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
- p->sp_proto = lproto;
+ locking_max_version = *max_proto;
+ list_for_each_entry(p, &ocfs2_stack_list, sp_list) {
+ p->sp_max_proto = locking_max_version;
+ }
}
-
spin_unlock(&ocfs2_stack_lock);
}
-EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_locking_protocol);
+EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_max_proto_version);
/*
- * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take
- * "struct ocfs2_lock_res *astarg" instead of "void *astarg" because the
- * underlying stack plugins need to pilfer the lksb off of the lock_res.
- * If some other structure needs to be passed as an astarg, the plugins
- * will need to be given a different avenue to the lksb.
+ * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take no argument
+ * for the ast and bast functions. They will pass the lksb to the ast
+ * and bast. The caller can wrap the lksb with their own structure to
+ * get more information.
*/
int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- struct ocfs2_lock_res *astarg)
+ unsigned int namelen)
{
- BUG_ON(lproto == NULL);
-
+ if (!lksb->lksb_conn)
+ lksb->lksb_conn = conn;
+ else
+ BUG_ON(lksb->lksb_conn != conn);
return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags,
- name, namelen, astarg);
+ name, namelen);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lock);
int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- struct ocfs2_lock_res *astarg)
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags)
{
- BUG_ON(lproto == NULL);
+ BUG_ON(lksb->lksb_conn == NULL);
- return active_stack->sp_ops->dlm_unlock(conn, lksb, flags, astarg);
+ return active_stack->sp_ops->dlm_unlock(conn, lksb, flags);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock);
-int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb)
+int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lock_status(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status);
-int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lvb_valid(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid);
-void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb)
+void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lock_lvb(lksb);
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb);
-void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb)
+void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb)
{
active_stack->sp_ops->dump_lksb(lksb);
}
@@ -312,6 +311,7 @@ EXPORT_SYMBOL_GPL(ocfs2_plock);
int ocfs2_cluster_connect(const char *stack_name,
const char *group,
int grouplen,
+ struct ocfs2_locking_protocol *lproto,
void (*recovery_handler)(int node_num,
void *recovery_data),
void *recovery_data,
@@ -329,6 +329,12 @@ int ocfs2_cluster_connect(const char *stack_name,
goto out;
}
+ if (memcmp(&lproto->lp_max_version, &locking_max_version,
+ sizeof(struct ocfs2_protocol_version))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection),
GFP_KERNEL);
if (!new_conn) {
@@ -341,6 +347,7 @@ int ocfs2_cluster_connect(const char *stack_name,
new_conn->cc_recovery_handler = recovery_handler;
new_conn->cc_recovery_data = recovery_data;
+ new_conn->cc_proto = lproto;
/* Start the new connection at our maximum compatibility level */
new_conn->cc_version = lproto->lp_max_version;
@@ -366,6 +373,24 @@ out:
}
EXPORT_SYMBOL_GPL(ocfs2_cluster_connect);
+/* The caller will ensure all nodes have the same cluster stack */
+int ocfs2_cluster_connect_agnostic(const char *group,
+ int grouplen,
+ struct ocfs2_locking_protocol *lproto,
+ void (*recovery_handler)(int node_num,
+ void *recovery_data),
+ void *recovery_data,
+ struct ocfs2_cluster_connection **conn)
+{
+ char *stack_name = NULL;
+
+ if (cluster_stack_name[0])
+ stack_name = cluster_stack_name;
+ return ocfs2_cluster_connect(stack_name, group, grouplen, lproto,
+ recovery_handler, recovery_data, conn);
+}
+EXPORT_SYMBOL_GPL(ocfs2_cluster_connect_agnostic);
+
/* If hangup_pending is 0, the stack driver will be dropped */
int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
int hangup_pending)
@@ -453,10 +478,10 @@ static ssize_t ocfs2_max_locking_protocol_show(struct kobject *kobj,
ssize_t ret = 0;
spin_lock(&ocfs2_stack_lock);
- if (lproto)
+ if (locking_max_version.pv_major)
ret = snprintf(buf, PAGE_SIZE, "%u.%u\n",
- lproto->lp_max_version.pv_major,
- lproto->lp_max_version.pv_minor);
+ locking_max_version.pv_major,
+ locking_max_version.pv_minor);
spin_unlock(&ocfs2_stack_lock);
return ret;
@@ -685,7 +710,10 @@ static int __init ocfs2_stack_glue_init(void)
static void __exit ocfs2_stack_glue_exit(void)
{
- lproto = NULL;
+ memset(&locking_max_version, 0,
+ sizeof(struct ocfs2_protocol_version));
+ locking_max_version.pv_major = 0;
+ locking_max_version.pv_minor = 0;
ocfs2_sysfs_exit();
if (ocfs2_table_header)
unregister_sysctl_table(ocfs2_table_header);
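
The stackglue.c hunks above replace the single global lproto pointer with a per-connection protocol: ocfs2_dlm_lock() stamps the connection into lksb->lksb_conn the first time a lksb is used, and the plugin ast wrappers dispatch through lksb->lksb_conn->cc_proto instead of a plugin-wide sp_proto. A minimal userspace model of that dispatch path; every name here is a stand-in for the kernel structures, not kernel code.

#include <stdio.h>

struct lksb;

struct locking_protocol {
	void (*lock_ast)(struct lksb *lksb);
};

struct cluster_connection {
	const char *name;
	struct locking_protocol *proto;     /* models cc_proto */
};

struct lksb {
	struct cluster_connection *conn;    /* models lksb_conn */
};

/* Models ocfs2_dlm_lock(): bind the connection on first use, then hand
 * only the lksb to the stack; the ast comes back with just the lksb and
 * the wrapper dispatches through its connection. */
static void dlm_lock_model(struct cluster_connection *conn, struct lksb *lksb)
{
	if (!lksb->conn)
		lksb->conn = conn;
	/* ... the stack would queue the request here ... */
	lksb->conn->proto->lock_ast(lksb);  /* what the plugin wrapper does */
}

static void fs_lock_ast(struct lksb *lksb)
{
	printf("ast delivered via connection '%s'\n", lksb->conn->name);
}

int main(void)
{
	struct locking_protocol proto = { .lock_ast = fs_lock_ast };
	struct cluster_connection conn = { .name = "example-domain", .proto = &proto };
	struct lksb lksb = { 0 };

	dlm_lock_model(&conn, &lksb);
	return 0;
}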
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 03a44d60eac..8ce7398ae1d 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -56,17 +56,6 @@ struct ocfs2_protocol_version {
};
/*
- * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
- */
-struct ocfs2_locking_protocol {
- struct ocfs2_protocol_version lp_max_version;
- void (*lp_lock_ast)(void *astarg);
- void (*lp_blocking_ast)(void *astarg, int level);
- void (*lp_unlock_ast)(void *astarg, int error);
-};
-
-
-/*
* The dlm_lockstatus struct includes lvb space, but the dlm_lksb struct only
* has a pointer to separately allocated lvb space. This struct exists only to
* include in the lksb union to make space for a combined dlm_lksb and lvb.
@@ -81,12 +70,27 @@ struct fsdlm_lksb_plus_lvb {
* size of the union is known. Lock status structures are embedded in
* ocfs2 inodes.
*/
-union ocfs2_dlm_lksb {
- struct dlm_lockstatus lksb_o2dlm;
- struct dlm_lksb lksb_fsdlm;
- struct fsdlm_lksb_plus_lvb padding;
+struct ocfs2_cluster_connection;
+struct ocfs2_dlm_lksb {
+ union {
+ struct dlm_lockstatus lksb_o2dlm;
+ struct dlm_lksb lksb_fsdlm;
+ struct fsdlm_lksb_plus_lvb padding;
+ };
+ struct ocfs2_cluster_connection *lksb_conn;
+};
+
+/*
+ * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
+ */
+struct ocfs2_locking_protocol {
+ struct ocfs2_protocol_version lp_max_version;
+ void (*lp_lock_ast)(struct ocfs2_dlm_lksb *lksb);
+ void (*lp_blocking_ast)(struct ocfs2_dlm_lksb *lksb, int level);
+ void (*lp_unlock_ast)(struct ocfs2_dlm_lksb *lksb, int error);
};
+
/*
* A cluster connection. Mostly opaque to ocfs2, the connection holds
* state for the underlying stack. ocfs2 does use cc_version to determine
@@ -96,6 +100,7 @@ struct ocfs2_cluster_connection {
char cc_name[GROUP_NAME_MAX];
int cc_namelen;
struct ocfs2_protocol_version cc_version;
+ struct ocfs2_locking_protocol *cc_proto;
void (*cc_recovery_handler)(int node_num, void *recovery_data);
void *cc_recovery_data;
void *cc_lockspace;
@@ -155,27 +160,29 @@ struct ocfs2_stack_operations {
*
* ast and bast functions are not part of the call because the
* stack will likely want to wrap ast and bast calls before passing
- * them to stack->sp_proto.
+ * them to stack->sp_proto. There is no astarg. The lksb will
+ * be passed back to the ast and bast functions. The caller can
+ * use this to find their object.
*/
int (*dlm_lock)(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- void *astarg);
+ unsigned int namelen);
/*
* Call the underlying dlm unlock function. The ->dlm_unlock()
* function should convert the flags as appropriate.
*
* The unlock ast is not passed, as the stack will want to wrap
- * it before calling stack->sp_proto->lp_unlock_ast().
+ * it before calling stack->sp_proto->lp_unlock_ast(). There is
+ * no astarg. The lksb will be passed back to the unlock ast
+ * function. The caller can use this to find their object.
*/
int (*dlm_unlock)(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- void *astarg);
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags);
/*
* Return the status of the current lock status block. The fs
@@ -183,17 +190,17 @@ struct ocfs2_stack_operations {
* callback pulls out the stack-specific lksb, converts the status
* to a proper errno, and returns it.
*/
- int (*lock_status)(union ocfs2_dlm_lksb *lksb);
+ int (*lock_status)(struct ocfs2_dlm_lksb *lksb);
/*
* Return non-zero if the LVB is valid.
*/
- int (*lvb_valid)(union ocfs2_dlm_lksb *lksb);
+ int (*lvb_valid)(struct ocfs2_dlm_lksb *lksb);
/*
* Pull the lvb pointer off of the stack-specific lksb.
*/
- void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb);
+ void *(*lock_lvb)(struct ocfs2_dlm_lksb *lksb);
/*
* Cluster-aware posix locks
@@ -210,7 +217,7 @@ struct ocfs2_stack_operations {
	 * This is an optional debugging hook. If provided, the
* stack can dump debugging information about this lock.
*/
- void (*dump_lksb)(union ocfs2_dlm_lksb *lksb);
+ void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb);
};
/*
@@ -226,7 +233,7 @@ struct ocfs2_stack_plugin {
/* These are managed by the stackglue code. */
struct list_head sp_list;
unsigned int sp_count;
- struct ocfs2_locking_protocol *sp_proto;
+ struct ocfs2_protocol_version sp_max_proto;
};
@@ -234,10 +241,22 @@ struct ocfs2_stack_plugin {
int ocfs2_cluster_connect(const char *stack_name,
const char *group,
int grouplen,
+ struct ocfs2_locking_protocol *lproto,
void (*recovery_handler)(int node_num,
void *recovery_data),
void *recovery_data,
struct ocfs2_cluster_connection **conn);
+/*
+ * Used by callers that don't store their stack name. They must ensure
+ * all nodes have the same stack.
+ */
+int ocfs2_cluster_connect_agnostic(const char *group,
+ int grouplen,
+ struct ocfs2_locking_protocol *lproto,
+ void (*recovery_handler)(int node_num,
+ void *recovery_data),
+ void *recovery_data,
+ struct ocfs2_cluster_connection **conn);
int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
int hangup_pending);
void ocfs2_cluster_hangup(const char *group, int grouplen);
@@ -246,26 +265,24 @@ int ocfs2_cluster_this_node(unsigned int *node);
struct ocfs2_lock_res;
int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn,
int mode,
- union ocfs2_dlm_lksb *lksb,
+ struct ocfs2_dlm_lksb *lksb,
u32 flags,
void *name,
- unsigned int namelen,
- struct ocfs2_lock_res *astarg);
+ unsigned int namelen);
int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn,
- union ocfs2_dlm_lksb *lksb,
- u32 flags,
- struct ocfs2_lock_res *astarg);
+ struct ocfs2_dlm_lksb *lksb,
+ u32 flags);
-int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb);
-int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb);
-void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb);
-void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb);
+int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb);
+int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb);
+void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb);
+void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb);
int ocfs2_stack_supports_plocks(void);
int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino,
struct file *file, int cmd, struct file_lock *fl);
-void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto);
+void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto);
/* Used by stack plugins */
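
The header comments above make the point that there is no astarg any more: the stack hands the lksb back, and the caller embeds the lksb in its own structure and recovers that structure from the pointer, as dlmglue.c does with ocfs2_lksb_to_lock_res(). A small standalone illustration of that container_of pattern; the structures below are made up for the example.

#include <stdio.h>
#include <stddef.h>

/* Same idea as the kernel's container_of(): back up from a pointer to
 * an embedded member to the structure that contains it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dlm_lksb_model {
	int status;
};

struct lock_res_model {
	const char *name;
	struct dlm_lksb_model lksb;          /* embedded, like l_lksb */
};

/* The ast receives only the lksb, yet can find the owning lock resource. */
static void lock_ast(struct dlm_lksb_model *lksb)
{
	struct lock_res_model *res =
		container_of(lksb, struct lock_res_model, lksb);

	printf("ast for lock resource %s (status %d)\n", res->name, lksb->status);
}

int main(void)
{
	struct lock_res_model res = { .name = "example_resource", .lksb = { 0 } };

	lock_ast(&res.lksb);
	return 0;
}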
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index c30b644d957..c3c60bc3e07 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -51,7 +51,7 @@
#define ALLOC_NEW_GROUP 0x1
#define ALLOC_GROUPS_FROM_GLOBAL 0x2
-#define OCFS2_MAX_INODES_TO_STEAL 1024
+#define OCFS2_MAX_TO_STEAL 1024
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
@@ -637,12 +637,113 @@ bail:
return status;
}
+static void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb)
+{
+ spin_lock(&osb->osb_lock);
+ osb->s_inode_steal_slot = OCFS2_INVALID_SLOT;
+ spin_unlock(&osb->osb_lock);
+ atomic_set(&osb->s_num_inodes_stolen, 0);
+}
+
+static void ocfs2_init_meta_steal_slot(struct ocfs2_super *osb)
+{
+ spin_lock(&osb->osb_lock);
+ osb->s_meta_steal_slot = OCFS2_INVALID_SLOT;
+ spin_unlock(&osb->osb_lock);
+ atomic_set(&osb->s_num_meta_stolen, 0);
+}
+
+void ocfs2_init_steal_slots(struct ocfs2_super *osb)
+{
+ ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_meta_steal_slot(osb);
+}
+
+static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type)
+{
+ spin_lock(&osb->osb_lock);
+ if (type == INODE_ALLOC_SYSTEM_INODE)
+ osb->s_inode_steal_slot = slot;
+ else if (type == EXTENT_ALLOC_SYSTEM_INODE)
+ osb->s_meta_steal_slot = slot;
+ spin_unlock(&osb->osb_lock);
+}
+
+static int __ocfs2_get_steal_slot(struct ocfs2_super *osb, int type)
+{
+ int slot = OCFS2_INVALID_SLOT;
+
+ spin_lock(&osb->osb_lock);
+ if (type == INODE_ALLOC_SYSTEM_INODE)
+ slot = osb->s_inode_steal_slot;
+ else if (type == EXTENT_ALLOC_SYSTEM_INODE)
+ slot = osb->s_meta_steal_slot;
+ spin_unlock(&osb->osb_lock);
+
+ return slot;
+}
+
+static int ocfs2_get_inode_steal_slot(struct ocfs2_super *osb)
+{
+ return __ocfs2_get_steal_slot(osb, INODE_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_get_meta_steal_slot(struct ocfs2_super *osb)
+{
+ return __ocfs2_get_steal_slot(osb, EXTENT_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_steal_resource(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac,
+ int type)
+{
+ int i, status = -ENOSPC;
+ int slot = __ocfs2_get_steal_slot(osb, type);
+
+ /* Start stealing resources from the first slot after ours. */
+ if (slot == OCFS2_INVALID_SLOT)
+ slot = osb->slot_num + 1;
+
+ for (i = 0; i < osb->max_slots; i++, slot++) {
+ if (slot == osb->max_slots)
+ slot = 0;
+
+ if (slot == osb->slot_num)
+ continue;
+
+ status = ocfs2_reserve_suballoc_bits(osb, ac,
+ type,
+ (u32)slot, NULL,
+ NOT_ALLOC_NEW_GROUP);
+ if (status >= 0) {
+ __ocfs2_set_steal_slot(osb, slot, type);
+ break;
+ }
+
+ ocfs2_free_ac_resource(ac);
+ }
+
+ return status;
+}
+
+static int ocfs2_steal_inode(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac)
+{
+ return ocfs2_steal_resource(osb, ac, INODE_ALLOC_SYSTEM_INODE);
+}
+
+static int ocfs2_steal_meta(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac)
+{
+ return ocfs2_steal_resource(osb, ac, EXTENT_ALLOC_SYSTEM_INODE);
+}
+
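To illustrate the round-robin probe order implemented by ocfs2_steal_resource() above, a sketch with example numbers (max_slots = 4, slot_num = 1, no remembered steal slot):

	int slot = 1 + 1;			/* first slot after ours */
	int i;

	for (i = 0; i < 4; i++, slot++) {
		if (slot == 4)
			slot = 0;		/* wrap around */
		if (slot == 1)
			continue;		/* never steal from ourselves */
		/*
		 * Probes slots 2, 3, then 0; the first slot that yields
		 * bits is remembered via __ocfs2_set_steal_slot() so the
		 * next reservation retries it directly.
		 */
	}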
int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
int blocks,
struct ocfs2_alloc_context **ac)
{
int status;
- u32 slot;
+ int slot = ocfs2_get_meta_steal_slot(osb);
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
@@ -653,12 +754,34 @@ int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
(*ac)->ac_bits_wanted = blocks;
(*ac)->ac_which = OCFS2_AC_USE_META;
- slot = osb->slot_num;
(*ac)->ac_group_search = ocfs2_block_group_search;
+ if (slot != OCFS2_INVALID_SLOT &&
+ atomic_read(&osb->s_num_meta_stolen) < OCFS2_MAX_TO_STEAL)
+ goto extent_steal;
+
+ atomic_set(&osb->s_num_meta_stolen, 0);
status = ocfs2_reserve_suballoc_bits(osb, (*ac),
EXTENT_ALLOC_SYSTEM_INODE,
- slot, NULL, ALLOC_NEW_GROUP);
+ (u32)osb->slot_num, NULL,
+ ALLOC_NEW_GROUP);
+
+
+ if (status >= 0) {
+ status = 0;
+ if (slot != OCFS2_INVALID_SLOT)
+ ocfs2_init_meta_steal_slot(osb);
+ goto bail;
+ } else if (status < 0 && status != -ENOSPC) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_free_ac_resource(*ac);
+
+extent_steal:
+ status = ocfs2_steal_meta(osb, *ac);
+ atomic_inc(&osb->s_num_meta_stolen);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -685,43 +808,11 @@ int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
ac);
}
-static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb,
- struct ocfs2_alloc_context *ac)
-{
- int i, status = -ENOSPC;
- s16 slot = ocfs2_get_inode_steal_slot(osb);
-
- /* Start to steal inodes from the first slot after ours. */
- if (slot == OCFS2_INVALID_SLOT)
- slot = osb->slot_num + 1;
-
- for (i = 0; i < osb->max_slots; i++, slot++) {
- if (slot == osb->max_slots)
- slot = 0;
-
- if (slot == osb->slot_num)
- continue;
-
- status = ocfs2_reserve_suballoc_bits(osb, ac,
- INODE_ALLOC_SYSTEM_INODE,
- slot, NULL,
- NOT_ALLOC_NEW_GROUP);
- if (status >= 0) {
- ocfs2_set_inode_steal_slot(osb, slot);
- break;
- }
-
- ocfs2_free_ac_resource(ac);
- }
-
- return status;
-}
-
int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
struct ocfs2_alloc_context **ac)
{
int status;
- s16 slot = ocfs2_get_inode_steal_slot(osb);
+ int slot = ocfs2_get_inode_steal_slot(osb);
u64 alloc_group;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
@@ -754,14 +845,14 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
* need to check our slots to see whether there is some space for us.
*/
if (slot != OCFS2_INVALID_SLOT &&
- atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL)
+ atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_TO_STEAL)
goto inode_steal;
atomic_set(&osb->s_num_inodes_stolen, 0);
alloc_group = osb->osb_inode_alloc_group;
status = ocfs2_reserve_suballoc_bits(osb, *ac,
INODE_ALLOC_SYSTEM_INODE,
- osb->slot_num,
+ (u32)osb->slot_num,
&alloc_group,
ALLOC_NEW_GROUP |
ALLOC_GROUPS_FROM_GLOBAL);
@@ -789,7 +880,7 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
ocfs2_free_ac_resource(*ac);
inode_steal:
- status = ocfs2_steal_inode_from_other_nodes(osb, *ac);
+ status = ocfs2_steal_inode(osb, *ac);
atomic_inc(&osb->s_num_inodes_stolen);
if (status < 0) {
if (status != -ENOSPC)
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index 8c9a78a4316..fa60723c43e 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -56,6 +56,7 @@ struct ocfs2_alloc_context {
is the same as ~0 - unlimited */
};
+void ocfs2_init_steal_slots(struct ocfs2_super *osb);
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac);
static inline int ocfs2_alloc_context_bits_left(struct ocfs2_alloc_context *ac)
{
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 755cd49a5ef..dee03197a49 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -69,6 +69,7 @@
#include "xattr.h"
#include "quota.h"
#include "refcounttree.h"
+#include "suballoc.h"
#include "buffer_head_io.h"
@@ -301,9 +302,12 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
spin_lock(&osb->osb_lock);
out += snprintf(buf + out, len - out,
- "%10s => Slot: %d NumStolen: %d\n", "Steal",
+ "%10s => InodeSlot: %d StolenInodes: %d, "
+ "MetaSlot: %d StolenMeta: %d\n", "Steal",
osb->s_inode_steal_slot,
- atomic_read(&osb->s_num_inodes_stolen));
+ atomic_read(&osb->s_num_inodes_stolen),
+ osb->s_meta_steal_slot,
+ atomic_read(&osb->s_num_meta_stolen));
spin_unlock(&osb->osb_lock);
out += snprintf(buf + out, len - out, "OrphanScan => ");
@@ -1997,7 +2001,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
osb->blocked_lock_count = 0;
spin_lock_init(&osb->osb_lock);
spin_lock_init(&osb->osb_xattr_lock);
- ocfs2_init_inode_steal_slot(osb);
+ ocfs2_init_steal_slots(osb);
atomic_set(&osb->alloc_stats.moves, 0);
atomic_set(&osb->alloc_stats.local_data, 0);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 8fc6fb071c6..d1b0d386f6d 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -116,10 +116,11 @@ static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
};
struct ocfs2_xattr_info {
- int name_index;
- const char *name;
- const void *value;
- size_t value_len;
+ int xi_name_index;
+ const char *xi_name;
+ int xi_name_len;
+ const void *xi_value;
+ size_t xi_value_len;
};
struct ocfs2_xattr_search {
@@ -137,6 +138,115 @@ struct ocfs2_xattr_search {
int not_found;
};
+/* Operations on struct ocfs2_xa_entry */
+struct ocfs2_xa_loc;
+struct ocfs2_xa_loc_operations {
+ /*
+ * Journal functions
+ */
+ int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
+ int type);
+ void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);
+
+ /*
+ * Return a pointer to the appropriate buffer in loc->xl_storage
+ * at the given offset from loc->xl_header.
+ */
+ void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);
+
+ /* Can we reuse the existing entry for the new value? */
+ int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi);
+
+ /* How much space is needed for the new value? */
+ int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi);
+
+ /*
+ * Return the offset of the first name+value pair. This is
+ * the start of our downward-filling free space.
+ */
+ int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);
+
+ /*
+ * Remove the name+value at this location. Do whatever is
+ * appropriate with the remaining name+value pairs.
+ */
+ void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);
+
+ /* Fill xl_entry with a new entry */
+ void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);
+
+ /* Add name+value storage to an entry */
+ void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);
+
+ /*
+ * Initialize the value buf's access and bh fields for this entry.
+ * ocfs2_xa_fill_value_buf() will handle the xv pointer.
+ */
+ void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb);
+};
+
+/*
+ * Describes an xattr entry location. This is a memory structure
+ * tracking the on-disk structure.
+ */
+struct ocfs2_xa_loc {
+ /* This xattr belongs to this inode */
+ struct inode *xl_inode;
+
+ /* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
+ struct ocfs2_xattr_header *xl_header;
+
+ /* Bytes from xl_header to the end of the storage */
+ int xl_size;
+
+ /*
+ * The ocfs2_xattr_entry this location describes. If this is
+ * NULL, this location describes the on-disk structure where it
+ * would have been.
+ */
+ struct ocfs2_xattr_entry *xl_entry;
+
+ /*
+ * Internal housekeeping
+ */
+
+ /* Buffer(s) containing this entry */
+ void *xl_storage;
+
+ /* Operations on the storage backing this location */
+ const struct ocfs2_xa_loc_operations *xl_ops;
+};
+
+/*
+ * Convenience functions to calculate how much space is needed for a
+ * given name+value pair
+ */
+static int namevalue_size(int name_len, uint64_t value_len)
+{
+ if (value_len > OCFS2_XATTR_INLINE_SIZE)
+ return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
+ else
+ return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
+}
+
+static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
+{
+ return namevalue_size(xi->xi_name_len, xi->xi_value_len);
+}
+
+static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
+{
+ u64 value_len = le64_to_cpu(xe->xe_value_size);
+
+ BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) &&
+ ocfs2_xattr_is_local(xe));
+ return namevalue_size(xe->xe_name_len, value_len);
+}
+
+
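A brief worked example of the helpers above, assuming OCFS2_XATTR_SIZE() pads to a 4-byte boundary and OCFS2_XATTR_INLINE_SIZE is 80 bytes:

	/*
	 *   namevalue_size(9, 6)    = OCFS2_XATTR_SIZE(9) + OCFS2_XATTR_SIZE(6)
	 *                           = 12 + 8 = 20 bytes    (value stored inline)
	 *   namevalue_size(9, 4096) = OCFS2_XATTR_SIZE(9) + OCFS2_XATTR_ROOT_SIZE
	 *                             (value lives in an external tree; only
	 *                              the root sits next to the name)
	 */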
static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb,
struct ocfs2_xattr_header *xh,
int index,
@@ -212,14 +322,6 @@ static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb)
return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits);
}
-static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb)
-{
- u16 len = sb->s_blocksize -
- offsetof(struct ocfs2_xattr_header, xh_entries);
-
- return len / sizeof(struct ocfs2_xattr_entry);
-}
-
#define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr)
#define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data)
#define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0))
@@ -463,35 +565,22 @@ static u32 ocfs2_xattr_name_hash(struct inode *inode,
return hash;
}
-/*
- * ocfs2_xattr_hash_entry()
- *
- * Compute the hash of an extended attribute.
- */
-static void ocfs2_xattr_hash_entry(struct inode *inode,
- struct ocfs2_xattr_header *header,
- struct ocfs2_xattr_entry *entry)
+static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
{
- u32 hash = 0;
- char *name = (char *)header + le16_to_cpu(entry->xe_name_offset);
-
- hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len);
- entry->xe_name_hash = cpu_to_le32(hash);
-
- return;
+ return namevalue_size(name_len, value_len) +
+ sizeof(struct ocfs2_xattr_entry);
}
-static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len)
+static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi)
{
- int size = 0;
-
- if (value_len <= OCFS2_XATTR_INLINE_SIZE)
- size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
- else
- size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- size += sizeof(struct ocfs2_xattr_entry);
+ return namevalue_size_xi(xi) +
+ sizeof(struct ocfs2_xattr_entry);
+}
- return size;
+static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe)
+{
+ return namevalue_size_xe(xe) +
+ sizeof(struct ocfs2_xattr_entry);
}
int ocfs2_calc_security_init(struct inode *dir,
@@ -1308,452 +1397,897 @@ out:
return ret;
}
-static int ocfs2_xattr_cleanup(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_value_buf *vb,
- size_t offs)
+static int ocfs2_xa_check_space_helper(int needed_space, int free_start,
+ int num_entries)
{
- int ret = 0;
- size_t name_len = strlen(xi->name);
- void *val = xs->base + offs;
- size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
+ int free_space;
- ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- /* Decrease xattr count */
- le16_add_cpu(&xs->header->xh_count, -1);
- /* Remove the xattr entry and tree root which has already be set*/
- memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry));
- memset(val, 0, size);
+ if (!needed_space)
+ return 0;
- ret = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (ret < 0)
- mlog_errno(ret);
-out:
- return ret;
+ free_space = free_start -
+ sizeof(struct ocfs2_xattr_header) -
+ (num_entries * sizeof(struct ocfs2_xattr_entry)) -
+ OCFS2_XATTR_HEADER_GAP;
+ if (free_space < 0)
+ return -EIO;
+ if (free_space < needed_space)
+ return -ENOSPC;
+
+ return 0;
}
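A worked example of the space check above (struct sizes here are assumptions, not taken from the patch): with free_start = 2048, four existing entries, sizeof(struct ocfs2_xattr_entry) = 16, and OCFS2_XATTR_HEADER_GAP = 4:

	/*
	 *   free_space = 2048 - sizeof(struct ocfs2_xattr_header)
	 *                     - 4 * 16 - 4
	 *
	 * A negative free_space indicates on-disk corruption (-EIO); a value
	 * smaller than needed_space returns -ENOSPC; otherwise the insert
	 * can proceed.
	 */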
-static int ocfs2_xattr_update_entry(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_value_buf *vb,
- size_t offs)
+static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc,
+ int type)
{
- int ret;
+ return loc->xl_ops->xlo_journal_access(handle, loc, type);
+}
- ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc)
+{
+ loc->xl_ops->xlo_journal_dirty(handle, loc);
+}
- xs->here->xe_name_offset = cpu_to_le16(offs);
- xs->here->xe_value_size = cpu_to_le64(xi->value_len);
- if (xi->value_len <= OCFS2_XATTR_INLINE_SIZE)
- ocfs2_xattr_set_local(xs->here, 1);
- else
- ocfs2_xattr_set_local(xs->here, 0);
- ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
+/* Give a pointer into the storage for the given offset */
+static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset)
+{
+ BUG_ON(offset >= loc->xl_size);
+ return loc->xl_ops->xlo_offset_pointer(loc, offset);
+}
- ret = ocfs2_journal_dirty(handle, vb->vb_bh);
- if (ret < 0)
- mlog_errno(ret);
-out:
- return ret;
+/*
+ * Wipe the name+value pair and allow the storage to reclaim it. This
+ * must be followed by either removal of the entry or a call to
+ * ocfs2_xa_add_namevalue().
+ */
+static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc)
+{
+ loc->xl_ops->xlo_wipe_namevalue(loc);
}
/*
- * ocfs2_xattr_set_value_outside()
- *
- * Set large size value in B tree.
+ * Find lowest offset to a name+value pair. This is the start of our
+ * downward-growing free space.
*/
-static int ocfs2_xattr_set_value_outside(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt,
- struct ocfs2_xattr_value_buf *vb,
- size_t offs)
+static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc)
{
- size_t name_len = strlen(xi->name);
- void *val = xs->base + offs;
- struct ocfs2_xattr_value_root *xv = NULL;
- size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- int ret = 0;
+ return loc->xl_ops->xlo_get_free_start(loc);
+}
- memset(val, 0, size);
- memcpy(val, xi->name, name_len);
- xv = (struct ocfs2_xattr_value_root *)
- (val + OCFS2_XATTR_SIZE(name_len));
- xv->xr_clusters = 0;
- xv->xr_last_eb_blk = 0;
- xv->xr_list.l_tree_depth = 0;
- xv->xr_list.l_count = cpu_to_le16(1);
- xv->xr_list.l_next_free_rec = 0;
- vb->vb_xv = xv;
-
- ret = ocfs2_xattr_value_truncate(inode, vb, xi->value_len, ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- return ret;
+/* Can we reuse loc->xl_entry for xi? */
+static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ return loc->xl_ops->xlo_can_reuse(loc, xi);
+}
+
+/* How much free space is needed to set the new value */
+static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ return loc->xl_ops->xlo_check_space(loc, xi);
+}
+
+static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ loc->xl_ops->xlo_add_entry(loc, name_hash);
+ loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
+ /*
+ * We can't leave the new entry's xe_name_offset at zero or
+ * add_namevalue() will go nuts. We set it to the size of our
+ * storage so that it can never be less than any other entry.
+ */
+ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
+}
+
+static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ int size = namevalue_size_xi(xi);
+ int nameval_offset;
+ char *nameval_buf;
+
+ loc->xl_ops->xlo_add_namevalue(loc, size);
+ loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
+ loc->xl_entry->xe_name_len = xi->xi_name_len;
+ ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index);
+ ocfs2_xattr_set_local(loc->xl_entry,
+ xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE);
+
+ nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
+ memset(nameval_buf, 0, size);
+ memcpy(nameval_buf, xi->xi_name, xi->xi_name_len);
+}
+
+static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb)
+{
+ int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
+
+ /* Value bufs are for value trees */
+ BUG_ON(ocfs2_xattr_is_local(loc->xl_entry));
+ BUG_ON(namevalue_size_xe(loc->xl_entry) !=
+ (name_size + OCFS2_XATTR_ROOT_SIZE));
+
+ loc->xl_ops->xlo_fill_value_buf(loc, vb);
+ vb->vb_xv =
+ (struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc,
+ nameval_offset +
+ name_size);
+}
+
+static int ocfs2_xa_block_journal_access(handle_t *handle,
+ struct ocfs2_xa_loc *loc, int type)
+{
+ struct buffer_head *bh = loc->xl_storage;
+ ocfs2_journal_access_func access;
+
+ if (loc->xl_size == (bh->b_size -
+ offsetof(struct ocfs2_xattr_block,
+ xb_attrs.xb_header)))
+ access = ocfs2_journal_access_xb;
+ else
+ access = ocfs2_journal_access_di;
+ return access(handle, INODE_CACHE(loc->xl_inode), bh, type);
+}
+
+static void ocfs2_xa_block_journal_dirty(handle_t *handle,
+ struct ocfs2_xa_loc *loc)
+{
+ struct buffer_head *bh = loc->xl_storage;
+
+ ocfs2_journal_dirty(handle, bh);
+}
+
+static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc,
+ int offset)
+{
+ return (char *)loc->xl_header + offset;
+}
+
+static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ /*
+ * Block storage is strict. If the sizes aren't exact, we will
+ * remove the old one and reinsert the new.
+ */
+ return namevalue_size_xe(loc->xl_entry) ==
+ namevalue_size_xi(xi);
+}
+
+static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ int i, count = le16_to_cpu(xh->xh_count);
+ int offset, free_start = loc->xl_size;
+
+ for (i = 0; i < count; i++) {
+ offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
+ if (offset < free_start)
+ free_start = offset;
}
- ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, vb, offs);
- if (ret < 0) {
- mlog_errno(ret);
- return ret;
+
+ return free_start;
+}
+
+static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ int count = le16_to_cpu(loc->xl_header->xh_count);
+ int free_start = ocfs2_xa_get_free_start(loc);
+ int needed_space = ocfs2_xi_entry_usage(xi);
+
+ /*
+ * Block storage will reclaim the original entry before inserting
+ * the new value, so we only need the difference. If the new
+ * entry is smaller than the old one, we don't need anything.
+ */
+ if (loc->xl_entry) {
+ /* Don't need space if we're reusing! */
+ if (ocfs2_xa_can_reuse_entry(loc, xi))
+ needed_space = 0;
+ else
+ needed_space -= ocfs2_xe_entry_usage(loc->xl_entry);
}
- ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb,
- xi->value, xi->value_len);
- if (ret < 0)
- mlog_errno(ret);
+ if (needed_space < 0)
+ needed_space = 0;
+ return ocfs2_xa_check_space_helper(needed_space, free_start, count);
+}
- return ret;
+/*
+ * Block storage for xattrs keeps the name+value pairs compacted. When
+ * we remove one, we have to shift any that preceded it towards the end.
+ */
+static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
+{
+ int i, offset;
+ int namevalue_offset, first_namevalue_offset, namevalue_size;
+ struct ocfs2_xattr_entry *entry = loc->xl_entry;
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ int count = le16_to_cpu(xh->xh_count);
+
+ namevalue_offset = le16_to_cpu(entry->xe_name_offset);
+ namevalue_size = namevalue_size_xe(entry);
+ first_namevalue_offset = ocfs2_xa_get_free_start(loc);
+
+ /* Shift the name+value pairs */
+ memmove((char *)xh + first_namevalue_offset + namevalue_size,
+ (char *)xh + first_namevalue_offset,
+ namevalue_offset - first_namevalue_offset);
+ memset((char *)xh + first_namevalue_offset, 0, namevalue_size);
+
+ /* Now tell xh->xh_entries about it */
+ for (i = 0; i < count; i++) {
+ offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
+ if (offset < namevalue_offset)
+ le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
+ namevalue_size);
+ }
+
+ /*
+ * Note that we don't update xh_free_start or xh_name_value_len
+ * because they're not used in block-stored xattrs.
+ */
+}
+
+static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ int count = le16_to_cpu(loc->xl_header->xh_count);
+ loc->xl_entry = &(loc->xl_header->xh_entries[count]);
+ le16_add_cpu(&loc->xl_header->xh_count, 1);
+ memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
+}
+
+static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size)
+{
+ int free_start = ocfs2_xa_get_free_start(loc);
+
+ loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size);
+}
+
+static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb)
+{
+ struct buffer_head *bh = loc->xl_storage;
+
+ if (loc->xl_size == (bh->b_size -
+ offsetof(struct ocfs2_xattr_block,
+ xb_attrs.xb_header)))
+ vb->vb_access = ocfs2_journal_access_xb;
+ else
+ vb->vb_access = ocfs2_journal_access_di;
+ vb->vb_bh = bh;
}
/*
- * ocfs2_xattr_set_entry_local()
- *
- * Set, replace or remove extended attribute in local.
+ * Operations for xattrs stored in blocks. This includes inline inode
+ * storage and unindexed ocfs2_xattr_blocks.
*/
-static void ocfs2_xattr_set_entry_local(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_entry *last,
- size_t min_offs)
+static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = {
+ .xlo_journal_access = ocfs2_xa_block_journal_access,
+ .xlo_journal_dirty = ocfs2_xa_block_journal_dirty,
+ .xlo_offset_pointer = ocfs2_xa_block_offset_pointer,
+ .xlo_check_space = ocfs2_xa_block_check_space,
+ .xlo_can_reuse = ocfs2_xa_block_can_reuse,
+ .xlo_get_free_start = ocfs2_xa_block_get_free_start,
+ .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue,
+ .xlo_add_entry = ocfs2_xa_block_add_entry,
+ .xlo_add_namevalue = ocfs2_xa_block_add_namevalue,
+ .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf,
+};
+
+static int ocfs2_xa_bucket_journal_access(handle_t *handle,
+ struct ocfs2_xa_loc *loc, int type)
{
- size_t name_len = strlen(xi->name);
- int i;
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
- if (xi->value && xs->not_found) {
- /* Insert the new xattr entry. */
- le16_add_cpu(&xs->header->xh_count, 1);
- ocfs2_xattr_set_type(last, xi->name_index);
- ocfs2_xattr_set_local(last, 1);
- last->xe_name_len = name_len;
- } else {
- void *first_val;
- void *val;
- size_t offs, size;
-
- first_val = xs->base + min_offs;
- offs = le16_to_cpu(xs->here->xe_name_offset);
- val = xs->base + offs;
-
- if (le64_to_cpu(xs->here->xe_value_size) >
- OCFS2_XATTR_INLINE_SIZE)
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_ROOT_SIZE;
+ return ocfs2_xattr_bucket_journal_access(handle, bucket, type);
+}
+
+static void ocfs2_xa_bucket_journal_dirty(handle_t *handle,
+ struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+
+ ocfs2_xattr_bucket_journal_dirty(handle, bucket);
+}
+
+static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc,
+ int offset)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+ int block, block_offset;
+
+ /* The header is at the front of the bucket */
+ block = offset >> loc->xl_inode->i_sb->s_blocksize_bits;
+ block_offset = offset % loc->xl_inode->i_sb->s_blocksize;
+
+ return bucket_block(bucket, block) + block_offset;
+}
+
+static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ return namevalue_size_xe(loc->xl_entry) >=
+ namevalue_size_xi(xi);
+}
+
+static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+ return le16_to_cpu(bucket_xh(bucket)->xh_free_start);
+}
+
+static int ocfs2_bucket_align_free_start(struct super_block *sb,
+ int free_start, int size)
+{
+ /*
+ * We need to make sure that the name+value pair fits within
+ * one block.
+ */
+ if (((free_start - size) >> sb->s_blocksize_bits) !=
+ ((free_start - 1) >> sb->s_blocksize_bits))
+ free_start -= free_start % sb->s_blocksize;
+
+ return free_start;
+}
+
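A worked example of the alignment above, assuming a 4096-byte block size: with free_start = 4100 and size = 200, the pair would span bytes 3900-4099 and straddle the first block boundary, so free_start is pulled back to 4096 and the pair lands entirely inside block 0.

	/*
	 *   (4100 - 200) >> 12 == 0,  (4100 - 1) >> 12 == 1   -> straddles
	 *   free_start -= 4100 % 4096                          -> 4096
	 *   name+value then occupies bytes 3896..4095, within one block
	 */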
+static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+{
+ int rc;
+ int count = le16_to_cpu(loc->xl_header->xh_count);
+ int free_start = ocfs2_xa_get_free_start(loc);
+ int needed_space = ocfs2_xi_entry_usage(xi);
+ int size = namevalue_size_xi(xi);
+ struct super_block *sb = loc->xl_inode->i_sb;
+
+ /*
+ * Bucket storage does not reclaim name+value pairs it cannot
+ * reuse. They live as holes until the bucket fills, and then
+ * the bucket is defragmented. However, the bucket can reclaim
+ * the ocfs2_xattr_entry.
+ */
+ if (loc->xl_entry) {
+ /* Don't need space if we're reusing! */
+ if (ocfs2_xa_can_reuse_entry(loc, xi))
+ needed_space = 0;
else
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
-
- if (xi->value && size == OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len)) {
- /* The old and the new value have the
- same size. Just replace the value. */
- ocfs2_xattr_set_local(xs->here, 1);
- xs->here->xe_value_size = cpu_to_le64(xi->value_len);
- /* Clear value bytes. */
- memset(val + OCFS2_XATTR_SIZE(name_len),
- 0,
- OCFS2_XATTR_SIZE(xi->value_len));
- memcpy(val + OCFS2_XATTR_SIZE(name_len),
- xi->value,
- xi->value_len);
- return;
- }
- /* Remove the old name+value. */
- memmove(first_val + size, first_val, val - first_val);
- memset(first_val, 0, size);
- xs->here->xe_name_hash = 0;
- xs->here->xe_name_offset = 0;
- ocfs2_xattr_set_local(xs->here, 1);
- xs->here->xe_value_size = 0;
-
- min_offs += size;
-
- /* Adjust all value offsets. */
- last = xs->header->xh_entries;
- for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
- size_t o = le16_to_cpu(last->xe_name_offset);
-
- if (o < offs)
- last->xe_name_offset = cpu_to_le16(o + size);
- last += 1;
- }
+ needed_space -= sizeof(struct ocfs2_xattr_entry);
+ }
+ BUG_ON(needed_space < 0);
- if (!xi->value) {
- /* Remove the old entry. */
- last -= 1;
- memmove(xs->here, xs->here + 1,
- (void *)last - (void *)xs->here);
- memset(last, 0, sizeof(struct ocfs2_xattr_entry));
- le16_add_cpu(&xs->header->xh_count, -1);
- }
+ if (free_start < size) {
+ if (needed_space)
+ return -ENOSPC;
+ } else {
+ /*
+ * First we check if it would fit in the first place.
+ * Below, we align the free start to a block. This may
+ * slide us below the minimum gap. By checking unaligned
+ * first, we avoid that error.
+ */
+ rc = ocfs2_xa_check_space_helper(needed_space, free_start,
+ count);
+ if (rc)
+ return rc;
+ free_start = ocfs2_bucket_align_free_start(sb, free_start,
+ size);
}
- if (xi->value) {
- /* Insert the new name+value. */
- size_t size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len);
- void *val = xs->base + min_offs - size;
+ return ocfs2_xa_check_space_helper(needed_space, free_start, count);
+}
+
+static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc)
+{
+ le16_add_cpu(&loc->xl_header->xh_name_value_len,
+ -namevalue_size_xe(loc->xl_entry));
+}
- xs->here->xe_name_offset = cpu_to_le16(min_offs - size);
- memset(val, 0, size);
- memcpy(val, xi->name, name_len);
- memcpy(val + OCFS2_XATTR_SIZE(name_len),
- xi->value,
- xi->value_len);
- xs->here->xe_value_size = cpu_to_le64(xi->value_len);
- ocfs2_xattr_set_local(xs->here, 1);
- ocfs2_xattr_hash_entry(inode, xs->header, xs->here);
+static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
+{
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ int count = le16_to_cpu(xh->xh_count);
+ int low = 0, high = count - 1, tmp;
+ struct ocfs2_xattr_entry *tmp_xe;
+
+ /*
+ * We keep buckets sorted by name_hash, so we need to find
+ * our insert place.
+ */
+ while (low <= high && count) {
+ tmp = (low + high) / 2;
+ tmp_xe = &xh->xh_entries[tmp];
+
+ if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
+ low = tmp + 1;
+ else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash))
+ high = tmp - 1;
+ else {
+ low = tmp;
+ break;
+ }
}
- return;
+ if (low != count)
+ memmove(&xh->xh_entries[low + 1],
+ &xh->xh_entries[low],
+ ((count - low) * sizeof(struct ocfs2_xattr_entry)));
+
+ le16_add_cpu(&xh->xh_count, 1);
+ loc->xl_entry = &xh->xh_entries[low];
+ memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry));
+}
+
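As a small worked example of the sorted insert above: with existing hashes { 0x10, 0x40, 0x90 } and a new name_hash of 0x50, the binary search ends with low == 2, one entry is shifted up by the memmove, and the new entry is placed at index 2, keeping the bucket ordered as { 0x10, 0x40, 0x50, 0x90 }.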
+static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size)
+{
+ int free_start = ocfs2_xa_get_free_start(loc);
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ struct super_block *sb = loc->xl_inode->i_sb;
+ int nameval_offset;
+
+ free_start = ocfs2_bucket_align_free_start(sb, free_start, size);
+ nameval_offset = free_start - size;
+ loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset);
+ xh->xh_free_start = cpu_to_le16(nameval_offset);
+ le16_add_cpu(&xh->xh_name_value_len, size);
+
+}
+
+static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_value_buf *vb)
+{
+ struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
+ struct super_block *sb = loc->xl_inode->i_sb;
+ int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ int size = namevalue_size_xe(loc->xl_entry);
+ int block_offset = nameval_offset >> sb->s_blocksize_bits;
+
+ /* Values are not allowed to straddle block boundaries */
+ BUG_ON(block_offset !=
+ ((nameval_offset + size - 1) >> sb->s_blocksize_bits));
+ /* We expect the bucket to be filled in */
+ BUG_ON(!bucket->bu_bhs[block_offset]);
+
+ vb->vb_access = ocfs2_journal_access;
+ vb->vb_bh = bucket->bu_bhs[block_offset];
+}
+
+/* Operations for xattrs stored in buckets. */
+static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = {
+ .xlo_journal_access = ocfs2_xa_bucket_journal_access,
+ .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty,
+ .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer,
+ .xlo_check_space = ocfs2_xa_bucket_check_space,
+ .xlo_can_reuse = ocfs2_xa_bucket_can_reuse,
+ .xlo_get_free_start = ocfs2_xa_bucket_get_free_start,
+ .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue,
+ .xlo_add_entry = ocfs2_xa_bucket_add_entry,
+ .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue,
+ .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf,
+};
+
+static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc)
+{
+ struct ocfs2_xattr_value_buf vb;
+
+ if (ocfs2_xattr_is_local(loc->xl_entry))
+ return 0;
+
+ ocfs2_xa_fill_value_buf(loc, &vb);
+ return le32_to_cpu(vb.vb_xv->xr_clusters);
+}
+
+static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int trunc_rc, access_rc;
+ struct ocfs2_xattr_value_buf vb;
+
+ ocfs2_xa_fill_value_buf(loc, &vb);
+ trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes,
+ ctxt);
+
+ /*
+ * The caller of ocfs2_xa_value_truncate() has already called
+ * ocfs2_xa_journal_access on the loc. However, the truncate code
+ * calls ocfs2_extend_trans(). This may commit the previous
+ * transaction and open a new one. If this is a bucket, truncate
+ * could leave only vb->vb_bh set up for journaling. Meanwhile,
+ * the caller is expecting to dirty the entire bucket. So we must
+ * reset the journal work. We do this even if truncate has failed,
+ * as it could have failed after committing the extend.
+ */
+ access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+
+ /* Errors in truncate take precedence */
+ return trunc_rc ? trunc_rc : access_rc;
+}
+
+static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc)
+{
+ int index, count;
+ struct ocfs2_xattr_header *xh = loc->xl_header;
+ struct ocfs2_xattr_entry *entry = loc->xl_entry;
+
+ ocfs2_xa_wipe_namevalue(loc);
+ loc->xl_entry = NULL;
+
+ le16_add_cpu(&xh->xh_count, -1);
+ count = le16_to_cpu(xh->xh_count);
+
+ /*
+ * Only zero out the entry if there are more remaining. This is
+ * important for an empty bucket, as it keeps track of the
+ * bucket's hash value. It doesn't hurt empty block storage.
+ */
+ if (count) {
+ index = ((char *)entry - (char *)&xh->xh_entries) /
+ sizeof(struct ocfs2_xattr_entry);
+ memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
+ (count - index) * sizeof(struct ocfs2_xattr_entry));
+ memset(&xh->xh_entries[count], 0,
+ sizeof(struct ocfs2_xattr_entry));
+ }
}
/*
- * ocfs2_xattr_set_entry()
+ * If we have a problem adjusting the size of an external value during
+ * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr
+ * in an intermediate state. For example, the value may be partially
+ * truncated.
+ *
+ * If the value tree hasn't changed, the extend/truncate went nowhere.
+ * We have nothing to do. The caller can treat it as a straight error.
*
- * Set extended attribute entry into inode or block.
+ * If the value tree got partially truncated, we now have a corrupted
+ * extended attribute. We're going to wipe its entry and leak the
+ * clusters. Better to leak some storage than leave a corrupt entry.
*
- * If extended attribute value size > OCFS2_XATTR_INLINE_SIZE,
- * We first insert tree root(ocfs2_xattr_value_root) with set_entry_local(),
- * then set value in B tree with set_value_outside().
+ * If the value tree grew, it obviously didn't grow enough for the
+ * new entry. We're not going to try and reclaim those clusters either.
+ * If there was already an external value there (orig_clusters != 0),
+ * the new clusters are attached safely and we can just leave the old
+ * value in place. If there was no external value there, we remove
+ * the entry.
+ *
+ * This way, the xattr block we store in the journal will be consistent.
+ * If the size change broke because of the journal, no changes will hit
+ * disk anyway.
*/
-static int ocfs2_xattr_set_entry(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt,
- int flag)
-{
- struct ocfs2_xattr_entry *last;
- struct ocfs2_inode_info *oi = OCFS2_I(inode);
- struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
- size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name);
- size_t size_l = 0;
- handle_t *handle = ctxt->handle;
- int free, i, ret;
- struct ocfs2_xattr_info xi_l = {
- .name_index = xi->name_index,
- .name = xi->name,
- .value = xi->value,
- .value_len = xi->value_len,
- };
- struct ocfs2_xattr_value_buf vb = {
- .vb_bh = xs->xattr_bh,
- .vb_access = ocfs2_journal_access_di,
- };
+static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc,
+ const char *what,
+ unsigned int orig_clusters)
+{
+ unsigned int new_clusters = ocfs2_xa_value_clusters(loc);
+ char *nameval_buf = ocfs2_xa_offset_pointer(loc,
+ le16_to_cpu(loc->xl_entry->xe_name_offset));
+
+ if (new_clusters < orig_clusters) {
+ mlog(ML_ERROR,
+ "Partial truncate while %s xattr %.*s. Leaking "
+ "%u clusters and removing the entry\n",
+ what, loc->xl_entry->xe_name_len, nameval_buf,
+ orig_clusters - new_clusters);
+ ocfs2_xa_remove_entry(loc);
+ } else if (!orig_clusters) {
+ mlog(ML_ERROR,
+ "Unable to allocate an external value for xattr "
+ "%.*s safely. Leaking %u clusters and removing the "
+ "entry\n",
+ loc->xl_entry->xe_name_len, nameval_buf,
+ new_clusters - orig_clusters);
+ ocfs2_xa_remove_entry(loc);
+ } else if (new_clusters > orig_clusters)
+ mlog(ML_ERROR,
+ "Unable to grow xattr %.*s safely. %u new clusters "
+ "have been added, but the value will not be "
+ "modified\n",
+ loc->xl_entry->xe_name_len, nameval_buf,
+ new_clusters - orig_clusters);
+}
+
+static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ unsigned int orig_clusters;
+
+ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc) {
+ mlog_errno(rc);
+ /*
+ * Since this is remove, we can return 0 if
+ * ocfs2_xa_cleanup_value_truncate() is going to
+ * wipe the entry anyway. So we check the
+ * cluster count as well.
+ */
+ if (orig_clusters != ocfs2_xa_value_clusters(loc))
+ rc = 0;
+ ocfs2_xa_cleanup_value_truncate(loc, "removing",
+ orig_clusters);
+ if (rc)
+ goto out;
+ }
+ }
- if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- BUG_ON(xs->xattr_bh == xs->inode_bh);
- vb.vb_access = ocfs2_journal_access_xb;
- } else
- BUG_ON(xs->xattr_bh != xs->inode_bh);
+ ocfs2_xa_remove_entry(loc);
- /* Compute min_offs, last and free space. */
- last = xs->header->xh_entries;
+out:
+ return rc;
+}
- for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) {
- size_t offs = le16_to_cpu(last->xe_name_offset);
- if (offs < min_offs)
- min_offs = offs;
- last += 1;
- }
+static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc)
+{
+ int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len);
+ char *nameval_buf;
- free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP;
- if (free < 0)
- return -EIO;
+ nameval_buf = ocfs2_xa_offset_pointer(loc,
+ le16_to_cpu(loc->xl_entry->xe_name_offset));
+ memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE);
+}
- if (!xs->not_found) {
- size_t size = 0;
- if (ocfs2_xattr_is_local(xs->here))
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
- else
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_ROOT_SIZE;
- free += (size + sizeof(struct ocfs2_xattr_entry));
- }
- /* Check free space in inode or block */
- if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
- if (free < sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_ROOT_SIZE) {
- ret = -ENOSPC;
- goto out;
+/*
+ * Take an existing entry and make it ready for the new value. This
+ * won't allocate space, but it may free space. It should be ready for
+ * ocfs2_xa_prepare_entry() to finish the work.
+ */
+static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
+ unsigned int orig_clusters;
+ char *nameval_buf;
+ int xe_local = ocfs2_xattr_is_local(loc->xl_entry);
+ int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE;
+
+ BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) !=
+ name_size);
+
+ nameval_buf = ocfs2_xa_offset_pointer(loc,
+ le16_to_cpu(loc->xl_entry->xe_name_offset));
+ if (xe_local) {
+ memset(nameval_buf + name_size, 0,
+ namevalue_size_xe(loc->xl_entry) - name_size);
+ if (!xi_local)
+ ocfs2_xa_install_value_root(loc);
+ } else {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ if (xi_local) {
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc < 0)
+ mlog_errno(rc);
+ else
+ memset(nameval_buf + name_size, 0,
+ namevalue_size_xe(loc->xl_entry) -
+ name_size);
+ } else if (le64_to_cpu(loc->xl_entry->xe_value_size) >
+ xi->xi_value_len) {
+ rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len,
+ ctxt);
+ if (rc < 0)
+ mlog_errno(rc);
}
- size_l = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
- xi_l.value = (void *)&def_xv;
- xi_l.value_len = OCFS2_XATTR_ROOT_SIZE;
- } else if (xi->value) {
- if (free < sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len)) {
- ret = -ENOSPC;
+
+ if (rc) {
+ ocfs2_xa_cleanup_value_truncate(loc, "reusing",
+ orig_clusters);
goto out;
}
}
- if (!xs->not_found) {
- /* For existing extended attribute */
- size_t size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size));
- size_t offs = le16_to_cpu(xs->here->xe_name_offset);
- void *val = xs->base + offs;
+ loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len);
+ ocfs2_xattr_set_local(loc->xl_entry, xi_local);
- if (ocfs2_xattr_is_local(xs->here) && size == size_l) {
- /* Replace existing local xattr with tree root */
- ret = ocfs2_xattr_set_value_outside(inode, xi, xs,
- ctxt, &vb, offs);
- if (ret < 0)
- mlog_errno(ret);
- goto out;
- } else if (!ocfs2_xattr_is_local(xs->here)) {
- /* For existing xattr which has value outside */
- vb.vb_xv = (struct ocfs2_xattr_value_root *)
- (val + OCFS2_XATTR_SIZE(name_len));
+out:
+ return rc;
+}
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
- /*
- * If new value need set outside also,
- * first truncate old value to new value,
- * then set new value with set_value_outside().
- */
- ret = ocfs2_xattr_value_truncate(inode,
- &vb,
- xi->value_len,
- ctxt);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+/*
+ * Prepares loc->xl_entry to receive the new xattr. This includes
+ * properly setting up the name+value pair region. If loc->xl_entry
+ * already exists, it will take care of modifying it appropriately.
+ *
+ * Note that this modifies the data. You did journal_access already,
+ * right?
+ */
+static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ u32 name_hash,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ unsigned int orig_clusters;
+ __le64 orig_value_size = 0;
- ret = ocfs2_xattr_update_entry(inode,
- handle,
- xi,
- xs,
- &vb,
- offs);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+ rc = ocfs2_xa_check_space(loc, xi);
+ if (rc)
+ goto out;
- ret = __ocfs2_xattr_set_value_outside(inode,
- handle,
- &vb,
- xi->value,
- xi->value_len);
- if (ret < 0)
- mlog_errno(ret);
+ if (loc->xl_entry) {
+ if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+ orig_value_size = loc->xl_entry->xe_value_size;
+ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+ if (rc)
+ goto out;
+ goto alloc_value;
+ }
+
+ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+ if (rc) {
+ mlog_errno(rc);
+ ocfs2_xa_cleanup_value_truncate(loc,
+ "overwriting",
+ orig_clusters);
goto out;
- } else {
- /*
- * If new value need set in local,
- * just trucate old value to zero.
- */
- ret = ocfs2_xattr_value_truncate(inode,
- &vb,
- 0,
- ctxt);
- if (ret < 0)
- mlog_errno(ret);
}
}
+ ocfs2_xa_wipe_namevalue(loc);
+ } else
+ ocfs2_xa_add_entry(loc, name_hash);
+
+ /*
+ * If we get here, we have a blank entry. Fill it. We grow our
+ * name+value pair back from the end.
+ */
+ ocfs2_xa_add_namevalue(loc, xi);
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
+ ocfs2_xa_install_value_root(loc);
+
+alloc_value:
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
+ orig_clusters = ocfs2_xa_value_clusters(loc);
+ rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
+ if (rc < 0) {
+ /*
+ * If we tried to grow an existing external value,
+ * ocfs2_xa_cleanup_value_truncate() is going to
+ * let it stand. We have to restore its original
+ * value size.
+ */
+ loc->xl_entry->xe_value_size = orig_value_size;
+ ocfs2_xa_cleanup_value_truncate(loc, "growing",
+ orig_clusters);
+ mlog_errno(rc);
+ }
}
- ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh,
+out:
+ return rc;
+}
+
+/*
+ * Store the value portion of the name+value pair. This will skip
+ * values that are stored externally. Their tree roots were set up
+ * by ocfs2_xa_prepare_entry().
+ */
+static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int rc = 0;
+ int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset);
+ int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len);
+ char *nameval_buf;
+ struct ocfs2_xattr_value_buf vb;
+
+ nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset);
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
+ ocfs2_xa_fill_value_buf(loc, &vb);
+ rc = __ocfs2_xattr_set_value_outside(loc->xl_inode,
+ ctxt->handle, &vb,
+ xi->xi_value,
+ xi->xi_value_len);
+ } else
+ memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len);
+
+ return rc;
+}
+
+static int ocfs2_xa_set(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int ret;
+ u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name,
+ xi->xi_name_len);
+
+ ret = ocfs2_xa_journal_access(ctxt->handle, loc,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
- if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- }
-
/*
- * Set value in local, include set tree root in local.
- * This is the first step for value size >INLINE_SIZE.
+ * From here on out, everything is going to modify the buffer a
+ * little. Errors are going to leave the xattr header in a
+ * sane state. Thus, even with errors we dirty the sucker.
*/
- ocfs2_xattr_set_entry_local(inode, &xi_l, xs, last, min_offs);
- if (!(flag & OCFS2_INLINE_XATTR_FL)) {
- ret = ocfs2_journal_dirty(handle, xs->xattr_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
+ /* Don't worry, we are never called with !xi_value and !xl_entry */
+ if (!xi->xi_value) {
+ ret = ocfs2_xa_remove(loc, ctxt);
+ goto out_dirty;
}
- if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) &&
- (flag & OCFS2_INLINE_XATTR_FL)) {
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- unsigned int xattrsize = osb->s_xattr_inline_size;
-
- /*
- * Adjust extent record count or inline data size
- * to reserve space for extended attribute.
- */
- if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
- struct ocfs2_inline_data *idata = &di->id2.i_data;
- le16_add_cpu(&idata->id_count, -xattrsize);
- } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
- struct ocfs2_extent_list *el = &di->id2.i_list;
- le16_add_cpu(&el->l_count, -(xattrsize /
- sizeof(struct ocfs2_extent_rec)));
- }
- di->i_xattr_inline_size = cpu_to_le16(xattrsize);
+ ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out_dirty;
}
- /* Update xattr flag */
- spin_lock(&oi->ip_lock);
- oi->ip_dyn_features |= flag;
- di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
- spin_unlock(&oi->ip_lock);
- ret = ocfs2_journal_dirty(handle, xs->inode_bh);
- if (ret < 0)
+ ret = ocfs2_xa_store_value(loc, xi, ctxt);
+ if (ret)
mlog_errno(ret);
- if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
- /*
- * Set value outside in B tree.
- * This is the second step for value size > INLINE_SIZE.
- */
- size_t offs = le16_to_cpu(xs->here->xe_name_offset);
- ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt,
- &vb, offs);
- if (ret < 0) {
- int ret2;
+out_dirty:
+ ocfs2_xa_journal_dirty(ctxt->handle, loc);
- mlog_errno(ret);
- /*
- * If set value outside failed, we have to clean
- * the junk tree root we have already set in local.
- */
- ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle,
- xi, xs, &vb, offs);
- if (ret2 < 0)
- mlog_errno(ret2);
- }
- }
out:
return ret;
}
+static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc,
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_entry *entry)
+{
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
+
+ BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL));
+
+ loc->xl_inode = inode;
+ loc->xl_ops = &ocfs2_xa_block_loc_ops;
+ loc->xl_storage = bh;
+ loc->xl_entry = entry;
+ loc->xl_size = le16_to_cpu(di->i_xattr_inline_size);
+ loc->xl_header =
+ (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size -
+ loc->xl_size);
+}
+
+static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc,
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct ocfs2_xattr_entry *entry)
+{
+ struct ocfs2_xattr_block *xb =
+ (struct ocfs2_xattr_block *)bh->b_data;
+
+ BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED);
+
+ loc->xl_inode = inode;
+ loc->xl_ops = &ocfs2_xa_block_loc_ops;
+ loc->xl_storage = bh;
+ loc->xl_header = &(xb->xb_attrs.xb_header);
+ loc->xl_entry = entry;
+ loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block,
+ xb_attrs.xb_header);
+}
+
+static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_bucket *bucket,
+ struct ocfs2_xattr_entry *entry)
+{
+ loc->xl_inode = bucket->bu_inode;
+ loc->xl_ops = &ocfs2_xa_bucket_loc_ops;
+ loc->xl_storage = bucket;
+ loc->xl_header = bucket_xh(bucket);
+ loc->xl_entry = entry;
+ loc->xl_size = OCFS2_XATTR_BUCKET_SIZE;
+}
+
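Illustrative only: the three initializers above bind different on-disk storage to the same generic path. A hypothetical caller that already holds the journal handle and the relevant buffer would do roughly:

	struct ocfs2_xa_loc loc;

	ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
				      xs->not_found ? NULL : xs->here);
	ret = ocfs2_xa_set(&loc, xi, ctxt);	/* dispatches via loc.xl_ops */
	if (!ret)
		xs->here = loc.xl_entry;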
/*
* In xattr remove, if it is stored outside and refcounted, we may have
* the chance to split the refcount tree. So need the allocators.
@@ -2149,6 +2683,55 @@ static int ocfs2_xattr_ibody_find(struct inode *inode,
return 0;
}
+static int ocfs2_xattr_ibody_init(struct inode *inode,
+ struct buffer_head *di_bh,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int ret;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ unsigned int xattrsize = osb->s_xattr_inline_size;
+
+ if (!ocfs2_xattr_has_space_inline(inode, di)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Adjust extent record count or inline data size
+ * to reserve space for extended attribute.
+ */
+ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ struct ocfs2_inline_data *idata = &di->id2.i_data;
+ le16_add_cpu(&idata->id_count, -xattrsize);
+ } else if (!(ocfs2_inode_is_fast_symlink(inode))) {
+ struct ocfs2_extent_list *el = &di->id2.i_list;
+ le16_add_cpu(&el->l_count, -(xattrsize /
+ sizeof(struct ocfs2_extent_rec)));
+ }
+ di->i_xattr_inline_size = cpu_to_le16(xattrsize);
+
+ spin_lock(&oi->ip_lock);
+ oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL;
+ di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
+ spin_unlock(&oi->ip_lock);
+
+ ret = ocfs2_journal_dirty(ctxt->handle, di_bh);
+ if (ret < 0)
+ mlog_errno(ret);
+
+out:
+ return ret;
+}
+
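A worked example of the reservation above (figures are typical, not guaranteed by the patch): with s_xattr_inline_size = 256, an inline-data inode has id_count reduced by 256 bytes, while an extent-list inode, assuming sizeof(struct ocfs2_extent_rec) = 16, has l_count reduced by 256 / 16 = 16 records; either way the tail of the inode block is handed over to the inline xattr header.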
/*
* ocfs2_xattr_ibody_set()
*
@@ -2160,9 +2743,10 @@ static int ocfs2_xattr_ibody_set(struct inode *inode,
struct ocfs2_xattr_search *xs,
struct ocfs2_xattr_set_ctxt *ctxt)
{
+ int ret;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data;
- int ret;
+ struct ocfs2_xa_loc loc;
if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE)
return -ENOSPC;
@@ -2175,8 +2759,25 @@ static int ocfs2_xattr_ibody_set(struct inode *inode,
}
}
- ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
- (OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL));
+ if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) {
+ ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh,
+ xs->not_found ? NULL : xs->here);
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (ret) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+ xs->here = loc.xl_entry;
+
out:
up_write(&oi->ip_alloc_sem);
@@ -2236,12 +2837,11 @@ cleanup:
return ret;
}
-static int ocfs2_create_xattr_block(handle_t *handle,
- struct inode *inode,
+static int ocfs2_create_xattr_block(struct inode *inode,
struct buffer_head *inode_bh,
- struct ocfs2_alloc_context *meta_ac,
- struct buffer_head **ret_bh,
- int indexed)
+ struct ocfs2_xattr_set_ctxt *ctxt,
+ int indexed,
+ struct buffer_head **ret_bh)
{
int ret;
u16 suballoc_bit_start;
@@ -2252,14 +2852,14 @@ static int ocfs2_create_xattr_block(handle_t *handle,
struct buffer_head *new_bh = NULL;
struct ocfs2_xattr_block *xblk;
- ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh,
- OCFS2_JOURNAL_ACCESS_CREATE);
+ ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
+ inode_bh, OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
mlog_errno(ret);
goto end;
}
- ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
+ ret = ocfs2_claim_metadata(osb, ctxt->handle, ctxt->meta_ac, 1,
&suballoc_bit_start, &num_got,
&first_blkno);
if (ret < 0) {
@@ -2270,7 +2870,7 @@ static int ocfs2_create_xattr_block(handle_t *handle,
new_bh = sb_getblk(inode->i_sb, first_blkno);
ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh);
- ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode),
+ ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret < 0) {
@@ -2282,11 +2882,10 @@ static int ocfs2_create_xattr_block(handle_t *handle,
xblk = (struct ocfs2_xattr_block *)new_bh->b_data;
memset(xblk, 0, inode->i_sb->s_blocksize);
strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE);
- xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num);
+ xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start);
xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation);
xblk->xb_blkno = cpu_to_le64(first_blkno);
-
if (indexed) {
struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root;
xr->xt_clusters = cpu_to_le32(1);
@@ -2297,14 +2896,17 @@ static int ocfs2_create_xattr_block(handle_t *handle,
xr->xt_list.l_next_free_rec = cpu_to_le16(1);
xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED);
}
+ ocfs2_journal_dirty(ctxt->handle, new_bh);
- ret = ocfs2_journal_dirty(handle, new_bh);
- if (ret < 0) {
- mlog_errno(ret);
- goto end;
- }
+ /* Add it to the inode */
di->i_xattr_loc = cpu_to_le64(first_blkno);
- ocfs2_journal_dirty(handle, inode_bh);
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
+ di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ ocfs2_journal_dirty(ctxt->handle, inode_bh);
*ret_bh = new_bh;
new_bh = NULL;
@@ -2326,13 +2928,13 @@ static int ocfs2_xattr_block_set(struct inode *inode,
struct ocfs2_xattr_set_ctxt *ctxt)
{
struct buffer_head *new_bh = NULL;
- handle_t *handle = ctxt->handle;
struct ocfs2_xattr_block *xblk = NULL;
int ret;
+ struct ocfs2_xa_loc loc;
if (!xs->xattr_bh) {
- ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh,
- ctxt->meta_ac, &new_bh, 0);
+ ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt,
+ 0, &new_bh);
if (ret) {
mlog_errno(ret);
goto end;
@@ -2348,21 +2950,25 @@ static int ocfs2_xattr_block_set(struct inode *inode,
xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data;
if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) {
- /* Set extended attribute into external block */
- ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt,
- OCFS2_HAS_XATTR_FL);
- if (!ret || ret != -ENOSPC)
- goto end;
+ ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
+ xs->not_found ? NULL : xs->here);
- ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
- if (ret)
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (!ret)
+ xs->here = loc.xl_entry;
+ else if (ret != -ENOSPC)
goto end;
+ else {
+ ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
+ if (ret)
+ goto end;
+ }
}
- ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
+ if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)
+ ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
end:
-
return ret;
}
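For the un-indexed case, the rewritten ocfs2_xattr_block_set() reduces to the flow below. This is only a condensed sketch of the hunk above, with logging and error paths trimmed:

	struct ocfs2_xa_loc loc;

	ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh,
				      xs->not_found ? NULL : xs->here);
	ret = ocfs2_xa_set(&loc, xi, ctxt);
	if (!ret)
		xs->here = loc.xl_entry;	/* the plain block had room */
	else if (ret == -ENOSPC)
		ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
	/* if the block is (now) indexed, fall through to the bucket code */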
@@ -2371,7 +2977,6 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
struct ocfs2_xattr_info *xi,
struct ocfs2_xattr_search *xs)
{
- u64 value_size;
struct ocfs2_xattr_entry *last;
int free, i;
size_t min_offs = xs->end - xs->base;
@@ -2394,13 +2999,7 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode,
BUG_ON(!xs->not_found);
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
- value_size = OCFS2_XATTR_ROOT_SIZE;
- else
- value_size = OCFS2_XATTR_SIZE(xi->value_len);
-
- if (free >= sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(strlen(xi->name)) + value_size)
+ if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi)))
return 1;
return 0;
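namevalue_size_xi() and namevalue_size_xe() are defined earlier in xattr.c and are not visible in this hunk; judging from the open-coded computations they replace here and in the bucket code below, they amount to something like the following sketch (an inference, not the patch's literal definitions):

/* Sketch only: size of the name+value region an xattr entry occupies. */
static int namevalue_size(int name_len, uint64_t value_len)
{
	if (value_len > OCFS2_XATTR_INLINE_SIZE)
		return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE;
	return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len);
}

static int namevalue_size_xi(struct ocfs2_xattr_info *xi)
{
	return namevalue_size(xi->xi_name_len, xi->xi_value_len);
}

static int namevalue_size_xe(struct ocfs2_xattr_entry *xe)
{
	if (ocfs2_xattr_is_local(xe))
		return OCFS2_XATTR_SIZE(xe->xe_name_len) +
		       OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
	return OCFS2_XATTR_SIZE(xe->xe_name_len) + OCFS2_XATTR_ROOT_SIZE;
}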
@@ -2424,7 +3023,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
char *base = NULL;
int name_offset, name_len = 0;
u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb,
- xi->value_len);
+ xi->xi_value_len);
u64 value_size;
/*
@@ -2432,14 +3031,14 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
* No matter whether we replace an old one or add a new one,
* we need this for writing.
*/
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE)
credits += new_clusters *
ocfs2_clusters_to_blocks(inode->i_sb, 1);
if (xis->not_found && xbs->not_found) {
credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb);
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
clusters_add += new_clusters;
credits += ocfs2_calc_extend_credits(inode->i_sb,
&def_xv.xv.xr_list,
@@ -2484,7 +3083,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
* The credits for removing the value tree will be extended
* by ocfs2_remove_extent itself.
*/
- if (!xi->value) {
+ if (!xi->xi_value) {
if (!ocfs2_xattr_is_local(xe))
credits += ocfs2_remove_extent_credits(inode->i_sb);
@@ -2514,7 +3113,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
}
}
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) {
+ if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) {
/* the new values will be stored outside. */
u32 old_clusters = 0;
@@ -2547,9 +3146,10 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode,
* value, we don't need any allocation, otherwise we have
* to guess metadata allocation.
*/
- if ((ocfs2_xattr_is_local(xe) && value_size >= xi->value_len) ||
+ if ((ocfs2_xattr_is_local(xe) &&
+ (value_size >= xi->xi_value_len)) ||
(!ocfs2_xattr_is_local(xe) &&
- OCFS2_XATTR_ROOT_SIZE >= xi->value_len))
+ OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len))
goto out;
}
@@ -2639,7 +3239,7 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode,
meta_add += extra_meta;
mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, "
- "credits = %d\n", xi->name, meta_add, clusters_add, *credits);
+ "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits);
if (meta_add) {
ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add,
@@ -2679,7 +3279,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
{
int ret = 0, credits, old_found;
- if (!xi->value) {
+ if (!xi->xi_value) {
/* Remove existing extended attribute */
if (!xis->not_found)
ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
@@ -2693,8 +3293,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
* If succeed and that extended attribute existing in
* external block, then we will remove it.
*/
- xi->value = NULL;
- xi->value_len = 0;
+ xi->xi_value = NULL;
+ xi->xi_value_len = 0;
old_found = xis->not_found;
xis->not_found = -ENODATA;
@@ -2722,8 +3322,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
} else if (ret == -ENOSPC) {
if (di->i_xattr_loc && !xbs->xattr_bh) {
ret = ocfs2_xattr_block_find(inode,
- xi->name_index,
- xi->name, xbs);
+ xi->xi_name_index,
+ xi->xi_name, xbs);
if (ret)
goto out;
@@ -2762,8 +3362,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
* If succeed and that extended attribute
* existing in inode, we will remove it.
*/
- xi->value = NULL;
- xi->value_len = 0;
+ xi->xi_value = NULL;
+ xi->xi_value_len = 0;
xbs->not_found = -ENODATA;
ret = ocfs2_calc_xattr_set_need(inode,
di,
@@ -2829,10 +3429,11 @@ int ocfs2_xattr_set_handle(handle_t *handle,
int ret;
struct ocfs2_xattr_info xi = {
- .name_index = name_index,
- .name = name,
- .value = value,
- .value_len = value_len,
+ .xi_name_index = name_index,
+ .xi_name = name,
+ .xi_name_len = strlen(name),
+ .xi_value = value,
+ .xi_value_len = value_len,
};
struct ocfs2_xattr_search xis = {
@@ -2912,10 +3513,11 @@ int ocfs2_xattr_set(struct inode *inode,
struct ocfs2_refcount_tree *ref_tree = NULL;
struct ocfs2_xattr_info xi = {
- .name_index = name_index,
- .name = name,
- .value = value,
- .value_len = value_len,
+ .xi_name_index = name_index,
+ .xi_name = name,
+ .xi_name_len = strlen(name),
+ .xi_value = value,
+ .xi_value_len = value_len,
};
struct ocfs2_xattr_search xis = {
@@ -3759,7 +4361,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
struct ocfs2_xattr_bucket *bucket)
{
int ret, i;
- size_t end, offset, len, value_len;
+ size_t end, offset, len;
struct ocfs2_xattr_header *xh;
char *entries, *buf, *bucket_buf = NULL;
u64 blkno = bucket_blkno(bucket);
@@ -3813,12 +4415,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode,
end = OCFS2_XATTR_BUCKET_SIZE;
for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
offset = le16_to_cpu(xe->xe_name_offset);
- if (ocfs2_xattr_is_local(xe))
- value_len = OCFS2_XATTR_SIZE(
- le64_to_cpu(xe->xe_value_size));
- else
- value_len = OCFS2_XATTR_ROOT_SIZE;
- len = OCFS2_XATTR_SIZE(xe->xe_name_len) + value_len;
+ len = namevalue_size_xe(xe);
/*
* We must make sure that the name/value pair
@@ -4007,7 +4604,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
int new_bucket_head)
{
int ret, i;
- int count, start, len, name_value_len = 0, xe_len, name_offset = 0;
+ int count, start, len, name_value_len = 0, name_offset = 0;
struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL;
struct ocfs2_xattr_header *xh;
struct ocfs2_xattr_entry *xe;
@@ -4098,13 +4695,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
name_value_len = 0;
for (i = 0; i < start; i++) {
xe = &xh->xh_entries[i];
- xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
- if (ocfs2_xattr_is_local(xe))
- xe_len +=
- OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- xe_len += OCFS2_XATTR_ROOT_SIZE;
- name_value_len += xe_len;
+ name_value_len += namevalue_size_xe(xe);
if (le16_to_cpu(xe->xe_name_offset) < name_offset)
name_offset = le16_to_cpu(xe->xe_name_offset);
}
@@ -4134,12 +4725,6 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
- xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len);
- if (ocfs2_xattr_is_local(xe))
- xe_len +=
- OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- xe_len += OCFS2_XATTR_ROOT_SIZE;
if (le16_to_cpu(xe->xe_name_offset) <
le16_to_cpu(xh->xh_free_start))
xh->xh_free_start = xe->xe_name_offset;
@@ -4751,195 +5336,6 @@ static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode,
}
/*
- * Handle the normal xattr set, including replace, delete and new.
- *
- * Note: "local" indicates the real data's locality. So we can't
- * just its bucket locality by its length.
- */
-static void ocfs2_xattr_set_entry_normal(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- u32 name_hash,
- int local)
-{
- struct ocfs2_xattr_entry *last, *xe;
- int name_len = strlen(xi->name);
- struct ocfs2_xattr_header *xh = xs->header;
- u16 count = le16_to_cpu(xh->xh_count), start;
- size_t blocksize = inode->i_sb->s_blocksize;
- char *val;
- size_t offs, size, new_size;
-
- last = &xh->xh_entries[count];
- if (!xs->not_found) {
- xe = xs->here;
- offs = le16_to_cpu(xe->xe_name_offset);
- if (ocfs2_xattr_is_local(xe))
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
-
- /*
- * If the new value will be stored outside, xi->value has been
- * initalized as an empty ocfs2_xattr_value_root, and the same
- * goes with xi->value_len, so we can set new_size safely here.
- * See ocfs2_xattr_set_in_bucket.
- */
- new_size = OCFS2_XATTR_SIZE(name_len) +
- OCFS2_XATTR_SIZE(xi->value_len);
-
- le16_add_cpu(&xh->xh_name_value_len, -size);
- if (xi->value) {
- if (new_size > size)
- goto set_new_name_value;
-
- /* Now replace the old value with new one. */
- if (local)
- xe->xe_value_size = cpu_to_le64(xi->value_len);
- else
- xe->xe_value_size = 0;
-
- val = ocfs2_xattr_bucket_get_val(inode,
- xs->bucket, offs);
- memset(val + OCFS2_XATTR_SIZE(name_len), 0,
- size - OCFS2_XATTR_SIZE(name_len));
- if (OCFS2_XATTR_SIZE(xi->value_len) > 0)
- memcpy(val + OCFS2_XATTR_SIZE(name_len),
- xi->value, xi->value_len);
-
- le16_add_cpu(&xh->xh_name_value_len, new_size);
- ocfs2_xattr_set_local(xe, local);
- return;
- } else {
- /*
- * Remove the old entry if there is more than one.
- * We don't remove the last entry so that we can
- * use it to indicate the hash value of the empty
- * bucket.
- */
- last -= 1;
- le16_add_cpu(&xh->xh_count, -1);
- if (xh->xh_count) {
- memmove(xe, xe + 1,
- (void *)last - (void *)xe);
- memset(last, 0,
- sizeof(struct ocfs2_xattr_entry));
- } else
- xh->xh_free_start =
- cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
-
- return;
- }
- } else {
- /* find a new entry for insert. */
- int low = 0, high = count - 1, tmp;
- struct ocfs2_xattr_entry *tmp_xe;
-
- while (low <= high && count) {
- tmp = (low + high) / 2;
- tmp_xe = &xh->xh_entries[tmp];
-
- if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
- low = tmp + 1;
- else if (name_hash <
- le32_to_cpu(tmp_xe->xe_name_hash))
- high = tmp - 1;
- else {
- low = tmp;
- break;
- }
- }
-
- xe = &xh->xh_entries[low];
- if (low != count)
- memmove(xe + 1, xe, (void *)last - (void *)xe);
-
- le16_add_cpu(&xh->xh_count, 1);
- memset(xe, 0, sizeof(struct ocfs2_xattr_entry));
- xe->xe_name_hash = cpu_to_le32(name_hash);
- xe->xe_name_len = name_len;
- ocfs2_xattr_set_type(xe, xi->name_index);
- }
-
-set_new_name_value:
- /* Insert the new name+value. */
- size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len);
-
- /*
- * We must make sure that the name/value pair
- * exists in the same block.
- */
- offs = le16_to_cpu(xh->xh_free_start);
- start = offs - size;
-
- if (start >> inode->i_sb->s_blocksize_bits !=
- (offs - 1) >> inode->i_sb->s_blocksize_bits) {
- offs = offs - offs % blocksize;
- xh->xh_free_start = cpu_to_le16(offs);
- }
-
- val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size);
- xe->xe_name_offset = cpu_to_le16(offs - size);
-
- memset(val, 0, size);
- memcpy(val, xi->name, name_len);
- memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len);
-
- xe->xe_value_size = cpu_to_le64(xi->value_len);
- ocfs2_xattr_set_local(xe, local);
- xs->here = xe;
- le16_add_cpu(&xh->xh_free_start, -size);
- le16_add_cpu(&xh->xh_name_value_len, size);
-
- return;
-}
-
-/*
- * Set the xattr entry in the specified bucket.
- * The bucket is indicated by xs->bucket and it should have the enough
- * space for the xattr insertion.
- */
-static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- u32 name_hash,
- int local)
-{
- int ret;
- u64 blkno;
-
- mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n",
- (unsigned long)xi->value_len, xi->name_index,
- (unsigned long long)bucket_blkno(xs->bucket));
-
- if (!xs->bucket->bu_bhs[1]) {
- blkno = bucket_blkno(xs->bucket);
- ocfs2_xattr_bucket_relse(xs->bucket);
- ret = ocfs2_read_xattr_bucket(xs->bucket, blkno);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- }
-
- ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
-
- ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local);
- ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
-
-out:
- return ret;
-}
-
-/*
* Truncate the specified xe_off entry in xattr bucket.
* bucket is indicated by header_bh and len is the new length.
* Both the ocfs2_xattr_value_root and the entry will be updated here.
@@ -5009,66 +5405,6 @@ out:
return ret;
}
-static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode,
- struct ocfs2_xattr_search *xs,
- int len,
- struct ocfs2_xattr_set_ctxt *ctxt)
-{
- int ret, offset;
- struct ocfs2_xattr_entry *xe = xs->here;
- struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base;
-
- BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe));
-
- offset = xe - xh->xh_entries;
- ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket,
- offset, len, ctxt);
- if (ret)
- mlog_errno(ret);
-
- return ret;
-}
-
-static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_search *xs,
- char *val,
- int value_len)
-{
- int ret, offset, block_off;
- struct ocfs2_xattr_value_root *xv;
- struct ocfs2_xattr_entry *xe = xs->here;
- struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
- void *base;
- struct ocfs2_xattr_value_buf vb = {
- .vb_access = ocfs2_journal_access,
- };
-
- BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe));
-
- ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh,
- xe - xh->xh_entries,
- &block_off,
- &offset);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- base = bucket_block(xs->bucket, block_off);
- xv = (struct ocfs2_xattr_value_root *)(base + offset +
- OCFS2_XATTR_SIZE(xe->xe_name_len));
-
- vb.vb_xv = xv;
- vb.vb_bh = xs->bucket->bu_bhs[block_off];
- ret = __ocfs2_xattr_set_value_outside(inode, handle,
- &vb, val, value_len);
- if (ret)
- mlog_errno(ret);
-out:
- return ret;
-}
-
static int ocfs2_rm_xattr_cluster(struct inode *inode,
struct buffer_head *root_bh,
u64 blkno,
@@ -5167,128 +5503,6 @@ out:
return ret;
}
-static void ocfs2_xattr_bucket_remove_xs(struct inode *inode,
- handle_t *handle,
- struct ocfs2_xattr_search *xs)
-{
- struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket);
- struct ocfs2_xattr_entry *last = &xh->xh_entries[
- le16_to_cpu(xh->xh_count) - 1];
- int ret = 0;
-
- ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- return;
- }
-
- /* Remove the old entry. */
- memmove(xs->here, xs->here + 1,
- (void *)last - (void *)xs->here);
- memset(last, 0, sizeof(struct ocfs2_xattr_entry));
- le16_add_cpu(&xh->xh_count, -1);
-
- ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
-}
-
-/*
- * Set the xattr name/value in the bucket specified in xs.
- *
- * As the new value in xi may be stored in the bucket or in an outside cluster,
- * we divide the whole process into 3 steps:
- * 1. insert name/value in the bucket(ocfs2_xattr_set_entry_in_bucket)
- * 2. truncate of the outside cluster(ocfs2_xattr_bucket_value_truncate_xs)
- * 3. Set the value to the outside cluster(ocfs2_xattr_bucket_set_value_outside)
- * 4. If the clusters for the new outside value can't be allocated, we need
- * to free the xattr we allocated in set.
- */
-static int ocfs2_xattr_set_in_bucket(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt)
-{
- int ret, local = 1;
- size_t value_len;
- char *val = (char *)xi->value;
- struct ocfs2_xattr_entry *xe = xs->here;
- u32 name_hash = ocfs2_xattr_name_hash(inode, xi->name,
- strlen(xi->name));
-
- if (!xs->not_found && !ocfs2_xattr_is_local(xe)) {
- /*
- * We need to truncate the xattr storage first.
- *
- * If both the old and new value are stored to
- * outside block, we only need to truncate
- * the storage and then set the value outside.
- *
- * If the new value should be stored within block,
- * we should free all the outside block first and
- * the modification to the xattr block will be done
- * by following steps.
- */
- if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
- value_len = xi->value_len;
- else
- value_len = 0;
-
- ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
- value_len,
- ctxt);
- if (ret)
- goto out;
-
- if (value_len)
- goto set_value_outside;
- }
-
- value_len = xi->value_len;
- /* So we have to handle the inside block change now. */
- if (value_len > OCFS2_XATTR_INLINE_SIZE) {
- /*
- * If the new value will be stored outside of block,
- * initalize a new empty value root and insert it first.
- */
- local = 0;
- xi->value = &def_xv;
- xi->value_len = OCFS2_XATTR_ROOT_SIZE;
- }
-
- ret = ocfs2_xattr_set_entry_in_bucket(inode, ctxt->handle, xi, xs,
- name_hash, local);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- if (value_len <= OCFS2_XATTR_INLINE_SIZE)
- goto out;
-
- /* allocate the space now for the outside block storage. */
- ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
- value_len, ctxt);
- if (ret) {
- mlog_errno(ret);
-
- if (xs->not_found) {
- /*
- * We can't allocate enough clusters for outside
- * storage and we have allocated xattr already,
- * so need to remove it.
- */
- ocfs2_xattr_bucket_remove_xs(inode, ctxt->handle, xs);
- }
- goto out;
- }
-
-set_value_outside:
- ret = ocfs2_xattr_bucket_set_value_outside(inode, ctxt->handle,
- xs, val, value_len);
-out:
- return ret;
-}
-
/*
* check whether the xattr bucket is filled up with the same hash value.
* If we want to insert the xattr with the same hash, return -ENOSPC.
@@ -5317,156 +5531,116 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode,
return 0;
}
-static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
- struct ocfs2_xattr_info *xi,
- struct ocfs2_xattr_search *xs,
- struct ocfs2_xattr_set_ctxt *ctxt)
+/*
+ * Try to set the entry in the current bucket. If we fail, the caller
+ * will handle getting us another bucket.
+ */
+static int ocfs2_xattr_set_entry_bucket(struct inode *inode,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_search *xs,
+ struct ocfs2_xattr_set_ctxt *ctxt)
{
- struct ocfs2_xattr_header *xh;
- struct ocfs2_xattr_entry *xe;
- u16 count, header_size, xh_free_start;
- int free, max_free, need, old;
- size_t value_size = 0, name_len = strlen(xi->name);
- size_t blocksize = inode->i_sb->s_blocksize;
- int ret, allocation = 0;
-
- mlog_entry("Set xattr %s in xattr index block\n", xi->name);
-
-try_again:
- xh = xs->header;
- count = le16_to_cpu(xh->xh_count);
- xh_free_start = le16_to_cpu(xh->xh_free_start);
- header_size = sizeof(struct ocfs2_xattr_header) +
- count * sizeof(struct ocfs2_xattr_entry);
- max_free = OCFS2_XATTR_BUCKET_SIZE - header_size -
- le16_to_cpu(xh->xh_name_value_len) - OCFS2_XATTR_HEADER_GAP;
-
- mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size "
- "of %u which exceed block size\n",
- (unsigned long long)bucket_blkno(xs->bucket),
- header_size);
+ int ret;
+ struct ocfs2_xa_loc loc;
- if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE)
- value_size = OCFS2_XATTR_ROOT_SIZE;
- else if (xi->value)
- value_size = OCFS2_XATTR_SIZE(xi->value_len);
+ mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name);
- if (xs->not_found)
- need = sizeof(struct ocfs2_xattr_entry) +
- OCFS2_XATTR_SIZE(name_len) + value_size;
- else {
- need = value_size + OCFS2_XATTR_SIZE(name_len);
+ ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
+ xs->not_found ? NULL : xs->here);
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (!ret) {
+ xs->here = loc.xl_entry;
+ goto out;
+ }
+ if (ret != -ENOSPC) {
+ mlog_errno(ret);
+ goto out;
+ }
- /*
- * We only replace the old value if the new length is smaller
- * than the old one. Otherwise we will allocate new space in the
- * bucket to store it.
- */
- xe = xs->here;
- if (ocfs2_xattr_is_local(xe))
- old = OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size));
- else
- old = OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE);
+ /* Ok, we need space. Let's try defragmenting the bucket. */
+ ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
+ xs->bucket);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
- if (old >= value_size)
- need = 0;
+ ret = ocfs2_xa_set(&loc, xi, ctxt);
+ if (!ret) {
+ xs->here = loc.xl_entry;
+ goto out;
}
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
- free = xh_free_start - header_size - OCFS2_XATTR_HEADER_GAP;
- /*
- * We need to make sure the new name/value pair
- * can exist in the same block.
- */
- if (xh_free_start % blocksize < need)
- free -= xh_free_start % blocksize;
-
- mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, "
- "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len ="
- " %u\n", xs->not_found,
- (unsigned long long)bucket_blkno(xs->bucket),
- free, need, max_free, le16_to_cpu(xh->xh_free_start),
- le16_to_cpu(xh->xh_name_value_len));
-
- if (free < need ||
- (xs->not_found &&
- count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb))) {
- if (need <= max_free &&
- count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) {
- /*
- * We can create the space by defragment. Since only the
- * name/value will be moved, the xe shouldn't be changed
- * in xs.
- */
- ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
- xs->bucket);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- xh_free_start = le16_to_cpu(xh->xh_free_start);
- free = xh_free_start - header_size
- - OCFS2_XATTR_HEADER_GAP;
- if (xh_free_start % blocksize < need)
- free -= xh_free_start % blocksize;
+out:
+ mlog_exit(ret);
+ return ret;
+}
- if (free >= need)
- goto xattr_set;
+static int ocfs2_xattr_set_entry_index_block(struct inode *inode,
+ struct ocfs2_xattr_info *xi,
+ struct ocfs2_xattr_search *xs,
+ struct ocfs2_xattr_set_ctxt *ctxt)
+{
+ int ret;
- mlog(0, "Can't get enough space for xattr insert by "
- "defragment. Need %u bytes, but we have %d, so "
- "allocate new bucket for it.\n", need, free);
- }
+ mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name);
- /*
- * We have to add new buckets or clusters and one
- * allocation should leave us enough space for insert.
- */
- BUG_ON(allocation);
+ ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
+ if (!ret)
+ goto out;
+ if (ret != -ENOSPC) {
+ mlog_errno(ret);
+ goto out;
+ }
- /*
- * We do not allow for overlapping ranges between buckets. And
- * the maximum number of collisions we will allow for then is
- * one bucket's worth, so check it here whether we need to
- * add a new bucket for the insert.
- */
- ret = ocfs2_check_xattr_bucket_collision(inode,
- xs->bucket,
- xi->name);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ /* Ack, need more space. Let's try to get another bucket! */
- ret = ocfs2_add_new_xattr_bucket(inode,
- xs->xattr_bh,
+ /*
+ * We do not allow for overlapping ranges between buckets. And
+ * the maximum number of collisions we will allow for then is
+ * one bucket's worth, so check it here whether we need to
+ * add a new bucket for the insert.
+ */
+ ret = ocfs2_check_xattr_bucket_collision(inode,
xs->bucket,
- ctxt);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
+ xi->xi_name);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
- /*
- * ocfs2_add_new_xattr_bucket() will have updated
- * xs->bucket if it moved, but it will not have updated
- * any of the other search fields. Thus, we drop it and
- * re-search. Everything should be cached, so it'll be
- * quick.
- */
- ocfs2_xattr_bucket_relse(xs->bucket);
- ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
- xi->name_index,
- xi->name, xs);
- if (ret && ret != -ENODATA)
- goto out;
- xs->not_found = ret;
- allocation = 1;
- goto try_again;
+ ret = ocfs2_add_new_xattr_bucket(inode,
+ xs->xattr_bh,
+ xs->bucket,
+ ctxt);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
-xattr_set:
- ret = ocfs2_xattr_set_in_bucket(inode, xi, xs, ctxt);
+ /*
+ * ocfs2_add_new_xattr_bucket() will have updated
+ * xs->bucket if it moved, but it will not have updated
+ * any of the other search fields. Thus, we drop it and
+ * re-search. Everything should be cached, so it'll be
+ * quick.
+ */
+ ocfs2_xattr_bucket_relse(xs->bucket);
+ ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
+ xi->xi_name_index,
+ xi->xi_name, xs);
+ if (ret && ret != -ENODATA)
+ goto out;
+ xs->not_found = ret;
+
+ /* Ok, we have a new bucket, let's try again */
+ ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
+ if (ret && (ret != -ENOSPC))
+ mlog_errno(ret);
+
out:
mlog_exit(ret);
return ret;
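Seen from the top, the indexed set path now makes at most one allocation pass. A condensed sketch of the control flow using the functions from the hunks above (error handling omitted):

	ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
	if (ret == -ENOSPC) {
		/* even defragmenting the bucket did not make room */
		ocfs2_check_xattr_bucket_collision(inode, xs->bucket, xi->xi_name);
		ocfs2_add_new_xattr_bucket(inode, xs->xattr_bh, xs->bucket, ctxt);
		/* re-search (xs->bucket may have moved), then try once more */
		ocfs2_xattr_bucket_relse(xs->bucket);
		ocfs2_xattr_index_block_find(inode, xs->xattr_bh,
					     xi->xi_name_index, xi->xi_name, xs);
		ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
	}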
@@ -5678,7 +5852,7 @@ static int ocfs2_prepare_refcount_xattr(struct inode *inode,
* refcount tree, and make the original extent become 3. So we will need
* 2 * cluster more extent recs at most.
*/
- if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) {
+ if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) {
ret = ocfs2_refcounted_xattr_delete_need(inode,
&(*ref_tree)->rf_ci,
@@ -6354,9 +6528,11 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
int indexed)
{
int ret;
- handle_t *handle;
struct ocfs2_alloc_context *meta_ac;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_xattr_set_ctxt ctxt = {
+ .meta_ac = meta_ac,
+ };
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
if (ret < 0) {
@@ -6364,21 +6540,21 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
return ret;
}
- handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
+ ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
+ if (IS_ERR(ctxt.handle)) {
+ ret = PTR_ERR(ctxt.handle);
mlog_errno(ret);
goto out;
}
mlog(0, "create new xattr block for inode %llu, index = %d\n",
(unsigned long long)fe_bh->b_blocknr, indexed);
- ret = ocfs2_create_xattr_block(handle, inode, fe_bh,
- meta_ac, ret_bh, indexed);
+ ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
+ ret_bh);
if (ret)
mlog_errno(ret);
- ocfs2_commit_trans(osb, handle);
+ ocfs2_commit_trans(osb, ctxt.handle);
out:
ocfs2_free_alloc_context(meta_ac);
return ret;
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 64bc8998ac9..e8865c11777 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -412,9 +412,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
pdev = part_to_dev(p);
p->start_sect = start;
- p->alignment_offset = queue_sector_alignment_offset(disk->queue, start);
- p->discard_alignment = queue_sector_discard_alignment(disk->queue,
- start);
+ p->alignment_offset =
+ queue_limit_alignment_offset(&disk->queue->limits, start);
+ p->discard_alignment =
+ queue_limit_discard_alignment(&disk->queue->limits, start);
p->nr_sects = len;
p->partno = partno;
p->policy = get_disk_ro(disk);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 13b5d070817..18e20feee25 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -270,7 +270,9 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
blocked = p->blocked;
collect_sigign_sigcatch(p, &ignored, &caught);
num_threads = atomic_read(&p->signal->count);
+ rcu_read_lock(); /* FIXME: is this correct? */
qsize = atomic_read(&__task_cred(p)->user->sigpending);
+ rcu_read_unlock();
qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
unlock_task_sighand(p, &flags);
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 746895ddfda..a7310841c83 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1089,8 +1089,12 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
if (!capable(CAP_AUDIT_CONTROL))
return -EPERM;
- if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
+ rcu_read_lock();
+ if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
+ rcu_read_unlock();
return -EPERM;
+ }
+ rcu_read_unlock();
if (count >= PAGE_SIZE)
count = PAGE_SIZE - 1;
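pid_task() returns an RCU-protected pointer, so the comparison with current has to stay inside the read-side critical section. The equivalent check written with an explicit temporary (a sketch, not part of this patch):

	struct task_struct *task;
	bool is_self;

	rcu_read_lock();
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	is_self = (task == current);
	rcu_read_unlock();
	if (!is_self)
		return -EPERM;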
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index 7ca78346d3f..cfe90a48a6e 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -12,37 +12,37 @@
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
+#include <linux/syslog.h>
#include <asm/uaccess.h>
#include <asm/io.h>
extern wait_queue_head_t log_wait;
-extern int do_syslog(int type, char __user *bug, int count);
-
static int kmsg_open(struct inode * inode, struct file * file)
{
- return do_syslog(1,NULL,0);
+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
}
static int kmsg_release(struct inode * inode, struct file * file)
{
- (void) do_syslog(0,NULL,0);
+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
return 0;
}
static ssize_t kmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
+ if ((file->f_flags & O_NONBLOCK) &&
+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
return -EAGAIN;
- return do_syslog(2, buf, count);
+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
}
static unsigned int kmsg_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &log_wait, wait);
- if (do_syslog(9, NULL, 0))
+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
return POLLIN | POLLRDNORM;
return 0;
}
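The numeric do_syslog() commands map onto the new <linux/syslog.h> names as follows; the mapping is read straight off the replacements above, and only the actions this file uses are listed:

	do_syslog(1, ...)  ->  SYSLOG_ACTION_OPEN
	do_syslog(0, ...)  ->  SYSLOG_ACTION_CLOSE
	do_syslog(2, ...)  ->  SYSLOG_ACTION_READ
	do_syslog(9, ...)  ->  SYSLOG_ACTION_SIZE_UNREAD

The added fourth argument, SYSLOG_FROM_FILE, tags these calls as coming from /proc/kmsg rather than from the syslog(2) system call.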
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 123257bb356..f8650dce74f 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -10,16 +10,19 @@
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/module.h>
#include <asm/prom.h>
#include <asm/uaccess.h>
#include "internal.h"
-#ifndef HAVE_ARCH_DEVTREE_FIXUPS
static inline void set_node_proc_entry(struct device_node *np,
struct proc_dir_entry *de)
{
-}
+#ifdef HAVE_ARCH_DEVTREE_FIXUPS
+ np->pde = de;
#endif
+}
static struct proc_dir_entry *proc_device_tree;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index eae7d9dbf3f..5afd554efad 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -674,7 +674,6 @@ struct list_head *seq_list_start(struct list_head *head, loff_t pos)
return NULL;
}
-
EXPORT_SYMBOL(seq_list_start);
struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
@@ -684,7 +683,6 @@ struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
return seq_list_start(head, pos - 1);
}
-
EXPORT_SYMBOL(seq_list_start_head);
struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
@@ -695,5 +693,131 @@ struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
++*ppos;
return lh == head ? NULL : lh;
}
-
EXPORT_SYMBOL(seq_list_next);
+
+/**
+ * seq_hlist_start - start an iteration of a hlist
+ * @head: the head of the hlist
+ * @pos: the start position of the sequence
+ *
+ * Called at seq_file->op->start().
+ */
+struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos)
+{
+ struct hlist_node *node;
+
+ hlist_for_each(node, head)
+ if (pos-- == 0)
+ return node;
+ return NULL;
+}
+EXPORT_SYMBOL(seq_hlist_start);
+
+/**
+ * seq_hlist_start_head - start an iteration of a hlist
+ * @head: the head of the hlist
+ * @pos: the start position of the sequence
+ *
+ * Called at seq_file->op->start(). Call this function if you want to
+ * print a header at the top of the output.
+ */
+struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos)
+{
+ if (!pos)
+ return SEQ_START_TOKEN;
+
+ return seq_hlist_start(head, pos - 1);
+}
+EXPORT_SYMBOL(seq_hlist_start_head);
+
+/**
+ * seq_hlist_next - move to the next position of the hlist
+ * @v: the current iterator
+ * @head: the head of the hlist
+ * @pos: the current position

+ *
+ * Called at seq_file->op->next().
+ */
+struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
+ loff_t *ppos)
+{
+ struct hlist_node *node = v;
+
+ ++*ppos;
+ if (v == SEQ_START_TOKEN)
+ return head->first;
+ else
+ return node->next;
+}
+EXPORT_SYMBOL(seq_hlist_next);
+
+/**
+ * seq_hlist_start_rcu - start an iteration of a hlist protected by RCU
+ * @head: the head of the hlist
+ * @pos: the start position of the sequence
+ *
+ * Called at seq_file->op->start().
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
+ loff_t pos)
+{
+ struct hlist_node *node;
+
+ __hlist_for_each_rcu(node, head)
+ if (pos-- == 0)
+ return node;
+ return NULL;
+}
+EXPORT_SYMBOL(seq_hlist_start_rcu);
+
+/**
+ * seq_hlist_start_head_rcu - start an iteration of a hlist protected by RCU
+ * @head: the head of the hlist
+ * @pos: the start position of the sequence
+ *
+ * Called at seq_file->op->start(). Call this function if you want to
+ * print a header at the top of the output.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
+ loff_t pos)
+{
+ if (!pos)
+ return SEQ_START_TOKEN;
+
+ return seq_hlist_start_rcu(head, pos - 1);
+}
+EXPORT_SYMBOL(seq_hlist_start_head_rcu);
+
+/**
+ * seq_hlist_next_rcu - move to the next position of the hlist protected by RCU
+ * @v: the current iterator
+ * @head: the head of the hlist
+ * @pos: the current position
+ *
+ * Called at seq_file->op->next().
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+struct hlist_node *seq_hlist_next_rcu(void *v,
+ struct hlist_head *head,
+ loff_t *ppos)
+{
+ struct hlist_node *node = v;
+
+ ++*ppos;
+ if (v == SEQ_START_TOKEN)
+ return rcu_dereference(head->first);
+ else
+ return rcu_dereference(node->next);
+}
+EXPORT_SYMBOL(seq_hlist_next_rcu);
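A minimal, hypothetical user of the new helpers, wiring an hlist into seq_file operations. All "example_*" names are illustrative and not part of this patch; a reader whose list is mutated concurrently would use the _rcu variants under rcu_read_lock() instead:

#include <linux/list.h>
#include <linux/seq_file.h>

struct example_item {
	struct hlist_node node;
	char name[16];
};

static HLIST_HEAD(example_list);

static void *example_seq_start(struct seq_file *m, loff_t *pos)
{
	/* returns SEQ_START_TOKEN at pos 0 so ->show can print a header */
	return seq_hlist_start_head(&example_list, *pos);
}

static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &example_list, pos);
}

static void example_seq_stop(struct seq_file *m, void *v)
{
}

static int example_seq_show(struct seq_file *m, void *v)
{
	struct example_item *item;

	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "name\n");
		return 0;
	}
	item = hlist_entry(v, struct example_item, node);
	seq_printf(m, "%s\n", item->name);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start = example_seq_start,
	.next  = example_seq_next,
	.stop  = example_seq_stop,
	.show  = example_seq_show,
};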
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 56641fe52a2..5c5a366aa33 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -16,7 +16,7 @@
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
-EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6 -funsigned-char
+EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6
XFS_LINUX := linux-2.6
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index 2d3f90afe5f..bc7405585de 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -16,7 +16,6 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/blkdev.h>
@@ -24,8 +23,25 @@
#include "time.h"
#include "kmem.h"
-#define MAX_VMALLOCS 6
-#define MAX_SLAB_SIZE 0x20000
+/*
+ * Greedy allocation. May fail and may return vmalloced memory.
+ *
+ * Must be freed using kmem_free_large.
+ */
+void *
+kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
+{
+ void *ptr;
+ size_t kmsize = maxsize;
+
+ while (!(ptr = kmem_zalloc_large(kmsize))) {
+ if ((kmsize >>= 1) <= minsize)
+ kmsize = minsize;
+ }
+ if (ptr)
+ *size = kmsize;
+ return ptr;
+}
void *
kmem_alloc(size_t size, unsigned int __nocast flags)
@@ -34,19 +50,8 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
-#ifdef DEBUG
- if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
- printk(KERN_WARNING "Large %s attempt, size=%ld\n",
- __func__, (long)size);
- dump_stack();
- }
-#endif
-
do {
- if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
- ptr = kmalloc(size, lflags);
- else
- ptr = __vmalloc(size, lflags, PAGE_KERNEL);
+ ptr = kmalloc(size, lflags);
if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
return ptr;
if (!(++retries % 100))
@@ -68,27 +73,6 @@ kmem_zalloc(size_t size, unsigned int __nocast flags)
return ptr;
}
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
- unsigned int __nocast flags)
-{
- void *ptr;
- size_t kmsize = maxsize;
- unsigned int kmflags = (flags & ~KM_SLEEP) | KM_NOSLEEP;
-
- while (!(ptr = kmem_zalloc(kmsize, kmflags))) {
- if ((kmsize <= minsize) && (flags & KM_NOSLEEP))
- break;
- if ((kmsize >>= 1) <= minsize) {
- kmsize = minsize;
- kmflags = flags;
- }
- }
- if (ptr)
- *size = kmsize;
- return ptr;
-}
-
void
kmem_free(const void *ptr)
{
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 179cbd630f6..f7c8f7a9ea6 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
/*
* General memory allocation interfaces
@@ -30,7 +31,6 @@
#define KM_NOSLEEP 0x0002u
#define KM_NOFS 0x0004u
#define KM_MAYFAIL 0x0008u
-#define KM_LARGE 0x0010u
/*
* We use a special process flag to avoid recursive callbacks into
@@ -42,7 +42,7 @@ kmem_flags_convert(unsigned int __nocast flags)
{
gfp_t lflags;
- BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
+ BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
if (flags & KM_NOSLEEP) {
lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -56,10 +56,25 @@ kmem_flags_convert(unsigned int __nocast flags)
extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast);
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast);
extern void kmem_free(const void *);
+static inline void *kmem_zalloc_large(size_t size)
+{
+ void *ptr;
+
+ ptr = vmalloc(size);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
+}
+static inline void kmem_free_large(void *ptr)
+{
+ vfree(ptr);
+}
+
+extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
+
/*
* Zone interfaces
*/
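A sketch of how the reworked allocation interfaces fit together; the caller below is illustrative and not from this patch. The pairing rule is that anything obtained from kmem_zalloc_greedy() or kmem_zalloc_large() is (or may be) vmalloc'd and must be released with kmem_free_large(), never kmem_free():

	/* Illustrative: ask for up to sixteen pages, accept as little as one. */
	size_t size;
	void *buf;

	buf = kmem_zalloc_greedy(&size, PAGE_SIZE, 16 * PAGE_SIZE);
	if (!buf)
		return ENOMEM;	/* defensive; see the "May fail" note in kmem.c */
	/* ... use buf[0 .. size) ... */
	kmem_free_large(buf);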
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 883ca5ab8af..bf85bbe4a9a 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -106,7 +106,7 @@ xfs_get_acl(struct inode *inode, int type)
struct posix_acl *acl;
struct xfs_acl *xfs_acl;
int len = sizeof(struct xfs_acl);
- char *ea_name;
+ unsigned char *ea_name;
int error;
acl = get_cached_acl(inode, type);
@@ -133,7 +133,8 @@ xfs_get_acl(struct inode *inode, int type)
if (!xfs_acl)
return ERR_PTR(-ENOMEM);
- error = -xfs_attr_get(ip, ea_name, (char *)xfs_acl, &len, ATTR_ROOT);
+ error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl,
+ &len, ATTR_ROOT);
if (error) {
/*
* If the attribute doesn't exist make sure we have a negative
@@ -162,7 +163,7 @@ STATIC int
xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
struct xfs_inode *ip = XFS_I(inode);
- char *ea_name;
+ unsigned char *ea_name;
int error;
if (S_ISLNK(inode->i_mode))
@@ -194,7 +195,7 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
(sizeof(struct xfs_acl_entry) *
(XFS_ACL_MAX_ENTRIES - acl->a_count));
- error = -xfs_attr_set(ip, ea_name, (char *)xfs_acl,
+ error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
len, ATTR_ROOT);
kfree(xfs_acl);
@@ -262,7 +263,7 @@ xfs_set_mode(struct inode *inode, mode_t mode)
}
static int
-xfs_acl_exists(struct inode *inode, char *name)
+xfs_acl_exists(struct inode *inode, unsigned char *name)
{
int len = sizeof(struct xfs_acl);
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 77b8be81c76..6f76ba85f19 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -33,6 +33,7 @@
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
+#include <linux/list_sort.h>
#include "xfs_sb.h"
#include "xfs_inum.h"
@@ -76,6 +77,27 @@ struct workqueue_struct *xfsconvertd_workqueue;
#define xfs_buf_deallocate(bp) \
kmem_zone_free(xfs_buf_zone, (bp));
+static inline int
+xfs_buf_is_vmapped(
+ struct xfs_buf *bp)
+{
+ /*
+ * Return true if the buffer is vmapped.
+ *
+ * The XBF_MAPPED flag is set if the buffer should be mapped, but the
+ * code is clever enough to know it doesn't have to map a single page,
+ * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+ */
+ return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+}
+
+static inline int
+xfs_buf_vmap_len(
+ struct xfs_buf *bp)
+{
+ return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+}
+
/*
* Page Region interfaces.
*
@@ -314,7 +336,7 @@ xfs_buf_free(
if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
uint i;
- if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+ if (xfs_buf_is_vmapped(bp))
free_address(bp->b_addr - bp->b_offset);
for (i = 0; i < bp->b_page_count; i++) {
@@ -1051,22 +1073,30 @@ xfs_buf_ioerror(
}
int
-xfs_bawrite(
- void *mp,
+xfs_bwrite(
+ struct xfs_mount *mp,
struct xfs_buf *bp)
{
- trace_xfs_buf_bawrite(bp, _RET_IP_);
+ int iowait = (bp->b_flags & XBF_ASYNC) == 0;
+ int error = 0;
- ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
+ bp->b_strat = xfs_bdstrat_cb;
+ bp->b_mount = mp;
+ bp->b_flags |= XBF_WRITE;
+ if (!iowait)
+ bp->b_flags |= _XBF_RUN_QUEUES;
xfs_buf_delwri_dequeue(bp);
+ xfs_buf_iostrategy(bp);
- bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
- bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
+ if (iowait) {
+ error = xfs_buf_iowait(bp);
+ if (error)
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+ xfs_buf_relse(bp);
+ }
- bp->b_mount = mp;
- bp->b_strat = xfs_bdstrat_cb;
- return xfs_bdstrat_cb(bp);
+ return error;
}
void
@@ -1085,6 +1115,126 @@ xfs_bdwrite(
xfs_buf_delwri_queue(bp, 1);
}
+/*
+ * Called when we want to stop a buffer from getting written or read.
+ * We attach the EIO error, muck with its flags, and call biodone
+ * so that the proper iodone callbacks get called.
+ */
+STATIC int
+xfs_bioerror(
+ xfs_buf_t *bp)
+{
+#ifdef XFSERRORDEBUG
+ ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
+#endif
+
+ /*
+ * No need to wait until the buffer is unpinned, we aren't flushing it.
+ */
+ XFS_BUF_ERROR(bp, EIO);
+
+ /*
+ * We're calling biodone, so delete XBF_DONE flag.
+ */
+ XFS_BUF_UNREAD(bp);
+ XFS_BUF_UNDELAYWRITE(bp);
+ XFS_BUF_UNDONE(bp);
+ XFS_BUF_STALE(bp);
+
+ XFS_BUF_CLR_BDSTRAT_FUNC(bp);
+ xfs_biodone(bp);
+
+ return EIO;
+}
+
+/*
+ * Same as xfs_bioerror, except that we are releasing the buffer
+ * here ourselves, and avoiding the biodone call.
+ * This is meant for userdata errors; metadata bufs come with
+ * iodone functions attached, so that we can track down errors.
+ */
+STATIC int
+xfs_bioerror_relse(
+ struct xfs_buf *bp)
+{
+ int64_t fl = XFS_BUF_BFLAGS(bp);
+ /*
+ * No need to wait until the buffer is unpinned.
+ * We aren't flushing it.
+ *
+ * chunkhold expects B_DONE to be set, whether
+ * we actually finish the I/O or not. We don't want to
+ * change that interface.
+ */
+ XFS_BUF_UNREAD(bp);
+ XFS_BUF_UNDELAYWRITE(bp);
+ XFS_BUF_DONE(bp);
+ XFS_BUF_STALE(bp);
+ XFS_BUF_CLR_IODONE_FUNC(bp);
+ XFS_BUF_CLR_BDSTRAT_FUNC(bp);
+ if (!(fl & XBF_ASYNC)) {
+ /*
+ * Mark b_error and B_ERROR _both_.
+ * Lots of chunkcache code assumes that.
+ * There's no reason to mark error for
+ * ASYNC buffers.
+ */
+ XFS_BUF_ERROR(bp, EIO);
+ XFS_BUF_FINISH_IOWAIT(bp);
+ } else {
+ xfs_buf_relse(bp);
+ }
+
+ return EIO;
+}
+
+
+/*
+ * All xfs metadata buffers except log state machine buffers
+ * get this attached as their b_bdstrat callback function.
+ * This is so that we can catch a buffer
+ * after prematurely unpinning it to forcibly shutdown the filesystem.
+ */
+int
+xfs_bdstrat_cb(
+ struct xfs_buf *bp)
+{
+ if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
+ /*
+ * Metadata write that didn't get logged but
+ * written delayed anyway. These aren't associated
+ * with a transaction, and can be ignored.
+ */
+ if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
+ return xfs_bioerror_relse(bp);
+ else
+ return xfs_bioerror(bp);
+ }
+
+ xfs_buf_iorequest(bp);
+ return 0;
+}
+
+/*
+ * Wrapper around bdstrat so that we can stop data from going to disk in case
+ * we are shutting down the filesystem. Typically user data goes thru this
+ * path; one of the exceptions is the superblock.
+ */
+void
+xfsbdstrat(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp)
+{
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
+ xfs_bioerror_relse(bp);
+ return;
+ }
+
+ xfs_buf_iorequest(bp);
+}
+
STATIC void
_xfs_buf_ioend(
xfs_buf_t *bp,
@@ -1107,6 +1257,9 @@ xfs_buf_bio_end_io(
xfs_buf_ioerror(bp, -error);
+ if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+ invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
do {
struct page *page = bvec->bv_page;
@@ -1216,6 +1369,10 @@ next_chunk:
submit_io:
if (likely(bio->bi_size)) {
+ if (xfs_buf_is_vmapped(bp)) {
+ flush_kernel_vmap_range(bp->b_addr,
+ xfs_buf_vmap_len(bp));
+ }
submit_bio(rw, bio);
if (size)
goto next_chunk;
@@ -1296,7 +1453,7 @@ xfs_buf_iomove(
xfs_buf_t *bp, /* buffer to process */
size_t boff, /* starting buffer offset */
size_t bsize, /* length to copy */
- caddr_t data, /* data address */
+ void *data, /* data address */
xfs_buf_rw_t mode) /* read/write/zero flag */
{
size_t bend, cpoff, csize;
@@ -1378,8 +1535,8 @@ xfs_alloc_bufhash(
btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
- btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
- sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
+ btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
+ sizeof(xfs_bufhash_t));
for (i = 0; i < (1 << btp->bt_hashshift); i++) {
spin_lock_init(&btp->bt_hash[i].bh_lock);
INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
@@ -1390,7 +1547,7 @@ STATIC void
xfs_free_bufhash(
xfs_buftarg_t *btp)
{
- kmem_free(btp->bt_hash);
+ kmem_free_large(btp->bt_hash);
btp->bt_hash = NULL;
}
@@ -1595,6 +1752,11 @@ xfs_buf_delwri_queue(
list_del(&bp->b_list);
}
+ if (list_empty(dwq)) {
+ /* start xfsbufd as it is about to have something to do */
+ wake_up_process(bp->b_target->bt_task);
+ }
+
bp->b_flags |= _XBF_DELWRI_Q;
list_add_tail(&bp->b_list, dwq);
bp->b_queuetime = jiffies;
@@ -1626,6 +1788,35 @@ xfs_buf_delwri_dequeue(
trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}
+/*
+ * If a delwri buffer needs to be pushed before it has aged out, then promote
+ * it to the head of the delwri queue so that it will be flushed on the next
+ * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
+ * than the age currently needed to flush the buffer. Hence the next time the
+ * xfsbufd sees it is guaranteed to be considered old enough to flush.
+ */
+void
+xfs_buf_delwri_promote(
+ struct xfs_buf *bp)
+{
+ struct xfs_buftarg *btp = bp->b_target;
+ long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
+
+ ASSERT(bp->b_flags & XBF_DELWRI);
+ ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+
+ /*
+ * Check the buffer age before locking the delayed write queue as we
+ * don't need to promote buffers that are already past the flush age.
+ */
+ if (bp->b_queuetime < jiffies - age)
+ return;
+ bp->b_queuetime = jiffies - age;
+ spin_lock(&btp->bt_delwrite_lock);
+ list_move(&bp->b_list, &btp->bt_delwrite_queue);
+ spin_unlock(&btp->bt_delwrite_lock);
+}
+
STATIC void
xfs_buf_runall_queues(
struct workqueue_struct *queue)
@@ -1644,6 +1835,8 @@ xfsbufd_wakeup(
list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
continue;
+ if (list_empty(&btp->bt_delwrite_queue))
+ continue;
set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
wake_up_process(btp->bt_task);
}
@@ -1694,20 +1887,53 @@ xfs_buf_delwri_split(
}
+/*
+ * Compare function is more complex than it needs to be because
+ * the return value is only 32 bits and we are doing comparisons
+ * on 64 bit values
+ */
+static int
+xfs_buf_cmp(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
+ struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
+ xfs_daddr_t diff;
+
+ diff = ap->b_bn - bp->b_bn;
+ if (diff < 0)
+ return -1;
+ if (diff > 0)
+ return 1;
+ return 0;
+}
+
+void
+xfs_buf_delwri_sort(
+ xfs_buftarg_t *target,
+ struct list_head *list)
+{
+ list_sort(NULL, list, xfs_buf_cmp);
+}
+
STATIC int
xfsbufd(
void *data)
{
- struct list_head tmp;
- xfs_buftarg_t *target = (xfs_buftarg_t *)data;
- int count;
- xfs_buf_t *bp;
+ xfs_buftarg_t *target = (xfs_buftarg_t *)data;
current->flags |= PF_MEMALLOC;
set_freezable();
do {
+ long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
+ long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
+ int count = 0;
+ struct list_head tmp;
+
if (unlikely(freezing(current))) {
set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
refrigerator();
@@ -1715,17 +1941,16 @@ xfsbufd(
clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
}
- schedule_timeout_interruptible(
- xfs_buf_timer_centisecs * msecs_to_jiffies(10));
+ /* sleep for a long time if there is nothing to do. */
+ if (list_empty(&target->bt_delwrite_queue))
+ tout = MAX_SCHEDULE_TIMEOUT;
+ schedule_timeout_interruptible(tout);
- xfs_buf_delwri_split(target, &tmp,
- xfs_buf_age_centisecs * msecs_to_jiffies(10));
-
- count = 0;
+ xfs_buf_delwri_split(target, &tmp, age);
+ list_sort(NULL, &tmp, xfs_buf_cmp);
while (!list_empty(&tmp)) {
- bp = list_entry(tmp.next, xfs_buf_t, b_list);
- ASSERT(target == bp->b_target);
-
+ struct xfs_buf *bp;
+ bp = list_first_entry(&tmp, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
xfs_buf_iostrategy(bp);
count++;
@@ -1751,42 +1976,45 @@ xfs_flush_buftarg(
xfs_buftarg_t *target,
int wait)
{
- struct list_head tmp;
- xfs_buf_t *bp, *n;
+ xfs_buf_t *bp;
int pincount = 0;
+ LIST_HEAD(tmp_list);
+ LIST_HEAD(wait_list);
xfs_buf_runall_queues(xfsconvertd_workqueue);
xfs_buf_runall_queues(xfsdatad_workqueue);
xfs_buf_runall_queues(xfslogd_workqueue);
set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
- pincount = xfs_buf_delwri_split(target, &tmp, 0);
+ pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
/*
- * Dropped the delayed write list lock, now walk the temporary list
+ * Dropped the delayed write list lock, now walk the temporary list.
+ * All I/O is issued async and then if we need to wait for completion
+ * we do that after issuing all the IO.
*/
- list_for_each_entry_safe(bp, n, &tmp, b_list) {
+ list_sort(NULL, &tmp_list, xfs_buf_cmp);
+ while (!list_empty(&tmp_list)) {
+ bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
ASSERT(target == bp->b_target);
- if (wait)
+ list_del_init(&bp->b_list);
+ if (wait) {
bp->b_flags &= ~XBF_ASYNC;
- else
- list_del_init(&bp->b_list);
-
+ list_add(&bp->b_list, &wait_list);
+ }
xfs_buf_iostrategy(bp);
}
- if (wait)
+ if (wait) {
+ /* Expedite and wait for IO to complete. */
blk_run_address_space(target->bt_mapping);
+ while (!list_empty(&wait_list)) {
+ bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
- /*
- * Remaining list items must be flushed before returning
- */
- while (!list_empty(&tmp)) {
- bp = list_entry(tmp.next, xfs_buf_t, b_list);
-
- list_del_init(&bp->b_list);
- xfs_iowait(bp);
- xfs_buf_relse(bp);
+ list_del_init(&bp->b_list);
+ xfs_iowait(bp);
+ xfs_buf_relse(bp);
+ }
}
return pincount;
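list_sort(), pulled in via the new <linux/list_sort.h> include, is a merge sort over struct list_head; the comparison callback receives the two list_head pointers plus an opaque priv argument and reaches the containing objects with container_of(), exactly as xfs_buf_cmp() does above. Sorting the delwri list by b_bn before issuing the I/O lets the block layer merge adjacent requests. A condensed sketch of the flush sequence the hunk above establishes (locking and accounting omitted):

	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
	list_sort(NULL, &tmp_list, xfs_buf_cmp);	/* ascending disk order */
	/* issue everything async; sync callers park buffers on wait_list */
	/* then blk_run_address_space() and xfs_iowait() each waited buffer */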
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index a34c7b54822..386e7361e50 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -232,13 +232,17 @@ extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
-extern int xfs_bawrite(void *mp, xfs_buf_t *bp);
+extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
extern void xfs_bdwrite(void *mp, xfs_buf_t *bp);
+
+extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
+extern int xfs_bdstrat_cb(struct xfs_buf *);
+
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern int xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
-extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
+extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t);
static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
@@ -261,6 +265,7 @@ extern int xfs_buf_ispin(xfs_buf_t *);
/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
+extern void xfs_buf_delwri_promote(xfs_buf_t *);
/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
@@ -270,33 +275,19 @@ extern void xfs_buf_terminate(void);
({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
-#define XFS_B_ASYNC XBF_ASYNC
-#define XFS_B_DELWRI XBF_DELWRI
-#define XFS_B_READ XBF_READ
-#define XFS_B_WRITE XBF_WRITE
-#define XFS_B_STALE XBF_STALE
-
-#define XFS_BUF_TRYLOCK XBF_TRYLOCK
-#define XFS_INCORE_TRYLOCK XBF_TRYLOCK
-#define XFS_BUF_LOCK XBF_LOCK
-#define XFS_BUF_MAPPED XBF_MAPPED
-
-#define BUF_BUSY XBF_DONT_BLOCK
-
#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
-#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
-#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
-#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XFS_B_STALE)
+#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XBF_STALE)
+#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
+#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE)
#define XFS_BUF_SUPER_STALE(bp) do { \
XFS_BUF_STALE(bp); \
xfs_buf_delwri_dequeue(bp); \
XFS_BUF_DONE(bp); \
} while (0)
-#define XFS_BUF_MANAGE XBF_FS_MANAGED
#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
@@ -385,31 +376,11 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
#define xfs_biomove(bp, off, len, data, rw) \
xfs_buf_iomove((bp), (off), (len), (data), \
- ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
+ ((rw) == XBF_WRITE) ? XBRW_WRITE : XBRW_READ)
#define xfs_biozero(bp, off, len) \
xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
-
-static inline int XFS_bwrite(xfs_buf_t *bp)
-{
- int iowait = (bp->b_flags & XBF_ASYNC) == 0;
- int error = 0;
-
- if (!iowait)
- bp->b_flags |= _XBF_RUN_QUEUES;
-
- xfs_buf_delwri_dequeue(bp);
- xfs_buf_iostrategy(bp);
- if (iowait) {
- error = xfs_buf_iowait(bp);
- xfs_buf_relse(bp);
- }
- return error;
-}
-
-#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
-
#define xfs_iowait(bp) xfs_buf_iowait(bp)
#define xfs_baread(target, rablkno, ralen) \
@@ -424,6 +395,7 @@ extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
+
#ifdef CONFIG_KDB_MODULES
extern struct list_head *xfs_get_buftarg_list(void);
#endif
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 7501b85fd86..b6918d76bc7 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -79,7 +79,7 @@ xfs_flush_pages(
xfs_iflags_clear(ip, XFS_ITRUNCATED);
ret = -filemap_fdatawrite(mapping);
}
- if (flags & XFS_B_ASYNC)
+ if (flags & XBF_ASYNC)
return ret;
ret2 = xfs_wait_on_pages(ip, first, last);
if (!ret)
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index a034cf62443..4ea1ee18ade 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -447,12 +447,12 @@ xfs_attrlist_by_handle(
int
xfs_attrmulti_attr_get(
struct inode *inode,
- char *name,
- char __user *ubuf,
+ unsigned char *name,
+ unsigned char __user *ubuf,
__uint32_t *len,
__uint32_t flags)
{
- char *kbuf;
+ unsigned char *kbuf;
int error = EFAULT;
if (*len > XATTR_SIZE_MAX)
@@ -476,12 +476,12 @@ xfs_attrmulti_attr_get(
int
xfs_attrmulti_attr_set(
struct inode *inode,
- char *name,
- const char __user *ubuf,
+ unsigned char *name,
+ const unsigned char __user *ubuf,
__uint32_t len,
__uint32_t flags)
{
- char *kbuf;
+ unsigned char *kbuf;
int error = EFAULT;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
@@ -501,7 +501,7 @@ xfs_attrmulti_attr_set(
int
xfs_attrmulti_attr_remove(
struct inode *inode,
- char *name,
+ unsigned char *name,
__uint32_t flags)
{
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
@@ -519,7 +519,7 @@ xfs_attrmulti_by_handle(
xfs_fsop_attrmulti_handlereq_t am_hreq;
struct dentry *dentry;
unsigned int i, size;
- char *attr_name;
+ unsigned char *attr_name;
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
@@ -547,7 +547,7 @@ xfs_attrmulti_by_handle(
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
- ops[i].am_error = strncpy_from_user(attr_name,
+ ops[i].am_error = strncpy_from_user((char *)attr_name,
ops[i].am_attrname, MAXNAMELEN);
if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
error = -ERANGE;
@@ -1431,6 +1431,9 @@ xfs_file_ioctl(
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
+ return -XFS_ERROR(EROFS);
+
if (copy_from_user(&inout, arg, sizeof(inout)))
return -XFS_ERROR(EFAULT);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h
index 7bd7c6afc1e..d56173b34a2 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.h
+++ b/fs/xfs/linux-2.6/xfs_ioctl.h
@@ -45,23 +45,23 @@ xfs_readlink_by_handle(
extern int
xfs_attrmulti_attr_get(
struct inode *inode,
- char *name,
- char __user *ubuf,
+ unsigned char *name,
+ unsigned char __user *ubuf,
__uint32_t *len,
__uint32_t flags);
extern int
- xfs_attrmulti_attr_set(
+xfs_attrmulti_attr_set(
struct inode *inode,
- char *name,
- const char __user *ubuf,
+ unsigned char *name,
+ const unsigned char __user *ubuf,
__uint32_t len,
__uint32_t flags);
extern int
xfs_attrmulti_attr_remove(
struct inode *inode,
- char *name,
+ unsigned char *name,
__uint32_t flags);
extern struct dentry *
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index be1527b1670..0bf6d61f052 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -411,7 +411,7 @@ xfs_compat_attrmulti_by_handle(
compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
struct dentry *dentry;
unsigned int i, size;
- char *attr_name;
+ unsigned char *attr_name;
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
@@ -440,7 +440,7 @@ xfs_compat_attrmulti_by_handle(
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
- ops[i].am_error = strncpy_from_user(attr_name,
+ ops[i].am_error = strncpy_from_user((char *)attr_name,
compat_ptr(ops[i].am_attrname),
MAXNAMELEN);
if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 225946012d0..e8566bbf0f0 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -140,10 +140,10 @@ xfs_init_security(
struct xfs_inode *ip = XFS_I(inode);
size_t length;
void *value;
- char *name;
+ unsigned char *name;
int error;
- error = security_inode_init_security(inode, dir, &name,
+ error = security_inode_init_security(inode, dir, (char **)&name,
&value, &length);
if (error) {
if (error == -EOPNOTSUPP)
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 0d32457abef..eac6f80d786 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -630,18 +630,9 @@ start:
* by root. This keeps people from modifying setuid and
* setgid binaries.
*/
-
- if (((xip->i_d.di_mode & S_ISUID) ||
- ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
- (S_ISGID | S_IXGRP))) &&
- !capable(CAP_FSETID)) {
- error = xfs_write_clear_setuid(xip);
- if (likely(!error))
- error = -file_remove_suid(file);
- if (unlikely(error)) {
- goto out_unlock_internal;
- }
- }
+ error = -file_remove_suid(file);
+ if (unlikely(error))
+ goto out_unlock_internal;
/* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info;
@@ -784,53 +775,6 @@ write_retry:
}
/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
-xfs_bdstrat_cb(struct xfs_buf *bp)
-{
- if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- /*
- * Metadata write that didn't get logged but
- * written delayed anyway. These aren't associated
- * with a transaction, and can be ignored.
- */
- if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
- (XFS_BUF_ISREAD(bp)) == 0)
- return (xfs_bioerror_relse(bp));
- else
- return (xfs_bioerror(bp));
- }
-
- xfs_buf_iorequest(bp);
- return 0;
-}
-
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem. Typically user data goes thru this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- ASSERT(mp);
- if (!XFS_FORCED_SHUTDOWN(mp)) {
- xfs_buf_iorequest(bp);
- return;
- }
-
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- xfs_bioerror_relse(bp);
-}
-
-/*
* If the underlying (data/log/rt) device is readonly, there are some
* operations that cannot proceed.
*/
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index d1f7789c7ff..342ae8c0d01 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -22,9 +22,6 @@ struct xfs_mount;
struct xfs_inode;
struct xfs_buf;
-/* errors from xfsbdstrat() must be extracted from the buffer */
-extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-extern int xfs_bdstrat_cb(struct xfs_buf *);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
extern int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 77414db10dc..25ea2408118 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -877,12 +877,11 @@ xfsaild(
{
struct xfs_ail *ailp = data;
xfs_lsn_t last_pushed_lsn = 0;
- long tout = 0;
+ long tout = 0; /* milliseconds */
while (!kthread_should_stop()) {
- if (tout)
- schedule_timeout_interruptible(msecs_to_jiffies(tout));
- tout = 1000;
+ schedule_timeout_interruptible(tout ?
+ msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
/* swsusp */
try_to_freeze();
@@ -1022,12 +1021,45 @@ xfs_fs_dirty_inode(
XFS_I(inode)->i_update_core = 1;
}
-/*
- * Attempt to flush the inode, this will actually fail
- * if the inode is pinned, but we dirty the inode again
- * at the point when it is unpinned after a log write,
- * since this is when the inode itself becomes flushable.
- */
+STATIC int
+xfs_log_inode(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+ error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ /* we need to return with the lock held shared */
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ /*
+ * Note - it's possible that we might have pushed ourselves out of the
+ * way during trans_reserve which would flush the inode. But there's
+ * no guarantee that the inode buffer has actually gone out yet (it's
+ * delwri). Plus the buffer could be pinned anyway if it's part of
+ * an inode in another recent transaction. So we play it safe and
+ * fire off the transaction anyway.
+ */
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ihold(tp, ip);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp, 0);
+ xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
+
+ return error;
+}
+
STATIC int
xfs_fs_write_inode(
struct inode *inode,
@@ -1035,7 +1067,7 @@ xfs_fs_write_inode(
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- int error = 0;
+ int error = EAGAIN;
xfs_itrace_entry(ip);
@@ -1046,35 +1078,55 @@ xfs_fs_write_inode(
error = xfs_wait_on_pages(ip, 0, -1);
if (error)
goto out;
- }
-
- /*
- * Bypass inodes which have already been cleaned by
- * the inode flush clustering code inside xfs_iflush
- */
- if (xfs_inode_clean(ip))
- goto out;
- /*
- * We make this non-blocking if the inode is contended, return
- * EAGAIN to indicate to the caller that they did not succeed.
- * This prevents the flush path from blocking on inodes inside
- * another operation right now, they get caught later by xfs_sync.
- */
- if (sync) {
+ /*
+ * Make sure the inode has hit stable storage. By using the
+ * log and the fsync transactions we reduce the IOs we have
+ * to do here from two (log and inode) to just the log.
+ *
+ * Note: We still need to do a delwri write of the inode after
+ * this to flush it to the backing buffer so that bulkstat
+ * works properly if this is the first time the inode has been
+ * written. Because we hold the ilock atomically over the
+ * transaction commit and the inode flush we are guaranteed
+ * that the inode is not pinned when it returns. If the flush
+ * lock is already held, then the inode has already been
+ * flushed once and we don't need to flush it again. Hence
+ * the code will only flush the inode if it isn't already
+ * being flushed.
+ */
xfs_ilock(ip, XFS_ILOCK_SHARED);
- xfs_iflock(ip);
-
- error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
+ if (ip->i_update_core) {
+ error = xfs_log_inode(ip);
+ if (error)
+ goto out_unlock;
+ }
} else {
- error = EAGAIN;
+ /*
+ * We make this non-blocking if the inode is contended, return
+ * EAGAIN to indicate to the caller that they did not succeed.
+ * This prevents the flush path from blocking on inodes inside
+ * another operation right now, they get caught later by xfs_sync.
+ */
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
goto out;
- if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
- goto out_unlock;
+ }
- error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK);
+ if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
+ goto out_unlock;
+
+ /*
+ * Now we have the flush lock and the inode is not pinned, we can check
+ * if the inode is really clean as we know that there are no pending
+ * transaction completions, it is not waiting on the delayed write
+ * queue and there is no IO in progress.
+ */
+ if (xfs_inode_clean(ip)) {
+ xfs_ifunlock(ip);
+ error = 0;
+ goto out_unlock;
}
+ error = xfs_iflush(ip, 0);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -1257,6 +1309,29 @@ xfs_fs_statfs(
return 0;
}
+STATIC void
+xfs_save_resvblks(struct xfs_mount *mp)
+{
+ __uint64_t resblks = 0;
+
+ mp->m_resblks_save = mp->m_resblks;
+ xfs_reserve_blocks(mp, &resblks, NULL);
+}
+
+STATIC void
+xfs_restore_resvblks(struct xfs_mount *mp)
+{
+ __uint64_t resblks;
+
+ if (mp->m_resblks_save) {
+ resblks = mp->m_resblks_save;
+ mp->m_resblks_save = 0;
+ } else
+ resblks = xfs_default_resblks(mp);
+
+ xfs_reserve_blocks(mp, &resblks, NULL);
+}
+
STATIC int
xfs_fs_remount(
struct super_block *sb,
@@ -1336,11 +1411,27 @@ xfs_fs_remount(
}
mp->m_update_flags = 0;
}
+
+ /*
+ * Fill out the reserve pool if it is empty. Use the stashed
+ * value if it is non-zero, otherwise go with the default.
+ */
+ xfs_restore_resvblks(mp);
}
/* rw -> ro */
if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+ /*
+ * After we have synced the data but before we sync the
+ * metadata, we need to free up the reserve block pool so that
+ * the used block count in the superblock on disk is correct at
+ * the end of the remount. Stash the current reserve pool size
+ * so that if we get remounted rw, we can return it to the same
+ * size.
+ */
+
xfs_quiesce_data(mp);
+ xfs_save_resvblks(mp);
xfs_quiesce_attr(mp);
mp->m_flags |= XFS_MOUNT_RDONLY;
}
@@ -1359,11 +1450,22 @@ xfs_fs_freeze(
{
struct xfs_mount *mp = XFS_M(sb);
+ xfs_save_resvblks(mp);
xfs_quiesce_attr(mp);
return -xfs_fs_log_dummy(mp);
}
STATIC int
+xfs_fs_unfreeze(
+ struct super_block *sb)
+{
+ struct xfs_mount *mp = XFS_M(sb);
+
+ xfs_restore_resvblks(mp);
+ return 0;
+}
+
+STATIC int
xfs_fs_show_options(
struct seq_file *m,
struct vfsmount *mnt)
@@ -1585,6 +1687,7 @@ static const struct super_operations xfs_super_operations = {
.put_super = xfs_fs_put_super,
.sync_fs = xfs_fs_sync_fs,
.freeze_fs = xfs_fs_freeze,
+ .unfreeze_fs = xfs_fs_unfreeze,
.statfs = xfs_fs_statfs,
.remount_fs = xfs_fs_remount,
.show_options = xfs_fs_show_options,
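
The xfs_super.c hunks above stash the reserve block pool before freeze or a
rw->ro remount and refill it on unfreeze or a ro->rw remount. Below is a
minimal, self-contained userspace model of that save/restore pattern; the
struct fields and the 8192-block default are illustrative assumptions, not
the kernel's definitions.

/*
 * Userspace sketch of the reserve-pool save/restore pattern. Emptying the
 * pool while frozen/read-only keeps the on-disk free-space count exact;
 * the stashed size (or a default) is reinstated once writes resume.
 */
#include <stdint.h>
#include <stdio.h>

struct mount_model {
	uint64_t resblks;	/* blocks currently held in the reserve pool */
	uint64_t resblks_save;	/* size stashed across freeze / ro remount */
};

static uint64_t default_resblks(void) { return 8192; }	/* assumption */

static void save_resvblks(struct mount_model *mp)
{
	mp->resblks_save = mp->resblks;
	mp->resblks = 0;			/* release the pool */
}

static void restore_resvblks(struct mount_model *mp)
{
	mp->resblks = mp->resblks_save ? mp->resblks_save : default_resblks();
	mp->resblks_save = 0;
}

int main(void)
{
	struct mount_model mp = { .resblks = 1024 };

	save_resvblks(&mp);		/* freeze or rw -> ro remount */
	printf("frozen: resblks=%llu saved=%llu\n",
	       (unsigned long long)mp.resblks,
	       (unsigned long long)mp.resblks_save);

	restore_resvblks(&mp);		/* unfreeze or ro -> rw remount */
	printf("thawed: resblks=%llu\n", (unsigned long long)mp.resblks);
	return 0;
}
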
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 1f5e4bb5e97..a9f6d20aff4 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -90,14 +90,13 @@ xfs_inode_ag_lookup(
STATIC int
xfs_inode_ag_walk(
struct xfs_mount *mp,
- xfs_agnumber_t ag,
+ struct xfs_perag *pag,
int (*execute)(struct xfs_inode *ip,
struct xfs_perag *pag, int flags),
int flags,
int tag,
int exclusive)
{
- struct xfs_perag *pag = &mp->m_perag[ag];
uint32_t first_index;
int last_error = 0;
int skipped;
@@ -141,8 +140,6 @@ restart:
delay(1);
goto restart;
}
-
- xfs_put_perag(mp, pag);
return last_error;
}
@@ -160,10 +157,16 @@ xfs_inode_ag_iterator(
xfs_agnumber_t ag;
for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
- if (!mp->m_perag[ag].pag_ici_init)
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, ag);
+ if (!pag->pag_ici_init) {
+ xfs_perag_put(pag);
continue;
- error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
+ }
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
exclusive);
+ xfs_perag_put(pag);
if (error) {
last_error = error;
if (error == EFSCORRUPTED)
@@ -231,7 +234,7 @@ xfs_sync_inode_data(
}
error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
- 0 : XFS_B_ASYNC, FI_NONE);
+ 0 : XBF_ASYNC, FI_NONE);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
out_wait:
@@ -267,8 +270,7 @@ xfs_sync_inode_attr(
goto out_unlock;
}
- error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
- XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);
+ error = xfs_iflush(ip, flags);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -293,10 +295,7 @@ xfs_sync_data(
if (error)
return XFS_ERROR(error);
- xfs_log_force(mp, 0,
- (flags & SYNC_WAIT) ?
- XFS_LOG_FORCE | XFS_LOG_SYNC :
- XFS_LOG_FORCE);
+ xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
return 0;
}
@@ -322,10 +321,6 @@ xfs_commit_dummy_trans(
struct xfs_inode *ip = mp->m_rootip;
struct xfs_trans *tp;
int error;
- int log_flags = XFS_LOG_FORCE;
-
- if (flags & SYNC_WAIT)
- log_flags |= XFS_LOG_SYNC;
/*
* Put a dummy transaction in the log to tell recovery
@@ -347,11 +342,11 @@ xfs_commit_dummy_trans(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
/* the log force ensures this transaction is pushed to disk */
- xfs_log_force(mp, 0, log_flags);
+ xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
return error;
}
-int
+STATIC int
xfs_sync_fsdata(
struct xfs_mount *mp,
int flags)
@@ -367,7 +362,7 @@ xfs_sync_fsdata(
if (flags & SYNC_TRYLOCK) {
ASSERT(!(flags & SYNC_WAIT));
- bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
+ bp = xfs_getsb(mp, XBF_TRYLOCK);
if (!bp)
goto out;
@@ -387,7 +382,7 @@ xfs_sync_fsdata(
* become pinned in between there and here.
*/
if (XFS_BUF_ISPINNED(bp))
- xfs_log_force(mp, 0, XFS_LOG_FORCE);
+ xfs_log_force(mp, 0);
}
@@ -448,9 +443,6 @@ xfs_quiesce_data(
xfs_sync_data(mp, SYNC_WAIT);
xfs_qm_sync(mp, SYNC_WAIT);
- /* drop inode references pinned by filestreams */
- xfs_filestream_flush(mp);
-
/* write superblock and hoover up shutdown errors */
error = xfs_sync_fsdata(mp, SYNC_WAIT);
@@ -467,16 +459,18 @@ xfs_quiesce_fs(
{
int count = 0, pincount;
+ xfs_reclaim_inodes(mp, 0);
xfs_flush_buftarg(mp->m_ddev_targp, 0);
- xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
/*
* This loop must run at least twice. The first instance of the loop
* will flush most meta data but that will generate more meta data
* (typically directory updates). Which then must be flushed and
- * logged before we can write the unmount record.
+ * logged before we can write the unmount record. We also do sync
+ * reclaim of inodes to catch any that the above delwri flush skipped.
*/
do {
+ xfs_reclaim_inodes(mp, SYNC_WAIT);
xfs_sync_attr(mp, SYNC_WAIT);
pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
if (!pincount) {
@@ -575,7 +569,7 @@ xfs_flush_inodes(
igrab(inode);
xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
wait_for_completion(&completion);
- xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
+ xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}
/*
@@ -591,8 +585,8 @@ xfs_sync_worker(
int error;
if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
- xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
+ xfs_log_force(mp, 0);
+ xfs_reclaim_inodes(mp, 0);
/* dgc: errors ignored here */
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
@@ -690,16 +684,17 @@ void
xfs_inode_set_reclaim_tag(
xfs_inode_t *ip)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
read_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
__xfs_inode_set_reclaim_tag(pag, ip);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
}
void
@@ -712,12 +707,64 @@ __xfs_inode_clear_reclaim_tag(
XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}
+/*
+ * Inodes in different states need to be treated differently, and the return
+ * value of xfs_iflush is not sufficient to get this right. The following table
+ * lists the inode states and the reclaim actions necessary for non-blocking
+ * reclaim:
+ *
+ *
+ * inode state iflush ret required action
+ * --------------- ---------- ---------------
+ * bad - reclaim
+ * shutdown EIO unpin and reclaim
+ * clean, unpinned 0 reclaim
+ * stale, unpinned 0 reclaim
+ * clean, pinned(*) 0 requeue
+ * stale, pinned EAGAIN requeue
+ * dirty, delwri ok 0 requeue
+ * dirty, delwri blocked EAGAIN requeue
+ * dirty, sync flush 0 reclaim
+ *
+ * (*) dgc: I don't think the clean, pinned state is possible but it gets
+ * handled anyway given the order of checks implemented.
+ *
+ * As can be seen from the table, the return value of xfs_iflush() is not
+ * sufficient to correctly decide the reclaim action here. The checks in
+ * xfs_iflush() might look like duplicates, but they are not.
+ *
+ * Also, because we get the flush lock first, we know that any inode that has
+ * been flushed delwri has had the flush completed by the time we check that
+ * the inode is clean. The clean inode check needs to be done before flushing
+ * the inode delwri otherwise we would loop forever requeuing clean inodes as
+ * we cannot tell apart a successful delwri flush and a clean inode from the
+ * return value of xfs_iflush().
+ *
+ * Note that because the inode is flushed delayed write by background
+ * writeback, the flush lock may already be held here and waiting on it can
+ * result in very long latencies. Hence for sync reclaims, where we wait on the
+ * flush lock, the caller should push out delayed write inodes first before
+ * trying to reclaim them to minimise the amount of time spent waiting. For
+ * background reclaim, we just requeue the inode for the next pass.
+ *
+ * Hence the order of actions after gaining the locks should be:
+ * bad => reclaim
+ * shutdown => unpin and reclaim
+ * pinned, delwri => requeue
+ * pinned, sync => unpin
+ * stale => reclaim
+ * clean => reclaim
+ * dirty, delwri => flush and requeue
+ * dirty, sync => flush, wait and reclaim
+ */
STATIC int
xfs_reclaim_inode(
struct xfs_inode *ip,
struct xfs_perag *pag,
int sync_mode)
{
+ int error = 0;
+
/*
* The radix tree lock here protects a thread in xfs_iget from racing
* with us starting reclaim on the inode. Once we have the
@@ -735,33 +782,70 @@ xfs_reclaim_inode(
spin_unlock(&ip->i_flags_lock);
write_unlock(&pag->pag_ici_lock);
- /*
- * If the inode is still dirty, then flush it out. If the inode
- * is not in the AIL, then it will be OK to flush it delwri as
- * long as xfs_iflush() does not keep any references to the inode.
- * We leave that decision up to xfs_iflush() since it has the
- * knowledge of whether it's OK to simply do a delwri flush of
- * the inode or whether we need to wait until the inode is
- * pulled from the AIL.
- * We get the flush lock regardless, though, just to make sure
- * we don't free it while it is being flushed.
- */
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_iflock(ip);
+ if (!xfs_iflock_nowait(ip)) {
+ if (!(sync_mode & SYNC_WAIT))
+ goto out;
+ xfs_iflock(ip);
+ }
+
+ if (is_bad_inode(VFS_I(ip)))
+ goto reclaim;
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_iunpin_wait(ip);
+ goto reclaim;
+ }
+ if (xfs_ipincount(ip)) {
+ if (!(sync_mode & SYNC_WAIT)) {
+ xfs_ifunlock(ip);
+ goto out;
+ }
+ xfs_iunpin_wait(ip);
+ }
+ if (xfs_iflags_test(ip, XFS_ISTALE))
+ goto reclaim;
+ if (xfs_inode_clean(ip))
+ goto reclaim;
+
+ /* Now we have an inode that needs flushing */
+ error = xfs_iflush(ip, sync_mode);
+ if (sync_mode & SYNC_WAIT) {
+ xfs_iflock(ip);
+ goto reclaim;
+ }
/*
- * In the case of a forced shutdown we rely on xfs_iflush() to
- * wait for the inode to be unpinned before returning an error.
+ * When we have to flush an inode but don't have SYNC_WAIT set, we
+ * flush the inode out using a delwri buffer and wait for the next
+ * call into reclaim to find it in a clean state instead of waiting for
+ * it now. We also don't return errors here - if the error is transient
+ * then the next reclaim pass will flush the inode, and if the error
+ * is permanent then the next sync reclaim will reclaim the inode and
+ * pass on the error.
*/
- if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
- /* synchronize with xfs_iflush_done */
- xfs_iflock(ip);
- xfs_ifunlock(ip);
+ if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+ "inode 0x%llx background reclaim flush failed with %d",
+ (long long)ip->i_ino, error);
}
+out:
+ xfs_iflags_clear(ip, XFS_IRECLAIM);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ /*
+ * We could return EAGAIN here to make reclaim rescan the inode tree in
+ * a short while. However, this just burns CPU time scanning the tree
+ * waiting for IO to complete and xfssyncd never goes back to the idle
+ * state. Instead, return 0 to let the next scheduled background reclaim
+ * attempt to reclaim the inode again.
+ */
+ return 0;
+reclaim:
+ xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_ireclaim(ip);
- return 0;
+ return error;
+
}
int
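
The state table in the comment above fixes a definite order of checks for
non-blocking reclaim. The standalone sketch below encodes that ordering as a
decision function; the state flags and action names are illustrative
assumptions, not the kernel's inode flags or the xfs_iflush() interface.

/*
 * Sketch of the reclaim decision order described above:
 * bad -> reclaim, shutdown -> unpin and reclaim, pinned -> requeue
 * (background) or unpin (sync), stale/clean -> reclaim, dirty -> flush.
 */
#include <stdbool.h>
#include <stdio.h>

enum action { REQUEUE, RECLAIM, FLUSH_AND_REQUEUE, FLUSH_WAIT_AND_RECLAIM };

struct istate {
	bool bad, shutdown, pinned, stale, clean, sync_wait;
};

static enum action reclaim_decide(const struct istate *s)
{
	if (s->bad)
		return RECLAIM;
	if (s->shutdown)
		return RECLAIM;			/* after waiting for unpin */
	if (s->pinned && !s->sync_wait)
		return REQUEUE;			/* background: never block */
	if (s->stale || s->clean)
		return RECLAIM;
	return s->sync_wait ? FLUSH_WAIT_AND_RECLAIM : FLUSH_AND_REQUEUE;
}

int main(void)
{
	struct istate dirty_bg = { .sync_wait = false };
	struct istate pinned_bg = { .pinned = true };

	printf("dirty, background:  %d (flush and requeue)\n",
	       reclaim_decide(&dirty_bg));
	printf("pinned, background: %d (requeue)\n",
	       reclaim_decide(&pinned_bg));
	return 0;
}
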
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index ea932b43335..d480c346cab 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -37,7 +37,6 @@ void xfs_syncd_stop(struct xfs_mount *mp);
int xfs_sync_attr(struct xfs_mount *mp, int flags);
int xfs_sync_data(struct xfs_mount *mp, int flags);
-int xfs_sync_fsdata(struct xfs_mount *mp, int flags);
int xfs_quiesce_data(struct xfs_mount *mp);
void xfs_quiesce_attr(struct xfs_mount *mp);
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index c22a608321a..a4574dcf506 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -78,6 +78,33 @@ DECLARE_EVENT_CLASS(xfs_attr_list_class,
)
)
+#define DEFINE_PERAG_REF_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
+ unsigned long caller_ip), \
+ TP_ARGS(mp, agno, refcount, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_agnumber_t, agno) \
+ __field(int, refcount) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = mp->m_super->s_dev; \
+ __entry->agno = agno; \
+ __entry->refcount = refcount; \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d agno %u refcount %d caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->agno, \
+ __entry->refcount, \
+ (char *)__entry->caller_ip) \
+);
+
+DEFINE_PERAG_REF_EVENT(xfs_perag_get)
+DEFINE_PERAG_REF_EVENT(xfs_perag_put)
+
#define DEFINE_ATTR_LIST_EVENT(name) \
DEFINE_EVENT(xfs_attr_list_class, name, \
TP_PROTO(struct xfs_attr_list_context *ctx), \
@@ -456,6 +483,7 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
@@ -1414,6 +1442,59 @@ TRACE_EVENT(xfs_dir2_leafn_moveents,
__entry->count)
);
+#define XFS_SWAPEXT_INODES \
+ { 0, "target" }, \
+ { 1, "temp" }
+
+#define XFS_INODE_FORMAT_STR \
+ { 0, "invalid" }, \
+ { 1, "local" }, \
+ { 2, "extent" }, \
+ { 3, "btree" }
+
+DECLARE_EVENT_CLASS(xfs_swap_extent_class,
+ TP_PROTO(struct xfs_inode *ip, int which),
+ TP_ARGS(ip, which),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, which)
+ __field(xfs_ino_t, ino)
+ __field(int, format)
+ __field(int, nex)
+ __field(int, max_nex)
+ __field(int, broot_size)
+ __field(int, fork_off)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->which = which;
+ __entry->ino = ip->i_ino;
+ __entry->format = ip->i_d.di_format;
+ __entry->nex = ip->i_d.di_nextents;
+ __entry->max_nex = ip->i_df.if_ext_max;
+ __entry->broot_size = ip->i_df.if_broot_bytes;
+ __entry->fork_off = XFS_IFORK_BOFF(ip);
+ ),
+ TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
+ "Max in-fork extents %d, broot size %d, fork offset %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
+ __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
+ __entry->nex,
+ __entry->max_nex,
+ __entry->broot_size,
+ __entry->fork_off)
+)
+
+#define DEFINE_SWAPEXT_EVENT(name) \
+DEFINE_EVENT(xfs_swap_extent_class, name, \
+ TP_PROTO(struct xfs_inode *ip, int which), \
+ TP_ARGS(ip, which))
+
+DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
+DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/linux-2.6/xfs_xattr.c b/fs/xfs/linux-2.6/xfs_xattr.c
index 0b1878857fc..fa01b9daba6 100644
--- a/fs/xfs/linux-2.6/xfs_xattr.c
+++ b/fs/xfs/linux-2.6/xfs_xattr.c
@@ -45,7 +45,7 @@ xfs_xattr_get(struct dentry *dentry, const char *name,
value = NULL;
}
- error = -xfs_attr_get(ip, name, value, &asize, xflags);
+ error = -xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags);
if (error)
return error;
return asize;
@@ -67,8 +67,9 @@ xfs_xattr_set(struct dentry *dentry, const char *name, const void *value,
xflags |= ATTR_REPLACE;
if (!value)
- return -xfs_attr_remove(ip, name, xflags);
- return -xfs_attr_set(ip, name, (void *)value, size, xflags);
+ return -xfs_attr_remove(ip, (unsigned char *)name, xflags);
+ return -xfs_attr_set(ip, (unsigned char *)name,
+ (void *)value, size, xflags);
}
static struct xattr_handler xfs_xattr_user_handler = {
@@ -124,8 +125,13 @@ static const char *xfs_xattr_prefix(int flags)
}
static int
-xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags,
- char *name, int namelen, int valuelen, char *value)
+xfs_xattr_put_listent(
+ struct xfs_attr_list_context *context,
+ int flags,
+ unsigned char *name,
+ int namelen,
+ int valuelen,
+ unsigned char *value)
{
unsigned int prefix_len = xfs_xattr_prefix_len(flags);
char *offset;
@@ -148,7 +154,7 @@ xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags,
offset = (char *)context->alist + context->count;
strncpy(offset, xfs_xattr_prefix(flags), prefix_len);
offset += prefix_len;
- strncpy(offset, name, namelen); /* real name */
+ strncpy(offset, (char *)name, namelen); /* real name */
offset += namelen;
*offset = '\0';
context->count += prefix_len + namelen + 1;
@@ -156,8 +162,13 @@ xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags,
}
static int
-xfs_xattr_put_listent_sizes(struct xfs_attr_list_context *context, int flags,
- char *name, int namelen, int valuelen, char *value)
+xfs_xattr_put_listent_sizes(
+ struct xfs_attr_list_context *context,
+ int flags,
+ unsigned char *name,
+ int namelen,
+ int valuelen,
+ unsigned char *value)
{
context->count += xfs_xattr_prefix_len(flags) + namelen + 1;
return 0;
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index d7c7eea09fc..5f79dd78626 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -1187,7 +1187,7 @@ xfs_qm_dqflush(
* block, nada.
*/
if (!XFS_DQ_IS_DIRTY(dqp) ||
- (!(flags & XFS_QMOPT_SYNC) && atomic_read(&dqp->q_pincount) > 0)) {
+ (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) {
xfs_dqfunlock(dqp);
return 0;
}
@@ -1248,23 +1248,20 @@ xfs_qm_dqflush(
*/
if (XFS_BUF_ISPINNED(bp)) {
trace_xfs_dqflush_force(dqp);
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+ xfs_log_force(mp, 0);
}
- if (flags & XFS_QMOPT_DELWRI) {
- xfs_bdwrite(mp, bp);
- } else if (flags & XFS_QMOPT_ASYNC) {
- error = xfs_bawrite(mp, bp);
- } else {
+ if (flags & SYNC_WAIT)
error = xfs_bwrite(mp, bp);
- }
+ else
+ xfs_bdwrite(mp, bp);
trace_xfs_dqflush_done(dqp);
/*
* dqp is still locked, but caller is free to unlock it now.
*/
- return (error);
+ return error;
}
@@ -1445,7 +1442,7 @@ xfs_qm_dqpurge(
* We don't care about getting disk errors here. We need
* to purge this dquot anyway, so we go ahead regardless.
*/
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
+ error = xfs_qm_dqflush(dqp, SYNC_WAIT);
if (error)
xfs_fs_cmn_err(CE_WARN, mp,
"xfs_qm_dqpurge: dquot %p flush failed", dqp);
@@ -1529,25 +1526,17 @@ xfs_qm_dqflock_pushbuf_wait(
* the flush lock when the I/O completes.
*/
bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(dqp->q_mount),
- XFS_INCORE_TRYLOCK);
- if (bp != NULL) {
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- int error;
- if (XFS_BUF_ISPINNED(bp)) {
- xfs_log_force(dqp->q_mount,
- (xfs_lsn_t)0,
- XFS_LOG_FORCE);
- }
- error = xfs_bawrite(dqp->q_mount, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dqflock_pushbuf_wait: "
- "pushbuf error %d on dqp %p, bp %p",
- error, dqp, bp);
- } else {
- xfs_buf_relse(bp);
- }
+ XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK);
+ if (!bp)
+ goto out_lock;
+
+ if (XFS_BUF_ISDELAYWRITE(bp)) {
+ if (XFS_BUF_ISPINNED(bp))
+ xfs_log_force(dqp->q_mount, 0);
+ xfs_buf_delwri_promote(bp);
+ wake_up_process(bp->b_target->bt_task);
}
+ xfs_buf_relse(bp);
+out_lock:
xfs_dqflock(dqp);
}
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index d0d4a9a0bbd..4e4ee9a5719 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -74,11 +74,11 @@ xfs_qm_dquot_logitem_format(
logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
logvec->i_len = sizeof(xfs_dq_logformat_t);
- XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT);
+ logvec->i_type = XLOG_REG_TYPE_QFORMAT;
logvec++;
logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
logvec->i_len = sizeof(xfs_disk_dquot_t);
- XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT);
+ logvec->i_type = XLOG_REG_TYPE_DQUOT;
ASSERT(2 == logitem->qli_item.li_desc->lid_size);
logitem->qli_format.qlf_size = 2;
@@ -153,7 +153,7 @@ xfs_qm_dquot_logitem_push(
* lock without sleeping, then there must not have been
* anyone in the process of flushing the dquot.
*/
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+ error = xfs_qm_dqflush(dqp, 0);
if (error)
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
"xfs_qm_dquot_logitem_push: push error %d on dqp %p",
@@ -190,7 +190,7 @@ xfs_qm_dqunpin_wait(
/*
* Give the log a push so we don't wait here too long.
*/
- xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
+ xfs_log_force(dqp->q_mount, 0);
wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}
@@ -212,68 +212,31 @@ xfs_qm_dquot_logitem_pushbuf(
xfs_dquot_t *dqp;
xfs_mount_t *mp;
xfs_buf_t *bp;
- uint dopush;
dqp = qip->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
/*
- * The qli_pushbuf_flag keeps others from
- * trying to duplicate our effort.
- */
- ASSERT(qip->qli_pushbuf_flag != 0);
- ASSERT(qip->qli_push_owner == current_pid());
-
- /*
* If flushlock isn't locked anymore, chances are that the
* inode flush completed and the inode was taken off the AIL.
* So, just get out.
*/
if (completion_done(&dqp->q_flush) ||
((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
- qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp);
return;
}
mp = dqp->q_mount;
bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
- XFS_QI_DQCHUNKLEN(mp),
- XFS_INCORE_TRYLOCK);
- if (bp != NULL) {
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
- !completion_done(&dqp->q_flush));
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
-
- if (XFS_BUF_ISPINNED(bp)) {
- xfs_log_force(mp, (xfs_lsn_t)0,
- XFS_LOG_FORCE);
- }
- if (dopush) {
- int error;
-#ifdef XFSRACEDEBUG
- delay_for_intr();
- delay(300);
-#endif
- error = xfs_bawrite(mp, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
- error, qip, bp);
- } else {
- xfs_buf_relse(bp);
- }
- } else {
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
- xfs_buf_relse(bp);
- }
+ XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK);
+ xfs_dqunlock(dqp);
+ if (!bp)
return;
- }
+ if (XFS_BUF_ISDELAYWRITE(bp))
+ xfs_buf_delwri_promote(bp);
+ xfs_buf_relse(bp);
+ return;
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
}
/*
@@ -291,50 +254,24 @@ xfs_qm_dquot_logitem_trylock(
xfs_dq_logitem_t *qip)
{
xfs_dquot_t *dqp;
- uint retval;
dqp = qip->qli_dquot;
if (atomic_read(&dqp->q_pincount) > 0)
- return (XFS_ITEM_PINNED);
+ return XFS_ITEM_PINNED;
if (! xfs_qm_dqlock_nowait(dqp))
- return (XFS_ITEM_LOCKED);
+ return XFS_ITEM_LOCKED;
- retval = XFS_ITEM_SUCCESS;
if (!xfs_dqflock_nowait(dqp)) {
/*
- * The dquot is already being flushed. It may have been
- * flushed delayed write, however, and we don't want to
- * get stuck waiting for that to complete. So, we want to check
- * to see if we can lock the dquot's buffer without sleeping.
- * If we can and it is marked for delayed write, then we
- * hold it and send it out from the push routine. We don't
- * want to do that now since we might sleep in the device
- * strategy routine. We also don't want to grab the buffer lock
- * here because we'd like not to call into the buffer cache
- * while holding the AIL lock.
- * Make sure to only return PUSHBUF if we set pushbuf_flag
- * ourselves. If someone else is doing it then we don't
- * want to go to the push routine and duplicate their efforts.
+ * dquot has already been flushed to the backing buffer,
+ * leave it locked; the pushbuf routine will unlock it.
*/
- if (qip->qli_pushbuf_flag == 0) {
- qip->qli_pushbuf_flag = 1;
- ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
-#ifdef DEBUG
- qip->qli_push_owner = current_pid();
-#endif
- /*
- * The dquot is left locked.
- */
- retval = XFS_ITEM_PUSHBUF;
- } else {
- retval = XFS_ITEM_FLUSHING;
- xfs_dqunlock_nonotify(dqp);
- }
+ return XFS_ITEM_PUSHBUF;
}
ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
- return (retval);
+ return XFS_ITEM_SUCCESS;
}
@@ -467,7 +404,7 @@ xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,
log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
log_vector->i_len = sizeof(xfs_qoff_logitem_t);
- XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_QUOTAOFF);
+ log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
qf->qql_format.qf_size = 1;
}
diff --git a/fs/xfs/quota/xfs_dquot_item.h b/fs/xfs/quota/xfs_dquot_item.h
index 5a632531f84..5acae2ada70 100644
--- a/fs/xfs/quota/xfs_dquot_item.h
+++ b/fs/xfs/quota/xfs_dquot_item.h
@@ -27,10 +27,6 @@ typedef struct xfs_dq_logitem {
xfs_log_item_t qli_item; /* common portion */
struct xfs_dquot *qli_dquot; /* dquot ptr */
xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
- unsigned short qli_pushbuf_flag; /* 1 bit used in push_ail */
-#ifdef DEBUG
- uint64_t qli_push_owner;
-#endif
xfs_dq_logformat_t qli_format; /* logged structure */
} xfs_dq_logitem_t;
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 9e627a8b5b0..417e61e3d9d 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -118,9 +118,14 @@ xfs_Gqm_init(void)
*/
udqhash = kmem_zalloc_greedy(&hsize,
XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
- XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
- KM_SLEEP | KM_MAYFAIL | KM_LARGE);
- gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
+ XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
+ if (!udqhash)
+ goto out;
+
+ gdqhash = kmem_zalloc_large(hsize);
+ if (!gdqhash)
+ goto out_free_udqhash;
+
hsize /= sizeof(xfs_dqhash_t);
ndquot = hsize << 8;
@@ -170,6 +175,11 @@ xfs_Gqm_init(void)
mutex_init(&qcheck_lock);
#endif
return xqm;
+
+ out_free_udqhash:
+ kmem_free_large(udqhash);
+ out:
+ return NULL;
}
/*
@@ -189,8 +199,8 @@ xfs_qm_destroy(
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
}
- kmem_free(xqm->qm_usr_dqhtable);
- kmem_free(xqm->qm_grp_dqhtable);
+ kmem_free_large(xqm->qm_usr_dqhtable);
+ kmem_free_large(xqm->qm_grp_dqhtable);
xqm->qm_usr_dqhtable = NULL;
xqm->qm_grp_dqhtable = NULL;
xqm->qm_dqhashmask = 0;
@@ -219,8 +229,12 @@ xfs_qm_hold_quotafs_ref(
*/
mutex_lock(&xfs_Gqm_lock);
- if (xfs_Gqm == NULL)
+ if (!xfs_Gqm) {
xfs_Gqm = xfs_Gqm_init();
+ if (!xfs_Gqm)
+ return ENOMEM;
+ }
+
/*
* We can keep a list of all filesystems with quotas mounted for
* debugging and statistical purposes, but ...
@@ -436,7 +450,7 @@ xfs_qm_unmount_quotas(
STATIC int
xfs_qm_dqflush_all(
xfs_mount_t *mp,
- int flags)
+ int sync_mode)
{
int recl;
xfs_dquot_t *dqp;
@@ -472,7 +486,7 @@ again:
* across a disk write.
*/
xfs_qm_mplist_unlock(mp);
- error = xfs_qm_dqflush(dqp, flags);
+ error = xfs_qm_dqflush(dqp, sync_mode);
xfs_dqunlock(dqp);
if (error)
return error;
@@ -912,13 +926,11 @@ xfs_qm_sync(
{
int recl, restarts;
xfs_dquot_t *dqp;
- uint flush_flags;
int error;
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;
- flush_flags = (flags & SYNC_WAIT) ? XFS_QMOPT_SYNC : XFS_QMOPT_DELWRI;
restarts = 0;
again:
@@ -978,7 +990,7 @@ xfs_qm_sync(
* across a disk write
*/
xfs_qm_mplist_unlock(mp);
- error = xfs_qm_dqflush(dqp, flush_flags);
+ error = xfs_qm_dqflush(dqp, flags);
xfs_dqunlock(dqp);
if (error && XFS_FORCED_SHUTDOWN(mp))
return 0; /* Need to prevent umount failure */
@@ -1782,7 +1794,7 @@ xfs_qm_quotacheck(
* successfully.
*/
if (!error)
- error = xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
+ error = xfs_qm_dqflush_all(mp, 0);
/*
* We can get this error if we couldn't do a dquot allocation inside
@@ -2004,7 +2016,7 @@ xfs_qm_shake_freelist(
* We flush it delayed write, so don't bother
* releasing the mplock.
*/
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+ error = xfs_qm_dqflush(dqp, 0);
if (error) {
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
"xfs_qm_dqflush_all: dquot %p flush failed", dqp);
@@ -2187,7 +2199,7 @@ xfs_qm_dqreclaim_one(void)
* We flush it delayed write, so don't bother
* releasing the freelist lock.
*/
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+ error = xfs_qm_dqflush(dqp, 0);
if (error) {
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
"xfs_qm_dqreclaim: dquot %p flush failed", dqp);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index a5346630dfa..97b410c1279 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -59,7 +59,7 @@ xfs_fill_statvfs_from_dquot(
be64_to_cpu(dp->d_blk_hardlimit);
if (limit && statp->f_blocks > limit) {
statp->f_blocks = limit;
- statp->f_bfree =
+ statp->f_bfree = statp->f_bavail =
(statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
(statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
}
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 873e07e2907..5d0ee8d492d 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -1192,9 +1192,9 @@ xfs_qm_internalqcheck(
if (! XFS_IS_QUOTA_ON(mp))
return XFS_ERROR(ESRCH);
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+ xfs_log_force(mp, XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+ xfs_log_force(mp, XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);
mutex_lock(&qcheck_lock);
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 97ac9640be9..c3ab75cb1d9 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -589,12 +589,18 @@ xfs_trans_unreserve_and_mod_dquots(
}
}
-STATIC int
-xfs_quota_error(uint flags)
+STATIC void
+xfs_quota_warn(
+ struct xfs_mount *mp,
+ struct xfs_dquot *dqp,
+ int type)
{
- if (flags & XFS_QMOPT_ENOSPC)
- return ENOSPC;
- return EDQUOT;
+ /* no warnings for project quotas - we just return ENOSPC later */
+ if (dqp->dq_flags & XFS_DQ_PROJ)
+ return;
+ quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA,
+ be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev,
+ type);
}
/*
@@ -612,7 +618,6 @@ xfs_trans_dqresv(
long ninos,
uint flags)
{
- int error;
xfs_qcnt_t hardlimit;
xfs_qcnt_t softlimit;
time_t timer;
@@ -649,7 +654,6 @@ xfs_trans_dqresv(
warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
resbcountp = &dqp->q_res_rtbcount;
}
- error = 0;
if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
dqp->q_core.d_id &&
@@ -667,18 +671,20 @@ xfs_trans_dqresv(
* nblks.
*/
if (hardlimit > 0ULL &&
- (hardlimit <= nblks + *resbcountp)) {
- error = xfs_quota_error(flags);
+ hardlimit <= nblks + *resbcountp) {
+ xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
goto error_return;
}
-
if (softlimit > 0ULL &&
- (softlimit <= nblks + *resbcountp)) {
+ softlimit <= nblks + *resbcountp) {
if ((timer != 0 && get_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
- error = xfs_quota_error(flags);
+ xfs_quota_warn(mp, dqp,
+ QUOTA_NL_BSOFTLONGWARN);
goto error_return;
}
+
+ xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
}
}
if (ninos > 0) {
@@ -692,15 +698,19 @@ xfs_trans_dqresv(
softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
if (!softlimit)
softlimit = q->qi_isoftlimit;
+
if (hardlimit > 0ULL && count >= hardlimit) {
- error = xfs_quota_error(flags);
+ xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
goto error_return;
- } else if (softlimit > 0ULL && count >= softlimit) {
- if ((timer != 0 && get_seconds() > timer) ||
+ }
+ if (softlimit > 0ULL && count >= softlimit) {
+ if ((timer != 0 && get_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
- error = xfs_quota_error(flags);
+ xfs_quota_warn(mp, dqp,
+ QUOTA_NL_ISOFTLONGWARN);
goto error_return;
}
+ xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
}
}
}
@@ -736,9 +746,14 @@ xfs_trans_dqresv(
ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));
+ xfs_dqunlock(dqp);
+ return 0;
+
error_return:
xfs_dqunlock(dqp);
- return error;
+ if (flags & XFS_QMOPT_ENOSPC)
+ return ENOSPC;
+ return EDQUOT;
}
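
The xfs_trans_dquot.c hunk above replaces the xfs_quota_error() helper with
quota warnings and funnels every limit breach to a single error_return label
that picks ENOSPC or EDQUOT. A small, self-contained model of that control
flow follows; the warn hook and the limit numbers are illustrative assumptions
(the real code calls quota_send_warning() and also applies grace timers and
warning counts that are omitted here).

/*
 * Model of the reworked reservation error path: warn on a breach, then
 * fall through to one exit that returns ENOSPC (project) or EDQUOT.
 * Grace timers and warning counts from the real code are omitted.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum warn_type { BHARDWARN, BSOFTWARN };

static void quota_warn(int project_quota, enum warn_type type)
{
	/* the patch suppresses warnings entirely for project quotas */
	if (project_quota)
		return;
	printf("quota warning %d\n", type);
}

static int dq_reserve_blocks(uint64_t hard, uint64_t soft, uint64_t used,
			     uint64_t nblks, int project_quota)
{
	if (hard && hard <= nblks + used) {
		quota_warn(project_quota, BHARDWARN);
		goto error_return;
	}
	if (soft && soft <= nblks + used)
		quota_warn(project_quota, BSOFTWARN);
	return 0;

error_return:
	return project_quota ? ENOSPC : EDQUOT;
}

int main(void)
{
	printf("over hard limit: %d\n", dq_reserve_blocks(100, 80, 90, 20, 0));
	printf("within limits:   %d\n", dq_reserve_blocks(100, 80, 10, 20, 0));
	return 0;
}
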
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 00fd357c3e4..d13eeba2c8f 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -36,8 +36,8 @@ struct xfs_acl {
};
/* On-disk XFS extended attribute names */
-#define SGI_ACL_FILE "SGI_ACL_FILE"
-#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT"
+#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE"
+#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT"
#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 6702bd86581..b1a5a1ff88e 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -187,17 +187,13 @@ typedef struct xfs_perag_busy {
/*
* Per-ag incore structure, copies of information in agf and agi,
* to improve the performance of allocation group selection.
- *
- * pick sizes which fit in allocation buckets well
*/
-#if (BITS_PER_LONG == 32)
-#define XFS_PAGB_NUM_SLOTS 84
-#elif (BITS_PER_LONG == 64)
#define XFS_PAGB_NUM_SLOTS 128
-#endif
-typedef struct xfs_perag
-{
+typedef struct xfs_perag {
+ struct xfs_mount *pag_mount; /* owner filesystem */
+ xfs_agnumber_t pag_agno; /* AG this structure belongs to */
+ atomic_t pag_ref; /* perag reference count */
char pagf_init; /* this agf's entry is initialized */
char pagi_init; /* this agi's entry is initialized */
char pagf_metadata; /* the agf is preferred to be metadata */
@@ -210,8 +206,6 @@ typedef struct xfs_perag
__uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
xfs_agino_t pagi_freecount; /* number of free inodes */
xfs_agino_t pagi_count; /* number of allocated inodes */
- int pagb_count; /* pagb slots in use */
- xfs_perag_busy_t *pagb_list; /* unstable blocks */
/*
* Inode allocation search lookup optimisation.
@@ -230,6 +224,8 @@ typedef struct xfs_perag
rwlock_t pag_ici_lock; /* incore inode lock */
struct radix_tree_root pag_ici_root; /* incore inode cache root */
#endif
+ int pagb_count; /* pagb slots in use */
+ xfs_perag_busy_t pagb_list[XFS_PAGB_NUM_SLOTS]; /* unstable blocks */
} xfs_perag_t;
/*
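
The xfs_ag.h hunk above embeds a reference count (pag_ref) plus back-pointers
in struct xfs_perag, and the allocator hunks that follow swap the old
m_peraglock/array accesses for paired xfs_perag_get()/xfs_perag_put() calls.
Below is a minimal userspace model of that get/put pattern; the flat array
lookup and the struct fields are illustrative assumptions standing in for the
kernel's per-AG lookup.

/*
 * Userspace model of the xfs_perag_get()/xfs_perag_put() pattern used in
 * the allocator hunks below: look up the per-AG structure, pin it with a
 * reference while it is in use, and drop the reference when done.
 */
#include <stdatomic.h>
#include <stdio.h>

#define AGCOUNT 4

struct perag_model {
	unsigned int agno;
	atomic_int ref;
	unsigned long freeblks;
};

static struct perag_model perags[AGCOUNT];	/* stand-in for the real lookup */

static struct perag_model *perag_get(unsigned int agno)
{
	struct perag_model *pag = &perags[agno];

	atomic_fetch_add(&pag->ref, 1);		/* pin while in use */
	return pag;
}

static void perag_put(struct perag_model *pag)
{
	atomic_fetch_sub(&pag->ref, 1);		/* drop the pin */
}

int main(void)
{
	for (unsigned int i = 0; i < AGCOUNT; i++)
		perags[i].agno = i;

	/* typical caller pattern from the hunks below */
	struct perag_model *pag = perag_get(2);

	pag->freeblks += 16;
	printf("ag %u freeblks %lu ref %d\n", pag->agno, pag->freeblks,
	       (int)atomic_load(&pag->ref));
	perag_put(pag);
	return 0;
}
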
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 275b1f4f943..94cddbfb256 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1662,11 +1662,13 @@ xfs_free_ag_extent(
xfs_agf_t *agf;
xfs_perag_t *pag; /* per allocation group data */
+ pag = xfs_perag_get(mp, agno);
+ pag->pagf_freeblks += len;
+ xfs_perag_put(pag);
+
agf = XFS_BUF_TO_AGF(agbp);
- pag = &mp->m_perag[agno];
be32_add_cpu(&agf->agf_freeblks, len);
xfs_trans_agblocks_delta(tp, len);
- pag->pagf_freeblks += len;
XFS_WANT_CORRUPTED_GOTO(
be32_to_cpu(agf->agf_freeblks) <=
be32_to_cpu(agf->agf_length),
@@ -1969,10 +1971,12 @@ xfs_alloc_get_freelist(
xfs_trans_brelse(tp, agflbp);
if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
agf->agf_flfirst = 0;
- pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
+
+ pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, -1);
xfs_trans_agflist_delta(tp, -1);
pag->pagf_flcount--;
+ xfs_perag_put(pag);
logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
if (btreeblk) {
@@ -2078,7 +2082,8 @@ xfs_alloc_put_freelist(
be32_add_cpu(&agf->agf_fllast, 1);
if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
agf->agf_fllast = 0;
- pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
+
+ pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, 1);
xfs_trans_agflist_delta(tp, 1);
pag->pagf_flcount++;
@@ -2089,6 +2094,7 @@ xfs_alloc_put_freelist(
pag->pagf_btreeblks--;
logflags |= XFS_AGF_BTREEBLKS;
}
+ xfs_perag_put(pag);
xfs_alloc_log_agf(tp, agbp, logflags);
@@ -2152,7 +2158,6 @@ xfs_read_agf(
xfs_trans_brelse(tp, *bpp);
return XFS_ERROR(EFSCORRUPTED);
}
-
XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGF, XFS_AGF_REF);
return 0;
}
@@ -2175,7 +2180,7 @@ xfs_alloc_read_agf(
ASSERT(agno != NULLAGNUMBER);
error = xfs_read_agf(mp, tp, agno,
- (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0,
+ (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
bpp);
if (error)
return error;
@@ -2184,7 +2189,7 @@ xfs_alloc_read_agf(
ASSERT(!XFS_BUF_GETERROR(*bpp));
agf = XFS_BUF_TO_AGF(*bpp);
- pag = &mp->m_perag[agno];
+ pag = xfs_perag_get(mp, agno);
if (!pag->pagf_init) {
pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
@@ -2195,8 +2200,8 @@ xfs_alloc_read_agf(
pag->pagf_levels[XFS_BTNUM_CNTi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
spin_lock_init(&pag->pagb_lock);
- pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS *
- sizeof(xfs_perag_busy_t), KM_SLEEP);
+ pag->pagb_count = 0;
+ memset(pag->pagb_list, 0, sizeof(pag->pagb_list));
pag->pagf_init = 1;
}
#ifdef DEBUG
@@ -2211,6 +2216,7 @@ xfs_alloc_read_agf(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
}
#endif
+ xfs_perag_put(pag);
return 0;
}
@@ -2270,8 +2276,7 @@ xfs_alloc_vextent(
* These three force us into a single a.g.
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
- down_read(&mp->m_peraglock);
- args->pag = &mp->m_perag[args->agno];
+ args->pag = xfs_perag_get(mp, args->agno);
args->minleft = 0;
error = xfs_alloc_fix_freelist(args, 0);
args->minleft = minleft;
@@ -2280,14 +2285,12 @@ xfs_alloc_vextent(
goto error0;
}
if (!args->agbp) {
- up_read(&mp->m_peraglock);
trace_xfs_alloc_vextent_noagbp(args);
break;
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
if ((error = xfs_alloc_ag_vextent(args)))
goto error0;
- up_read(&mp->m_peraglock);
break;
case XFS_ALLOCTYPE_START_BNO:
/*
@@ -2339,9 +2342,8 @@ xfs_alloc_vextent(
* Loop over allocation groups twice; first time with
* trylock set, second time without.
*/
- down_read(&mp->m_peraglock);
for (;;) {
- args->pag = &mp->m_perag[args->agno];
+ args->pag = xfs_perag_get(mp, args->agno);
if (no_min) args->minleft = 0;
error = xfs_alloc_fix_freelist(args, flags);
args->minleft = minleft;
@@ -2400,8 +2402,8 @@ xfs_alloc_vextent(
}
}
}
+ xfs_perag_put(args->pag);
}
- up_read(&mp->m_peraglock);
if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
if (args->agno == sagno)
mp->m_agfrotor = (mp->m_agfrotor + 1) %
@@ -2427,9 +2429,10 @@ xfs_alloc_vextent(
args->len);
#endif
}
+ xfs_perag_put(args->pag);
return 0;
error0:
- up_read(&mp->m_peraglock);
+ xfs_perag_put(args->pag);
return error;
}
@@ -2454,8 +2457,7 @@ xfs_free_extent(
args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
ASSERT(args.agno < args.mp->m_sb.sb_agcount);
args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
- down_read(&args.mp->m_peraglock);
- args.pag = &args.mp->m_perag[args.agno];
+ args.pag = xfs_perag_get(args.mp, args.agno);
if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
goto error0;
#ifdef DEBUG
@@ -2465,7 +2467,7 @@ xfs_free_extent(
#endif
error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
error0:
- up_read(&args.mp->m_peraglock);
+ xfs_perag_put(args.pag);
return error;
}
@@ -2486,15 +2488,15 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
xfs_agblock_t bno,
xfs_extlen_t len)
{
- xfs_mount_t *mp;
xfs_perag_busy_t *bsy;
+ struct xfs_perag *pag;
int n;
- mp = tp->t_mountp;
- spin_lock(&mp->m_perag[agno].pagb_lock);
+ pag = xfs_perag_get(tp->t_mountp, agno);
+ spin_lock(&pag->pagb_lock);
/* search pagb_list for an open slot */
- for (bsy = mp->m_perag[agno].pagb_list, n = 0;
+ for (bsy = pag->pagb_list, n = 0;
n < XFS_PAGB_NUM_SLOTS;
bsy++, n++) {
if (bsy->busy_tp == NULL) {
@@ -2502,11 +2504,11 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
}
}
- trace_xfs_alloc_busy(mp, agno, bno, len, n);
+ trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len, n);
if (n < XFS_PAGB_NUM_SLOTS) {
- bsy = &mp->m_perag[agno].pagb_list[n];
- mp->m_perag[agno].pagb_count++;
+ bsy = &pag->pagb_list[n];
+ pag->pagb_count++;
bsy->busy_start = bno;
bsy->busy_length = len;
bsy->busy_tp = tp;
@@ -2521,7 +2523,8 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
xfs_trans_set_sync(tp);
}
- spin_unlock(&mp->m_perag[agno].pagb_lock);
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
}
void
@@ -2529,24 +2532,23 @@ xfs_alloc_clear_busy(xfs_trans_t *tp,
xfs_agnumber_t agno,
int idx)
{
- xfs_mount_t *mp;
+ struct xfs_perag *pag;
xfs_perag_busy_t *list;
- mp = tp->t_mountp;
-
- spin_lock(&mp->m_perag[agno].pagb_lock);
- list = mp->m_perag[agno].pagb_list;
-
ASSERT(idx < XFS_PAGB_NUM_SLOTS);
+ pag = xfs_perag_get(tp->t_mountp, agno);
+ spin_lock(&pag->pagb_lock);
+ list = pag->pagb_list;
- trace_xfs_alloc_unbusy(mp, agno, idx, list[idx].busy_tp == tp);
+ trace_xfs_alloc_unbusy(tp->t_mountp, agno, idx, list[idx].busy_tp == tp);
if (list[idx].busy_tp == tp) {
list[idx].busy_tp = NULL;
- mp->m_perag[agno].pagb_count--;
+ pag->pagb_count--;
}
- spin_unlock(&mp->m_perag[agno].pagb_lock);
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
}
@@ -2560,17 +2562,15 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_agblock_t bno,
xfs_extlen_t len)
{
- xfs_mount_t *mp;
+ struct xfs_perag *pag;
xfs_perag_busy_t *bsy;
xfs_agblock_t uend, bend;
xfs_lsn_t lsn = 0;
int cnt;
- mp = tp->t_mountp;
-
- spin_lock(&mp->m_perag[agno].pagb_lock);
-
- uend = bno + len - 1;
+ pag = xfs_perag_get(tp->t_mountp, agno);
+ spin_lock(&pag->pagb_lock);
+ cnt = pag->pagb_count;
/*
* search pagb_list for this slot, skipping open slots. We have to
@@ -2578,8 +2578,9 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
* we have to get the most recent LSN for the log force to push out
* all the transactions that span the range.
*/
- for (cnt = 0; cnt < mp->m_perag[agno].pagb_count; cnt++) {
- bsy = &mp->m_perag[agno].pagb_list[cnt];
+ uend = bno + len - 1;
+ for (cnt = 0; cnt < pag->pagb_count; cnt++) {
+ bsy = &pag->pagb_list[cnt];
if (!bsy->busy_tp)
continue;
@@ -2591,7 +2592,8 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
lsn = bsy->busy_tp->t_commit_lsn;
}
- spin_unlock(&mp->m_perag[agno].pagb_lock);
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
trace_xfs_alloc_busysearch(tp->t_mountp, agno, bno, len, lsn);
/*
@@ -2599,5 +2601,5 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
* transaction that freed the block
*/
if (lsn)
- xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
+ xfs_log_force_lsn(tp->t_mountp, lsn, XFS_LOG_SYNC);
}
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index adbd9141aea..b726e10d2c1 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -61,12 +61,14 @@ xfs_allocbt_set_root(
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
int btnum = cur->bc_btnum;
+ struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
ASSERT(ptr->s != 0);
agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
- cur->bc_mp->m_perag[seqno].pagf_levels[btnum] += inc;
+ pag->pagf_levels[btnum] += inc;
+ xfs_perag_put(pag);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
@@ -150,6 +152,7 @@ xfs_allocbt_update_lastrec(
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ struct xfs_perag *pag;
__be32 len;
int numrecs;
@@ -193,7 +196,9 @@ xfs_allocbt_update_lastrec(
}
agf->agf_longest = len;
- cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len);
+ pag = xfs_perag_get(cur->bc_mp, seqno);
+ pag->pagf_longest = be32_to_cpu(len);
+ xfs_perag_put(pag);
xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index e953b6cfb2a..b9c196a53c4 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -93,12 +93,12 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
STATIC int
xfs_attr_name_to_xname(
struct xfs_name *xname,
- const char *aname)
+ const unsigned char *aname)
{
if (!aname)
return EINVAL;
xname->name = aname;
- xname->len = strlen(aname);
+ xname->len = strlen((char *)aname);
if (xname->len >= MAXNAMELEN)
return EFAULT; /* match IRIX behaviour */
@@ -124,7 +124,7 @@ STATIC int
xfs_attr_get_int(
struct xfs_inode *ip,
struct xfs_name *name,
- char *value,
+ unsigned char *value,
int *valuelenp,
int flags)
{
@@ -171,8 +171,8 @@ xfs_attr_get_int(
int
xfs_attr_get(
xfs_inode_t *ip,
- const char *name,
- char *value,
+ const unsigned char *name,
+ unsigned char *value,
int *valuelenp,
int flags)
{
@@ -197,7 +197,7 @@ xfs_attr_get(
/*
* Calculate how many blocks we need for the new attribute,
*/
-int
+STATIC int
xfs_attr_calc_size(
struct xfs_inode *ip,
int namelen,
@@ -235,8 +235,12 @@ xfs_attr_calc_size(
}
STATIC int
-xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
- char *value, int valuelen, int flags)
+xfs_attr_set_int(
+ struct xfs_inode *dp,
+ struct xfs_name *name,
+ unsigned char *value,
+ int valuelen,
+ int flags)
{
xfs_da_args_t args;
xfs_fsblock_t firstblock;
@@ -452,8 +456,8 @@ out:
int
xfs_attr_set(
xfs_inode_t *dp,
- const char *name,
- char *value,
+ const unsigned char *name,
+ unsigned char *value,
int valuelen,
int flags)
{
@@ -600,7 +604,7 @@ out:
int
xfs_attr_remove(
xfs_inode_t *dp,
- const char *name,
+ const unsigned char *name,
int flags)
{
int error;
@@ -669,9 +673,13 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
*/
/*ARGSUSED*/
STATIC int
-xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
- char *name, int namelen,
- int valuelen, char *value)
+xfs_attr_put_listent(
+ xfs_attr_list_context_t *context,
+ int flags,
+ unsigned char *name,
+ int namelen,
+ int valuelen,
+ unsigned char *value)
{
struct attrlist *alist = (struct attrlist *)context->alist;
attrlist_ent_t *aep;
@@ -1980,7 +1988,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE];
xfs_mount_t *mp;
xfs_daddr_t dblkno;
- xfs_caddr_t dst;
+ void *dst;
xfs_buf_t *bp;
int nmap, error, tmp, valuelen, blkcnt, i;
xfs_dablk_t lblkno;
@@ -2007,15 +2015,14 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
- blkcnt,
- XFS_BUF_LOCK | XBF_DONT_BLOCK,
+ blkcnt, XBF_LOCK | XBF_DONT_BLOCK,
&bp);
if (error)
return(error);
tmp = (valuelen < XFS_BUF_SIZE(bp))
? valuelen : XFS_BUF_SIZE(bp);
- xfs_biomove(bp, 0, tmp, dst, XFS_B_READ);
+ xfs_biomove(bp, 0, tmp, dst, XBF_READ);
xfs_buf_relse(bp);
dst += tmp;
valuelen -= tmp;
@@ -2039,7 +2046,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
xfs_inode_t *dp;
xfs_bmbt_irec_t map;
xfs_daddr_t dblkno;
- xfs_caddr_t src;
+ void *src;
xfs_buf_t *bp;
xfs_dablk_t lblkno;
int blkcnt, valuelen, nmap, error, tmp, committed;
@@ -2141,13 +2148,13 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
- XFS_BUF_LOCK | XBF_DONT_BLOCK);
+ XBF_LOCK | XBF_DONT_BLOCK);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
XFS_BUF_SIZE(bp);
- xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE);
+ xfs_biomove(bp, 0, tmp, src, XBF_WRITE);
if (tmp < XFS_BUF_SIZE(bp))
xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */
@@ -2208,8 +2215,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
/*
* If the "remote" value is in the cache, remove it.
*/
- bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt,
- XFS_INCORE_TRYLOCK);
+ bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK);
if (bp) {
XFS_BUF_STALE(bp);
XFS_BUF_UNDELAYWRITE(bp);
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 59b410ce69a..e920d68ef50 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -113,7 +113,7 @@ typedef struct attrlist_cursor_kern {
typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
- char *, int, int, char *);
+ unsigned char *, int, int, unsigned char *);
typedef struct xfs_attr_list_context {
struct xfs_inode *dp; /* inode */
@@ -139,7 +139,6 @@ typedef struct xfs_attr_list_context {
/*
* Overall external interface routines.
*/
-int xfs_attr_calc_size(struct xfs_inode *, int, int, int *);
int xfs_attr_inactive(struct xfs_inode *dp);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
int xfs_attr_list_int(struct xfs_attr_list_context *);
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index baf41b5af75..a90ce74fc25 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -521,11 +521,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count; i++) {
- nargs.name = (char *)sfe->nameval;
+ nargs.name = sfe->nameval;
nargs.namelen = sfe->namelen;
- nargs.value = (char *)&sfe->nameval[nargs.namelen];
+ nargs.value = &sfe->nameval[nargs.namelen];
nargs.valuelen = sfe->valuelen;
- nargs.hashval = xfs_da_hashname((char *)sfe->nameval,
+ nargs.hashval = xfs_da_hashname(sfe->nameval,
sfe->namelen);
nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */
@@ -612,10 +612,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
error = context->put_listent(context,
sfe->flags,
- (char *)sfe->nameval,
+ sfe->nameval,
(int)sfe->namelen,
(int)sfe->valuelen,
- (char*)&sfe->nameval[sfe->namelen]);
+ &sfe->nameval[sfe->namelen]);
/*
* Either search callback finished early or
@@ -659,8 +659,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
}
sbp->entno = i;
- sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen);
- sbp->name = (char *)sfe->nameval;
+ sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
+ sbp->name = sfe->nameval;
sbp->namelen = sfe->namelen;
/* These are bytes, and both on-disk, don't endian-flip */
sbp->valuelen = sfe->valuelen;
@@ -818,9 +818,9 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
continue;
ASSERT(entry->flags & XFS_ATTR_LOCAL);
name_loc = xfs_attr_leaf_name_local(leaf, i);
- nargs.name = (char *)name_loc->nameval;
+ nargs.name = name_loc->nameval;
nargs.namelen = name_loc->namelen;
- nargs.value = (char *)&name_loc->nameval[nargs.namelen];
+ nargs.value = &name_loc->nameval[nargs.namelen];
nargs.valuelen = be16_to_cpu(name_loc->valuelen);
nargs.hashval = be32_to_cpu(entry->hashval);
nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
@@ -2370,10 +2370,10 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
retval = context->put_listent(context,
entry->flags,
- (char *)name_loc->nameval,
+ name_loc->nameval,
(int)name_loc->namelen,
be16_to_cpu(name_loc->valuelen),
- (char *)&name_loc->nameval[name_loc->namelen]);
+ &name_loc->nameval[name_loc->namelen]);
if (retval)
return retval;
} else {
@@ -2397,15 +2397,15 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
return retval;
retval = context->put_listent(context,
entry->flags,
- (char *)name_rmt->name,
+ name_rmt->name,
(int)name_rmt->namelen,
valuelen,
- (char*)args.value);
+ args.value);
kmem_free(args.value);
} else {
retval = context->put_listent(context,
entry->flags,
- (char *)name_rmt->name,
+ name_rmt->name,
(int)name_rmt->namelen,
valuelen,
NULL);
@@ -2950,7 +2950,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
map.br_blockcount);
bp = xfs_trans_get_buf(*trans,
dp->i_mount->m_ddev_targp,
- dblkno, dblkcnt, XFS_BUF_LOCK);
+ dblkno, dblkcnt, XBF_LOCK);
xfs_trans_binval(*trans, bp);
/*
* Roll to next transaction.
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
index 76ab7b0cbb3..919756e3ba5 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/xfs_attr_sf.h
@@ -52,7 +52,7 @@ typedef struct xfs_attr_sf_sort {
__uint8_t valuelen; /* length of value */
__uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */
xfs_dahash_t hash; /* this entry's hash value */
- char *name; /* name value, pointer into buffer */
+ unsigned char *name; /* name value, pointer into buffer */
} xfs_attr_sf_sort_t;
#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) /* space name/value uses */ \
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 98251cdc52a..1869fb97381 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2629,13 +2629,12 @@ xfs_bmap_btalloc(
if (startag == NULLAGNUMBER)
startag = ag = 0;
notinit = 0;
- down_read(&mp->m_peraglock);
+ pag = xfs_perag_get(mp, ag);
while (blen < ap->alen) {
- pag = &mp->m_perag[ag];
if (!pag->pagf_init &&
(error = xfs_alloc_pagf_init(mp, args.tp,
ag, XFS_ALLOC_FLAG_TRYLOCK))) {
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
return error;
}
/*
@@ -2667,13 +2666,13 @@ xfs_bmap_btalloc(
break;
error = xfs_filestream_new_ag(ap, &ag);
- if (error) {
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
+ if (error)
return error;
- }
/* loop again to set 'blen'*/
startag = NULLAGNUMBER;
+ pag = xfs_perag_get(mp, ag);
continue;
}
}
@@ -2681,8 +2680,10 @@ xfs_bmap_btalloc(
ag = 0;
if (ag == startag)
break;
+ xfs_perag_put(pag);
+ pag = xfs_perag_get(mp, ag);
}
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
/*
* Since the above loop did a BUF_TRYLOCK, it is
* possible that there is space for this request.
@@ -4470,7 +4471,7 @@ xfs_bmapi(
xfs_fsblock_t abno; /* allocated block number */
xfs_extlen_t alen; /* allocated extent length */
xfs_fileoff_t aoff; /* allocated file offset */
- xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */
+ xfs_bmalloca_t bma = { 0 }; /* args for xfs_bmap_alloc */
xfs_btree_cur_t *cur; /* bmap btree cursor */
xfs_fileoff_t end; /* end of mapped file region */
int eof; /* we've hit the end of extents */
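The xfs_bmap_btalloc() hunk above also shows how the conversion handles loops that walk every AG: the reference is swapped each time the loop advances, instead of holding m_peraglock across the whole walk. A condensed sketch of that loop shape (control flow simplified from the hunk; error handling and the allocation logic are trimmed):
	pag = xfs_perag_get(mp, ag);
	for (;;) {
		/* ... examine pag->pagf_freeblks, pag->pagf_longest ... */
		if (++ag >= mp->m_sb.sb_agcount)
			ag = 0;
		if (ag == startag)
			break;			/* wrapped back to where we started */
		xfs_perag_put(pag);		/* release the old AG ... */
		pag = xfs_perag_get(mp, ag);	/* ... and take the next one */
	}
	xfs_perag_put(pag);			/* drop the final reference */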
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 38751d5fac6..416e47e54b8 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -334,7 +334,7 @@ xfs_bmbt_disk_set_allf(
/*
* Set all the fields in a bmap extent record from the uncompressed form.
*/
-void
+STATIC void
xfs_bmbt_disk_set_all(
xfs_bmbt_rec_t *r,
xfs_bmbt_irec_t *s)
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index cf07ca7c22e..0e66c4ea0f8 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -223,7 +223,6 @@ extern void xfs_bmbt_set_startblock(xfs_bmbt_rec_host_t *r, xfs_fsblock_t v);
extern void xfs_bmbt_set_startoff(xfs_bmbt_rec_host_t *r, xfs_fileoff_t v);
extern void xfs_bmbt_set_state(xfs_bmbt_rec_host_t *r, xfs_exntst_t v);
-extern void xfs_bmbt_disk_set_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o,
xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 36a0992dd66..96be4b0f249 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -977,7 +977,7 @@ xfs_btree_get_buf_block(
xfs_daddr_t d;
/* need to sort out how callers deal with failures first */
- ASSERT(!(flags & XFS_BUF_TRYLOCK));
+ ASSERT(!(flags & XBF_TRYLOCK));
d = xfs_btree_ptr_to_daddr(cur, ptr);
*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
@@ -1008,7 +1008,7 @@ xfs_btree_read_buf_block(
int error;
/* need to sort out how callers deal with failures first */
- ASSERT(!(flags & XFS_BUF_TRYLOCK));
+ ASSERT(!(flags & XBF_TRYLOCK));
d = xfs_btree_ptr_to_daddr(cur, ptr);
error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a30f7e9eb2b..f3c49e69eab 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -250,7 +250,7 @@ xfs_buf_item_format(
((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
vecp->i_len = base_size;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BFORMAT);
+ vecp->i_type = XLOG_REG_TYPE_BFORMAT;
vecp++;
nvecs = 1;
@@ -297,14 +297,14 @@ xfs_buf_item_format(
buffer_offset = first_bit * XFS_BLI_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLI_CHUNK;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
+ vecp->i_type = XLOG_REG_TYPE_BCHUNK;
nvecs++;
break;
} else if (next_bit != last_bit + 1) {
buffer_offset = first_bit * XFS_BLI_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLI_CHUNK;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
+ vecp->i_type = XLOG_REG_TYPE_BCHUNK;
nvecs++;
vecp++;
first_bit = next_bit;
@@ -316,7 +316,7 @@ xfs_buf_item_format(
buffer_offset = first_bit * XFS_BLI_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLI_CHUNK;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
+ vecp->i_type = XLOG_REG_TYPE_BCHUNK;
/* You would think we need to bump the nvecs here too, but we do not;
* this number is used by recovery, and it gets confused by the boundary
* split here
@@ -467,8 +467,10 @@ xfs_buf_item_unpin_remove(
/*
* This is called to attempt to lock the buffer associated with this
* buf log item. Don't sleep on the buffer lock. If we can't get
- * the lock right away, return 0. If we can get the lock, pull the
- * buffer from the free list, mark it busy, and return 1.
+ * the lock right away, return 0. If we can get the lock, take a
+ * reference to the buffer. If this is a delayed write buffer that
+ * needs AIL help to be written back, invoke the pushbuf routine
+ * rather than the normal success path.
*/
STATIC uint
xfs_buf_item_trylock(
@@ -477,24 +479,18 @@ xfs_buf_item_trylock(
xfs_buf_t *bp;
bp = bip->bli_buf;
-
- if (XFS_BUF_ISPINNED(bp)) {
+ if (XFS_BUF_ISPINNED(bp))
return XFS_ITEM_PINNED;
- }
-
- if (!XFS_BUF_CPSEMA(bp)) {
+ if (!XFS_BUF_CPSEMA(bp))
return XFS_ITEM_LOCKED;
- }
- /*
- * Remove the buffer from the free list. Only do this
- * if it's on the free list. Private buffers like the
- * superblock buffer are not.
- */
+ /* take a reference to the buffer. */
XFS_BUF_HOLD(bp);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
trace_xfs_buf_item_trylock(bip);
+ if (XFS_BUF_ISDELAYWRITE(bp))
+ return XFS_ITEM_PUSHBUF;
return XFS_ITEM_SUCCESS;
}
@@ -626,11 +622,9 @@ xfs_buf_item_committed(
}
/*
- * This is called to asynchronously write the buffer associated with this
- * buf log item out to disk. The buffer will already have been locked by
- * a successful call to xfs_buf_item_trylock(). If the buffer still has
- * B_DELWRI set, then get it going out to disk with a call to bawrite().
- * If not, then just release the buffer.
+ * The buffer is locked, but is not a delayed write buffer. This happens
+ * if we race with IO completion and hence we don't want to try to write it
+ * again. Just release the buffer.
*/
STATIC void
xfs_buf_item_push(
@@ -642,17 +636,29 @@ xfs_buf_item_push(
trace_xfs_buf_item_push(bip);
bp = bip->bli_buf;
+ ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
+ xfs_buf_relse(bp);
+}
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- int error;
- error = xfs_bawrite(bip->bli_item.li_mountp, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, bip->bli_item.li_mountp,
- "xfs_buf_item_push: pushbuf error %d on bip %p, bp %p",
- error, bip, bp);
- } else {
- xfs_buf_relse(bp);
- }
+/*
+ * The buffer is locked and is a delayed write buffer. Promote the buffer
+ * in the delayed write queue as the caller knows that they must invoke
+ * the xfsbufd to get this buffer written. We have to unlock the buffer
+ * to allow the xfsbufd to write it, too.
+ */
+STATIC void
+xfs_buf_item_pushbuf(
+ xfs_buf_log_item_t *bip)
+{
+ xfs_buf_t *bp;
+
+ ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+ trace_xfs_buf_item_pushbuf(bip);
+
+ bp = bip->bli_buf;
+ ASSERT(XFS_BUF_ISDELAYWRITE(bp));
+ xfs_buf_delwri_promote(bp);
+ xfs_buf_relse(bp);
}
/* ARGSUSED */
@@ -677,7 +683,7 @@ static struct xfs_item_ops xfs_buf_item_ops = {
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
- .iop_pushbuf = NULL,
+ .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committing
};
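The xfs_buf_item hunks above split the old push logic in two: ->iop_trylock() now reports XFS_ITEM_PUSHBUF for locked delayed-write buffers, and the new ->iop_pushbuf() promotes them on the delwri queue rather than issuing its own bawrite. A rough sketch of how the AIL pusher consumes those return values (the real xfsaild loop adds batching and backoff; the IOP_* dispatch macros are the ones from xfs_trans.h, everything else here is illustrative):
	switch (IOP_TRYLOCK(lip)) {
	case XFS_ITEM_SUCCESS:
		IOP_PUSH(lip);		/* locked, not delwri: write back or release */
		break;
	case XFS_ITEM_PUSHBUF:
		IOP_PUSHBUF(lip);	/* locked, delwri: xfs_buf_delwri_promote() */
		break;
	case XFS_ITEM_PINNED:
		/* still pinned in the log: needs a log force before it can move */
	case XFS_ITEM_LOCKED:
		break;			/* busy elsewhere: retry on a later push */
	}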
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index c0c8869115b..0ca556b4bf3 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1534,8 +1534,8 @@ xfs_da_hashname(const __uint8_t *name, int namelen)
enum xfs_dacmp
xfs_da_compname(
struct xfs_da_args *args,
- const char *name,
- int len)
+ const unsigned char *name,
+ int len)
{
return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 30cd08f56a3..fe9f5a8c1d2 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -209,7 +209,8 @@ typedef struct xfs_da_state {
*/
struct xfs_nameops {
xfs_dahash_t (*hashname)(struct xfs_name *);
- enum xfs_dacmp (*compname)(struct xfs_da_args *, const char *, int);
+ enum xfs_dacmp (*compname)(struct xfs_da_args *,
+ const unsigned char *, int);
};
@@ -260,7 +261,7 @@ int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
uint xfs_da_hashname(const __uint8_t *name_string, int name_length);
enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
- const char *name, int len);
+ const unsigned char *name, int len);
xfs_da_state_t *xfs_da_state_alloc(void);
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 84ca1cf16a1..cd27c9d6c71 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -45,15 +45,21 @@
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
+
+static int xfs_swap_extents(
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip, /* tmp inode */
+ xfs_swapext_t *sxp);
+
/*
- * Syssgi interface for swapext
+ * ioctl interface for swapext
*/
int
xfs_swapext(
xfs_swapext_t *sxp)
{
xfs_inode_t *ip, *tip;
- struct file *file, *target_file;
+ struct file *file, *tmp_file;
int error = 0;
/* Pull information for the target fd */
@@ -68,46 +74,46 @@ xfs_swapext(
goto out_put_file;
}
- target_file = fget((int)sxp->sx_fdtmp);
- if (!target_file) {
+ tmp_file = fget((int)sxp->sx_fdtmp);
+ if (!tmp_file) {
error = XFS_ERROR(EINVAL);
goto out_put_file;
}
- if (!(target_file->f_mode & FMODE_WRITE) ||
- (target_file->f_flags & O_APPEND)) {
+ if (!(tmp_file->f_mode & FMODE_WRITE) ||
+ (tmp_file->f_flags & O_APPEND)) {
error = XFS_ERROR(EBADF);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
if (IS_SWAPFILE(file->f_path.dentry->d_inode) ||
- IS_SWAPFILE(target_file->f_path.dentry->d_inode)) {
+ IS_SWAPFILE(tmp_file->f_path.dentry->d_inode)) {
error = XFS_ERROR(EINVAL);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
ip = XFS_I(file->f_path.dentry->d_inode);
- tip = XFS_I(target_file->f_path.dentry->d_inode);
+ tip = XFS_I(tmp_file->f_path.dentry->d_inode);
if (ip->i_mount != tip->i_mount) {
error = XFS_ERROR(EINVAL);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
if (ip->i_ino == tip->i_ino) {
error = XFS_ERROR(EINVAL);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
error = XFS_ERROR(EIO);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
error = xfs_swap_extents(ip, tip, sxp);
- out_put_target_file:
- fput(target_file);
+ out_put_tmp_file:
+ fput(tmp_file);
out_put_file:
fput(file);
out:
@@ -186,7 +192,7 @@ xfs_swap_extents_check_format(
return 0;
}
-int
+static int
xfs_swap_extents(
xfs_inode_t *ip, /* target inode */
xfs_inode_t *tip, /* tmp inode */
@@ -254,6 +260,9 @@ xfs_swap_extents(
goto out_unlock;
}
+ trace_xfs_swap_extent_before(ip, 0);
+ trace_xfs_swap_extent_before(tip, 1);
+
/* check inode formats now that data is flushed */
error = xfs_swap_extents_check_format(ip, tip);
if (error) {
@@ -421,6 +430,8 @@ xfs_swap_extents(
error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT);
+ trace_xfs_swap_extent_after(ip, 0);
+ trace_xfs_swap_extent_after(tip, 1);
out:
kmem_free(tempifp);
return error;
diff --git a/fs/xfs/xfs_dfrag.h b/fs/xfs/xfs_dfrag.h
index 4f55a630655..20bdd935c12 100644
--- a/fs/xfs/xfs_dfrag.h
+++ b/fs/xfs/xfs_dfrag.h
@@ -48,9 +48,6 @@ typedef struct xfs_swapext
*/
int xfs_swapext(struct xfs_swapext *sx);
-int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
- struct xfs_swapext *sxp);
-
#endif /* __KERNEL__ */
#endif /* __XFS_DFRAG_H__ */
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 93634a7e90e..42520f04126 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -44,7 +44,7 @@
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
-struct xfs_name xfs_name_dotdot = {"..", 2};
+struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2};
/*
* ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -66,8 +66,8 @@ xfs_ascii_ci_hashname(
STATIC enum xfs_dacmp
xfs_ascii_ci_compname(
struct xfs_da_args *args,
- const char *name,
- int len)
+ const unsigned char *name,
+ int len)
{
enum xfs_dacmp result;
int i;
@@ -247,7 +247,7 @@ xfs_dir_createname(
int
xfs_dir_cilookup_result(
struct xfs_da_args *args,
- const char *name,
+ const unsigned char *name,
int len)
{
if (args->cmpresult == XFS_CMP_DIFFERENT)
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 1d9ef96f33a..74a3b105768 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -100,7 +100,7 @@ extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp,
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_dabuf *bp);
-extern int xfs_dir_cilookup_result(struct xfs_da_args *args, const char *name,
- int len);
+extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
+ const unsigned char *name, int len);
#endif /* __XFS_DIR2_H__ */
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index ddc4ecc7807..779a267b0a8 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -57,8 +57,8 @@ static xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot;
void
xfs_dir_startup(void)
{
- xfs_dir_hash_dot = xfs_da_hashname(".", 1);
- xfs_dir_hash_dotdot = xfs_da_hashname("..", 2);
+ xfs_dir_hash_dot = xfs_da_hashname((unsigned char *)".", 1);
+ xfs_dir_hash_dotdot = xfs_da_hashname((unsigned char *)"..", 2);
}
/*
@@ -513,8 +513,9 @@ xfs_dir2_block_getdents(
/*
* If it didn't fit, set the final offset to here & return.
*/
- if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff,
- be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
+ if (filldir(dirent, (char *)dep->name, dep->namelen,
+ cook & 0x7fffffff, be64_to_cpu(dep->inumber),
+ DT_UNKNOWN)) {
*offset = cook & 0x7fffffff;
xfs_da_brelse(NULL, bp);
return 0;
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 29f484c11b3..e2d89854ec9 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -1081,7 +1081,7 @@ xfs_dir2_leaf_getdents(
dep = (xfs_dir2_data_entry_t *)ptr;
length = xfs_dir2_data_entsize(dep->namelen);
- if (filldir(dirent, dep->name, dep->namelen,
+ if (filldir(dirent, (char *)dep->name, dep->namelen,
xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
be64_to_cpu(dep->inumber), DT_UNKNOWN))
break;
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index ce6e355199b..78fc4d9ae75 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -65,7 +65,7 @@ static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
/*
* Log entries from a freespace block.
*/
-void
+STATIC void
xfs_dir2_free_log_bests(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp, /* freespace buffer */
diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h
index dde72db3d69..82dfe714719 100644
--- a/fs/xfs/xfs_dir2_node.h
+++ b/fs/xfs/xfs_dir2_node.h
@@ -75,8 +75,6 @@ xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
return ((db) % XFS_DIR2_MAX_FREE_BESTS(mp));
}
-extern void xfs_dir2_free_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp,
- int first, int last);
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
struct xfs_dabuf *lbp);
extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index 9d4f17a6967..c1a5945d463 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -782,7 +782,7 @@ xfs_dir2_sf_getdents(
}
ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
- if (filldir(dirent, sfep->name, sfep->namelen,
+ if (filldir(dirent, (char *)sfep->name, sfep->namelen,
off & 0x7fffffff, ino, DT_UNKNOWN)) {
*offset = off & 0x7fffffff;
return 0;
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 05a4bdd4be3..6f35ed1b39b 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -82,7 +82,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format);
log_vector->i_len = size;
- XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFI_FORMAT);
+ log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT;
ASSERT(size >= sizeof(xfs_efi_log_format_t));
}
@@ -406,7 +406,7 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp,
log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format);
log_vector->i_len = size;
- XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFD_FORMAT);
+ log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT;
ASSERT(size >= sizeof(xfs_efd_log_format_t));
}
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index a631e1451ab..390850ee660 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -140,6 +140,7 @@ _xfs_filestream_pick_ag(
int flags,
xfs_extlen_t minlen)
{
+ int streams, max_streams;
int err, trylock, nscan;
xfs_extlen_t longest, free, minfree, maxfree = 0;
xfs_agnumber_t ag, max_ag = NULLAGNUMBER;
@@ -155,15 +156,15 @@ _xfs_filestream_pick_ag(
trylock = XFS_ALLOC_FLAG_TRYLOCK;
for (nscan = 0; 1; nscan++) {
-
- TRACE_AG_SCAN(mp, ag, xfs_filestream_peek_ag(mp, ag));
-
- pag = mp->m_perag + ag;
+ pag = xfs_perag_get(mp, ag);
+ TRACE_AG_SCAN(mp, ag, atomic_read(&pag->pagf_fstrms));
if (!pag->pagf_init) {
err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
- if (err && !trylock)
+ if (err && !trylock) {
+ xfs_perag_put(pag);
return err;
+ }
}
/* Might fail sometimes during the 1st pass with trylock set. */
@@ -173,6 +174,7 @@ _xfs_filestream_pick_ag(
/* Keep track of the AG with the most free blocks. */
if (pag->pagf_freeblks > maxfree) {
maxfree = pag->pagf_freeblks;
+ max_streams = atomic_read(&pag->pagf_fstrms);
max_ag = ag;
}
@@ -195,6 +197,8 @@ _xfs_filestream_pick_ag(
/* Break out, retaining the reference on the AG. */
free = pag->pagf_freeblks;
+ streams = atomic_read(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
*agp = ag;
break;
}
@@ -202,6 +206,7 @@ _xfs_filestream_pick_ag(
/* Drop the reference on this AG, it's not usable. */
xfs_filestream_put_ag(mp, ag);
next_ag:
+ xfs_perag_put(pag);
/* Move to the next AG, wrapping to AG 0 if necessary. */
if (++ag >= mp->m_sb.sb_agcount)
ag = 0;
@@ -229,6 +234,7 @@ next_ag:
if (max_ag != NULLAGNUMBER) {
xfs_filestream_get_ag(mp, max_ag);
TRACE_AG_PICK1(mp, max_ag, maxfree);
+ streams = max_streams;
free = maxfree;
*agp = max_ag;
break;
@@ -240,16 +246,14 @@ next_ag:
return 0;
}
- TRACE_AG_PICK2(mp, startag, *agp, xfs_filestream_peek_ag(mp, *agp),
- free, nscan, flags);
+ TRACE_AG_PICK2(mp, startag, *agp, streams, free, nscan, flags);
return 0;
}
/*
* Set the allocation group number for a file or a directory, updating inode
- * references and per-AG references as appropriate. Must be called with the
- * m_peraglock held in read mode.
+ * references and per-AG references as appropriate.
*/
static int
_xfs_filestream_update_ag(
@@ -451,20 +455,6 @@ xfs_filestream_unmount(
}
/*
- * If the mount point's m_perag array is going to be reallocated, all
- * outstanding cache entries must be flushed to avoid accessing reference count
- * addresses that have been freed. The call to xfs_filestream_flush() must be
- * made inside the block that holds the m_peraglock in write mode to do the
- * reallocation.
- */
-void
-xfs_filestream_flush(
- xfs_mount_t *mp)
-{
- xfs_mru_cache_flush(mp->m_filestream);
-}
-
-/*
* Return the AG of the filestream the file or directory belongs to, or
* NULLAGNUMBER otherwise.
*/
@@ -526,7 +516,6 @@ xfs_filestream_associate(
mp = pip->i_mount;
cache = mp->m_filestream;
- down_read(&mp->m_peraglock);
/*
* We have a problem, Houston.
@@ -543,10 +532,8 @@ xfs_filestream_associate(
*
* So, if we can't get the iolock without sleeping then just give up
*/
- if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL)) {
- up_read(&mp->m_peraglock);
+ if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL))
return 1;
- }
/* If the parent directory is already in the cache, use its AG. */
item = xfs_mru_cache_lookup(cache, pip->i_ino);
@@ -601,7 +588,6 @@ exit_did_pick:
exit:
xfs_iunlock(pip, XFS_IOLOCK_EXCL);
- up_read(&mp->m_peraglock);
return -err;
}
diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h
index 4aba67c5f64..260f757bbc5 100644
--- a/fs/xfs/xfs_filestream.h
+++ b/fs/xfs/xfs_filestream.h
@@ -79,12 +79,21 @@ extern ktrace_t *xfs_filestreams_trace_buf;
* the cache that reference per-ag array elements that have since been
* reallocated.
*/
+/*
+ * xfs_filestream_peek_ag is only used in tracing code
+ */
static inline int
xfs_filestream_peek_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
- return atomic_read(&mp->m_perag[agno].pagf_fstrms);
+ struct xfs_perag *pag;
+ int ret;
+
+ pag = xfs_perag_get(mp, agno);
+ ret = atomic_read(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
+ return ret;
}
static inline int
@@ -92,7 +101,13 @@ xfs_filestream_get_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
- return atomic_inc_return(&mp->m_perag[agno].pagf_fstrms);
+ struct xfs_perag *pag;
+ int ret;
+
+ pag = xfs_perag_get(mp, agno);
+ ret = atomic_inc_return(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
+ return ret;
}
static inline int
@@ -100,7 +115,13 @@ xfs_filestream_put_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
- return atomic_dec_return(&mp->m_perag[agno].pagf_fstrms);
+ struct xfs_perag *pag;
+ int ret;
+
+ pag = xfs_perag_get(mp, agno);
+ ret = atomic_dec_return(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
+ return ret;
}
/* allocation selection flags */
@@ -114,7 +135,6 @@ int xfs_filestream_init(void);
void xfs_filestream_uninit(void);
int xfs_filestream_mount(struct xfs_mount *mp);
void xfs_filestream_unmount(struct xfs_mount *mp);
-void xfs_filestream_flush(struct xfs_mount *mp);
xfs_agnumber_t xfs_filestream_lookup_ag(struct xfs_inode *ip);
int xfs_filestream_associate(struct xfs_inode *dip, struct xfs_inode *ip);
void xfs_filestream_deassociate(struct xfs_inode *ip);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a13919a6a36..37a6f62c57b 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -167,27 +167,14 @@ xfs_growfs_data_private(
}
new = nb - mp->m_sb.sb_dblocks;
oagcount = mp->m_sb.sb_agcount;
- if (nagcount > oagcount) {
- void *new_perag, *old_perag;
-
- xfs_filestream_flush(mp);
-
- new_perag = kmem_zalloc(sizeof(xfs_perag_t) * nagcount,
- KM_MAYFAIL);
- if (!new_perag)
- return XFS_ERROR(ENOMEM);
-
- down_write(&mp->m_peraglock);
- memcpy(new_perag, mp->m_perag, sizeof(xfs_perag_t) * oagcount);
- old_perag = mp->m_perag;
- mp->m_perag = new_perag;
-
- mp->m_flags |= XFS_MOUNT_32BITINODES;
- nagimax = xfs_initialize_perag(mp, nagcount);
- up_write(&mp->m_peraglock);
- kmem_free(old_perag);
+ /* allocate the new per-ag structures */
+ if (nagcount > oagcount) {
+ error = xfs_initialize_perag(mp, nagcount, &nagimax);
+ if (error)
+ return error;
}
+
tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
tp->t_flags |= XFS_TRANS_RESERVE;
if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
@@ -196,6 +183,11 @@ xfs_growfs_data_private(
return error;
}
+ /*
+ * Write new AG headers to disk. Non-transactional, but written
+ * synchronously so they are completed prior to the growfs transaction
+ * being logged.
+ */
nfree = 0;
for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
/*
@@ -359,6 +351,12 @@ xfs_growfs_data_private(
goto error0;
}
}
+
+ /*
+ * Update changed superblock fields transactionally. These are not
+ * seen by the rest of the world until the transaction commit applies
+ * them atomically to the superblock.
+ */
if (nagcount > oagcount)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
if (nb > mp->m_sb.sb_dblocks)
@@ -369,9 +367,9 @@ xfs_growfs_data_private(
if (dpct)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
error = xfs_trans_commit(tp, 0);
- if (error) {
+ if (error)
return error;
- }
+
/* New allocation groups fully initialized, so update mount struct */
if (nagimax)
mp->m_maxagi = nagimax;
@@ -381,6 +379,8 @@ xfs_growfs_data_private(
mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
} else
mp->m_maxicount = 0;
+
+ /* update secondary superblocks. */
for (agno = 1; agno < nagcount; agno++) {
error = xfs_read_buf(mp, mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index cb907ba69c4..9d884c127bb 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -205,7 +205,7 @@ xfs_ialloc_inode_init(
d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
mp->m_bsize * blks_per_cluster,
- XFS_BUF_LOCK);
+ XBF_LOCK);
ASSERT(fbuf);
ASSERT(!XFS_BUF_GETERROR(fbuf));
@@ -253,6 +253,7 @@ xfs_ialloc_ag_alloc(
xfs_agino_t thisino; /* current inode number, for loop */
int isaligned = 0; /* inode allocation at stripe unit */
/* boundary */
+ struct xfs_perag *pag;
args.tp = tp;
args.mp = tp->t_mountp;
@@ -382,9 +383,9 @@ xfs_ialloc_ag_alloc(
newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
be32_add_cpu(&agi->agi_count, newlen);
be32_add_cpu(&agi->agi_freecount, newlen);
- down_read(&args.mp->m_peraglock);
- args.mp->m_perag[agno].pagi_freecount += newlen;
- up_read(&args.mp->m_peraglock);
+ pag = xfs_perag_get(args.mp, agno);
+ pag->pagi_freecount += newlen;
+ xfs_perag_put(pag);
agi->agi_newino = cpu_to_be32(newino);
/*
@@ -486,9 +487,8 @@ xfs_ialloc_ag_select(
*/
agno = pagno;
flags = XFS_ALLOC_FLAG_TRYLOCK;
- down_read(&mp->m_peraglock);
for (;;) {
- pag = &mp->m_perag[agno];
+ pag = xfs_perag_get(mp, agno);
if (!pag->pagi_init) {
if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
agbp = NULL;
@@ -527,7 +527,7 @@ xfs_ialloc_ag_select(
agbp = NULL;
goto nextag;
}
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
return agbp;
}
}
@@ -535,22 +535,19 @@ unlock_nextag:
if (agbp)
xfs_trans_brelse(tp, agbp);
nextag:
+ xfs_perag_put(pag);
/*
* No point in iterating over the rest, if we're shutting
* down.
*/
- if (XFS_FORCED_SHUTDOWN(mp)) {
- up_read(&mp->m_peraglock);
+ if (XFS_FORCED_SHUTDOWN(mp))
return NULL;
- }
agno++;
if (agno >= agcount)
agno = 0;
if (agno == pagno) {
- if (flags == 0) {
- up_read(&mp->m_peraglock);
+ if (flags == 0)
return NULL;
- }
flags = 0;
}
}
@@ -672,6 +669,7 @@ xfs_dialloc(
xfs_agnumber_t tagno; /* testing allocation group number */
xfs_btree_cur_t *tcur; /* temp cursor */
xfs_inobt_rec_incore_t trec; /* temp inode allocation record */
+ struct xfs_perag *pag;
if (*IO_agbp == NULL) {
@@ -771,13 +769,13 @@ nextag:
*inop = NULLFSINO;
return noroom ? ENOSPC : 0;
}
- down_read(&mp->m_peraglock);
- if (mp->m_perag[tagno].pagi_inodeok == 0) {
- up_read(&mp->m_peraglock);
+ pag = xfs_perag_get(mp, tagno);
+ if (pag->pagi_inodeok == 0) {
+ xfs_perag_put(pag);
goto nextag;
}
error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
if (error)
goto nextag;
agi = XFS_BUF_TO_AGI(agbp);
@@ -790,6 +788,7 @@ nextag:
*/
agno = tagno;
*IO_agbp = NULL;
+ pag = xfs_perag_get(mp, agno);
restart_pagno:
cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno));
@@ -808,7 +807,6 @@ nextag:
* If in the same AG as the parent, try to get near the parent.
*/
if (pagno == agno) {
- xfs_perag_t *pag = &mp->m_perag[agno];
int doneleft; /* done, to the left */
int doneright; /* done, to the right */
int searchdistance = 10;
@@ -1006,9 +1004,7 @@ alloc_inode:
goto error0;
be32_add_cpu(&agi->agi_freecount, -1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
- down_read(&mp->m_peraglock);
- mp->m_perag[tagno].pagi_freecount--;
- up_read(&mp->m_peraglock);
+ pag->pagi_freecount--;
error = xfs_check_agi_freecount(cur, agi);
if (error)
@@ -1016,12 +1012,14 @@ alloc_inode:
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
+ xfs_perag_put(pag);
*inop = ino;
return 0;
error1:
xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ xfs_perag_put(pag);
return error;
}
@@ -1052,6 +1050,7 @@ xfs_difree(
xfs_mount_t *mp; /* mount structure for filesystem */
int off; /* offset of inode in inode chunk */
xfs_inobt_rec_incore_t rec; /* btree record */
+ struct xfs_perag *pag;
mp = tp->t_mountp;
@@ -1088,9 +1087,7 @@ xfs_difree(
/*
* Get the allocation group header.
*/
- down_read(&mp->m_peraglock);
error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
- up_read(&mp->m_peraglock);
if (error) {
cmn_err(CE_WARN,
"xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s. Returning error.",
@@ -1157,9 +1154,9 @@ xfs_difree(
be32_add_cpu(&agi->agi_count, -ilen);
be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
- down_read(&mp->m_peraglock);
- mp->m_perag[agno].pagi_freecount -= ilen - 1;
- up_read(&mp->m_peraglock);
+ pag = xfs_perag_get(mp, agno);
+ pag->pagi_freecount -= ilen - 1;
+ xfs_perag_put(pag);
xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
@@ -1188,9 +1185,9 @@ xfs_difree(
*/
be32_add_cpu(&agi->agi_freecount, 1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
- down_read(&mp->m_peraglock);
- mp->m_perag[agno].pagi_freecount++;
- up_read(&mp->m_peraglock);
+ pag = xfs_perag_get(mp, agno);
+ pag->pagi_freecount++;
+ xfs_perag_put(pag);
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
}
@@ -1312,9 +1309,7 @@ xfs_imap(
xfs_buf_t *agbp; /* agi buffer */
int i; /* temp state */
- down_read(&mp->m_peraglock);
error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
- up_read(&mp->m_peraglock);
if (error) {
xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
"xfs_ialloc_read_agi() returned "
@@ -1379,7 +1374,6 @@ xfs_imap(
XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
return XFS_ERROR(EINVAL);
}
-
return 0;
}
@@ -1523,8 +1517,7 @@ xfs_ialloc_read_agi(
return error;
agi = XFS_BUF_TO_AGI(*bpp);
- pag = &mp->m_perag[agno];
-
+ pag = xfs_perag_get(mp, agno);
if (!pag->pagi_init) {
pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
pag->pagi_count = be32_to_cpu(agi->agi_count);
@@ -1537,6 +1530,7 @@ xfs_ialloc_read_agi(
*/
ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
XFS_FORCED_SHUTDOWN(mp));
+ xfs_perag_put(pag);
return 0;
}
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 155e798f30a..e281eb4a1c4 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -374,7 +374,7 @@ xfs_iget(
return EINVAL;
/* get the perag structure and ensure that it's inode capable */
- pag = xfs_get_perag(mp, ino);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
if (!pag->pagi_inodeok)
return EINVAL;
ASSERT(pag->pag_ici_init);
@@ -398,7 +398,7 @@ again:
if (error)
goto out_error_or_again;
}
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
*ipp = ip;
@@ -417,7 +417,7 @@ out_error_or_again:
delay(1);
goto again;
}
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
return error;
}
@@ -488,12 +488,12 @@ xfs_ireclaim(
* added to the tree assert that it's been there before to catch
* problems with the inode life time early on.
*/
- pag = xfs_get_perag(mp, ip->i_ino);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
write_lock(&pag->pag_ici_lock);
if (!radix_tree_delete(&pag->pag_ici_root, agino))
ASSERT(0);
write_unlock(&pag->pag_ici_lock);
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
/*
* Here we do an (almost) spurious inode lock in order to coordinate
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ef77fd88c8e..fa31360046d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -151,7 +151,7 @@ xfs_imap_to_bp(
"an error %d on %s. Returning error.",
error, mp->m_fsname);
} else {
- ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+ ASSERT(buf_flags & XBF_TRYLOCK);
}
return error;
}
@@ -239,7 +239,7 @@ xfs_inotobp(
if (error)
return error;
- error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
+ error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
if (error)
return error;
@@ -285,7 +285,7 @@ xfs_itobp(
return error;
if (!bp) {
- ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+ ASSERT(buf_flags & XBF_TRYLOCK);
ASSERT(tp == NULL);
*bpp = NULL;
return EAGAIN;
@@ -807,7 +807,7 @@ xfs_iread(
* Get pointers to the on-disk inode and the buffer containing it.
*/
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
- XFS_BUF_LOCK, iget_flags);
+ XBF_LOCK, iget_flags);
if (error)
return error;
dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
@@ -1751,7 +1751,7 @@ xfs_iunlink(
* Here we put the head pointer into our next pointer,
* and then we fall through to point the head at us.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
if (error)
return error;
@@ -1833,7 +1833,7 @@ xfs_iunlink_remove(
* of dealing with the buffer when there is no need to
* change it.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
if (error) {
cmn_err(CE_WARN,
"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -1895,7 +1895,7 @@ xfs_iunlink_remove(
* Now last_ibp points to the buffer previous to us on
* the unlinked list. Pull us from the list.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
if (error) {
cmn_err(CE_WARN,
"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -1946,8 +1946,9 @@ xfs_ifree_cluster(
xfs_inode_t *ip, **ip_found;
xfs_inode_log_item_t *iip;
xfs_log_item_t *lip;
- xfs_perag_t *pag = xfs_get_perag(mp, inum);
+ struct xfs_perag *pag;
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
blks_per_cluster = 1;
ninodes = mp->m_sb.sb_inopblock;
@@ -2039,7 +2040,7 @@ xfs_ifree_cluster(
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
mp->m_bsize * blks_per_cluster,
- XFS_BUF_LOCK);
+ XBF_LOCK);
pre_flushed = 0;
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
@@ -2088,7 +2089,7 @@ xfs_ifree_cluster(
}
kmem_free(ip_found);
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
}
/*
@@ -2150,7 +2151,7 @@ xfs_ifree(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
if (error)
return error;
@@ -2483,13 +2484,16 @@ __xfs_iunpin_wait(
return;
/* Give the log a push to start the unpinning I/O */
- xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
- iip->ili_last_lsn : 0, XFS_LOG_FORCE);
+ if (iip && iip->ili_last_lsn)
+ xfs_log_force_lsn(ip->i_mount, iip->ili_last_lsn, 0);
+ else
+ xfs_log_force(ip->i_mount, 0);
+
if (wait)
wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
}
-static inline void
+void
xfs_iunpin_wait(
xfs_inode_t *ip)
{
@@ -2675,7 +2679,7 @@ xfs_iflush_cluster(
xfs_buf_t *bp)
{
xfs_mount_t *mp = ip->i_mount;
- xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
+ struct xfs_perag *pag;
unsigned long first_index, mask;
unsigned long inodes_per_cluster;
int ilist_size;
@@ -2686,6 +2690,7 @@ xfs_iflush_cluster(
int bufwasdelwri;
int i;
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
ASSERT(pag->pagi_inodeok);
ASSERT(pag->pag_ici_init);
@@ -2693,7 +2698,7 @@ xfs_iflush_cluster(
ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
if (!ilist)
- return 0;
+ goto out_put;
mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
@@ -2762,6 +2767,8 @@ xfs_iflush_cluster(
out_free:
read_unlock(&pag->pag_ici_lock);
kmem_free(ilist);
+out_put:
+ xfs_perag_put(pag);
return 0;
@@ -2805,6 +2812,7 @@ cluster_corrupt_out:
*/
xfs_iflush_abort(iq);
kmem_free(ilist);
+ xfs_perag_put(pag);
return XFS_ERROR(EFSCORRUPTED);
}
@@ -2827,8 +2835,6 @@ xfs_iflush(
xfs_dinode_t *dip;
xfs_mount_t *mp;
int error;
- int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
- enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
XFS_STATS_INC(xs_iflush_count);
@@ -2841,15 +2847,6 @@ xfs_iflush(
mp = ip->i_mount;
/*
- * If the inode isn't dirty, then just release the inode flush lock and
- * do nothing.
- */
- if (xfs_inode_clean(ip)) {
- xfs_ifunlock(ip);
- return 0;
- }
-
- /*
* We can't flush the inode until it is unpinned, so wait for it if we
* are allowed to block. We know no one new can pin it, because we are
* holding the inode lock shared and you need to hold it exclusively to
@@ -2860,7 +2857,7 @@ xfs_iflush(
* in the same cluster are dirty, they will probably write the inode
* out for us if they occur after the log force completes.
*/
- if (noblock && xfs_ipincount(ip)) {
+ if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
xfs_iunpin_nowait(ip);
xfs_ifunlock(ip);
return EAGAIN;
@@ -2894,60 +2891,10 @@ xfs_iflush(
}
/*
- * Decide how buffer will be flushed out. This is done before
- * the call to xfs_iflush_int because this field is zeroed by it.
- */
- if (iip != NULL && iip->ili_format.ilf_fields != 0) {
- /*
- * Flush out the inode buffer according to the directions
- * of the caller. In the cases where the caller has given
- * us a choice choose the non-delwri case. This is because
- * the inode is in the AIL and we need to get it out soon.
- */
- switch (flags) {
- case XFS_IFLUSH_SYNC:
- case XFS_IFLUSH_DELWRI_ELSE_SYNC:
- flags = 0;
- break;
- case XFS_IFLUSH_ASYNC_NOBLOCK:
- case XFS_IFLUSH_ASYNC:
- case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
- flags = INT_ASYNC;
- break;
- case XFS_IFLUSH_DELWRI:
- flags = INT_DELWRI;
- break;
- default:
- ASSERT(0);
- flags = 0;
- break;
- }
- } else {
- switch (flags) {
- case XFS_IFLUSH_DELWRI_ELSE_SYNC:
- case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
- case XFS_IFLUSH_DELWRI:
- flags = INT_DELWRI;
- break;
- case XFS_IFLUSH_ASYNC_NOBLOCK:
- case XFS_IFLUSH_ASYNC:
- flags = INT_ASYNC;
- break;
- case XFS_IFLUSH_SYNC:
- flags = 0;
- break;
- default:
- ASSERT(0);
- flags = 0;
- break;
- }
- }
-
- /*
* Get the buffer containing the on-disk inode.
*/
error = xfs_itobp(mp, NULL, ip, &dip, &bp,
- noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
+ (flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK);
if (error || !bp) {
xfs_ifunlock(ip);
return error;
@@ -2965,7 +2912,7 @@ xfs_iflush(
* get stuck waiting in the write for too long.
*/
if (XFS_BUF_ISPINNED(bp))
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+ xfs_log_force(mp, 0);
/*
* inode clustering:
@@ -2975,13 +2922,10 @@ xfs_iflush(
if (error)
goto cluster_corrupt_out;
- if (flags & INT_DELWRI) {
- xfs_bdwrite(mp, bp);
- } else if (flags & INT_ASYNC) {
- error = xfs_bawrite(mp, bp);
- } else {
+ if (flags & SYNC_WAIT)
error = xfs_bwrite(mp, bp);
- }
+ else
+ xfs_bdwrite(mp, bp);
return error;
corrupt_out:
@@ -3016,16 +2960,6 @@ xfs_iflush_int(
iip = ip->i_itemp;
mp = ip->i_mount;
-
- /*
- * If the inode isn't dirty, then just release the inode
- * flush lock and do nothing.
- */
- if (xfs_inode_clean(ip)) {
- xfs_ifunlock(ip);
- return 0;
- }
-
/* set *dip = inode's place in the buffer */
dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
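With the XFS_IFLUSH_* flag zoo gone, callers of xfs_iflush() express only one choice: wait for the write or leave the buffer on the delayed-write queue. A minimal sketch of the two call forms this hunk enables (ip is assumed to be an inode whose flush lock the caller already holds):
	/* blocking flush: take the buffer with XBF_LOCK and xfs_bwrite() it */
	error = xfs_iflush(ip, SYNC_WAIT);
	/* non-blocking flush: XBF_TRYLOCK the buffer and xfs_bdwrite() it;
	 * returns EAGAIN if the inode is still pinned in the log */
	error = xfs_iflush(ip, 0);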
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index ec1f28c4fc4..6c912b02759 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -420,16 +420,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
/*
- * Flags for xfs_iflush()
- */
-#define XFS_IFLUSH_DELWRI_ELSE_SYNC 1
-#define XFS_IFLUSH_DELWRI_ELSE_ASYNC 2
-#define XFS_IFLUSH_SYNC 3
-#define XFS_IFLUSH_ASYNC 4
-#define XFS_IFLUSH_DELWRI 5
-#define XFS_IFLUSH_ASYNC_NOBLOCK 6
-
-/*
* Flags for xfs_itruncate_start().
*/
#define XFS_ITRUNC_DEFINITE 0x1
@@ -483,6 +473,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
void xfs_iext_realloc(xfs_inode_t *, int, int);
void xfs_ipin(xfs_inode_t *);
void xfs_iunpin(xfs_inode_t *);
+void xfs_iunpin_wait(xfs_inode_t *);
int xfs_iflush(xfs_inode_t *, uint);
void xfs_ichgtime(xfs_inode_t *, int);
void xfs_lock_inodes(xfs_inode_t **, int, uint);
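The __xfs_iunpin_wait() hunk above reflects the reworked log force interface used by this series: forcing up to a known LSN and forcing everything in the log are now separate calls, and synchronous behaviour is requested with XFS_LOG_SYNC rather than a combined flag mask. A hedged caller-side sketch (flag values copied from the hunks in this patch):
	if (iip && iip->ili_last_lsn)
		xfs_log_force_lsn(mp, iip->ili_last_lsn, 0);	/* force far enough to unpin */
	else
		xfs_log_force(mp, 0);				/* force whatever is in the log */
	/* synchronous full force, e.g. in the unmount path */
	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);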
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index f38855d21ea..d4dc063111f 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -228,7 +228,7 @@ xfs_inode_item_format(
vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
vecp->i_len = sizeof(xfs_inode_log_format_t);
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IFORMAT);
+ vecp->i_type = XLOG_REG_TYPE_IFORMAT;
vecp++;
nvecs = 1;
@@ -279,7 +279,7 @@ xfs_inode_item_format(
vecp->i_addr = (xfs_caddr_t)&ip->i_d;
vecp->i_len = sizeof(struct xfs_icdinode);
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);
+ vecp->i_type = XLOG_REG_TYPE_ICORE;
vecp++;
nvecs++;
iip->ili_format.ilf_fields |= XFS_ILOG_CORE;
@@ -336,7 +336,7 @@ xfs_inode_item_format(
vecp->i_addr =
(char *)(ip->i_df.if_u1.if_extents);
vecp->i_len = ip->i_df.if_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
+ vecp->i_type = XLOG_REG_TYPE_IEXT;
} else
#endif
{
@@ -355,7 +355,7 @@ xfs_inode_item_format(
vecp->i_addr = (xfs_caddr_t)ext_buffer;
vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
XFS_DATA_FORK);
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
+ vecp->i_type = XLOG_REG_TYPE_IEXT;
}
ASSERT(vecp->i_len <= ip->i_df.if_bytes);
iip->ili_format.ilf_dsize = vecp->i_len;
@@ -373,7 +373,7 @@ xfs_inode_item_format(
ASSERT(ip->i_df.if_broot != NULL);
vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
vecp->i_len = ip->i_df.if_broot_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IBROOT);
+ vecp->i_type = XLOG_REG_TYPE_IBROOT;
vecp++;
nvecs++;
iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
@@ -399,7 +399,7 @@ xfs_inode_item_format(
ASSERT((ip->i_df.if_real_bytes == 0) ||
(ip->i_df.if_real_bytes == data_bytes));
vecp->i_len = (int)data_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ILOCAL);
+ vecp->i_type = XLOG_REG_TYPE_ILOCAL;
vecp++;
nvecs++;
iip->ili_format.ilf_dsize = (unsigned)data_bytes;
@@ -477,7 +477,7 @@ xfs_inode_item_format(
vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
XFS_ATTR_FORK);
#endif
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_EXT);
+ vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
iip->ili_format.ilf_asize = vecp->i_len;
vecp++;
nvecs++;
@@ -492,7 +492,7 @@ xfs_inode_item_format(
ASSERT(ip->i_afp->if_broot != NULL);
vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
vecp->i_len = ip->i_afp->if_broot_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_BROOT);
+ vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
vecp++;
nvecs++;
iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
@@ -516,7 +516,7 @@ xfs_inode_item_format(
ASSERT((ip->i_afp->if_real_bytes == 0) ||
(ip->i_afp->if_real_bytes == data_bytes));
vecp->i_len = (int)data_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_LOCAL);
+ vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL;
vecp++;
nvecs++;
iip->ili_format.ilf_asize = (unsigned)data_bytes;
@@ -602,33 +602,20 @@ xfs_inode_item_trylock(
if (!xfs_iflock_nowait(ip)) {
/*
- * If someone else isn't already trying to push the inode
- * buffer, we get to do it.
+ * inode has already been flushed to the backing buffer,
+ * leave it locked in shared mode, pushbuf routine will
+ * unlock it.
*/
- if (iip->ili_pushbuf_flag == 0) {
- iip->ili_pushbuf_flag = 1;
-#ifdef DEBUG
- iip->ili_push_owner = current_pid();
-#endif
- /*
- * Inode is left locked in shared mode.
- * Pushbuf routine gets to unlock it.
- */
- return XFS_ITEM_PUSHBUF;
- } else {
- /*
- * We hold the AIL lock, so we must specify the
- * NONOTIFY flag so that we won't double trip.
- */
- xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
- return XFS_ITEM_FLUSHING;
- }
- /* NOTREACHED */
+ return XFS_ITEM_PUSHBUF;
}
/* Stale items should force out the iclog */
if (ip->i_flags & XFS_ISTALE) {
xfs_ifunlock(ip);
+ /*
+ * we hold the AIL lock - notify the unlock routine of this
+ * so it doesn't try to get the lock again.
+ */
xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
return XFS_ITEM_PINNED;
}
@@ -746,11 +733,8 @@ xfs_inode_item_committed(
* This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
* failed to get the inode flush lock but did get the inode locked SHARED.
* Here we're trying to see if the inode buffer is incore, and if so whether it's
- * marked delayed write. If that's the case, we'll initiate a bawrite on that
- * buffer to expedite the process.
- *
- * We aren't holding the AIL lock (or the flush lock) when this gets called,
- * so it is inherently race-y.
+ * marked delayed write. If that's the case, we'll promote it and that will
+ * allow the caller to write the buffer by triggering the xfsbufd to run.
*/
STATIC void
xfs_inode_item_pushbuf(
@@ -759,82 +743,30 @@ xfs_inode_item_pushbuf(
xfs_inode_t *ip;
xfs_mount_t *mp;
xfs_buf_t *bp;
- uint dopush;
ip = iip->ili_inode;
-
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
/*
- * The ili_pushbuf_flag keeps others from
- * trying to duplicate our effort.
- */
- ASSERT(iip->ili_pushbuf_flag != 0);
- ASSERT(iip->ili_push_owner == current_pid());
-
- /*
* If a flush is not in progress anymore, chances are that the
* inode was taken off the AIL. So, just get out.
*/
if (completion_done(&ip->i_flush) ||
((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
- iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return;
}
mp = ip->i_mount;
bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
- iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);
+ iip->ili_format.ilf_len, XBF_TRYLOCK);
- if (bp != NULL) {
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- /*
- * We were racing with iflush because we don't hold
- * the AIL lock or the flush lock. However, at this point,
- * we have the buffer, and we know that it's dirty.
- * So, it's possible that iflush raced with us, and
- * this item is already taken off the AIL.
- * If not, we can flush it async.
- */
- dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
- !completion_done(&ip->i_flush));
- iip->ili_pushbuf_flag = 0;
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- trace_xfs_inode_item_push(bp, _RET_IP_);
-
- if (XFS_BUF_ISPINNED(bp)) {
- xfs_log_force(mp, (xfs_lsn_t)0,
- XFS_LOG_FORCE);
- }
- if (dopush) {
- int error;
- error = xfs_bawrite(mp, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_inode_item_pushbuf: pushbuf error %d on iip %p, bp %p",
- error, iip, bp);
- } else {
- xfs_buf_relse(bp);
- }
- } else {
- iip->ili_pushbuf_flag = 0;
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- xfs_buf_relse(bp);
- }
- return;
- }
- /*
- * We have to be careful about resetting pushbuf flag too early (above).
- * Even though in theory we can do it as soon as we have the buflock,
- * we don't want others to be doing work needlessly. They'll come to
- * this function thinking that pushing the buffer is their
- * responsibility only to find that the buffer is still locked by
- * another doing the same thing
- */
- iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ if (!bp)
+ return;
+ if (XFS_BUF_ISDELAYWRITE(bp))
+ xfs_buf_delwri_promote(bp);
+ xfs_buf_relse(bp);
return;
}
@@ -867,10 +799,14 @@ xfs_inode_item_push(
iip->ili_format.ilf_fields != 0);
/*
- * Write out the inode. The completion routine ('iflush_done') will
- * pull it from the AIL, mark it clean, unlock the flush lock.
+ * Push the inode to its backing buffer. This will not remove the
+ * inode from the AIL - a further push will be required to trigger a
+ * buffer push. However, this allows all the dirty inodes to be pushed
+ * to the buffer before it is pushed to disk. The buffer IO completion
+ * will pull the inode from the AIL, mark it clean and unlock the flush
+ * lock.
*/
- (void) xfs_iflush(ip, XFS_IFLUSH_ASYNC);
+ (void) xfs_iflush(ip, 0);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return;
@@ -934,7 +870,6 @@ xfs_inode_item_init(
/*
We have zeroed memory. No need ...
iip->ili_extents_buf = NULL;
- iip->ili_pushbuf_flag = 0;
*/
iip->ili_format.ilf_type = XFS_LI_INODE;
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index cc8df1ac778..9a467958ecd 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -144,12 +144,6 @@ typedef struct xfs_inode_log_item {
data exts */
struct xfs_bmbt_rec *ili_aextents_buf; /* array of logged
attr exts */
- unsigned int ili_pushbuf_flag; /* one bit used in push_ail */
-
-#ifdef DEBUG
- uint64_t ili_push_owner; /* one who sets pushbuf_flag
- above gets to push the buf */
-#endif
#ifdef XFS_TRANS_DEBUG
int ili_root_size;
char *ili_orig_root;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 62efab2f383..3af02314c60 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -408,8 +408,10 @@ xfs_bulkstat(
(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
nimask = ~(nicluster - 1);
nbcluster = nicluster >> mp->m_sb.sb_inopblog;
- irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4,
- KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+ irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+ if (!irbuf)
+ return ENOMEM;
+
nirbuf = irbsize / sizeof(*irbuf);
/*
@@ -420,9 +422,7 @@ xfs_bulkstat(
while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
cond_resched();
bp = NULL;
- down_read(&mp->m_peraglock);
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
- up_read(&mp->m_peraglock);
if (error) {
/*
* Skip this allocation group and go to the next one.
@@ -729,7 +729,7 @@ xfs_bulkstat(
/*
* Done, we're either out of filesystem or space to put the data.
*/
- kmem_free(irbuf);
+ kmem_free_large(irbuf);
*ubcountp = ubelem;
/*
* Found some inodes, return them now and return the error next time.
@@ -849,9 +849,7 @@ xfs_inumbers(
agbp = NULL;
while (left > 0 && agno < mp->m_sb.sb_agcount) {
if (agbp == NULL) {
- down_read(&mp->m_peraglock);
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
- up_read(&mp->m_peraglock);
if (error) {
/*
* If we can't read the AGI of this ag,
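The xfs_bulkstat() hunk above shows the new kmem_zalloc_greedy() convention from this series: the KM_* flags argument is gone, the allocation may now fail and must be checked, and the result is released with kmem_free_large(). A small sketch of that usage (the buffer name and the one-to-four page range are simply the values used above):
	size_t	irbsize;
	void	*irbuf;
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;			/* greedy allocation can fail now */
	/* ... fill and consume up to irbsize bytes ... */
	kmem_free_large(irbuf);			/* pair with the _large free */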
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 600b5b06aae..4f16be4b6ee 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -50,7 +50,6 @@ kmem_zone_t *xfs_log_ticket_zone;
(off) += (bytes);}
/* Local miscellaneous function prototypes */
-STATIC int xlog_bdstrat_cb(struct xfs_buf *);
STATIC int xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket,
xlog_in_core_t **, xfs_lsn_t *);
STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
@@ -80,11 +79,6 @@ STATIC int xlog_state_release_iclog(xlog_t *log,
STATIC void xlog_state_switch_iclogs(xlog_t *log,
xlog_in_core_t *iclog,
int eventual_size);
-STATIC int xlog_state_sync(xlog_t *log,
- xfs_lsn_t lsn,
- uint flags,
- int *log_flushed);
-STATIC int xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed);
STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
/* local functions to manipulate grant head */
@@ -297,65 +291,6 @@ xfs_log_done(xfs_mount_t *mp,
return lsn;
} /* xfs_log_done */
-
-/*
- * Force the in-core log to disk. If flags == XFS_LOG_SYNC,
- * the force is done synchronously.
- *
- * Asynchronous forces are implemented by setting the WANT_SYNC
- * bit in the appropriate in-core log and then returning.
- *
- * Synchronous forces are implemented with a signal variable. All callers
- * to force a given lsn to disk will wait on a the sv attached to the
- * specific in-core log. When given in-core log finally completes its
- * write to disk, that thread will wake up all threads waiting on the
- * sv.
- */
-int
-_xfs_log_force(
- xfs_mount_t *mp,
- xfs_lsn_t lsn,
- uint flags,
- int *log_flushed)
-{
- xlog_t *log = mp->m_log;
- int dummy;
-
- if (!log_flushed)
- log_flushed = &dummy;
-
- ASSERT(flags & XFS_LOG_FORCE);
-
- XFS_STATS_INC(xs_log_force);
-
- if (log->l_flags & XLOG_IO_ERROR)
- return XFS_ERROR(EIO);
- if (lsn == 0)
- return xlog_state_sync_all(log, flags, log_flushed);
- else
- return xlog_state_sync(log, lsn, flags, log_flushed);
-} /* _xfs_log_force */
-
-/*
- * Wrapper for _xfs_log_force(), to be used when caller doesn't care
- * about errors or whether the log was flushed or not. This is the normal
- * interface to use when trying to unpin items or move the log forward.
- */
-void
-xfs_log_force(
- xfs_mount_t *mp,
- xfs_lsn_t lsn,
- uint flags)
-{
- int error;
- error = _xfs_log_force(mp, lsn, flags, NULL);
- if (error) {
- xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
- "error %d returned.", error);
- }
-}
-
-
/*
* Attaches a new iclog I/O completion callback routine during
* transaction commit. If the log is in error state, a non-zero
@@ -602,7 +537,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
if (mp->m_flags & XFS_MOUNT_RDONLY)
return 0;
- error = _xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC, NULL);
+ error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
#ifdef DEBUG
@@ -618,7 +553,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
if (! (XLOG_FORCED_SHUTDOWN(log))) {
reg[0].i_addr = (void*)&magic;
reg[0].i_len = sizeof(magic);
- XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_UNMOUNT);
+ reg[0].i_type = XLOG_REG_TYPE_UNMOUNT;
error = xfs_log_reserve(mp, 600, 1, &tic,
XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
@@ -988,35 +923,6 @@ xlog_iodone(xfs_buf_t *bp)
} /* xlog_iodone */
/*
- * The bdstrat callback function for log bufs. This gives us a central
- * place to trap bufs in case we get hit by a log I/O error and need to
- * shutdown. Actually, in practice, even when we didn't get a log error,
- * we transition the iclogs to IOERROR state *after* flushing all existing
- * iclogs to disk. This is because we don't want anymore new transactions to be
- * started or completed afterwards.
- */
-STATIC int
-xlog_bdstrat_cb(struct xfs_buf *bp)
-{
- xlog_in_core_t *iclog;
-
- iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
-
- if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) {
- /* note for irix bstrat will need struct bdevsw passed
- * Fix the following macro if the code ever is merged
- */
- XFS_bdstrat(bp);
- return 0;
- }
-
- XFS_BUF_ERROR(bp, EIO);
- XFS_BUF_STALE(bp);
- xfs_biodone(bp);
- return XFS_ERROR(EIO);
-}
-
-/*
* Return size of each in-core log record buffer.
*
* All machines get 8 x 32kB buffers by default, unless tuned otherwise.
@@ -1158,7 +1064,6 @@ xlog_alloc_log(xfs_mount_t *mp,
if (!bp)
goto out_free_log;
XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
- XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
@@ -1196,7 +1101,6 @@ xlog_alloc_log(xfs_mount_t *mp,
if (!XFS_BUF_CPSEMA(bp))
ASSERT(0);
XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
- XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
iclog->ic_bp = bp;
iclog->ic_data = bp->b_addr;
@@ -1268,7 +1172,7 @@ xlog_commit_record(xfs_mount_t *mp,
reg[0].i_addr = NULL;
reg[0].i_len = 0;
- XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_COMMIT);
+ reg[0].i_type = XLOG_REG_TYPE_COMMIT;
ASSERT_ALWAYS(iclog);
if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
@@ -1343,6 +1247,37 @@ xlog_grant_push_ail(xfs_mount_t *mp,
xfs_trans_ail_push(log->l_ailp, threshold_lsn);
} /* xlog_grant_push_ail */
+/*
+ * The bdstrat callback function for log bufs. This gives us a central
+ * place to trap bufs in case we get hit by a log I/O error and need to
+ * shutdown. Actually, in practice, even when we didn't get a log error,
+ * we transition the iclogs to IOERROR state *after* flushing all existing
+ * iclogs to disk. This is because we don't want any more new transactions to be
+ * started or completed afterwards.
+ */
+STATIC int
+xlog_bdstrat(
+ struct xfs_buf *bp)
+{
+ struct xlog_in_core *iclog;
+
+ iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
+ if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ XFS_BUF_ERROR(bp, EIO);
+ XFS_BUF_STALE(bp);
+ xfs_biodone(bp);
+ /*
+ * It would seem logical to return EIO here, but we rely on
+ * the log state machine to propagate I/O errors instead of
+ * doing it here.
+ */
+ return 0;
+ }
+
+ bp->b_flags |= _XBF_RUN_QUEUES;
+ xfs_buf_iorequest(bp);
+ return 0;
+}
/*
* Flush out the in-core log (iclog) to the on-disk log in an asynchronous
@@ -1462,7 +1397,7 @@ xlog_sync(xlog_t *log,
*/
XFS_BUF_WRITE(bp);
- if ((error = XFS_bwrite(bp))) {
+ if ((error = xlog_bdstrat(bp))) {
xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
XFS_BUF_ADDR(bp));
return error;
@@ -1502,7 +1437,7 @@ xlog_sync(xlog_t *log,
/* account for internal log which doesn't start at block #0 */
XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
XFS_BUF_WRITE(bp);
- if ((error = XFS_bwrite(bp))) {
+ if ((error = xlog_bdstrat(bp))) {
xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
bp, XFS_BUF_ADDR(bp));
return error;
@@ -2854,7 +2789,6 @@ xlog_state_switch_iclogs(xlog_t *log,
log->l_iclog = iclog->ic_next;
} /* xlog_state_switch_iclogs */
-
/*
* Write out all data in the in-core log as of this exact moment in time.
*
@@ -2882,11 +2816,17 @@ xlog_state_switch_iclogs(xlog_t *log,
* b) when we return from flushing out this iclog, it is still
* not in the active nor dirty state.
*/
-STATIC int
-xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
+int
+_xfs_log_force(
+ struct xfs_mount *mp,
+ uint flags,
+ int *log_flushed)
{
- xlog_in_core_t *iclog;
- xfs_lsn_t lsn;
+ struct log *log = mp->m_log;
+ struct xlog_in_core *iclog;
+ xfs_lsn_t lsn;
+
+ XFS_STATS_INC(xs_log_force);
spin_lock(&log->l_icloglock);
@@ -2932,7 +2872,9 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
if (xlog_state_release_iclog(log, iclog))
return XFS_ERROR(EIO);
- *log_flushed = 1;
+
+ if (log_flushed)
+ *log_flushed = 1;
spin_lock(&log->l_icloglock);
if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
iclog->ic_state != XLOG_STATE_DIRTY)
@@ -2976,19 +2918,37 @@ maybe_sleep:
*/
if (iclog->ic_state & XLOG_STATE_IOERROR)
return XFS_ERROR(EIO);
- *log_flushed = 1;
-
+ if (log_flushed)
+ *log_flushed = 1;
} else {
no_sleep:
spin_unlock(&log->l_icloglock);
}
return 0;
-} /* xlog_state_sync_all */
+}
+/*
+ * Wrapper for _xfs_log_force(), to be used when caller doesn't care
+ * about errors or whether the log was flushed or not. This is the normal
+ * interface to use when trying to unpin items or move the log forward.
+ */
+void
+xfs_log_force(
+ xfs_mount_t *mp,
+ uint flags)
+{
+ int error;
+
+ error = _xfs_log_force(mp, flags, NULL);
+ if (error) {
+ xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
+ "error %d returned.", error);
+ }
+}
/*
- * Used by code which implements synchronous log forces.
+ * Force the in-core log to disk for a specific LSN.
*
* Find in-core log with lsn.
* If it is in the DIRTY state, just return.
@@ -2996,109 +2956,142 @@ no_sleep:
* state and go to sleep or return.
* If it is in any other state, go to sleep or return.
*
- * If filesystem activity goes to zero, the iclog will get flushed only by
- * bdflush().
+ * Synchronous forces are implemented with a signal variable. All callers
+ * to force a given lsn to disk will wait on the sv attached to the
+ * specific in-core log. When the given in-core log finally completes its
+ * write to disk, that thread will wake up all threads waiting on the
+ * sv.
*/
-STATIC int
-xlog_state_sync(xlog_t *log,
- xfs_lsn_t lsn,
- uint flags,
- int *log_flushed)
+int
+_xfs_log_force_lsn(
+ struct xfs_mount *mp,
+ xfs_lsn_t lsn,
+ uint flags,
+ int *log_flushed)
{
- xlog_in_core_t *iclog;
- int already_slept = 0;
+ struct log *log = mp->m_log;
+ struct xlog_in_core *iclog;
+ int already_slept = 0;
-try_again:
- spin_lock(&log->l_icloglock);
- iclog = log->l_iclog;
+ ASSERT(lsn != 0);
- if (iclog->ic_state & XLOG_STATE_IOERROR) {
- spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
- }
-
- do {
- if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
- iclog = iclog->ic_next;
- continue;
- }
+ XFS_STATS_INC(xs_log_force);
- if (iclog->ic_state == XLOG_STATE_DIRTY) {
+try_again:
+ spin_lock(&log->l_icloglock);
+ iclog = log->l_iclog;
+ if (iclog->ic_state & XLOG_STATE_IOERROR) {
spin_unlock(&log->l_icloglock);
- return 0;
+ return XFS_ERROR(EIO);
}
- if (iclog->ic_state == XLOG_STATE_ACTIVE) {
- /*
- * We sleep here if we haven't already slept (e.g.
- * this is the first time we've looked at the correct
- * iclog buf) and the buffer before us is going to
- * be sync'ed. The reason for this is that if we
- * are doing sync transactions here, by waiting for
- * the previous I/O to complete, we can allow a few
- * more transactions into this iclog before we close
- * it down.
- *
- * Otherwise, we mark the buffer WANT_SYNC, and bump
- * up the refcnt so we can release the log (which drops
- * the ref count). The state switch keeps new transaction
- * commits from using this buffer. When the current commits
- * finish writing into the buffer, the refcount will drop to
- * zero and the buffer will go out then.
- */
- if (!already_slept &&
- (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC |
- XLOG_STATE_SYNCING))) {
- ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
- XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_prev->ic_write_wait, PSWP,
- &log->l_icloglock, s);
- *log_flushed = 1;
- already_slept = 1;
- goto try_again;
- } else {
+ do {
+ if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
+ iclog = iclog->ic_next;
+ continue;
+ }
+
+ if (iclog->ic_state == XLOG_STATE_DIRTY) {
+ spin_unlock(&log->l_icloglock);
+ return 0;
+ }
+
+ if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+ /*
+ * We sleep here if we haven't already slept (e.g.
+ * this is the first time we've looked at the correct
+ * iclog buf) and the buffer before us is going to
+ * be sync'ed. The reason for this is that if we
+ * are doing sync transactions here, by waiting for
+ * the previous I/O to complete, we can allow a few
+ * more transactions into this iclog before we close
+ * it down.
+ *
+ * Otherwise, we mark the buffer WANT_SYNC, and bump
+ * up the refcnt so we can release the log (which
+ * drops the ref count). The state switch keeps new
+ * transaction commits from using this buffer. When
+ * the current commits finish writing into the buffer,
+ * the refcount will drop to zero and the buffer will
+ * go out then.
+ */
+ if (!already_slept &&
+ (iclog->ic_prev->ic_state &
+ (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
+ ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
+
+ XFS_STATS_INC(xs_log_force_sleep);
+
+ sv_wait(&iclog->ic_prev->ic_write_wait,
+ PSWP, &log->l_icloglock, s);
+ if (log_flushed)
+ *log_flushed = 1;
+ already_slept = 1;
+ goto try_again;
+ }
atomic_inc(&iclog->ic_refcnt);
xlog_state_switch_iclogs(log, iclog, 0);
spin_unlock(&log->l_icloglock);
if (xlog_state_release_iclog(log, iclog))
return XFS_ERROR(EIO);
- *log_flushed = 1;
+ if (log_flushed)
+ *log_flushed = 1;
spin_lock(&log->l_icloglock);
}
- }
- if ((flags & XFS_LOG_SYNC) && /* sleep */
- !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
+ if ((flags & XFS_LOG_SYNC) && /* sleep */
+ !(iclog->ic_state &
+ (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
+ /*
+ * Don't wait on completion if we know that we've
+ * gotten a log write error.
+ */
+ if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ spin_unlock(&log->l_icloglock);
+ return XFS_ERROR(EIO);
+ }
+ XFS_STATS_INC(xs_log_force_sleep);
+ sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
+ /*
+ * No need to grab the log lock here since we're
+ * only deciding whether or not to return EIO
+ * and the memory read should be atomic.
+ */
+ if (iclog->ic_state & XLOG_STATE_IOERROR)
+ return XFS_ERROR(EIO);
- /*
- * Don't wait on completion if we know that we've
- * gotten a log write error.
- */
- if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ if (log_flushed)
+ *log_flushed = 1;
+ } else { /* just return */
spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
}
- XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
- /*
- * No need to grab the log lock here since we're
- * only deciding whether or not to return EIO
- * and the memory read should be atomic.
- */
- if (iclog->ic_state & XLOG_STATE_IOERROR)
- return XFS_ERROR(EIO);
- *log_flushed = 1;
- } else { /* just return */
- spin_unlock(&log->l_icloglock);
- }
- return 0;
- } while (iclog != log->l_iclog);
+ return 0;
+ } while (iclog != log->l_iclog);
- spin_unlock(&log->l_icloglock);
- return 0;
-} /* xlog_state_sync */
+ spin_unlock(&log->l_icloglock);
+ return 0;
+}
+/*
+ * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care
+ * about errors or whether the log was flushed or not. This is the normal
+ * interface to use when trying to unpin items or move the log forward.
+ */
+void
+xfs_log_force_lsn(
+ xfs_mount_t *mp,
+ xfs_lsn_t lsn,
+ uint flags)
+{
+ int error;
+
+ error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
+ if (error) {
+ xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
+ "error %d returned.", error);
+ }
+}
/*
* Called when we want to mark the current iclog as being ready to sync to
@@ -3463,7 +3456,6 @@ xfs_log_force_umount(
xlog_ticket_t *tic;
xlog_t *log;
int retval;
- int dummy;
log = mp->m_log;
@@ -3537,13 +3529,14 @@ xfs_log_force_umount(
}
spin_unlock(&log->l_grant_lock);
- if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
+ if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
ASSERT(!logerror);
/*
* Force the incore logs to disk before shutting the
* log down completely.
*/
- xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy);
+ _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
+
spin_lock(&log->l_icloglock);
retval = xlog_state_ioerror(log);
spin_unlock(&log->l_icloglock);
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index d0c9baa50b1..7074be9d13e 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -70,14 +70,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
* Flags to xfs_log_force()
*
* XFS_LOG_SYNC: Synchronous force in-core log to disk
- * XFS_LOG_FORCE: Start in-core log write now.
- * XFS_LOG_URGE: Start write within some window of time.
- *
- * Note: Either XFS_LOG_FORCE or XFS_LOG_URGE must be set.
*/
#define XFS_LOG_SYNC 0x1
-#define XFS_LOG_FORCE 0x2
-#define XFS_LOG_URGE 0x4
#endif /* __KERNEL__ */
@@ -110,10 +104,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
#define XLOG_REG_TYPE_TRANSHDR 19
#define XLOG_REG_TYPE_MAX 19
-#define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
-
typedef struct xfs_log_iovec {
- xfs_caddr_t i_addr; /* beginning address of region */
+ xfs_caddr_t i_addr; /* beginning address of region */
int i_len; /* length in bytes of region */
uint i_type; /* type of region */
} xfs_log_iovec_t;
@@ -140,12 +132,17 @@ xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
void **iclog,
uint flags);
int _xfs_log_force(struct xfs_mount *mp,
- xfs_lsn_t lsn,
uint flags,
int *log_forced);
void xfs_log_force(struct xfs_mount *mp,
- xfs_lsn_t lsn,
uint flags);
+int _xfs_log_force_lsn(struct xfs_mount *mp,
+ xfs_lsn_t lsn,
+ uint flags,
+ int *log_forced);
+void xfs_log_force_lsn(struct xfs_mount *mp,
+ xfs_lsn_t lsn,
+ uint flags);
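As a usage note rather than part of the patch: the old single entry point that took an lsn plus XFS_LOG_FORCE is now two interfaces, one for the whole log and one targeted at an lsn. A sketch of typical call sites, using only the prototypes declared above; mp, commit_lsn and error are assumed locals:

	int	log_flushed = 0;

	/* flush everything in the in-core log, synchronously, ignoring errors */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* start an asynchronous log write without waiting for it */
	xfs_log_force(mp, 0);

	/* flush up to a specific commit lsn and learn whether I/O was issued */
	error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, &log_flushed);
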
int xfs_log_mount(struct xfs_mount *mp,
struct xfs_buftarg *log_target,
xfs_daddr_t start_block,
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index d55662db707..fd02a18facd 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -443,14 +443,9 @@ typedef struct log {
/* common routines */
extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
-extern int xlog_find_tail(xlog_t *log,
- xfs_daddr_t *head_blk,
- xfs_daddr_t *tail_blk);
extern int xlog_recover(xlog_t *log);
extern int xlog_recover_finish(xlog_t *log);
extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
-extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
-extern void xlog_put_bp(struct xfs_buf *);
extern kmem_zone_t *xfs_log_ticket_zone;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 69ac2e5ef20..22e6efdc17e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -50,8 +50,6 @@
STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
-STATIC void xlog_recover_insert_item_backq(xlog_recover_item_t **q,
- xlog_recover_item_t *item);
#if defined(DEBUG)
STATIC void xlog_recover_check_summary(xlog_t *);
#else
@@ -68,7 +66,7 @@ STATIC void xlog_recover_check_summary(xlog_t *);
((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) ((bno) & ~(log)->l_sectbb_mask)
-xfs_buf_t *
+STATIC xfs_buf_t *
xlog_get_bp(
xlog_t *log,
int nbblks)
@@ -88,7 +86,7 @@ xlog_get_bp(
return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
}
-void
+STATIC void
xlog_put_bp(
xfs_buf_t *bp)
{
@@ -805,7 +803,7 @@ xlog_find_head(
* We could speed up search by using current head_blk buffer, but it is not
* available.
*/
-int
+STATIC int
xlog_find_tail(
xlog_t *log,
xfs_daddr_t *head_blk,
@@ -1367,36 +1365,45 @@ xlog_clear_stale_blocks(
STATIC xlog_recover_t *
xlog_recover_find_tid(
- xlog_recover_t *q,
+ struct hlist_head *head,
xlog_tid_t tid)
{
- xlog_recover_t *p = q;
+ xlog_recover_t *trans;
+ struct hlist_node *n;
- while (p != NULL) {
- if (p->r_log_tid == tid)
- break;
- p = p->r_next;
+ hlist_for_each_entry(trans, n, head, r_list) {
+ if (trans->r_log_tid == tid)
+ return trans;
}
- return p;
+ return NULL;
}
STATIC void
-xlog_recover_put_hashq(
- xlog_recover_t **q,
- xlog_recover_t *trans)
+xlog_recover_new_tid(
+ struct hlist_head *head,
+ xlog_tid_t tid,
+ xfs_lsn_t lsn)
{
- trans->r_next = *q;
- *q = trans;
+ xlog_recover_t *trans;
+
+ trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
+ trans->r_log_tid = tid;
+ trans->r_lsn = lsn;
+ INIT_LIST_HEAD(&trans->r_itemq);
+
+ INIT_HLIST_NODE(&trans->r_list);
+ hlist_add_head(&trans->r_list, head);
}
STATIC void
xlog_recover_add_item(
- xlog_recover_item_t **itemq)
+ struct list_head *head)
{
xlog_recover_item_t *item;
item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
- xlog_recover_insert_item_backq(itemq, item);
+ INIT_LIST_HEAD(&item->ri_list);
+ list_add_tail(&item->ri_list, head);
}
STATIC int
@@ -1409,8 +1416,7 @@ xlog_recover_add_to_cont_trans(
xfs_caddr_t ptr, old_ptr;
int old_len;
- item = trans->r_itemq;
- if (item == NULL) {
+ if (list_empty(&trans->r_itemq)) {
/* finish copying rest of trans header */
xlog_recover_add_item(&trans->r_itemq);
ptr = (xfs_caddr_t) &trans->r_theader +
@@ -1418,7 +1424,8 @@ xlog_recover_add_to_cont_trans(
memcpy(ptr, dp, len); /* d, s, l */
return 0;
}
- item = item->ri_prev;
+ /* take the tail entry */
+ item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
old_len = item->ri_buf[item->ri_cnt-1].i_len;
@@ -1455,8 +1462,7 @@ xlog_recover_add_to_trans(
if (!len)
return 0;
- item = trans->r_itemq;
- if (item == NULL) {
+ if (list_empty(&trans->r_itemq)) {
/* we need to catch log corruptions here */
if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
xlog_warn("XFS: xlog_recover_add_to_trans: "
@@ -1474,12 +1480,15 @@ xlog_recover_add_to_trans(
memcpy(ptr, dp, len);
in_f = (xfs_inode_log_format_t *)ptr;
- if (item->ri_prev->ri_total != 0 &&
- item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
+ /* take the tail entry */
+ item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
+ if (item->ri_total != 0 &&
+ item->ri_total == item->ri_cnt) {
+ /* tail item is in use, get a new one */
xlog_recover_add_item(&trans->r_itemq);
+ item = list_entry(trans->r_itemq.prev,
+ xlog_recover_item_t, ri_list);
}
- item = trans->r_itemq;
- item = item->ri_prev;
if (item->ri_total == 0) { /* first region to be added */
if (in_f->ilf_size == 0 ||
@@ -1504,96 +1513,29 @@ xlog_recover_add_to_trans(
return 0;
}
-STATIC void
-xlog_recover_new_tid(
- xlog_recover_t **q,
- xlog_tid_t tid,
- xfs_lsn_t lsn)
-{
- xlog_recover_t *trans;
-
- trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
- trans->r_log_tid = tid;
- trans->r_lsn = lsn;
- xlog_recover_put_hashq(q, trans);
-}
-
-STATIC int
-xlog_recover_unlink_tid(
- xlog_recover_t **q,
- xlog_recover_t *trans)
-{
- xlog_recover_t *tp;
- int found = 0;
-
- ASSERT(trans != NULL);
- if (trans == *q) {
- *q = (*q)->r_next;
- } else {
- tp = *q;
- while (tp) {
- if (tp->r_next == trans) {
- found = 1;
- break;
- }
- tp = tp->r_next;
- }
- if (!found) {
- xlog_warn(
- "XFS: xlog_recover_unlink_tid: trans not found");
- ASSERT(0);
- return XFS_ERROR(EIO);
- }
- tp->r_next = tp->r_next->r_next;
- }
- return 0;
-}
-
-STATIC void
-xlog_recover_insert_item_backq(
- xlog_recover_item_t **q,
- xlog_recover_item_t *item)
-{
- if (*q == NULL) {
- item->ri_prev = item->ri_next = item;
- *q = item;
- } else {
- item->ri_next = *q;
- item->ri_prev = (*q)->ri_prev;
- (*q)->ri_prev = item;
- item->ri_prev->ri_next = item;
- }
-}
-
-STATIC void
-xlog_recover_insert_item_frontq(
- xlog_recover_item_t **q,
- xlog_recover_item_t *item)
-{
- xlog_recover_insert_item_backq(q, item);
- *q = item;
-}
-
+/*
+ * Sort the log items in the transaction. Cancelled buffers need
+ * to be put first so they are processed before any items that might
+ * modify the buffers. If they are cancelled, then the modifications
+ * don't need to be replayed.
+ */
STATIC int
xlog_recover_reorder_trans(
xlog_recover_t *trans)
{
- xlog_recover_item_t *first_item, *itemq, *itemq_next;
- xfs_buf_log_format_t *buf_f;
- ushort flags = 0;
+ xlog_recover_item_t *item, *n;
+ LIST_HEAD(sort_list);
- first_item = itemq = trans->r_itemq;
- trans->r_itemq = NULL;
- do {
- itemq_next = itemq->ri_next;
- buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
+ list_splice_init(&trans->r_itemq, &sort_list);
+ list_for_each_entry_safe(item, n, &sort_list, ri_list) {
+ xfs_buf_log_format_t *buf_f;
- switch (ITEM_TYPE(itemq)) {
+ buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
+
+ switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
- flags = buf_f->blf_flags;
- if (!(flags & XFS_BLI_CANCEL)) {
- xlog_recover_insert_item_frontq(&trans->r_itemq,
- itemq);
+ if (!(buf_f->blf_flags & XFS_BLI_CANCEL)) {
+ list_move(&item->ri_list, &trans->r_itemq);
break;
}
case XFS_LI_INODE:
@@ -1601,7 +1543,7 @@ xlog_recover_reorder_trans(
case XFS_LI_QUOTAOFF:
case XFS_LI_EFD:
case XFS_LI_EFI:
- xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
+ list_move_tail(&item->ri_list, &trans->r_itemq);
break;
default:
xlog_warn(
@@ -1609,8 +1551,8 @@ xlog_recover_reorder_trans(
ASSERT(0);
return XFS_ERROR(EIO);
}
- itemq = itemq_next;
- } while (first_item != itemq);
+ }
+ ASSERT(list_empty(&sort_list));
return 0;
}
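The reordering above uses a common list_head idiom: splice the queue onto a private list, then move each entry back to the head or tail of the original queue according to a predicate. A small generic sketch of that idiom (the struct and predicate are illustrative, not XFS types):

	struct entry {
		struct list_head	list;
		bool			urgent;
	};

	static void reorder(struct list_head *queue)
	{
		struct entry	*e, *n;
		LIST_HEAD(tmp);

		list_splice_init(queue, &tmp);		/* queue is now empty */
		list_for_each_entry_safe(e, n, &tmp, list) {
			if (e->urgent)
				list_move(&e->list, queue);	 /* to the front */
			else
				list_move_tail(&e->list, queue); /* keep relative order at the back */
		}
	}
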
@@ -2242,9 +2184,9 @@ xlog_recover_do_buffer_trans(
}
mp = log->l_mp;
- buf_flags = XFS_BUF_LOCK;
+ buf_flags = XBF_LOCK;
if (!(flags & XFS_BLI_INODE_BUF))
- buf_flags |= XFS_BUF_MAPPED;
+ buf_flags |= XBF_MAPPED;
bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
if (XFS_BUF_ISERROR(bp)) {
@@ -2346,7 +2288,7 @@ xlog_recover_do_inode_trans(
}
bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
- XFS_BUF_LOCK);
+ XBF_LOCK);
if (XFS_BUF_ISERROR(bp)) {
xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
bp, in_f->ilf_blkno);
@@ -2814,14 +2756,13 @@ xlog_recover_do_trans(
int pass)
{
int error = 0;
- xlog_recover_item_t *item, *first_item;
+ xlog_recover_item_t *item;
error = xlog_recover_reorder_trans(trans);
if (error)
return error;
- first_item = item = trans->r_itemq;
- do {
+ list_for_each_entry(item, &trans->r_itemq, ri_list) {
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
error = xlog_recover_do_buffer_trans(log, item, pass);
@@ -2854,8 +2795,7 @@ xlog_recover_do_trans(
if (error)
return error;
- item = item->ri_next;
- } while (first_item != item);
+ }
return 0;
}
@@ -2869,21 +2809,18 @@ STATIC void
xlog_recover_free_trans(
xlog_recover_t *trans)
{
- xlog_recover_item_t *first_item, *item, *free_item;
+ xlog_recover_item_t *item, *n;
int i;
- item = first_item = trans->r_itemq;
- do {
- free_item = item;
- item = item->ri_next;
- /* Free the regions in the item. */
- for (i = 0; i < free_item->ri_cnt; i++) {
- kmem_free(free_item->ri_buf[i].i_addr);
- }
+ list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
+ /* Free the regions in the item. */
+ list_del(&item->ri_list);
+ for (i = 0; i < item->ri_cnt; i++)
+ kmem_free(item->ri_buf[i].i_addr);
/* Free the item itself */
- kmem_free(free_item->ri_buf);
- kmem_free(free_item);
- } while (first_item != item);
+ kmem_free(item->ri_buf);
+ kmem_free(item);
+ }
/* Free the transaction recover structure */
kmem_free(trans);
}
@@ -2891,14 +2828,12 @@ xlog_recover_free_trans(
STATIC int
xlog_recover_commit_trans(
xlog_t *log,
- xlog_recover_t **q,
xlog_recover_t *trans,
int pass)
{
int error;
- if ((error = xlog_recover_unlink_tid(q, trans)))
- return error;
+ hlist_del(&trans->r_list);
if ((error = xlog_recover_do_trans(log, trans, pass)))
return error;
xlog_recover_free_trans(trans); /* no error */
@@ -2926,7 +2861,7 @@ xlog_recover_unmount_trans(
STATIC int
xlog_recover_process_data(
xlog_t *log,
- xlog_recover_t *rhash[],
+ struct hlist_head rhash[],
xlog_rec_header_t *rhead,
xfs_caddr_t dp,
int pass)
@@ -2960,7 +2895,7 @@ xlog_recover_process_data(
}
tid = be32_to_cpu(ohead->oh_tid);
hash = XLOG_RHASH(tid);
- trans = xlog_recover_find_tid(rhash[hash], tid);
+ trans = xlog_recover_find_tid(&rhash[hash], tid);
if (trans == NULL) { /* not found; add new tid */
if (ohead->oh_flags & XLOG_START_TRANS)
xlog_recover_new_tid(&rhash[hash], tid,
@@ -2978,7 +2913,7 @@ xlog_recover_process_data(
switch (flags) {
case XLOG_COMMIT_TRANS:
error = xlog_recover_commit_trans(log,
- &rhash[hash], trans, pass);
+ trans, pass);
break;
case XLOG_UNMOUNT_TRANS:
error = xlog_recover_unmount_trans(trans);
@@ -3211,7 +3146,7 @@ xlog_recover_process_one_iunlink(
/*
* Get the on disk inode to find the next inode in the bucket.
*/
- error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
if (error)
goto fail_iput;
@@ -3517,7 +3452,7 @@ xlog_do_recovery_pass(
int error = 0, h_size;
int bblks, split_bblks;
int hblks, split_hblks, wrapped_hblks;
- xlog_recover_t *rhash[XLOG_RHASH_SIZE];
+ struct hlist_head rhash[XLOG_RHASH_SIZE];
ASSERT(head_blk != tail_blk);
@@ -3978,8 +3913,7 @@ xlog_recover_finish(
* case the unlink transactions would have problems
* pushing the EFIs out of the way.
*/
- xfs_log_force(log->l_mp, (xfs_lsn_t)0,
- (XFS_LOG_FORCE | XFS_LOG_SYNC));
+ xfs_log_force(log->l_mp, XFS_LOG_SYNC);
xlog_recover_process_iunlinks(log);
diff --git a/fs/xfs/xfs_log_recover.h b/fs/xfs/xfs_log_recover.h
index b2254555530..75d74920725 100644
--- a/fs/xfs/xfs_log_recover.h
+++ b/fs/xfs/xfs_log_recover.h
@@ -35,22 +35,21 @@
* item headers are in ri_buf[0]. Additional buffers follow.
*/
typedef struct xlog_recover_item {
- struct xlog_recover_item *ri_next;
- struct xlog_recover_item *ri_prev;
- int ri_type;
- int ri_cnt; /* count of regions found */
- int ri_total; /* total regions */
- xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */
+ struct list_head ri_list;
+ int ri_type;
+ int ri_cnt; /* count of regions found */
+ int ri_total; /* total regions */
+ xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */
} xlog_recover_item_t;
struct xlog_tid;
typedef struct xlog_recover {
- struct xlog_recover *r_next;
- xlog_tid_t r_log_tid; /* log's transaction id */
- xfs_trans_header_t r_theader; /* trans header for partial */
- int r_state; /* not needed */
- xfs_lsn_t r_lsn; /* xact lsn */
- xlog_recover_item_t *r_itemq; /* q for items */
+ struct hlist_node r_list;
+ xlog_tid_t r_log_tid; /* log's transaction id */
+ xfs_trans_header_t r_theader; /* trans header for partial */
+ int r_state; /* not needed */
+ xfs_lsn_t r_lsn; /* xact lsn */
+ struct list_head r_itemq; /* q for items */
} xlog_recover_t;
#define ITEM_TYPE(i) (*(ushort *)(i)->ri_buf[0].i_addr)
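Illustrative only, not part of the patch: with r_itemq now a standard list_head, queued recovery items are walked and the most recently added item reached with the usual helpers; trans is assumed to be an already-initialised xlog_recover_t.

	xlog_recover_item_t	*item;

	/* walk the queued items in order */
	list_for_each_entry(item, &trans->r_itemq, ri_list) {
		/* item->ri_buf[0] holds the item header; ri_cnt of ri_total regions are valid */
	}

	/* the tail entry is the item most recently added to the queue */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
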
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index eb403b40e12..6afaaeb2950 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -201,6 +201,38 @@ xfs_uuid_unmount(
/*
+ * Reference counting access wrappers to the perag structures.
+ */
+struct xfs_perag *
+xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
+{
+ struct xfs_perag *pag;
+ int ref = 0;
+
+ spin_lock(&mp->m_perag_lock);
+ pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+ if (pag) {
+ ASSERT(atomic_read(&pag->pag_ref) >= 0);
+ /* catch leaks in the positive direction during testing */
+ ASSERT(atomic_read(&pag->pag_ref) < 1000);
+ ref = atomic_inc_return(&pag->pag_ref);
+ }
+ spin_unlock(&mp->m_perag_lock);
+ trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
+ return pag;
+}
+
+void
+xfs_perag_put(struct xfs_perag *pag)
+{
+ int ref;
+
+ ASSERT(atomic_read(&pag->pag_ref) > 0);
+ ref = atomic_dec_return(&pag->pag_ref);
+ trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
+}
+
+/*
* Free up the resources associated with a mount structure. Assume that
* the structure was initially zeroed, so we can tell which fields got
* initialized.
@@ -209,13 +241,16 @@ STATIC void
xfs_free_perag(
xfs_mount_t *mp)
{
- if (mp->m_perag) {
- int agno;
+ xfs_agnumber_t agno;
+ struct xfs_perag *pag;
- for (agno = 0; agno < mp->m_maxagi; agno++)
- if (mp->m_perag[agno].pagb_list)
- kmem_free(mp->m_perag[agno].pagb_list);
- kmem_free(mp->m_perag);
+ for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ spin_lock(&mp->m_perag_lock);
+ pag = radix_tree_delete(&mp->m_perag_tree, agno);
+ ASSERT(pag);
+ ASSERT(atomic_read(&pag->pag_ref) == 0);
+ spin_unlock(&mp->m_perag_lock);
+ kmem_free(pag);
}
}
@@ -389,22 +424,57 @@ xfs_initialize_perag_icache(
}
}
-xfs_agnumber_t
+int
xfs_initialize_perag(
xfs_mount_t *mp,
- xfs_agnumber_t agcount)
+ xfs_agnumber_t agcount,
+ xfs_agnumber_t *maxagi)
{
xfs_agnumber_t index, max_metadata;
+ xfs_agnumber_t first_initialised = 0;
xfs_perag_t *pag;
xfs_agino_t agino;
xfs_ino_t ino;
xfs_sb_t *sbp = &mp->m_sb;
xfs_ino_t max_inum = XFS_MAXINUMBER_32;
+ int error = -ENOMEM;
/* Check to see if the filesystem can overflow 32 bit inodes */
agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
+ /*
+ * Walk the current per-ag tree so we don't try to initialise AGs
+ * that already exist (growfs case). Allocate and insert all the
+ * AGs we don't find ready for initialisation.
+ */
+ for (index = 0; index < agcount; index++) {
+ pag = xfs_perag_get(mp, index);
+ if (pag) {
+ xfs_perag_put(pag);
+ continue;
+ }
+ if (!first_initialised)
+ first_initialised = index;
+ pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
+ if (!pag)
+ goto out_unwind;
+ if (radix_tree_preload(GFP_NOFS))
+ goto out_unwind;
+ spin_lock(&mp->m_perag_lock);
+ if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
+ BUG();
+ spin_unlock(&mp->m_perag_lock);
+ radix_tree_preload_end();
+ error = -EEXIST;
+ goto out_unwind;
+ }
+ pag->pag_agno = index;
+ pag->pag_mount = mp;
+ spin_unlock(&mp->m_perag_lock);
+ radix_tree_preload_end();
+ }
+
/* Clear the mount flag if no inode can overflow 32 bits
* on this filesystem, or if specifically requested..
*/
@@ -438,21 +508,33 @@ xfs_initialize_perag(
}
/* This ag is preferred for inodes */
- pag = &mp->m_perag[index];
+ pag = xfs_perag_get(mp, index);
pag->pagi_inodeok = 1;
if (index < max_metadata)
pag->pagf_metadata = 1;
xfs_initialize_perag_icache(pag);
+ xfs_perag_put(pag);
}
} else {
/* Setup default behavior for smaller filesystems */
for (index = 0; index < agcount; index++) {
- pag = &mp->m_perag[index];
+ pag = xfs_perag_get(mp, index);
pag->pagi_inodeok = 1;
xfs_initialize_perag_icache(pag);
+ xfs_perag_put(pag);
}
}
- return index;
+ if (maxagi)
+ *maxagi = index;
+ return 0;
+
+out_unwind:
+ kmem_free(pag);
+ for (; index > first_initialised; index--) {
+ pag = radix_tree_delete(&mp->m_perag_tree, index);
+ kmem_free(pag);
+ }
+ return error;
}
void
@@ -583,7 +665,7 @@ xfs_readsb(xfs_mount_t *mp, int flags)
* access to the superblock.
*/
sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
- extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
+ extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED;
bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
extra_flags);
@@ -731,12 +813,13 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
error = xfs_ialloc_pagi_init(mp, NULL, index);
if (error)
return error;
- pag = &mp->m_perag[index];
+ pag = xfs_perag_get(mp, index);
ifree += pag->pagi_freecount;
ialloc += pag->pagi_count;
bfree += pag->pagf_freeblks;
bfreelst += pag->pagf_flcount;
btree += pag->pagf_btreeblks;
+ xfs_perag_put(pag);
}
/*
* Overwrite incore superblock counters with just-read data
@@ -1008,6 +1091,22 @@ xfs_mount_reset_sbqflags(
return xfs_trans_commit(tp, 0);
}
+__uint64_t
+xfs_default_resblks(xfs_mount_t *mp)
+{
+ __uint64_t resblks;
+
+ /*
+ * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
+ * This may drive us straight to ENOSPC on mount, but that implies
+ * we were already there on the last unmount. Warn if this occurs.
+ */
+ resblks = mp->m_sb.sb_dblocks;
+ do_div(resblks, 20);
+ resblks = min_t(__uint64_t, resblks, 1024);
+ return resblks;
+}
+
/*
* This function does the following on an initial mount of a file system:
* - reads the superblock from disk and init the mount struct
@@ -1152,13 +1251,13 @@ xfs_mountfs(
/*
* Allocate and initialize the per-ag data.
*/
- init_rwsem(&mp->m_peraglock);
- mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
- KM_MAYFAIL);
- if (!mp->m_perag)
+ spin_lock_init(&mp->m_perag_lock);
+ INIT_RADIX_TREE(&mp->m_perag_tree, GFP_NOFS);
+ error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
+ if (error) {
+ cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error);
goto out_remove_uuid;
-
- mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
+ }
if (!sbp->sb_logblocks) {
cmn_err(CE_WARN, "XFS: no log defined");
@@ -1318,18 +1417,14 @@ xfs_mountfs(
* when at ENOSPC. This is needed for operations like create with
* attr, unwritten extent conversion at ENOSPC, etc. Data allocations
* are not allowed to use this reserved space.
- *
- * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
- * This may drive us straight to ENOSPC on mount, but that implies
- * we were already there on the last unmount. Warn if this occurs.
*/
- resblks = mp->m_sb.sb_dblocks;
- do_div(resblks, 20);
- resblks = min_t(__uint64_t, resblks, 1024);
- error = xfs_reserve_blocks(mp, &resblks, NULL);
- if (error)
- cmn_err(CE_WARN, "XFS: Unable to allocate reserve blocks. "
- "Continuing without a reserve pool.");
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ resblks = xfs_default_resblks(mp);
+ error = xfs_reserve_blocks(mp, &resblks, NULL);
+ if (error)
+ cmn_err(CE_WARN, "XFS: Unable to allocate reserve "
+ "blocks. Continuing without a reserve pool.");
+ }
return 0;
@@ -1372,8 +1467,19 @@ xfs_unmountfs(
* push out the iclog we will never get that unlocked. hence we
* need to force the log first.
*/
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
- xfs_reclaim_inodes(mp, XFS_IFLUSH_ASYNC);
+ xfs_log_force(mp, XFS_LOG_SYNC);
+
+ /*
+ * Do a delwri reclaim pass first so that as many dirty inodes are
+	 * queued up for IO as possible. Then flush the buffers before making
+	 * a synchronous pass to ensure all the remaining inodes are reclaimed.
+ * This makes the reclaim process as quick as possible by avoiding
+ * synchronous writeout and blocking on inodes already in the delwri
+ * state as much as possible.
+ */
+ xfs_reclaim_inodes(mp, 0);
+ XFS_bflush(mp->m_ddev_targp);
+ xfs_reclaim_inodes(mp, SYNC_WAIT);
xfs_qm_unmount(mp);
@@ -1382,7 +1488,7 @@ xfs_unmountfs(
* that nothing is pinned. This is important because bflush()
* will skip pinned buffers.
*/
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+ xfs_log_force(mp, XFS_LOG_SYNC);
xfs_binval(mp->m_ddev_targp);
if (mp->m_rtdev_targp) {
@@ -1548,15 +1654,14 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
/* find modified range */
+ f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
+ ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+ last = xfs_sb_info[f + 1].offset - 1;
f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
ASSERT((1LL << f) & XFS_SB_MOD_BITS);
first = xfs_sb_info[f].offset;
- f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
- ASSERT((1LL << f) & XFS_SB_MOD_BITS);
- last = xfs_sb_info[f + 1].offset - 1;
-
xfs_trans_log_buf(tp, bp, first, last);
}
@@ -1887,7 +1992,7 @@ xfs_getsb(
ASSERT(mp->m_sb_bp != NULL);
bp = mp->m_sb_bp;
- if (flags & XFS_BUF_TRYLOCK) {
+ if (flags & XBF_TRYLOCK) {
if (!XFS_BUF_CPSEMA(bp)) {
return NULL;
}
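A minimal sketch, not part of the patch, of the reference-counted lookup that replaces direct m_perag[] indexing; mp and agno are assumed locals, and the fields touched are ones visible in the hunks above:

	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, agno);		/* radix tree lookup + reference */
	if (pag) {
		/* ... read pag->pagi_freecount, pag->pagf_freeblks, ... */
		xfs_perag_put(pag);		/* drop the reference when done */
	}
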
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1df7e450296..14dafd60823 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -78,7 +78,8 @@ typedef int (*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t);
typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *,
struct xfs_inode *, dm_right_t,
struct xfs_inode *, dm_right_t,
- const char *, const char *, mode_t, int, int);
+ const unsigned char *, const unsigned char *,
+ mode_t, int, int);
typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t,
char *, char *);
typedef void (*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *,
@@ -207,8 +208,8 @@ typedef struct xfs_mount {
uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */
uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
uint m_in_maxlevels; /* max inobt btree levels. */
- struct xfs_perag *m_perag; /* per-ag accounting info */
- struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */
+ struct radix_tree_root m_perag_tree; /* per-ag accounting info */
+ spinlock_t m_perag_lock; /* lock for m_perag_tree */
struct mutex m_growlock; /* growfs mutex */
int m_fixedfsid[2]; /* unchanged for life of FS */
uint m_dmevmask; /* DMI events for this FS */
@@ -224,6 +225,7 @@ typedef struct xfs_mount {
__uint64_t m_maxioffset; /* maximum inode offset */
__uint64_t m_resblks; /* total reserved blocks */
__uint64_t m_resblks_avail;/* available reserved blocks */
+ __uint64_t m_resblks_save; /* reserved blks @ remount,ro */
int m_dalign; /* stripe unit */
int m_swidth; /* stripe width */
int m_sinoalign; /* stripe unit inode alignment */
@@ -243,7 +245,7 @@ typedef struct xfs_mount {
struct xfs_qmops *m_qm_ops; /* vector of XQM ops */
atomic_t m_active_trans; /* number trans frozen */
#ifdef HAVE_PERCPU_SB
- xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */
+ xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
unsigned long m_icsb_counters; /* disabled per-cpu counters */
struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */
struct mutex m_icsb_mutex; /* balancer sync lock */
@@ -384,19 +386,10 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
}
/*
- * perag get/put wrappers for eventual ref counting
+ * perag get/put wrappers for ref counting
*/
-static inline xfs_perag_t *
-xfs_get_perag(struct xfs_mount *mp, xfs_ino_t ino)
-{
- return &mp->m_perag[XFS_INO_TO_AGNO(mp, ino)];
-}
-
-static inline void
-xfs_put_perag(struct xfs_mount *mp, xfs_perag_t *pag)
-{
- /* nothing to see here, move along */
-}
+struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
+void xfs_perag_put(struct xfs_perag *pag);
/*
* Per-cpu superblock locking functions
@@ -428,6 +421,7 @@ typedef struct xfs_mod_sb {
} xfs_mod_sb_t;
extern int xfs_log_sbcount(xfs_mount_t *, uint);
+extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp);
extern void xfs_unmountfs(xfs_mount_t *);
@@ -450,7 +444,8 @@ extern struct xfs_dmops xfs_dmcore_xfs;
#endif /* __KERNEL__ */
extern void xfs_mod_sb(struct xfs_trans *, __int64_t);
-extern xfs_agnumber_t xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t);
+extern int xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t,
+ xfs_agnumber_t *);
extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 4b0613d99fa..45ce15dc5b2 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -398,7 +398,7 @@ exit:
* guaranteed that all the free functions for all the elements have finished
* executing and the reaper is not running.
*/
-void
+static void
xfs_mru_cache_flush(
xfs_mru_cache_t *mru)
{
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index 5d439f34b0c..36dd3ec8b4e 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -42,7 +42,6 @@ void xfs_mru_cache_uninit(void);
int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
unsigned int grp_count,
xfs_mru_cache_free_func_t free_func);
-void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
void *value);
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 91bfd60f4c7..fdcab3f81dd 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -223,16 +223,9 @@ typedef struct xfs_qoff_logformat {
#define XFS_QMOPT_RES_INOS 0x0800000
/*
- * flags for dqflush and dqflush_all.
- */
-#define XFS_QMOPT_SYNC 0x1000000
-#define XFS_QMOPT_ASYNC 0x2000000
-#define XFS_QMOPT_DELWRI 0x4000000
-
-/*
* flags for dqalloc.
*/
-#define XFS_QMOPT_INHERIT 0x8000000
+#define XFS_QMOPT_INHERIT 0x1000000
/*
* flags to xfs_trans_mod_dquot.
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 5aa07caea5f..e336742a58a 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -47,48 +47,6 @@
#include "xfs_trace.h"
/*
- * This is a subroutine for xfs_write() and other writers (xfs_ioctl)
- * which clears the setuid and setgid bits when a file is written.
- */
-int
-xfs_write_clear_setuid(
- xfs_inode_t *ip)
-{
- xfs_mount_t *mp;
- xfs_trans_t *tp;
- int error;
-
- mp = ip->i_mount;
- tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
- if ((error = xfs_trans_reserve(tp, 0,
- XFS_WRITEID_LOG_RES(mp),
- 0, 0, 0))) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
- ip->i_d.di_mode &= ~S_ISUID;
-
- /*
- * Note that we don't have to worry about mandatory
- * file locking being disabled here because we only
- * clear the S_ISGID bit if the Group execute bit is
- * on, but if it was on then mandatory locking wouldn't
- * have been enabled.
- */
- if (ip->i_d.di_mode & S_IXGRP) {
- ip->i_d.di_mode &= ~S_ISGID;
- }
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return 0;
-}
-
-/*
* Force a shutdown of the filesystem instantly while keeping
* the filesystem consistent. We don't do an unmount here; just shutdown
* the shop, make sure that absolutely nothing persistent happens to
@@ -153,88 +111,6 @@ xfs_do_force_shutdown(
}
}
-
-/*
- * Called when we want to stop a buffer from getting written or read.
- * We attach the EIO error, muck with its flags, and call biodone
- * so that the proper iodone callbacks get called.
- */
-int
-xfs_bioerror(
- xfs_buf_t *bp)
-{
-
-#ifdef XFSERRORDEBUG
- ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
-#endif
-
- /*
- * No need to wait until the buffer is unpinned.
- * We aren't flushing it.
- */
- XFS_BUF_ERROR(bp, EIO);
- /*
- * We're calling biodone, so delete B_DONE flag. Either way
- * we have to call the iodone callback, and calling biodone
- * probably is the best way since it takes care of
- * GRIO as well.
- */
- XFS_BUF_UNREAD(bp);
- XFS_BUF_UNDELAYWRITE(bp);
- XFS_BUF_UNDONE(bp);
- XFS_BUF_STALE(bp);
-
- XFS_BUF_CLR_BDSTRAT_FUNC(bp);
- xfs_biodone(bp);
-
- return (EIO);
-}
-
-/*
- * Same as xfs_bioerror, except that we are releasing the buffer
- * here ourselves, and avoiding the biodone call.
- * This is meant for userdata errors; metadata bufs come with
- * iodone functions attached, so that we can track down errors.
- */
-int
-xfs_bioerror_relse(
- xfs_buf_t *bp)
-{
- int64_t fl;
-
- ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
- ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);
-
- fl = XFS_BUF_BFLAGS(bp);
- /*
- * No need to wait until the buffer is unpinned.
- * We aren't flushing it.
- *
- * chunkhold expects B_DONE to be set, whether
- * we actually finish the I/O or not. We don't want to
- * change that interface.
- */
- XFS_BUF_UNREAD(bp);
- XFS_BUF_UNDELAYWRITE(bp);
- XFS_BUF_DONE(bp);
- XFS_BUF_STALE(bp);
- XFS_BUF_CLR_IODONE_FUNC(bp);
- XFS_BUF_CLR_BDSTRAT_FUNC(bp);
- if (!(fl & XFS_B_ASYNC)) {
- /*
- * Mark b_error and B_ERROR _both_.
- * Lot's of chunkcache code assumes that.
- * There's no reason to mark error for
- * ASYNC buffers.
- */
- XFS_BUF_ERROR(bp, EIO);
- XFS_BUF_FINISH_IOWAIT(bp);
- } else {
- xfs_buf_relse(bp);
- }
- return (EIO);
-}
-
/*
* Prints out an ALERT message about I/O error.
*/
@@ -306,37 +182,6 @@ xfs_read_buf(
}
/*
- * Wrapper around bwrite() so that we can trap
- * write errors, and act accordingly.
- */
-int
-xfs_bwrite(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- int error;
-
- /*
- * XXXsup how does this work for quotas.
- */
- XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
- bp->b_mount = mp;
- XFS_BUF_WRITE(bp);
-
- if ((error = XFS_bwrite(bp))) {
- ASSERT(mp);
- /*
- * Cannot put a buftrace here since if the buffer is not
- * B_HOLD then we will brelse() the buffer before returning
- * from bwrite and we could be tracing a buffer that has
- * been reused.
- */
- xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
- }
- return (error);
-}
-
-/*
* helper function to extract extent size hint from inode
*/
xfs_extlen_t
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index 571f2174435..11c41ec6ed7 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -39,10 +39,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
/*
* Prototypes for functions in xfs_rw.c.
*/
-extern int xfs_write_clear_setuid(struct xfs_inode *ip);
-extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
-extern int xfs_bioerror(struct xfs_buf *bp);
-extern int xfs_bioerror_relse(struct xfs_buf *bp);
extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp,
xfs_daddr_t blkno, int len, uint flags,
struct xfs_buf **bpp);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 237badcbac3..be942d4e332 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -981,9 +981,8 @@ shut_us_down:
*/
if (sync) {
if (!error) {
- error = _xfs_log_force(mp, commit_lsn,
- XFS_LOG_FORCE | XFS_LOG_SYNC,
- log_flushed);
+ error = _xfs_log_force_lsn(mp, commit_lsn,
+ XFS_LOG_SYNC, log_flushed);
}
XFS_STATS_INC(xs_trans_sync);
} else {
@@ -1121,7 +1120,7 @@ xfs_trans_fill_vecs(
tp->t_header.th_num_items = nitems;
log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
log_vector->i_len = sizeof(xfs_trans_header_t);
- XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR);
+ log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index ca64f33c63a..c93e3a10285 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -861,8 +861,7 @@ typedef struct xfs_item_ops {
#define XFS_ITEM_SUCCESS 0
#define XFS_ITEM_PINNED 1
#define XFS_ITEM_LOCKED 2
-#define XFS_ITEM_FLUSHING 3
-#define XFS_ITEM_PUSHBUF 4
+#define XFS_ITEM_PUSHBUF 3
/*
* This structure is used to maintain a list of block ranges that have been
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 2ffc570679b..e799824f724 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -237,14 +237,15 @@ out:
}
/*
- * Function that does the work of pushing on the AIL
+ * xfsaild_push does the work of pushing on the AIL. Returning a timeout of
+ * zero indicates that the caller should sleep until woken.
*/
long
xfsaild_push(
struct xfs_ail *ailp,
xfs_lsn_t *last_lsn)
{
- long tout = 1000; /* milliseconds */
+ long tout = 0;
xfs_lsn_t last_pushed_lsn = *last_lsn;
xfs_lsn_t target = ailp->xa_target;
xfs_lsn_t lsn;
@@ -252,6 +253,7 @@ xfsaild_push(
int flush_log, count, stuck;
xfs_mount_t *mp = ailp->xa_mount;
struct xfs_ail_cursor *cur = &ailp->xa_cursors;
+ int push_xfsbufd = 0;
spin_lock(&ailp->xa_lock);
xfs_trans_ail_cursor_init(ailp, cur);
@@ -262,7 +264,7 @@ xfsaild_push(
*/
xfs_trans_ail_cursor_done(ailp, cur);
spin_unlock(&ailp->xa_lock);
- last_pushed_lsn = 0;
+ *last_lsn = 0;
return tout;
}
@@ -279,7 +281,6 @@ xfsaild_push(
	 * prevents us from spinning when we can't do anything or there is
* lots of contention on the AIL lists.
*/
- tout = 10;
lsn = lip->li_lsn;
flush_log = stuck = count = 0;
while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
@@ -308,6 +309,7 @@ xfsaild_push(
XFS_STATS_INC(xs_push_ail_pushbuf);
IOP_PUSHBUF(lip);
last_pushed_lsn = lsn;
+ push_xfsbufd = 1;
break;
case XFS_ITEM_PINNED:
@@ -322,12 +324,6 @@ xfsaild_push(
stuck++;
break;
- case XFS_ITEM_FLUSHING:
- XFS_STATS_INC(xs_push_ail_flushing);
- last_pushed_lsn = lsn;
- stuck++;
- break;
-
default:
ASSERT(0);
break;
@@ -371,19 +367,24 @@ xfsaild_push(
* move forward in the AIL.
*/
XFS_STATS_INC(xs_push_ail_flush);
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+ xfs_log_force(mp, 0);
+ }
+
+ if (push_xfsbufd) {
+ /* we've got delayed write buffers to flush */
+ wake_up_process(mp->m_ddev_targp->bt_task);
}
if (!count) {
/* We're past our target or empty, so idle */
- tout = 1000;
+ last_pushed_lsn = 0;
} else if (XFS_LSN_CMP(lsn, target) >= 0) {
/*
* We reached the target so wait a bit longer for I/O to
* complete and remove pushed items from the AIL before we
* start the next scan from the start of the AIL.
*/
- tout += 20;
+ tout = 50;
last_pushed_lsn = 0;
} else if ((stuck * 100) / count > 90) {
/*
@@ -395,11 +396,14 @@ xfsaild_push(
* Backoff a bit more to allow some I/O to complete before
* continuing from where we were.
*/
- tout += 10;
+ tout = 20;
+ } else {
+ /* more to do, but wait a short while before continuing */
+ tout = 10;
}
*last_lsn = last_pushed_lsn;
return tout;
-} /* xfsaild_push */
+}
/*
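A hypothetical caller loop, purely for illustration (the xfsaild thread itself is not in this diff and may differ): a return of 0 from xfsaild_push() means sleep until explicitly woken, any other value is a back-off in milliseconds. ailp is an assumed local, and a real thread would also re-check its wakeup condition to avoid missed wakeups.

	long		tout = 0;
	xfs_lsn_t	last_lsn = 0;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (tout)
			schedule_timeout(msecs_to_jiffies(tout));
		else
			schedule();		/* tout == 0: wait to be woken */
		__set_current_state(TASK_RUNNING);

		tout = xfsaild_push(ailp, &last_lsn);
	}
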
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 49130628d5e..5ffd544434e 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -75,13 +75,14 @@ xfs_trans_get_buf(xfs_trans_t *tp,
xfs_buf_log_item_t *bip;
if (flags == 0)
- flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+ flags = XBF_LOCK | XBF_MAPPED;
/*
* Default to a normal get_buf() call if the tp is NULL.
*/
if (tp == NULL)
- return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+ return xfs_buf_get(target_dev, blkno, len,
+ flags | XBF_DONT_BLOCK);
/*
* If we find the buffer in the cache with this transaction
@@ -117,14 +118,14 @@ xfs_trans_get_buf(xfs_trans_t *tp,
}
/*
- * We always specify the BUF_BUSY flag within a transaction so
- * that get_buf does not try to push out a delayed write buffer
+ * We always specify the XBF_DONT_BLOCK flag within a transaction
+ * so that get_buf does not try to push out a delayed write buffer
* which might cause another transaction to take place (if the
* buffer was delayed alloc). Such recursive transactions can
* easily deadlock with our current transaction as well as cause
* us to run out of stack space.
*/
- bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+ bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
if (bp == NULL) {
return NULL;
}
@@ -290,15 +291,15 @@ xfs_trans_read_buf(
int error;
if (flags == 0)
- flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+ flags = XBF_LOCK | XBF_MAPPED;
/*
* Default to a normal get_buf() call if the tp is NULL.
*/
if (tp == NULL) {
- bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+ bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
if (!bp)
- return (flags & XFS_BUF_TRYLOCK) ?
+ return (flags & XBF_TRYLOCK) ?
EAGAIN : XFS_ERROR(ENOMEM);
if (XFS_BUF_GETERROR(bp) != 0) {
@@ -385,14 +386,14 @@ xfs_trans_read_buf(
}
/*
- * We always specify the BUF_BUSY flag within a transaction so
- * that get_buf does not try to push out a delayed write buffer
+ * We always specify the XBF_DONT_BLOCK flag within a transaction
+ * so that get_buf does not try to push out a delayed write buffer
* which might cause another transaction to take place (if the
* buffer was delayed alloc). Such recursive transactions can
* easily deadlock with our current transaction as well as cause
* us to run out of stack space.
*/
- bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+ bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
if (bp == NULL) {
*bpp = NULL;
return 0;
@@ -472,8 +473,8 @@ shutdown_abort:
if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
#endif
- ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
- (XFS_B_STALE|XFS_B_DELWRI));
+ ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
+ (XBF_STALE|XBF_DELWRI));
trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
xfs_buf_relse(bp);
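Not part of the patch: a hypothetical transactional metadata read spelled with the renamed flags; the argument order follows the xfs_trans_read_buf() definition this hunk modifies, and mp, tp, blkno and len are assumed locals.

	struct xfs_buf	*bp;
	int		error;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, len,
				   XBF_LOCK | XBF_MAPPED, &bp);
	if (error)
		return error;
	/* bp is locked, mapped and recorded against the transaction tp */
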
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index d725428c9df..b09904555d0 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -151,8 +151,8 @@ typedef enum {
} xfs_btnum_t;
struct xfs_name {
- const char *name;
- int len;
+ const unsigned char *name;
+ int len;
};
#endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 6f268756bf3..ddd2c5d1b85 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -256,7 +256,7 @@ xfs_setattr(
iattr->ia_size > ip->i_d.di_size) {
code = xfs_flush_pages(ip,
ip->i_d.di_size, iattr->ia_size,
- XFS_B_ASYNC, FI_NONE);
+ XBF_ASYNC, FI_NONE);
}
/* wait for all I/O to complete */
@@ -597,7 +597,7 @@ xfs_fsync(
{
xfs_trans_t *tp;
int error = 0;
- int log_flushed = 0, changed = 1;
+ int log_flushed = 0;
xfs_itrace_entry(ip);
@@ -627,19 +627,16 @@ xfs_fsync(
* disk yet, the inode will be still be pinned. If it is,
* force the log.
*/
-
xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
if (xfs_ipincount(ip)) {
- error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
- XFS_LOG_FORCE | XFS_LOG_SYNC,
- &log_flushed);
- } else {
- /*
- * If the inode is not pinned and nothing has changed
- * we don't need to flush the cache.
- */
- changed = 0;
+ if (ip->i_itemp->ili_last_lsn) {
+ error = _xfs_log_force_lsn(ip->i_mount,
+ ip->i_itemp->ili_last_lsn,
+ XFS_LOG_SYNC, &log_flushed);
+ } else {
+ error = _xfs_log_force(ip->i_mount,
+ XFS_LOG_SYNC, &log_flushed);
+ }
}
} else {
/*
@@ -674,7 +671,7 @@ xfs_fsync(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
- if ((ip->i_mount->m_flags & XFS_MOUNT_BARRIER) && changed) {
+ if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
/*
* If the log write didn't issue an ordered tag we need
* to flush the disk cache for the data device now.
@@ -1096,7 +1093,7 @@ xfs_release(
*/
truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
- xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
+ xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
}
if (ip->i_d.di_nlink != 0) {
@@ -2199,7 +2196,8 @@ xfs_symlink(
if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
- link_name->name, target_path, 0, 0, 0);
+ link_name->name,
+ (unsigned char *)target_path, 0, 0, 0);
if (error)
return error;
}
@@ -2395,7 +2393,8 @@ std_return:
dp, DM_RIGHT_NULL,
error ? NULL : ip,
DM_RIGHT_NULL, link_name->name,
- target_path, 0, error, 0);
+ (unsigned char *)target_path,
+ 0, error, 0);
}
if (!error)
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 167a467403a..774f40729ca 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -43,11 +43,11 @@ int xfs_change_file_space(struct xfs_inode *ip, int cmd,
int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
struct xfs_inode *src_ip, struct xfs_inode *target_dp,
struct xfs_name *target_name, struct xfs_inode *target_ip);
-int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value,
- int *valuelenp, int flags);
-int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value,
- int valuelen, int flags);
-int xfs_attr_remove(struct xfs_inode *dp, const char *name, int flags);
+int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
+ unsigned char *value, int *valuelenp, int flags);
+int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
+ unsigned char *value, int valuelen, int flags);
+int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
int flags, struct attrlist_cursor_kern *cursor);
ssize_t xfs_read(struct xfs_inode *ip, struct kiocb *iocb,
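Not part of the patch: with the prototypes above now taking unsigned char, an in-kernel caller passes attribute names and values as unsigned char pointers. ip is an assumed inode, and the name and flag value are purely illustrative (0 selects the user namespace).

	unsigned char	value[64];
	int		valuelen = sizeof(value);
	int		error;

	error = xfs_attr_get(ip, (unsigned char *)"user.example",
			     value, &valuelen, 0 /* user namespace */);
	if (!error) {
		/* value[0 .. valuelen-1] holds the attribute data */
	}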