Diffstat (limited to 'fs/ntfs')
-rw-r--r--   fs/ntfs/ChangeLog |  5
-rw-r--r--   fs/ntfs/aops.c    | 21
2 files changed, 19 insertions, 7 deletions
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 051c6818aad..8a249df2b5c 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -51,6 +51,11 @@ ToDo/Notes:
value afterwards when reading the size of the bitmap inode.
- Use i_size_{read,write}() in fs/ntfs/{aops.c,mft.c} and protect
access to the i_size and other size fields using the size_lock.
+ - Implement extension of resident files in the regular file write code
+ paths (fs/ntfs/aops.c::ntfs_{prepare,commit}_write()). At present
+ this only works until the data attribute becomes too big for the mft
+ record after which we abort the write returning -EOPNOTSUPP from
+ ntfs_prepare_write().
2.1.22 - Many bug and race fixes and error handling improvements.
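
The ChangeLog entry above describes extending resident files directly in the
ntfs_prepare_write()/ntfs_commit_write() paths, and aborting with -EOPNOTSUPP
once the data attribute no longer fits in the mft record. The short user-space
sketch below illustrates only that fits-or-abort decision; it is not the
driver's code, and MFT_RECORD_SIZE and HEADER_OVERHEAD are illustrative
placeholders rather than values taken from fs/ntfs.

/*
 * Simplified, stand-alone sketch of the "extend a resident attribute or
 * give up" decision described in the ChangeLog entry above.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MFT_RECORD_SIZE 1024	/* typical mft record size (assumption) */
#define HEADER_OVERHEAD  200	/* record/attribute header space (made up) */

static int can_extend_resident(uint32_t attr_len, uint32_t new_size)
{
	if (new_size <= attr_len)
		return 0;		/* no growth needed */
	if (HEADER_OVERHEAD + new_size > MFT_RECORD_SIZE)
		return -EOPNOTSUPP;	/* would need conversion to non-resident */
	return 0;			/* fits: the resident value can be resized */
}

int main(void)
{
	printf("grow to 500 bytes:  %d\n", can_extend_resident(100, 500));
	printf("grow to 2000 bytes: %d\n", can_extend_resident(100, 2000));
	return 0;
}
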
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index ac65806ee51..92215228eea 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -872,6 +872,7 @@ static int ntfs_write_mst_block(struct page *page,
if (likely(block < rec_block)) {
if (unlikely(block >= dblock)) {
clear_buffer_dirty(bh);
+ set_buffer_uptodate(bh);
continue;
}
/*
@@ -1830,6 +1831,7 @@ static int ntfs_prepare_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
s64 new_size;
+ unsigned long flags;
struct inode *vi = page->mapping->host;
ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
ntfs_volume *vol = ni->vol;
@@ -1903,12 +1905,6 @@ static int ntfs_prepare_write(struct file *file, struct page *page,
/* If we do not need to resize the attribute allocation we are done. */
if (new_size <= i_size_read(vi))
goto done;
-
- // FIXME: We abort for now as this code is not safe.
- ntfs_error(vi->i_sb, "Changing the file size is not supported yet. "
- "Sorry.");
- return -EOPNOTSUPP;
-
/* Map, pin, and lock the (base) mft record. */
if (!NInoAttr(ni))
base_ni = ni;
@@ -1937,7 +1933,17 @@ static int ntfs_prepare_write(struct file *file, struct page *page,
a = ctx->attr;
/* The total length of the attribute value. */
attr_len = le32_to_cpu(a->data.resident.value_length);
- BUG_ON(i_size_read(vi) != attr_len);
+ /* Fix an eventual previous failure of ntfs_commit_write(). */
+ read_lock_irqsave(&ni->size_lock, flags);
+ if (unlikely(ni->initialized_size < attr_len)) {
+ attr_len = ni->initialized_size;
+ a->data.resident.value_length = cpu_to_le32(attr_len);
+ BUG_ON(attr_len < i_size_read(vi));
+ }
+ read_unlock_irqrestore(&ni->size_lock, flags);
+ /* If we do not need to resize the attribute allocation we are done. */
+ if (new_size <= attr_len)
+ goto done_unm;
/* Check if new size is allowed in $AttrDef. */
err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
if (unlikely(err)) {
@@ -1995,6 +2001,7 @@ static int ntfs_prepare_write(struct file *file, struct page *page,
}
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
+done_unm:
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
/*
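
The hunk above reads ni->initialized_size under ni->size_lock using
read_lock_irqsave()/read_unlock_irqrestore(), in line with the earlier
ChangeLog note about protecting the size fields with size_lock. The minimal
user-space sketch below mirrors only that read-side pattern, with a pthread
rwlock standing in for the kernel rwlock; the toy_inode structure and its
values are assumptions for illustration, not the driver's types.

/*
 * Read-side locking pattern analogous to size_lock in the hunk above:
 * take the lock, snapshot the protected size field, drop the lock.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct toy_inode {
	pthread_rwlock_t size_lock;	/* stands in for ni->size_lock */
	int64_t initialized_size;
};

static int64_t read_initialized_size(struct toy_inode *ni)
{
	int64_t size;

	pthread_rwlock_rdlock(&ni->size_lock);	/* read-side lock */
	size = ni->initialized_size;		/* consistent snapshot */
	pthread_rwlock_unlock(&ni->size_lock);
	return size;
}

int main(void)
{
	struct toy_inode ni = { .initialized_size = 4096 };

	pthread_rwlock_init(&ni.size_lock, NULL);
	printf("initialized_size = %lld\n",
			(long long)read_initialized_size(&ni));
	pthread_rwlock_destroy(&ni.size_lock);
	return 0;
}
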