ntfs: zero out stale data in straddle block beyond initialized_size

ntfs_read_iomap_begin_non_resident() rounds up MAPPED extents
to the block boundary of initialized_size. This ensures that
any subsequent blocks are treated as IOMAP_UNWRITTEN, but
it also causes the "straddle block" containing initialized_size
to be read from disk. The disk data beyond initialized_size in
this block is stale and must be zeroed to prevent data leakage.

Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
This commit is contained in:
Hyunchul Lee 2026-04-01 16:24:57 +09:00 committed by Namjae Jeon
parent 8541d8f725
commit 8b4064e614
2 changed files with 45 additions and 11 deletions

View File

@ -15,6 +15,41 @@
#include "debug.h"
#include "iomap.h"
/*
 * ntfs_iomap_read_end_io - end_io handler for iomap buffered read bios
 * @bio: the completed read bio
 *
 * ntfs_read_iomap_begin_non_resident() rounds MAPPED extents up to the
 * block boundary of initialized_size, so the block that straddles
 * initialized_size is read from disk even though the bytes past
 * initialized_size in it are stale.  Zero that tail here, before the
 * folio range is marked uptodate, to avoid leaking stale disk data.
 */
static void ntfs_iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter iter;

	bio_for_each_folio_all(iter, bio) {
		struct folio *folio = iter.folio;
		struct ntfs_inode *ni = NTFS_I(folio->mapping->host);
		s64 init_size;
		loff_t pos = folio_pos(folio);

		/*
		 * NOTE(review): initialized_size is sampled without a lock
		 * here — presumably safe because truncation/extension is
		 * serialized against reads at a higher level; confirm.
		 */
		init_size = ni->initialized_size;
		/*
		 * Does the byte range this bio filled in the folio,
		 * [pos + offset, pos + offset + length), straddle
		 * initialized_size?
		 */
		if (pos + iter.offset < init_size &&
		    pos + iter.offset + iter.length > init_size)
			/* Zero from initialized_size to the end of the range. */
			folio_zero_segment(folio, offset_in_folio(folio, init_size),
					   iter.offset + iter.length);
		/* Mark the range uptodate (or record the error) only after zeroing. */
		iomap_finish_folio_read(folio, iter.offset, iter.length, error);
	}
	bio_put(bio);
}
/*
 * ntfs_iomap_bio_submit_read - submit a read bio built by the iomap layer
 * @iter: current iomap iteration state (unused)
 * @ctx:  read-folio context carrying the bio in ->read_ctx
 *
 * Install our completion handler so stale bytes beyond initialized_size
 * get zeroed when the read finishes, then hand the bio to the block layer.
 */
static void ntfs_iomap_bio_submit_read(const struct iomap_iter *iter,
				       struct iomap_read_folio_ctx *ctx)
{
	struct bio *read_bio = ctx->read_ctx;

	read_bio->bi_end_io = ntfs_iomap_read_end_io;
	submit_bio(read_bio);
}
/*
 * Read ops handed to iomap_read_folio()/iomap_readahead(): use the stock
 * iomap bio range reader, but submit through our wrapper so the custom
 * end_io handler runs on completion.
 */
static const struct iomap_read_ops ntfs_iomap_bio_read_ops = {
	.submit_read		= ntfs_iomap_bio_submit_read,
	.read_folio_range	= iomap_bio_read_folio_range,
};
/*
* ntfs_read_folio - Read data for a folio from the device
* @file: open file to which the folio @folio belongs or NULL
@ -35,6 +70,10 @@
static int ntfs_read_folio(struct file *file, struct folio *folio)
{
struct ntfs_inode *ni = NTFS_I(folio->mapping->host);
struct iomap_read_folio_ctx ctx = {
.cur_folio = folio,
.ops = &ntfs_iomap_bio_read_ops,
};
/*
* Only $DATA attributes can be encrypted and only unnamed $DATA
@ -58,7 +97,7 @@ static int ntfs_read_folio(struct file *file, struct folio *folio)
return ntfs_read_compressed_block(folio);
}
iomap_bio_read_folio(folio, &ntfs_read_iomap_ops);
iomap_read_folio(&ntfs_read_iomap_ops, &ctx, NULL);
return 0;
}
@ -188,6 +227,10 @@ static void ntfs_readahead(struct readahead_control *rac)
struct address_space *mapping = rac->mapping;
struct inode *inode = mapping->host;
struct ntfs_inode *ni = NTFS_I(inode);
struct iomap_read_folio_ctx ctx = {
.ops = &ntfs_iomap_bio_read_ops,
.rac = rac,
};
/*
* Resident files are not cached in the page cache,
@ -195,7 +238,7 @@ static void ntfs_readahead(struct readahead_control *rac)
*/
if (!NInoNonResident(ni) || NInoCompressed(ni))
return;
iomap_bio_readahead(rac, &ntfs_read_iomap_ops);
iomap_readahead(&ntfs_read_iomap_ops, &ctx, NULL);
}
static int ntfs_writepages(struct address_space *mapping,

View File

@ -267,15 +267,6 @@ static int ntfs_setattr_size(struct inode *vi, struct iattr *attr)
return err;
inode_dio_wait(vi);
/* Serialize against page faults */
if (NInoNonResident(NTFS_I(vi)) && attr->ia_size < old_size) {
err = iomap_truncate_page(vi, attr->ia_size, NULL,
&ntfs_read_iomap_ops,
&ntfs_iomap_folio_ops, NULL);
if (err)
return err;
}
truncate_setsize(vi, attr->ia_size);
err = ntfs_truncate_vfs(vi, attr->ia_size, old_size);
if (err) {