fsdax: decouple zeroing from the iomap buffered I/O code

Unshare the DAX and iomap buffered I/O page zeroing code.  This code
previously did an IS_DAX check deep inside the iomap code, which was in
fact the only DAX check there.  Instead, move these checks into the
callers.  Most callers already have DAX special-casing anyway, and XFS
will need it for reflink support as well.
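
For illustration only, a caller-side dispatch after this change could look
roughly like the sketch below (hypothetical helper name; the iomap_ops table
is whatever the filesystem already uses for zeroing):

/*
 * Illustrative sketch, not taken verbatim from this series: callers now
 * pick the DAX or the buffered-I/O zeroing path themselves.
 */
static int example_zero_range(struct inode *inode, loff_t pos, loff_t len,
		bool *did_zero, const struct iomap_ops *ops)
{
	if (IS_DAX(inode))
		return dax_zero_range(inode, pos, len, did_zero, ops);
	return iomap_zero_range(inode, pos, len, did_zero, ops);
}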

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20211129102203.2243509-19-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit c6f4046865 (parent e5c71954ca)
Author:    Christoph Hellwig <hch@lst.de>
Date:      2021-11-29 11:21:52 +01:00
Committer: Dan Williams <dan.j.williams@intel.com>

6 changed files with 94 additions and 44 deletions

@@ -1135,25 +1135,74 @@ static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
 	return ret;
 }
 
-s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
+static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
 {
-	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
-	long rc, id;
-	unsigned offset = offset_in_page(pos);
-	unsigned size = min_t(u64, PAGE_SIZE - offset, length);
+	const struct iomap *iomap = &iter->iomap;
+	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+	loff_t pos = iter->pos;
+	u64 length = iomap_length(iter);
+	s64 written = 0;
 
-	id = dax_read_lock();
-	if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
-		rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
-	else
-		rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
-	dax_read_unlock(id);
+	/* already zeroed? we're done. */
+	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+		return length;
 
-	if (rc < 0)
-		return rc;
-	return size;
+	do {
+		unsigned offset = offset_in_page(pos);
+		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
+		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
+		long rc;
+		int id;
+
+		id = dax_read_lock();
+		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
+			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
+		else
+			rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
+		dax_read_unlock(id);
+
+		if (rc < 0)
+			return rc;
+		pos += size;
+		length -= size;
+		written += size;
+		if (did_zero)
+			*did_zero = true;
+	} while (length > 0);
+
+	return written;
 }
+
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+		const struct iomap_ops *ops)
+{
+	struct iomap_iter iter = {
+		.inode		= inode,
+		.pos		= pos,
+		.len		= len,
+		.flags		= IOMAP_ZERO,
+	};
+	int ret;
+
+	while ((ret = iomap_iter(&iter, ops)) > 0)
+		iter.processed = dax_zero_iter(&iter, did_zero);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dax_zero_range);
+
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+		const struct iomap_ops *ops)
+{
+	unsigned int blocksize = i_blocksize(inode);
+	unsigned int off = pos & (blocksize - 1);
+
+	/* Block boundary? Nothing to do */
+	if (!off)
+		return 0;
+	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
+}
+EXPORT_SYMBOL_GPL(dax_truncate_page);
 
 static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 		struct iov_iter *iter)
 {
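
For context, a truncate-path caller of the new helper would dispatch the same
way as the zeroing sketch above; a minimal sketch under the same assumptions
(hypothetical helper name, with iomap_truncate_page() as the non-DAX
counterpart):

/* Illustrative only: zero the partial tail block at 'pos' on truncate. */
static int example_truncate_page(struct inode *inode, loff_t pos,
		bool *did_zero, const struct iomap_ops *ops)
{
	if (IS_DAX(inode))
		return dax_truncate_page(inode, pos, did_zero, ops);
	return iomap_truncate_page(inode, pos, did_zero, ops);
}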