From 56f5746c414d92ae8e8314f46760822b4ecf8be3 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 09:40:54 -0500 Subject: namei: Merge page_symlink() and __page_symlink() There are no callers of __page_symlink() left, so we can remove that entry point. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Christian Brauner --- include/linux/fs.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index bbde95387a23..e108aff23a28 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3109,8 +3109,6 @@ extern int page_readlink(struct dentry *, char __user *, int); extern const char *page_get_link(struct dentry *, struct inode *, struct delayed_call *); extern void page_put_link(void *); -extern int __page_symlink(struct inode *inode, const char *symname, int len, - int nofs); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); -- cgit v1.2.3-71-gd317 From 236d93c4bf2d6da83241cc8e4625e89d9604cb43 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 10:40:11 -0500 Subject: fs: Remove AOP_FLAG_NOFS With all users of this flag gone, we can stop testing whether it's set. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- fs/netfs/buffered_read.c | 6 +----- include/linux/fs.h | 4 ---- mm/folio-compat.c | 2 -- 3 files changed, 1 insertion(+), 11 deletions(-) (limited to 'include') diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 281a88a5b8dc..65c17c5a5567 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -302,7 +302,6 @@ zero_out: * @mapping: The mapping to read from * @pos: File position at which the write will begin * @len: The length of the write (may extend beyond the end of the folio chosen) - * @aop_flags: AOP_* flags * @_folio: Where to put the resultant folio * @_fsdata: Place for the netfs to store a cookie * @@ -335,16 +334,13 @@ int netfs_write_begin(struct file *file, struct address_space *mapping, struct netfs_io_request *rreq; struct netfs_i_context *ctx = netfs_i_context(file_inode(file )); struct folio *folio; - unsigned int fgp_flags; + unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE; pgoff_t index = pos >> PAGE_SHIFT; int ret; DEFINE_READAHEAD(ractl, file, NULL, mapping, index); retry: - fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE; - if (aop_flags & AOP_FLAG_NOFS) - fgp_flags |= FGP_NOFS; folio = __filemap_get_folio(mapping, index, fgp_flags, mapping_gfp_mask(mapping)); if (!folio) diff --git a/include/linux/fs.h b/include/linux/fs.h index e108aff23a28..f81bc5cbcbb6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -275,10 +275,6 @@ enum positive_aop_returns { AOP_TRUNCATED_PAGE = 0x80001, }; -#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct - * helper code (eg buffer layer) - * to clear GFP_FS from alloc */ - /* * oh the beauties of C type declarations. 
*/ diff --git a/mm/folio-compat.c b/mm/folio-compat.c index 46fa179e32fb..3e42ddb81918 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -135,8 +135,6 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping, { unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE; - if (flags & AOP_FLAG_NOFS) - fgp_flags |= FGP_NOFS; return pagecache_get_page(mapping, index, fgp_flags, mapping_gfp_mask(mapping)); } -- cgit v1.2.3-71-gd317 From de2a931150177957d37e9c975025604f4a1fe853 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 10:47:09 -0500 Subject: fs: Remove aop_flags parameter from netfs_write_begin() There are no more aop flags left, so remove the parameter. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- Documentation/filesystems/netfs_library.rst | 1 - fs/9p/vfs_addr.c | 2 +- fs/afs/write.c | 2 +- fs/ceph/addr.c | 2 +- fs/netfs/buffered_read.c | 4 ++-- include/linux/netfs.h | 2 +- 6 files changed, 6 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index 69f00179fdfe..d51c2a5ccf57 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -142,7 +142,6 @@ Three read helpers are provided:: struct address_space *mapping, loff_t pos, unsigned int len, - unsigned int flags, struct folio **_folio, void **_fsdata); diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 501128188343..d311e68e21fd 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -275,7 +275,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping, * file. We need to do this before we get a lock on the page in case * there's more than one writer competing for the same cache block. */ - retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata); + retval = netfs_write_begin(filp, mapping, pos, len, &folio, fsdata); if (retval < 0) return retval; diff --git a/fs/afs/write.c b/fs/afs/write.c index 4763132ca57e..af496c98d394 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -60,7 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, * file. We need to do this before we get a lock on the page in case * there's more than one writer competing for the same cache block. */ - ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata); + ret = netfs_write_begin(file, mapping, pos, len, &folio, fsdata); if (ret < 0) return ret; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index aa25bffd4823..415f0886bc25 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1318,7 +1318,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, struct folio *folio = NULL; int r; - r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL); + r = netfs_write_begin(file, inode->i_mapping, pos, len, &folio, NULL); if (r == 0) folio_wait_fscache(folio); if (r < 0) { diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 65c17c5a5567..1d44509455a5 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -328,8 +328,8 @@ zero_out: * This is usable whether or not caching is enabled. 
*/ int netfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned int len, unsigned int aop_flags, - struct folio **_folio, void **_fsdata) + loff_t pos, unsigned int len, struct folio **_folio, + void **_fsdata) { struct netfs_io_request *rreq; struct netfs_i_context *ctx = netfs_i_context(file_inode(file )); diff --git a/include/linux/netfs.h b/include/linux/netfs.h index c7bf1eaf51d5..1c29f317d907 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -276,7 +276,7 @@ struct readahead_control; extern void netfs_readahead(struct readahead_control *); extern int netfs_readpage(struct file *, struct page *); extern int netfs_write_begin(struct file *, struct address_space *, - loff_t, unsigned int, unsigned int, struct folio **, + loff_t, unsigned int, struct folio **, void **); extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); -- cgit v1.2.3-71-gd317 From b3992d1e2ebcd478e0614494a6abd95e902a029b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 11:25:12 -0500 Subject: fs: Remove aop flags parameter from block_write_begin() There are no more aop flags left, so remove the parameter. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- block/fops.c | 3 +-- fs/bfs/file.c | 3 +-- fs/buffer.c | 6 +++--- fs/ext2/inode.c | 3 +-- fs/minix/inode.c | 3 +-- fs/nilfs2/inode.c | 3 +-- fs/nilfs2/recovery.c | 2 +- fs/ntfs3/inode.c | 4 ++-- fs/omfs/file.c | 3 +-- fs/sysv/itree.c | 2 +- fs/udf/inode.c | 2 +- fs/ufs/inode.c | 3 +-- include/linux/buffer_head.h | 2 +- 13 files changed, 16 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/block/fops.c b/block/fops.c index 9f2ecec406b0..b432756570c6 100644 --- a/block/fops.c +++ b/block/fops.c @@ -401,8 +401,7 @@ static int blkdev_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - return block_write_begin(mapping, pos, len, flags, pagep, - blkdev_get_block); + return block_write_begin(mapping, pos, len, pagep, blkdev_get_block); } static int blkdev_write_end(struct file *file, struct address_space *mapping, diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 03139344568f..9408f45225cb 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -174,8 +174,7 @@ static int bfs_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - bfs_get_block); + ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block); if (unlikely(ret)) bfs_write_failed(mapping, pos + len); diff --git a/fs/buffer.c b/fs/buffer.c index 2b5561ae5d0b..4ec6eb03c0eb 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2104,13 +2104,13 @@ static int __block_commit_write(struct inode *inode, struct page *page, * The filesystem needs to handle block truncation upon failure. 
*/ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, - unsigned flags, struct page **pagep, get_block_t *get_block) + struct page **pagep, get_block_t *get_block) { pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int status; - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index, 0); if (!page) return -ENOMEM; @@ -2460,7 +2460,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping, (*bytes)++; } - return block_write_begin(mapping, pos, len, flags, pagep, get_block); + return block_write_begin(mapping, pos, len, pagep, get_block); } EXPORT_SYMBOL(cont_write_begin); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 52377a0ee735..97192932ea56 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -892,8 +892,7 @@ ext2_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - ext2_get_block); + ret = block_write_begin(mapping, pos, len, pagep, ext2_get_block); if (ret < 0) ext2_write_failed(mapping, pos + len); return ret; diff --git a/fs/minix/inode.c b/fs/minix/inode.c index f1a6610e4ee6..5e8d7ba661cf 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -428,8 +428,7 @@ static int minix_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - minix_get_block); + ret = block_write_begin(mapping, pos, len, pagep, minix_get_block); if (unlikely(ret)) minix_write_failed(mapping, pos + len); diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 6045cea21f52..be09a0d10f04 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -258,8 +258,7 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping, if (unlikely(err)) return err; - err = block_write_begin(mapping, pos, len, flags, pagep, - nilfs_get_block); + err = block_write_begin(mapping, pos, len, pagep, nilfs_get_block); if (unlikely(err)) { nilfs_write_failed(mapping, pos + len); nilfs_transaction_abort(inode->i_sb); diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 9e2ed76c0f25..0955b657938f 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -511,7 +511,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, pos = rb->blkoff << inode->i_blkbits; err = block_write_begin(inode->i_mapping, pos, blocksize, - 0, &page, nilfs_get_block); + &page, nilfs_get_block); if (unlikely(err)) { loff_t isize = inode->i_size; diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index 9eab11e3b034..3914138fd8ba 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -894,7 +894,7 @@ static int ntfs_write_begin(struct file *file, struct address_space *mapping, goto out; } - err = block_write_begin(mapping, pos, len, flags, pagep, + err = block_write_begin(mapping, pos, len, pagep, ntfs_get_block_write_begin); out: @@ -975,7 +975,7 @@ int reset_log_file(struct inode *inode) len = pos + PAGE_SIZE > log_size ? 
(log_size - pos) : PAGE_SIZE; - err = block_write_begin(mapping, pos, len, 0, &page, + err = block_write_begin(mapping, pos, len, &page, ntfs_get_block_write_begin); if (err) goto out; diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 3f297b541713..349b96d89c44 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -321,8 +321,7 @@ static int omfs_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - omfs_get_block); + ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block); if (unlikely(ret)) omfs_write_failed(mapping, pos + len); diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 409ab5e17803..96b7fd4facf3 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -482,7 +482,7 @@ static int sysv_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, get_block); + ret = block_write_begin(mapping, pos, len, pagep, get_block); if (unlikely(ret)) sysv_write_failed(mapping, pos + len); diff --git a/fs/udf/inode.c b/fs/udf/inode.c index ca4fa710e562..88a95886ce8a 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -209,7 +209,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block); + ret = block_write_begin(mapping, pos, len, pagep, udf_get_block); if (unlikely(ret)) udf_write_failed(mapping, pos + len); return ret; diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index d0dda01620f0..bd0e0c66f93d 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -500,8 +500,7 @@ static int ufs_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = block_write_begin(mapping, pos, len, flags, pagep, - ufs_getfrag_block); + ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block); if (unlikely(ret)) ufs_write_failed(mapping, pos + len); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index bcb4fe9b8575..63e49dfa7738 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -226,7 +226,7 @@ int __block_write_full_page(struct inode *inode, struct page *page, int block_read_full_page(struct page*, get_block_t*); bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, - unsigned flags, struct page **pagep, get_block_t *get_block); + struct page **pagep, get_block_t *get_block); int __block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block); int block_write_end(struct file *, struct address_space *, -- cgit v1.2.3-71-gd317 From be3bbbc588118bdc10e21fdd7bfa6ee6b8c2555d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 11:25:12 -0500 Subject: fs: Remove aop flags parameter from cont_write_begin() There are no more aop flags left, so remove the parameter. 
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- fs/adfs/inode.c | 2 +- fs/affs/file.c | 2 +- fs/buffer.c | 2 +- fs/exfat/inode.c | 2 +- fs/fat/inode.c | 2 +- fs/hfs/inode.c | 2 +- fs/hfsplus/inode.c | 2 +- fs/hpfs/file.c | 2 +- include/linux/buffer_head.h | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 561bc748c04a..b6912496bb19 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -58,7 +58,7 @@ static int adfs_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, adfs_get_block, &ADFS_I(mapping->host)->mmu_private); if (unlikely(ret)) diff --git a/fs/affs/file.c b/fs/affs/file.c index b3f81d84ff4c..704911d6aeba 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -420,7 +420,7 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, affs_get_block, &AFFS_I(mapping->host)->mmu_private); if (unlikely(ret)) diff --git a/fs/buffer.c b/fs/buffer.c index 4ec6eb03c0eb..fb97646d1977 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2441,7 +2441,7 @@ out: * We may have to extend the file. */ int cont_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata, get_block_t *get_block, loff_t *bytes) { diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index fc0ea1684880..8ed3c4b700cd 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -395,7 +395,7 @@ static int exfat_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, exfat_get_block, &EXFAT_I(mapping->host)->i_size_ondisk); diff --git a/fs/fat/inode.c b/fs/fat/inode.c index bf6051bdf1d1..9b34ccef2501 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -232,7 +232,7 @@ static int fat_write_begin(struct file *file, struct address_space *mapping, int err; *pagep = NULL; - err = cont_write_begin(file, mapping, pos, len, flags, + err = cont_write_begin(file, mapping, pos, len, pagep, fsdata, fat_get_block, &MSDOS_I(mapping->host)->mmu_private); if (err < 0) diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 55f45e9b4930..396735dd3407 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -56,7 +56,7 @@ static int hfs_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, hfs_get_block, &HFS_I(mapping->host)->phys_size); if (unlikely(ret)) diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 446a816aa8e1..435b6202532a 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -50,7 +50,7 @@ static int hfsplus_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, hfsplus_get_block, &HFSPLUS_I(mapping->host)->phys_size); if (unlikely(ret)) diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 
99493a23c5d0..8740b4ea0b52 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -200,7 +200,7 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping, int ret; *pagep = NULL; - ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, + ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata, hpfs_get_block, &hpfs_i(mapping->host)->mmu_private); if (unlikely(ret)) diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 63e49dfa7738..127b60fad77e 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -238,7 +238,7 @@ int generic_write_end(struct file *, struct address_space *, void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); void clean_page_buffers(struct page *page); int cont_write_begin(struct file *, struct address_space *, loff_t, - unsigned, unsigned, struct page **, void **, + unsigned, struct page **, void **, get_block_t *, loff_t *); int generic_cont_expand_simple(struct inode *inode, loff_t size); int block_commit_write(struct page *page, unsigned from, unsigned to); -- cgit v1.2.3-71-gd317 From b7446e7cf15f0926866c8e5de90ab278998bf8c8 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 11:25:12 -0500 Subject: fs: Remove aop flags parameter from grab_cache_page_write_begin() There are no more aop flags left, so remove the parameter. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- fs/affs/file.c | 2 +- fs/buffer.c | 4 ++-- fs/cifs/file.c | 2 +- fs/ecryptfs/mmap.c | 2 +- fs/ext4/inline.c | 8 ++++---- fs/ext4/inode.c | 4 ++-- fs/ext4/move_extent.c | 4 ++-- fs/f2fs/f2fs.h | 2 +- fs/fuse/file.c | 4 ++-- fs/hostfs/hostfs_kern.c | 2 +- fs/jffs2/file.c | 2 +- fs/libfs.c | 2 +- fs/nfs/file.c | 2 +- fs/ntfs3/inode.c | 2 +- fs/orangefs/inode.c | 2 +- fs/reiserfs/inode.c | 2 +- fs/ubifs/file.c | 4 ++-- fs/udf/file.c | 2 +- include/linux/pagemap.h | 2 +- mm/folio-compat.c | 2 +- 20 files changed, 28 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/fs/affs/file.c b/fs/affs/file.c index 704911d6aeba..06645d05c717 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -670,7 +670,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping } index = pos >> PAGE_SHIFT; - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/buffer.c b/fs/buffer.c index fb97646d1977..01630218c75f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2110,7 +2110,7 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, struct page *page; int status; - page = grab_cache_page_write_begin(mapping, index, 0); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; @@ -2591,7 +2591,7 @@ int nobh_write_begin(struct address_space *mapping, from = pos & (PAGE_SIZE - 1); to = from + len; - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d511a78383c3..91aeae7fced8 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4695,7 +4695,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping, cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len); start: - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) { 
rc = -ENOMEM; goto out; diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 9ad61b582f07..84e399a921ad 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -272,7 +272,7 @@ static int ecryptfs_write_begin(struct file *file, loff_t prev_page_end_size; int rc = 0; - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index b2ef5ba568bc..6d253edebf9f 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -563,7 +563,7 @@ retry: /* We cannot recurse into the filesystem as the transaction is already * started */ flags = memalloc_nofs_save(); - page = grab_cache_page_write_begin(mapping, 0, 0); + page = grab_cache_page_write_begin(mapping, 0); memalloc_nofs_restore(flags); if (!page) { ret = -ENOMEM; @@ -692,7 +692,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping, goto out; flags = memalloc_nofs_save(); - page = grab_cache_page_write_begin(mapping, 0, 0); + page = grab_cache_page_write_begin(mapping, 0); memalloc_nofs_restore(flags); if (!page) { ret = -ENOMEM; @@ -852,7 +852,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping, int ret = 0, inline_size; struct page *page; - page = grab_cache_page_write_begin(mapping, 0, 0); + page = grab_cache_page_write_begin(mapping, 0); if (!page) return -ENOMEM; @@ -946,7 +946,7 @@ retry_journal: * is already started. */ flags = memalloc_nofs_save(); - page = grab_cache_page_write_begin(mapping, 0, 0); + page = grab_cache_page_write_begin(mapping, 0); memalloc_nofs_restore(flags); if (!page) { ret = -ENOMEM; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 01a55647c959..512d8143c765 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1171,7 +1171,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, * the page (if needed) without using GFP_NOFS. 
*/ retry_grab: - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; unlock_page(page); @@ -2963,7 +2963,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, } retry: - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 56f21272fb00..4172a7d22471 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -141,13 +141,13 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2, } flags = memalloc_nofs_save(); - page[0] = grab_cache_page_write_begin(mapping[0], index1, 0); + page[0] = grab_cache_page_write_begin(mapping[0], index1); if (!page[0]) { memalloc_nofs_restore(flags); return -ENOMEM; } - page[1] = grab_cache_page_write_begin(mapping[1], index2, 0); + page[1] = grab_cache_page_write_begin(mapping[1], index2); memalloc_nofs_restore(flags); if (!page[1]) { unlock_page(page[0]); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 74929ade4b5e..18df53ef3d7e 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2677,7 +2677,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, return grab_cache_page(mapping, index); flags = memalloc_nofs_save(); - page = grab_cache_page_write_begin(mapping, index, 0); + page = grab_cache_page_write_begin(mapping, index); memalloc_nofs_restore(flags); return page; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index f18d14d5fea1..e35e394264ad 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1174,7 +1174,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, break; err = -ENOMEM; - page = grab_cache_page_write_begin(mapping, index, 0); + page = grab_cache_page_write_begin(mapping, index); if (!page) break; @@ -2284,7 +2284,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, WARN_ON(!fc->writeback_cache); - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) goto error; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 14f9ac973a2e..2bfd316e1bf1 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -468,7 +468,7 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping, { pgoff_t index = pos >> PAGE_SHIFT; - *pagep = grab_cache_page_write_begin(mapping, index, flags); + *pagep = grab_cache_page_write_begin(mapping, index); if (!*pagep) return -ENOMEM; return 0; diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index bd7d58d27bfc..142d3ba9f0a8 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -213,7 +213,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, * page in read_cache_page(), which causes a deadlock. 
*/ mutex_lock(&c->alloc_sem); - pg = grab_cache_page_write_begin(mapping, index, flags); + pg = grab_cache_page_write_begin(mapping, index); if (!pg) { ret = -ENOMEM; goto release_sem; diff --git a/fs/libfs.c b/fs/libfs.c index e64bdedef168..d4395e1c6696 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -557,7 +557,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping, index = pos >> PAGE_SHIFT; - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 150b7fa8f0a7..d66088dd33e7 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -325,7 +325,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping, file, mapping->host->i_ino, len, (long long) pos); start: - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index 3914138fd8ba..16466c8648f3 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -872,7 +872,7 @@ static int ntfs_write_begin(struct file *file, struct address_space *mapping, *pagep = NULL; if (is_resident(ni)) { struct page *page = grab_cache_page_write_begin( - mapping, pos >> PAGE_SHIFT, flags); + mapping, pos >> PAGE_SHIFT); if (!page) { err = -ENOMEM; diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 79c1025d18ea..809690db8be2 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -338,7 +338,7 @@ static int orangefs_write_begin(struct file *file, index = pos >> PAGE_SHIFT; - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 36c59b25486c..aa31cf1dbba6 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -2764,7 +2764,7 @@ static int reiserfs_write_begin(struct file *file, inode = mapping->host; index = pos >> PAGE_SHIFT; - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 0383fbdc95ff..0911fc311434 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -244,7 +244,7 @@ static int write_begin_slow(struct address_space *mapping, if (unlikely(err)) return err; - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (unlikely(!page)) { ubifs_release_budget(c, &req); return -ENOMEM; @@ -437,7 +437,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, return -EROFS; /* Try out the fast-path part first */ - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, index); if (unlikely(!page)) return -ENOMEM; diff --git a/fs/udf/file.c b/fs/udf/file.c index 0f6bf2504437..724bb3141fda 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -94,7 +94,7 @@ static int udf_adinicb_write_begin(struct file *file, if (WARN_ON_ONCE(pos >= PAGE_SIZE)) return -EIO; - page = grab_cache_page_write_begin(mapping, 0, flags); + page = grab_cache_page_write_begin(mapping, 0); if (!page) return -ENOMEM; *pagep = page; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 993994cd943a..65ae8f96554b 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -735,7 +735,7 @@ static inline unsigned 
find_get_pages_tag(struct address_space *mapping, } struct page *grab_cache_page_write_begin(struct address_space *mapping, - pgoff_t index, unsigned flags); + pgoff_t index); /* * Returns locked page at given index in given cache, creating it if needed. diff --git a/mm/folio-compat.c b/mm/folio-compat.c index 3e42ddb81918..20bc15b57d93 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -131,7 +131,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, EXPORT_SYMBOL(pagecache_get_page); struct page *grab_cache_page_write_begin(struct address_space *mapping, - pgoff_t index, unsigned flags) + pgoff_t index) { unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE; -- cgit v1.2.3-71-gd317 From 8371f30cf774a20fd627a0f7b1ecf00e8257f3bc Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 11:54:56 -0500 Subject: fs: Remove aop flags parameter from nobh_write_begin() There are no more aop flags left, so remove the parameter. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- fs/buffer.c | 3 +-- fs/ext2/inode.c | 2 +- fs/jfs/inode.c | 3 +-- include/linux/buffer_head.h | 2 +- 4 files changed, 4 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/fs/buffer.c b/fs/buffer.c index 01630218c75f..02b50e3e4fbb 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2568,8 +2568,7 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head) * On exit the page is fully uptodate in the areas outside (from,to) * The filesystem needs to handle block truncation upon failure. */ -int nobh_write_begin(struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, +int nobh_write_begin(struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata, get_block_t *get_block) { diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 97192932ea56..bfa69c52ce2c 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -917,7 +917,7 @@ ext2_nobh_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, + ret = nobh_write_begin(mapping, pos, len, pagep, fsdata, ext2_get_block); if (ret < 0) ext2_write_failed(mapping, pos + len); diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index d1943a7b4b04..e16f77b4e84c 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -319,8 +319,7 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping, { int ret; - ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, - jfs_get_block); + ret = nobh_write_begin(mapping, pos, len, pagep, fsdata, jfs_get_block); if (unlikely(ret)) jfs_write_failed(mapping, pos + len); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 127b60fad77e..6e5a64005fef 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -258,7 +258,7 @@ static inline vm_fault_t block_page_mkwrite_return(int err) } sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); int block_truncate_page(struct address_space *, loff_t, get_block_t *); -int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, +int nobh_write_begin(struct address_space *, loff_t, unsigned len, struct page **, void **, get_block_t*); int nobh_write_end(struct file *, struct address_space *, loff_t, unsigned, unsigned, -- cgit v1.2.3-71-gd317 From 9d6b0cd7579844761ed68926eb3073bab1dca87b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 22 Feb 2022 
14:31:43 -0500 Subject: fs: Remove flags parameter from aops->write_begin There are no more aop flags left, so remove the parameter. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- Documentation/filesystems/locking.rst | 2 +- Documentation/filesystems/vfs.rst | 5 +---- block/fops.c | 3 +-- fs/9p/vfs_addr.c | 2 +- fs/adfs/inode.c | 2 +- fs/affs/file.c | 6 +++--- fs/afs/internal.h | 2 +- fs/afs/write.c | 2 +- fs/bfs/file.c | 2 +- fs/ceph/addr.c | 2 +- fs/cifs/file.c | 2 +- fs/ecryptfs/mmap.c | 2 +- fs/exfat/inode.c | 2 +- fs/ext2/inode.c | 6 ++---- fs/ext4/inode.c | 10 +++++----- fs/f2fs/data.c | 5 ++--- fs/f2fs/super.c | 2 +- fs/fat/inode.c | 2 +- fs/fuse/file.c | 3 +-- fs/hfs/inode.c | 2 +- fs/hfsplus/inode.c | 2 +- fs/hostfs/hostfs_kern.c | 2 +- fs/hpfs/file.c | 2 +- fs/hugetlbfs/inode.c | 2 +- fs/jffs2/file.c | 4 ++-- fs/jfs/inode.c | 2 +- fs/libfs.c | 2 +- fs/minix/inode.c | 2 +- fs/nfs/file.c | 2 +- fs/nilfs2/inode.c | 2 +- fs/ntfs3/inode.c | 2 +- fs/ocfs2/aops.c | 2 +- fs/omfs/file.c | 2 +- fs/orangefs/inode.c | 5 ++--- fs/reiserfs/inode.c | 2 +- fs/sysv/itree.c | 2 +- fs/ubifs/file.c | 7 +++---- fs/udf/file.c | 2 +- fs/udf/inode.c | 2 +- fs/ufs/inode.c | 2 +- include/linux/fs.h | 4 ++-- include/trace/events/ext4.h | 21 ++++++++------------- include/trace/events/f2fs.h | 12 ++++-------- mm/filemap.c | 6 ++---- mm/shmem.c | 2 +- 45 files changed, 69 insertions(+), 90 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index c26d854275a0..fd9d9caf09ab 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -242,7 +242,7 @@ prototypes:: bool (*dirty_folio)(struct address_space *, struct folio *folio); void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata); int (*write_end)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 794bd1a66bfb..30f303180a7d 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -727,7 +727,7 @@ cache in your filesystem. The following members are defined: bool (*dirty_folio)(struct address_space *, struct folio *); void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata); int (*write_end)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, @@ -832,9 +832,6 @@ cache in your filesystem. The following members are defined: passed to write_begin is greater than the number of bytes copied into the page). - flags is a field for AOP_FLAG_xxx flags, described in - include/linux/fs.h. - A void * may be returned in fsdata, which then gets passed into write_end. 
diff --git a/block/fops.c b/block/fops.c index b432756570c6..712affe56e29 100644 --- a/block/fops.c +++ b/block/fops.c @@ -398,8 +398,7 @@ static void blkdev_readahead(struct readahead_control *rac) } static int blkdev_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, struct page **pagep, - void **fsdata) + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { return block_write_begin(mapping, pos, len, pagep, blkdev_get_block); } diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index d311e68e21fd..a2d57112f53e 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -260,7 +260,7 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } static int v9fs_write_begin(struct file *filp, struct address_space *mapping, - loff_t pos, unsigned int len, unsigned int flags, + loff_t pos, unsigned int len, struct page **subpagep, void **fsdata) { int retval; diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index b6912496bb19..f7959b1a2d52 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -52,7 +52,7 @@ static void adfs_write_failed(struct address_space *mapping, loff_t to) } static int adfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/affs/file.c b/fs/affs/file.c index 06645d05c717..b952f65c3f06 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -414,7 +414,7 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } static int affs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; @@ -650,7 +650,7 @@ affs_readpage_ofs(struct file *file, struct page *page) } static int affs_write_begin_ofs(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; @@ -887,7 +887,7 @@ affs_truncate(struct inode *inode) loff_t isize = inode->i_size; int res; - res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata); + res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &page, &fsdata); if (!res) res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata); else diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 7b7ef945dc78..7a72e9c60423 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -1535,7 +1535,7 @@ bool afs_dirty_folio(struct address_space *, struct folio *); #define afs_dirty_folio filemap_dirty_folio #endif extern int afs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata); extern int afs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, diff --git a/fs/afs/write.c b/fs/afs/write.c index af496c98d394..5224e346fbad 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -42,7 +42,7 @@ static void afs_folio_start_fscache(bool caching, struct folio *folio) * prepare to perform part of a write to a page */ int afs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **_page, void **fsdata) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); diff --git a/fs/bfs/file.c b/fs/bfs/file.c index 
9408f45225cb..dc97c9b8f23b 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -169,7 +169,7 @@ static void bfs_write_failed(struct address_space *mapping, loff_t to) } static int bfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 415f0886bc25..e65541a51b68 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1311,7 +1311,7 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned * clean, or already dirty within the same snap context. */ static int ceph_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned aop_flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode = file_inode(file); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 91aeae7fced8..da362b5a0c96 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4681,7 +4681,7 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) } static int cifs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int oncethru = 0; diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 84e399a921ad..47904d40ef88 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -264,7 +264,7 @@ out: */ static int ecryptfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_SHIFT; diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 8ed3c4b700cd..b9f63113db2d 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -389,7 +389,7 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to) } static int exfat_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned int len, unsigned int flags, + loff_t pos, unsigned int len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index bfa69c52ce2c..d8ca8050945a 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -887,8 +887,7 @@ static void ext2_readahead(struct readahead_control *rac) static int ext2_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; @@ -912,8 +911,7 @@ static int ext2_write_end(struct file *file, struct address_space *mapping, static int ext2_nobh_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 512d8143c765..d3a7e8581291 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1130,7 +1130,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, #endif static int ext4_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; @@ -1144,7 +1144,7 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, if 
(unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) return -EIO; - trace_ext4_write_begin(inode, pos, len, flags); + trace_ext4_write_begin(inode, pos, len); /* * Reserve one block more for addition to orphan list in case * we allocate blocks but write fails for some reason @@ -2931,7 +2931,7 @@ static int ext4_nonda_switch(struct super_block *sb) } static int ext4_da_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret, retries = 0; @@ -2948,10 +2948,10 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, ext4_verity_in_progress(inode)) { *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; return ext4_write_begin(file, mapping, pos, - len, flags, pagep, fsdata); + len, pagep, fsdata); } *fsdata = (void *)0; - trace_ext4_da_write_begin(inode, pos, len, flags); + trace_ext4_da_write_begin(inode, pos, len); if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { ret = ext4_da_write_inline_data_begin(mapping, inode, pos, len, diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 9a1a526f2092..b3cf49136b9f 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3314,8 +3314,7 @@ unlock_out: } static int f2fs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -3325,7 +3324,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, block_t blkaddr = NULL_ADDR; int err = 0; - trace_f2fs_write_begin(inode, pos, len, flags); + trace_f2fs_write_begin(inode, pos, len); if (!f2fs_is_checkpoint_ready(sbi)) { err = -ENOSPC; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 4368f90571bd..ed3e8b7a8260 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2483,7 +2483,7 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type, tocopy = min_t(unsigned long, sb->s_blocksize - offset, towrite); retry: - err = a_ops->write_begin(NULL, mapping, off, tocopy, 0, + err = a_ops->write_begin(NULL, mapping, off, tocopy, &page, &fsdata); if (unlikely(err)) { if (err == -ENOMEM) { diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 9b34ccef2501..1f15b0fd1bb0 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -226,7 +226,7 @@ static void fat_write_failed(struct address_space *mapping, loff_t to) } static int fat_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e35e394264ad..bca8c2135ec5 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2273,8 +2273,7 @@ out: * but how to implement it without killing performance need more thinking. 
*/ static int fuse_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_SHIFT; struct fuse_conn *fc = get_fuse_conn(file_inode(file)); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 396735dd3407..93d9aa832139 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -50,7 +50,7 @@ static void hfs_write_failed(struct address_space *mapping, loff_t to) } static int hfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 435b6202532a..73010aa4623f 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -44,7 +44,7 @@ static void hfsplus_write_failed(struct address_space *mapping, loff_t to) } static int hfsplus_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 2bfd316e1bf1..e658d8edde35 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -463,7 +463,7 @@ static int hostfs_readpage(struct file *file, struct page *page) } static int hostfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_SHIFT; diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 8740b4ea0b52..8b590b3826c3 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -194,7 +194,7 @@ static void hpfs_write_failed(struct address_space *mapping, loff_t to) } static int hpfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index dd3a088db11d..2de9ca5d260d 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -383,7 +383,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) static int hugetlbfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { return -EINVAL; diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 142d3ba9f0a8..2b35811772de 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -25,7 +25,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *pg, void *fsdata); static int jffs2_write_begin(struct file *filp, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata); static int jffs2_readpage (struct file *filp, struct page *pg); @@ -130,7 +130,7 @@ static int jffs2_readpage (struct file *filp, struct page *pg) } static int jffs2_write_begin(struct file *filp, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct page *pg; diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index e16f77b4e84c..aa9f112107b2 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -314,7 +314,7 @@ static void jfs_write_failed(struct 
address_space *mapping, loff_t to) } static int jfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/libfs.c b/fs/libfs.c index d4395e1c6696..a1c10d3163e0 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -549,7 +549,7 @@ static int simple_readpage(struct file *file, struct page *page) } int simple_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct page *page; diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 5e8d7ba661cf..3add78bccedc 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -423,7 +423,7 @@ static void minix_write_failed(struct address_space *mapping, loff_t to) } static int minix_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index d66088dd33e7..314d2d7ba84a 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -313,7 +313,7 @@ static bool nfs_want_read_modify_write(struct file *file, struct page *page, * increment the page use counts until he is done with the page. */ static int nfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index be09a0d10f04..02297ec8dc55 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -248,7 +248,7 @@ void nilfs_write_failed(struct address_space *mapping, loff_t to) } static int nilfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index 16466c8648f3..1364174cc6c9 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -862,7 +862,7 @@ static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn, } static int ntfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, u32 len, u32 flags, struct page **pagep, + loff_t pos, u32 len, struct page **pagep, void **fsdata) { int err; diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 4b9af65cb61b..7cffe9dcad17 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1881,7 +1881,7 @@ out: } static int ocfs2_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 349b96d89c44..980b0a72c172 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -316,7 +316,7 @@ static void omfs_write_failed(struct address_space *mapping, loff_t to) } static int omfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 809690db8be2..bc7ccd15d7a3 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -326,9 +326,8 @@ static int orangefs_readpage(struct file *file, struct page *page) } static int orangefs_write_begin(struct file *file, - struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, struct page **pagep, 
- void **fsdata) + struct address_space *mapping, loff_t pos, unsigned len, + struct page **pagep, void **fsdata) { struct orangefs_write_range *wr; struct folio *folio; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index aa31cf1dbba6..46ba4892030a 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -2753,7 +2753,7 @@ static void reiserfs_truncate_failed_write(struct inode *inode) static int reiserfs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode; diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 96b7fd4facf3..96ad24fe0ffb 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -477,7 +477,7 @@ static void sysv_write_failed(struct address_space *mapping, loff_t to) } static int sysv_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 0911fc311434..81c085c4decf 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -215,8 +215,7 @@ static void release_existing_page_budget(struct ubifs_info *c) } static int write_begin_slow(struct address_space *mapping, - loff_t pos, unsigned len, struct page **pagep, - unsigned flags) + loff_t pos, unsigned len, struct page **pagep) { struct inode *inode = mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; @@ -419,7 +418,7 @@ static int allocate_budget(struct ubifs_info *c, struct page *page, * without forcing write-back. The slow path does not make this assumption. */ static int ubifs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; @@ -493,7 +492,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, unlock_page(page); put_page(page); - return write_begin_slow(mapping, pos, len, pagep, flags); + return write_begin_slow(mapping, pos, len, pagep); } /* diff --git a/fs/udf/file.c b/fs/udf/file.c index 724bb3141fda..3f4d5c44c784 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -87,7 +87,7 @@ static int udf_adinicb_writepage(struct page *page, static int udf_adinicb_write_begin(struct file *file, struct address_space *mapping, loff_t pos, - unsigned len, unsigned flags, struct page **pagep, + unsigned len, struct page **pagep, void **fsdata) { struct page *page; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 88a95886ce8a..866f9a53248e 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -204,7 +204,7 @@ static void udf_readahead(struct readahead_control *rac) } static int udf_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index bd0e0c66f93d..6c973b71cab2 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -495,7 +495,7 @@ static void ufs_write_failed(struct address_space *mapping, loff_t to) } static int ufs_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { int ret; diff --git a/include/linux/fs.h b/include/linux/fs.h index f81bc5cbcbb6..a0e73432526f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -346,7 +346,7 
@@ struct address_space_operations { void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata); int (*write_end)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, @@ -3179,7 +3179,7 @@ extern int noop_fsync(struct file *, loff_t, loff_t, int); extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); extern int simple_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata); extern const struct address_space_operations ram_aops; extern int always_delete_dentry(const struct dentry *); diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index d06ffffad434..229e8fae66a3 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -335,17 +335,15 @@ TRACE_EVENT(ext4_begin_ordered_truncate, DECLARE_EVENT_CLASS(ext4__write_begin, - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int flags), + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len), - TP_ARGS(inode, pos, len, flags), + TP_ARGS(inode, pos, len), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field( loff_t, pos ) __field( unsigned int, len ) - __field( unsigned int, flags ) ), TP_fast_assign( @@ -353,29 +351,26 @@ DECLARE_EVENT_CLASS(ext4__write_begin, __entry->ino = inode->i_ino; __entry->pos = pos; __entry->len = len; - __entry->flags = flags; ), - TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u", + TP_printk("dev %d,%d ino %lu pos %lld len %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - __entry->pos, __entry->len, __entry->flags) + __entry->pos, __entry->len) ); DEFINE_EVENT(ext4__write_begin, ext4_write_begin, - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int flags), + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len), - TP_ARGS(inode, pos, len, flags) + TP_ARGS(inode, pos, len) ); DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin, - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int flags), + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len), - TP_ARGS(inode, pos, len, flags) + TP_ARGS(inode, pos, len) ); DECLARE_EVENT_CLASS(ext4__write_end, diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 1779e133cea0..bea654a85e6b 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1159,17 +1159,15 @@ DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio, TRACE_EVENT(f2fs_write_begin, - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int flags), + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len), - TP_ARGS(inode, pos, len, flags), + TP_ARGS(inode, pos, len), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(loff_t, pos) __field(unsigned int, len) - __field(unsigned int, flags) ), TP_fast_assign( @@ -1177,14 +1175,12 @@ TRACE_EVENT(f2fs_write_begin, __entry->ino = inode->i_ino; __entry->pos = pos; __entry->len = len; - __entry->flags = flags; ), - TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, flags = %u", + TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u", show_dev_ino(__entry), (unsigned long long)__entry->pos, - __entry->len, - __entry->flags) + __entry->len) ); 
TRACE_EVENT(f2fs_write_end, diff --git a/mm/filemap.c b/mm/filemap.c index 9a1eef6c5d35..0751843b052f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3628,8 +3628,7 @@ int pagecache_write_begin(struct file *file, struct address_space *mapping, { const struct address_space_operations *aops = mapping->a_ops; - return aops->write_begin(file, mapping, pos, len, flags, - pagep, fsdata); + return aops->write_begin(file, mapping, pos, len, pagep, fsdata); } EXPORT_SYMBOL(pagecache_write_begin); @@ -3754,7 +3753,6 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) const struct address_space_operations *a_ops = mapping->a_ops; long status = 0; ssize_t written = 0; - unsigned int flags = 0; do { struct page *page; @@ -3784,7 +3782,7 @@ again: break; } - status = a_ops->write_begin(file, mapping, pos, bytes, flags, + status = a_ops->write_begin(file, mapping, pos, bytes, &page, &fsdata); if (unlikely(status < 0)) break; diff --git a/mm/shmem.c b/mm/shmem.c index 4b2fea33158e..0f557a512171 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2426,7 +2426,7 @@ static int shmem_initxattrs(struct inode *, const struct xattr *, void *); static int shmem_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, + loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; -- cgit v1.2.3-71-gd317 From 84a1041c60ff8f648a09d28af7b2e50a8f6345ed Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 3 Mar 2022 15:00:20 -0500 Subject: fs: Remove pagecache_write_begin() and pagecache_write_end() These wrappers have no more users; remove them. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- include/linux/fs.h | 12 ------------ mm/filemap.c | 20 -------------------- 2 files changed, 32 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index a0e73432526f..b35ce086a7a1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -380,18 +380,6 @@ struct address_space_operations { extern const struct address_space_operations empty_aops; -/* - * pagecache_write_begin/pagecache_write_end must be used by general code - * to write into the pagecache. - */ -int pagecache_write_begin(struct file *, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata); - -int pagecache_write_end(struct file *, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata); - /** * struct address_space - Contents of a cacheable, mappable object. * @host: Owner, either the inode or the block_device. 
diff --git a/mm/filemap.c b/mm/filemap.c index 0751843b052f..c15cfc28f9ce 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3622,26 +3622,6 @@ struct page *read_cache_page_gfp(struct address_space *mapping, } EXPORT_SYMBOL(read_cache_page_gfp); -int pagecache_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) -{ - const struct address_space_operations *aops = mapping->a_ops; - - return aops->write_begin(file, mapping, pos, len, pagep, fsdata); -} -EXPORT_SYMBOL(pagecache_write_begin); - -int pagecache_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) -{ - const struct address_space_operations *aops = mapping->a_ops; - - return aops->write_end(file, mapping, pos, len, copied, page, fsdata); -} -EXPORT_SYMBOL(pagecache_write_end); - /* * Warn about a page cache invalidation failure during a direct I/O write. */ -- cgit v1.2.3-71-gd317 From 65aa6b5a18294b3713a90c120312ed5d63a16b82 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 4 Apr 2022 11:38:20 -0400 Subject: filemap: Remove obsolete comment in lock_page We no longer need the page's inode pinned. This comment dates back to commit db37648cd6ce ("[PATCH] mm: non syncing lock_page()") which added lock_page_nosync(). That was removed by commit 7eaceaccab5f ("block: remove per-queue plugging") which also made this comment obsolete. Signed-off-by: Miaohe Lin Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- include/linux/pagemap.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 65ae8f96554b..ab47579af434 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -908,9 +908,6 @@ static inline void folio_lock(struct folio *folio) __folio_lock(folio); } -/* - * lock_page may only be called if we have the page's inode pinned. - */ static inline void lock_page(struct page *page) { struct folio *folio; -- cgit v1.2.3-71-gd317 From cd125eeab2de8ef8ca3a0f3a284bc695375c73af Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 4 Apr 2022 13:24:36 -0400 Subject: filemap: Update the folio_lock documentation Add kernel-doc for several functions relating to take the folio lock. Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/pagemap.h | 59 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ab47579af434..60657132080f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -888,6 +888,18 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, void unlock_page(struct page *page); void folio_unlock(struct folio *folio); +/** + * folio_trylock() - Attempt to lock a folio. + * @folio: The folio to attempt to lock. + * + * Sometimes it is undesirable to wait for a folio to be unlocked (eg + * when the locks are being taken in the wrong order, or if making + * progress through a batch of folios is more important than processing + * them in order). Usually folio_lock() is the correct function to call. + * + * Context: Any context. + * Return: Whether the lock was successfully acquired. 
+ */ static inline bool folio_trylock(struct folio *folio) { return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0))); @@ -901,6 +913,28 @@ static inline int trylock_page(struct page *page) return folio_trylock(page_folio(page)); } +/** + * folio_lock() - Lock this folio. + * @folio: The folio to lock. + * + * The folio lock protects against many things, probably more than it + * should. It is primarily held while a folio is being brought uptodate, + * either from its backing file or from swap. It is also held while a + * folio is being truncated from its address_space, so holding the lock + * is sufficient to keep folio->mapping stable. + * + * The folio lock is also held while write() is modifying the page to + * provide POSIX atomicity guarantees (as long as the write does not + * cross a page boundary). Other modifications to the data in the folio + * do not hold the folio lock and can race with writes, eg DMA and stores + * to mapped pages. + * + * Context: May sleep. If you need to acquire the locks of two or + * more folios, they must be in order of ascending index, if they are + * in the same address_space. If they are in different address_spaces, + * acquire the lock of the folio which belongs to the address_space which + * has the lowest address in memory first. + */ static inline void folio_lock(struct folio *folio) { might_sleep(); @@ -908,6 +942,17 @@ static inline void folio_lock(struct folio *folio) __folio_lock(folio); } +/** + * lock_page() - Lock the folio containing this page. + * @page: The page to lock. + * + * See folio_lock() for a description of what the lock protects. + * This is a legacy function and new code should probably use folio_lock() + * instead. + * + * Context: May sleep. Pages in the same folio share a lock, so do not + * attempt to lock two pages which share a folio. + */ static inline void lock_page(struct page *page) { struct folio *folio; @@ -918,6 +963,16 @@ static inline void lock_page(struct page *page) __folio_lock(folio); } +/** + * folio_lock_killable() - Lock this folio, interruptible by a fatal signal. + * @folio: The folio to lock. + * + * Attempts to lock the folio, like folio_lock(), except that the sleep + * to acquire the lock is interruptible by a fatal signal. + * + * Context: May sleep; see folio_lock(). + * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received. + */ static inline int folio_lock_killable(struct folio *folio) { might_sleep(); @@ -964,8 +1019,8 @@ int folio_wait_bit_killable(struct folio *folio, int bit_nr); * Wait for a folio to be unlocked. * * This must be called with the caller "holding" the folio, - * ie with increased "page->count" so that the folio won't - * go away during the wait.. + * ie with increased folio reference count so that the folio won't + * go away during the wait. */ static inline void folio_wait_locked(struct folio *folio) { -- cgit v1.2.3-71-gd317 From 520f301c54faa3484e820b80d4505d48ee587163 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 17 Jan 2022 14:35:22 -0500 Subject: fs: Convert is_dirty_writeback() to take a folio Pass a folio instead of a page to aops->is_dirty_writeback(). Convert both implementations and the caller. 
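A filesystem with no special state can simply leave the hook unimplemented, since the reclaim path already consults the folio dirty and writeback flags itself; the sketch below only illustrates the shape of the converted callback (the myfs_* name is hypothetical and not part of this series):

	static void myfs_is_dirty_writeback(struct folio *folio,
					    bool *dirty, bool *writeback)
	{
		/* Report per-folio state via the folio flag helpers. */
		*dirty = folio_test_dirty(folio);
		*writeback = folio_test_writeback(folio);
	}
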
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- Documentation/filesystems/vfs.rst | 10 +++++----- fs/buffer.c | 16 ++++++++-------- fs/nfs/file.c | 21 +++++++++------------ include/linux/buffer_head.h | 2 +- include/linux/fs.h | 2 +- mm/vmscan.c | 2 +- 6 files changed, 25 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 30f303180a7d..469882f72fc1 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -747,7 +747,7 @@ cache in your filesystem. The following members are defined: bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); - void (*is_dirty_writeback) (struct page *, bool *, bool *); + void (*is_dirty_writeback)(struct folio *, bool *, bool *); int (*error_remove_page) (struct mapping *mapping, struct page *page); int (*swap_activate)(struct file *); int (*swap_deactivate)(struct file *); @@ -932,14 +932,14 @@ cache in your filesystem. The following members are defined: without needing I/O to bring the whole page up to date. ``is_dirty_writeback`` - Called by the VM when attempting to reclaim a page. The VM uses + Called by the VM when attempting to reclaim a folio. The VM uses dirty and writeback information to determine if it needs to stall to allow flushers a chance to complete some IO. - Ordinarily it can use PageDirty and PageWriteback but some - filesystems have more complex state (unstable pages in NFS + Ordinarily it can use folio_test_dirty and folio_test_writeback but + some filesystems have more complex state (unstable folios in NFS prevent reclaim) or do not set those flags due to locking problems. This callback allows a filesystem to indicate to the - VM if a page should be treated as dirty or writeback for the + VM if a folio should be treated as dirty or writeback for the purposes of stalling. ``error_remove_page`` diff --git a/fs/buffer.c b/fs/buffer.c index d538495a0553..fb4df259c92d 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -79,26 +79,26 @@ void unlock_buffer(struct buffer_head *bh) EXPORT_SYMBOL(unlock_buffer); /* - * Returns if the page has dirty or writeback buffers. If all the buffers - * are unlocked and clean then the PageDirty information is stale. If - * any of the pages are locked, it is assumed they are locked for IO. + * Returns if the folio has dirty or writeback buffers. If all the buffers + * are unlocked and clean then the folio_test_dirty information is stale. If + * any of the buffers are locked, it is assumed they are locked for IO. 
*/ -void buffer_check_dirty_writeback(struct page *page, +void buffer_check_dirty_writeback(struct folio *folio, bool *dirty, bool *writeback) { struct buffer_head *head, *bh; *dirty = false; *writeback = false; - BUG_ON(!PageLocked(page)); + BUG_ON(!folio_test_locked(folio)); - if (!page_has_buffers(page)) + head = folio_buffers(folio); + if (!head) return; - if (PageWriteback(page)) + if (folio_test_writeback(folio)) *writeback = true; - head = page_buffers(page); bh = head; do { if (buffer_locked(bh)) diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 314d2d7ba84a..f05c4b18b681 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -430,19 +430,16 @@ static int nfs_release_page(struct page *page, gfp_t gfp) return nfs_fscache_release_page(page, gfp); } -static void nfs_check_dirty_writeback(struct page *page, +static void nfs_check_dirty_writeback(struct folio *folio, bool *dirty, bool *writeback) { struct nfs_inode *nfsi; - struct address_space *mapping = page_file_mapping(page); - - if (!mapping || PageSwapCache(page)) - return; + struct address_space *mapping = folio->mapping; /* - * Check if an unstable page is currently being committed and - * if so, have the VM treat it as if the page is under writeback - * so it will not block due to pages that will shortly be freeable. + * Check if an unstable folio is currently being committed and + * if so, have the VM treat it as if the folio is under writeback + * so it will not block due to folios that will shortly be freeable. */ nfsi = NFS_I(mapping->host); if (atomic_read(&nfsi->commit_info.rpcs_out)) { @@ -451,11 +448,11 @@ static void nfs_check_dirty_writeback(struct page *page, } /* - * If PagePrivate() is set, then the page is not freeable and as the - * inode is not being committed, it's not going to be cleaned in the - * near future so treat it as dirty + * If the private flag is set, then the folio is not freeable + * and as the inode is not being committed, it's not going to + * be cleaned in the near future so treat it as dirty */ - if (PagePrivate(page)) + if (folio_test_private(folio)) *dirty = true; } diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 6e5a64005fef..805c4e12700a 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -146,7 +146,7 @@ BUFFER_FNS(Defer_Completion, defer_completion) #define page_has_buffers(page) PagePrivate(page) #define folio_buffers(folio) folio_get_private(folio) -void buffer_check_dirty_writeback(struct page *page, +void buffer_check_dirty_writeback(struct folio *folio, bool *dirty, bool *writeback); /* diff --git a/include/linux/fs.h b/include/linux/fs.h index b35ce086a7a1..2be852661a29 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -369,7 +369,7 @@ struct address_space_operations { int (*launder_folio)(struct folio *); bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); - void (*is_dirty_writeback) (struct page *, bool *, bool *); + void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb); int (*error_remove_page)(struct address_space *, struct page *); /* swapfile support */ diff --git a/mm/vmscan.c b/mm/vmscan.c index 1678802e03e7..27851232e00c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1451,7 +1451,7 @@ static void folio_check_dirty_writeback(struct folio *folio, mapping = folio_mapping(folio); if (mapping && mapping->a_ops->is_dirty_writeback) - mapping->a_ops->is_dirty_writeback(&folio->page, dirty, writeback); + mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); } static struct page 
*alloc_demote_page(struct page *page, unsigned long node) -- cgit v1.2.3-71-gd317 From 2ebdd1df316636c2faf25a1780e12553adf09cf7 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 17 Mar 2021 22:38:26 -0400 Subject: mm/readahead: Convert page_cache_async_readahead to take a folio Removes a couple of calls to compound_head and saves a few bytes. Also convert verity's read_file_data_page() to be folio-based. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig --- fs/btrfs/relocation.c | 5 +++-- fs/btrfs/send.c | 3 ++- fs/verity/enable.c | 29 ++++++++++++++--------------- include/linux/pagemap.h | 6 +++--- 4 files changed, 22 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index fdc2c4b411f0..9ae06895ffc9 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2967,8 +2967,9 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, goto release_page; if (PageReadahead(page)) - page_cache_async_readahead(inode->i_mapping, ra, NULL, page, - page_index, last_index + 1 - page_index); + page_cache_async_readahead(inode->i_mapping, ra, NULL, + page_folio(page), page_index, + last_index + 1 - page_index); if (!PageUptodate(page)) { btrfs_readpage(NULL, page); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 7d1642937274..b327dbe0cbf5 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -4986,7 +4986,8 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len) if (PageReadahead(page)) { page_cache_async_readahead(inode->i_mapping, &sctx->ra, - NULL, page, index, last_index + 1 - index); + NULL, page_folio(page), index, + last_index + 1 - index); } if (!PageUptodate(page)) { diff --git a/fs/verity/enable.c b/fs/verity/enable.c index 60a4372aa4d7..f75d2c010f36 100644 --- a/fs/verity/enable.c +++ b/fs/verity/enable.c @@ -18,27 +18,26 @@ * Read a file data page for Merkle tree construction. Do aggressive readahead, * since we're sequentially reading the entire file. 
*/ -static struct page *read_file_data_page(struct file *filp, pgoff_t index, +static struct page *read_file_data_page(struct file *file, pgoff_t index, struct file_ra_state *ra, unsigned long remaining_pages) { - struct page *page; + DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, index); + struct folio *folio; - page = find_get_page_flags(filp->f_mapping, index, FGP_ACCESSED); - if (!page || !PageUptodate(page)) { - if (page) - put_page(page); + folio = __filemap_get_folio(ractl.mapping, index, FGP_ACCESSED, 0); + if (!folio || !folio_test_uptodate(folio)) { + if (folio) + folio_put(folio); else - page_cache_sync_readahead(filp->f_mapping, ra, filp, - index, remaining_pages); - page = read_mapping_page(filp->f_mapping, index, NULL); - if (IS_ERR(page)) - return page; + page_cache_sync_ra(&ractl, remaining_pages); + folio = read_cache_folio(ractl.mapping, index, NULL, file); + if (IS_ERR(folio)) + return &folio->page; } - if (PageReadahead(page)) - page_cache_async_readahead(filp->f_mapping, ra, filp, page, - index, remaining_pages); - return page; + if (folio_test_readahead(folio)) + page_cache_async_ra(&ractl, folio, remaining_pages); + return folio_file_page(folio, index); } static int build_merkle_tree_level(struct file *filp, unsigned int level, diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 60657132080f..b70192f56454 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1242,7 +1242,7 @@ void page_cache_sync_readahead(struct address_space *mapping, * @mapping: address_space which holds the pagecache and I/O vectors * @ra: file_ra_state which holds the readahead state * @file: Used by the filesystem for authentication. - * @page: The page at @index which triggered the readahead call. + * @folio: The folio at @index which triggered the readahead call. * @index: Index of first page to be read. * @req_count: Total number of pages being read by the caller. * @@ -1254,10 +1254,10 @@ void page_cache_sync_readahead(struct address_space *mapping, static inline void page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, - struct page *page, pgoff_t index, unsigned long req_count) + struct folio *folio, pgoff_t index, unsigned long req_count) { DEFINE_READAHEAD(ractl, file, ra, mapping, index); - page_cache_async_ra(&ractl, page_folio(page), req_count); + page_cache_async_ra(&ractl, folio, req_count); } static inline struct folio *__readahead_folio(struct readahead_control *ractl) -- cgit v1.2.3-71-gd317 From 5efe7448a1426250b5747c10ad438517f44f1e51 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 Apr 2022 08:43:23 -0400 Subject: fs: Introduce aops->read_folio Change all the callers of ->readpage to call ->read_folio in preference, if it exists. This is a transitional duplication, and will be removed by the end of the series. 
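During the transition the page cache simply prefers the new method and falls back to the old one; roughly (paraphrasing the filemap_read_folio() hunk below):

	if (mapping->a_ops->read_folio)
		error = mapping->a_ops->read_folio(file, folio);
	else
		error = mapping->a_ops->readpage(file, &folio->page);
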
Signed-off-by: Matthew Wilcox (Oracle) --- fs/btrfs/file.c | 2 +- fs/buffer.c | 5 ++++- fs/ceph/addr.c | 2 +- include/linux/fs.h | 1 + kernel/events/uprobes.c | 6 ++++-- mm/filemap.c | 9 +++++++-- mm/readahead.c | 14 +++++++++----- mm/swapfile.c | 2 +- 8 files changed, 28 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 380054c94e4b..59510d7b1c65 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2401,7 +2401,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) { struct address_space *mapping = filp->f_mapping; - if (!mapping->a_ops->readpage) + if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio) return -ENOEXEC; file_accessed(filp); diff --git a/fs/buffer.c b/fs/buffer.c index 9737e0dbe3ec..225d03cd622d 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2824,7 +2824,10 @@ int nobh_truncate_page(struct address_space *mapping, /* Ok, it's mapped. Make sure it's up-to-date */ if (!folio_test_uptodate(folio)) { - err = mapping->a_ops->readpage(NULL, &folio->page); + if (mapping->a_ops->read_folio) + err = mapping->a_ops->read_folio(NULL, folio); + else + err = mapping->a_ops->readpage(NULL, &folio->page); if (err) { folio_put(folio); goto out; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index e65541a51b68..42bba2b5d98b 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1772,7 +1772,7 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma) { struct address_space *mapping = file->f_mapping; - if (!mapping->a_ops->readpage) + if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio) return -ENOEXEC; file_accessed(file); vma->vm_ops = &ceph_vmops; diff --git a/include/linux/fs.h b/include/linux/fs.h index 2be852661a29..5ad942183a2c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -336,6 +336,7 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb) struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); + int (*read_folio)(struct file *, struct folio *); /* Write back some dirty pages from this mapping. */ int (*writepages)(struct address_space *, struct writeback_control *); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 6418083901d4..2c7815d20038 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -790,7 +790,7 @@ static int __copy_insn(struct address_space *mapping, struct file *filp, * and in page-cache. If ->readpage == NULL it must be shmem_mapping(), * see uprobe_register(). */ - if (mapping->a_ops->readpage) + if (mapping->a_ops->read_folio || mapping->a_ops->readpage) page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp); else page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); @@ -1143,7 +1143,9 @@ static int __uprobe_register(struct inode *inode, loff_t offset, return -EINVAL; /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */ - if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping)) + if (!inode->i_mapping->a_ops->read_folio && + !inode->i_mapping->a_ops->readpage && + !shmem_mapping(inode->i_mapping)) return -EIO; /* Racy, just to catch the obvious mistakes */ if (offset > i_size_read(inode)) diff --git a/mm/filemap.c b/mm/filemap.c index c15cfc28f9ce..96e3d7ffd98e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2419,7 +2419,10 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping, */ folio_clear_error(folio); /* Start the actual read. 
The read will unlock the page. */ - error = mapping->a_ops->readpage(file, &folio->page); + if (mapping->a_ops->read_folio) + error = mapping->a_ops->read_folio(file, folio); + else + error = mapping->a_ops->readpage(file, &folio->page); if (error) return error; @@ -3447,7 +3450,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma) { struct address_space *mapping = file->f_mapping; - if (!mapping->a_ops->readpage) + if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) return -ENOEXEC; file_accessed(file); vma->vm_ops = &generic_file_vm_ops; @@ -3505,6 +3508,8 @@ repeat: filler: if (filler) err = filler(data, &folio->page); + else if (mapping->a_ops->read_folio) + err = mapping->a_ops->read_folio(data, folio); else err = mapping->a_ops->readpage(data, &folio->page); diff --git a/mm/readahead.c b/mm/readahead.c index 60a28af25c4e..76024c20a5a5 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -15,7 +15,7 @@ * explicitly requested by the application. Readahead only ever * attempts to read folios that are not yet in the page cache. If a * folio is present but not up-to-date, readahead will not try to read - * it. In that case a simple ->readpage() will be requested. + * it. In that case a simple ->read_folio() will be requested. * * Readahead is triggered when an application read request (whether a * system call or a page fault) finds that the requested folio is not in @@ -78,7 +78,7 @@ * address space operation, for which mpage_readahead() is a canonical * implementation. ->readahead() should normally initiate reads on all * folios, but may fail to read any or all folios without causing an I/O - * error. The page cache reading code will issue a ->readpage() request + * error. The page cache reading code will issue a ->read_folio() request * for any folio which ->readahead() did not read, and only an error * from this will be final. * @@ -110,7 +110,7 @@ * were not fetched with readahead_folio(). This will allow a * subsequent synchronous readahead request to try them again. If they * are left in the page cache, then they will be read individually using - * ->readpage() which may be less efficient. + * ->read_folio() which may be less efficient. */ #include @@ -170,8 +170,11 @@ static void read_pages(struct readahead_control *rac) } folio_unlock(folio); } + } else if (aops->read_folio) { + while ((folio = readahead_folio(rac)) != NULL) + aops->read_folio(rac->file, folio); } else { - while ((folio = readahead_folio(rac))) + while ((folio = readahead_folio(rac)) != NULL) aops->readpage(rac->file, &folio->page); } @@ -302,7 +305,8 @@ void force_page_cache_ra(struct readahead_control *ractl, struct backing_dev_info *bdi = inode_to_bdi(mapping->host); unsigned long max_pages, index; - if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readahead)) + if (unlikely(!mapping->a_ops->read_folio && + !mapping->a_ops->readpage && !mapping->a_ops->readahead)) return; /* diff --git a/mm/swapfile.c b/mm/swapfile.c index 63c61f8b2611..7c19098b8b45 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3041,7 +3041,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) /* * Read the swap header. 
*/ - if (!mapping->a_ops->readpage) { + if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) { error = -EINVAL; goto bad_swap_unlock_inode; } -- cgit v1.2.3-71-gd317 From 6c62371b7fd77628feb5b806bc29433caecedff8 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 Apr 2022 08:49:28 -0400 Subject: fs: Convert netfs_readpage to netfs_read_folio This is straightforward because netfs already worked in terms of folios. Signed-off-by: Matthew Wilcox (Oracle) --- fs/9p/vfs_addr.c | 2 +- fs/afs/file.c | 2 +- fs/ceph/addr.c | 2 +- fs/netfs/buffered_read.c | 15 +++++++-------- include/linux/netfs.h | 2 +- 5 files changed, 11 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index a2d57112f53e..3a84167f4893 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -336,7 +336,7 @@ static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio) #endif const struct address_space_operations v9fs_addr_operations = { - .readpage = netfs_readpage, + .read_folio = netfs_read_folio, .readahead = netfs_readahead, .dirty_folio = v9fs_dirty_folio, .writepage = v9fs_vfs_writepage, diff --git a/fs/afs/file.c b/fs/afs/file.c index 26292a110a8f..e277fbe55262 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -50,7 +50,7 @@ const struct inode_operations afs_file_inode_operations = { }; const struct address_space_operations afs_file_aops = { - .readpage = netfs_readpage, + .read_folio = netfs_read_folio, .readahead = netfs_readahead, .dirty_folio = afs_dirty_folio, .launder_folio = afs_launder_folio, diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 42bba2b5d98b..be3e47784f08 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1372,7 +1372,7 @@ out: } const struct address_space_operations ceph_aops = { - .readpage = netfs_readpage, + .read_folio = netfs_read_folio, .readahead = netfs_readahead, .writepage = ceph_writepage, .writepages = ceph_writepages_start, diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 1d44509455a5..8742d22dfd2b 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -198,22 +198,21 @@ cleanup_free: EXPORT_SYMBOL(netfs_readahead); /** - * netfs_readpage - Helper to manage a readpage request + * netfs_read_folio - Helper to manage a read_folio request * @file: The file to read from - * @subpage: A subpage of the folio to read + * @folio: The folio to read * - * Fulfil a readpage request by drawing data from the cache if possible, or the - * netfs if not. Space beyond the EOF is zero-filled. Multiple I/O requests - * from different sources will get munged together. + * Fulfil a read_folio request by drawing data from the cache if + * possible, or the netfs if not. Space beyond the EOF is zero-filled. + * Multiple I/O requests from different sources will get munged together. * * The calling netfs must initialise a netfs context contiguous to the vfs * inode before calling this. * * This is usable whether or not caching is enabled. 
*/ -int netfs_readpage(struct file *file, struct page *subpage) +int netfs_read_folio(struct file *file, struct folio *folio) { - struct folio *folio = page_folio(subpage); struct address_space *mapping = folio_file_mapping(folio); struct netfs_io_request *rreq; struct netfs_i_context *ctx = netfs_i_context(mapping->host); @@ -245,7 +244,7 @@ alloc_error: folio_unlock(folio); return ret; } -EXPORT_SYMBOL(netfs_readpage); +EXPORT_SYMBOL(netfs_read_folio); /* * Prepare a folio for writing without reading first diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 1c29f317d907..4bd5ee709daa 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -274,7 +274,7 @@ struct netfs_cache_ops { struct readahead_control; extern void netfs_readahead(struct readahead_control *); -extern int netfs_readpage(struct file *, struct page *); +int netfs_read_folio(struct file *, struct folio *); extern int netfs_write_begin(struct file *, struct address_space *, loff_t, unsigned int, struct folio **, void **); -- cgit v1.2.3-71-gd317 From 7479c505b4ab5ed5f81f35fdd68c44c58d6f0439 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 Apr 2022 08:54:32 -0400 Subject: fs: Convert iomap_readpage to iomap_read_folio A straightforward conversion as iomap_readpage already worked in folios. Signed-off-by: Matthew Wilcox (Oracle) --- fs/erofs/data.c | 6 +++--- fs/gfs2/aops.c | 3 ++- fs/iomap/buffered-io.c | 12 +++++------- fs/xfs/xfs_aops.c | 8 ++++---- fs/zonefs/super.c | 6 +++--- include/linux/iomap.h | 2 +- 6 files changed, 18 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 780db1e5f4b7..2edca5669578 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -337,9 +337,9 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, * since we dont have write or truncate flows, so no inode * locking needs to be held at the moment. 
*/ -static int erofs_readpage(struct file *file, struct page *page) +static int erofs_read_folio(struct file *file, struct folio *folio) { - return iomap_readpage(page, &erofs_iomap_ops); + return iomap_read_folio(folio, &erofs_iomap_ops); } static void erofs_readahead(struct readahead_control *rac) @@ -394,7 +394,7 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) /* for uncompressed (aligned) files and raw access for other files */ const struct address_space_operations erofs_raw_access_aops = { - .readpage = erofs_readpage, + .read_folio = erofs_read_folio, .readahead = erofs_readahead, .bmap = erofs_bmap, .direct_IO = noop_direct_IO, diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 72c9f31ce724..a29eb1e5bfe2 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -467,6 +467,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) static int __gfs2_readpage(void *file, struct page *page) { + struct folio *folio = page_folio(page); struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); @@ -474,7 +475,7 @@ static int __gfs2_readpage(void *file, struct page *page) if (!gfs2_is_jdata(ip) || (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) { - error = iomap_readpage(page, &gfs2_iomap_ops); + error = iomap_read_folio(folio, &gfs2_iomap_ops); } else if (gfs2_is_stuffed(ip)) { error = stuffed_readpage(ip, page); unlock_page(page); diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 8ce8720093b9..72f63d719c7c 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -320,10 +320,8 @@ done: return pos - orig_pos + plen; } -int -iomap_readpage(struct page *page, const struct iomap_ops *ops) +int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops) { - struct folio *folio = page_folio(page); struct iomap_iter iter = { .inode = folio->mapping->host, .pos = folio_pos(folio), @@ -352,12 +350,12 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops) /* * Just like mpage_readahead and block_read_full_page, we always - * return 0 and just mark the page as PageError on errors. This + * return 0 and just set the folio error flag on errors. This * should be cleaned up throughout the stack eventually. */ return 0; } -EXPORT_SYMBOL_GPL(iomap_readpage); +EXPORT_SYMBOL_GPL(iomap_read_folio); static loff_t iomap_readahead_iter(const struct iomap_iter *iter, struct iomap_readpage_ctx *ctx) @@ -663,10 +661,10 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len, /* * The blocks that were entirely written will now be uptodate, so we - * don't have to worry about a readpage reading them and overwriting a + * don't have to worry about a read_folio reading them and overwriting a * partial write. However, if we've encountered a short write and only * partially written into a block, it will not be marked uptodate, so a - * readpage might come in and destroy our partial write. + * read_folio might come in and destroy our partial write. 
* * Do the simplest thing and just treat any short write to a * non-uptodate page as a zero-length write, and force the caller to diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 90b7f4d127de..a9c4bb500d53 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -538,11 +538,11 @@ xfs_vm_bmap( } STATIC int -xfs_vm_readpage( +xfs_vm_read_folio( struct file *unused, - struct page *page) + struct folio *folio) { - return iomap_readpage(page, &xfs_read_iomap_ops); + return iomap_read_folio(folio, &xfs_read_iomap_ops); } STATIC void @@ -564,7 +564,7 @@ xfs_iomap_swapfile_activate( } const struct address_space_operations xfs_address_space_operations = { - .readpage = xfs_vm_readpage, + .read_folio = xfs_vm_read_folio, .readahead = xfs_vm_readahead, .writepages = xfs_vm_writepages, .dirty_folio = filemap_dirty_folio, diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index e20e7c841489..c3a38f711b24 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -124,9 +124,9 @@ static const struct iomap_ops zonefs_iomap_ops = { .iomap_begin = zonefs_iomap_begin, }; -static int zonefs_readpage(struct file *unused, struct page *page) +static int zonefs_read_folio(struct file *unused, struct folio *folio) { - return iomap_readpage(page, &zonefs_iomap_ops); + return iomap_read_folio(folio, &zonefs_iomap_ops); } static void zonefs_readahead(struct readahead_control *rac) @@ -192,7 +192,7 @@ static int zonefs_swap_activate(struct swap_info_struct *sis, } static const struct address_space_operations zonefs_file_aops = { - .readpage = zonefs_readpage, + .read_folio = zonefs_read_folio, .readahead = zonefs_readahead, .writepage = zonefs_writepage, .writepages = zonefs_writepages, diff --git a/include/linux/iomap.h b/include/linux/iomap.h index b76f0dd149fb..5b2aa45ddda3 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -225,7 +225,7 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i) ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); -int iomap_readpage(struct page *page, const struct iomap_ops *ops); +int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); int iomap_releasepage(struct page *page, gfp_t gfp_mask); -- cgit v1.2.3-71-gd317 From 2c69e2057962b6bd76d72446453862eb59325b49 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 Apr 2022 10:40:40 -0400 Subject: fs: Convert block_read_full_page() to block_read_full_folio() This function is NOT converted to handle large folios, so include an assert that the filesystem isn't passing one in. Otherwise, use the folio functions instead of the page functions, where they exist. Convert all filesystems which use block_read_full_page(). 
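For most of the filesystems touched here the conversion is mechanical: the ->readpage aop becomes a ->read_folio method that hands the filesystem's get_block helper to block_read_full_folio(), and the .readpage entry in the address_space_operations table becomes .read_folio. As an illustrative sketch (the myfs_* names are hypothetical):

	static int myfs_read_folio(struct file *file, struct folio *folio)
	{
		/* Delegate to the buffer-head helper; large folios are not handled. */
		return block_read_full_folio(folio, myfs_get_block);
	}
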
Signed-off-by: Matthew Wilcox (Oracle) --- block/fops.c | 6 ++--- fs/adfs/inode.c | 6 ++--- fs/affs/file.c | 6 ++--- fs/befs/linuxvfs.c | 10 ++++----- fs/bfs/file.c | 6 ++--- fs/buffer.c | 53 ++++++++++++++++++++++++--------------------- fs/efs/inode.c | 8 ++++--- fs/ext4/readpage.c | 4 ++-- fs/freevxfs/vxfs_subr.c | 17 +++++++-------- fs/hfs/inode.c | 8 +++---- fs/hfsplus/inode.c | 8 +++---- fs/iomap/buffered-io.c | 2 +- fs/minix/inode.c | 6 ++--- fs/mpage.c | 10 ++++----- fs/ntfs/compress.c | 4 ++-- fs/ocfs2/aops.c | 6 ++--- fs/ocfs2/refcounttree.c | 6 +++-- fs/omfs/file.c | 6 ++--- fs/qnx4/inode.c | 7 +++--- fs/reiserfs/file.c | 2 +- fs/reiserfs/inode.c | 12 +++++----- fs/sysv/itree.c | 6 ++--- fs/ufs/inode.c | 8 +++---- include/linux/buffer_head.h | 2 +- 24 files changed, 108 insertions(+), 101 deletions(-) (limited to 'include') diff --git a/block/fops.c b/block/fops.c index 712affe56e29..06feb41d798b 100644 --- a/block/fops.c +++ b/block/fops.c @@ -387,9 +387,9 @@ static int blkdev_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, blkdev_get_block, wbc); } -static int blkdev_readpage(struct file * file, struct page * page) +static int blkdev_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, blkdev_get_block); + return block_read_full_folio(folio, blkdev_get_block); } static void blkdev_readahead(struct readahead_control *rac) @@ -425,7 +425,7 @@ static int blkdev_writepages(struct address_space *mapping, const struct address_space_operations def_blk_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = blkdev_readpage, + .read_folio = blkdev_read_folio, .readahead = blkdev_readahead, .writepage = blkdev_writepage, .write_begin = blkdev_write_begin, diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index f7959b1a2d52..ee22278b0cfc 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -38,9 +38,9 @@ static int adfs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, adfs_get_block, wbc); } -static int adfs_readpage(struct file *file, struct page *page) +static int adfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, adfs_get_block); + return block_read_full_folio(folio, adfs_get_block); } static void adfs_write_failed(struct address_space *mapping, loff_t to) @@ -75,7 +75,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) static const struct address_space_operations adfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = adfs_readpage, + .read_folio = adfs_read_folio, .writepage = adfs_writepage, .write_begin = adfs_write_begin, .write_end = generic_write_end, diff --git a/fs/affs/file.c b/fs/affs/file.c index b952f65c3f06..5da562cc7fb7 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -375,9 +375,9 @@ static int affs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, affs_get_block, wbc); } -static int affs_readpage(struct file *file, struct page *page) +static int affs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, affs_get_block); + return block_read_full_folio(folio, affs_get_block); } static void affs_write_failed(struct address_space *mapping, loff_t to) @@ -455,7 +455,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations affs_aops = { 
.dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = affs_readpage, + .read_folio = affs_read_folio, .writepage = affs_writepage, .write_begin = affs_write_begin, .write_end = affs_write_end, diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index b4b3567ac655..25350dd22cda 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -40,7 +40,7 @@ MODULE_LICENSE("GPL"); static int befs_readdir(struct file *, struct dir_context *); static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int); -static int befs_readpage(struct file *file, struct page *page); +static int befs_read_folio(struct file *file, struct folio *folio); static sector_t befs_bmap(struct address_space *mapping, sector_t block); static struct dentry *befs_lookup(struct inode *, struct dentry *, unsigned int); @@ -87,7 +87,7 @@ static const struct inode_operations befs_dir_inode_operations = { }; static const struct address_space_operations befs_aops = { - .readpage = befs_readpage, + .read_folio = befs_read_folio, .bmap = befs_bmap, }; @@ -102,16 +102,16 @@ static const struct export_operations befs_export_operations = { }; /* - * Called by generic_file_read() to read a page of data + * Called by generic_file_read() to read a folio of data * * In turn, simply calls a generic block read function and * passes it the address of befs_get_block, for mapping file * positions to disk blocks. */ static int -befs_readpage(struct file *file, struct page *page) +befs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, befs_get_block); + return block_read_full_folio(folio, befs_get_block); } static sector_t diff --git a/fs/bfs/file.c b/fs/bfs/file.c index dc97c9b8f23b..57ae5ee6deec 100644 --- a/fs/bfs/file.c +++ b/fs/bfs/file.c @@ -155,9 +155,9 @@ static int bfs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, bfs_get_block, wbc); } -static int bfs_readpage(struct file *file, struct page *page) +static int bfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, bfs_get_block); + return block_read_full_folio(folio, bfs_get_block); } static void bfs_write_failed(struct address_space *mapping, loff_t to) @@ -189,7 +189,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations bfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = bfs_readpage, + .read_folio = bfs_read_folio, .writepage = bfs_writepage, .write_begin = bfs_write_begin, .write_end = generic_write_end, diff --git a/fs/buffer.c b/fs/buffer.c index 225d03cd622d..ec0c52c8848e 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -314,7 +314,7 @@ static void decrypt_bh(struct work_struct *work) } /* - * I/O completion handler for block_read_full_page() - pages + * I/O completion handler for block_read_full_folio() - pages * which come unlocked at the end of I/O. */ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) @@ -1060,8 +1060,8 @@ __getblk_slow(struct block_device *bdev, sector_t block, * Also. When blockdev buffers are explicitly read with bread(), they * individually become uptodate. But their backing page remains not * uptodate - even if all of its buffers are uptodate. A subsequent - * block_read_full_page() against that page will discover all the uptodate - * buffers, will set the page uptodate and will perform no I/O. 
+ * block_read_full_folio() against that folio will discover all the uptodate + * buffers, will set the folio uptodate and will perform no I/O. */ /** @@ -2088,7 +2088,7 @@ static int __block_commit_write(struct inode *inode, struct page *page, /* * If this is a partial write which happened to make all buffers - * uptodate then we can optimize away a bogus readpage() for + * uptodate then we can optimize away a bogus read_folio() for * the next read(). Here we 'discover' whether the page went * uptodate as a result of this (potentially partial) write. */ @@ -2137,12 +2137,12 @@ int block_write_end(struct file *file, struct address_space *mapping, if (unlikely(copied < len)) { /* - * The buffers that were written will now be uptodate, so we - * don't have to worry about a readpage reading them and - * overwriting a partial write. However if we have encountered - * a short write and only partially written into a buffer, it - * will not be marked uptodate, so a readpage might come in and - * destroy our partial write. + * The buffers that were written will now be uptodate, so + * we don't have to worry about a read_folio reading them + * and overwriting a partial write. However if we have + * encountered a short write and only partially written + * into a buffer, it will not be marked uptodate, so a + * read_folio might come in and destroy our partial write. * * Do the simplest thing, and just treat any short write to a * non uptodate page as a zero-length write, and force the @@ -2245,26 +2245,28 @@ bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count) EXPORT_SYMBOL(block_is_partially_uptodate); /* - * Generic "read page" function for block devices that have the normal + * Generic "read_folio" function for block devices that have the normal * get_block functionality. This is most of the block device filesystems. - * Reads the page asynchronously --- the unlock_buffer() and + * Reads the folio asynchronously --- the unlock_buffer() and * set/clear_buffer_uptodate() functions propagate buffer state into the - * page struct once IO has completed. + * folio once IO has completed. 
*/ -int block_read_full_page(struct page *page, get_block_t *get_block) +int block_read_full_folio(struct folio *folio, get_block_t *get_block) { - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; sector_t iblock, lblock; struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; unsigned int blocksize, bbits; int nr, i; int fully_mapped = 1; - head = create_page_buffers(page, inode, 0); + VM_BUG_ON_FOLIO(folio_test_large(folio), folio); + + head = create_page_buffers(&folio->page, inode, 0); blocksize = head->b_size; bbits = block_size_bits(blocksize); - iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); + iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits); lblock = (i_size_read(inode)+blocksize-1) >> bbits; bh = head; nr = 0; @@ -2282,10 +2284,11 @@ int block_read_full_page(struct page *page, get_block_t *get_block) WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) - SetPageError(page); + folio_set_error(folio); } if (!buffer_mapped(bh)) { - zero_user(page, i * blocksize, blocksize); + folio_zero_range(folio, i * blocksize, + blocksize); if (!err) set_buffer_uptodate(bh); continue; @@ -2301,16 +2304,16 @@ int block_read_full_page(struct page *page, get_block_t *get_block) } while (i++, iblock++, (bh = bh->b_this_page) != head); if (fully_mapped) - SetPageMappedToDisk(page); + folio_set_mappedtodisk(folio); if (!nr) { /* - * All buffers are uptodate - we can set the page uptodate + * All buffers are uptodate - we can set the folio uptodate * as well. But not if get_block() returned an error. */ - if (!PageError(page)) - SetPageUptodate(page); - unlock_page(page); + if (!folio_test_error(folio)) + folio_mark_uptodate(folio); + folio_unlock(folio); return 0; } @@ -2335,7 +2338,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) } return 0; } -EXPORT_SYMBOL(block_read_full_page); +EXPORT_SYMBOL(block_read_full_folio); /* utility function for filesystems that need to do work on expanding * truncates. Uses filesystem pagecache writes to allow the filesystem to diff --git a/fs/efs/inode.c b/fs/efs/inode.c index 89e73a6f0d36..3ba94bb005a6 100644 --- a/fs/efs/inode.c +++ b/fs/efs/inode.c @@ -14,16 +14,18 @@ #include "efs.h" #include -static int efs_readpage(struct file *file, struct page *page) +static int efs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,efs_get_block); + return block_read_full_folio(folio, efs_get_block); } + static sector_t _efs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,efs_get_block); } + static const struct address_space_operations efs_aops = { - .readpage = efs_readpage, + .read_folio = efs_read_folio, .bmap = _efs_bmap }; diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index af491e170c4a..e02a5f14e021 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -163,7 +163,7 @@ static bool bio_post_read_required(struct bio *bio) * * The mpage code never puts partial pages into a BIO (except for end-of-file). * If a page does not map to a contiguous run of blocks then it simply falls - * back to block_read_full_page(). + * back to block_read_full_folio(). * * Why is this? 
If a page's completion depends on a number of different BIOs * which can complete in any order (or at the same time) then determining the @@ -394,7 +394,7 @@ int ext4_mpage_readpages(struct inode *inode, bio = NULL; } if (!PageUptodate(page)) - block_read_full_page(page, ext4_get_block); + block_read_full_folio(page_folio(page), ext4_get_block); else unlock_page(page); next_page: diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c index e806694d4145..6143ebab940d 100644 --- a/fs/freevxfs/vxfs_subr.c +++ b/fs/freevxfs/vxfs_subr.c @@ -38,11 +38,11 @@ #include "vxfs_extern.h" -static int vxfs_readpage(struct file *, struct page *); +static int vxfs_read_folio(struct file *, struct folio *); static sector_t vxfs_bmap(struct address_space *, sector_t); const struct address_space_operations vxfs_aops = { - .readpage = vxfs_readpage, + .read_folio = vxfs_read_folio, .bmap = vxfs_bmap, }; @@ -141,24 +141,23 @@ vxfs_getblk(struct inode *ip, sector_t iblock, } /** - * vxfs_readpage - read one page synchronously into the pagecache + * vxfs_read_folio - read one page synchronously into the pagecache * @file: file context (unused) - * @page: page frame to fill in. + * @folio: folio to fill in. * * Description: - * The vxfs_readpage routine reads @page synchronously into the + * The vxfs_read_folio routine reads @folio synchronously into the * pagecache. * * Returns: * Zero on success, else a negative error code. * * Locking status: - * @page is locked and will be unlocked. + * @folio is locked and will be unlocked. */ -static int -vxfs_readpage(struct file *file, struct page *page) +static int vxfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, vxfs_getblk); + return block_read_full_folio(folio, vxfs_getblk); } /** diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 9a26b9510da0..ba3ff9cd7cfc 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -34,9 +34,9 @@ static int hfs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, hfs_get_block, wbc); } -static int hfs_readpage(struct file *file, struct page *page) +static int hfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, hfs_get_block); + return block_read_full_folio(folio, hfs_get_block); } static void hfs_write_failed(struct address_space *mapping, loff_t to) @@ -160,7 +160,7 @@ static int hfs_writepages(struct address_space *mapping, const struct address_space_operations hfs_btree_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = hfs_readpage, + .read_folio = hfs_read_folio, .writepage = hfs_writepage, .write_begin = hfs_write_begin, .write_end = generic_write_end, @@ -171,7 +171,7 @@ const struct address_space_operations hfs_btree_aops = { const struct address_space_operations hfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = hfs_readpage, + .read_folio = hfs_read_folio, .writepage = hfs_writepage, .write_begin = hfs_write_begin, .write_end = generic_write_end, diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 905ae3660315..982b34eefec7 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -23,9 +23,9 @@ #include "hfsplus_raw.h" #include "xattr.h" -static int hfsplus_readpage(struct file *file, struct page *page) +static int hfsplus_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, hfsplus_get_block); + return block_read_full_folio(folio, hfsplus_get_block); 
} static int hfsplus_writepage(struct page *page, struct writeback_control *wbc) @@ -157,7 +157,7 @@ static int hfsplus_writepages(struct address_space *mapping, const struct address_space_operations hfsplus_btree_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = hfsplus_readpage, + .read_folio = hfsplus_read_folio, .writepage = hfsplus_writepage, .write_begin = hfsplus_write_begin, .write_end = generic_write_end, @@ -168,7 +168,7 @@ const struct address_space_operations hfsplus_btree_aops = { const struct address_space_operations hfsplus_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = hfsplus_readpage, + .read_folio = hfsplus_read_folio, .writepage = hfsplus_writepage, .write_begin = hfsplus_write_begin, .write_end = generic_write_end, diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 72f63d719c7c..75eb0c27a0e8 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -349,7 +349,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops) } /* - * Just like mpage_readahead and block_read_full_page, we always + * Just like mpage_readahead and block_read_full_folio, we always * return 0 and just set the folio error flag on errors. This * should be cleaned up throughout the stack eventually. */ diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 3add78bccedc..da8bdd1712a7 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -402,9 +402,9 @@ static int minix_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, minix_get_block, wbc); } -static int minix_readpage(struct file *file, struct page *page) +static int minix_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,minix_get_block); + return block_read_full_folio(folio, minix_get_block); } int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len) @@ -443,7 +443,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block) static const struct address_space_operations minix_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = minix_readpage, + .read_folio = minix_read_folio, .writepage = minix_writepage, .write_begin = minix_write_begin, .write_end = generic_write_end, diff --git a/fs/mpage.c b/fs/mpage.c index 1fe56f8c495f..a04439b84ae2 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -36,7 +36,7 @@ * * The mpage code never puts partial pages into a BIO (except for end-of-file). * If a page does not map to a contiguous run of blocks then it simply falls - * back to block_read_full_page(). + * back to block_read_full_folio(). * * Why is this? If a page's completion depends on a number of different BIOs * which can complete in any order (or at the same time) then determining the @@ -68,7 +68,7 @@ static struct bio *mpage_bio_submit(struct bio *bio) /* * support function for mpage_readahead. The fs supplied get_block might * return an up to date buffer. This is used to map that buffer into - * the page, which allows readpage to avoid triggering a duplicate call + * the page, which allows read_folio to avoid triggering a duplicate call * to get_block. 
* * The idea is to avoid adding buffers to pages that don't already have @@ -296,7 +296,7 @@ confused: if (args->bio) args->bio = mpage_bio_submit(args->bio); if (!PageUptodate(page)) - block_read_full_page(page, args->get_block); + block_read_full_folio(page_folio(page), args->get_block); else unlock_page(page); goto out; @@ -425,7 +425,7 @@ static void clean_buffers(struct page *page, unsigned first_unmapped) /* * we cannot drop the bh if the page is not uptodate or a concurrent - * readpage would fail to serialize with the bh and it would read from + * read_folio would fail to serialize with the bh and it would read from * disk before we reach the platter. */ if (buffer_heads_over_limit && PageUptodate(page)) @@ -510,7 +510,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, /* * Page has buffers, but they are all unmapped. The page was * created by pagein or read over a hole which was handled by - * block_read_full_page(). If this address_space is also + * block_read_full_folio(). If this address_space is also * using mpage_readahead then this can rarely happen. */ goto confused; diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c index d2f9d6a0ee32..a60f543e7557 100644 --- a/fs/ntfs/compress.c +++ b/fs/ntfs/compress.c @@ -780,12 +780,12 @@ lock_retry_remap: /* Uncompressed cb, copy it to the destination pages. */ /* * TODO: As a big optimization, we could detect this case - * before we read all the pages and use block_read_full_page() + * before we read all the pages and use block_read_full_folio() * on all full pages instead (we still have to treat partial * pages especially but at least we are getting rid of the * synchronous io for the majority of pages. * Or if we choose not to do the read-ahead/-behind stuff, we - * could just return block_read_full_page(pages[xpage]) as long + * could just return block_read_full_folio(pages[xpage]) as long * as PAGE_SIZE <= cb_size. */ if (cb_max_ofs) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 7cffe9dcad17..7bf4b6fd93bf 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -309,7 +309,7 @@ static int ocfs2_readpage(struct file *file, struct page *page) /* * i_size might have just been updated as we grabed the meta lock. We * might now be discovering a truncate that hit on another node. - * block_read_full_page->get_block freaks out if it is asked to read + * block_read_full_folio->get_block freaks out if it is asked to read * beyond the end of a file, so we check here. Callers * (generic_file_read, vm_ops->fault) are clever enough to check i_size * and notice that the page they just read isn't needed. @@ -326,7 +326,7 @@ static int ocfs2_readpage(struct file *file, struct page *page) if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) ret = ocfs2_readpage_inline(inode, page); else - ret = block_read_full_page(page, ocfs2_get_block); + ret = block_read_full_folio(page_folio(page), ocfs2_get_block); unlock = 0; out_alloc: @@ -1897,7 +1897,7 @@ static int ocfs2_write_begin(struct file *file, struct address_space *mapping, /* * Take alloc sem here to prevent concurrent lookups. That way * the mapping, zeroing and tree manipulation within - * ocfs2_write() will be safe against ->readpage(). This + * ocfs2_write() will be safe against ->read_folio(). This * should also serve to lock out allocation from a shared * writeable region. 
*/ diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 7f6355cbb587..e04358a46b68 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2961,12 +2961,14 @@ retry: } if (!PageUptodate(page)) { - ret = block_read_full_page(page, ocfs2_get_block); + struct folio *folio = page_folio(page); + + ret = block_read_full_folio(folio, ocfs2_get_block); if (ret) { mlog_errno(ret); goto unlock; } - lock_page(page); + folio_lock(folio); } if (page_has_buffers(page)) { diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 980b0a72c172..fa7fe2393ff6 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -284,9 +284,9 @@ out: return ret; } -static int omfs_readpage(struct file *file, struct page *page) +static int omfs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page, omfs_get_block); + return block_read_full_folio(folio, omfs_get_block); } static void omfs_readahead(struct readahead_control *rac) @@ -373,7 +373,7 @@ const struct inode_operations omfs_file_inops = { const struct address_space_operations omfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = omfs_readpage, + .read_folio = omfs_read_folio, .readahead = omfs_readahead, .writepage = omfs_writepage, .writepages = omfs_writepages, diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index a635bb6615e9..391ea402920d 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -245,17 +245,18 @@ static void qnx4_kill_sb(struct super_block *sb) } } -static int qnx4_readpage(struct file *file, struct page *page) +static int qnx4_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,qnx4_get_block); + return block_read_full_folio(folio, qnx4_get_block); } static sector_t qnx4_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,qnx4_get_block); } + static const struct address_space_operations qnx4_aops = { - .readpage = qnx4_readpage, + .read_folio = qnx4_read_folio, .bmap = qnx4_bmap }; diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 203a47232707..6e228bfbe7ef 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -227,7 +227,7 @@ drop_write_lock: } /* * If this is a partial write which happened to make all buffers - * uptodate then we can optimize away a bogus readpage() for + * uptodate then we can optimize away a bogus read_folio() for * the next read(). Here we 'discover' whether the page went * uptodate as a result of this (potentially partial) write. */ diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 46ba4892030a..33a9555f77b9 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -167,10 +167,10 @@ inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key, * cutting the code is fine, since it really isn't in use yet and is easy * to add back in. But, Vladimir has a really good idea here. Think * about what happens for reading a file. For each page, - * The VFS layer calls reiserfs_readpage, who searches the tree to find + * The VFS layer calls reiserfs_read_folio, who searches the tree to find * an indirect item. This indirect item has X number of pointers, where * X is a big number if we've done the block allocation right. But, - * we only use one or two of these pointers during each call to readpage, + * we only use one or two of these pointers during each call to read_folio, * needlessly researching again later on. * * The size of the cache could be dynamic based on the size of the file. 
@@ -966,7 +966,7 @@ research: * it is important the set_buffer_uptodate is done * after the direct2indirect. The buffer might * contain valid data newer than the data on disk - * (read by readpage, changed, and then sent here by + * (read by read_folio, changed, and then sent here by * writepage). direct2indirect needs to know if unbh * was already up to date, so it can decide if the * data in unbh needs to be replaced with data from @@ -2733,9 +2733,9 @@ fail: goto done; } -static int reiserfs_readpage(struct file *f, struct page *page) +static int reiserfs_read_folio(struct file *f, struct folio *folio) { - return block_read_full_page(page, reiserfs_get_block); + return block_read_full_folio(folio, reiserfs_get_block); } static int reiserfs_writepage(struct page *page, struct writeback_control *wbc) @@ -3421,7 +3421,7 @@ out: const struct address_space_operations reiserfs_address_space_operations = { .writepage = reiserfs_writepage, - .readpage = reiserfs_readpage, + .read_folio = reiserfs_read_folio, .readahead = reiserfs_readahead, .releasepage = reiserfs_releasepage, .invalidate_folio = reiserfs_invalidate_folio, diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 96ad24fe0ffb..d4ec9bb97de9 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -456,9 +456,9 @@ static int sysv_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page,get_block,wbc); } -static int sysv_readpage(struct file *file, struct page *page) +static int sysv_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,get_block); + return block_read_full_folio(folio, get_block); } int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len) @@ -497,7 +497,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations sysv_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = sysv_readpage, + .read_folio = sysv_read_folio, .writepage = sysv_writepage, .write_begin = sysv_write_begin, .write_end = generic_write_end, diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 6c973b71cab2..a873de7dec1c 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -390,7 +390,7 @@ out: /** * ufs_getfrag_block() - `get_block_t' function, interface between UFS and - * readpage, writepage and so on + * read_folio, writepage and so on */ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) @@ -472,9 +472,9 @@ static int ufs_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page,ufs_getfrag_block,wbc); } -static int ufs_readpage(struct file *file, struct page *page) +static int ufs_read_folio(struct file *file, struct folio *folio) { - return block_read_full_page(page,ufs_getfrag_block); + return block_read_full_folio(folio, ufs_getfrag_block); } int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len) @@ -527,7 +527,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block) const struct address_space_operations ufs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = ufs_readpage, + .read_folio = ufs_read_folio, .writepage = ufs_writepage, .write_begin = ufs_write_begin, .write_end = ufs_write_end, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 805c4e12700a..31d82fd9abe8 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -223,7 +223,7 @@ int 
block_write_full_page(struct page *page, get_block_t *get_block, int __block_write_full_page(struct inode *inode, struct page *page, get_block_t *get_block, struct writeback_control *wbc, bh_end_io_t *handler); -int block_read_full_page(struct page*, get_block_t*); +int block_read_full_folio(struct folio *, get_block_t *); bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, get_block_t *get_block); -- cgit v1.2.3-71-gd317 From f132ab7d3ab03c5bae28d31fb80ba77c4da05500 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 Apr 2022 11:47:39 -0400 Subject: fs: Convert mpage_readpage to mpage_read_folio mpage_readpage still works in terms of pages, and has not been audited for correctness with large folios, so include an assertion that the filesystem is not passing it large folios. Convert all the filesystems to call mpage_read_folio() instead of mpage_readpage(). Signed-off-by: Matthew Wilcox (Oracle) --- fs/exfat/inode.c | 6 +++--- fs/ext2/inode.c | 8 ++++---- fs/fat/inode.c | 6 +++--- fs/gfs2/aops.c | 15 +++++++-------- fs/hpfs/file.c | 6 +++--- fs/iomap/buffered-io.c | 2 +- fs/isofs/inode.c | 6 +++--- fs/jfs/inode.c | 6 +++--- fs/mpage.c | 8 +++++--- fs/nilfs2/inode.c | 10 +++++----- fs/ntfs3/inode.c | 9 +++++---- fs/qnx6/inode.c | 6 +++--- fs/udf/inode.c | 6 +++--- include/linux/mpage.h | 2 +- 14 files changed, 49 insertions(+), 47 deletions(-) (limited to 'include') diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index b9f63113db2d..0133d385d8e8 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -357,9 +357,9 @@ unlock_ret: return err; } -static int exfat_readpage(struct file *file, struct page *page) +static int exfat_read_folio(struct file *file, struct folio *folio) { - return mpage_readpage(page, exfat_get_block); + return mpage_read_folio(folio, exfat_get_block); } static void exfat_readahead(struct readahead_control *rac) @@ -492,7 +492,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from) static const struct address_space_operations exfat_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = exfat_readpage, + .read_folio = exfat_read_folio, .readahead = exfat_readahead, .writepage = exfat_writepage, .writepages = exfat_writepages, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index d8ca8050945a..9e1ecd89f47f 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -875,9 +875,9 @@ static int ext2_writepage(struct page *page, struct writeback_control *wbc) return block_write_full_page(page, ext2_get_block, wbc); } -static int ext2_readpage(struct file *file, struct page *page) +static int ext2_read_folio(struct file *file, struct folio *folio) { - return mpage_readpage(page, ext2_get_block); + return mpage_read_folio(folio, ext2_get_block); } static void ext2_readahead(struct readahead_control *rac) @@ -966,7 +966,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc const struct address_space_operations ext2_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = ext2_readpage, + .read_folio = ext2_read_folio, .readahead = ext2_readahead, .writepage = ext2_writepage, .write_begin = ext2_write_begin, @@ -982,7 +982,7 @@ const struct address_space_operations ext2_aops = { const struct address_space_operations ext2_nobh_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, 
- .readpage = ext2_readpage, + .read_folio = ext2_read_folio, .readahead = ext2_readahead, .writepage = ext2_nobh_writepage, .write_begin = ext2_nobh_write_begin, diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 1f15b0fd1bb0..8a81017f8d60 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -205,9 +205,9 @@ static int fat_writepages(struct address_space *mapping, return mpage_writepages(mapping, wbc, fat_get_block); } -static int fat_readpage(struct file *file, struct page *page) +static int fat_read_folio(struct file *file, struct folio *folio) { - return mpage_readpage(page, fat_get_block); + return mpage_read_folio(folio, fat_get_block); } static void fat_readahead(struct readahead_control *rac) @@ -344,7 +344,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from) static const struct address_space_operations fat_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = fat_readpage, + .read_folio = fat_read_folio, .readahead = fat_readahead, .writepage = fat_writepage, .writepages = fat_writepages, diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index a29eb1e5bfe2..340bf5d0e835 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -480,7 +480,7 @@ static int __gfs2_readpage(void *file, struct page *page) error = stuffed_readpage(ip, page); unlock_page(page); } else { - error = mpage_readpage(page, gfs2_block_map); + error = mpage_read_folio(folio, gfs2_block_map); } if (unlikely(gfs2_withdrawn(sdp))) @@ -490,14 +490,13 @@ static int __gfs2_readpage(void *file, struct page *page) } /** - * gfs2_readpage - read a page of a file + * gfs2_read_folio - read a folio from a file * @file: The file to read - * @page: The page of the file + * @folio: The folio in the file */ - -static int gfs2_readpage(struct file *file, struct page *page) +static int gfs2_read_folio(struct file *file, struct folio *folio) { - return __gfs2_readpage(file, page); + return __gfs2_readpage(file, &folio->page); } /** @@ -773,7 +772,7 @@ cannot_release: static const struct address_space_operations gfs2_aops = { .writepage = gfs2_writepage, .writepages = gfs2_writepages, - .readpage = gfs2_readpage, + .read_folio = gfs2_read_folio, .readahead = gfs2_readahead, .dirty_folio = filemap_dirty_folio, .releasepage = iomap_releasepage, @@ -788,7 +787,7 @@ static const struct address_space_operations gfs2_aops = { static const struct address_space_operations gfs2_jdata_aops = { .writepage = gfs2_jdata_writepage, .writepages = gfs2_jdata_writepages, - .readpage = gfs2_readpage, + .read_folio = gfs2_read_folio, .readahead = gfs2_readahead, .dirty_folio = jdata_dirty_folio, .bmap = gfs2_bmap, diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 8b590b3826c3..f7547a62c81f 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -158,9 +158,9 @@ static const struct iomap_ops hpfs_iomap_ops = { .iomap_begin = hpfs_iomap_begin, }; -static int hpfs_readpage(struct file *file, struct page *page) +static int hpfs_read_folio(struct file *file, struct folio *folio) { - return mpage_readpage(page, hpfs_get_block); + return mpage_read_folio(folio, hpfs_get_block); } static int hpfs_writepage(struct page *page, struct writeback_control *wbc) @@ -247,7 +247,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, const struct address_space_operations hpfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = hpfs_readpage, + .read_folio = hpfs_read_folio, .writepage = hpfs_writepage, .readahead = hpfs_readahead, .writepages = 
hpfs_writepages, diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 75eb0c27a0e8..2de087ac87b6 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -297,7 +297,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, /* * If the bio_alloc fails, try it again for a single page to * avoid having to deal with partial page reads. This emulates - * what do_mpage_readpage does. + * what do_mpage_read_folio does. */ if (!ctx->bio) { ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index d7491692aea3..88bf20303466 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -1174,9 +1174,9 @@ struct buffer_head *isofs_bread(struct inode *inode, sector_t block) return sb_bread(inode->i_sb, blknr); } -static int isofs_readpage(struct file *file, struct page *page) +static int isofs_read_folio(struct file *file, struct folio *folio) { - return mpage_readpage(page, isofs_get_block); + return mpage_read_folio(folio, isofs_get_block); } static void isofs_readahead(struct readahead_control *rac) @@ -1190,7 +1190,7 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block) } static const struct address_space_operations isofs_aops = { - .readpage = isofs_readpage, + .read_folio = isofs_read_folio, .readahead = isofs_readahead, .bmap = _isofs_bmap }; diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index aa9f112107b2..a5dd7e53754a 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -293,9 +293,9 @@ static int jfs_writepages(struct address_space *mapping, return mpage_writepages(mapping, wbc, jfs_get_block); } -static int jfs_readpage(struct file *file, struct page *page) +static int jfs_read_folio(struct file *file, struct folio *folio) { - return mpage_readpage(page, jfs_get_block); + return mpage_read_folio(folio, jfs_get_block); } static void jfs_readahead(struct readahead_control *rac) @@ -359,7 +359,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) const struct address_space_operations jfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = jfs_readpage, + .read_folio = jfs_read_folio, .readahead = jfs_readahead, .writepage = jfs_writepage, .writepages = jfs_writepages, diff --git a/fs/mpage.c b/fs/mpage.c index a04439b84ae2..6df9c3aa5728 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -364,20 +364,22 @@ EXPORT_SYMBOL(mpage_readahead); /* * This isn't called much at all */ -int mpage_readpage(struct page *page, get_block_t get_block) +int mpage_read_folio(struct folio *folio, get_block_t get_block) { struct mpage_readpage_args args = { - .page = page, + .page = &folio->page, .nr_pages = 1, .get_block = get_block, }; + VM_BUG_ON_FOLIO(folio_test_large(folio), folio); + args.bio = do_mpage_readpage(&args); if (args.bio) mpage_bio_submit(args.bio); return 0; } -EXPORT_SYMBOL(mpage_readpage); +EXPORT_SYMBOL(mpage_read_folio); /* * Writing is not so simple. diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 02297ec8dc55..26b8065401b0 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -140,14 +140,14 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, } /** - * nilfs_readpage() - implement readpage() method of nilfs_aops {} + * nilfs_read_folio() - implement read_folio() method of nilfs_aops {} * address_space_operations. 
* @file - file struct of the file to be read - * @page - the page to be read + * @folio - the folio to be read */ -static int nilfs_readpage(struct file *file, struct page *page) +static int nilfs_read_folio(struct file *file, struct folio *folio) { - return mpage_readpage(page, nilfs_get_block); + return mpage_read_folio(folio, nilfs_get_block); } static void nilfs_readahead(struct readahead_control *rac) @@ -298,7 +298,7 @@ nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) const struct address_space_operations nilfs_aops = { .writepage = nilfs_writepage, - .readpage = nilfs_readpage, + .read_folio = nilfs_read_folio, .writepages = nilfs_writepages, .dirty_folio = nilfs_dirty_folio, .readahead = nilfs_readahead, diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index bfd71f384e21..74f60c457f28 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -676,8 +676,9 @@ static sector_t ntfs_bmap(struct address_space *mapping, sector_t block) return generic_block_bmap(mapping, block, ntfs_get_block_bmap); } -static int ntfs_readpage(struct file *file, struct page *page) +static int ntfs_read_folio(struct file *file, struct folio *folio) { + struct page *page = &folio->page; int err; struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; @@ -701,7 +702,7 @@ static int ntfs_readpage(struct file *file, struct page *page) } /* Normal + sparse files. */ - return mpage_readpage(page, ntfs_get_block); + return mpage_read_folio(folio, ntfs_get_block); } static void ntfs_readahead(struct readahead_control *rac) @@ -1940,7 +1941,7 @@ const struct inode_operations ntfs_link_inode_operations = { }; const struct address_space_operations ntfs_aops = { - .readpage = ntfs_readpage, + .read_folio = ntfs_read_folio, .readahead = ntfs_readahead, .writepage = ntfs_writepage, .writepages = ntfs_writepages, @@ -1952,7 +1953,7 @@ const struct address_space_operations ntfs_aops = { }; const struct address_space_operations ntfs_aops_cmpr = { - .readpage = ntfs_readpage, + .read_folio = ntfs_read_folio, .readahead = ntfs_readahead, }; // clang-format on diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c index 9d8e7e9788a1..b9895afca9d1 100644 --- a/fs/qnx6/inode.c +++ b/fs/qnx6/inode.c @@ -94,9 +94,9 @@ static int qnx6_check_blockptr(__fs32 ptr) return 1; } -static int qnx6_readpage(struct file *file, struct page *page) +static int qnx6_read_folio(struct file *file, struct folio *folio) { - return mpage_readpage(page, qnx6_get_block); + return mpage_read_folio(folio, qnx6_get_block); } static void qnx6_readahead(struct readahead_control *rac) @@ -496,7 +496,7 @@ static sector_t qnx6_bmap(struct address_space *mapping, sector_t block) return generic_block_bmap(mapping, block, qnx6_get_block); } static const struct address_space_operations qnx6_aops = { - .readpage = qnx6_readpage, + .read_folio = qnx6_read_folio, .readahead = qnx6_readahead, .bmap = qnx6_bmap }; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 866f9a53248e..edc88716751a 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -193,9 +193,9 @@ static int udf_writepages(struct address_space *mapping, return mpage_writepages(mapping, wbc, udf_get_block); } -static int udf_readpage(struct file *file, struct page *page) +static int udf_read_folio(struct file *file, struct folio *folio) { - return mpage_readpage(page, udf_get_block); + return mpage_read_folio(folio, udf_get_block); } static void udf_readahead(struct readahead_control *rac) @@ -237,7 +237,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block) 
const struct address_space_operations udf_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .readpage = udf_readpage, + .read_folio = udf_read_folio, .readahead = udf_readahead, .writepage = udf_writepage, .writepages = udf_writepages, diff --git a/include/linux/mpage.h b/include/linux/mpage.h index f4f5e90a6844..43986f7ec4dd 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h @@ -16,7 +16,7 @@ struct writeback_control; struct readahead_control; void mpage_readahead(struct readahead_control *, get_block_t get_block); -int mpage_readpage(struct page *page, get_block_t get_block); +int mpage_read_folio(struct folio *folio, get_block_t get_block); int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block); int mpage_writepage(struct page *page, get_block_t *get_block, -- cgit v1.2.3-71-gd317 From 65d023af7f29eb1250a6105141a74776bae7e1f8 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 Apr 2022 11:12:16 -0400 Subject: nfs: Convert nfs to read_folio This is a "weak" conversion which converts straight back to using pages. A full conversion should be performed at some point, hopefully by someone familiar with the filesystem. Signed-off-by: Matthew Wilcox (Oracle) --- fs/nfs/file.c | 4 ++-- fs/nfs/read.c | 3 ++- include/linux/nfs_fs.h | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f05c4b18b681..4f6d1f90b87f 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -337,7 +337,7 @@ start: } else if (!once_thru && nfs_want_read_modify_write(file, page, pos, len)) { once_thru = 1; - ret = nfs_readpage(file, page); + ret = nfs_read_folio(file, page_folio(page)); put_page(page); if (!ret) goto start; @@ -514,7 +514,7 @@ static void nfs_swap_deactivate(struct file *file) } const struct address_space_operations nfs_file_aops = { - .readpage = nfs_readpage, + .read_folio = nfs_read_folio, .readahead = nfs_readahead, .dirty_folio = filemap_dirty_folio, .writepage = nfs_writepage, diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 5e7657374bc3..5a9b043662e9 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -333,8 +333,9 @@ out: * - The error flag is set for this page. This happens only when a * previous async read operation failed. */ -int nfs_readpage(struct file *file, struct page *page) +int nfs_read_folio(struct file *file, struct folio *folio) { + struct page *page = &folio->page; struct nfs_readdesc desc; struct inode *inode = page_file_mapping(page)->host; int ret; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b48b9259e02c..1bba71757d62 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -594,7 +594,7 @@ static inline bool nfs_have_writebacks(const struct inode *inode) /* * linux/fs/nfs/read.c */ -extern int nfs_readpage(struct file *, struct page *); +int nfs_read_folio(struct file *, struct folio *); void nfs_readahead(struct readahead_control *); /* -- cgit v1.2.3-71-gd317 From 7e0a126519b82648b254afcd95a168c15f65ea40 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 Apr 2022 11:53:28 -0400 Subject: mm,fs: Remove aops->readpage With all implementations of aops->readpage converted to aops->read_folio, we can stop checking whether it's set and remove the member from aops. 
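
An illustrative sketch (not part of the patch that follows): with ->readpage removed, a block-based filesystem wires up only ->read_folio. The names example_read_folio, example_get_block and example_aops below are hypothetical stand-ins for a filesystem's own symbols, assuming the usual buffer-head helpers.

	/* Minimal ->read_folio using the buffer layer; the folio is
	 * unlocked for us when the read completes. */
	static int example_read_folio(struct file *file, struct folio *folio)
	{
		return block_read_full_folio(folio, example_get_block);
	}

	static const struct address_space_operations example_aops = {
		.read_folio	= example_read_folio,
		/* writepage, write_begin, ... omitted for brevity */
	};
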
Signed-off-by: Matthew Wilcox (Oracle) --- fs/btrfs/file.c | 2 +- fs/buffer.c | 5 +---- fs/ceph/addr.c | 2 +- include/linux/fs.h | 3 +-- kernel/events/uprobes.c | 5 ++--- mm/filemap.c | 15 +++++---------- mm/memory.c | 4 ++-- mm/readahead.c | 12 ++++-------- mm/shmem.c | 2 +- mm/swapfile.c | 2 +- 10 files changed, 19 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 373df5ebaf8d..57fba5abb059 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2402,7 +2402,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) { struct address_space *mapping = filp->f_mapping; - if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio) + if (!mapping->a_ops->read_folio) return -ENOEXEC; file_accessed(filp); diff --git a/fs/buffer.c b/fs/buffer.c index ec0c52c8848e..786ef5b98c80 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2827,10 +2827,7 @@ int nobh_truncate_page(struct address_space *mapping, /* Ok, it's mapped. Make sure it's up-to-date */ if (!folio_test_uptodate(folio)) { - if (mapping->a_ops->read_folio) - err = mapping->a_ops->read_folio(NULL, folio); - else - err = mapping->a_ops->readpage(NULL, &folio->page); + err = mapping->a_ops->read_folio(NULL, folio); if (err) { folio_put(folio); goto out; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index be3e47784f08..e040b92bb17c 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1772,7 +1772,7 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma) { struct address_space *mapping = file->f_mapping; - if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio) + if (!mapping->a_ops->read_folio) return -ENOEXEC; file_accessed(file); vma->vm_ops = &ceph_vmops; diff --git a/include/linux/fs.h b/include/linux/fs.h index 5ad942183a2c..f812f5aa07dd 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -262,7 +262,7 @@ struct iattr { * trying again. The aop will be taking reasonable * precautions not to livelock. If the caller held a page * reference, it should drop it before retrying. Returned - * by readpage(). + * by read_folio(). * * address_space_operation functions return these large constants to indicate * special semantics to the caller. These are much larger than the bytes in a @@ -335,7 +335,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb) struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); - int (*readpage)(struct file *, struct page *); int (*read_folio)(struct file *, struct folio *); /* Write back some dirty pages from this mapping. */ diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 2c7815d20038..a9bc3c98f76a 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -787,10 +787,10 @@ static int __copy_insn(struct address_space *mapping, struct file *filp, struct page *page; /* * Ensure that the page that has the original instruction is populated - * and in page-cache. If ->readpage == NULL it must be shmem_mapping(), + * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(), * see uprobe_register(). 
*/ - if (mapping->a_ops->read_folio || mapping->a_ops->readpage) + if (mapping->a_ops->read_folio) page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp); else page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); @@ -1144,7 +1144,6 @@ static int __uprobe_register(struct inode *inode, loff_t offset, /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */ if (!inode->i_mapping->a_ops->read_folio && - !inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping)) return -EIO; /* Racy, just to catch the obvious mistakes */ diff --git a/mm/filemap.c b/mm/filemap.c index 96e3d7ffd98e..079f8cca7959 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2414,15 +2414,12 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping, /* * A previous I/O error may have been due to temporary failures, - * eg. multipath errors. PG_error will be set again if readpage + * eg. multipath errors. PG_error will be set again if read_folio * fails. */ folio_clear_error(folio); /* Start the actual read. The read will unlock the page. */ - if (mapping->a_ops->read_folio) - error = mapping->a_ops->read_folio(file, folio); - else - error = mapping->a_ops->readpage(file, &folio->page); + error = mapping->a_ops->read_folio(file, folio); if (error) return error; @@ -2639,7 +2636,7 @@ err: * @already_read: Number of bytes already read by the caller. * * Copies data from the page cache. If the data is not currently present, - * uses the readahead and readpage address_space operations to fetch it. + * uses the readahead and read_folio address_space operations to fetch it. * * Return: Total number of bytes copied, including those already read by * the caller. If an error happens before any bytes are copied, returns @@ -3450,7 +3447,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma) { struct address_space *mapping = file->f_mapping; - if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) + if (!mapping->a_ops->read_folio) return -ENOEXEC; file_accessed(file); vma->vm_ops = &generic_file_vm_ops; @@ -3508,10 +3505,8 @@ repeat: filler: if (filler) err = filler(data, &folio->page); - else if (mapping->a_ops->read_folio) - err = mapping->a_ops->read_folio(data, folio); else - err = mapping->a_ops->readpage(data, &folio->page); + err = mapping->a_ops->read_folio(data, folio); if (err < 0) { folio_put(folio); diff --git a/mm/memory.c b/mm/memory.c index 76e3af9639d9..2a12028a3749 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -555,11 +555,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, dump_page(page, "bad pte"); pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); - pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n", + pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n", vma->vm_file, vma->vm_ops ? vma->vm_ops->fault : NULL, vma->vm_file ? vma->vm_file->f_op->mmap : NULL, - mapping ? mapping->a_ops->readpage : NULL); + mapping ? 
mapping->a_ops->read_folio : NULL); dump_stack(); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } diff --git a/mm/readahead.c b/mm/readahead.c index 76024c20a5a5..39983a3a93f0 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -170,12 +170,9 @@ static void read_pages(struct readahead_control *rac) } folio_unlock(folio); } - } else if (aops->read_folio) { - while ((folio = readahead_folio(rac)) != NULL) - aops->read_folio(rac->file, folio); } else { while ((folio = readahead_folio(rac)) != NULL) - aops->readpage(rac->file, &folio->page); + aops->read_folio(rac->file, folio); } blk_finish_plug(&plug); @@ -256,8 +253,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, } /* - * Now start the IO. We ignore I/O errors - if the page is not - * uptodate then the caller will launch readpage again, and + * Now start the IO. We ignore I/O errors - if the folio is not + * uptodate then the caller will launch read_folio again, and * will then handle the error. */ read_pages(ractl); @@ -305,8 +302,7 @@ void force_page_cache_ra(struct readahead_control *ractl, struct backing_dev_info *bdi = inode_to_bdi(mapping->host); unsigned long max_pages, index; - if (unlikely(!mapping->a_ops->read_folio && - !mapping->a_ops->readpage && !mapping->a_ops->readahead)) + if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead)) return; /* diff --git a/mm/shmem.c b/mm/shmem.c index 0f557a512171..f3e8de8ff75c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -4162,7 +4162,7 @@ int shmem_zero_setup(struct vm_area_struct *vma) * * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", * with any new page allocations done using the specified allocation flags. - * But read_cache_page_gfp() uses the ->readpage() method: which does not + * But read_cache_page_gfp() uses the ->read_folio() method: which does not * suit tmpfs, since it may have pages in swapcache, and needs to find those * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. * diff --git a/mm/swapfile.c b/mm/swapfile.c index 7c19098b8b45..ecd45bdbad9b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3041,7 +3041,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) /* * Read the swap header. */ - if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) { + if (!mapping->a_ops->read_folio) { error = -EINVAL; goto bad_swap_unlock_inode; } -- cgit v1.2.3-71-gd317 From e9b5b23e957ef9260fec811d8d8081125889308a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 1 May 2022 21:39:29 -0400 Subject: fs: Change the type of filler_t By making filler_t the same as read_folio, we can use the same function for both in gfs2. We can push the use of folios down one more level in jffs2 and nfs. We also increase type safety for future users of the various read_cache_page() family of functions by forcing the parameter to be a pointer to struct file (or NULL). 
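
An illustrative sketch (not part of the patch that follows): with the new prototype, a private filler looks exactly like a ->read_folio implementation, and read_cache_page() callers pass a struct file pointer (or NULL) rather than an opaque cookie. The names example_filler, example_fill_contents and example_read are hypothetical.

	/* A filler now has the same prototype as ->read_folio().  Like a
	 * ->read_folio implementation, it must unlock the folio when the
	 * contents are (or fail to become) uptodate. */
	static int example_filler(struct file *file, struct folio *folio)
	{
		int err = example_fill_contents(folio->mapping->host, folio);

		folio_unlock(folio);
		return err;
	}

	static struct page *example_read(struct address_space *mapping, pgoff_t index)
	{
		/* The last argument is now a struct file * (here NULL); passing
		 * a NULL filler instead would use mapping->a_ops->read_folio(). */
		return read_cache_page(mapping, index, example_filler, NULL);
	}
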
Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Reviewed-by: Andreas Gruenbacher --- fs/gfs2/aops.c | 29 +++++++++++------------------ fs/jffs2/file.c | 9 ++++----- fs/jffs2/gc.c | 2 +- fs/jffs2/os-linux.h | 2 +- fs/nfs/symlink.c | 14 +++++++------- include/linux/pagemap.h | 6 +++--- mm/filemap.c | 40 ++++++++++++++++++++-------------------- 7 files changed, 47 insertions(+), 55 deletions(-) (limited to 'include') diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 340bf5d0e835..1016631bcbdc 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -464,21 +464,24 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) return 0; } - -static int __gfs2_readpage(void *file, struct page *page) +/** + * gfs2_read_folio - read a folio from a file + * @file: The file to read + * @folio: The folio in the file + */ +static int gfs2_read_folio(struct file *file, struct folio *folio) { - struct folio *folio = page_folio(page); - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); int error; if (!gfs2_is_jdata(ip) || - (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) { + (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) { error = iomap_read_folio(folio, &gfs2_iomap_ops); } else if (gfs2_is_stuffed(ip)) { - error = stuffed_readpage(ip, page); - unlock_page(page); + error = stuffed_readpage(ip, &folio->page); + folio_unlock(folio); } else { error = mpage_read_folio(folio, gfs2_block_map); } @@ -489,16 +492,6 @@ static int __gfs2_readpage(void *file, struct page *page) return error; } -/** - * gfs2_read_folio - read a folio from a file - * @file: The file to read - * @folio: The folio in the file - */ -static int gfs2_read_folio(struct file *file, struct folio *folio) -{ - return __gfs2_readpage(file, &folio->page); -} - /** * gfs2_internal_read - read an internal file * @ip: The gfs2 inode @@ -523,7 +516,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, amt = size - copied; if (offset + size > PAGE_SIZE) amt = PAGE_SIZE - offset; - page = read_cache_page(mapping, index, __gfs2_readpage, NULL); + page = read_cache_page(mapping, index, gfs2_read_folio, NULL); if (IS_ERR(page)) return PTR_ERR(page); p = kmap_atomic(page); diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 492fb2da0403..ba86acbe12d3 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -110,21 +110,20 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) return ret; } -int jffs2_do_readpage_unlock(void *data, struct page *pg) +int __jffs2_read_folio(struct file *file, struct folio *folio) { - int ret = jffs2_do_readpage_nolock(pg->mapping->host, pg); - unlock_page(pg); + int ret = jffs2_do_readpage_nolock(folio->mapping->host, &folio->page); + folio_unlock(folio); return ret; } - static int jffs2_read_folio(struct file *file, struct folio *folio) { struct jffs2_inode_info *f = JFFS2_INODE_INFO(folio->mapping->host); int ret; mutex_lock(&f->sem); - ret = jffs2_do_readpage_unlock(file, &folio->page); + ret = __jffs2_read_folio(file, folio); mutex_unlock(&f->sem); return ret; } diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index a53bac7569b6..5c6602f3c189 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c @@ -1327,7 +1327,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era * trying to write out, read_cache_page() will not deadlock. 
*/ mutex_unlock(&f->sem); page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT, - jffs2_do_readpage_unlock, NULL); + __jffs2_read_folio, NULL); if (IS_ERR(page)) { pr_warn("read_cache_page() returned error: %ld\n", PTR_ERR(page)); diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index 173eccac691d..921d782583d6 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h @@ -155,7 +155,7 @@ extern const struct file_operations jffs2_file_operations; extern const struct inode_operations jffs2_file_inode_operations; extern const struct address_space_operations jffs2_file_address_operations; int jffs2_fsync(struct file *, loff_t, loff_t, int); -int jffs2_do_readpage_unlock(void *data, struct page *pg); +int __jffs2_read_folio(struct file *file, struct folio *folio); /* ioctl.c */ long jffs2_ioctl(struct file *, unsigned int, unsigned long); diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c index 8b53538bcc75..0e27a2e4e68b 100644 --- a/fs/nfs/symlink.c +++ b/fs/nfs/symlink.c @@ -26,21 +26,21 @@ * and straight-forward than readdir caching. */ -static int nfs_symlink_filler(void *data, struct page *page) +static int nfs_symlink_filler(struct file *file, struct folio *folio) { - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; int error; - error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE); + error = NFS_PROTO(inode)->readlink(inode, &folio->page, 0, PAGE_SIZE); if (error < 0) goto error; - SetPageUptodate(page); - unlock_page(page); + folio_mark_uptodate(folio); + folio_unlock(folio); return 0; error: - SetPageError(page); - unlock_page(page); + folio_set_error(folio); + folio_unlock(folio); return -EIO; } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index b70192f56454..831b28dab01a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -492,7 +492,7 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x) return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN; } -typedef int filler_t(void *, struct page *); +typedef int filler_t(struct file *, struct folio *); pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); @@ -747,9 +747,9 @@ static inline struct page *grab_cache_page(struct address_space *mapping, } struct folio *read_cache_folio(struct address_space *, pgoff_t index, - filler_t *filler, void *data); + filler_t *filler, struct file *file); struct page *read_cache_page(struct address_space *, pgoff_t index, - filler_t *filler, void *data); + filler_t *filler, struct file *file); extern struct page * read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); diff --git a/mm/filemap.c b/mm/filemap.c index 079f8cca7959..81a0ed08a82c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3483,7 +3483,7 @@ EXPORT_SYMBOL(generic_file_mmap); EXPORT_SYMBOL(generic_file_readonly_mmap); static struct folio *do_read_cache_folio(struct address_space *mapping, - pgoff_t index, filler_t filler, void *data, gfp_t gfp) + pgoff_t index, filler_t filler, struct file *file, gfp_t gfp) { struct folio *folio; int err; @@ -3504,9 +3504,9 @@ repeat: filler: if (filler) - err = filler(data, &folio->page); + err = filler(file, folio); else - err = mapping->a_ops->read_folio(data, folio); + err = mapping->a_ops->read_folio(file, folio); if (err < 0) { folio_put(folio); @@ -3557,44 +3557,44 @@ out: } /** - * read_cache_folio - read into page cache, fill it if needed - * @mapping: the page's address_space - * @index: the page index - * @filler: 
function to perform the read - * @data: first arg to filler(data, page) function, often left as NULL - * - * Read into the page cache. If a page already exists, and PageUptodate() is - * not set, try to fill the page and wait for it to become unlocked. + * read_cache_folio - Read into page cache, fill it if needed. + * @mapping: The address_space to read from. + * @index: The index to read. + * @filler: Function to perform the read, or NULL to use aops->read_folio(). + * @file: Passed to filler function, may be NULL if not required. * - * If the page does not get brought uptodate, return -EIO. + * Read one page into the page cache. If it succeeds, the folio returned + * will contain @index, but it may not be the first page of the folio. * - * The function expects mapping->invalidate_lock to be already held. + * If the filler function returns an error, it will be returned to the + * caller. * - * Return: up to date page on success, ERR_PTR() on failure. + * Context: May sleep. Expects mapping->invalidate_lock to be held. + * Return: An uptodate folio on success, ERR_PTR() on failure. */ struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, - filler_t filler, void *data) + filler_t filler, struct file *file) { - return do_read_cache_folio(mapping, index, filler, data, + return do_read_cache_folio(mapping, index, filler, file, mapping_gfp_mask(mapping)); } EXPORT_SYMBOL(read_cache_folio); static struct page *do_read_cache_page(struct address_space *mapping, - pgoff_t index, filler_t *filler, void *data, gfp_t gfp) + pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp) { struct folio *folio; - folio = do_read_cache_folio(mapping, index, filler, data, gfp); + folio = do_read_cache_folio(mapping, index, filler, file, gfp); if (IS_ERR(folio)) return &folio->page; return folio_file_page(folio, index); } struct page *read_cache_page(struct address_space *mapping, - pgoff_t index, filler_t *filler, void *data) + pgoff_t index, filler_t *filler, struct file *file) { - return do_read_cache_page(mapping, index, filler, data, + return do_read_cache_page(mapping, index, filler, file, mapping_gfp_mask(mapping)); } EXPORT_SYMBOL(read_cache_page); -- cgit v1.2.3-71-gd317 From fa29000b6b2603ec2bfdc4c73249fcb00cd54f85 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 29 Apr 2022 17:00:05 -0400 Subject: fs: Add aops->release_folio This replaces aops->releasepage. Update the documentation, and call it if it exists. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jeff Layton --- Documentation/filesystems/caching/netfs-api.rst | 4 +-- Documentation/filesystems/locking.rst | 14 ++++---- Documentation/filesystems/vfs.rst | 45 ++++++++++++------------- include/linux/fs.h | 1 + mm/filemap.c | 2 ++ 5 files changed, 34 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/caching/netfs-api.rst b/Documentation/filesystems/caching/netfs-api.rst index 7308d76a29dc..1d18e9def183 100644 --- a/Documentation/filesystems/caching/netfs-api.rst +++ b/Documentation/filesystems/caching/netfs-api.rst @@ -433,11 +433,11 @@ has done a write and then the page it wrote from has been released by the VM, after which it *has* to look in the cache. To inform fscache that a page might now be in the cache, the following function -should be called from the ``releasepage`` address space op:: +should be called from the ``release_folio`` address space op:: void fscache_note_page_release(struct fscache_cookie *cookie); -if the page has been released (ie. 
releasepage returned true). +if the page has been released (ie. release_folio returned true). Page release and page invalidation should also wait for any mark left on the page to say that a DIO write is underway from that page:: diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index aeba2475a53c..ddef4a753e73 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -249,7 +249,7 @@ prototypes:: struct page *page, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t start, size_t len); - int (*releasepage) (struct page *, int); + bool (*release_folio)(struct folio *, gfp_t); void (*freepage)(struct page *); int (*direct_IO)(struct kiocb *, struct iov_iter *iter); bool (*isolate_page) (struct page *, isolate_mode_t); @@ -270,13 +270,13 @@ ops PageLocked(page) i_rwsem invalidate_lock writepage: yes, unlocks (see below) read_folio: yes, unlocks shared writepages: -dirty_folio maybe +dirty_folio: maybe readahead: yes, unlocks shared write_begin: locks the page exclusive write_end: yes, unlocks exclusive bmap: invalidate_folio: yes exclusive -releasepage: yes +release_folio: yes freepage: yes direct_IO: isolate_page: yes @@ -372,10 +372,10 @@ invalidate_lock before invalidating page cache in truncate / hole punch path (and thus calling into ->invalidate_folio) to block races between page cache invalidation and page cache filling functions (fault, read, ...). -->releasepage() is called when the kernel is about to try to drop the -buffers from the page in preparation for freeing it. It returns zero to -indicate that the buffers are (or may be) freeable. If ->releasepage is zero, -the kernel assumes that the fs has no private interest in the buffers. +->release_folio() is called when the kernel is about to try to drop the +buffers from the folio in preparation for freeing it. It returns false to +indicate that the buffers are (or may be) freeable. If ->release_folio is +NULL, the kernel assumes that the fs has no private interest in the buffers. ->freepage() is called when the kernel is done dropping the page from the page cache. diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 0919a4ad973a..679887b5c8fc 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -620,9 +620,9 @@ Writeback. The first can be used independently to the others. The VM can try to either write dirty pages in order to clean them, or release clean pages in order to reuse them. To do this it can call the ->writepage method -on dirty pages, and ->releasepage on clean pages with PagePrivate set. -Clean pages without PagePrivate and with no external references will be -released without notice being given to the address_space. +on dirty pages, and ->release_folio on clean folios with the private +flag set. Clean pages without PagePrivate and with no external references +will be released without notice being given to the address_space. To achieve this functionality, pages need to be placed on an LRU with lru_cache_add and mark_page_active needs to be called whenever the page @@ -734,7 +734,7 @@ cache in your filesystem. 
The following members are defined: struct page *page, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t start, size_t len); - int (*releasepage) (struct page *, int); + bool (*release_folio)(struct folio *, gfp_t); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* isolate a page for migration */ @@ -864,33 +864,32 @@ cache in your filesystem. The following members are defined: address space. This generally corresponds to either a truncation, punch hole or a complete invalidation of the address space (in the latter case 'offset' will always be 0 and 'length' - will be folio_size()). Any private data associated with the page + will be folio_size()). Any private data associated with the folio should be updated to reflect this truncation. If offset is 0 and length is folio_size(), then the private data should be - released, because the page must be able to be completely - discarded. This may be done by calling the ->releasepage + released, because the folio must be able to be completely + discarded. This may be done by calling the ->release_folio function, but in this case the release MUST succeed. -``releasepage`` - releasepage is called on PagePrivate pages to indicate that the - page should be freed if possible. ->releasepage should remove - any private data from the page and clear the PagePrivate flag. - If releasepage() fails for some reason, it must indicate failure - with a 0 return value. releasepage() is used in two distinct - though related cases. The first is when the VM finds a clean - page with no active users and wants to make it a free page. If - ->releasepage succeeds, the page will be removed from the - address_space and become free. +``release_folio`` + release_folio is called on folios with private data to tell the + filesystem that the folio is about to be freed. ->release_folio + should remove any private data from the folio and clear the + private flag. If release_folio() fails, it should return false. + release_folio() is used in two distinct though related cases. + The first is when the VM wants to free a clean folio with no + active users. If ->release_folio succeeds, the folio will be + removed from the address_space and be freed. The second case is when a request has been made to invalidate - some or all pages in an address_space. This can happen through - the fadvise(POSIX_FADV_DONTNEED) system call or by the - filesystem explicitly requesting it as nfs and 9fs do (when they + some or all folios in an address_space. This can happen + through the fadvise(POSIX_FADV_DONTNEED) system call or by the + filesystem explicitly requesting it as nfs and 9p do (when they believe the cache may be out of date with storage) by calling invalidate_inode_pages2(). If the filesystem makes such a call, - and needs to be certain that all pages are invalidated, then its - releasepage will need to ensure this. Possibly it can clear the - PageUptodate bit if it cannot free private data yet. + and needs to be certain that all folios are invalidated, then + its release_folio will need to ensure this. Possibly it can + clear the uptodate flag if it cannot free private data yet. 
``freepage`` freepage is called once the page is no longer visible in the diff --git a/include/linux/fs.h b/include/linux/fs.h index f812f5aa07dd..ad768f13f485 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -355,6 +355,7 @@ struct address_space_operations { /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t offset, size_t len); + bool (*release_folio)(struct folio *, gfp_t); int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); diff --git a/mm/filemap.c b/mm/filemap.c index 9b7fa47feb5e..78e4a7dc3a56 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3955,6 +3955,8 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp) if (folio_test_writeback(folio)) return false; + if (mapping && mapping->a_ops->release_folio) + return mapping->a_ops->release_folio(folio, gfp); if (mapping && mapping->a_ops->releasepage) return mapping->a_ops->releasepage(&folio->page, gfp); return try_to_free_buffers(&folio->page); -- cgit v1.2.3-71-gd317 From 8597447dc565a6a3fa7bc503674452b7ae2b914c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 30 Apr 2022 23:01:08 -0400 Subject: iomap: Convert to release_folio Change all the filesystems which used iomap_releasepage to use the new function. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jeff Layton --- fs/gfs2/aops.c | 2 +- fs/iomap/buffered-io.c | 22 ++++++++++------------ fs/iomap/trace.h | 2 +- fs/xfs/xfs_aops.c | 2 +- fs/zonefs/super.c | 2 +- include/linux/iomap.h | 2 +- 6 files changed, 15 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 1016631bcbdc..3d6c5c5eb4f1 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -768,7 +768,7 @@ static const struct address_space_operations gfs2_aops = { .read_folio = gfs2_read_folio, .readahead = gfs2_readahead, .dirty_folio = filemap_dirty_folio, - .releasepage = iomap_releasepage, + .release_folio = iomap_release_folio, .invalidate_folio = iomap_invalidate_folio, .bmap = gfs2_bmap, .direct_IO = noop_direct_IO, diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 2de087ac87b6..8532f0e2e2d6 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -452,25 +452,23 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count) } EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); -int -iomap_releasepage(struct page *page, gfp_t gfp_mask) +bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags) { - struct folio *folio = page_folio(page); - - trace_iomap_releasepage(folio->mapping->host, folio_pos(folio), + trace_iomap_release_folio(folio->mapping->host, folio_pos(folio), folio_size(folio)); /* - * mm accommodates an old ext3 case where clean pages might not have had - * the dirty bit cleared. Thus, it can send actual dirty pages to - * ->releasepage() via shrink_active_list(); skip those here. + * mm accommodates an old ext3 case where clean folios might + * not have had the dirty bit cleared. Thus, it can send actual + * dirty folios to ->release_folio() via shrink_active_list(); + * skip those here. 
*/ if (folio_test_dirty(folio) || folio_test_writeback(folio)) - return 0; + return false; iomap_page_release(folio); - return 1; + return true; } -EXPORT_SYMBOL_GPL(iomap_releasepage); +EXPORT_SYMBOL_GPL(iomap_release_folio); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) { @@ -1483,7 +1481,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data) * Skip the page if it's fully outside i_size, e.g. due to a * truncate operation that's in progress. We must redirty the * page so that reclaim stops reclaiming it. Otherwise - * iomap_vm_releasepage() is called on it and gets confused. + * iomap_release_folio() is called on it and gets confused. * * Note that the end_index is unsigned long. If the given * offset is greater than 16TB on a 32-bit system then if we diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h index a6689a563c6e..d48868fc40d7 100644 --- a/fs/iomap/trace.h +++ b/fs/iomap/trace.h @@ -80,7 +80,7 @@ DEFINE_EVENT(iomap_range_class, name, \ TP_PROTO(struct inode *inode, loff_t off, u64 len),\ TP_ARGS(inode, off, len)) DEFINE_RANGE_EVENT(iomap_writepage); -DEFINE_RANGE_EVENT(iomap_releasepage); +DEFINE_RANGE_EVENT(iomap_release_folio); DEFINE_RANGE_EVENT(iomap_invalidate_folio); DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail); diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index a9c4bb500d53..2acbfc6925dd 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -568,7 +568,7 @@ const struct address_space_operations xfs_address_space_operations = { .readahead = xfs_vm_readahead, .writepages = xfs_vm_writepages, .dirty_folio = filemap_dirty_folio, - .releasepage = iomap_releasepage, + .release_folio = iomap_release_folio, .invalidate_folio = iomap_invalidate_folio, .bmap = xfs_vm_bmap, .direct_IO = noop_direct_IO, diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index c3a38f711b24..b1a428f860b3 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -197,7 +197,7 @@ static const struct address_space_operations zonefs_file_aops = { .writepage = zonefs_writepage, .writepages = zonefs_writepages, .dirty_folio = filemap_dirty_folio, - .releasepage = iomap_releasepage, + .release_folio = iomap_release_folio, .invalidate_folio = iomap_invalidate_folio, .migratepage = iomap_migrate_page, .is_partially_uptodate = iomap_is_partially_uptodate, diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 5b2aa45ddda3..0d674695b6d3 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -228,7 +228,7 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); -int iomap_releasepage(struct page *page, gfp_t gfp_mask); +bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); #ifdef CONFIG_MIGRATION int iomap_migrate_page(struct address_space *mapping, struct page *newpage, -- cgit v1.2.3-71-gd317 From 704ead2bed202579f025a4754e52e9ab21ff3ada Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 1 May 2022 00:27:53 -0400 Subject: fs: Remove last vestiges of releasepage All users are now converted to release_folio Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jeff Layton --- include/linux/fs.h | 1 - include/linux/page-flags.h | 2 +- mm/filemap.c | 2 -- 3 files changed, 1 
insertion(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index ad768f13f485..1cee64d9724b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -356,7 +356,6 @@ struct address_space_operations { sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t offset, size_t len); bool (*release_folio)(struct folio *, gfp_t); - int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 9d8eeaa67d05..af10149a6c31 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -516,7 +516,7 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) /* * Private page markings that may be used by the filesystem that owns the page * for its own purposes. - * - PG_private and PG_private_2 cause releasepage() and co to be invoked + * - PG_private and PG_private_2 cause release_folio() and co to be invoked */ PAGEFLAG(Private, private, PF_ANY) PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) diff --git a/mm/filemap.c b/mm/filemap.c index 78e4a7dc3a56..ee892853a214 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3957,8 +3957,6 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp) if (mapping && mapping->a_ops->release_folio) return mapping->a_ops->release_folio(folio, gfp); - if (mapping && mapping->a_ops->releasepage) - return mapping->a_ops->releasepage(&folio->page, gfp); return try_to_free_buffers(&folio->page); } EXPORT_SYMBOL(filemap_release_folio); -- cgit v1.2.3-71-gd317 From c56a6eb03deb187c989a966fda5a254249b56c2a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 1 May 2022 00:46:03 -0400 Subject: jbd2: Convert jbd2_journal_try_to_free_buffers to take a folio Also convert it to return a bool since it's called from release_folio(). Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Theodore Ts'o Reviewed-by: Jeff Layton --- fs/ext4/inode.c | 2 +- fs/jbd2/transaction.c | 12 ++++++------ include/linux/jbd2.h | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 52c46ac5bc8a..943937cb5302 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3253,7 +3253,7 @@ static bool ext4_release_folio(struct folio *folio, gfp_t wait) if (folio_test_checked(folio)) return false; if (journal) - return jbd2_journal_try_to_free_buffers(journal, &folio->page); + return jbd2_journal_try_to_free_buffers(journal, folio); else return try_to_free_buffers(&folio->page); } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index fcb9175016a5..ee33d277d51e 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2143,17 +2143,17 @@ out: * cannot happen because we never reallocate freed data as metadata * while the data is part of a transaction. Yes? 
* - * Return 0 on failure, 1 on success + * Return false on failure, true on success */ -int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page) +bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio) { struct buffer_head *head; struct buffer_head *bh; - int ret = 0; + bool ret = false; - J_ASSERT(PageLocked(page)); + J_ASSERT(folio_test_locked(folio)); - head = page_buffers(page); + head = folio_buffers(folio); bh = head; do { struct journal_head *jh; @@ -2175,7 +2175,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page) goto busy; } while ((bh = bh->b_this_page) != head); - ret = try_to_free_buffers(page); + ret = try_to_free_buffers(&folio->page); busy: return ret; } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index de9536680b2b..e79d6e0b14e8 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1529,7 +1529,7 @@ extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); int jbd2_journal_invalidate_folio(journal_t *, struct folio *, size_t offset, size_t length); -extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page); +bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio); extern int jbd2_journal_stop(handle_t *); extern int jbd2_journal_flush(journal_t *journal, unsigned int flags); extern void jbd2_journal_lock_updates (journal_t *); -- cgit v1.2.3-71-gd317 From 68189fef88c7d02eb92e038be3d6428ebd0d2945 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 1 May 2022 01:08:08 -0400 Subject: fs: Change try_to_free_buffers() to take a folio All but two of the callers already have a folio; pass a folio into try_to_free_buffers(). This removes the last user of cancel_dirty_page() so remove that wrapper function too. Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jeff Layton --- fs/buffer.c | 42 +++++++++++++++++++++--------------------- fs/ext4/inode.c | 2 +- fs/gfs2/aops.c | 2 +- fs/hfs/inode.c | 2 +- fs/hfsplus/inode.c | 2 +- fs/jbd2/commit.c | 2 +- fs/jbd2/transaction.c | 4 ++-- fs/mpage.c | 2 +- fs/ocfs2/aops.c | 2 +- fs/reiserfs/inode.c | 2 +- fs/reiserfs/journal.c | 2 +- include/linux/buffer_head.h | 4 ++-- include/linux/pagemap.h | 4 ---- mm/filemap.c | 2 +- mm/migrate.c | 2 +- mm/vmscan.c | 2 +- 16 files changed, 37 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/fs/buffer.c b/fs/buffer.c index 786ef5b98c80..701af0035802 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -955,7 +955,7 @@ grow_dev_page(struct block_device *bdev, sector_t block, size); goto done; } - if (!try_to_free_buffers(page)) + if (!try_to_free_buffers(page_folio(page))) goto failed; } @@ -3155,20 +3155,20 @@ int sync_dirty_buffer(struct buffer_head *bh) EXPORT_SYMBOL(sync_dirty_buffer); /* - * try_to_free_buffers() checks if all the buffers on this particular page + * try_to_free_buffers() checks if all the buffers on this particular folio * are unused, and releases them if so. * * Exclusion against try_to_free_buffers may be obtained by either - * locking the page or by holding its mapping's private_lock. + * locking the folio or by holding its mapping's private_lock. * - * If the page is dirty but all the buffers are clean then we need to - * be sure to mark the page clean as well. This is because the page + * If the folio is dirty but all the buffers are clean then we need to + * be sure to mark the folio clean as well. 
This is because the folio * may be against a block device, and a later reattachment of buffers - * to a dirty page will set *all* buffers dirty. Which would corrupt + * to a dirty folio will set *all* buffers dirty. Which would corrupt * filesystem data on the same device. * - * The same applies to regular filesystem pages: if all the buffers are - * clean then we set the page clean and proceed. To do that, we require + * The same applies to regular filesystem folios: if all the buffers are + * clean then we set the folio clean and proceed. To do that, we require * total exclusion from block_dirty_folio(). That is obtained with * private_lock. * @@ -3207,40 +3207,40 @@ failed: return 0; } -int try_to_free_buffers(struct page *page) +bool try_to_free_buffers(struct folio *folio) { - struct address_space * const mapping = page->mapping; + struct address_space * const mapping = folio->mapping; struct buffer_head *buffers_to_free = NULL; - int ret = 0; + bool ret = 0; - BUG_ON(!PageLocked(page)); - if (PageWriteback(page)) - return 0; + BUG_ON(!folio_test_locked(folio)); + if (folio_test_writeback(folio)) + return false; if (mapping == NULL) { /* can this still happen? */ - ret = drop_buffers(page, &buffers_to_free); + ret = drop_buffers(&folio->page, &buffers_to_free); goto out; } spin_lock(&mapping->private_lock); - ret = drop_buffers(page, &buffers_to_free); + ret = drop_buffers(&folio->page, &buffers_to_free); /* * If the filesystem writes its buffers by hand (eg ext3) - * then we can have clean buffers against a dirty page. We - * clean the page here; otherwise the VM will never notice + * then we can have clean buffers against a dirty folio. We + * clean the folio here; otherwise the VM will never notice * that the filesystem did any IO at all. * * Also, during truncate, discard_buffer will have marked all - * the page's buffers clean. We discover that here and clean - * the page also. + * the folio's buffers clean. We discover that here and clean + * the folio also. * * private_lock must be held over this entire operation in order * to synchronise against block_dirty_folio and prevent the * dirty bit from being lost. */ if (ret) - cancel_dirty_page(page); + folio_cancel_dirty(folio); spin_unlock(&mapping->private_lock); out: if (buffers_to_free) { diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 943937cb5302..987ea77e672d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3255,7 +3255,7 @@ static bool ext4_release_folio(struct folio *folio, gfp_t wait) if (journal) return jbd2_journal_try_to_free_buffers(journal, folio); else - return try_to_free_buffers(&folio->page); + return try_to_free_buffers(folio); } static bool ext4_inode_datasync_dirty(struct inode *inode) diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 95a674d70c04..106e90a36583 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -757,7 +757,7 @@ bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask) } while (bh != head); gfs2_log_unlock(sdp); - return try_to_free_buffers(&folio->page); + return try_to_free_buffers(folio); cannot_release: gfs2_log_unlock(sdp); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 86fd50e5fccb..c4526f16355d 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -124,7 +124,7 @@ static bool hfs_release_folio(struct folio *folio, gfp_t mask) } while (--i && nidx < tree->node_count); spin_unlock(&tree->hash_lock); } - return res ? try_to_free_buffers(&folio->page) : false; + return res ? 
try_to_free_buffers(folio) : false; } static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index f723e0e91d51..aeab83ed1c9c 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -121,7 +121,7 @@ static bool hfsplus_release_folio(struct folio *folio, gfp_t mask) } while (--i && nidx < tree->node_count); spin_unlock(&tree->hash_lock); } - return res ? try_to_free_buffers(&folio->page) : false; + return res ? try_to_free_buffers(folio) : false; } static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter) diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 2f37108da0ec..eb315e81f1a6 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -82,7 +82,7 @@ static void release_buffer_page(struct buffer_head *bh) folio_get(folio); __brelse(bh); - try_to_free_buffers(&folio->page); + try_to_free_buffers(folio); folio_unlock(folio); folio_put(folio); return; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index ee33d277d51e..e49bb0938376 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2175,7 +2175,7 @@ bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio) goto busy; } while ((bh = bh->b_this_page) != head); - ret = try_to_free_buffers(&folio->page); + ret = try_to_free_buffers(folio); busy: return ret; } @@ -2482,7 +2482,7 @@ int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio, } while (bh != head); if (!partial_page) { - if (may_free && try_to_free_buffers(&folio->page)) + if (may_free && try_to_free_buffers(folio)) J_ASSERT(!folio_buffers(folio)); } return 0; diff --git a/fs/mpage.c b/fs/mpage.c index 6df9c3aa5728..0d25f44f5707 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -431,7 +431,7 @@ static void clean_buffers(struct page *page, unsigned first_unmapped) * disk before we reach the platter. 
*/ if (buffer_heads_over_limit && PageUptodate(page)) - try_to_free_buffers(page); + try_to_free_buffers(page_folio(page)); } /* diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 7d7b86ca078f..35d40a67204c 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -502,7 +502,7 @@ static bool ocfs2_release_folio(struct folio *folio, gfp_t wait) { if (!folio_buffers(folio)) return false; - return try_to_free_buffers(&folio->page); + return try_to_free_buffers(folio); } static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 9cf2e1420a74..0cffe054b78e 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -3234,7 +3234,7 @@ static bool reiserfs_release_folio(struct folio *folio, gfp_t unused_gfp_flags) bh = bh->b_this_page; } while (bh != head); if (ret) - ret = try_to_free_buffers(&folio->page); + ret = try_to_free_buffers(folio); spin_unlock(&j->j_dirty_buffers_lock); return ret; } diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 99ba495b0f28..d8cc9a366124 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -606,7 +606,7 @@ static void release_buffer_page(struct buffer_head *bh) folio_get(folio); put_bh(bh); if (!folio->mapping) - try_to_free_buffers(&folio->page); + try_to_free_buffers(folio); folio_unlock(folio); folio_put(folio); } else { diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 31d82fd9abe8..c9d1463bb20f 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -158,7 +158,7 @@ void mark_buffer_write_io_error(struct buffer_head *bh); void touch_buffer(struct buffer_head *bh); void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset); -int try_to_free_buffers(struct page *); +bool try_to_free_buffers(struct folio *); struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, bool retry); void create_empty_buffers(struct page *, unsigned long, @@ -402,7 +402,7 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio); #else /* CONFIG_BLOCK */ static inline void buffer_init(void) {} -static inline int try_to_free_buffers(struct page *page) { return 1; } +static inline bool try_to_free_buffers(struct folio *folio) { return true; } static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 831b28dab01a..82dfb279e0c4 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1067,10 +1067,6 @@ static inline void folio_cancel_dirty(struct folio *folio) if (folio_test_dirty(folio)) __folio_cancel_dirty(folio); } -static inline void cancel_dirty_page(struct page *page) -{ - folio_cancel_dirty(page_folio(page)); -} bool folio_clear_dirty_for_io(struct folio *folio); bool clear_page_dirty_for_io(struct page *page); void folio_invalidate(struct folio *folio, size_t offset, size_t length); diff --git a/mm/filemap.c b/mm/filemap.c index ee892853a214..d335a154a0d9 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3957,6 +3957,6 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp) if (mapping && mapping->a_ops->release_folio) return mapping->a_ops->release_folio(folio, gfp); - return try_to_free_buffers(&folio->page); + return try_to_free_buffers(folio); } EXPORT_SYMBOL(filemap_release_folio); diff --git a/mm/migrate.c b/mm/migrate.c index 
6c31ee1e1c9b..21d82636c291 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1013,7 +1013,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, if (!page->mapping) { VM_BUG_ON_PAGE(PageAnon(page), page); if (page_has_private(page)) { - try_to_free_buffers(page); + try_to_free_buffers(folio); goto out_unlock_both; } } else if (page_mapped(page)) { diff --git a/mm/vmscan.c b/mm/vmscan.c index 27851232e00c..f3f7ce2c4068 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1181,7 +1181,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping) * folio->mapping == NULL while being dirty with clean buffers. */ if (folio_test_private(folio)) { - if (try_to_free_buffers(&folio->page)) { + if (try_to_free_buffers(folio)) { folio_clear_dirty(folio); pr_info("%s: orphaned folio\n", __func__); return PAGE_CLEAN; -- cgit v1.2.3-71-gd317 From d2329aa0c78f4a8dd368bb706f196ab99f692eaa Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 1 May 2022 07:35:31 -0400 Subject: fs: Add free_folio address space operation Include documentation and convert the callers to use ->free_folio as well as ->freepage. Signed-off-by: Matthew Wilcox (Oracle) --- Documentation/filesystems/locking.rst | 10 +++++----- Documentation/filesystems/vfs.rst | 6 +++--- include/linux/fs.h | 1 + mm/filemap.c | 9 ++++++++- mm/vmscan.c | 6 +++++- 5 files changed, 22 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index ddef4a753e73..515bc48ab58b 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -250,7 +250,7 @@ prototypes:: sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t start, size_t len); bool (*release_folio)(struct folio *, gfp_t); - void (*freepage)(struct page *); + void (*free_folio)(struct folio *); int (*direct_IO)(struct kiocb *, struct iov_iter *iter); bool (*isolate_page) (struct page *, isolate_mode_t); int (*migratepage)(struct address_space *, struct page *, struct page *); @@ -262,10 +262,10 @@ prototypes:: int (*swap_deactivate)(struct file *); locking rules: - All except dirty_folio and freepage may block + All except dirty_folio and free_folio may block ====================== ======================== ========= =============== -ops PageLocked(page) i_rwsem invalidate_lock +ops folio locked i_rwsem invalidate_lock ====================== ======================== ========= =============== writepage: yes, unlocks (see below) read_folio: yes, unlocks shared @@ -277,7 +277,7 @@ write_end: yes, unlocks exclusive bmap: invalidate_folio: yes exclusive release_folio: yes -freepage: yes +free_folio: yes direct_IO: isolate_page: yes migratepage: yes (both) @@ -377,7 +377,7 @@ buffers from the folio in preparation for freeing it. It returns false to indicate that the buffers are (or may be) freeable. If ->release_folio is NULL, the kernel assumes that the fs has no private interest in the buffers. -->freepage() is called when the kernel is done dropping the page +->free_folio() is called when the kernel has dropped the folio from the page cache. ->launder_folio() may be called prior to releasing a folio if diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 679887b5c8fc..12a011d2cbc6 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -735,7 +735,7 @@ cache in your filesystem. 
The following members are defined: sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t start, size_t len); bool (*release_folio)(struct folio *, gfp_t); - void (*freepage)(struct page *); + void (*free_folio)(struct folio *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* isolate a page for migration */ bool (*isolate_page) (struct page *, isolate_mode_t); @@ -891,8 +891,8 @@ cache in your filesystem. The following members are defined: its release_folio will need to ensure this. Possibly it can clear the uptodate flag if it cannot free private data yet. -``freepage`` - freepage is called once the page is no longer visible in the +``free_folio`` + free_folio is called once the folio is no longer visible in the page cache in order to allow the cleanup of any private data. Since it may be called by the memory reclaimer, it should not assume that the original address_space mapping still exists, and diff --git a/include/linux/fs.h b/include/linux/fs.h index 1cee64d9724b..915844e6293e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -356,6 +356,7 @@ struct address_space_operations { sector_t (*bmap)(struct address_space *, sector_t); void (*invalidate_folio) (struct folio *, size_t offset, size_t len); bool (*release_folio)(struct folio *, gfp_t); + void (*free_folio)(struct folio *folio); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* diff --git a/mm/filemap.c b/mm/filemap.c index d335a154a0d9..adcdef56890f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -226,8 +226,12 @@ void __filemap_remove_folio(struct folio *folio, void *shadow) void filemap_free_folio(struct address_space *mapping, struct folio *folio) { void (*freepage)(struct page *); + void (*free_folio)(struct folio *); int refs = 1; + free_folio = mapping->a_ops->free_folio; + if (free_folio) + free_folio(folio); freepage = mapping->a_ops->freepage; if (freepage) freepage(&folio->page); @@ -807,6 +811,7 @@ void replace_page_cache_page(struct page *old, struct page *new) struct folio *fold = page_folio(old); struct folio *fnew = page_folio(new); struct address_space *mapping = old->mapping; + void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; void (*freepage)(struct page *) = mapping->a_ops->freepage; pgoff_t offset = old->index; XA_STATE(xas, &mapping->i_pages, offset); @@ -835,9 +840,11 @@ void replace_page_cache_page(struct page *old, struct page *new) if (PageSwapBacked(new)) __inc_lruvec_page_state(new, NR_SHMEM); xas_unlock_irq(&xas); + if (free_folio) + free_folio(fold); if (freepage) freepage(old); - put_page(old); + folio_put(fold); } EXPORT_SYMBOL_GPL(replace_page_cache_page); diff --git a/mm/vmscan.c b/mm/vmscan.c index f3f7ce2c4068..d8a031128ad0 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1282,8 +1282,10 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio, xa_unlock_irq(&mapping->i_pages); put_swap_page(&folio->page, swap); } else { + void (*free_folio)(struct folio *); void (*freepage)(struct page *); + free_folio = mapping->a_ops->free_folio; freepage = mapping->a_ops->freepage; /* * Remember a shadow entry for reclaimed file cache in @@ -1310,7 +1312,9 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio, inode_add_lru(mapping->host); spin_unlock(&mapping->host->i_lock); - if (freepage != NULL) + if (free_folio) + free_folio(folio); + if (freepage) freepage(&folio->page); } -- cgit v1.2.3-71-gd317 From 
8560cb1a7d75048af275dd23fb0cf05382b3c2b9 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 5 May 2022 00:43:09 -0400 Subject: fs: Remove aops->freepage All implementations now use free_folio so we can delete the callers and the method. Signed-off-by: Matthew Wilcox (Oracle) --- include/linux/fs.h | 1 - mm/filemap.c | 7 ------- mm/vmscan.c | 4 ---- 3 files changed, 12 deletions(-) (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 915844e6293e..6f305f1097a5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -357,7 +357,6 @@ struct address_space_operations { void (*invalidate_folio) (struct folio *, size_t offset, size_t len); bool (*release_folio)(struct folio *, gfp_t); void (*free_folio)(struct folio *folio); - void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* * migrate the contents of a page to the specified target. If diff --git a/mm/filemap.c b/mm/filemap.c index adcdef56890f..fa0ca674450f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -225,16 +225,12 @@ void __filemap_remove_folio(struct folio *folio, void *shadow) void filemap_free_folio(struct address_space *mapping, struct folio *folio) { - void (*freepage)(struct page *); void (*free_folio)(struct folio *); int refs = 1; free_folio = mapping->a_ops->free_folio; if (free_folio) free_folio(folio); - freepage = mapping->a_ops->freepage; - if (freepage) - freepage(&folio->page); if (folio_test_large(folio) && !folio_test_hugetlb(folio)) refs = folio_nr_pages(folio); @@ -812,7 +808,6 @@ void replace_page_cache_page(struct page *old, struct page *new) struct folio *fnew = page_folio(new); struct address_space *mapping = old->mapping; void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; - void (*freepage)(struct page *) = mapping->a_ops->freepage; pgoff_t offset = old->index; XA_STATE(xas, &mapping->i_pages, offset); @@ -842,8 +837,6 @@ void replace_page_cache_page(struct page *old, struct page *new) xas_unlock_irq(&xas); if (free_folio) free_folio(fold); - if (freepage) - freepage(old); folio_put(fold); } EXPORT_SYMBOL_GPL(replace_page_cache_page); diff --git a/mm/vmscan.c b/mm/vmscan.c index d8a031128ad0..edc89f26b738 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1283,10 +1283,8 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio, put_swap_page(&folio->page, swap); } else { void (*free_folio)(struct folio *); - void (*freepage)(struct page *); free_folio = mapping->a_ops->free_folio; - freepage = mapping->a_ops->freepage; /* * Remember a shadow entry for reclaimed file cache in * order to detect refaults, thus thrashing, later on. @@ -1314,8 +1312,6 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio, if (free_folio) free_folio(folio); - if (freepage) - freepage(&folio->page); } return 1; -- cgit v1.2.3-71-gd317
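
For a buffer_head-based filesystem, the end state of this series looks roughly like the sketch below: ->release_folio follows the pattern of the converted ocfs2_release_folio() above and hands the work to the folio-based try_to_free_buffers(), while ->free_folio takes over the old ->freepage role of cleaning up after the folio has left the page cache. This is an illustrative sketch only; "myfs" and its aops are hypothetical names, and the choice of block_dirty_folio()/block_invalidate_folio() for the other hooks is an assumption, not part of these patches.

	/*
	 * Hypothetical "myfs" example wiring up the folio-based hooks
	 * introduced in this series; not taken from any in-tree filesystem.
	 */
	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	/* Drop our buffer_heads if possible; mirrors ocfs2_release_folio() above. */
	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
	{
		if (!folio_buffers(folio))
			return false;
		return try_to_free_buffers(folio);
	}

	/*
	 * Runs once the folio is no longer visible in the page cache (the
	 * old ->freepage role).  The original mapping may already be gone,
	 * so only per-folio private state should be touched here.
	 */
	static void myfs_free_folio(struct folio *folio)
	{
	}

	static const struct address_space_operations myfs_aops = {
		.dirty_folio		= block_dirty_folio,
		.invalidate_folio	= block_invalidate_folio,
		.release_folio		= myfs_release_folio,
		.free_folio		= myfs_free_folio,
	};

If a filesystem sets neither hook, filemap_release_folio() now falls straight through to the folio-based try_to_free_buffers(), as the mm/filemap.c hunks above show.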