cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

aops.c (20556B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

/**
 * gfs2_page_add_databufs - Mark a range of buffers in a page for journaling
 * @ip: The inode
 * @page: The page
 * @from: The starting byte offset within the page
 * @len: The number of bytes to cover
 *
 * Marks every buffer head overlapping [@from, @from + @len) uptodate and
 * adds it to the current transaction as a journaled data buffer.
 */
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct iomap_writepage_ctx wpc = { };

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size.
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}
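
/*
 * Worked example for the straddle check above (illustrative): with
 * PAGE_SIZE == 4096 and i_size == 5000, end_index is 5000 >> 12 == 1 and
 * offset is 5000 & 4095 == 904.  Only the page at index 1 matches, and
 * zero_user_segment() clears bytes [904, 4096) so that stale data beyond
 * EOF never reaches disk.
 */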

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Receives the index of the last page handled
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}
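
/*
 * Reservation arithmetic above, worked through (illustrative): with
 * 4096-byte pages and a 1024-byte filesystem block size (i_blkbits == 10),
 * each page spans PAGE_SIZE >> 10 == 4 blocks, so a full pagevec of 15
 * pages asks gfs2_trans_begin() for 60 blocks and 60 revokes before any
 * page lock is taken.
 */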

/**
 * gfs2_write_cache_jdata - Like write_cache_pages, but with transaction batching
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
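
/*
 * Ordering sketch for the jdata path above (illustrative): generic
 * write_cache_pages() would lock each page and then call ->writepage(),
 * which for jdata would have to open a transaction while holding the page
 * lock.  gfs2_write_cache_jdata() instead does, per pagevec:
 *
 *	gfs2_trans_begin()	(in gfs2_write_jdata_pagevec())
 *	lock_page()
 *	__gfs2_jdata_writepage()
 *	gfs2_trans_end()
 *
 * so the transaction is always started before any page lock is taken.
 */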

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
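
/*
 * Layout sketch for the copy above (illustrative):
 *
 *	dinode block:  [ gfs2_dinode header | stuffed file data ]
 *	page:          [ file data          | zeroes to PAGE_SIZE ]
 *
 * The data starts sizeof(struct gfs2_dinode) bytes into the block, so a
 * stuffed file can hold at most gfs2_max_stuffed_size() bytes (block size
 * minus the dinode header) before it has to be unstuffed.
 */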

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or a negative errno
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		/* Copy at most up to the end of the current page. */
		amt = size - copied;
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}
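
/*
 * Usage sketch (illustrative, with a hypothetical rgrp_index): the rindex
 * code reads one resource-group descriptor at a time through this helper,
 * along the lines of:
 *
 *	struct gfs2_rindex buf;
 *	loff_t pos = rgrp_index * sizeof(struct gfs2_rindex);
 *	int error = gfs2_internal_read(ip, (char *)&buf, &pos,
 *				       sizeof(struct gfs2_rindex));
 *	if (error != sizeof(struct gfs2_rindex))
 *		handle a short read or a negative errno;
 *
 * where ip is the rindex inode.
 */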

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		return;
	if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/*
 * jdata folios dirtied inside a transaction are flagged "checked" so that
 * the writeback path knows they still need to be added to the journal.
 */
static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}
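
/*
 * This is what backs the FIBMAP ioctl.  A minimal userspace sketch
 * (illustrative, error handling elided):
 *
 *	int block = 0;			// logical block in, physical block out
 *	ioctl(fd, FIBMAP, &block);	// 0 on success
 *
 * Stuffed inodes report 0 (a hole) because their data lives inside the
 * dinode block rather than in an addressable data block.
 */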

/*
 * Detach a buffer from the journal: take it off the log/ail lists and
 * clear its state so that the block can be reused.
 */
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}
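
/*
 * Worked example for the loop above (illustrative): with 1024-byte buffers
 * and gfs2_invalidate_folio(folio, 1024, 3072) on a 4096-byte folio,
 * stop == 4096 and partial_page is true.  The buffer at pos 0 is skipped
 * (offset > pos), while the buffers at pos 1024, 2048 and 3072 fall fully
 * inside [offset, stop) and get discarded.  Buffers that straddle stop are
 * left alone, as is the folio's "checked" flag on partial invalidates.
 */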

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migratepage = iomap_migrate_page,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}
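
/*
 * Selection happens per inode rather than per filesystem: gfs2_set_aops()
 * is expected to run when an inode is instantiated or when its jdata flag
 * changes, roughly (illustrative, hypothetical caller):
 *
 *	ip->i_diskflags |= GFS2_DIF_JDATA;	// enable journaled data
 *	gfs2_set_aops(&ip->i_inode);		// re-pick gfs2_jdata_aops
 *
 * so regular inodes get the iomap-based gfs2_aops while jdata inodes get
 * the buffer-head based gfs2_jdata_aops.
 */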