cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

inline.c (19651B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * fs/f2fs/inline.c
      4 * Copyright (c) 2013, Intel Corporation
      5 * Authors: Huajun Li <huajun.li@intel.com>
      6 *          Haicheng Li <haicheng.li@intel.com>
      7 */
      8
      9#include <linux/fs.h>
     10#include <linux/f2fs_fs.h>
     11#include <linux/fiemap.h>
     12
     13#include "f2fs.h"
     14#include "node.h"
     15#include <trace/events/f2fs.h>
     16
     17static bool support_inline_data(struct inode *inode)
     18{
     19	if (f2fs_is_atomic_file(inode))
     20		return false;
     21	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
     22		return false;
     23	if (i_size_read(inode) > MAX_INLINE_DATA(inode))
     24		return false;
     25	return true;
     26}
     27
     28bool f2fs_may_inline_data(struct inode *inode)
     29{
     30	if (!support_inline_data(inode))
     31		return false;
     32
     33	return !f2fs_post_read_required(inode);
     34}
     35
     36bool f2fs_sanity_check_inline_data(struct inode *inode)
     37{
     38	if (!f2fs_has_inline_data(inode))
     39		return false;
     40
     41	if (!support_inline_data(inode))
     42		return true;
     43
     44	/*
     45	 * used by sanity_check_inode(), when disk layout fields have not
     46	 * been synchronized to in-memory fields.
     47	 */
     48	return (S_ISREG(inode->i_mode) &&
     49		(file_is_encrypt(inode) || file_is_verity(inode) ||
     50		(F2FS_I(inode)->i_flags & F2FS_COMPR_FL)));
     51}
     52
     53bool f2fs_may_inline_dentry(struct inode *inode)
     54{
     55	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
     56		return false;
     57
     58	if (!S_ISDIR(inode->i_mode))
     59		return false;
     60
     61	return true;
     62}
     63
     64void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
     65{
     66	struct inode *inode = page->mapping->host;
     67	void *src_addr, *dst_addr;
     68
     69	if (PageUptodate(page))
     70		return;
     71
     72	f2fs_bug_on(F2FS_P_SB(page), page->index);
     73
     74	zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);
     75
     76	/* Copy the whole inline data block */
     77	src_addr = inline_data_addr(inode, ipage);
     78	dst_addr = kmap_atomic(page);
     79	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
     80	flush_dcache_page(page);
     81	kunmap_atomic(dst_addr);
     82	if (!PageUptodate(page))
     83		SetPageUptodate(page);
     84}
     85
     86void f2fs_truncate_inline_inode(struct inode *inode,
     87					struct page *ipage, u64 from)
     88{
     89	void *addr;
     90
     91	if (from >= MAX_INLINE_DATA(inode))
     92		return;
     93
     94	addr = inline_data_addr(inode, ipage);
     95
     96	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
     97	memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
     98	set_page_dirty(ipage);
     99
    100	if (from == 0)
    101		clear_inode_flag(inode, FI_DATA_EXIST);
    102}
    103
    104int f2fs_read_inline_data(struct inode *inode, struct page *page)
    105{
    106	struct page *ipage;
    107
    108	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
    109	if (IS_ERR(ipage)) {
    110		unlock_page(page);
    111		return PTR_ERR(ipage);
    112	}
    113
    114	if (!f2fs_has_inline_data(inode)) {
    115		f2fs_put_page(ipage, 1);
    116		return -EAGAIN;
    117	}
    118
    119	if (page->index)
    120		zero_user_segment(page, 0, PAGE_SIZE);
    121	else
    122		f2fs_do_read_inline_data(page, ipage);
    123
    124	if (!PageUptodate(page))
    125		SetPageUptodate(page);
    126	f2fs_put_page(ipage, 1);
    127	unlock_page(page);
    128	return 0;
    129}
    130
    131int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
    132{
    133	struct f2fs_io_info fio = {
    134		.sbi = F2FS_I_SB(dn->inode),
    135		.ino = dn->inode->i_ino,
    136		.type = DATA,
    137		.op = REQ_OP_WRITE,
    138		.op_flags = REQ_SYNC | REQ_PRIO,
    139		.page = page,
    140		.encrypted_page = NULL,
    141		.io_type = FS_DATA_IO,
    142	};
    143	struct node_info ni;
    144	int dirty, err;
    145
    146	if (!f2fs_exist_data(dn->inode))
    147		goto clear_out;
    148
    149	err = f2fs_reserve_block(dn, 0);
    150	if (err)
    151		return err;
    152
    153	err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);
    154	if (err) {
    155		f2fs_truncate_data_blocks_range(dn, 1);
    156		f2fs_put_dnode(dn);
    157		return err;
    158	}
    159
    160	fio.version = ni.version;
    161
    162	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
    163		f2fs_put_dnode(dn);
    164		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
    165		f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
    166			  __func__, dn->inode->i_ino, dn->data_blkaddr);
    167		return -EFSCORRUPTED;
    168	}
    169
    170	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
    171
    172	f2fs_do_read_inline_data(page, dn->inode_page);
    173	set_page_dirty(page);
    174
    175	/* clear dirty state */
    176	dirty = clear_page_dirty_for_io(page);
    177
    178	/* write data page to try to make data consistent */
    179	set_page_writeback(page);
    180	ClearPageError(page);
    181	fio.old_blkaddr = dn->data_blkaddr;
    182	set_inode_flag(dn->inode, FI_HOT_DATA);
    183	f2fs_outplace_write_data(dn, &fio);
    184	f2fs_wait_on_page_writeback(page, DATA, true, true);
    185	if (dirty) {
    186		inode_dec_dirty_pages(dn->inode);
    187		f2fs_remove_dirty_inode(dn->inode);
    188	}
    189
    190	/* this converted inline_data should be recovered. */
    191	set_inode_flag(dn->inode, FI_APPEND_WRITE);
    192
    193	/* clear inline data and flag after data writeback */
    194	f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
    195	clear_page_private_inline(dn->inode_page);
    196clear_out:
    197	stat_dec_inline_inode(dn->inode);
    198	clear_inode_flag(dn->inode, FI_INLINE_DATA);
    199	f2fs_put_dnode(dn);
    200	return 0;
    201}
    202
    203int f2fs_convert_inline_inode(struct inode *inode)
    204{
    205	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    206	struct dnode_of_data dn;
    207	struct page *ipage, *page;
    208	int err = 0;
    209
    210	if (!f2fs_has_inline_data(inode) ||
    211			f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
    212		return 0;
    213
    214	err = f2fs_dquot_initialize(inode);
    215	if (err)
    216		return err;
    217
    218	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
    219	if (!page)
    220		return -ENOMEM;
    221
    222	f2fs_lock_op(sbi);
    223
    224	ipage = f2fs_get_node_page(sbi, inode->i_ino);
    225	if (IS_ERR(ipage)) {
    226		err = PTR_ERR(ipage);
    227		goto out;
    228	}
    229
    230	set_new_dnode(&dn, inode, ipage, ipage, 0);
    231
    232	if (f2fs_has_inline_data(inode))
    233		err = f2fs_convert_inline_page(&dn, page);
    234
    235	f2fs_put_dnode(&dn);
    236out:
    237	f2fs_unlock_op(sbi);
    238
    239	f2fs_put_page(page, 1);
    240
    241	if (!err)
    242		f2fs_balance_fs(sbi, dn.node_changed);
    243
    244	return err;
    245}
    246
    247int f2fs_write_inline_data(struct inode *inode, struct page *page)
    248{
    249	void *src_addr, *dst_addr;
    250	struct dnode_of_data dn;
    251	int err;
    252
    253	set_new_dnode(&dn, inode, NULL, NULL, 0);
    254	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
    255	if (err)
    256		return err;
    257
    258	if (!f2fs_has_inline_data(inode)) {
    259		f2fs_put_dnode(&dn);
    260		return -EAGAIN;
    261	}
    262
    263	f2fs_bug_on(F2FS_I_SB(inode), page->index);
    264
    265	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
    266	src_addr = kmap_atomic(page);
    267	dst_addr = inline_data_addr(inode, dn.inode_page);
    268	memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
    269	kunmap_atomic(src_addr);
    270	set_page_dirty(dn.inode_page);
    271
    272	f2fs_clear_page_cache_dirty_tag(page);
    273
    274	set_inode_flag(inode, FI_APPEND_WRITE);
    275	set_inode_flag(inode, FI_DATA_EXIST);
    276
    277	clear_page_private_inline(dn.inode_page);
    278	f2fs_put_dnode(&dn);
    279	return 0;
    280}
    281
    282int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
    283{
    284	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    285	struct f2fs_inode *ri = NULL;
    286	void *src_addr, *dst_addr;
    287	struct page *ipage;
    288
    289	/*
    290	 * The inline_data recovery policy is as follows.
    291	 * [prev.] [next] of inline_data flag
    292	 *    o       o  -> recover inline_data
    293	 *    o       x  -> remove inline_data, and then recover data blocks
    294	 *    x       o  -> remove data blocks, and then recover inline_data
    295	 *    x       x  -> recover data blocks
    296	 */
    297	if (IS_INODE(npage))
    298		ri = F2FS_INODE(npage);
    299
    300	if (f2fs_has_inline_data(inode) &&
    301			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
    302process_inline:
    303		ipage = f2fs_get_node_page(sbi, inode->i_ino);
    304		if (IS_ERR(ipage))
    305			return PTR_ERR(ipage);
    306
    307		f2fs_wait_on_page_writeback(ipage, NODE, true, true);
    308
    309		src_addr = inline_data_addr(inode, npage);
    310		dst_addr = inline_data_addr(inode, ipage);
    311		memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
    312
    313		set_inode_flag(inode, FI_INLINE_DATA);
    314		set_inode_flag(inode, FI_DATA_EXIST);
    315
    316		set_page_dirty(ipage);
    317		f2fs_put_page(ipage, 1);
    318		return 1;
    319	}
    320
    321	if (f2fs_has_inline_data(inode)) {
    322		ipage = f2fs_get_node_page(sbi, inode->i_ino);
    323		if (IS_ERR(ipage))
    324			return PTR_ERR(ipage);
    325		f2fs_truncate_inline_inode(inode, ipage, 0);
    326		stat_dec_inline_inode(inode);
    327		clear_inode_flag(inode, FI_INLINE_DATA);
    328		f2fs_put_page(ipage, 1);
    329	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
    330		int ret;
    331
    332		ret = f2fs_truncate_blocks(inode, 0, false);
    333		if (ret)
    334			return ret;
    335		stat_inc_inline_inode(inode);
    336		goto process_inline;
    337	}
    338	return 0;
    339}
    340
    341struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
    342					const struct f2fs_filename *fname,
    343					struct page **res_page)
    344{
    345	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
    346	struct f2fs_dir_entry *de;
    347	struct f2fs_dentry_ptr d;
    348	struct page *ipage;
    349	void *inline_dentry;
    350
    351	ipage = f2fs_get_node_page(sbi, dir->i_ino);
    352	if (IS_ERR(ipage)) {
    353		*res_page = ipage;
    354		return NULL;
    355	}
    356
    357	inline_dentry = inline_data_addr(dir, ipage);
    358
    359	make_dentry_ptr_inline(dir, &d, inline_dentry);
    360	de = f2fs_find_target_dentry(&d, fname, NULL);
    361	unlock_page(ipage);
    362	if (IS_ERR(de)) {
    363		*res_page = ERR_CAST(de);
    364		de = NULL;
    365	}
    366	if (de)
    367		*res_page = ipage;
    368	else
    369		f2fs_put_page(ipage, 0);
    370
    371	return de;
    372}
    373
    374int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
    375							struct page *ipage)
    376{
    377	struct f2fs_dentry_ptr d;
    378	void *inline_dentry;
    379
    380	inline_dentry = inline_data_addr(inode, ipage);
    381
    382	make_dentry_ptr_inline(inode, &d, inline_dentry);
    383	f2fs_do_make_empty_dir(inode, parent, &d);
    384
    385	set_page_dirty(ipage);
    386
    387	/* update i_size to MAX_INLINE_DATA */
    388	if (i_size_read(inode) < MAX_INLINE_DATA(inode))
    389		f2fs_i_size_write(inode, MAX_INLINE_DATA(inode));
    390	return 0;
    391}
    392
    393/*
    394 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
    395 * release ipage in this function.
    396 */
    397static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
    398							void *inline_dentry)
    399{
    400	struct page *page;
    401	struct dnode_of_data dn;
    402	struct f2fs_dentry_block *dentry_blk;
    403	struct f2fs_dentry_ptr src, dst;
    404	int err;
    405
    406	page = f2fs_grab_cache_page(dir->i_mapping, 0, true);
    407	if (!page) {
    408		f2fs_put_page(ipage, 1);
    409		return -ENOMEM;
    410	}
    411
    412	set_new_dnode(&dn, dir, ipage, NULL, 0);
    413	err = f2fs_reserve_block(&dn, 0);
    414	if (err)
    415		goto out;
    416
    417	if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
    418		f2fs_put_dnode(&dn);
    419		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
    420		f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
    421			  __func__, dir->i_ino, dn.data_blkaddr);
    422		err = -EFSCORRUPTED;
    423		goto out;
    424	}
    425
    426	f2fs_wait_on_page_writeback(page, DATA, true, true);
    427
    428	dentry_blk = page_address(page);
    429
    430	make_dentry_ptr_inline(dir, &src, inline_dentry);
    431	make_dentry_ptr_block(dir, &dst, dentry_blk);
    432
    433	/* copy data from inline dentry block to new dentry block */
    434	memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
    435	memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap);
    436	/*
    437	 * We do not need to zero out the remainder of the dentry and filename
    438	 * fields, since the bitmap already marks their usage status. We can
    439	 * also skip copying/zeroing the reserved space of the dentry block,
    440	 * because it has not been used so far.
    441	 */
    442	memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
    443	memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);
    444
    445	if (!PageUptodate(page))
    446		SetPageUptodate(page);
    447	set_page_dirty(page);
    448
    449	/* clear inline dir and flag after data writeback */
    450	f2fs_truncate_inline_inode(dir, ipage, 0);
    451
    452	stat_dec_inline_dir(dir);
    453	clear_inode_flag(dir, FI_INLINE_DENTRY);
    454
    455	/*
    456	 * Reclaim the reserved space that was used to keep the inline_dentry
    457	 * structure for backward compatibility.
    458	 */
    459	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
    460			!f2fs_has_inline_xattr(dir))
    461		F2FS_I(dir)->i_inline_xattr_size = 0;
    462
    463	f2fs_i_depth_write(dir, 1);
    464	if (i_size_read(dir) < PAGE_SIZE)
    465		f2fs_i_size_write(dir, PAGE_SIZE);
    466out:
    467	f2fs_put_page(page, 1);
    468	return err;
    469}
    470
    471static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
    472{
    473	struct f2fs_dentry_ptr d;
    474	unsigned long bit_pos = 0;
    475	int err = 0;
    476
    477	make_dentry_ptr_inline(dir, &d, inline_dentry);
    478
    479	while (bit_pos < d.max) {
    480		struct f2fs_dir_entry *de;
    481		struct f2fs_filename fname;
    482		nid_t ino;
    483		umode_t fake_mode;
    484
    485		if (!test_bit_le(bit_pos, d.bitmap)) {
    486			bit_pos++;
    487			continue;
    488		}
    489
    490		de = &d.dentry[bit_pos];
    491
    492		if (unlikely(!de->name_len)) {
    493			bit_pos++;
    494			continue;
    495		}
    496
    497		/*
    498		 * We only need the disk_name and hash to move the dentry.
    499		 * We don't need the original or casefolded filenames.
    500		 */
    501		memset(&fname, 0, sizeof(fname));
    502		fname.disk_name.name = d.filename[bit_pos];
    503		fname.disk_name.len = le16_to_cpu(de->name_len);
    504		fname.hash = de->hash_code;
    505
    506		ino = le32_to_cpu(de->ino);
    507		fake_mode = f2fs_get_de_type(de) << S_SHIFT;
    508
    509		err = f2fs_add_regular_entry(dir, &fname, NULL, ino, fake_mode);
    510		if (err)
    511			goto punch_dentry_pages;
    512
    513		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
    514	}
    515	return 0;
    516punch_dentry_pages:
    517	truncate_inode_pages(&dir->i_data, 0);
    518	f2fs_truncate_blocks(dir, 0, false);
    519	f2fs_remove_dirty_inode(dir);
    520	return err;
    521}
    522
    523static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
    524							void *inline_dentry)
    525{
    526	void *backup_dentry;
    527	int err;
    528
    529	backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
    530				MAX_INLINE_DATA(dir), GFP_F2FS_ZERO);
    531	if (!backup_dentry) {
    532		f2fs_put_page(ipage, 1);
    533		return -ENOMEM;
    534	}
    535
    536	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
    537	f2fs_truncate_inline_inode(dir, ipage, 0);
    538
    539	unlock_page(ipage);
    540
    541	err = f2fs_add_inline_entries(dir, backup_dentry);
    542	if (err)
    543		goto recover;
    544
    545	lock_page(ipage);
    546
    547	stat_dec_inline_dir(dir);
    548	clear_inode_flag(dir, FI_INLINE_DENTRY);
    549
    550	/*
    551	 * Reclaim the reserved space that was used to keep the inline_dentry
    552	 * structure for backward compatibility.
    553	 */
    554	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
    555			!f2fs_has_inline_xattr(dir))
    556		F2FS_I(dir)->i_inline_xattr_size = 0;
    557
    558	kfree(backup_dentry);
    559	return 0;
    560recover:
    561	lock_page(ipage);
    562	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
    563	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
    564	f2fs_i_depth_write(dir, 0);
    565	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
    566	set_page_dirty(ipage);
    567	f2fs_put_page(ipage, 1);
    568
    569	kfree(backup_dentry);
    570	return err;
    571}
    572
    573static int do_convert_inline_dir(struct inode *dir, struct page *ipage,
    574							void *inline_dentry)
    575{
    576	if (!F2FS_I(dir)->i_dir_level)
    577		return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
    578	else
    579		return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
    580}
    581
    582int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
    583{
    584	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    585	struct page *ipage;
    586	struct f2fs_filename fname;
    587	void *inline_dentry = NULL;
    588	int err = 0;
    589
    590	if (!f2fs_has_inline_dentry(dir))
    591		return 0;
    592
    593	f2fs_lock_op(sbi);
    594
    595	err = f2fs_setup_filename(dir, &dentry->d_name, 0, &fname);
    596	if (err)
    597		goto out;
    598
    599	ipage = f2fs_get_node_page(sbi, dir->i_ino);
    600	if (IS_ERR(ipage)) {
    601		err = PTR_ERR(ipage);
    602		goto out_fname;
    603	}
    604
    605	if (f2fs_has_enough_room(dir, ipage, &fname)) {
    606		f2fs_put_page(ipage, 1);
    607		goto out_fname;
    608	}
    609
    610	inline_dentry = inline_data_addr(dir, ipage);
    611
    612	err = do_convert_inline_dir(dir, ipage, inline_dentry);
    613	if (!err)
    614		f2fs_put_page(ipage, 1);
    615out_fname:
    616	f2fs_free_filename(&fname);
    617out:
    618	f2fs_unlock_op(sbi);
    619	return err;
    620}
    621
    622int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
    623			  struct inode *inode, nid_t ino, umode_t mode)
    624{
    625	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    626	struct page *ipage;
    627	unsigned int bit_pos;
    628	void *inline_dentry = NULL;
    629	struct f2fs_dentry_ptr d;
    630	int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
    631	struct page *page = NULL;
    632	int err = 0;
    633
    634	ipage = f2fs_get_node_page(sbi, dir->i_ino);
    635	if (IS_ERR(ipage))
    636		return PTR_ERR(ipage);
    637
    638	inline_dentry = inline_data_addr(dir, ipage);
    639	make_dentry_ptr_inline(dir, &d, inline_dentry);
    640
    641	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
    642	if (bit_pos >= d.max) {
    643		err = do_convert_inline_dir(dir, ipage, inline_dentry);
    644		if (err)
    645			return err;
    646		err = -EAGAIN;
    647		goto out;
    648	}
    649
    650	if (inode) {
    651		f2fs_down_write(&F2FS_I(inode)->i_sem);
    652		page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
    653		if (IS_ERR(page)) {
    654			err = PTR_ERR(page);
    655			goto fail;
    656		}
    657	}
    658
    659	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
    660
    661	f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
    662			   bit_pos);
    663
    664	set_page_dirty(ipage);
    665
    666	/* we don't need to mark_inode_dirty now */
    667	if (inode) {
    668		f2fs_i_pino_write(inode, dir->i_ino);
    669
    670		/* synchronize inode page's data from inode cache */
    671		if (is_inode_flag_set(inode, FI_NEW_INODE))
    672			f2fs_update_inode(inode, page);
    673
    674		f2fs_put_page(page, 1);
    675	}
    676
    677	f2fs_update_parent_metadata(dir, inode, 0);
    678fail:
    679	if (inode)
    680		f2fs_up_write(&F2FS_I(inode)->i_sem);
    681out:
    682	f2fs_put_page(ipage, 1);
    683	return err;
    684}
    685
    686void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
    687					struct inode *dir, struct inode *inode)
    688{
    689	struct f2fs_dentry_ptr d;
    690	void *inline_dentry;
    691	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
    692	unsigned int bit_pos;
    693	int i;
    694
    695	lock_page(page);
    696	f2fs_wait_on_page_writeback(page, NODE, true, true);
    697
    698	inline_dentry = inline_data_addr(dir, page);
    699	make_dentry_ptr_inline(dir, &d, inline_dentry);
    700
    701	bit_pos = dentry - d.dentry;
    702	for (i = 0; i < slots; i++)
    703		__clear_bit_le(bit_pos + i, d.bitmap);
    704
    705	set_page_dirty(page);
    706	f2fs_put_page(page, 1);
    707
    708	dir->i_ctime = dir->i_mtime = current_time(dir);
    709	f2fs_mark_inode_dirty_sync(dir, false);
    710
    711	if (inode)
    712		f2fs_drop_nlink(dir, inode);
    713}
    714
    715bool f2fs_empty_inline_dir(struct inode *dir)
    716{
    717	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    718	struct page *ipage;
    719	unsigned int bit_pos = 2;
    720	void *inline_dentry;
    721	struct f2fs_dentry_ptr d;
    722
    723	ipage = f2fs_get_node_page(sbi, dir->i_ino);
    724	if (IS_ERR(ipage))
    725		return false;
    726
    727	inline_dentry = inline_data_addr(dir, ipage);
    728	make_dentry_ptr_inline(dir, &d, inline_dentry);
    729
    730	bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos);
    731
    732	f2fs_put_page(ipage, 1);
    733
    734	if (bit_pos < d.max)
    735		return false;
    736
    737	return true;
    738}
    739
    740int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
    741				struct fscrypt_str *fstr)
    742{
    743	struct inode *inode = file_inode(file);
    744	struct page *ipage = NULL;
    745	struct f2fs_dentry_ptr d;
    746	void *inline_dentry = NULL;
    747	int err;
    748
    749	make_dentry_ptr_inline(inode, &d, inline_dentry);
    750
    751	if (ctx->pos == d.max)
    752		return 0;
    753
    754	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
    755	if (IS_ERR(ipage))
    756		return PTR_ERR(ipage);
    757
    758	/*
    759	 * f2fs_readdir is protected by inode->i_rwsem, so it is safe to
    760	 * access ipage without holding the page lock.
    761	 */
    762	unlock_page(ipage);
    763
    764	inline_dentry = inline_data_addr(inode, ipage);
    765
    766	make_dentry_ptr_inline(inode, &d, inline_dentry);
    767
    768	err = f2fs_fill_dentries(ctx, &d, 0, fstr);
    769	if (!err)
    770		ctx->pos = d.max;
    771
    772	f2fs_put_page(ipage, 0);
    773	return err < 0 ? err : 0;
    774}
    775
    776int f2fs_inline_data_fiemap(struct inode *inode,
    777		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
    778{
    779	__u64 byteaddr, ilen;
    780	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
    781		FIEMAP_EXTENT_LAST;
    782	struct node_info ni;
    783	struct page *ipage;
    784	int err = 0;
    785
    786	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
    787	if (IS_ERR(ipage))
    788		return PTR_ERR(ipage);
    789
    790	if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
    791				!f2fs_has_inline_data(inode)) {
    792		err = -EAGAIN;
    793		goto out;
    794	}
    795
    796	if (S_ISDIR(inode->i_mode) && !f2fs_has_inline_dentry(inode)) {
    797		err = -EAGAIN;
    798		goto out;
    799	}
    800
    801	ilen = min_t(size_t, MAX_INLINE_DATA(inode), i_size_read(inode));
    802	if (start >= ilen)
    803		goto out;
    804	if (start + len < ilen)
    805		ilen = start + len;
    806	ilen -= start;
    807
    808	err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni, false);
    809	if (err)
    810		goto out;
    811
    812	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
    813	byteaddr += (char *)inline_data_addr(inode, ipage) -
    814					(char *)F2FS_INODE(ipage);
    815	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
    816	trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err);
    817out:
    818	f2fs_put_page(ipage, 1);
    819	return err;
    820}