cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

io.c (9063B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/* Cache data I/O routines
      3 *
      4 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
      5 * Written by David Howells (dhowells@redhat.com)
      6 */
      7#define FSCACHE_DEBUG_LEVEL OPERATION
      8#include <linux/fscache-cache.h>
      9#include <linux/uio.h>
     10#include <linux/bvec.h>
     11#include <linux/slab.h>
     12#include <linux/uio.h>
     13#include "internal.h"
     14
/**
 * fscache_wait_for_operation - Wait for an object become accessible
 * @cres: The cache resources for the operation being performed
 * @want_state: The minimum state the object must be at
 *
 * See if the target cache object is at the specified minimum state of
 * accessibility yet, and if not, wait for it.
 */
bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
				enum fscache_want_state want_state)
{
	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
	enum fscache_cookie_state state;

again:
	/* Re-check cache liveness on every pass: the cache may die while we
	 * are asleep below.
	 */
	if (!fscache_cache_is_live(cookie->volume->cache)) {
		_leave(" [broken]");
		return false;
	}

	state = fscache_cookie_state(cookie);
	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

	switch (state) {
	case FSCACHE_COOKIE_STATE_CREATING:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		if (want_state == FSCACHE_WANT_PARAMS)
			goto ready; /* There can be no content */
		fallthrough;
	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
		/* Transitional state: sleep until the cookie changes state,
		 * then re-evaluate from the top.
		 */
		wait_var_event(&cookie->state,
			       fscache_cookie_state(cookie) != state);
		goto again;

	case FSCACHE_COOKIE_STATE_ACTIVE:
		goto ready;
	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
	default:
		_leave(" [not live]");
		return false;
	}

ready:
	/* Only call into the backend if it hasn't attached its operation
	 * state yet (cache_priv2 presumably set by begin_operation — TODO
	 * confirm against the backend implementations).
	 */
	if (!cres->cache_priv2)
		return cookie->volume->cache->ops->begin_operation(cres, want_state);
	return true;
}
EXPORT_SYMBOL(fscache_wait_for_operation);
     65
/*
 * Begin an I/O operation on the cache, waiting till we reach the right state.
 *
 * Attaches the resources required to the operation resources record.
 * Returns 0 on success or -ENOBUFS if the cookie cannot be used, in which
 * case the cookie access taken here is released again.
 */
static int fscache_begin_operation(struct netfs_cache_resources *cres,
				   struct fscache_cookie *cookie,
				   enum fscache_want_state want_state,
				   enum fscache_access_trace why)
{
	enum fscache_cookie_state state;
	long timeo;
	bool once_only = false;	/* Emit the timeout warning at most once */

	cres->ops		= NULL;
	cres->cache_priv	= cookie;
	cres->cache_priv2	= NULL;
	cres->debug_id		= cookie->debug_id;
	cres->inval_counter	= cookie->inval_counter;

	/* Count an access on the cookie; paired with
	 * fscache_end_cookie_access() on the failure path below.
	 */
	if (!fscache_begin_cookie_access(cookie, why))
		return -ENOBUFS;

again:
	spin_lock(&cookie->lock);

	state = fscache_cookie_state(cookie);
	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

	switch (state) {
	case FSCACHE_COOKIE_STATE_LOOKING_UP:
	case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
	case FSCACHE_COOKIE_STATE_INVALIDATING:
		goto wait_for_file_wrangling;
	case FSCACHE_COOKIE_STATE_CREATING:
		if (want_state == FSCACHE_WANT_PARAMS)
			goto ready; /* There can be no content */
		goto wait_for_file_wrangling;
	case FSCACHE_COOKIE_STATE_ACTIVE:
		goto ready;
	case FSCACHE_COOKIE_STATE_DROPPED:
	case FSCACHE_COOKIE_STATE_RELINQUISHING:
		WARN(1, "Can't use cookie in state %u\n", cookie->state);
		goto not_live;
	default:
		goto not_live;
	}

ready:
	spin_unlock(&cookie->lock);
	/* Let the cache backend attach its own state to cres */
	if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
		goto failed;
	return 0;

wait_for_file_wrangling:
	spin_unlock(&cookie->lock);
	trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
			     atomic_read(&cookie->n_accesses),
			     fscache_access_io_wait);
	/* Wait for the cookie to leave the transitional state, with a 20s
	 * watchdog so a stuck state machine gets reported rather than
	 * hanging silently; then re-evaluate under the lock.
	 */
	timeo = wait_var_event_timeout(&cookie->state,
				       fscache_cookie_state(cookie) != state, 20 * HZ);
	if (timeo <= 1 && !once_only) {
		pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u",
			__func__, fscache_cookie_state(cookie), state);
		fscache_print_cookie(cookie, 'O');
		once_only = true;
	}
	goto again;

not_live:
	spin_unlock(&cookie->lock);
failed:
	cres->cache_priv = NULL;
	cres->ops = NULL;
	fscache_end_cookie_access(cookie, fscache_access_io_not_live);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
    144
    145int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
    146				   struct fscache_cookie *cookie)
    147{
    148	return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
    149				       fscache_access_io_read);
    150}
    151EXPORT_SYMBOL(__fscache_begin_read_operation);
    152
    153int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
    154				    struct fscache_cookie *cookie)
    155{
    156	return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
    157				       fscache_access_io_write);
    158}
    159EXPORT_SYMBOL(__fscache_begin_write_operation);
    160
/**
 * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 * @cookie: The cookie referring to the cache object
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory
 * so that writeback can later write to it.  This is intended
 * to be called from the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
				struct fscache_cookie *cookie)
{
	struct inode *inode = mapping->host;
	bool need_use = false;

	_enter("");

	if (!filemap_dirty_folio(mapping, folio))
		return false;
	if (!fscache_cookie_valid(cookie))
		return true;	/* Dirtied, but no cache object to pin */

	/* Double-checked flag: test outside the lock first, then recheck
	 * under i_lock so that exactly one task sets the flag and takes
	 * the cookie use.
	 */
	if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
			inode->i_state |= I_PINNING_FSCACHE_WB;
			need_use = true;
		}
		spin_unlock(&inode->i_lock);

		/* Take the use on the cookie outside the spinlock */
		if (need_use)
			fscache_use_cookie(cookie, true);
	}
	return true;
}
EXPORT_SYMBOL(fscache_dirty_folio);
    200
/* State for one asynchronous write to the cache; allocated in
 * __fscache_write_to_cache() and freed in fscache_wreq_done().
 */
struct fscache_write_request {
	struct netfs_cache_resources cache_resources;	/* Cache op resources */
	struct address_space	*mapping;	/* Pagecache the data comes from */
	loff_t			start;		/* Byte offset of the region */
	size_t			len;		/* Byte length of the region */
	bool			set_bits;	/* Passed through to fscache_clear_page_bits() */
	netfs_io_terminated_t	term_func;	/* Completion callback (may be NULL) */
	void			*term_func_priv; /* Private data for term_func */
};
    210
/* End the fscache write state on every pagecache page covering the byte
 * range [start, start + len) of @mapping.  A no-op for a zero-length range.
 */
void __fscache_clear_page_bits(struct address_space *mapping,
			       loff_t start, size_t len)
{
	pgoff_t first = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE;	/* Inclusive last index */
	struct page *page;

	if (len) {
		XA_STATE(xas, &mapping->i_pages, first);

		/* RCU guards the xarray walk; end_page_fscache() is called
		 * on each page found in the range.
		 */
		rcu_read_lock();
		xas_for_each(&xas, page, last) {
			end_page_fscache(page);
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__fscache_clear_page_bits);
    229
    230/*
    231 * Deal with the completion of writing the data to the cache.
    232 */
    233static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
    234			      bool was_async)
    235{
    236	struct fscache_write_request *wreq = priv;
    237
    238	fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
    239				wreq->set_bits);
    240
    241	if (wreq->term_func)
    242		wreq->term_func(wreq->term_func_priv, transferred_or_error,
    243				was_async);
    244	fscache_end_operation(&wreq->cache_resources);
    245	kfree(wreq);
    246}
    247
/* Kick off an asynchronous write of pagecache data in [start, start + len)
 * to the cache object behind @cookie.  On any failure the per-page fscache
 * state is cleared (per @cond) and @term_func, if given, is called with the
 * error so the caller always sees a termination.
 */
void __fscache_write_to_cache(struct fscache_cookie *cookie,
			      struct address_space *mapping,
			      loff_t start, size_t len, loff_t i_size,
			      netfs_io_terminated_t term_func,
			      void *term_func_priv,
			      bool cond)
{
	struct fscache_write_request *wreq;
	struct netfs_cache_resources *cres;
	struct iov_iter iter;
	int ret = -ENOBUFS;

	if (len == 0)
		goto abandon;

	_enter("%llx,%zx", start, len);

	wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
	if (!wreq)
		goto abandon;
	wreq->mapping		= mapping;
	wreq->start		= start;
	wreq->len		= len;
	wreq->set_bits		= cond;
	wreq->term_func		= term_func;
	wreq->term_func_priv	= term_func_priv;

	cres = &wreq->cache_resources;
	if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
				    fscache_access_io_write) < 0)
		goto abandon_free;

	/* May adjust start/len to what the cache can actually store; the
	 * original extent stays recorded in wreq for the completion path.
	 */
	ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);
	if (ret < 0)
		goto abandon_end;

	/* TODO: Consider clearing page bits now for space the write isn't
	 * covering.  This is more complicated than it appears when THPs are
	 * taken into account.
	 */

	/* fscache_wreq_done() takes over cleanup from here */
	iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
	fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
	return;

abandon_end:
	/* Operation was begun: let the completion handler unwind it all */
	return fscache_wreq_done(wreq, ret, false);
abandon_free:
	kfree(wreq);
abandon:
	fscache_clear_page_bits(mapping, start, len, cond);
	if (term_func)
		term_func(term_func_priv, ret, false);
}
EXPORT_SYMBOL(__fscache_write_to_cache);
    303
    304/*
    305 * Change the size of a backing object.
    306 */
    307void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
    308{
    309	struct netfs_cache_resources cres;
    310
    311	trace_fscache_resize(cookie, new_size);
    312	if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
    313				    fscache_access_io_resize) == 0) {
    314		fscache_stat(&fscache_n_resizes);
    315		set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
    316
    317		/* We cannot defer a resize as we need to do it inside the
    318		 * netfs's inode lock so that we're serialised with respect to
    319		 * writes.
    320		 */
    321		cookie->volume->cache->ops->resize_cookie(&cres, new_size);
    322		fscache_end_operation(&cres);
    323	} else {
    324		fscache_stat(&fscache_n_resizes_null);
    325	}
    326}
    327EXPORT_SYMBOL(__fscache_resize_cookie);