cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-flush.c (15840B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Functions to sequence PREFLUSH and FUA writes.
      4 *
      5 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
      6 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
      7 *
      8 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting
      9 * of three optional steps - PREFLUSH, DATA and POSTFLUSH - according to
     10 * the request properties and the hardware's capabilities.
     11 *
     12 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
     13 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
     14 * that the device cache should be flushed before the data is executed, and
     15 * REQ_FUA means that the data must be on non-volatile media on request
     16 * completion.
     17 *
     18 * If the device doesn't have writeback cache, PREFLUSH and FUA don't make any
     19 * difference.  The requests are either completed immediately if there's no data
     20 * or executed as normal requests otherwise.
     21 *
     22 * If the device has writeback cache and supports FUA, REQ_PREFLUSH is
     23 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
     24 *
     25 * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH
     26 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
     27 *
     28 * The actual execution of flush is double buffered.  Whenever a request
     29 * needs to execute PRE or POSTFLUSH, it queues at
     30 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
     31 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
     32 * completes, all the requests which were pending are proceeded to the next
     33 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
     34 * requests.
     35 *
     36 * Currently, the following conditions are used to determine when to issue
     37 * flush.
     38 *
     39 * C1. At any given time, only one flush shall be in progress.  This makes
     40 *     double buffering sufficient.
     41 *
     42 * C2. Flush is deferred if any request is executing DATA of its sequence.
     43 *     This avoids issuing separate POSTFLUSHes for requests which shared
     44 *     PREFLUSH.
     45 *
     46 * C3. The second condition is ignored if there is a request which has
     47 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
     48 *     starvation in the unlikely case where there is a continuous stream of
     49 *     FUA (without PREFLUSH) requests.
     50 *
     51 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
     52 * is beneficial.
     53 *
     54 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
     55 * Once while executing DATA and again after the whole sequence is
     56 * complete.  The first completion updates the contained bio but doesn't
     57 * finish it so that the bio submitter is notified only after the whole
     58 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
     59 * req_bio_endio().
     60 *
     61 * The above peculiarity requires that each PREFLUSH/FUA request has only one
     62 * bio attached to it, which is guaranteed as they aren't allowed to be
     63 * merged in the usual way.
     64 */
     65
     66#include <linux/kernel.h>
     67#include <linux/module.h>
     68#include <linux/bio.h>
     69#include <linux/blkdev.h>
     70#include <linux/gfp.h>
     71#include <linux/blk-mq.h>
     72#include <linux/part_stat.h>
     73
     74#include "blk.h"
     75#include "blk-mq.h"
     76#include "blk-mq-tag.h"
     77#include "blk-mq-sched.h"
     78
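/*
 * A minimal illustrative sketch of the driver side of the PREFLUSH/FUA
 * handling described at the top of this file.  The helper name
 * example_advertise_cache is hypothetical; blk_queue_write_cache() is the
 * existing interface drivers use to set QUEUE_FLAG_WC and QUEUE_FLAG_FUA,
 * the flags blk_flush_policy() consults when choosing the steps of a
 * flush sequence.
 */
static inline void example_advertise_cache(struct request_queue *q)
{
	/*
	 * Writeback cache present, FUA not supported: REQ_PREFLUSH is
	 * translated to a PREFLUSH step and REQ_FUA to a POSTFLUSH step.
	 */
	blk_queue_write_cache(q, true, false);
}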
     79/* PREFLUSH/FUA sequences */
     80enum {
     81	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
     82	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
     83	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
     84	REQ_FSEQ_DONE		= (1 << 3),
     85
     86	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
     87				  REQ_FSEQ_POSTFLUSH,
     88
     89	/*
     90	 * If flush has been pending longer than the following timeout,
     91	 * it's issued even if flush_data requests are still in flight.
     92	 */
     93	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
     94};
     95
     96static void blk_kick_flush(struct request_queue *q,
     97			   struct blk_flush_queue *fq, unsigned int flags);
     98
     99static inline struct blk_flush_queue *
    100blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
    101{
    102	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
    103}
    104
    105static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
    106{
    107	unsigned int policy = 0;
    108
    109	if (blk_rq_sectors(rq))
    110		policy |= REQ_FSEQ_DATA;
    111
    112	if (fflags & (1UL << QUEUE_FLAG_WC)) {
    113		if (rq->cmd_flags & REQ_PREFLUSH)
    114			policy |= REQ_FSEQ_PREFLUSH;
    115		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
    116		    (rq->cmd_flags & REQ_FUA))
    117			policy |= REQ_FSEQ_POSTFLUSH;
    118	}
    119	return policy;
    120}
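
/*
 * Worked example of the policy above for a REQ_PREFLUSH|REQ_FUA write that
 * carries data:
 *
 *   no writeback cache:        REQ_FSEQ_DATA
 *   writeback cache + FUA:     REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA
 *                              (REQ_FUA is passed down with the data)
 *   writeback cache, no FUA:   REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
 *                              REQ_FSEQ_POSTFLUSH
 */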
    121
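/*
 * Return the next step of @rq's flush sequence: rq->flush.seq accumulates
 * the REQ_FSEQ_* bits already completed (or not required), so the lowest
 * zero bit (ffz) is the step still to be done.  For example, with
 * REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA already set, the next step is
 * REQ_FSEQ_POSTFLUSH (1 << 2).
 */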
    122static unsigned int blk_flush_cur_seq(struct request *rq)
    123{
    124	return 1 << ffz(rq->flush.seq);
    125}
    126
    127static void blk_flush_restore_request(struct request *rq)
    128{
    129	/*
    130	 * After flush data completion, @rq->bio is %NULL but we need to
    131	 * complete the bio again.  @rq->biotail is guaranteed to equal the
    132	 * original @rq->bio.  Restore it.
    133	 */
    134	rq->bio = rq->biotail;
    135
    136	/* make @rq a normal request */
    137	rq->rq_flags &= ~RQF_FLUSH_SEQ;
    138	rq->end_io = rq->flush.saved_end_io;
    139}
    140
    141static void blk_flush_queue_rq(struct request *rq, bool add_front)
    142{
    143	blk_mq_add_to_requeue_list(rq, add_front, true);
    144}
    145
    146static void blk_account_io_flush(struct request *rq)
    147{
    148	struct block_device *part = rq->q->disk->part0;
    149
    150	part_stat_lock();
    151	part_stat_inc(part, ios[STAT_FLUSH]);
    152	part_stat_add(part, nsecs[STAT_FLUSH],
    153		      ktime_get_ns() - rq->start_time_ns);
    154	part_stat_unlock();
    155}
    156
    157/**
    158 * blk_flush_complete_seq - complete flush sequence
    159 * @rq: PREFLUSH/FUA request being sequenced
    160 * @fq: flush queue
    161 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
    162 * @error: whether an error occurred
    163 *
    164 * @rq just completed @seq part of its flush sequence, record the
    165 * completion and trigger the next step.
    166 *
    167 * CONTEXT:
    168 * spin_lock_irq(fq->mq_flush_lock)
    169 */
    170static void blk_flush_complete_seq(struct request *rq,
    171				   struct blk_flush_queue *fq,
    172				   unsigned int seq, blk_status_t error)
    173{
    174	struct request_queue *q = rq->q;
    175	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
    176	unsigned int cmd_flags;
    177
    178	BUG_ON(rq->flush.seq & seq);
    179	rq->flush.seq |= seq;
    180	cmd_flags = rq->cmd_flags;
    181
    182	if (likely(!error))
    183		seq = blk_flush_cur_seq(rq);
    184	else
    185		seq = REQ_FSEQ_DONE;
    186
    187	switch (seq) {
    188	case REQ_FSEQ_PREFLUSH:
    189	case REQ_FSEQ_POSTFLUSH:
    190		/* queue for flush */
    191		if (list_empty(pending))
    192			fq->flush_pending_since = jiffies;
    193		list_move_tail(&rq->flush.list, pending);
    194		break;
    195
    196	case REQ_FSEQ_DATA:
    197		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
    198		blk_flush_queue_rq(rq, true);
    199		break;
    200
    201	case REQ_FSEQ_DONE:
    202		/*
    203		 * @rq was previously adjusted by blk_insert_flush() for
    204		 * flush sequencing and may already have gone through the
    205		 * flush data request completion path.  Restore @rq for
    206		 * normal completion and end it.
    207		 */
    208		BUG_ON(!list_empty(&rq->queuelist));
    209		list_del_init(&rq->flush.list);
    210		blk_flush_restore_request(rq);
    211		blk_mq_end_request(rq, error);
    212		break;
    213
    214	default:
    215		BUG();
    216	}
    217
    218	blk_kick_flush(q, fq, cmd_flags);
    219}
    220
    221static void flush_end_io(struct request *flush_rq, blk_status_t error)
    222{
    223	struct request_queue *q = flush_rq->q;
    224	struct list_head *running;
    225	struct request *rq, *n;
    226	unsigned long flags = 0;
    227	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
    228
    229	/* release the tag's ownership back to the request it was borrowed from */
    230	spin_lock_irqsave(&fq->mq_flush_lock, flags);
    231
    232	if (!req_ref_put_and_test(flush_rq)) {
    233		fq->rq_status = error;
    234		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
    235		return;
    236	}
    237
    238	blk_account_io_flush(flush_rq);
    239	/*
    240	 * The flush request has to be marked as IDLE when it is really ended
    241	 * because its .end_io() is also called from the timeout code path; this
    242	 * avoids a use-after-free.
    243	 */
    244	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
    245	if (fq->rq_status != BLK_STS_OK) {
    246		error = fq->rq_status;
    247		fq->rq_status = BLK_STS_OK;
    248	}
    249
    250	if (!q->elevator) {
    251		flush_rq->tag = BLK_MQ_NO_TAG;
    252	} else {
    253		blk_mq_put_driver_tag(flush_rq);
    254		flush_rq->internal_tag = BLK_MQ_NO_TAG;
    255	}
    256
    257	running = &fq->flush_queue[fq->flush_running_idx];
    258	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
    259
    260	/* account completion of the flush request */
    261	fq->flush_running_idx ^= 1;
    262
    263	/* and push the waiting requests to the next stage */
    264	list_for_each_entry_safe(rq, n, running, flush.list) {
    265		unsigned int seq = blk_flush_cur_seq(rq);
    266
    267		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
    268		blk_flush_complete_seq(rq, fq, seq, error);
    269	}
    270
    271	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
    272}
    273
    274bool is_flush_rq(struct request *rq)
    275{
    276	return rq->end_io == flush_end_io;
    277}
    278
    279/**
    280 * blk_kick_flush - consider issuing flush request
    281 * @q: request_queue being kicked
    282 * @fq: flush queue
    283 * @flags: cmd_flags of the original request
    284 *
    285 * Flush related states of @q have changed, consider issuing flush request.
    286 * Please read the comment at the top of this file for more info.
    287 *
    288 * CONTEXT:
    289 * spin_lock_irq(fq->mq_flush_lock)
    290 *
    291 */
    292static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
    293			   unsigned int flags)
    294{
    295	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
    296	struct request *first_rq =
    297		list_first_entry(pending, struct request, flush.list);
    298	struct request *flush_rq = fq->flush_rq;
    299
    300	/* C1 described at the top of this file */
    301	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
    302		return;
    303
    304	/* C2 and C3 */
    305	if (!list_empty(&fq->flush_data_in_flight) &&
    306	    time_before(jiffies,
    307			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
    308		return;
    309
    310	/*
    311	 * Issue flush and toggle pending_idx.  This makes pending_idx
    312	 * different from running_idx, which means flush is in flight.
    313	 */
    314	fq->flush_pending_idx ^= 1;
    315
    316	blk_rq_init(q, flush_rq);
    317
    318	/*
    319	 * When there is no I/O scheduler, borrow the driver tag from the first
    320	 * request, since the two can't be in flight at the same time, and take
    321	 * over the tag's ownership for the flush request.
    322	 *
    323	 * When an I/O scheduler is used, the flush request only needs to borrow
    324	 * the scheduler tag so that put/get of the driver tag stays balanced.
    325	 */
    326	flush_rq->mq_ctx = first_rq->mq_ctx;
    327	flush_rq->mq_hctx = first_rq->mq_hctx;
    328
    329	if (!q->elevator) {
    330		flush_rq->tag = first_rq->tag;
    331
    332		/*
    333		 * We borrow the data request's driver tag, so we have to mark
    334		 * this flush request as INFLIGHT to avoid double accounting
    335		 * of that driver tag.
    336		 */
    337		flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
    338	} else
    339		flush_rq->internal_tag = first_rq->internal_tag;
    340
    341	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
    342	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
    343	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
    344	flush_rq->end_io = flush_end_io;
    345	/*
    346	 * Order the WRITE of ->end_io against the WRITE of rq->ref.  The paired
    347	 * barrier is the one implied by refcount_inc_not_zero(), called from
    348	 * blk_mq_find_and_get_req(), which orders the WRITE/READ of flush_rq->ref
    349	 * against the READ of flush_rq->end_io.
    350	 */
    351	smp_wmb();
    352	req_ref_set(flush_rq, 1);
    353
    354	blk_flush_queue_rq(flush_rq, false);
    355}
    356
    357static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
    358{
    359	struct request_queue *q = rq->q;
    360	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
    361	struct blk_mq_ctx *ctx = rq->mq_ctx;
    362	unsigned long flags;
    363	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
    364
    365	if (q->elevator) {
    366		WARN_ON(rq->tag < 0);
    367		blk_mq_put_driver_tag(rq);
    368	}
    369
    370	/*
    371	 * After populating an empty queue, kick it to avoid a stall.  See
    372	 * the comment in flush_end_io().
    373	 */
    374	spin_lock_irqsave(&fq->mq_flush_lock, flags);
    375	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
    376	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
    377
    378	blk_mq_sched_restart(hctx);
    379}
    380
    381/**
    382 * blk_insert_flush - insert a new PREFLUSH/FUA request
    383 * @rq: request to insert
    384 *
    385 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions,
    386 * or from __blk_mq_run_hw_queue() to dispatch the request.
    387 * @rq is being submitted.  Analyze what needs to be done and put it on the
    388 * right queue.
    389 */
    390void blk_insert_flush(struct request *rq)
    391{
    392	struct request_queue *q = rq->q;
    393	unsigned long fflags = q->queue_flags;	/* may change, cache */
    394	unsigned int policy = blk_flush_policy(fflags, rq);
    395	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
    396
    397	/*
    398	 * @policy now records what operations need to be done.  Adjust
    399	 * REQ_PREFLUSH and FUA for the driver.
    400	 */
    401	rq->cmd_flags &= ~REQ_PREFLUSH;
    402	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
    403		rq->cmd_flags &= ~REQ_FUA;
    404
    405	/*
    406	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
    407	 * of those flags, we have to set REQ_SYNC to avoid skewing
    408	 * the request accounting.
    409	 */
    410	rq->cmd_flags |= REQ_SYNC;
    411
    412	/*
    413	 * An empty flush handed down from a stacking driver may
    414	 * translate into nothing if the underlying device does not
    415	 * advertise a write-back cache.  In this case, simply
    416	 * complete the request.
    417	 */
    418	if (!policy) {
    419		blk_mq_end_request(rq, 0);
    420		return;
    421	}
    422
    423	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
    424
    425	/*
    426	 * If there's data but flush is not necessary, the request can be
    427	 * processed directly without going through flush machinery.  Queue
    428	 * for normal execution.
    429	 */
    430	if ((policy & REQ_FSEQ_DATA) &&
    431	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
    432		blk_mq_request_bypass_insert(rq, false, true);
    433		return;
    434	}
    435
    436	/*
    437	 * @rq should go through flush machinery.  Mark it part of flush
    438	 * sequence and submit for further processing.
    439	 */
    440	memset(&rq->flush, 0, sizeof(rq->flush));
    441	INIT_LIST_HEAD(&rq->flush.list);
    442	rq->rq_flags |= RQF_FLUSH_SEQ;
    443	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
    444
    445	rq->end_io = mq_flush_data_end_io;
    446
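	/*
	 * Steps that @rq does not need (REQ_FSEQ_ACTIONS & ~policy) are
	 * reported as already complete, so blk_flush_complete_seq()
	 * immediately advances @rq to its first required step.  E.g. for
	 * policy == REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH, PREFLUSH is
	 * pre-marked done and the data write is queued first.
	 */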
    447	spin_lock_irq(&fq->mq_flush_lock);
    448	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
    449	spin_unlock_irq(&fq->mq_flush_lock);
    450}
    451
    452/**
    453 * blkdev_issue_flush - queue a flush
    454 * @bdev:	blockdev to issue flush for
    455 *
    456 * Description:
    457 *    Issue a flush for the block device in question.
    458 */
    459int blkdev_issue_flush(struct block_device *bdev)
    460{
    461	struct bio bio;
    462
    463	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
    464	return submit_bio_wait(&bio);
    465}
    466EXPORT_SYMBOL(blkdev_issue_flush);
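
/*
 * Minimal usage sketch: the caller only needs an opened block_device.  The
 * helper name example_flush_device is hypothetical; blkdev_issue_flush()
 * itself returns 0 on success or a negative errno from the flush bio.
 */
static inline int example_flush_device(struct block_device *bdev)
{
	int err;

	/* force previously completed writes out of the volatile cache */
	err = blkdev_issue_flush(bdev);
	if (err)
		pr_warn("cache flush failed: %d\n", err);
	return err;
}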
    467
    468struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
    469					      gfp_t flags)
    470{
    471	struct blk_flush_queue *fq;
    472	int rq_sz = sizeof(struct request);
    473
    474	fq = kzalloc_node(sizeof(*fq), flags, node);
    475	if (!fq)
    476		goto fail;
    477
    478	spin_lock_init(&fq->mq_flush_lock);
    479
    480	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
    481	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
    482	if (!fq->flush_rq)
    483		goto fail_rq;
    484
    485	INIT_LIST_HEAD(&fq->flush_queue[0]);
    486	INIT_LIST_HEAD(&fq->flush_queue[1]);
    487	INIT_LIST_HEAD(&fq->flush_data_in_flight);
    488
    489	return fq;
    490
    491 fail_rq:
    492	kfree(fq);
    493 fail:
    494	return NULL;
    495}
    496
    497void blk_free_flush_queue(struct blk_flush_queue *fq)
    498{
    499	/* a bio-based request queue has no flush queue */
    500	if (!fq)
    501		return;
    502
    503	kfree(fq->flush_rq);
    504	kfree(fq);
    505}
    506
    507/*
    508 * Allow a driver to set its own lock class for fq->mq_flush_lock in order
    509 * to avoid a lockdep complaint.
    510 *
    511 * flush_end_io() may be called recursively by some drivers, such as
    512 * nvme-loop, so lockdep may report 'possible recursive locking' because
    513 * all 'struct blk_flush_queue' instances share the same mq_flush_lock lock
    514 * class key.  Such drivers need to assign a different lock class to their
    515 * fq->mq_flush_lock to silence the lockdep warning.
    516 *
    517 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
    518 * instance would be overkill and, worse, it introduces a horrible boot
    519 * delay because synchronize_rcu() is implied by lockdep_unregister_key(),
    520 * which is called for each hctx release.  SCSI probing may synchronously
    521 * create and destroy lots of MQ request_queues for non-existent devices,
    522 * and some robot test kernels always enable the lockdep option.  More than
    523 * half an hour has been observed for SCSI MQ probing with per-fq lock classes.
    524 */
    525void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
    526		struct lock_class_key *key)
    527{
    528	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
    529}
    530EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);
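
/*
 * Minimal usage sketch, modelled on how nvme-loop uses this hook; the
 * identifiers example_fq_lock_key and example_init_hctx are hypothetical.
 * A driver declares one static lock_class_key and applies it to every
 * hardware context's flush queue from its ->init_hctx() callback, giving
 * its flush queues a lock class distinct from other queues in the stack.
 */
static struct lock_class_key example_fq_lock_key;

static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int hctx_idx)
{
	blk_mq_hctx_set_fq_lock_class(hctx, &example_fq_lock_key);
	return 0;
}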