cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-timeout.c (4216B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"
#include "blk-mq.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);
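
/*
 * Example (a sketch; setup_fault_attr() parses the generic
 * fault-injection boot-parameter syntax documented in
 * Documentation/fault-injection/fault-injection.rst, i.e.
 * "<interval>,<probability>,<space>,<times>"). Booting with
 *
 *	fail_io_timeout=1,10,0,-1
 *
 * would fake a timeout for roughly 10% of eligible requests, with no
 * limit on how often it fires. The numbers are purely illustrative.
 */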

bool __blk_should_fake_timeout(struct request_queue *q)
{
	return should_fail(&fail_io_timeout, 1);
}
EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);

static int __init fail_io_timeout_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
						NULL, &fail_io_timeout);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_io_timeout_debugfs);
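
/*
 * Example (a sketch; these are the standard fault_attr knobs that
 * fault_create_debugfs_attr() exposes, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo 100 > /sys/kernel/debug/fail_io_timeout/probability
 *	echo -1  > /sys/kernel/debug/fail_io_timeout/times
 *
 * makes every subsequent should_fail() check report a fake timeout,
 * but only on queues that also have QUEUE_FLAG_FAIL_IO set (see the
 * sysfs handlers below).
 */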

ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

	return sprintf(buf, "%d\n", set != 0);
}

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		if (val)
			blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
	}

	return count;
}
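
/*
 * Example (a sketch; in mainline these handlers back a per-disk
 * "io-timeout-fail" sysfs attribute, though the attribute itself is
 * defined in the genhd code, not here):
 *
 *	echo 1 > /sys/block/sda/io-timeout-fail
 *
 * sets QUEUE_FLAG_FAIL_IO on sda's queue so fault injection can start
 * faking timeouts for it; echoing 0 clears the flag again.
 */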

#endif /* CONFIG_FAIL_IO_TIMEOUT */

/**
 * blk_abort_request - Request recovery for the specified command
 * @req:	pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by marking its deadline as already expired and scheduling the
 * queue's timeout handler. LLDDs that implement their own error recovery
 * MAY ignore the timeout event if they generated blk_abort_request.
 */
void blk_abort_request(struct request *req)
{
	/*
	 * All we need to ensure is that the timeout scan takes place
	 * immediately and that the scan sees the new timeout value.
	 * No need for fancy synchronization.
	 */
	WRITE_ONCE(req->deadline, jiffies);
	kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
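
/*
 * Example (a hypothetical sketch, not taken from any real driver): an
 * LLD's error handler that has decided a device is gone could force
 * immediate timeout handling of a request it still owns:
 *
 *	static void mydrv_abort_stuck_request(struct request *rq)
 *	{
 *		blk_abort_request(rq);	// expire now, let ->timeout() run
 *	}
 *
 * The scheduled timeout work then finds the request already past its
 * deadline on the next scan and invokes the driver's timeout handler.
 */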

static unsigned long blk_timeout_mask __read_mostly;

static int __init blk_timeout_init(void)
{
	blk_timeout_mask = roundup_pow_of_two(HZ) - 1;
	return 0;
}

late_initcall(blk_timeout_init);

/*
 * Just a rough estimate, we don't care about specific values for timeouts.
 */
static inline unsigned long blk_round_jiffies(unsigned long j)
{
	return (j + blk_timeout_mask) + 1;
}
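
/*
 * Worked example (HZ=1000 assumed purely for illustration):
 * roundup_pow_of_two(1000) is 1024, so blk_timeout_mask is 1023 and
 * blk_round_jiffies(j) returns j + 1024, a point roughly one second
 * past j. This deliberately trades the exact second boundary of
 * round_jiffies_up() for a cheap mask-based estimate with no division.
 */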

unsigned long blk_rq_timeout(unsigned long timeout)
{
	unsigned long maxt;

	maxt = blk_round_jiffies(jiffies + BLK_MAX_TIMEOUT);
	if (time_after(timeout, maxt))
		timeout = maxt;

	return timeout;
}
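
/*
 * Example (a sketch; BLK_MAX_TIMEOUT is defined in blk.h): if a caller
 * asks for a deadline ten minutes out, the time_after() check above
 * clamps the timer expiry to roughly jiffies + BLK_MAX_TIMEOUT instead.
 * The queue timer therefore always fires within that bound, and later
 * scans simply re-arm it until the request's real deadline passes.
 */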

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:	request that is about to start running.
 *
 * Notes:
 *    Each request gets its own deadline as it starts running, and the
 *    queue's shared timeout timer is (re)armed to cover it. A completed
 *    request is simply skipped by the timeout scan.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/*
	 * Some LLDs, like scsi, peek at the timeout to prevent a
	 * command from being retried forever.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	req->rq_flags &= ~RQF_TIMED_OUT;

	expiry = jiffies + req->timeout;
	WRITE_ONCE(req->deadline, expiry);

	/*
	 * If the timer isn't already pending or this timeout is earlier
	 * than an existing one, modify the timer. Round up to the next
	 * second.
	 */
	expiry = blk_rq_timeout(blk_round_jiffies(expiry));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to the slack added to group timers, the timer will
		 * often expire a little later than what we asked for. So
		 * apply some tolerance here too, otherwise we keep
		 * modifying the timer because expires for value X
		 * will be X + something.
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}
}
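
/*
 * Worked example of the HZ/2 tolerance above (HZ=1000 assumed for
 * illustration): if the queue timer is already pending at
 * expires = X + 300 jiffies and a new request computes expiry = X,
 * then diff is 300, under half a second, so mod_timer() is skipped
 * and the pending timer is reused rather than being rewritten for
 * every request that arrives slightly earlier.
 */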