cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-rq-qos.h (5696B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
	RQ_QOS_IOPRIO,
};

struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * No IO can be in-flight when adding rqos, so freeze queue, which
	 * is fine since we only support rq_qos for blk-mq queue.
	 *
	 * Reuse ->queue_lock for protecting against other concurrent
	 * rq_qos adding/deleting
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_rqos(rqos);
		mutex_unlock(&q->debugfs_mutex);
	}
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See comment in rq_qos_add() about freezing queue & using
	 * ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_rqos(rqos);
	mutex_unlock(&q->debugfs_mutex);
}

typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct bio *bio)
{
	if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
			     bio_flagged(bio, BIO_QOS_MERGED))) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		if (q->rq_qos)
			__rq_qos_done_bio(q->rq_qos, bio);
	}
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_THROTTLED);
		__rq_qos_throttle(q->rq_qos, bio);
	}
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos) {
		bio_set_flag(bio, BIO_QOS_MERGED);
		__rq_qos_merge(q->rq_qos, rq, bio);
	}
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

void rq_qos_exit(struct request_queue *);

#endif
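
For orientation, the sketch below shows how a policy would typically plug into this interface: embed a struct rq_qos, fill in an rq_qos_ops table, and splice it onto the queue with rq_qos_add(). It is illustrative only and not part of the file; the demo_* names, the inflight limit of 32, and the reuse of the RQ_QOS_WBT id are made up for the example, and a real policy (blk-wbt, blk-iolatency, blk-iocost, blk-ioprio) tracks per-request state instead of decrementing unconditionally in its done hook.

/*
 * Illustrative sketch only: a toy rq_qos policy built against the
 * interface declared in blk-rq-qos.h. All demo_* identifiers are
 * hypothetical and do not exist in the tree.
 */
#include <linux/slab.h>
#include "blk-rq-qos.h"

struct demo_rqos {
	struct rq_qos rqos;	/* embedded; recovered via container_of() */
	struct rq_wait rqw;	/* counts in-flight bios against a limit */
};

/* acquire_inflight_cb_t: succeed only while fewer than 32 bios are in flight */
static bool demo_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	return rq_wait_inc_below(rqw, 32);
}

/* cleanup_cb_t: nothing to undo in this toy policy */
static void demo_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
}

/* ->throttle: sleep in rq_qos_wait() until demo_inflight_cb() succeeds */
static void demo_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct demo_rqos *demo = container_of(rqos, struct demo_rqos, rqos);

	rq_qos_wait(&demo->rqw, NULL, demo_inflight_cb, demo_cleanup_cb);
}

/* ->done: release one in-flight slot and wake throttled submitters */
static void demo_done(struct rq_qos *rqos, struct request *rq)
{
	struct demo_rqos *demo = container_of(rqos, struct demo_rqos, rqos);

	atomic_dec(&demo->rqw.inflight);
	wake_up_all(&demo->rqw.wait);
}

/* ->exit: called from rq_qos_exit() when the queue goes away */
static void demo_exit(struct rq_qos *rqos)
{
	kfree(container_of(rqos, struct demo_rqos, rqos));
}

static struct rq_qos_ops demo_ops = {
	.throttle	= demo_throttle,
	.done		= demo_done,
	.exit		= demo_exit,
};

/* Attach the toy policy to a request queue. */
static int demo_init(struct request_queue *q)
{
	struct demo_rqos *demo;

	demo = kzalloc(sizeof(*demo), GFP_KERNEL);
	if (!demo)
		return -ENOMEM;

	rq_wait_init(&demo->rqw);
	demo->rqos.id = RQ_QOS_WBT;	/* id reused only for this sketch */
	demo->rqos.ops = &demo_ops;
	demo->rqos.q = q;

	/* splices the policy onto q->rq_qos under a frozen queue */
	rq_qos_add(q, &demo->rqos);
	return 0;
}

Once added, the inline rq_qos_throttle()/rq_qos_done() wrappers above fan submissions and completions out to demo_ops through the q->rq_qos list, and rq_qos_del() or rq_qos_exit() detaches the policy again.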