cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-mq-sched.h (2608B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "elevator.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);

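/*
 * Re-run the hardware queue, but only if a restart was requested via
 * blk_mq_sched_mark_restart_hctx() while the queue was blocked.
 */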
static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		__blk_mq_sched_restart(hctx);
}

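/* A bio is mergeable unless it carries any of the REQ_NOMERGE_FLAGS. */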
static inline bool bio_mergeable(struct bio *bio)
{
	return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}

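/*
 * Give the elevator a chance to veto merging @bio into @rq; when no
 * elevator is involved (or it has no allow_merge hook), merging is
 * always allowed.
 */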
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.allow_merge)
			return e->type->ops.allow_merge(q, rq, bio);
	}
	return true;
}

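/* Tell the elevator that @rq completed, passing the completion time @now. */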
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = rq->q->elevator;

		if (e->type->ops.completed_request)
			e->type->ops.completed_request(rq, now);
	}
}

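/*
 * Tell the elevator that one of the requests it owns (RQF_ELVPRIV) is
 * being requeued.
 */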
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ELV) {
		struct request_queue *q = rq->q;
		struct elevator_queue *e = q->elevator;

		if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request)
			e->type->ops.requeue_request(rq);
	}
}

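/* Ask the elevator whether it has work pending for this hardware queue. */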
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.has_work)
		return e->type->ops.has_work(hctx);

	return false;
}

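/* Has a queue restart been requested for this hardware queue? */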
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif
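
The ops dereferenced above (allow_merge, completed_request, requeue_request,
has_work) are optional hooks a blk-mq I/O scheduler exports through its
struct elevator_type. The sketch below shows how such a scheduler might wire
them up; the "sketch_" names are hypothetical, the field names follow the
dereferences in this header, and the code only builds inside a kernel tree.

/*
 * Hypothetical no-op scheduler: illustrates the hook signatures implied
 * by the call sites in blk-mq-sched.h. Not a real in-tree scheduler.
 */
#include <linux/module.h>
#include "elevator.h"
#include "blk-mq.h"

static bool sketch_has_work(struct blk_mq_hw_ctx *hctx)
{
	return false;	/* nothing queued internally, so never any work */
}

static bool sketch_allow_merge(struct request_queue *q, struct request *rq,
			       struct bio *bio)
{
	return true;	/* never veto a merge */
}

static void sketch_completed_request(struct request *rq, u64 now)
{
	/* e.g. update latency accounting using the completion time @now */
}

static struct elevator_type sketch_sched = {
	.ops = {
		.has_work		= sketch_has_work,
		.allow_merge		= sketch_allow_merge,
		.completed_request	= sketch_completed_request,
		/*
		 * A functional scheduler would also supply the dispatch
		 * hooks, e.g. insert_requests and dispatch_request.
		 */
	},
	.elevator_name	= "sketch",
	.elevator_owner	= THIS_MODULE,
};

/* Registration would then go through elv_register(&sketch_sched). */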