cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dm-core.h (6779B)


/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

/*
 * For mempool pre-allocation at table load time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
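
/*
 * Sketch of the intended access pattern (illustrative only): code outside
 * the core should not dereference md->map directly but take the
 * SRCU-protected live table, assuming the dm_get_live_table() /
 * dm_put_live_table() API declared in include/linux/device-mapper.h:
 *
 *	int srcu_idx;
 *	struct dm_table *t = dm_get_live_table(md, &srcu_idx);
 *
 *	if (t) {
 *		// inspect the live table here
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */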

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);
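
/*
 * These static keys gate rarely-enabled paths (statistics, swap bio
 * throttling, zoned emulation). A hedged usage sketch, assuming the
 * static_branch_unlikely() helper from <linux/jump_label.h> (included
 * above), with the key flipped via static_branch_enable() when the
 * feature is first configured:
 *
 *	if (static_branch_unlikely(&stats_enabled))
 *		dm_stats_account_io(...);
 */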

static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};
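
/*
 * Note on the btree fields above (a descriptive sketch, not normative):
 * depth, counts[] and index[] describe a small search tree over target
 * boundaries, with highs[] holding the last sector served by each target,
 * so resolving a sector to a dm_target walks at most DM_TABLE_MAX_DEPTH
 * levels; see dm_table_find_target() in dm-table.c for the actual lookup.
 */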

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};

/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	struct bio *split_bio;
	/* These three fields represent the mapped part of the original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

/*
 * dm_io flags
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT
};

static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}
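
/*
 * Illustrative use of the flag helpers above (sketch only), e.g. marking
 * an io as accounted exactly once:
 *
 *	if (!dm_io_flagged(io, DM_IO_ACCOUNTED))
 *		dm_io_set_flag(io, DM_IO_ACCOUNTED);
 */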

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
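
/*
 * Sketch of the intended use (assumes a target status/message handler that
 * fills "result" of size "maxlen", e.g. via the DMEMIT() macro from
 * include/linux/device-mapper.h):
 *
 *	DMEMIT("...");
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;	// output did not fit in the supplied buffer
 */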

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif