cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

dm-bio-prison-v1.h (4563B)


      1/*
      2 * Copyright (C) 2011-2017 Red Hat, Inc.
      3 *
      4 * This file is released under the GPL.
      5 */
      6
      7#ifndef DM_BIO_PRISON_H
      8#define DM_BIO_PRISON_H
      9
     10#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
     11#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */
     12
     13#include <linux/bio.h>
     14#include <linux/rbtree.h>
     15
     16/*----------------------------------------------------------------*/
     17
     18/*
     19 * Sometimes we can't deal with a bio straight away.  We put them in prison
     20 * where they can't cause any mischief.  Bios are put in a cell identified
     21 * by a key, multiple bios can be in the same cell.  When the cell is
     22 * subsequently unlocked the bios become available.
     23 */
     24struct dm_bio_prison;
     25
     26/*
     27 * Keys define a range of blocks within either a virtual or physical
     28 * device.
     29 */
struct dm_cell_key {
	int virtual;			/* non-zero: range is in the virtual (thin) address space; zero: physical device blocks */
	dm_thin_id dev;			/* device the range belongs to */
	dm_block_t block_begin, block_end;	/* block range covered by this key -- NOTE(review): presumably end-exclusive; confirm in dm-bio-prison-v1.c */
};
     35
     36/*
     37 * Treat this as opaque, only in header so callers can manage allocation
     38 * themselves.
     39 */
struct dm_bio_prison_cell {
	struct list_head user_list;	/* for client use */
	struct rb_node node;		/* linkage in the prison's rbtree of cells */

	struct dm_cell_key key;		/* key identifying the block range this cell locks */
	struct bio *holder;		/* the bio that owns the cell (see dm_bio_detain()) */
	struct bio_list bios;		/* further bios detained behind the holder */
};
     48
/*
 * Allocate/free a prison.  Pair every dm_bio_prison_create() with a
 * dm_bio_prison_destroy().  NOTE(review): presumably returns NULL on
 * allocation failure -- confirm against the implementation.
 */
struct dm_bio_prison *dm_bio_prison_create(void);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);
     51
     52/*
     53 * These two functions just wrap a mempool.  This is a transitory step:
     54 * Eventually all bio prison clients should manage their own cell memory.
     55 *
     56 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
     57 * in interrupt context or passed GFP_NOWAIT.
     58 */
     59struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
     60						    gfp_t gfp);
     61void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
     62			     struct dm_bio_prison_cell *cell);
     63
     64/*
     65 * Creates, or retrieves a cell that overlaps the given key.
     66 *
     67 * Returns 1 if pre-existing cell returned, zero if new cell created using
     68 * @cell_prealloc.
     69 */
     70int dm_get_cell(struct dm_bio_prison *prison,
     71		struct dm_cell_key *key,
     72		struct dm_bio_prison_cell *cell_prealloc,
     73		struct dm_bio_prison_cell **cell_result);
     74
     75/*
     76 * An atomic op that combines retrieving or creating a cell, and adding a
     77 * bio to it.
     78 *
     79 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
     80 */
     81int dm_bio_detain(struct dm_bio_prison *prison,
     82		  struct dm_cell_key *key,
     83		  struct bio *inmate,
     84		  struct dm_bio_prison_cell *cell_prealloc,
     85		  struct dm_bio_prison_cell **cell_result);
     86
/*
 * Unlock a cell.
 *
 * dm_cell_release() hands the holder and all detained bios back via @bios;
 * dm_cell_release_no_holder() hands back only the non-holder inmates via
 * @inmates.  dm_cell_error() instead completes the cell's bios with
 * @error.  NOTE(review): all three presumably remove the cell from the
 * prison; the caller still owns the cell memory (see
 * dm_bio_prison_free_cell()) -- confirm in dm-bio-prison-v1.c.
 */
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, blk_status_t error);
     95
     96/*
     97 * Visits the cell and then releases.  Guarantees no new inmates are
     98 * inserted between the visit and release.
     99 */
    100void dm_cell_visit_release(struct dm_bio_prison *prison,
    101			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
    102			   void *context, struct dm_bio_prison_cell *cell);
    103
    104/*
    105 * Rather than always releasing the prisoners in a cell, the client may
    106 * want to promote one of them to be the new holder.  There is a race here
    107 * though between releasing an empty cell, and other threads adding new
    108 * inmates.  So this function makes the decision with its lock held.
    109 *
    110 * This function can have two outcomes:
    111 * i) An inmate is promoted to be the holder of the cell (return value of 0).
    112 * ii) The cell has no inmate for promotion and is released (return value of 1).
    113 */
    114int dm_cell_promote_or_release(struct dm_bio_prison *prison,
    115			       struct dm_bio_prison_cell *cell);
    116
    117/*----------------------------------------------------------------*/
    118
    119/*
    120 * We use the deferred set to keep track of pending reads to shared blocks.
    121 * We do this to ensure the new mapping caused by a write isn't performed
    122 * until these prior reads have completed.  Otherwise the insertion of the
    123 * new mapping could free the old block that the read bios are mapped to.
    124 */
    125
/* Opaque to callers; defined in the implementation. */
struct dm_deferred_set;
struct dm_deferred_entry;

/*
 * Allocate/free a deferred set.  NOTE(review): create presumably returns
 * NULL on allocation failure -- confirm against the implementation.
 */
struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

/*
 * Track an in-flight operation (e.g. a read of a shared block, per the
 * comment above): take an entry with _inc(), drop it with _dec().
 * NOTE(review): _dec() appears to collect work items that became runnable
 * onto @head for the caller to issue -- confirm in dm-bio-prison-v1.c.
 */
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
/* Queue @work to run once outstanding entries drain; return value semantics not visible here -- see implementation. */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
    135
    136/*----------------------------------------------------------------*/
    137
    138#endif