cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

device-mapper.h (19542B)


      1/*
      2 * Copyright (C) 2001 Sistina Software (UK) Limited.
      3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
      4 *
      5 * This file is released under the LGPL.
      6 */
      7
      8#ifndef _LINUX_DEVICE_MAPPER_H
      9#define _LINUX_DEVICE_MAPPER_H
     10
     11#include <linux/bio.h>
     12#include <linux/blkdev.h>
     13#include <linux/dm-ioctl.h>
     14#include <linux/math64.h>
     15#include <linux/ratelimit.h>
     16
     17struct dm_dev;
     18struct dm_target;
     19struct dm_table;
     20struct dm_report_zones_args;
     21struct mapped_device;
     22struct bio_vec;
     23enum dax_access_mode;
     24
     25/*
     26 * Type of table, mapped_device's mempool and request_queue
     27 */
     28enum dm_queue_mode {
     29	DM_TYPE_NONE		 = 0,
     30	DM_TYPE_BIO_BASED	 = 1,
     31	DM_TYPE_REQUEST_BASED	 = 2,
     32	DM_TYPE_DAX_BIO_BASED	 = 3,
     33};
     34
     35typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;
     36
     37union map_info {
     38	void *ptr;
     39};
     40
     41/*
     42 * In the constructor the target parameter will already have the
     43 * table, type, begin and len fields filled in.
     44 */
     45typedef int (*dm_ctr_fn) (struct dm_target *target,
     46			  unsigned int argc, char **argv);
     47
     48/*
     49 * The destructor doesn't need to free the dm_target, just
     50 * anything hidden ti->private.
     51 */
     52typedef void (*dm_dtr_fn) (struct dm_target *ti);
     53
     54/*
     55 * The map function must return:
     56 * < 0: error
     57 * = 0: The target will handle the io by resubmitting it later
     58 * = 1: simple remap complete
     59 * = 2: The target wants to push back the io
     60 */
     61typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
     62typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
     63					    struct request *rq,
     64					    union map_info *map_context,
     65					    struct request **clone);
     66typedef void (*dm_release_clone_request_fn) (struct request *clone,
     67					     union map_info *map_context);
     68
     69/*
     70 * Returns:
     71 * < 0 : error (currently ignored)
     72 * 0   : ended successfully
     73 * 1   : for some reason the io has still not completed (eg,
     74 *       multipath target might want to requeue a failed io).
     75 * 2   : The target wants to push back the io
     76 */
     77typedef int (*dm_endio_fn) (struct dm_target *ti,
     78			    struct bio *bio, blk_status_t *error);
     79typedef int (*dm_request_endio_fn) (struct dm_target *ti,
     80				    struct request *clone, blk_status_t error,
     81				    union map_info *map_context);
     82
     83typedef void (*dm_presuspend_fn) (struct dm_target *ti);
     84typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
     85typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
     86typedef int (*dm_preresume_fn) (struct dm_target *ti);
     87typedef void (*dm_resume_fn) (struct dm_target *ti);
     88
     89typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
     90			      unsigned status_flags, char *result, unsigned maxlen);
     91
     92typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
     93			      char *result, unsigned maxlen);
     94
     95typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
     96
     97#ifdef CONFIG_BLK_DEV_ZONED
     98typedef int (*dm_report_zones_fn) (struct dm_target *ti,
     99				   struct dm_report_zones_args *args,
    100				   unsigned int nr_zones);
    101#else
    102/*
    103 * Define dm_report_zones_fn so that targets can assign to NULL if
    104 * CONFIG_BLK_DEV_ZONED disabled. Otherwise each target needs to do
    105 * awkward #ifdefs in their target_type, etc.
    106 */
    107typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
    108#endif
    109
    110/*
    111 * These iteration functions are typically used to check (and combine)
    112 * properties of underlying devices.
    113 * E.g. Does at least one underlying device support flush?
    114 *      Does any underlying device not support WRITE_SAME?
    115 *
    116 * The callout function is called once for each contiguous section of
    117 * an underlying device.  State can be maintained in *data.
    118 * Return non-zero to stop iterating through any further devices.
    119 */
    120typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
    121					   struct dm_dev *dev,
    122					   sector_t start, sector_t len,
    123					   void *data);
    124
    125/*
    126 * This function must iterate through each section of device used by the
    127 * target until it encounters a non-zero return code, which it then returns.
    128 * Returns zero if no callout returned non-zero.
    129 */
    130typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
    131				      iterate_devices_callout_fn fn,
    132				      void *data);
    133
    134typedef void (*dm_io_hints_fn) (struct dm_target *ti,
    135				struct queue_limits *limits);
    136
    137/*
    138 * Returns:
    139 *    0: The target can handle the next I/O immediately.
    140 *    1: The target can't handle the next I/O immediately.
    141 */
    142typedef int (*dm_busy_fn) (struct dm_target *ti);
    143
    144/*
    145 * Returns:
    146 *  < 0 : error
    147 * >= 0 : the number of bytes accessible at the address
    148 */
    149typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
    150		long nr_pages, enum dax_access_mode node, void **kaddr,
    151		pfn_t *pfn);
    152typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
    153		size_t nr_pages);
    154
    155/*
    156 * Returns:
    157 * != 0 : number of bytes transferred
    158 * 0    : recovery write failed
    159 */
    160typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
    161		void *addr, size_t bytes, struct iov_iter *i);
    162
    163void dm_error(const char *message);
    164
    165struct dm_dev {
    166	struct block_device *bdev;
    167	struct dax_device *dax_dev;
    168	fmode_t mode;
    169	char name[16];
    170};
    171
    172dev_t dm_get_dev_t(const char *path);
    173
    174/*
    175 * Constructors should call these functions to ensure destination devices
    176 * are opened/closed correctly.
    177 */
    178int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
    179		  struct dm_dev **result);
    180void dm_put_device(struct dm_target *ti, struct dm_dev *d);
    181
    182/*
    183 * Information about a target type
    184 */
    185
    186struct target_type {
    187	uint64_t features;
    188	const char *name;
    189	struct module *module;
    190	unsigned version[3];
    191	dm_ctr_fn ctr;
    192	dm_dtr_fn dtr;
    193	dm_map_fn map;
    194	dm_clone_and_map_request_fn clone_and_map_rq;
    195	dm_release_clone_request_fn release_clone_rq;
    196	dm_endio_fn end_io;
    197	dm_request_endio_fn rq_end_io;
    198	dm_presuspend_fn presuspend;
    199	dm_presuspend_undo_fn presuspend_undo;
    200	dm_postsuspend_fn postsuspend;
    201	dm_preresume_fn preresume;
    202	dm_resume_fn resume;
    203	dm_status_fn status;
    204	dm_message_fn message;
    205	dm_prepare_ioctl_fn prepare_ioctl;
    206	dm_report_zones_fn report_zones;
    207	dm_busy_fn busy;
    208	dm_iterate_devices_fn iterate_devices;
    209	dm_io_hints_fn io_hints;
    210	dm_dax_direct_access_fn direct_access;
    211	dm_dax_zero_page_range_fn dax_zero_page_range;
    212	dm_dax_recovery_write_fn dax_recovery_write;
    213
    214	/* For internal device-mapper use. */
    215	struct list_head list;
    216};
    217
    218/*
    219 * Target features
    220 */
    221
    222/*
    223 * Any table that contains an instance of this target must have only one.
    224 */
    225#define DM_TARGET_SINGLETON		0x00000001
    226#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)
    227
    228/*
    229 * Indicates that a target does not support read-only devices.
    230 */
    231#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
    232#define dm_target_always_writeable(type) \
    233		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)
    234
    235/*
    236 * Any device that contains a table with an instance of this target may never
    237 * have tables containing any different target type.
    238 */
    239#define DM_TARGET_IMMUTABLE		0x00000004
    240#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)
    241
    242/*
    243 * Indicates that a target may replace any target; even immutable targets.
    244 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
    245 */
    246#define DM_TARGET_WILDCARD		0x00000008
    247#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)
    248
    249/*
    250 * A target implements own bio data integrity.
    251 */
    252#define DM_TARGET_INTEGRITY		0x00000010
    253#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)
    254
    255/*
    256 * A target passes integrity data to the lower device.
    257 */
    258#define DM_TARGET_PASSES_INTEGRITY	0x00000020
    259#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
    260
    261/*
    262 * Indicates support for zoned block devices:
    263 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
    264 *   block devices but does not support combining different zoned models.
    265 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
    266 *   devices with different zoned models.
    267 */
    268#ifdef CONFIG_BLK_DEV_ZONED
    269#define DM_TARGET_ZONED_HM		0x00000040
    270#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
    271#else
    272#define DM_TARGET_ZONED_HM		0x00000000
    273#define dm_target_supports_zoned_hm(type) (false)
    274#endif
    275
    276/*
    277 * A target handles REQ_NOWAIT
    278 */
    279#define DM_TARGET_NOWAIT		0x00000080
    280#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
    281
    282/*
    283 * A target supports passing through inline crypto support.
    284 */
    285#define DM_TARGET_PASSES_CRYPTO		0x00000100
    286#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
    287
    288#ifdef CONFIG_BLK_DEV_ZONED
    289#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
    290#define dm_target_supports_mixed_zoned_model(type) \
    291	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
    292#else
    293#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
    294#define dm_target_supports_mixed_zoned_model(type) (false)
    295#endif
    296
    297struct dm_target {
    298	struct dm_table *table;
    299	struct target_type *type;
    300
    301	/* target limits */
    302	sector_t begin;
    303	sector_t len;
    304
    305	/* If non-zero, maximum size of I/O submitted to a target. */
    306	uint32_t max_io_len;
    307
    308	/*
    309	 * A number of zero-length barrier bios that will be submitted
    310	 * to the target for the purpose of flushing cache.
    311	 *
    312	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
    313	 * It is a responsibility of the target driver to remap these bios
    314	 * to the real underlying devices.
    315	 */
    316	unsigned num_flush_bios;
    317
    318	/*
    319	 * The number of discard bios that will be submitted to the target.
    320	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
    321	 */
    322	unsigned num_discard_bios;
    323
    324	/*
    325	 * The number of secure erase bios that will be submitted to the target.
    326	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
    327	 */
    328	unsigned num_secure_erase_bios;
    329
    330	/*
    331	 * The number of WRITE ZEROES bios that will be submitted to the target.
    332	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
    333	 */
    334	unsigned num_write_zeroes_bios;
    335
    336	/*
    337	 * The minimum number of extra bytes allocated in each io for the
    338	 * target to use.
    339	 */
    340	unsigned per_io_data_size;
    341
    342	/* target specific data */
    343	void *private;
    344
    345	/* Used to provide an error string from the ctr */
    346	char *error;
    347
    348	/*
    349	 * Set if this target needs to receive flushes regardless of
    350	 * whether or not its underlying devices have support.
    351	 */
    352	bool flush_supported:1;
    353
    354	/*
    355	 * Set if this target needs to receive discards regardless of
    356	 * whether or not its underlying devices have support.
    357	 */
    358	bool discards_supported:1;
    359
    360	/*
    361	 * Set if we need to limit the number of in-flight bios when swapping.
    362	 */
    363	bool limit_swap_bios:1;
    364
    365	/*
    366	 * Set if this target implements a zoned device and needs emulation of
    367	 * zone append operations using regular writes.
    368	 */
    369	bool emulate_zone_append:1;
    370
    371	/*
    372	 * Set if the target will submit IO using dm_submit_bio_remap()
    373	 * after returning DM_MAPIO_SUBMITTED from its map function.
    374	 */
    375	bool accounts_remapped_io:1;
    376};
    377
    378void *dm_per_bio_data(struct bio *bio, size_t data_size);
    379struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
    380unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
    381
    382u64 dm_start_time_ns_from_clone(struct bio *bio);
    383
    384int dm_register_target(struct target_type *t);
    385void dm_unregister_target(struct target_type *t);
    386
    387/*
    388 * Target argument parsing.
    389 */
    390struct dm_arg_set {
    391	unsigned argc;
    392	char **argv;
    393};
    394
    395/*
    396 * The minimum and maximum value of a numeric argument, together with
    397 * the error message to use if the number is found to be outside that range.
    398 */
    399struct dm_arg {
    400	unsigned min;
    401	unsigned max;
    402	char *error;
    403};
    404
    405/*
    406 * Validate the next argument, either returning it as *value or, if invalid,
    407 * returning -EINVAL and setting *error.
    408 */
    409int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
    410		unsigned *value, char **error);
    411
    412/*
    413 * Process the next argument as the start of a group containing between
    414 * arg->min and arg->max further arguments. Either return the size as
    415 * *num_args or, if invalid, return -EINVAL and set *error.
    416 */
    417int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
    418		      unsigned *num_args, char **error);
    419
    420/*
    421 * Return the current argument and shift to the next.
    422 */
    423const char *dm_shift_arg(struct dm_arg_set *as);
    424
    425/*
    426 * Move through num_args arguments.
    427 */
    428void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
    429
    430/*-----------------------------------------------------------------
    431 * Functions for creating and manipulating mapped devices.
    432 * Drop the reference with dm_put when you finish with the object.
    433 *---------------------------------------------------------------*/
    434
    435/*
    436 * DM_ANY_MINOR chooses the next available minor number.
    437 */
    438#define DM_ANY_MINOR (-1)
    439int dm_create(int minor, struct mapped_device **md);
    440
    441/*
    442 * Reference counting for md.
    443 */
    444struct mapped_device *dm_get_md(dev_t dev);
    445void dm_get(struct mapped_device *md);
    446int dm_hold(struct mapped_device *md);
    447void dm_put(struct mapped_device *md);
    448
    449/*
    450 * An arbitrary pointer may be stored alongside a mapped device.
    451 */
    452void dm_set_mdptr(struct mapped_device *md, void *ptr);
    453void *dm_get_mdptr(struct mapped_device *md);
    454
    455/*
    456 * A device can still be used while suspended, but I/O is deferred.
    457 */
    458int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
    459int dm_resume(struct mapped_device *md);
    460
    461/*
    462 * Event functions.
    463 */
    464uint32_t dm_get_event_nr(struct mapped_device *md);
    465int dm_wait_event(struct mapped_device *md, int event_nr);
    466uint32_t dm_next_uevent_seq(struct mapped_device *md);
    467void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
    468
    469/*
    470 * Info functions.
    471 */
    472const char *dm_device_name(struct mapped_device *md);
    473int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
    474struct gendisk *dm_disk(struct mapped_device *md);
    475int dm_suspended(struct dm_target *ti);
    476int dm_post_suspending(struct dm_target *ti);
    477int dm_noflush_suspending(struct dm_target *ti);
    478void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
    479void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
    480union map_info *dm_get_rq_mapinfo(struct request *rq);
    481
    482#ifdef CONFIG_BLK_DEV_ZONED
    483struct dm_report_zones_args {
    484	struct dm_target *tgt;
    485	sector_t next_sector;
    486
    487	void *orig_data;
    488	report_zones_cb orig_cb;
    489	unsigned int zone_idx;
    490
    491	/* must be filled by ->report_zones before calling dm_report_zones_cb */
    492	sector_t start;
    493};
    494int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
    495		    struct dm_report_zones_args *args, unsigned int nr_zones);
    496#endif /* CONFIG_BLK_DEV_ZONED */
    497
    498/*
    499 * Device mapper functions to parse and create devices specified by the
    500 * parameter "dm-mod.create="
    501 */
    502int __init dm_early_create(struct dm_ioctl *dmi,
    503			   struct dm_target_spec **spec_array,
    504			   char **target_params_array);
    505
    506struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
    507
    508/*
    509 * Geometry functions.
    510 */
    511int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
    512int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
    513
    514/*-----------------------------------------------------------------
    515 * Functions for manipulating device-mapper tables.
    516 *---------------------------------------------------------------*/
    517
    518/*
    519 * First create an empty table.
    520 */
    521int dm_table_create(struct dm_table **result, fmode_t mode,
    522		    unsigned num_targets, struct mapped_device *md);
    523
    524/*
    525 * Then call this once for each target.
    526 */
    527int dm_table_add_target(struct dm_table *t, const char *type,
    528			sector_t start, sector_t len, char *params);
    529
    530/*
    531 * Target can use this to set the table's type.
    532 * Can only ever be called from a target's ctr.
    533 * Useful for "hybrid" target (supports both bio-based
    534 * and request-based).
    535 */
    536void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
    537
    538/*
    539 * Finally call this to make the table ready for use.
    540 */
    541int dm_table_complete(struct dm_table *t);
    542
    543/*
    544 * Destroy the table when finished.
    545 */
    546void dm_table_destroy(struct dm_table *t);
    547
    548/*
    549 * Target may require that it is never sent I/O larger than len.
    550 */
    551int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
    552
    553/*
    554 * Table reference counting.
    555 */
    556struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
    557void dm_put_live_table(struct mapped_device *md, int srcu_idx);
    558void dm_sync_table(struct mapped_device *md);
    559
    560/*
    561 * Queries
    562 */
    563sector_t dm_table_get_size(struct dm_table *t);
    564unsigned int dm_table_get_num_targets(struct dm_table *t);
    565fmode_t dm_table_get_mode(struct dm_table *t);
    566struct mapped_device *dm_table_get_md(struct dm_table *t);
    567const char *dm_table_device_name(struct dm_table *t);
    568
    569/*
    570 * Trigger an event.
    571 */
    572void dm_table_event(struct dm_table *t);
    573
    574/*
    575 * Run the queue for request-based targets.
    576 */
    577void dm_table_run_md_queue_async(struct dm_table *t);
    578
    579/*
    580 * The device must be suspended before calling this method.
    581 * Returns the previous table, which the caller must destroy.
    582 */
    583struct dm_table *dm_swap_table(struct mapped_device *md,
    584			       struct dm_table *t);
    585
    586/*
    587 * Table blk_crypto_profile functions
    588 */
    589void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
    590
    591/*-----------------------------------------------------------------
    592 * Macros.
    593 *---------------------------------------------------------------*/
    594#define DM_NAME "device-mapper"
    595
    596#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
    597
    598#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
    599
    600#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
    601#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
    602#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
    603#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
    604#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
    605#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
    606
    607#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
    608#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
    609
    610#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
    611			  0 : scnprintf(result + sz, maxlen - sz, x))
    612
    613#define DMEMIT_TARGET_NAME_VERSION(y) \
    614		DMEMIT("target_name=%s,target_version=%u.%u.%u", \
    615		       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
    616
    617/*
    618 * Definitions of return values from target end_io function.
    619 */
    620#define DM_ENDIO_DONE		0
    621#define DM_ENDIO_INCOMPLETE	1
    622#define DM_ENDIO_REQUEUE	2
    623#define DM_ENDIO_DELAY_REQUEUE	3
    624
    625/*
    626 * Definitions of return values from target map function.
    627 */
    628#define DM_MAPIO_SUBMITTED	0
    629#define DM_MAPIO_REMAPPED	1
    630#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
    631#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
    632#define DM_MAPIO_KILL		4
    633
    634#define dm_sector_div64(x, y)( \
    635{ \
    636	u64 _res; \
    637	(x) = div64_u64_rem(x, y, &_res); \
    638	_res; \
    639} \
    640)
    641
    642/*
    643 * Ceiling(n / sz)
    644 */
    645#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
    646
    647#define dm_sector_div_up(n, sz) ( \
    648{ \
    649	sector_t _r = ((n) + (sz) - 1); \
    650	sector_div(_r, (sz)); \
    651	_r; \
    652} \
    653)
    654
    655/*
    656 * ceiling(n / size) * size
    657 */
    658#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
    659
    660/*
    661 * Sector offset taken relative to the start of the target instead of
    662 * relative to the start of the device.
    663 */
    664#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
    665
    666static inline sector_t to_sector(unsigned long long n)
    667{
    668	return (n >> SECTOR_SHIFT);
    669}
    670
    671static inline unsigned long to_bytes(sector_t n)
    672{
    673	return (n << SECTOR_SHIFT);
    674}
    675
    676#endif	/* _LINUX_DEVICE_MAPPER_H */