cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

libata-pata-timings.c (5968B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Helper library for PATA timings
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from the ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
 */

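/*
 * Each row below lists the transfer mode followed by the struct
 * ata_timing fields handled by ata_timing_quantize(): setup, act8b,
 * rec8b, cyc8b, active, recover, dmack_hold, cycle and udma.
 */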
static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 5,  150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 5,  120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 5,  100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20, 5,   80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0, 0,    0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0, 0,    0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0, 0,    0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0, 0,    0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0, 0,    0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0, 0,    0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0, 0,    0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0, 0,    0,  15 },

	{ 0xFF }
};

#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(((v) * 1000), unit):0)
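/*
 * ENOUGH() is a round-up (ceiling) division and EZ() converts a value
 * to clock counts of period 'unit', treating 0 as "not specified".
 * The table entries above are in nanoseconds and EZ() multiplies by
 * 1000, which makes T and UT clock periods in picoseconds.  For
 * example, with T = 30000 (a ~33 MHz clock), an active time of 70 ns
 * quantizes to EZ(70, 30000) = ENOUGH(70000, 30000) = 3 clocks.
 */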

static void ata_timing_quantize(const struct ata_timing *t,
				struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup,       T);
	q->act8b	= EZ(t->act8b,       T);
	q->rec8b	= EZ(t->rec8b,       T);
	q->cyc8b	= EZ(t->cyc8b,       T);
	q->active	= EZ(t->active,      T);
	q->recover	= EZ(t->recover,     T);
	q->dmack_hold	= EZ(t->dmack_hold,  T);
	q->cycle	= EZ(t->cycle,       T);
	q->udma		= EZ(t->udma,       UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP)
		m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B)
		m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B)
		m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B)
		m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE)
		m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER)
		m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD)
		m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE)
		m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA)
		m->udma = max(a->udma, b->udma);
}
EXPORT_SYMBOL_GPL(ata_timing_merge);

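/*
 * Look up the constant timing entry for an xfer_mode.  The linear scan
 * relies on ata_timing[] being sorted by ascending mode and terminated
 * by the 0xFF sentinel, which is larger than any valid mode and so
 * stops the walk for unknown modes.
 */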
const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
{
	const struct ata_timing *t = ata_timing;

	while (xfer_mode > t->mode)
		t++;

	if (xfer_mode == t->mode)
		return t;

	WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
			__func__, xfer_mode);

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_timing_find_mode);

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	s = ata_timing_find_mode(speed);
	if (!s)
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
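	/*
	 * These limits come from the drive's IDENTIFY data: bit 1 of
	 * ATA_ID_FIELD_VALID marks the EIDE timing words as valid, and
	 * ATA_ID_EIDE_PIO / ATA_ID_EIDE_PIO_IORDY / ATA_ID_EIDE_DMA_MIN
	 * hold the advertised minimum PIO (without/with IORDY) and
	 * Multiword DMA cycle times, in nanoseconds.
	 */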

	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands. We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
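	/*
	 * For example, if after quantisation cyc8b is 8 clocks while
	 * act8b is 3 and rec8b is 2, the 3-clock deficit is split:
	 * act8b grows by 1 to 4 and rec8b is set to 8 - 4 = 4, so
	 * act8b + rec8b again matches cyc8b.
	 */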

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/*
	 * In a few cases quantisation may produce enough error to
	 * leave t->cycle too low for the sum of active and recovery;
	 * if so, we must correct this.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
EXPORT_SYMBOL_GPL(ata_timing_compute);
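
For reference, a minimal sketch of how a PATA controller driver might use these helpers. The pata_foo_set_piomode() wrapper, the 33.333 MHz clock and the register-programming step are illustrative assumptions, not taken from this file:

#include <linux/libata.h>

/* Hypothetical set_piomode hook for an imaginary "pata_foo" controller. */
static void pata_foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	/* Clock period in picoseconds, here for a 33.333 MHz (33333 kHz) bus. */
	int T = 1000000000 / 33333;

	/* Look up the PIO timings and convert them to bus clock counts. */
	if (ata_timing_compute(adev, adev->pio_mode, &t, T, T) < 0)
		return;

	/*
	 * t.setup, t.active and t.recover (plus t.act8b/t.rec8b for 8-bit
	 * register accesses) now hold clock counts ready to be programmed
	 * into the controller's timing registers.
	 */
}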