cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

etraxfs_dma.c (22761B)


/*
 * QEMU ETRAX DMA Controller.
 *
 * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "sysemu/runstate.h"
#include "exec/address-spaces.h"

#include "hw/cris/etraxfs_dma.h"

#define D(x)

#define RW_DATA           (0x0 / 4)
#define RW_SAVED_DATA     (0x58 / 4)
#define RW_SAVED_DATA_BUF (0x5c / 4)
#define RW_GROUP          (0x60 / 4)
#define RW_GROUP_DOWN     (0x7c / 4)
#define RW_CMD            (0x80 / 4)
#define RW_CFG            (0x84 / 4)
#define RW_STAT           (0x88 / 4)
#define RW_INTR_MASK      (0x8c / 4)
#define RW_ACK_INTR       (0x90 / 4)
#define R_INTR            (0x94 / 4)
#define R_MASKED_INTR     (0x98 / 4)
#define RW_STREAM_CMD     (0x9c / 4)

#define DMA_REG_MAX       (0x100 / 4)

/* descriptors */

// ------------------------------------------------------------ dma_descr_group
typedef struct dma_descr_group {
  uint32_t                      next;
  unsigned                      eol        : 1;
  unsigned                      tol        : 1;
  unsigned                      bol        : 1;
  unsigned                                 : 1;
  unsigned                      intr       : 1;
  unsigned                                 : 2;
  unsigned                      en         : 1;
  unsigned                                 : 7;
  unsigned                      dis        : 1;
  unsigned                      md         : 16;
  struct dma_descr_group       *up;
  union {
    struct dma_descr_context   *context;
    struct dma_descr_group     *group;
  }                             down;
} dma_descr_group;

// ---------------------------------------------------------- dma_descr_context
typedef struct dma_descr_context {
  uint32_t                      next;
  unsigned                      eol        : 1;
  unsigned                                 : 3;
  unsigned                      intr       : 1;
  unsigned                                 : 1;
  unsigned                      store_mode : 1;
  unsigned                      en         : 1;
  unsigned                                 : 7;
  unsigned                      dis        : 1;
  unsigned                      md0        : 16;
  unsigned                      md1;
  unsigned                      md2;
  unsigned                      md3;
  unsigned                      md4;
  uint32_t                      saved_data;
  uint32_t                      saved_data_buf;
} dma_descr_context;

// ------------------------------------------------------------- dma_descr_data
typedef struct dma_descr_data {
  uint32_t                      next;
  uint32_t                      buf;
  unsigned                      eol        : 1;
  unsigned                                 : 2;
  unsigned                      out_eop    : 1;
  unsigned                      intr       : 1;
  unsigned                      wait       : 1;
  unsigned                                 : 2;
  unsigned                                 : 3;
  unsigned                      in_eop     : 1;
  unsigned                                 : 4;
  unsigned                      md         : 16;
  uint32_t                      after;
} dma_descr_data;

/* Constants */
enum {
  regk_dma_ack_pkt                         = 0x00000100,
  regk_dma_anytime                         = 0x00000001,
  regk_dma_array                           = 0x00000008,
  regk_dma_burst                           = 0x00000020,
  regk_dma_client                          = 0x00000002,
  regk_dma_copy_next                       = 0x00000010,
  regk_dma_copy_up                         = 0x00000020,
  regk_dma_data_at_eol                     = 0x00000001,
  regk_dma_dis_c                           = 0x00000010,
  regk_dma_dis_g                           = 0x00000020,
  regk_dma_idle                            = 0x00000001,
  regk_dma_intern                          = 0x00000004,
  regk_dma_load_c                          = 0x00000200,
  regk_dma_load_c_n                        = 0x00000280,
  regk_dma_load_c_next                     = 0x00000240,
  regk_dma_load_d                          = 0x00000140,
  regk_dma_load_g                          = 0x00000300,
  regk_dma_load_g_down                     = 0x000003c0,
  regk_dma_load_g_next                     = 0x00000340,
  regk_dma_load_g_up                       = 0x00000380,
  regk_dma_next_en                         = 0x00000010,
  regk_dma_next_pkt                        = 0x00000010,
  regk_dma_no                              = 0x00000000,
  regk_dma_only_at_wait                    = 0x00000000,
  regk_dma_restore                         = 0x00000020,
  regk_dma_rst                             = 0x00000001,
  regk_dma_running                         = 0x00000004,
  regk_dma_rw_cfg_default                  = 0x00000000,
  regk_dma_rw_cmd_default                  = 0x00000000,
  regk_dma_rw_intr_mask_default            = 0x00000000,
  regk_dma_rw_stat_default                 = 0x00000101,
  regk_dma_rw_stream_cmd_default           = 0x00000000,
  regk_dma_save_down                       = 0x00000020,
  regk_dma_save_up                         = 0x00000020,
  regk_dma_set_reg                         = 0x00000050,
  regk_dma_set_w_size1                     = 0x00000190,
  regk_dma_set_w_size2                     = 0x000001a0,
  regk_dma_set_w_size4                     = 0x000001c0,
  regk_dma_stopped                         = 0x00000002,
  regk_dma_store_c                         = 0x00000002,
  regk_dma_store_descr                     = 0x00000000,
  regk_dma_store_g                         = 0x00000004,
  regk_dma_store_md                        = 0x00000001,
  regk_dma_sw                              = 0x00000008,
  regk_dma_update_down                     = 0x00000020,
  regk_dma_yes                             = 0x00000001
};

enum dma_ch_state
{
	RST = 1,
	STOPPED = 2,
	RUNNING = 4
};

struct fs_dma_channel
{
	qemu_irq irq;
	struct etraxfs_dma_client *client;

	/* Internal status.  */
	int stream_cmd_src;
	enum dma_ch_state state;

	unsigned int input : 1;
	unsigned int eol : 1;

	struct dma_descr_group current_g;
	struct dma_descr_context current_c;
	struct dma_descr_data current_d;

	/* Control registers.  */
	uint32_t regs[DMA_REG_MAX];
};

struct fs_dma_ctrl
{
	MemoryRegion mmio;
	int nr_channels;
	struct fs_dma_channel *channels;

	QEMUBH *bh;
};

static void DMA_run(void *opaque);
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c);

static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
{
	return ctrl->channels[c].regs[reg];
}

static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
{
	return channel_reg(ctrl, c, RW_CFG) & 2;
}

static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
{
	return (channel_reg(ctrl, c, RW_CFG) & 1)
		&& ctrl->channels[c].client;
}

static inline int fs_channel(hwaddr addr)
{
	/* Every channel has a 0x2000 ctrl register map.  */
	return addr >> 13;
}

#ifdef USE_THIS_DEAD_CODE
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
	hwaddr addr = channel_reg(ctrl, c, RW_GROUP);

	/* Load and decode. FIXME: handle endianness.  */
	cpu_physical_memory_read(addr, &ctrl->channels[c].current_g,
				 sizeof(ctrl->channels[c].current_g));
}

static void dump_c(int ch, struct dma_descr_context *c)
{
	printf("%s ch=%d\n", __func__, ch);
	printf("next=%x\n", c->next);
	printf("saved_data=%x\n", c->saved_data);
	printf("saved_data_buf=%x\n", c->saved_data_buf);
	printf("eol=%x\n", (uint32_t) c->eol);
}

static void dump_d(int ch, struct dma_descr_data *d)
{
	printf("%s ch=%d\n", __func__, ch);
	printf("next=%x\n", d->next);
	printf("buf=%x\n", d->buf);
	printf("after=%x\n", d->after);
	printf("intr=%x\n", (uint32_t) d->intr);
	printf("out_eop=%x\n", (uint32_t) d->out_eop);
	printf("in_eop=%x\n", (uint32_t) d->in_eop);
	printf("eol=%x\n", (uint32_t) d->eol);
}
#endif

static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
{
	hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

	/* Load and decode. FIXME: handle endianness.  */
	cpu_physical_memory_read(addr, &ctrl->channels[c].current_c,
				 sizeof(ctrl->channels[c].current_c));

	D(dump_c(c, &ctrl->channels[c].current_c));
	/* I guess this should update the current pos.  */
	ctrl->channels[c].regs[RW_SAVED_DATA] =
		(uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
		(uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
}

static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
{
	hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);

	/* Load and decode. FIXME: handle endianness.  */
	D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
	cpu_physical_memory_read(addr, &ctrl->channels[c].current_d,
				 sizeof(ctrl->channels[c].current_d));

	D(dump_d(c, &ctrl->channels[c].current_d));
	ctrl->channels[c].regs[RW_DATA] = addr;
}

static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
{
	hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

	/* Encode and store. FIXME: handle endianness.  */
	D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
	D(dump_d(c, &ctrl->channels[c].current_d));
	cpu_physical_memory_write(addr, &ctrl->channels[c].current_c,
				  sizeof(ctrl->channels[c].current_c));
}

static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
{
	hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);

	/* Encode and store. FIXME: handle endianness.  */
	D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
	cpu_physical_memory_write(addr, &ctrl->channels[c].current_d,
				  sizeof(ctrl->channels[c].current_d));
}

static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
{
	/* FIXME:  */
}

static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
{
	if (ctrl->channels[c].client)
	{
		ctrl->channels[c].eol = 0;
		ctrl->channels[c].state = RUNNING;
		if (!ctrl->channels[c].input)
			channel_out_run(ctrl, c);
	} else
		printf("WARNING: starting DMA ch %d with no client\n", c);

	qemu_bh_schedule_idle(ctrl->bh);
}

static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
{
	if (!channel_en(ctrl, c)
	    || channel_stopped(ctrl, c)
	    || ctrl->channels[c].state != RUNNING
	    /* Only reload the current data descriptor if it has eol set.  */
	    || !ctrl->channels[c].current_d.eol) {
		D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
			 c, ctrl->channels[c].state,
			 channel_stopped(ctrl, c),
			 channel_en(ctrl, c),
			 ctrl->channels[c].eol));
		D(dump_d(c, &ctrl->channels[c].current_d));
		return;
	}

	/* Reload the current descriptor.  */
	channel_load_d(ctrl, c);

	/* If the current descriptor cleared the eol flag and we had already
	   reached eol state, do the continue.  */
	if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
		D(printf("continue %d ok %x\n", c,
			 ctrl->channels[c].current_d.next));
		ctrl->channels[c].regs[RW_SAVED_DATA] =
			(uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
		channel_load_d(ctrl, c);
		ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
			(uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;

		channel_start(ctrl, c);
	}
	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
		(uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
}

static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
{
	unsigned int cmd = v & ((1 << 10) - 1);

	D(printf("%s ch=%d cmd=%x\n",
		 __func__, c, cmd));
	if (cmd & regk_dma_load_d) {
		channel_load_d(ctrl, c);
		if (cmd & regk_dma_burst)
			channel_start(ctrl, c);
	}

	if (cmd & regk_dma_load_c) {
		channel_load_c(ctrl, c);
	}
}

static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
{
	D(printf("%s %d\n", __func__, c));
	ctrl->channels[c].regs[R_INTR] &=
		~(ctrl->channels[c].regs[RW_ACK_INTR]);

	ctrl->channels[c].regs[R_MASKED_INTR] =
		ctrl->channels[c].regs[R_INTR]
		& ctrl->channels[c].regs[RW_INTR_MASK];

	D(printf("%s: chan=%d masked_intr=%x\n", __func__,
		 c,
		 ctrl->channels[c].regs[R_MASKED_INTR]));

	qemu_set_irq(ctrl->channels[c].irq,
		     !!ctrl->channels[c].regs[R_MASKED_INTR]);
}

static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
	uint32_t len;
	uint32_t saved_data_buf;
	unsigned char buf[2 * 1024];

	struct dma_context_metadata meta;
	bool send_context = true;

	if (ctrl->channels[c].eol)
		return 0;

	do {
		bool out_eop;
		D(printf("ch=%d buf=%x after=%x\n",
			 c,
			 (uint32_t)ctrl->channels[c].current_d.buf,
			 (uint32_t)ctrl->channels[c].current_d.after));

		if (send_context) {
			if (ctrl->channels[c].client->client.metadata_push) {
				meta.metadata = ctrl->channels[c].current_d.md;
				ctrl->channels[c].client->client.metadata_push(
					ctrl->channels[c].client->client.opaque,
					&meta);
			}
			send_context = false;
		}

		channel_load_d(ctrl, c);
		saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
		len = (uint32_t)(unsigned long)
			ctrl->channels[c].current_d.after;
		len -= saved_data_buf;

		if (len > sizeof buf)
			len = sizeof buf;
		cpu_physical_memory_read (saved_data_buf, buf, len);

		out_eop = ((saved_data_buf + len) ==
		           ctrl->channels[c].current_d.after) &&
			ctrl->channels[c].current_d.out_eop;

		D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
		         saved_data_buf, len, out_eop));

		if (ctrl->channels[c].client->client.push) {
			if (len > 0) {
				ctrl->channels[c].client->client.push(
					ctrl->channels[c].client->client.opaque,
					buf, len, out_eop);
			}
		} else {
			printf("WARNING: DMA ch%d dataloss,"
			       " no attached client.\n", c);
		}

		saved_data_buf += len;

		if (saved_data_buf == (uint32_t)(unsigned long)
				ctrl->channels[c].current_d.after) {
			/* Done. Step to next.  */
			if (ctrl->channels[c].current_d.out_eop) {
				send_context = true;
			}
			if (ctrl->channels[c].current_d.intr) {
				/* data intr.  */
				D(printf("signal intr %d eol=%d\n",
					len, ctrl->channels[c].current_d.eol));
				ctrl->channels[c].regs[R_INTR] |= (1 << 2);
				channel_update_irq(ctrl, c);
			}
			channel_store_d(ctrl, c);
			if (ctrl->channels[c].current_d.eol) {
				D(printf("channel %d EOL\n", c));
				ctrl->channels[c].eol = 1;

				/* Mark the context as disabled.  */
				ctrl->channels[c].current_c.dis = 1;
				channel_store_c(ctrl, c);

				channel_stop(ctrl, c);
			} else {
				ctrl->channels[c].regs[RW_SAVED_DATA] =
					(uint32_t)(unsigned long)ctrl->
						channels[c].current_d.next;
				/* Load new descriptor.  */
				channel_load_d(ctrl, c);
				saved_data_buf = (uint32_t)(unsigned long)
					ctrl->channels[c].current_d.buf;
			}

			ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
							saved_data_buf;
			D(dump_d(c, &ctrl->channels[c].current_d));
		}
		ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
	} while (!ctrl->channels[c].eol);
	return 1;
}

static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
			      unsigned char *buf, int buflen, int eop)
{
	uint32_t len;
	uint32_t saved_data_buf;

	if (ctrl->channels[c].eol == 1)
		return 0;

	channel_load_d(ctrl, c);
	saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
	len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
	len -= saved_data_buf;

	if (len > buflen)
		len = buflen;

	cpu_physical_memory_write (saved_data_buf, buf, len);
	saved_data_buf += len;

	if (saved_data_buf ==
	    (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
	    || eop) {
		uint32_t r_intr = ctrl->channels[c].regs[R_INTR];

		D(printf("in dscr end len=%d\n",
			 ctrl->channels[c].current_d.after
			 - ctrl->channels[c].current_d.buf));
		ctrl->channels[c].current_d.after = saved_data_buf;

		/* Done. Step to next.  */
		if (ctrl->channels[c].current_d.intr) {
			/* TODO: signal eop to the client.  */
			/* data intr.  */
			ctrl->channels[c].regs[R_INTR] |= 3;
		}
		if (eop) {
			ctrl->channels[c].current_d.in_eop = 1;
			ctrl->channels[c].regs[R_INTR] |= 8;
		}
		if (r_intr != ctrl->channels[c].regs[R_INTR])
			channel_update_irq(ctrl, c);

		channel_store_d(ctrl, c);
		D(dump_d(c, &ctrl->channels[c].current_d));

		if (ctrl->channels[c].current_d.eol) {
			D(printf("channel %d EOL\n", c));
			ctrl->channels[c].eol = 1;

			/* Mark the context as disabled.  */
			ctrl->channels[c].current_c.dis = 1;
			channel_store_c(ctrl, c);

			channel_stop(ctrl, c);
		} else {
			ctrl->channels[c].regs[RW_SAVED_DATA] =
				(uint32_t)(unsigned long)ctrl->
					channels[c].current_d.next;
			/* Load new descriptor.  */
			channel_load_d(ctrl, c);
			saved_data_buf = (uint32_t)(unsigned long)
				ctrl->channels[c].current_d.buf;
		}
	}

	ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
	return len;
}

static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
{
	if (ctrl->channels[c].client->client.pull) {
		ctrl->channels[c].client->client.pull(
			ctrl->channels[c].client->client.opaque);
		return 1;
	} else
		return 0;
}

static uint32_t dma_rinvalid (void *opaque, hwaddr addr)
{
	hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr);
	return 0;
}

static uint64_t
dma_read(void *opaque, hwaddr addr, unsigned int size)
{
	struct fs_dma_ctrl *ctrl = opaque;
	int c;
	uint32_t r = 0;

	if (size != 4) {
		dma_rinvalid(opaque, addr);
	}

	/* Make addr relative to this channel and bounded to nr regs.  */
	c = fs_channel(addr);
	addr &= 0xff;
	addr >>= 2;
	switch (addr)
	{
		case RW_STAT:
			r = ctrl->channels[c].state & 7;
			r |= ctrl->channels[c].eol << 5;
			r |= ctrl->channels[c].stream_cmd_src << 8;
			break;

		default:
			r = ctrl->channels[c].regs[addr];
			D(printf ("%s c=%d addr=" TARGET_FMT_plx "\n",
				  __func__, c, addr));
			break;
	}
	return r;
}

static void
dma_winvalid (void *opaque, hwaddr addr, uint32_t value)
{
	hw_error("Unsupported short waccess. reg=" TARGET_FMT_plx "\n", addr);
}

static void
dma_update_state(struct fs_dma_ctrl *ctrl, int c)
{
	if (ctrl->channels[c].regs[RW_CFG] & 2)
		ctrl->channels[c].state = STOPPED;
	if (!(ctrl->channels[c].regs[RW_CFG] & 1))
		ctrl->channels[c].state = RST;
}

static void
dma_write(void *opaque, hwaddr addr,
	  uint64_t val64, unsigned int size)
{
	struct fs_dma_ctrl *ctrl = opaque;
	uint32_t value = val64;
	int c;

	if (size != 4) {
		dma_winvalid(opaque, addr, value);
	}

	/* Make addr relative to this channel and bounded to nr regs.  */
	c = fs_channel(addr);
	addr &= 0xff;
	addr >>= 2;
	switch (addr)
	{
		case RW_DATA:
			ctrl->channels[c].regs[addr] = value;
			break;

		case RW_CFG:
			ctrl->channels[c].regs[addr] = value;
			dma_update_state(ctrl, c);
			break;
		case RW_CMD:
			/* continue.  */
			if (value & ~1)
				printf("Invalid store to ch=%d RW_CMD %x\n",
				       c, value);
			ctrl->channels[c].regs[addr] = value;
			channel_continue(ctrl, c);
			break;

		case RW_SAVED_DATA:
		case RW_SAVED_DATA_BUF:
		case RW_GROUP:
		case RW_GROUP_DOWN:
			ctrl->channels[c].regs[addr] = value;
			break;

		case RW_ACK_INTR:
		case RW_INTR_MASK:
			ctrl->channels[c].regs[addr] = value;
			channel_update_irq(ctrl, c);
			if (addr == RW_ACK_INTR)
				ctrl->channels[c].regs[RW_ACK_INTR] = 0;
			break;

		case RW_STREAM_CMD:
			if (value & ~1023)
				printf("Invalid store to ch=%d "
				       "RW_STREAMCMD %x\n",
				       c, value);
			ctrl->channels[c].regs[addr] = value;
			D(printf("stream_cmd ch=%d\n", c));
			channel_stream_cmd(ctrl, c, value);
			break;

		default:
			D(printf ("%s c=%d " TARGET_FMT_plx "\n",
				__func__, c, addr));
			break;
	}
}

static const MemoryRegionOps dma_ops = {
	.read = dma_read,
	.write = dma_write,
	.endianness = DEVICE_NATIVE_ENDIAN,
	.valid = {
		.min_access_size = 1,
		.max_access_size = 4
	}
};

static int etraxfs_dmac_run(void *opaque)
{
	struct fs_dma_ctrl *ctrl = opaque;
	int i;
	int p = 0;

	for (i = 0;
	     i < ctrl->nr_channels;
	     i++)
	{
		if (ctrl->channels[i].state == RUNNING)
		{
			if (ctrl->channels[i].input) {
				p += channel_in_run(ctrl, i);
			} else {
				p += channel_out_run(ctrl, i);
			}
		}
	}
	return p;
}

int etraxfs_dmac_input(struct etraxfs_dma_client *client,
		       void *buf, int len, int eop)
{
	return channel_in_process(client->ctrl, client->channel,
				  buf, len, eop);
}

/* Connect an IRQ line with a channel.  */
void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
{
	struct fs_dma_ctrl *ctrl = opaque;
	ctrl->channels[c].irq = *line;
	ctrl->channels[c].input = input;
}

void etraxfs_dmac_connect_client(void *opaque, int c,
				 struct etraxfs_dma_client *cl)
{
	struct fs_dma_ctrl *ctrl = opaque;
	cl->ctrl = ctrl;
	cl->channel = c;
	ctrl->channels[c].client = cl;
}


static void DMA_run(void *opaque)
{
    struct fs_dma_ctrl *etraxfs_dmac = opaque;
    int p = 1;

    if (runstate_is_running())
        p = etraxfs_dmac_run(etraxfs_dmac);

    if (p)
        qemu_bh_schedule_idle(etraxfs_dmac->bh);
}

void *etraxfs_dmac_init(hwaddr base, int nr_channels)
{
	struct fs_dma_ctrl *ctrl = NULL;

	ctrl = g_malloc0(sizeof *ctrl);

	ctrl->bh = qemu_bh_new(DMA_run, ctrl);

	ctrl->nr_channels = nr_channels;
	ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);

	memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma",
			      nr_channels * 0x2000);
	memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio);

	return ctrl;
}
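
For orientation, below is a minimal sketch (not part of this file) of how a machine model might wire this controller to a peripheral using only the public entry points defined above: etraxfs_dmac_init, etraxfs_dmac_connect, etraxfs_dmac_connect_client and, for input channels, etraxfs_dmac_input. The base address, channel count, channel index and IRQ wiring are placeholders, and the client callback layout (client.push / client.opaque) and push signature are assumed from how this file invokes them through hw/cris/etraxfs_dma.h.

/*
 * Hypothetical board wiring sketch, assuming the etraxfs_dma_client layout
 * used by channel_out_run() above.  Names prefixed my_ are illustrative.
 */
#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/cris/etraxfs_dma.h"

static struct etraxfs_dma_client my_dev_dma_out;   /* hypothetical client */

/* Receives bytes that channel_out_run() reads from guest memory. */
static int my_dev_tx_push(void *opaque, unsigned char *buf, int len, bool eop)
{
    /* A real device would transmit buf here; report all bytes consumed. */
    return len;
}

static void my_board_wire_dma(qemu_irq *irqs)
{
    /* One 0x2000-byte register window per channel, mapped at the base. */
    void *dmac = etraxfs_dmac_init(0x30000000 /* placeholder base */, 10);

    /* Channel 0 as an output (memory-to-device) channel; irqs[0] follows
     * R_MASKED_INTR via channel_update_irq(). */
    etraxfs_dmac_connect(dmac, 0, &irqs[0], 0 /* input = false */);

    my_dev_dma_out.client.push = my_dev_tx_push;
    my_dev_dma_out.client.opaque = NULL;    /* device state would go here */
    etraxfs_dmac_connect_client(dmac, 0, &my_dev_dma_out);

    /* An input channel works the other way around: the device delivers
     * data with etraxfs_dmac_input(&client, data, len, eop), which lands
     * in channel_in_process(). */
}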