--- /dev/null
+/*
+* UAE - The Un*x Amiga Emulator
+*
+* NCR 53C9x
+*
+* (c) 2014 Toni Wilen
+*/
+
+#include "sysconfig.h"
+#include "sysdeps.h"
+
+#ifdef NCR9X
+
+#define NCR_DEBUG 0
+
+#include "options.h"
+#include "uae.h"
+#include "memory.h"
+#include "rommgr.h"
+#include "custom.h"
+#include "newcpu.h"
+#include "ncr9x_scsi.h"
+#include "scsi.h"
+#include "filesys.h"
+#include "zfile.h"
+#include "blkdev.h"
+#include "cpuboard.h"
+#include "qemuvga\qemuuaeglue.h"
+#include "qemuvga\queue.h"
+#include "qemuvga\scsi\scsi.h"
+#include "qemuvga\scsi\esp.h"
+
+struct ncr9x_state
+{
+ TCHAR *name;
+ DeviceState devobject;
+ SCSIDevice *scsid[8];
+ SCSIBus scsibus;
+ uae_u32 board_mask;
+ uae_u8 *rom;
+ uae_u8 acmemory[128];
+ int configured;
+ bool enabled;
+ int rom_start, rom_end, rom_offset;
+ int io_start, io_end;
+ addrbank *bank;
+ bool irq;
+ void (*irq_func)(int);
+ int led;
+ uaecptr dma_ptr;
+ int dma_cnt;
+};
+
+
+
+/*
+ Blizzard SCSI Kit IV:
+
+ scsi: 0x8000
+ dma: 0x10000
+
+ pa >>= 1;
+ if (!bsc->sc_datain)
+ pa |= 0x80000000;
+ bsc->sc_dmabase[0x8000] = (u_int8_t)(pa >> 24);
+ bsc->sc_dmabase[0] = (u_int8_t)(pa >> 24);
+ bsc->sc_dmabase[0] = (u_int8_t)(pa >> 16);
+ bsc->sc_dmabase[0] = (u_int8_t)(pa >> 8);
+ bsc->sc_dmabase[0] = (u_int8_t)(pa);
+
+ Blizzard 2060:
+
+ scsi: 0x1ff00
+ dma: 0x1fff0
+
+ bsc->sc_reg[0xe0] = BZTZSC_PB_LED; LED
+
+ pa >>= 1;
+ if (!bsc->sc_datain)
+ pa |= 0x80000000;
+ bsc->sc_dmabase[12] = (u_int8_t)(pa);
+ bsc->sc_dmabase[8] = (u_int8_t)(pa >> 8);
+ bsc->sc_dmabase[4] = (u_int8_t)(pa >> 16);
+ bsc->sc_dmabase[0] = (u_int8_t)(pa >> 24);
+
+*/
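+
+/*
+ Illustrative sketch only (not part of the emulation), following the Blizzard
+ SCSI Kit IV scheme quoted above: the driver shifts the physical address right
+ by one (a word address), sets bit 31 to flag the transfer direction and writes
+ the result MSB first. The emulation shifts the bytes back together, masks off
+ the direction bit (the ESP core already knows the direction) and doubles the
+ value to recover the byte address, as blizzard_dma_read/blizzard_dma_write do:
+
+	uae_u32 dma_ptr = 0;
+	for (int i = 0; i < 4; i++)                  // four byte writes, MSB first
+		dma_ptr = (dma_ptr << 8) | drv_byte[i];  // drv_byte[] is hypothetical
+	uaecptr amiga_addr = (dma_ptr & 0x7fffffff) * 2;
+*/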
+
+static struct ncr9x_state blizzard_scsi;
+
+
+static struct ncr9x_state *ncrs[] =
+{
+ &blizzard_scsi,
+ NULL
+};
+
+static void set_irq2(int level)
+{
+ if (level)
+ INTREQ(0x8000 | 0x0008);
+}
+
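+/* Keep INT2 (INTREQ bit 3, the PORTS interrupt) asserted for as long as any
+   board's IRQ line is active; called from the emulator's interrupt rethink pass. */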
+void ncr9x_rethink(void)
+{
+ for (int i = 0; ncrs[i]; i++) {
+ if (ncrs[i]->irq)
+ INTREQ(0x8000 | 0x0008);
+ }
+}
+
+void esp_irq_raise(qemu_irq irq)
+{
+ struct ncr9x_state *ncr = (struct ncr9x_state*)irq;
+ ncr->irq = true;
+ ncr->irq_func(ncr->irq);
+}
+void esp_irq_lower(qemu_irq irq)
+{
+ struct ncr9x_state *ncr = (struct ncr9x_state*)irq;
+ ncr->irq = false;
+ ncr->irq_func(ncr->irq);
+}
+
+static void cyberstorm_mk1_mk2_dma_read(void *opaque, uint8_t *buf, int len)
+{
+ struct ncr9x_state *ncr = (struct ncr9x_state*)opaque;
+ while (len > 0) {
+ uae_u16 v = get_word(ncr->dma_ptr & ~1);
+ *buf++ = v >> 8;
+ len--;
+ if (len > 0) {
+ *buf++ = v;
+ len--;
+ }
+ ncr->dma_ptr += 2;
+ }
+}
+static void cyberstorm_mk1_mk2_dma_write(void *opaque, uint8_t *buf, int len)
+{
+ struct ncr9x_state *ncr = (struct ncr9x_state*)opaque;
+ while (len > 0) {
+ uae_u16 v;
+ v = *buf++;
+ len--;
+ v <<= 8;
+ if (len > 0) {
+ v |= *buf++;
+ len--;
+ }
+ put_word(ncr->dma_ptr & ~1, v);
+ ncr->dma_ptr += 2;
+ }
+}
+
+static void blizzard_dma_read(void *opaque, uint8_t *buf, int len)
+{
+ struct ncr9x_state *ncr = (struct ncr9x_state*)opaque;
+ while (len > 0) {
+ uae_u16 v = get_word((ncr->dma_ptr & 0x7fffffff) * 2);
+ *buf++ = v >> 8;
+ len--;
+ if (len > 0) {
+ *buf++ = v;
+ len--;
+ }
+ ncr->dma_ptr++;
+ }
+}
+static void blizzard_dma_write(void *opaque, uint8_t *buf, int len)
+{
+ struct ncr9x_state *ncr = (struct ncr9x_state*)opaque;
+ while (len > 0) {
+ uae_u16 v;
+ v = *buf++;
+ len--;
+ v <<= 8;
+ if (len > 0) {
+ v |= *buf++;
+ len--;
+ }
+ put_word((ncr->dma_ptr & 0x7fffffff) * 2, v);
+ ncr->dma_ptr++;
+ }
+}
+
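+/* SCSI CDB length from the command group code (top three bits of the opcode):
+   group 0 = 6 bytes, groups 1-2 = 10 bytes, group 4 = 16 bytes, group 5 = 12 bytes. */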
+static int get_scb_len(uae_u8 cmd)
+{
+ if (cmd <= 0x1f)
+ return 6;
+ if (cmd >= 0x20 && cmd <= 0x5f)
+ return 10;
+ if (cmd >= 0x80 && cmd <= 0x9f)
+ return 16;
+ if (cmd >= 0xa0 && cmd <= 0xbf)
+ return 12;
+ return 0;
+}
+
+void scsiesp_req_continue(SCSIRequest *req)
+{
+ struct scsi_data *sd = (struct scsi_data*)req->dev->handle;
+ if (sd->data_len < 0) {
+ esp_command_complete(req, sd->status, 0);
+ }
+ else if (sd->data_len) {
+ esp_transfer_data(req, sd->data_len);
+ } else {
+ if (sd->direction > 0)
+ scsi_emulate_cmd(sd);
+ esp_command_complete(req, sd->status, 0);
+ }
+}
+SCSIRequest *scsiesp_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, uint8_t *buf, void *hba_private)
+{
+ SCSIRequest *req = xcalloc(SCSIRequest, 1);
+ struct scsi_data *sd = (struct scsi_data*)d->handle;
+ struct ncr9x_state *ncr = (struct ncr9x_state*)sd->privdata;
+ int len = get_scb_len(buf[0]);
+
+ req->dev = d;
+ req->hba_private = hba_private;
+ req->bus = &ncr->scsibus;
+ req->bus->qbus.parent = &ncr->devobject;
+
+ memcpy(sd->cmd, buf, len);
+ sd->cmd_len = len;
+ return req;
+}
+int32_t scsiesp_req_enqueue(SCSIRequest *req)
+{
+ struct scsi_data *sd = (struct scsi_data*)req->dev->handle;
+
+ sd->data_len = 0;
+ scsi_start_transfer(sd);
+ scsi_emulate_analyze(sd);
+ //write_log (_T("%02x.%02x.%02x.%02x.%02x.%02x\n"), sd->cmd[0], sd->cmd[1], sd->cmd[2], sd->cmd[3], sd->cmd[4], sd->cmd[5]);
+
+ if (sd->direction <= 0)
+ scsi_emulate_cmd(sd);
+ if (sd->direction == 0)
+ return 1;
+ return sd->data_len;
+}
+void scsiesp_req_unref(SCSIRequest *req)
+{
+ xfree(req);
+}
+uint8_t *scsiesp_req_get_buf(SCSIRequest *req)
+{
+ struct scsi_data *sd = (struct scsi_data*)req->dev->handle;
+ sd->data_len = 0;
+ return sd->buffer;
+}
+SCSIDevice *scsiesp_device_find(SCSIBus *bus, int channel, int target, int lun)
+{
+ struct ncr9x_state *ncr = (struct ncr9x_state*)bus->privdata;
+ if (lun != 0 || target < 0 || target >= 8)
+ return NULL;
+ return ncr->scsid[target];
+}
+void scsiesp_req_cancel(SCSIRequest *req)
+{
+ write_log(_T("scsi_req_cancel\n"));
+}
+
+#define IO_MASK 0xff
+
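+/* Reverse the byte order within an aligned 32-bit word: offsets 0<->3, 1<->2. */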
+static uaecptr beswap(uaecptr addr)
+{
+ return (addr & ~3) | (3 - (addr & 3));
+}
+
+static void ncr9x_io_bput(struct ncr9x_state *ncr, uaecptr addr, uae_u32 val)
+{
+ addr &= ncr->board_mask;
+ if (currprefs.cpuboard_type == BOARD_BLIZZARD_2060) {
+ if (addr >= BLIZZARD_2060_DMA_OFFSET) {
+ //write_log (_T("Blizzard DMA PUT %08x %02X\n"), addr, (uae_u8)val);
+ addr &= 0xf;
+ addr >>= 2;
+ addr = 3 - addr;
+
+ ncr->dma_ptr &= ~(0xff << (addr * 8));
+ ncr->dma_ptr |= (val & 0xff) << (addr * 8);
+ if (addr == 3)
+ esp_dma_enable(ncr->devobject.lsistate, 1);
+ return;
+ } else if (addr >= BLIZZARD_2060_LED_OFFSET) {
+ ncr->led = val;
+ return;
+ }
+ } else if (currprefs.cpuboard_type == BOARD_BLIZZARD_1230_IV_SCSI || currprefs.cpuboard_type == BOARD_BLIZZARD_1260_SCSI) {
+ if (addr >= BLIZZARD_SCSI_KIT_DMA_OFFSET) {
+ addr &= 0x18000;
+ if (addr == 0x18000) {
+ ncr->dma_ptr = 0;
+ ncr->dma_cnt = 4;
+ } else {
+ ncr->dma_ptr <<= 8;
+ ncr->dma_ptr |= (uae_u8)val;
+ ncr->dma_cnt--;
+ if (ncr->dma_cnt == 0)
+ esp_dma_enable(ncr->devobject.lsistate, 1);
+ }
+ //write_log(_T("Blizzard DMA PUT %08x %02X\n"), addr, (uae_u8)val);
+ return;
+ }
+ } else if (currprefs.cpuboard_type == BOARD_CSMK1) {
+ if (addr >= CYBERSTORM_MK1_JUMPER_OFFSET) {
+ if (addr == CYBERSTORM_MK1_JUMPER_OFFSET)
+ esp_dma_enable(ncr->devobject.lsistate, 1);
+ return;
+ } else if (addr >= CYBERSTORM_MK1_DMA_OFFSET) {
+ addr &= 7;
+ addr >>= 1;
+ addr = 3 - addr;
+ ncr->dma_ptr &= ~(0xff << (addr * 8));
+ ncr->dma_ptr |= (val & 0xff) << (addr * 8);
+ return;
+ } else if (addr >= CYBERSTORM_MK1_LED_OFFSET) {
+ ncr->led = val;
+ return;
+ }
+ } else if (currprefs.cpuboard_type == BOARD_CSMK2) {
+ if (addr >= CYBERSTORM_MK2_DMA_OFFSET) {
+ addr &= 0xf;
+ addr >>= 2;
+ addr = 3 - addr;
+ ncr->dma_ptr &= ~(0xff << (addr * 8));
+ ncr->dma_ptr |= (val & 0xff) << (addr * 8);
+ if (addr == 0)
+ esp_dma_enable(ncr->devobject.lsistate, 1);
+ return;
+ } else if (addr >= CYBERSTORM_MK2_LED_OFFSET) {
+ ncr->led = val;
+ return;
+ }
+ }
+ addr &= IO_MASK;
+ addr >>= 2;
+ esp_reg_write(ncr->devobject.lsistate, (addr), val);
+}
+uae_u32 ncr9x_io_bget(struct ncr9x_state *ncr, uaecptr addr)
+{
+ addr &= ncr->board_mask;
+ if (currprefs.cpuboard_type == BOARD_BLIZZARD_2060) {
+ if (addr >= BLIZZARD_2060_DMA_OFFSET) {
+ write_log(_T("Blizzard DMA GET %08x\n"), addr);
+ return 0;
+ } else if (addr >= BLIZZARD_2060_LED_OFFSET) {
+ return ncr->led;
+ }
+ } else if (currprefs.cpuboard_type == BOARD_BLIZZARD_1230_IV_SCSI || currprefs.cpuboard_type == BOARD_BLIZZARD_1260_SCSI) {
+ if (addr >= BLIZZARD_SCSI_KIT_DMA_OFFSET)
+ return 0;
+ } else if (currprefs.cpuboard_type == BOARD_CSMK1) {
+ if (addr >= CYBERSTORM_MK1_JUMPER_OFFSET) {
+ return 0xff;
+ } else if (addr >= CYBERSTORM_MK1_DMA_OFFSET) {
+ return 0;
+ } else if (addr >= CYBERSTORM_MK1_LED_OFFSET) {
+ return ncr->led;
+ }
+ } else if (currprefs.cpuboard_type == BOARD_CSMK2) {
+ if (addr >= CYBERSTORM_MK2_DMA_OFFSET) {
+ return 0;
+ } else if (addr >= CYBERSTORM_MK2_LED_OFFSET) {
+ return ncr->led;
+ }
+ }
+ addr &= IO_MASK;
+ addr >>= 2;
+ return esp_reg_read(ncr->devobject.lsistate, (addr));
+}
+
+static uae_u32 ncr9x_bget2(struct ncr9x_state *ncr, uaecptr addr)
+{
+ uae_u32 v = 0;
+
+ addr &= ncr->board_mask;
+ if (ncr->io_end && (addr < ncr->io_start || addr >= ncr->io_end))
+ return v;
+ return ncr9x_io_bget(ncr, addr);
+}
+static void ncr9x_bput2(struct ncr9x_state *ncr, uaecptr addr, uae_u32 val)
+{
+ uae_u32 v = val;
+ addr &= ncr->board_mask;
+ if (ncr->io_end && (addr < ncr->io_start || addr >= ncr->io_end))
+ return;
+ ncr9x_io_bput(ncr, addr, val);
+}
+
+static uae_u32 REGPARAM2 ncr9x_lget(struct ncr9x_state *ncr, uaecptr addr)
+{
+ uae_u32 v;
+#ifdef JIT
+ special_mem |= S_READ;
+#endif
+ addr &= ncr->board_mask;
+ v = (ncr9x_bget2(ncr, addr + 3) << 0) | (ncr9x_bget2(ncr, addr + 2) << 8) |
+ (ncr9x_bget2(ncr, addr + 1) << 16) | (ncr9x_bget2(ncr, addr + 0) << 24);
+ return v;
+}
+
+static uae_u32 REGPARAM2 ncr9x_wget(struct ncr9x_state *ncr, uaecptr addr)
+{
+ uae_u32 v;
+#ifdef JIT
+ special_mem |= S_READ;
+#endif
+ addr &= ncr->board_mask;
+ v = (ncr9x_bget2(ncr, addr) << 8) | ncr9x_bget2(ncr, addr + 1);
+ return v;
+}
+
+static uae_u32 REGPARAM2 ncr9x_bget(struct ncr9x_state *ncr, uaecptr addr)
+{
+ uae_u32 v;
+#ifdef JIT
+ special_mem |= S_READ;
+#endif
+ addr &= ncr->board_mask;
+ if (!ncr->configured) {
+ if (addr >= sizeof ncr->acmemory)
+ return 0;
+ return ncr->acmemory[addr];
+ }
+ v = ncr9x_bget2(ncr, addr);
+ return v;
+}
+
+
+static void REGPARAM2 ncr9x_lput(struct ncr9x_state *ncr, uaecptr addr, uae_u32 l)
+{
+#ifdef JIT
+ special_mem |= S_WRITE;
+#endif
+ addr &= ncr->board_mask;
+ ncr9x_bput2(ncr, addr + 3, l >> 0);
+ ncr9x_bput2(ncr, addr + 2, l >> 8);
+ ncr9x_bput2(ncr, addr + 1, l >> 16);
+ ncr9x_bput2(ncr, addr + 0, l >> 24);
+}
+
+
+static void REGPARAM2 ncr9x_wput(struct ncr9x_state *ncr, uaecptr addr, uae_u32 w)
+{
+#ifdef JIT
+ special_mem |= S_WRITE;
+#endif
+ w &= 0xffff;
+ addr &= ncr->board_mask;
+ if (!ncr->configured)
+ return;
+ ncr9x_bput2(ncr, addr, w >> 8);
+ ncr9x_bput2(ncr, addr + 1, w);
+}
+
+static void REGPARAM2 ncr9x_bput(struct ncr9x_state *ncr, uaecptr addr, uae_u32 b)
+{
+#ifdef JIT
+ special_mem |= S_WRITE;
+#endif
+ b &= 0xff;
+ addr &= ncr->board_mask;
+ if (!ncr->configured) {
+ return;
+ }
+ ncr9x_bput2(ncr, addr, b);
+}
+
+static void REGPARAM2 bncr9x_bput(uaecptr addr, uae_u32 b)
+{
+ ncr9x_bput(&blizzard_scsi, addr, b);
+}
+static void REGPARAM2 bncr9x_wput(uaecptr addr, uae_u32 b)
+{
+ ncr9x_wput(&blizzard_scsi, addr, b);
+}
+static void REGPARAM2 bncr9x_lput(uaecptr addr, uae_u32 b)
+{
+ ncr9x_lput(&blizzard_scsi, addr, b);
+}
+static uae_u32 REGPARAM2 bncr9x_bget(uaecptr addr)
+{
+ return ncr9x_bget(&blizzard_scsi, addr);
+}
+static uae_u32 REGPARAM2 bncr9x_wget(uaecptr addr)
+{
+ return ncr9x_wget(&blizzard_scsi, addr);
+}
+static uae_u32 REGPARAM2 bncr9x_lget(uaecptr addr)
+{
+ return ncr9x_lget(&blizzard_scsi, addr);
+}
+
+static addrbank ncr9x_bank_blizzard = {
+ bncr9x_lget, bncr9x_wget, bncr9x_bget,
+ bncr9x_lput, bncr9x_wput, bncr9x_bput,
+ default_xlate, default_check, NULL, _T("53C94/FAS216"),
+ dummy_lgeti, dummy_wgeti, ABFLAG_IO
+};
+
+uae_u32 cpuboard_ncr9x_scsi_get(uaecptr addr)
+{
+ return ncr9x_io_bget(&blizzard_scsi, addr);
+}
+void cpuboard_ncr9x_scsi_put(uaecptr addr, uae_u32 v)
+{
+ ncr9x_io_bput(&blizzard_scsi, addr, v);
+}
+
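+/* Write one autoconfig byte into the board's config area: each byte is exposed
+   as two nibbles in the high 4 bits of successive words. Per the Zorro
+   autoconfig convention everything except er_Type (0x00) and the optional
+   interrupt register pair at 0x40/0x42 is stored complemented. */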
+static void ew(struct ncr9x_state *ncr, int addr, uae_u8 value)
+{
+ if (addr == 0x00 || addr == 0x02 || addr == 0x40 || addr == 0x42) {
+ ncr->acmemory[addr] = (value & 0xf0);
+ ncr->acmemory[addr + 2] = (value & 0x0f) << 4;
+ }
+ else {
+ ncr->acmemory[addr] = ~(value & 0xf0);
+ ncr->acmemory[addr + 2] = ~((value & 0x0f) << 4);
+ }
+}
+
+
+static void freescsi_hdf(struct scsi_data *sd)
+{
+ if (!sd)
+ return;
+ hdf_hd_close(sd->hfd);
+ scsi_free(sd);
+}
+
+static void freescsi(SCSIDevice *scsi)
+{
+ if (scsi) {
+ freescsi_hdf((struct scsi_data*)scsi->handle);
+ xfree(scsi);
+ }
+}
+
+static void ncr9x_free2(struct ncr9x_state *ncr)
+{
+ for (int ch = 0; ch < 8; ch++) {
+ freescsi(ncr->scsid[ch]);
+ ncr->scsid[ch] = NULL;
+ }
+}
+
+void ncr9x_free(void)
+{
+ ncr9x_free2(&blizzard_scsi);
+}
+
+void ncr9x_init(void)
+{
+ if (!blizzard_scsi.devobject.lsistate) {
+ if (currprefs.cpuboard_type == BOARD_CSMK2 || currprefs.cpuboard_type == BOARD_CSMK1)
+ esp_scsi_init(&blizzard_scsi.devobject, cyberstorm_mk1_mk2_dma_read, cyberstorm_mk1_mk2_dma_write);
+ else
+ esp_scsi_init(&blizzard_scsi.devobject, blizzard_dma_read, blizzard_dma_write);
+ }
+}
+
+static void ncr9x_reset_board(struct ncr9x_state *ncr)
+{
+ ncr->configured = 0;
+ if (currprefs.cpuboard_type == BOARD_CSMK1)
+ ncr->board_mask = 0xffff;
+ else
+ ncr->board_mask = 0x1ffff;
+ ncr->irq = false;
+ if (ncr->devobject.lsistate)
+ esp_scsi_reset(&ncr->devobject, ncr);
+ ncr->bank = &ncr9x_bank_blizzard;
+ ncr->name = ncr->bank->name;
+ ncr->irq_func = set_irq2;
+}
+
+void ncr9x_reset(void)
+{
+ ncr9x_reset_board(&blizzard_scsi);
+ blizzard_scsi.configured = -1;
+ blizzard_scsi.enabled = true;
+}
+
+
+static int add_ncr_scsi_hd(struct ncr9x_state *ncr, int ch, struct hd_hardfiledata *hfd, struct uaedev_config_info *ci, int scsi_level)
+{
+ struct scsi_data *handle;
+ freescsi(ncr->scsid[ch]);
+ ncr->scsid[ch] = NULL;
+ if (!hfd) {
+ hfd = xcalloc(struct hd_hardfiledata, 1);
+ memcpy(&hfd->hfd.ci, ci, sizeof(struct uaedev_config_info));
+ }
+ if (!hdf_hd_open(hfd))
+ return 0;
+ hfd->ansi_version = scsi_level;
+ handle = scsi_alloc_hd(ch, hfd);
+ if (!handle)
+ return 0;
+ handle->privdata = ncr;
+ ncr->scsid[ch] = xcalloc(SCSIDevice, 1);
+ ncr->scsid[ch]->handle = handle;
+ ncr->enabled = true;
+ return ncr->scsid[ch] ? 1 : 0;
+}
+
+
+static int add_ncr_scsi_cd(struct ncr9x_state *ncr, int ch, int unitnum)
+{
+ struct scsi_data *handle;
+ device_func_init(0);
+ freescsi(ncr->scsid[ch]);
+ ncr->scsid[ch] = NULL;
+ handle = scsi_alloc_cd(ch, unitnum, false);
+ if (!handle)
+ return 0;
+ handle->privdata = ncr;
+ ncr->scsid[ch] = xcalloc(SCSIDevice, 1);
+ ncr->scsid[ch]->handle = handle;
+ ncr->enabled = true;
+ return ncr->scsid[ch] ? 1 : 0;
+}
+
+static int add_ncr_scsi_tape(struct ncr9x_state *ncr, int ch, const TCHAR *tape_directory, bool readonly)
+{
+ struct scsi_data *handle;
+ freescsi(ncr->scsid[ch]);
+ ncr->scsid[ch] = NULL;
+ handle = scsi_alloc_tape(ch, tape_directory, readonly);
+ if (!handle)
+ return 0;
+ handle->privdata = ncr;
+ ncr->scsid[ch] = xcalloc(SCSIDevice, 1);
+ ncr->scsid[ch]->handle = handle;
+ ncr->enabled = true;
+ return ncr->scsid[ch] ? 1 : 0;
+}
+
+int cpuboard_ncr9x_add_scsi_unit(int ch, struct uaedev_config_info *ci)
+{
+ if (ci->type == UAEDEV_CD)
+ return add_ncr_scsi_cd(&blizzard_scsi, ch, ci->device_emu_unit);
+ else if (ci->type == UAEDEV_TAPE)
+ return add_ncr_scsi_tape(&blizzard_scsi, ch, ci->rootdir, ci->readonly);
+ else
+ return add_ncr_scsi_hd(&blizzard_scsi, ch, NULL, ci, 1);
+}
+
+#endif
--- /dev/null
+/*
+ * QEMU ESP/NCR53C9x emulation
+ *
+ * Copyright (c) 2005-2006 Fabrice Bellard
+ * Copyright (c) 2012 Herve Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "qemuuaeglue.h"
+#include "queue.h"
+
+//#include "hw/sysbus.h"
+#include "scsi/scsi.h"
+#include "scsi/esp.h"
+//#include "trace.h"
+//#include "qemu/log.h"
+
+/*
+ * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
+ * also produced as NCR89C100. See
+ * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
+ * and
+ * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
+ */
+
+static void esp_raise_irq(ESPState *s)
+{
+ if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
+ s->rregs[ESP_RSTAT] |= STAT_INT;
+ esp_irq_raise(s->irq);
+ }
+}
+
+static void esp_lower_irq(ESPState *s)
+{
+ if (s->rregs[ESP_RSTAT] & STAT_INT) {
+ s->rregs[ESP_RSTAT] &= ~STAT_INT;
+ esp_irq_lower(s->irq);
+ }
+}
+
+void esp_dma_enable(void *opaque, int level)
+{
+ ESPState *s = (ESPState*)opaque;
+ if (level) {
+ s->dma_enabled = 1;
+ if (s->dma_cb) {
+ s->dma_cb(s);
+ s->dma_cb = NULL;
+ }
+ } else {
+ s->dma_enabled = 0;
+ }
+}
+
+void esp_request_cancelled(SCSIRequest *req)
+{
+ ESPState *s = (ESPState*)req->hba_private;
+
+ if (req == s->current_req) {
+ scsiesp_req_unref(s->current_req);
+ s->current_req = NULL;
+ s->current_dev = NULL;
+ }
+}
+
+static uint32_t get_cmd(ESPState *s, uint8_t *buf)
+{
+ uint32_t dmalen;
+ int target;
+
+ target = s->wregs[ESP_WBUSID] & BUSID_DID;
+ if (s->dma) {
+ dmalen = s->rregs[ESP_TCLO];
+ dmalen |= s->rregs[ESP_TCMID] << 8;
+ dmalen |= s->rregs[ESP_TCHI] << 16;
+ s->dma_memory_read(s->dma_opaque, buf, dmalen);
+ } else {
+ dmalen = s->ti_size;
+ memcpy(buf, s->ti_buf, dmalen);
+ //buf[0] = buf[2] >> 5; // This makes no sense!
+ }
+
+ s->ti_size = 0;
+ s->ti_rptr = 0;
+ s->ti_wptr = 0;
+
+ if (s->current_req) {
+ /* Started a new command before the old one finished. Cancel it. */
+ scsiesp_req_cancel(s->current_req);
+ s->async_len = 0;
+ }
+
+ s->current_dev = scsiesp_device_find(&s->bus, 0, target, 0);
+ if (!s->current_dev) {
+ // No such drive
+ s->rregs[ESP_RSTAT] = 0;
+ s->rregs[ESP_RINTR] = INTR_DC;
+ s->rregs[ESP_RSEQ] = SEQ_0;
+ esp_raise_irq(s);
+ return 0;
+ }
+ return dmalen;
+}
+
+static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
+{
+ int32_t datalen;
+ int lun;
+ SCSIDevice *current_lun;
+
+ lun = busid & 7;
+ current_lun = scsiesp_device_find(&s->bus, 0, s->current_dev->id, lun);
+ s->current_req = scsiesp_req_new(current_lun, 0, lun, buf, s);
+ datalen = scsiesp_req_enqueue(s->current_req);
+ s->ti_size = datalen;
+ if (datalen != 0) {
+ s->rregs[ESP_RSTAT] = STAT_TC;
+ s->dma_left = 0;
+ s->dma_counter = 0;
+ if (datalen > 0) {
+ s->rregs[ESP_RSTAT] |= STAT_DI;
+ } else {
+ s->rregs[ESP_RSTAT] |= STAT_DO;
+ }
+ scsiesp_req_continue(s->current_req);
+ }
+ s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ esp_raise_irq(s);
+}
+
+static void do_cmd(ESPState *s, uint8_t *buf)
+{
+ uint8_t busid = buf[0];
+
+ do_busid_cmd(s, &buf[1], busid);
+}
+
+static void handle_satn(ESPState *s)
+{
+ uint8_t buf[32];
+ int len;
+
+ if (s->dma && !s->dma_enabled) {
+ s->dma_cb = handle_satn;
+ return;
+ }
+ len = get_cmd(s, buf);
+ if (len)
+ do_cmd(s, buf);
+}
+
+static void handle_s_without_atn(ESPState *s)
+{
+ uint8_t buf[32];
+ int len;
+
+ if (s->dma && !s->dma_enabled) {
+ s->dma_cb = handle_s_without_atn;
+ return;
+ }
+ len = get_cmd(s, buf);
+ if (len) {
+ do_busid_cmd(s, buf, 0);
+ }
+}
+
+static void handle_satn_stop(ESPState *s)
+{
+ if (s->dma && !s->dma_enabled) {
+ s->dma_cb = handle_satn_stop;
+ return;
+ }
+ s->cmdlen = get_cmd(s, s->cmdbuf);
+ if (s->cmdlen) {
+ s->do_cmd = 1;
+ s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
+ s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ esp_raise_irq(s);
+ }
+}
+
+static void write_response(ESPState *s)
+{
+ s->ti_buf[0] = s->status;
+ s->ti_buf[1] = 0;
+ if (s->dma) {
+ s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
+ s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
+ s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ } else {
+ s->ti_size = 2;
+ s->ti_rptr = 0;
+ s->ti_wptr = 0;
+ s->rregs[ESP_RFLAGS] = 2;
+ }
+ esp_raise_irq(s);
+}
+
+static void esp_dma_done(ESPState *s)
+{
+ s->rregs[ESP_RSTAT] |= STAT_TC;
+ s->rregs[ESP_RINTR] = INTR_BS;
+ s->rregs[ESP_RSEQ] = 0;
+ s->rregs[ESP_RFLAGS] = 0;
+ s->rregs[ESP_TCLO] = 0;
+ s->rregs[ESP_TCMID] = 0;
+ s->rregs[ESP_TCHI] = 0;
+ esp_raise_irq(s);
+}
+
+static void esp_do_dma(ESPState *s)
+{
+ uint32_t len;
+ int to_device;
+
+ to_device = (s->ti_size < 0);
+ len = s->dma_left;
+ if (s->do_cmd) {
+ s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
+ s->ti_size = 0;
+ s->cmdlen = 0;
+ s->do_cmd = 0;
+ do_cmd(s, s->cmdbuf);
+ return;
+ }
+ if (s->async_len == 0) {
+ /* Defer until data is available. */
+ return;
+ }
+ if (len > s->async_len) {
+ len = s->async_len;
+ }
+ if (to_device) {
+ s->dma_memory_read(s->dma_opaque, s->async_buf, len);
+ } else {
+ s->dma_memory_write(s->dma_opaque, s->async_buf, len);
+ }
+ s->dma_left -= len;
+ s->async_buf += len;
+ s->async_len -= len;
+ if (to_device)
+ s->ti_size += len;
+ else
+ s->ti_size -= len;
+ if (s->async_len == 0) {
+ scsiesp_req_continue(s->current_req);
+ /* If there is still data to be read from the device then
+ complete the DMA operation immediately. Otherwise defer
+ until the scsi layer has completed. */
+ if (to_device || s->dma_left != 0 || s->ti_size == 0) {
+ return;
+ }
+ }
+
+ /* Partially filled a scsi buffer. Complete immediately. */
+ esp_dma_done(s);
+}
+
+void esp_command_complete(SCSIRequest *req, uint32_t status,
+ size_t resid)
+{
+ ESPState *s = (ESPState*)req->hba_private;
+
+ s->ti_size = 0;
+ s->dma_left = 0;
+ s->async_len = 0;
+ s->status = status;
+ s->rregs[ESP_RSTAT] = STAT_ST;
+ esp_dma_done(s);
+ if (s->current_req) {
+ scsiesp_req_unref(s->current_req);
+ s->current_req = NULL;
+ s->current_dev = NULL;
+ }
+}
+
+void esp_transfer_data(SCSIRequest *req, uint32_t len)
+{
+ ESPState *s = (ESPState*)req->hba_private;
+
+ s->async_len = len;
+ s->async_buf = scsiesp_req_get_buf(req);
+ if (s->dma_left) {
+ esp_do_dma(s);
+ } else if (s->dma_counter != 0 && s->ti_size <= 0) {
+ /* If this was the last part of a DMA transfer then the
+ completion interrupt is deferred to here. */
+ esp_dma_done(s);
+ }
+}
+
+static void handle_ti(ESPState *s)
+{
+ uint32_t dmalen, minlen;
+
+ if (s->dma && !s->dma_enabled) {
+ s->dma_cb = handle_ti;
+ return;
+ }
+
+ dmalen = s->rregs[ESP_TCLO];
+ dmalen |= s->rregs[ESP_TCMID] << 8;
+ dmalen |= s->rregs[ESP_TCHI] << 16;
+ if (dmalen==0) {
+ dmalen=0x10000;
+ }
+ s->dma_counter = dmalen;
+
+ if (s->do_cmd)
+ minlen = (dmalen < 32) ? dmalen : 32;
+ else if (s->ti_size < 0)
+ minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
+ else
+ minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
+ if (s->dma) {
+ s->dma_left = minlen;
+ s->rregs[ESP_RSTAT] &= ~STAT_TC;
+ esp_do_dma(s);
+ } else if (s->do_cmd) {
+ s->ti_size = 0;
+ s->cmdlen = 0;
+ s->do_cmd = 0;
+ do_cmd(s, s->cmdbuf);
+ return;
+ }
+}
+
+void esp_hard_reset(ESPState *s)
+{
+ memset(s->rregs, 0, ESP_REGS);
+ memset(s->wregs, 0, ESP_REGS);
+ s->rregs[ESP_TCHI] = s->chip_id;
+ s->ti_size = 0;
+ s->ti_rptr = 0;
+ s->ti_wptr = 0;
+ s->dma = 0;
+ s->do_cmd = 0;
+ s->dma_cb = NULL;
+
+ s->rregs[ESP_CFG1] = 7;
+}
+
+static void esp_soft_reset(ESPState *s)
+{
+ esp_hard_reset(s);
+}
+
+static void parent_esp_reset(ESPState *s, int irq, int level)
+{
+ if (level) {
+ esp_soft_reset(s);
+ }
+}
+
+uint64_t esp_reg_read(void *opaque, uint32_t saddr)
+{
+ ESPState *s = (ESPState*)opaque;
+ uint32_t old_val;
+
+ switch (saddr) {
+ case ESP_FIFO:
+ if (s->ti_size > 0) {
+ s->ti_size--;
+ if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
+ /* Data out. */
+ write_log("esp: PIO data read not implemented\n");
+ s->rregs[ESP_FIFO] = 0;
+ } else {
+ s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
+ }
+ esp_raise_irq(s);
+ }
+ if (s->ti_size == 0) {
+ s->ti_rptr = 0;
+ s->ti_wptr = 0;
+ }
+ break;
+ case ESP_RINTR:
+ /* Clear sequence step, interrupt register and all status bits
+ except TC */
+ old_val = s->rregs[ESP_RINTR];
+ s->rregs[ESP_RINTR] = 0;
+ s->rregs[ESP_RSTAT] &= ~STAT_TC;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ esp_lower_irq(s);
+
+ return old_val;
+ default:
+ //write_log("read unknown 53c94 register %02x\n", saddr);
+ break;
+ }
+ return s->rregs[saddr];
+}
+
+void esp_reg_write(void *opaque, uint32_t saddr, uint64_t val)
+{
+ ESPState *s = (ESPState*)opaque;
+
+ switch (saddr) {
+ case ESP_TCLO:
+ case ESP_TCMID:
+ case ESP_TCHI:
+ s->rregs[ESP_RSTAT] &= ~STAT_TC;
+ break;
+ case ESP_FIFO:
+ if (s->do_cmd) {
+ s->cmdbuf[s->cmdlen++] = val & 0xff;
+ } else if (s->ti_size == TI_BUFSZ - 1) {
+ ; /* FIFO overrun: byte is silently dropped */
+ } else {
+ s->ti_size++;
+ s->ti_buf[s->ti_wptr++] = val & 0xff;
+ }
+ break;
+ case ESP_CMD:
+ s->rregs[saddr] = val;
+ if (val & CMD_DMA) {
+ s->dma = 1;
+ /* Reload DMA counter. */
+ s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
+ s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
+ s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
+ } else {
+ s->dma = 0;
+ }
+ switch(val & CMD_CMD) {
+ case CMD_NOP:
+ break;
+ case CMD_FLUSH:
+ //s->ti_size = 0;
+ s->rregs[ESP_RINTR] = INTR_FC;
+ s->rregs[ESP_RSEQ] = 0;
+ s->rregs[ESP_RFLAGS] = 0;
+ break;
+ case CMD_RESET:
+ esp_soft_reset(s);
+ break;
+ case CMD_BUSRESET:
+ s->rregs[ESP_RINTR] = INTR_RST;
+ if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
+ esp_raise_irq(s);
+ }
+ break;
+ case CMD_TI:
+ handle_ti(s);
+ break;
+ case CMD_ICCS:
+ write_response(s);
+ s->rregs[ESP_RINTR] = INTR_FC;
+ s->rregs[ESP_RSTAT] |= STAT_MI;
+ break;
+ case CMD_MSGACC:
+ s->rregs[ESP_RINTR] = INTR_DC;
+ s->rregs[ESP_RSEQ] = 0;
+ s->rregs[ESP_RFLAGS] = 0;
+ esp_raise_irq(s);
+ break;
+ case CMD_PAD:
+ s->rregs[ESP_RSTAT] = STAT_TC;
+ s->rregs[ESP_RINTR] = INTR_FC;
+ s->rregs[ESP_RSEQ] = 0;
+ break;
+ case CMD_SATN:
+ break;
+ case CMD_RSTATN:
+ break;
+ case CMD_SEL:
+ handle_s_without_atn(s);
+ break;
+ case CMD_SELATN:
+ handle_satn(s);
+ break;
+ case CMD_SELATNS:
+ handle_satn_stop(s);
+ break;
+ case CMD_ENSEL:
+ s->rregs[ESP_RINTR] = 0;
+ break;
+ case CMD_DISSEL:
+ s->rregs[ESP_RINTR] = 0;
+ esp_raise_irq(s);
+ break;
+ default:
+ break;
+ }
+ break;
+ case ESP_WBUSID:
+ case ESP_WSEL:
+ case ESP_WSYNTP:
+ case ESP_WSYNO:
+ break;
+ case ESP_CFG1:
+ case ESP_CFG2: case ESP_CFG3:
+ case ESP_RES3: case ESP_RES4:
+ s->rregs[saddr] = val;
+ break;
+ case ESP_WCCF:
+ case ESP_WTEST:
+ break;
+ default:
+ write_log("write unknown 53c94 register %02x\n", saddr);
+ //activate_debugger();
+ return;
+ }
+ s->wregs[saddr] = val;
+}
+
+static bool esp_mem_accepts(void *opaque, hwaddr addr,
+ unsigned size, bool is_write)
+{
+ return (size == 1) || (is_write && size == 4);
+}
+
+#if 0
+const VMStateDescription vmstate_esp = {
+ .name ="esp",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .minimum_version_id_old = 3,
+ .fields = (VMStateField []) {
+ VMSTATE_BUFFER(rregs, ESPState),
+ VMSTATE_BUFFER(wregs, ESPState),
+ VMSTATE_INT32(ti_size, ESPState),
+ VMSTATE_UINT32(ti_rptr, ESPState),
+ VMSTATE_UINT32(ti_wptr, ESPState),
+ VMSTATE_BUFFER(ti_buf, ESPState),
+ VMSTATE_UINT32(status, ESPState),
+ VMSTATE_UINT32(dma, ESPState),
+ VMSTATE_BUFFER(cmdbuf, ESPState),
+ VMSTATE_UINT32(cmdlen, ESPState),
+ VMSTATE_UINT32(do_cmd, ESPState),
+ VMSTATE_UINT32(dma_left, ESPState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif
+
+#define TYPE_ESP "esp"
+//#define ESP(obj) OBJECT_CHECK(SysBusESPState, (obj), TYPE_ESP)
+#define ESP(obj) ((ESPState*)(obj)->lsistate)
+
+typedef struct {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion iomem;
+ uint32_t it_shift;
+ ESPState esp;
+} SysBusESPState;
+
+#if 0
+static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned int size)
+{
+ SysBusESPState *sysbus = (SysBusESPState*)opaque;
+ uint32_t saddr;
+
+ saddr = addr >> sysbus->it_shift;
+ esp_reg_write(&sysbus->esp, saddr, val);
+}
+
+static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ SysBusESPState *sysbus = (SysBusESPState*)opaque;
+ uint32_t saddr;
+
+ saddr = addr >> sysbus->it_shift;
+ return esp_reg_read(&sysbus->esp, saddr);
+}
+
+static const MemoryRegionOps sysbus_esp_mem_ops = {
+ .read = sysbus_esp_mem_read,
+ .write = sysbus_esp_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.accepts = esp_mem_accepts,
+};
+
+void esp_init(hwaddr espaddr, int it_shift,
+ ESPDMAMemoryReadWriteFunc dma_memory_read,
+ ESPDMAMemoryReadWriteFunc dma_memory_write,
+ void *dma_opaque, qemu_irq irq, qemu_irq *reset,
+ qemu_irq *dma_enable)
+{
+ DeviceState *dev;
+ SysBusDevice *s;
+ SysBusESPState *sysbus;
+ ESPState *esp;
+
+ dev = qdev_create(NULL, TYPE_ESP);
+ sysbus = ESP(dev);
+ esp = &sysbus->esp;
+ esp->dma_memory_read = dma_memory_read;
+ esp->dma_memory_write = dma_memory_write;
+ esp->dma_opaque = dma_opaque;
+ sysbus->it_shift = it_shift;
+ /* XXX for now until rc4030 has been changed to use DMA enable signal */
+ esp->dma_enabled = 1;
+ qdev_init_nofail(dev);
+ s = SYS_BUS_DEVICE(dev);
+ sysbus_connect_irq(s, 0, irq);
+ sysbus_mmio_map(s, 0, espaddr);
+ *reset = qdev_get_gpio_in(dev, 0);
+ *dma_enable = qdev_get_gpio_in(dev, 1);
+}
+
+static const struct SCSIBusInfo esp_scsi_info = {
+ .tcq = false,
+ .max_target = ESP_MAX_DEVS,
+ .max_lun = 7,
+
+ .transfer_data = esp_transfer_data,
+ .complete = esp_command_complete,
+ .cancel = esp_request_cancelled
+};
+
+static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
+{
+ SysBusESPState *sysbus = ESP(opaque);
+ ESPState *s = &sysbus->esp;
+
+ switch (irq) {
+ case 0:
+ parent_esp_reset(s, irq, level);
+ break;
+ case 1:
+ esp_dma_enable(opaque, irq, level);
+ break;
+ }
+}
+
+static void sysbus_esp_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ SysBusESPState *sysbus = ESP(dev);
+ ESPState *s = &sysbus->esp;
+ Error *err = NULL;
+
+ sysbus_init_irq(sbd, &s->irq);
+ assert(sysbus->it_shift != -1);
+
+ s->chip_id = TCHI_FAS100A;
+ memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
+ sysbus, "esp", ESP_REGS << sysbus->it_shift);
+ sysbus_init_mmio(sbd, &sysbus->iomem);
+
+ qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
+
+ scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
+ scsi_bus_legacy_handle_cmdline(&s->bus, &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+}
+
+static void sysbus_esp_hard_reset(DeviceState *dev)
+{
+ SysBusESPState *sysbus = ESP(dev);
+ esp_hard_reset(&sysbus->esp);
+}
+
+static const VMStateDescription vmstate_sysbus_esp_scsi = {
+ .name = "sysbusespscsi",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void sysbus_esp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = sysbus_esp_realize;
+ dc->reset = sysbus_esp_hard_reset;
+ dc->vmsd = &vmstate_sysbus_esp_scsi;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo sysbus_esp_info = {
+ .name = TYPE_ESP,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SysBusESPState),
+ .class_init = sysbus_esp_class_init,
+};
+
+static void esp_register_types(void)
+{
+ type_register_static(&sysbus_esp_info);
+}
+
+type_init(esp_register_types)
+#endif
+
+void esp_scsi_init(DeviceState *dev, ESPDMAMemoryReadWriteFunc read, ESPDMAMemoryReadWriteFunc write)
+{
+ dev->lsistate = calloc(1, sizeof(ESPState));
+ ESPState *s = ESP(dev);
+ s->dma_memory_read = read;
+ s->dma_memory_write = write;
+}
+
+void esp_scsi_reset(DeviceState *dev, void *privdata)
+{
+ ESPState *s = ESP(dev);
+
+ esp_soft_reset(s);
+ s->bus.privdata = privdata;
+ s->irq = privdata;
+ s->dma_opaque = privdata;
+}