/*
 * QEMU AHCI Emulation
 *
 * Copyright (c) 2010 qiaochong@loongson.cn
 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
 * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"

#include "qemu/error-report.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "hw/ide/internal.h"
#include "hw/ide/pci.h"
#include "hw/ide/ahci.h"

#define DEBUG_AHCI 0

#define DPRINTF(port, fmt, ...) \
do { \
    if (DEBUG_AHCI) { \
        fprintf(stderr, "ahci: %s: [%d] ", __func__, port); \
        fprintf(stderr, fmt, ## __VA_ARGS__); \
    } \
} while (0)

static void check_cmd(AHCIState *s, int port);
static int handle_cmd(AHCIState *s, int port, uint8_t slot);
static void ahci_reset_port(AHCIState *s, int port);
static bool ahci_write_fis_d2h(AHCIDevice *ad);
static void ahci_init_d2h(AHCIDevice *ad);
static int ahci_dma_prepare_buf(IDEDMA *dma, int32_t limit);
static bool ahci_map_clb_address(AHCIDevice *ad);
static bool ahci_map_fis_address(AHCIDevice *ad);
static void ahci_unmap_clb_address(AHCIDevice *ad);
static void ahci_unmap_fis_address(AHCIDevice *ad);

static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
{
    uint32_t val;
    AHCIPortRegs *pr;
    pr = &s->dev[port].port_regs;

    switch (offset) {
    case PORT_LST_ADDR:
        val = pr->lst_addr;
        break;
    case PORT_LST_ADDR_HI:
        val = pr->lst_addr_hi;
        break;
    case PORT_FIS_ADDR:
        val = pr->fis_addr;
        break;
    case PORT_FIS_ADDR_HI:
        val = pr->fis_addr_hi;
        break;
    case PORT_IRQ_STAT:
        val = pr->irq_stat;
        break;
    case PORT_IRQ_MASK:
        val = pr->irq_mask;
        break;
    case PORT_CMD:
        val = pr->cmd;
        break;
    case PORT_TFDATA:
        val = pr->tfdata;
        break;
    case PORT_SIG:
        val = pr->sig;
        break;
    case PORT_SCR_STAT:
        if (s->dev[port].port.ifs[0].blk) {
            val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP |
                  SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE;
        } else {
            val = SATA_SCR_SSTATUS_DET_NODEV;
        }
        break;
    case PORT_SCR_CTL:
        val = pr->scr_ctl;
        break;
    case PORT_SCR_ERR:
        val = pr->scr_err;
        break;
    case PORT_SCR_ACT:
        val = pr->scr_act;
        break;
    case PORT_CMD_ISSUE:
        val = pr->cmd_issue;
        break;
    case PORT_RESERVED:
    default:
        val = 0;
    }
    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);
    return val;
}

static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
    DeviceState *dev_state = s->container;
    PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
                                                           TYPE_PCI_DEVICE);

    DPRINTF(0, "raise irq\n");

    if (pci_dev && msi_enabled(pci_dev)) {
        msi_notify(pci_dev, 0);
    } else {
        qemu_irq_raise(s->irq);
    }
}

static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)
{
    DeviceState *dev_state = s->container;
    PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
                                                           TYPE_PCI_DEVICE);

    DPRINTF(0, "lower irq\n");

    if (!pci_dev || !msi_enabled(pci_dev)) {
        qemu_irq_lower(s->irq);
    }
}

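/* Recompute the HBA-level interrupt status from each port's
 * irq_stat & irq_mask, then raise or lower the HBA interrupt
 * depending on HOST_CTL_IRQ_EN. */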
static void ahci_check_irq(AHCIState *s)
{
    int i;

    DPRINTF(-1, "check irq %#x\n", s->control_regs.irqstatus);

    s->control_regs.irqstatus = 0;
    for (i = 0; i < s->ports; i++) {
        AHCIPortRegs *pr = &s->dev[i].port_regs;
        if (pr->irq_stat & pr->irq_mask) {
            s->control_regs.irqstatus |= (1 << i);
        }
    }

    if (s->control_regs.irqstatus &&
        (s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
        ahci_irq_raise(s, NULL);
    } else {
        ahci_irq_lower(s, NULL);
    }
}

static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d,
                             int irq_type)
{
    DPRINTF(d->port_no, "trigger irq %#x -> %x\n",
            irq_type, d->port_regs.irq_mask & irq_type);

    d->port_regs.irq_stat |= irq_type;
    ahci_check_irq(s);
}

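/* (Re)map a 'wanted'-byte guest region for device writes. If the full
 * length cannot be mapped, the partial mapping is released and *ptr is
 * left NULL. */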
static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
                     uint32_t wanted)
{
    hwaddr len = wanted;

    if (*ptr) {
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
    }

    *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE);
    if (len < wanted) {
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
        *ptr = NULL;
    }
}

/**
 * Check the cmd register to see if we should start or stop
 * the DMA or FIS RX engines.
 *
 * @ad: Device to engage or disengage.
 *
 * @return 0 on success, -1 on error.
 */
static int ahci_cond_start_engines(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    bool cmd_start = pr->cmd & PORT_CMD_START;
    bool cmd_on    = pr->cmd & PORT_CMD_LIST_ON;
    bool fis_start = pr->cmd & PORT_CMD_FIS_RX;
    bool fis_on    = pr->cmd & PORT_CMD_FIS_ON;

    if (cmd_start && !cmd_on) {
        if (!ahci_map_clb_address(ad)) {
            pr->cmd &= ~PORT_CMD_START;
            error_report("AHCI: Failed to start DMA engine: "
                         "bad command list buffer address");
            return -1;
        }
    } else if (!cmd_start && cmd_on) {
        ahci_unmap_clb_address(ad);
    }

    if (fis_start && !fis_on) {
        if (!ahci_map_fis_address(ad)) {
            pr->cmd &= ~PORT_CMD_FIS_RX;
            error_report("AHCI: Failed to start FIS receive engine: "
                         "bad FIS receive buffer address");
            return -1;
        }
    } else if (!fis_start && fis_on) {
        ahci_unmap_fis_address(ad);
    }

    return 0;
}

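/* Handle a guest write to a per-port register. Note that PORT_IRQ_STAT
 * and PORT_SCR_ERR are write-1-to-clear, PORT_SCR_ACT and PORT_CMD_ISSUE
 * are write-1-to-set, and the task file, signature and SCR status
 * registers are read-only. */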
static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;

    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);
    switch (offset) {
    case PORT_LST_ADDR:
        pr->lst_addr = val;
        break;
    case PORT_LST_ADDR_HI:
        pr->lst_addr_hi = val;
        break;
    case PORT_FIS_ADDR:
        pr->fis_addr = val;
        break;
    case PORT_FIS_ADDR_HI:
        pr->fis_addr_hi = val;
        break;
    case PORT_IRQ_STAT:
        pr->irq_stat &= ~val;
        ahci_check_irq(s);
        break;
    case PORT_IRQ_MASK:
        pr->irq_mask = val & 0xfdc000ff;
        ahci_check_irq(s);
        break;
    case PORT_CMD:
        /* Block any read-only fields from being set,
         * including LIST_ON and FIS_ON.
         * The spec requires the ICC bits to be set back to zero once the
         * ICC change is done. We don't support ICC state changes, therefore
         * always force the ICC bits to zero.
         */
        pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) |
                  (val & ~(PORT_CMD_RO_MASK | PORT_CMD_ICC_MASK));

        /* Check FIS RX and CLB engines */
        ahci_cond_start_engines(&s->dev[port]);

        /* XXX Usually the FIS would be pending on the bus here and its
           issue deferred until the OS enables FIS reception.
           Instead, we only submit it once - which works in most
           cases, but is a hack. */
        if ((pr->cmd & PORT_CMD_FIS_ON) &&
            !s->dev[port].init_d2h_sent) {
            ahci_init_d2h(&s->dev[port]);
        }

        check_cmd(s, port);
        break;
    case PORT_TFDATA:
        /* Read Only. */
        break;
    case PORT_SIG:
        /* Read Only */
        break;
    case PORT_SCR_STAT:
        /* Read Only */
        break;
    case PORT_SCR_CTL:
        if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
            ((val & AHCI_SCR_SCTL_DET) == 0)) {
            ahci_reset_port(s, port);
        }
        pr->scr_ctl = val;
        break;
    case PORT_SCR_ERR:
        pr->scr_err &= ~val;
        break;
    case PORT_SCR_ACT:
        /* RW1 */
        pr->scr_act |= val;
        break;
    case PORT_CMD_ISSUE:
        pr->cmd_issue |= val;
        check_cmd(s, port);
        break;
    default:
        break;
    }
}

static uint64_t ahci_mem_read_32(void *opaque, hwaddr addr)
{
    AHCIState *s = opaque;
    uint32_t val = 0;

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        switch (addr) {
        case HOST_CAP:
            val = s->control_regs.cap;
            break;
        case HOST_CTL:
            val = s->control_regs.ghc;
            break;
        case HOST_IRQ_STAT:
            val = s->control_regs.irqstatus;
            break;
        case HOST_PORTS_IMPL:
            val = s->control_regs.impl;
            break;
        case HOST_VERSION:
            val = s->control_regs.version;
            break;
        }

        DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                             addr & AHCI_PORT_ADDR_OFFSET_MASK);
    }

    return val;
}

/**
 * AHCI 1.3 section 3 ("HBA Memory Registers")
 * Support unaligned 8/16/32 bit reads, and 64 bit aligned reads.
 * Caller is responsible for masking unwanted higher order bytes.
 */
static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    hwaddr aligned = addr & ~0x3;
    int ofst = addr - aligned;
    uint64_t lo = ahci_mem_read_32(opaque, aligned);
    uint64_t hi;
    uint64_t val;

    /* if < 8 byte read does not cross 4 byte boundary */
    if (ofst + size <= 4) {
        val = lo >> (ofst * 8);
    } else {
        g_assert_cmpint(size, >, 1);

        /* If the 64bit read is unaligned, we will produce undefined
         * results. AHCI does not support unaligned 64bit reads. */
        hi = ahci_mem_read_32(opaque, aligned + 4);
        val = (hi << 32 | lo) >> (ofst * 8);
    }

    DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
            addr, val, size);
    return val;
}

static void ahci_mem_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
            addr, val, size);

    /* Only aligned writes are allowed on AHCI */
    if (addr & 3) {
        fprintf(stderr, "ahci: Mis-aligned write to addr 0x"
                TARGET_FMT_plx "\n", addr);
        return;
    }

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val);

        switch (addr) {
        case HOST_CAP: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case HOST_CTL: /* R/W */
            if (val & HOST_CTL_RESET) {
                DPRINTF(-1, "HBA Reset\n");
                ahci_reset(s);
            } else {
                s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
                ahci_check_irq(s);
            }
            break;
        case HOST_IRQ_STAT: /* R/WC, RO */
            s->control_regs.irqstatus &= ~val;
            ahci_check_irq(s);
            break;
        case HOST_PORTS_IMPL: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case HOST_VERSION: /* RO */
            /* FIXME report write? */
            break;
        default:
            DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr);
        }
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                        addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
    }
}

static const MemoryRegionOps ahci_mem_ops = {
    .read = ahci_mem_read,
    .write = ahci_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register */
        return s->idp_index;
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory read at location selected by index */
        return ahci_mem_read(opaque, s->idp_index, size);
    } else {
        return 0;
    }
}

static void ahci_idp_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register - mask off reserved bits */
        s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory write at location selected by index */
        ahci_mem_write(opaque, s->idp_index, val, size);
    }
}

static const MemoryRegionOps ahci_idp_ops = {
    .read = ahci_idp_read,
    .write = ahci_idp_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void ahci_reg_init(AHCIState *s)
{
    int i;

    s->control_regs.cap = (s->ports - 1) |
                          (AHCI_NUM_COMMAND_SLOTS << 8) |
                          (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
                          HOST_CAP_NCQ | HOST_CAP_AHCI | HOST_CAP_64;

    s->control_regs.impl = (1 << s->ports) - 1;

    s->control_regs.version = AHCI_VERSION_1_0;

    for (i = 0; i < s->ports; i++) {
        s->dev[i].port_state = STATE_RUN;
    }
}

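/* Walk the 32 PORT_CMD_ISSUE bits and hand each pending slot to
 * handle_cmd(); bits whose handle_cmd() call reports completion are
 * cleared here, busy slots are retired later by ahci_check_cmd_bh(). */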
static void check_cmd(AHCIState *s, int port)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;
    uint8_t slot;

    if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
        for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
            if ((pr->cmd_issue & (1U << slot)) &&
                !handle_cmd(s, port, slot)) {
                pr->cmd_issue &= ~(1U << slot);
            }
        }
    }
}

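/* Bottom half: once the device is no longer busy (BSY/DRQ clear), retire
 * the slot that was blocking the queue and look for further pending
 * commands on this port. */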
static void ahci_check_cmd_bh(void *opaque)
{
    AHCIDevice *ad = opaque;

    qemu_bh_delete(ad->check_bh);
    ad->check_bh = NULL;

    if ((ad->busy_slot != -1) &&
        !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) {
        /* no longer busy */
        ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot);
        ad->busy_slot = -1;
    }

    check_cmd(ad->hba, ad->port_no);
}

static void ahci_init_d2h(AHCIDevice *ad)
{
    IDEState *ide_state = &ad->port.ifs[0];
    AHCIPortRegs *pr = &ad->port_regs;

    if (ad->init_d2h_sent) {
        return;
    }

    if (ahci_write_fis_d2h(ad)) {
        ad->init_d2h_sent = true;
        /* We're emulating receiving the first Register D2H FIS from the
         * device; update the SIG register, but otherwise proceed as normal. */
        pr->sig = ((uint32_t)ide_state->hcyl << 24) |
            (ide_state->lcyl << 16) |
            (ide_state->sector << 8) |
            (ide_state->nsector & 0xFF);
    }
}

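/* Spread a 32-bit SATA signature across the shadow task file registers
 * (hcyl, lcyl, sector, nsector); ahci_init_d2h() packs the same fields
 * back into PxSIG. */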
static void ahci_set_signature(AHCIDevice *ad, uint32_t sig)
{
    IDEState *s = &ad->port.ifs[0];
    s->hcyl = sig >> 24 & 0xFF;
    s->lcyl = sig >> 16 & 0xFF;
    s->sector = sig >> 8 & 0xFF;
    s->nsector = sig & 0xFF;

    DPRINTF(ad->port_no, "set hcyl:lcyl:sect:nsect = 0x%08x\n", sig);
}

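/* Reset a single port: reset the IDE bus, restore the port registers to
 * their default values, and cancel or tear down any NCQ transfers still
 * in flight before reporting the drive signature. */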
static void ahci_reset_port(AHCIState *s, int port)
{
    AHCIDevice *d = &s->dev[port];
    AHCIPortRegs *pr = &d->port_regs;
    IDEState *ide_state = &d->port.ifs[0];
    int i;

    DPRINTF(port, "reset port\n");

    ide_bus_reset(&d->port);
    ide_state->ncq_queues = AHCI_MAX_CMDS;

    pr->scr_stat = 0;
    pr->scr_err = 0;
    pr->scr_act = 0;
    pr->tfdata = 0x7F;
    pr->sig = 0xFFFFFFFF;
    d->busy_slot = -1;
    d->init_d2h_sent = false;

    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        return;
    }

    /* reset ncq queue */
    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i];
        ncq_tfs->halt = false;
        if (!ncq_tfs->used) {
            continue;
        }

        if (ncq_tfs->aiocb) {
            blk_aio_cancel(ncq_tfs->aiocb);
            ncq_tfs->aiocb = NULL;
        }

        /* Maybe we just finished the request thanks to blk_aio_cancel() */
        if (!ncq_tfs->used) {
            continue;
        }

        qemu_sglist_destroy(&ncq_tfs->sglist);
        ncq_tfs->used = 0;
    }

    s->dev[port].port_state = STATE_RUN;
    if (ide_state->drive_kind == IDE_CD) {
        ahci_set_signature(d, SATA_SIGNATURE_CDROM);
        ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT;
    } else {
        ahci_set_signature(d, SATA_SIGNATURE_DISK);
        ide_state->status = SEEK_STAT | WRERR_STAT;
    }

    ide_state->error = 1;
    ahci_init_d2h(d);
}

static void debug_print_fis(uint8_t *fis, int cmd_len)
{
#if DEBUG_AHCI
    int i;

    fprintf(stderr, "fis:");
    for (i = 0; i < cmd_len; i++) {
        if ((i & 0xf) == 0) {
            fprintf(stderr, "\n%02x:", i);
        }
        fprintf(stderr, "%02x ", fis[i]);
    }
    fprintf(stderr, "\n");
#endif
}

static bool ahci_map_fis_address(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    map_page(ad->hba->as, &ad->res_fis,
             ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);
    if (ad->res_fis != NULL) {
        pr->cmd |= PORT_CMD_FIS_ON;
        return true;
    }

    pr->cmd &= ~PORT_CMD_FIS_ON;
    return false;
}

static void ahci_unmap_fis_address(AHCIDevice *ad)
{
    if (ad->res_fis == NULL) {
        DPRINTF(ad->port_no, "Attempt to unmap NULL FIS address\n");
        return;
    }
    ad->port_regs.cmd &= ~PORT_CMD_FIS_ON;
    dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
                     DMA_DIRECTION_FROM_DEVICE, 256);
    ad->res_fis = NULL;
}

static bool ahci_map_clb_address(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    ad->cur_cmd = NULL;
    map_page(ad->hba->as, &ad->lst,
             ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
    if (ad->lst != NULL) {
        pr->cmd |= PORT_CMD_LIST_ON;
        return true;
    }

    pr->cmd &= ~PORT_CMD_LIST_ON;
    return false;
}

static void ahci_unmap_clb_address(AHCIDevice *ad)
{
    if (ad->lst == NULL) {
        DPRINTF(ad->port_no, "Attempt to unmap NULL CLB address\n");
        return;
    }
    ad->port_regs.cmd &= ~PORT_CMD_LIST_ON;
    dma_memory_unmap(ad->hba->as, ad->lst, 1024,
                     DMA_DIRECTION_FROM_DEVICE, 1024);
    ad->lst = NULL;
}

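/* Post a Set Device Bits FIS to the receive area for the NCQ commands
 * recorded in ad->finished, update the shadow task file and SActive,
 * and raise the SDB FIS interrupt. */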
static void ahci_write_fis_sdb(AHCIState *s, NCQTransferState *ncq_tfs)
{
    AHCIDevice *ad = ncq_tfs->drive;
    AHCIPortRegs *pr = &ad->port_regs;
    IDEState *ide_state;
    SDBFIS *sdb_fis;

    if (!ad->res_fis ||
        !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
    ide_state = &ad->port.ifs[0];

    sdb_fis->type = SATA_FIS_TYPE_SDB;
    /* Interrupt pending & Notification bit */
    sdb_fis->flags = 0x40; /* Interrupt bit, always 1 for NCQ */
    sdb_fis->status = ide_state->status & 0x77;
    sdb_fis->error = ide_state->error;
    /* update SAct field in SDB_FIS */
    sdb_fis->payload = cpu_to_le32(ad->finished);

    /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 (ad->port.ifs[0].status & 0x77) |
                 (pr->tfdata & 0x88);
    pr->scr_act &= ~ad->finished;
    ad->finished = 0;

    /* Trigger IRQ if interrupt bit is set (which currently, it always is) */
    if (sdb_fis->flags & 0x40) {
        ahci_trigger_irq(s, ad, PORT_IRQ_SDB_FIS);
    }
}

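/* Post a PIO Setup FIS reflecting the current task file state and the
 * transfer count 'len', then raise the PIO Setup FIS interrupt (and a
 * task file error interrupt if ERR_STAT is set). */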
static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *pio_fis;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    pio_fis = &ad->res_fis[RES_FIS_PSFIS];

    pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
    pio_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    pio_fis[2] = s->status;
    pio_fis[3] = s->error;

    pio_fis[4] = s->sector;
    pio_fis[5] = s->lcyl;
    pio_fis[6] = s->hcyl;
    pio_fis[7] = s->select;
    pio_fis[8] = s->hob_sector;
    pio_fis[9] = s->hob_lcyl;
    pio_fis[10] = s->hob_hcyl;
    pio_fis[11] = 0;
    pio_fis[12] = s->nsector & 0xFF;
    pio_fis[13] = (s->nsector >> 8) & 0xFF;
    pio_fis[14] = 0;
    pio_fis[15] = s->status;
    pio_fis[16] = len & 255;
    pio_fis[17] = len >> 8;
    pio_fis[18] = 0;
    pio_fis[19] = 0;

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (pio_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_PIOS_FIS);
}

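/* Post a Register D2H FIS reflecting the current task file state and
 * update the shadow registers. Returns false if the FIS receive area is
 * not mapped or FIS reception is disabled. */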
static bool ahci_write_fis_d2h(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *d2h_fis;
    int i;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return false;
    }

    d2h_fis = &ad->res_fis[RES_FIS_RFIS];

    d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
    d2h_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    d2h_fis[2] = s->status;
    d2h_fis[3] = s->error;

    d2h_fis[4] = s->sector;
    d2h_fis[5] = s->lcyl;
    d2h_fis[6] = s->hcyl;
    d2h_fis[7] = s->select;
    d2h_fis[8] = s->hob_sector;
    d2h_fis[9] = s->hob_lcyl;
    d2h_fis[10] = s->hob_hcyl;
    d2h_fis[11] = 0;
    d2h_fis[12] = s->nsector & 0xFF;
    d2h_fis[13] = (s->nsector >> 8) & 0xFF;
    for (i = 14; i < 20; i++) {
        d2h_fis[i] = 0;
    }

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
                 ad->port.ifs[0].status;

    if (d2h_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS);
    return true;
}

static int prdt_tbl_entry_size(const AHCI_SG *tbl)
{
    /* flags_size is zero-based */
    return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
}

/**
 * Fetch entries in a guest-provided PRDT and convert it into a QEMU SGlist.
 * @ad: The AHCIDevice for which we are building the SGList.
 * @sglist: The SGList target to add PRD entries to.
 * @cmd: The AHCI Command Header that describes where the PRDT is.
 * @limit: The remaining size of the S/ATA transaction, in bytes.
 * @offset: The number of bytes already transferred.
 *
 * The AHCI PRDT can describe up to 256 GiB, but S/ATA only supports
 * transactions of up to 32 MiB as of ATA8-ACS3 rev 1b, assuming a 512 byte
 * sector size. We stop building the sglist from the PRDT as soon as we hit
 * @limit bytes, which is <= INT32_MAX (2 GiB).
 */
static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
                                AHCICmdHdr *cmd, int64_t limit, uint64_t offset)
{
    uint16_t opts = le16_to_cpu(cmd->opts);
    uint16_t prdtl = le16_to_cpu(cmd->prdtl);
    uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr);
    uint64_t prdt_addr = cfis_addr + 0x80;
    dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG));
    dma_addr_t real_prdt_len = prdt_len;
    uint8_t *prdt;
    int i;
    int r = 0;
    uint64_t sum = 0;
    int off_idx = -1;
    int64_t off_pos = -1;
    int tbl_entry_size;
    IDEBus *bus = &ad->port;
    BusState *qbus = BUS(bus);

    if (!prdtl) {
        DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts);
        return -1;
    }

    /* map PRDT */
    if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
                                DMA_DIRECTION_TO_DEVICE))) {
        DPRINTF(ad->port_no, "map failed\n");
        return -1;
    }

    if (prdt_len < real_prdt_len) {
        DPRINTF(ad->port_no, "mapped less than expected\n");
        r = -1;
        goto out;
    }

    /* Get entries in the PRDT, init a qemu sglist accordingly */
    if (prdtl > 0) {
        AHCI_SG *tbl = (AHCI_SG *)prdt;
        sum = 0;
        for (i = 0; i < prdtl; i++) {
            tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
            if (offset < (sum + tbl_entry_size)) {
                off_idx = i;
                off_pos = offset - sum;
                break;
            }
            sum += tbl_entry_size;
        }
        if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) {
            DPRINTF(ad->port_no, "%s: Incorrect offset! "
                    "off_idx: %d, off_pos: %"PRId64"\n",
                    __func__, off_idx, off_pos);
            r = -1;
            goto out;
        }

        qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx),
                         ad->hba->as);
        qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos,
                        MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos,
                            limit));
ahci: Fix ahci cdrom read corruptions for reads > 128k
While testing q35, which has its cdrom attached to the ahci controller, I found
that the Fedora 17 install would panic on boot. The panic occurs while
squashfs is trying to read from the cdrom. The errors are:
[ 8.622711] SQUASHFS error: xz_dec_run error, data probably corrupt
[ 8.625180] SQUASHFS error: squashfs_read_data failed to read block
0x20be48a
I was also able to produce corrupt data reads using an installed piix based
qemu machine, using 'dd'. I found that the corruptions were only occuring when
then read size was greater than 128k. For example, the following command
results in corrupted reads:
dd if=/dev/sr0 of=/tmp/blah bs=256k iflag=direct
The > 128k size reads exercise a different code path than 128k and below. In
ide_atapi_cmd_read_dma_cb() s->io_buffer_size is capped at 128k. Thus,
ide_atapi_cmd_read_dma_cb() is called a second time when the read is > 128k.
However, ahci_dma_rw_buf() restart the read from offset 0, instead of at 128k.
Thus, resulting in a corrupted read.
To fix this, I've introduced 'io_buffer_offset' field in IDEState to keep
track of the offset. I've also modified ahci_populate_sglist() to take a new
3rd offset argument, so that the sglist is property initialized.
I've tested this patch using 'dd' testing, and Fedora 17 now correctly boots
and installs on q35 with the cdrom ahci controller.
Signed-off-by: Jason Baron <jbaron@redhat.com>
Tested-by: Andreas Färber <afaerber@suse.de>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2012-08-03 19:57:06 +00:00
|
|
|
|
ide: add limit to .prepare_buf()
prepare_buf should not always grab as many descriptors
as it can, sometimes it should self-limit.
For example, an NCQ transfer of 1 sector with a PRDT that
describes 4GiB of data should not copy 4GiB of data, it
should just transfer that first 512 bytes.
PIO is not affected, because the dma_buf_rw dma helpers
already have a byte limit built-in to them, but DMA/NCQ
will exhaust the entire list regardless of requested size.
AHCI 1.3 specifies in section 6.1.6 Command List Underflow that
NCQ is not required to detect underflow conditions. Non-NCQ
pathways signal underflow by writing to the PRDBC field, which
will already occur by writing the actual transferred byte count
to the PRDBC, signaling the underflow.
Our NCQ pathways aren't required to detect underflow, but since our DMA
backend uses the size of the PRDT to determine the size of the transer,
if our PRDT is bigger than the transaction (the underflow condition) it
doesn't cost us anything to detect it and truncate the PRDT.
This is a recoverable error and is not signaled to the guest, in either
NCQ or normal DMA cases.
For BMDMA, the existing pathways should see no guest-visible difference,
but any bytes described in the overage will no longer be transferred
before indicating to the guest that there was an underflow.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 1435767578-32743-2-git-send-email-jsnow@redhat.com
2015-07-04 06:06:04 +00:00
|
|
|
for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) {
|
2010-12-14 00:34:40 +00:00
|
|
|
qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
|
ide: add limit to .prepare_buf()
prepare_buf should not always grab as many descriptors
as it can, sometimes it should self-limit.
For example, an NCQ transfer of 1 sector with a PRDT that
describes 4GiB of data should not copy 4GiB of data, it
should just transfer that first 512 bytes.
PIO is not affected, because the dma_buf_rw dma helpers
already have a byte limit built-in to them, but DMA/NCQ
will exhaust the entire list regardless of requested size.
AHCI 1.3 specifies in section 6.1.6 Command List Underflow that
NCQ is not required to detect underflow conditions. Non-NCQ
pathways signal underflow by writing to the PRDBC field, which
will already occur by writing the actual transferred byte count
to the PRDBC, signaling the underflow.
Our NCQ pathways aren't required to detect underflow, but since our DMA
backend uses the size of the PRDT to determine the size of the transer,
if our PRDT is bigger than the transaction (the underflow condition) it
doesn't cost us anything to detect it and truncate the PRDT.
This is a recoverable error and is not signaled to the guest, in either
NCQ or normal DMA cases.
For BMDMA, the existing pathways should see no guest-visible difference,
but any bytes described in the overage will no longer be transferred
before indicating to the guest that there was an underflow.
Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 1435767578-32743-2-git-send-email-jsnow@redhat.com
2015-07-04 06:06:04 +00:00
|
|
|
MIN(prdt_tbl_entry_size(&tbl[i]),
|
|
|
|
limit - sglist->size));
|
2010-12-14 00:34:40 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
2013-04-10 16:15:49 +00:00
|
|
|
dma_memory_unmap(ad->hba->as, prdt, prdt_len,
|
2012-06-27 04:50:41 +00:00
|
|
|
DMA_DIRECTION_TO_DEVICE, prdt_len);
|
2010-12-14 00:34:40 +00:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
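/* Fail an in-flight NCQ command: report an aborted-command error on the
 * drive, record the failed tag in port_regs.scr_err, and release the
 * transfer's scatter-gather list so the slot can be reused. */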
static void ncq_err(NCQTransferState *ncq_tfs)
{
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    ide_state->error = ABRT_ERR;
    ide_state->status = READY_STAT | ERR_STAT;
    ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);
    qemu_sglist_destroy(&ncq_tfs->sglist);
    ncq_tfs->used = 0;
}

static void ncq_finish(NCQTransferState *ncq_tfs)
{
    /* If we didn't error out, set our finished bit. Errored commands
     * do not get a bit set for the SDB FIS ACT register, nor do they
     * clear the outstanding bit in scr_act (PxSACT). */
    if (!(ncq_tfs->drive->port_regs.scr_err & (1 << ncq_tfs->tag))) {
        ncq_tfs->drive->finished |= (1 << ncq_tfs->tag);
    }

    ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs);

    DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n",
            ncq_tfs->tag);

    block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk),
                    &ncq_tfs->acct);
    qemu_sglist_destroy(&ncq_tfs->sglist);
    ncq_tfs->used = 0;
}

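/* Completion callback for NCQ transfers issued by execute_ncq_command().
 * On error, the block layer's error action decides the outcome: STOP halts
 * the transfer for a later retry (IDE_RETRY_HBA), REPORT fails it via
 * ncq_err(). Transfers that were not halted are finalized by ncq_finish(). */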
static void ncq_cb(void *opaque, int ret)
{
    NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    ncq_tfs->aiocb = NULL;
    if (ret == -ECANCELED) {
        return;
    }

    if (ret < 0) {
        bool is_read = ncq_tfs->cmd == READ_FPDMA_QUEUED;
        BlockErrorAction action = blk_get_error_action(ide_state->blk,
                                                       is_read, -ret);
        if (action == BLOCK_ERROR_ACTION_STOP) {
            ncq_tfs->halt = true;
            ide_state->bus->error_status = IDE_RETRY_HBA;
        } else if (action == BLOCK_ERROR_ACTION_REPORT) {
            ncq_err(ncq_tfs);
        }
        blk_error_action(ide_state->blk, action, is_read, -ret);
    } else {
        ide_state->status = READY_STAT | SEEK_STAT;
    }

    if (!ncq_tfs->halt) {
        ncq_finish(ncq_tfs);
    }
}

static int is_ncq(uint8_t ata_cmd)
{
    /* Based on SATA 3.2 section 13.6.3.2 */
    switch (ata_cmd) {
    case READ_FPDMA_QUEUED:
    case WRITE_FPDMA_QUEUED:
    case NCQ_NON_DATA:
    case RECEIVE_FPDMA_QUEUED:
    case SEND_FPDMA_QUEUED:
        return 1;
    default:
        return 0;
    }
}

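/* Issue a previously parsed NCQ transfer to the block layer. Only the
 * READ/WRITE FPDMA QUEUED commands are implemented here; any other command
 * that passes is_ncq() is rejected with ncq_err(). */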
static void execute_ncq_command(NCQTransferState *ncq_tfs)
{
    AHCIDevice *ad = ncq_tfs->drive;
    IDEState *ide_state = &ad->port.ifs[0];
    int port = ad->port_no;

    g_assert(is_ncq(ncq_tfs->cmd));
    ncq_tfs->halt = false;

    switch (ncq_tfs->cmd) {
    case READ_FPDMA_QUEUED:
        DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", tag %d\n",
                ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio read %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_READ);
        ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist,
                                      ncq_tfs->lba << BDRV_SECTOR_BITS,
                                      BDRV_SECTOR_SIZE,
                                      ncq_cb, ncq_tfs);
        break;
    case WRITE_FPDMA_QUEUED:
        DPRINTF(port, "NCQ writing %d sectors to LBA %"PRId64", tag %d\n",
                ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag);

        DPRINTF(port, "tag %d aio write %"PRId64"\n",
                ncq_tfs->tag, ncq_tfs->lba);

        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
        ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
                                       ncq_tfs->lba << BDRV_SECTOR_BITS,
                                       BDRV_SECTOR_SIZE,
                                       ncq_cb, ncq_tfs);
        break;
    default:
        DPRINTF(port, "error: unsupported NCQ command (0x%02x) received\n",
                ncq_tfs->cmd);
        ncq_err(ncq_tfs);
    }
}

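/* Parse an NCQ (FPDMA QUEUED) command FIS: extract the tag, LBA and sector
 * count, warn about unsupported fields, build the scatter-gather list from
 * the slot's PRDT and hand the transfer to execute_ncq_command(). */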
static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis,
                                uint8_t slot)
{
    AHCIDevice *ad = &s->dev[port];
    IDEState *ide_state = &ad->port.ifs[0];
    NCQFrame *ncq_fis = (NCQFrame*)cmd_fis;
    uint8_t tag = ncq_fis->tag >> 3;
    NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag];
    size_t size;

    g_assert(is_ncq(ncq_fis->command));
    if (ncq_tfs->used) {
        /* error - already in use */
        fprintf(stderr, "%s: tag %d already used\n", __FUNCTION__, tag);
        return;
    }

    ncq_tfs->used = 1;
    ncq_tfs->drive = ad;
    ncq_tfs->slot = slot;
    ncq_tfs->cmdh = &((AHCICmdHdr *)ad->lst)[slot];
    ncq_tfs->cmd = ncq_fis->command;
    ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) |
                   ((uint64_t)ncq_fis->lba4 << 32) |
                   ((uint64_t)ncq_fis->lba3 << 24) |
                   ((uint64_t)ncq_fis->lba2 << 16) |
                   ((uint64_t)ncq_fis->lba1 << 8) |
                   (uint64_t)ncq_fis->lba0;
    ncq_tfs->tag = tag;

    /* Sanity-check the NCQ packet */
    if (tag != slot) {
        DPRINTF(port, "Warn: NCQ slot (%d) did not match the given tag (%d)\n",
                slot, tag);
    }

    if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) {
        DPRINTF(port, "Warn: Attempt to use NCQ auxiliary fields.\n");
    }
    if (ncq_fis->prio || ncq_fis->icc) {
        DPRINTF(port, "Warn: Unsupported attempt to use PRIO/ICC fields\n");
    }
    if (ncq_fis->fua & NCQ_FIS_FUA_MASK) {
        DPRINTF(port, "Warn: Unsupported attempt to use Force Unit Access\n");
    }
    if (ncq_fis->tag & NCQ_FIS_RARC_MASK) {
        DPRINTF(port, "Warn: Unsupported attempt to use Rebuild Assist\n");
    }

    ncq_tfs->sector_count = ((ncq_fis->sector_count_high << 8) |
                             ncq_fis->sector_count_low);
    if (!ncq_tfs->sector_count) {
        ncq_tfs->sector_count = 0x10000;
    }
    size = ncq_tfs->sector_count * 512;
    ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0);

    if (ncq_tfs->sglist.size < size) {
        error_report("ahci: PRDT length for NCQ command (0x%zx) "
                     "is smaller than the requested size (0x%zx)",
                     ncq_tfs->sglist.size, size);
        ncq_err(ncq_tfs);
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_OVERFLOW);
        return;
    } else if (ncq_tfs->sglist.size != size) {
        DPRINTF(port, "Warn: PRDTL (0x%zx)"
                " does not match requested size (0x%zx)",
                ncq_tfs->sglist.size, size);
    }

    DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", "
            "drive max %"PRId64"\n",
            ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 1,
            ide_state->nb_sectors - 1);

    execute_ncq_command(ncq_tfs);
}

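/* Return the slot's command header within the port's command list, or NULL
 * if the port/slot is out of range or the command list is not mapped. */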
static AHCICmdHdr *get_cmd_header(AHCIState *s, uint8_t port, uint8_t slot)
{
    if (port >= s->ports || slot >= AHCI_MAX_CMDS) {
        return NULL;
    }

    return s->dev[port].lst ? &((AHCICmdHdr *)s->dev[port].lst)[slot] : NULL;
}

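/* Process a Register H2D FIS: handle soft reset sequencing when the command
 * update bit is clear, route NCQ commands to process_ncq_command(), and
 * otherwise copy the FIS fields into the IDE register file and execute the
 * ATA command. */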
static void handle_reg_h2d_fis(AHCIState *s, int port,
                               uint8_t slot, uint8_t *cmd_fis)
{
    IDEState *ide_state = &s->dev[port].port.ifs[0];
    AHCICmdHdr *cmd = get_cmd_header(s, port, slot);
    uint16_t opts = le16_to_cpu(cmd->opts);

    if (cmd_fis[1] & 0x0F) {
        DPRINTF(port, "Port Multiplier not supported."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);
        return;
    }

    if (cmd_fis[1] & 0x70) {
        DPRINTF(port, "Reserved flags set in H2D Register FIS."
                " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n",
                cmd_fis[0], cmd_fis[1], cmd_fis[2]);
        return;
    }

    if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) {
        switch (s->dev[port].port_state) {
        case STATE_RUN:
            if (cmd_fis[15] & ATA_SRST) {
                s->dev[port].port_state = STATE_RESET;
            }
            break;
        case STATE_RESET:
            if (!(cmd_fis[15] & ATA_SRST)) {
                ahci_reset_port(s, port);
            }
            break;
        }
        return;
    }

    /* Check for NCQ command */
    if (is_ncq(cmd_fis[2])) {
        process_ncq_command(s, port, cmd_fis, slot);
        return;
    }

    /* Decompose the FIS:
     * AHCI does not interpret FIS packets, it only forwards them.
     * SATA 1.0 describes how to decode LBA28 and CHS FIS packets.
     * Later specifications, e.g., SATA 3.2, describe LBA48 FIS packets.
     *
     * ATA4 describes sector number for LBA28/CHS commands.
     * ATA6 describes sector number for LBA48 commands.
     * ATA8 deprecates CHS fully, describing only LBA28/48.
     *
     * We dutifully convert the FIS into IDE registers, and allow the
     * core layer to interpret them as needed. */
    ide_state->feature = cmd_fis[3];
    ide_state->sector = cmd_fis[4];      /* LBA 7:0 */
    ide_state->lcyl = cmd_fis[5];        /* LBA 15:8 */
    ide_state->hcyl = cmd_fis[6];        /* LBA 23:16 */
    ide_state->select = cmd_fis[7];      /* LBA 27:24 (LBA28) */
    ide_state->hob_sector = cmd_fis[8];  /* LBA 31:24 */
    ide_state->hob_lcyl = cmd_fis[9];    /* LBA 39:32 */
    ide_state->hob_hcyl = cmd_fis[10];   /* LBA 47:40 */
    ide_state->hob_feature = cmd_fis[11];
    ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]);
    /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */
    /* 15: Only valid when UPDATE_COMMAND not set. */

    /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
     * table to ide_state->io_buffer */
    if (opts & AHCI_CMD_ATAPI) {
        memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10);
        debug_print_fis(ide_state->io_buffer, 0x10);
        s->dev[port].done_atapi_packet = false;
        /* XXX send PIO setup FIS */
    }

    ide_state->error = 0;

    /* Reset transferred byte counter */
    cmd->status = 0;

    /* We're ready to process the command in FIS byte 2. */
    ide_exec_cmd(&s->dev[port].port, cmd_fis[2]);
}

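/* Execute the command in the given slot: map the command table from guest
 * memory, dispatch the embedded FIS, and report whether the command
 * completed synchronously (0) or is still in flight / failed (-1). */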
static int handle_cmd(AHCIState *s, int port, uint8_t slot)
{
    IDEState *ide_state;
    uint64_t tbl_addr;
    AHCICmdHdr *cmd;
    uint8_t *cmd_fis;
    dma_addr_t cmd_len;

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* Engine currently busy, try again later */
        DPRINTF(port, "engine busy\n");
        return -1;
    }

    if (!s->dev[port].lst) {
        DPRINTF(port, "error: lst not given but cmd handled");
        return -1;
    }
    cmd = get_cmd_header(s, port, slot);
    /* remember current slot handle for later */
    s->dev[port].cur_cmd = cmd;

    /* The device we are working for */
    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        DPRINTF(port, "error: guest accessed unused port");
        return -1;
    }

    tbl_addr = le64_to_cpu(cmd->tbl_addr);
    cmd_len = 0x80;
    cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_FROM_DEVICE);
    if (!cmd_fis) {
        DPRINTF(port, "error: guest passed us an invalid cmd fis\n");
        return -1;
    } else if (cmd_len != 0x80) {
        ahci_trigger_irq(s, &s->dev[port], PORT_IRQ_HBUS_ERR);
        DPRINTF(port, "error: dma_memory_map failed: "
                "(len(%02"PRIx64") != 0x80)\n",
                cmd_len);
        goto out;
    }
    debug_print_fis(cmd_fis, 0x80);

    switch (cmd_fis[0]) {
    case SATA_FIS_TYPE_REGISTER_H2D:
        handle_reg_h2d_fis(s, port, slot, cmd_fis);
        break;
    default:
        DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x "
                      "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1],
                      cmd_fis[2]);
        break;
    }

out:
    dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE,
                     cmd_len);

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* async command, complete later */
        s->dev[port].busy_slot = slot;
        return -1;
    }

    /* done handling the command */
    return 0;
}

/* DMA dev <-> ram */
static void ahci_start_transfer(IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
    /* write == ram -> device */
    uint16_t opts = le16_to_cpu(ad->cur_cmd->opts);
    int is_write = opts & AHCI_CMD_WRITE;
    int is_atapi = opts & AHCI_CMD_ATAPI;
    int has_sglist = 0;

    if (is_atapi && !ad->done_atapi_packet) {
        /* already prepopulated iobuffer */
        ad->done_atapi_packet = true;
        size = 0;
        goto out;
    }

    if (ahci_dma_prepare_buf(dma, size)) {
        has_sglist = 1;
    }

    DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n",
            is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata",
            has_sglist ? "" : "o");

    if (has_sglist && size) {
        if (is_write) {
            dma_buf_write(s->data_ptr, size, &s->sg);
        } else {
            dma_buf_read(s->data_ptr, size, &s->sg);
        }
    }

out:
    /* declare that we processed everything */
    s->data_ptr = s->data_end;

    /* Update number of transferred bytes, destroy sglist */
    dma_buf_commit(s, size);

    s->end_transfer_func(s);

    if (!(s->status & DRQ_STAT)) {
        /* done with PIO send/receive */
        ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status));
    }
}

static void ahci_start_dma(IDEDMA *dma, IDEState *s,
                           BlockCompletionFunc *dma_cb)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    DPRINTF(ad->port_no, "\n");
    s->io_buffer_offset = 0;
    dma_cb(s, 0);
}

static void ahci_restart_dma(IDEDMA *dma)
{
    /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */
}

/**
 * IDE/PIO restarts are handled by the core layer, but NCQ commands
 * need an extra kick from the AHCI HBA.
 */
static void ahci_restart(IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    int i;

    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &ad->ncq_tfs[i];
        if (ncq_tfs->halt) {
            execute_ncq_command(ncq_tfs);
        }
    }
}

/**
 * Called in DMA and PIO R/W chains to read the PRDT.
 * Not shared with NCQ pathways.
 */
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];

    if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd,
                             limit, s->io_buffer_offset) == -1) {
        DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n");
        return -1;
    }
    s->io_buffer_size = s->sg.size;

    DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
    return s->io_buffer_size;
}

/**
 * Updates the command header with a bytes-read value.
 * Called via dma_buf_commit, for both DMA and PIO paths.
 * sglist destruction is handled within dma_buf_commit.
 */
static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    tx_bytes += le32_to_cpu(ad->cur_cmd->status);
    ad->cur_cmd->status = cpu_to_le32(tx_bytes);
}

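/* Transfer one chunk between the PRDT-described guest memory and the IDE
 * io_buffer at the current offset. Returns 1 on progress, 0 if no
 * scatter-gather list could be built for the remaining bytes. */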
static int ahci_dma_rw_buf(IDEDMA *dma, int is_write)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint8_t *p = s->io_buffer + s->io_buffer_index;
    int l = s->io_buffer_size - s->io_buffer_index;

    if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, l, s->io_buffer_offset)) {
        return 0;
    }

    if (is_write) {
        dma_buf_read(p, l, &s->sg);
    } else {
        dma_buf_write(p, l, &s->sg);
    }

    /* free sglist, update byte count */
    dma_buf_commit(s, l);

    s->io_buffer_index += l;

    DPRINTF(ad->port_no, "len=%#x\n", l);

    return 1;
}

static void ahci_cmd_done(IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);

    DPRINTF(ad->port_no, "cmd done\n");

    /* update d2h status */
    ahci_write_fis_d2h(ad);

    if (!ad->check_bh) {
        /* maybe we still have something to process, check later */
        ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
        qemu_bh_schedule(ad->check_bh);
    }
}

static void ahci_irq_set(void *opaque, int n, int level)
{
}

static const IDEDMAOps ahci_dma_ops = {
    .start_dma = ahci_start_dma,
    .restart = ahci_restart,
    .restart_dma = ahci_restart_dma,
    .start_transfer = ahci_start_transfer,
    .prepare_buf = ahci_dma_prepare_buf,
    .commit_buf = ahci_commit_buf,
    .rw_buf = ahci_dma_rw_buf,
    .cmd_done = ahci_cmd_done,
};

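/* Object-init vs. realize split: ahci_init() only creates the HBA's MMIO
 * regions; ahci_realize() allocates the per-port devices, their IDE buses
 * and IRQs once the number of ports is known. */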
void ahci_init(AHCIState *s, DeviceState *qdev)
{
    s->container = qdev;
    /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
    memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
                          "ahci", AHCI_MEM_BAR_SIZE);
    memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
                          "ahci-idp", 32);
}

void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
{
    qemu_irq *irqs;
    int i;

    s->as = as;
    s->ports = ports;
    s->dev = g_new0(AHCIDevice, ports);
    ahci_reg_init(s);
    irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);
    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1);
        ide_init2(&ad->port, irqs[i]);

        ad->hba = s;
        ad->port_no = i;
        ad->port.dma = &ad->dma;
        ad->port.dma->ops = &ahci_dma_ops;
        ide_register_restart_cb(&ad->port);
    }
    g_free(irqs);
}

void ahci_uninit(AHCIState *s)
{
    int i, j;

    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        for (j = 0; j < 2; j++) {
            IDEState *s = &ad->port.ifs[j];

            ide_exit(s);
        }
    }

    g_free(s->dev);
}

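/* Reset the entire HBA: clear the global interrupt status, force AHCI mode
 * back on and reset every port, including its PxCMD register. */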
void ahci_reset(AHCIState *s)
{
    AHCIPortRegs *pr;
    int i;

    s->control_regs.irqstatus = 0;
    /* AHCI Enable (AE)
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;
        pr->irq_stat = 0;
        pr->irq_mask = 0;
        pr->scr_ctl = 0;
        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);
    }
}

static const VMStateDescription vmstate_ncq_tfs = {
    .name = "ncq state",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(sector_count, NCQTransferState),
        VMSTATE_UINT64(lba, NCQTransferState),
        VMSTATE_UINT8(tag, NCQTransferState),
        VMSTATE_UINT8(cmd, NCQTransferState),
        VMSTATE_UINT8(slot, NCQTransferState),
        VMSTATE_BOOL(used, NCQTransferState),
        VMSTATE_BOOL(halt, NCQTransferState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ahci_device = {
    .name = "ahci port",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(port, AHCIDevice),
        VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
        VMSTATE_UINT32(port_state, AHCIDevice),
        VMSTATE_UINT32(finished, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
        VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
        VMSTATE_UINT32(port_regs.sig, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
        VMSTATE_BOOL(done_atapi_packet, AHCIDevice),
        VMSTATE_INT32(busy_slot, AHCIDevice),
        VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
        VMSTATE_STRUCT_ARRAY(ncq_tfs, AHCIDevice, AHCI_MAX_CMDS,
                             1, vmstate_ncq_tfs, NCQTransferState),
        VMSTATE_END_OF_LIST()
    },
};

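/* Post-migration fixup: sanity-check the migrated port and NCQ state,
 * conditionally restart the DMA/FIS receive engines, rebuild the NCQ
 * scatter-gather lists and resume or re-check pending commands. */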
static int ahci_state_post_load(void *opaque, int version_id)
{
    int i, j;
    struct AHCIDevice *ad;
    NCQTransferState *ncq_tfs;
    AHCIPortRegs *pr;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {
        ad = &s->dev[i];
        pr = &ad->port_regs;

        if (!(pr->cmd & PORT_CMD_START) && (pr->cmd & PORT_CMD_LIST_ON)) {
            error_report("AHCI: DMA engine should be off, but status bit "
                         "indicates it is still running.");
            return -1;
        }
        if (!(pr->cmd & PORT_CMD_FIS_RX) && (pr->cmd & PORT_CMD_FIS_ON)) {
            error_report("AHCI: FIS RX engine should be off, but status bit "
                         "indicates it is still running.");
            return -1;
        }

        /* After a migrate, the DMA/FIS engines are "off" and
         * need to be conditionally restarted */
        pr->cmd &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON);
        if (ahci_cond_start_engines(ad) != 0) {
            return -1;
        }

        for (j = 0; j < AHCI_MAX_CMDS; j++) {
            ncq_tfs = &ad->ncq_tfs[j];
            ncq_tfs->drive = ad;

            if (ncq_tfs->used != ncq_tfs->halt) {
                return -1;
            }
            if (!ncq_tfs->halt) {
                continue;
            }
            if (!is_ncq(ncq_tfs->cmd)) {
                return -1;
            }
            if (ncq_tfs->slot != ncq_tfs->tag) {
                return -1;
            }
            /* If ncq_tfs->halt is justly set, the engine should be engaged,
             * and the command list buffer should be mapped. */
            ncq_tfs->cmdh = get_cmd_header(s, i, ncq_tfs->slot);
            if (!ncq_tfs->cmdh) {
                return -1;
            }
            ahci_populate_sglist(ncq_tfs->drive, &ncq_tfs->sglist,
                                 ncq_tfs->cmdh, ncq_tfs->sector_count * 512,
                                 0);
            if (ncq_tfs->sector_count != ncq_tfs->sglist.size >> 9) {
                return -1;
            }
        }

        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {
            check_cmd(s, i);
        } else {
            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
                return -1;
            }
            ad->cur_cmd = get_cmd_header(s, i, ad->busy_slot);
        }
    }

    return 0;
}

const VMStateDescription vmstate_ahci = {
    .name = "ahci",
    .version_id = 1,
    .post_load = ahci_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
                                            vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        VMSTATE_INT32_EQUAL(ports, AHCIState, NULL),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_sysbus_ahci = {
    .name = "sysbus-ahci",
    .fields = (VMStateField[]) {
        VMSTATE_AHCI(ahci, SysbusAHCIState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_ahci_reset(DeviceState *dev)
{
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_reset(&s->ahci);
}

static void sysbus_ahci_init(Object *obj)
{
    SysbusAHCIState *s = SYSBUS_AHCI(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    ahci_init(&s->ahci, DEVICE(obj));

    sysbus_init_mmio(sbd, &s->ahci.mem);
    sysbus_init_irq(sbd, &s->ahci.irq);
}

static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
{
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_realize(&s->ahci, dev, &address_space_memory, s->num_ports);
}

static Property sysbus_ahci_properties[] = {
    DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void sysbus_ahci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_ahci_realize;
    dc->vmsd = &vmstate_sysbus_ahci;
    dc->props = sysbus_ahci_properties;
    dc->reset = sysbus_ahci_reset;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_ahci_info = {
    .name          = TYPE_SYSBUS_AHCI,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysbusAHCIState),
    .instance_init = sysbus_ahci_init,
    .class_init    = sysbus_ahci_class_init,
};

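/* Allwinner SoC AHCI: vendor-specific registers mapped as a subregion of the
 * HBA MMIO space. The constants below are word indices into
 * AllwinnerAHCIState.regs[], i.e. (register offset - ALLWINNER_AHCI_MMIO_OFF) / 4. */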
#define ALLWINNER_AHCI_BISTAFR    ((0xa0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_BISTCR     ((0xa4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_BISTFCTR   ((0xa8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_BISTSR     ((0xac - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_BISTDECR   ((0xb0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_DIAGNR0    ((0xb4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_DIAGNR1    ((0xb8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_OOBR       ((0xbc - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_PHYCS0R    ((0xc0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_PHYCS1R    ((0xc4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_PHYCS2R    ((0xc8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_TIMER1MS   ((0xe0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_GPARAM1R   ((0xe8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_GPARAM2R   ((0xec - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_PPARAMR    ((0xf0 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_TESTR      ((0xf4 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_VERSIONR   ((0xf8 - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_IDR        ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)
#define ALLWINNER_AHCI_RWCR       ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4)

static uint64_t allwinner_ahci_mem_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    AllwinnerAHCIState *a = opaque;
    uint64_t val = a->regs[addr/4];

    switch (addr / 4) {
    case ALLWINNER_AHCI_PHYCS0R:
        val |= 0x2 << 28;
        break;
    case ALLWINNER_AHCI_PHYCS2R:
        val &= ~(0x1 << 24);
        break;
    }
    DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
            addr, val, size);
    return val;
}

static void allwinner_ahci_mem_write(void *opaque, hwaddr addr,
                                     uint64_t val, unsigned size)
{
    AllwinnerAHCIState *a = opaque;

    DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n",
            addr, val, size);
    a->regs[addr/4] = val;
}

static const MemoryRegionOps allwinner_ahci_mem_ops = {
    .read = allwinner_ahci_mem_read,
    .write = allwinner_ahci_mem_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void allwinner_ahci_init(Object *obj)
{
    SysbusAHCIState *s = SYSBUS_AHCI(obj);
    AllwinnerAHCIState *a = ALLWINNER_AHCI(obj);

    memory_region_init_io(&a->mmio, OBJECT(obj), &allwinner_ahci_mem_ops, a,
                          "allwinner-ahci", ALLWINNER_AHCI_MMIO_SIZE);
    memory_region_add_subregion(&s->ahci.mem, ALLWINNER_AHCI_MMIO_OFF,
                                &a->mmio);
}

static const VMStateDescription vmstate_allwinner_ahci = {
    .name = "allwinner-ahci",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AllwinnerAHCIState,
                             ALLWINNER_AHCI_MMIO_SIZE/4),
        VMSTATE_END_OF_LIST()
    }
};

static void allwinner_ahci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_allwinner_ahci;
}

static const TypeInfo allwinner_ahci_info = {
    .name          = TYPE_ALLWINNER_AHCI,
    .parent        = TYPE_SYSBUS_AHCI,
    .instance_size = sizeof(AllwinnerAHCIState),
    .instance_init = allwinner_ahci_init,
    .class_init    = allwinner_ahci_class_init,
};

static void sysbus_ahci_register_types(void)
{
    type_register_static(&sysbus_ahci_info);
    type_register_static(&allwinner_ahci_info);
}

type_init(sysbus_ahci_register_types)

void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)
{
    AHCIPCIState *d = ICH_AHCI(dev);
    AHCIState *ahci = &d->ahci;
    int i;

    for (i = 0; i < ahci->ports; i++) {
        if (hd[i] == NULL) {
            continue;
        }
        ide_create_drive(&ahci->dev[i].port, 0, hd[i]);
    }
}