linux/drivers/ata/ahci.c


/*
* ahci.c - AHCI SATA support
*
* Maintained by: Jeff Garzik <jgarzik@pobox.com>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
* Copyright 2004-2005 Red Hat, Inc.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
* AHCI hardware documentation:
* http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
* http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
/* Enclosure Management Control */
#define EM_CTRL_MSG_TYPE 0x000f0000
/* Enclosure Management LED Message Type */
#define EM_MSG_LED_HBA_PORT 0x0000000f
#define EM_MSG_LED_PMP_SLOT 0x0000ff00
#define EM_MSG_LED_VALUE 0xffff0000
#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
#define EM_MSG_LED_VALUE_OFF 0xfff80000
#define EM_MSG_LED_VALUE_ON 0x00010000
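/*
 * An enclosure management LED message word packs the target HBA port in
 * bits 3:0, the PMP slot in bits 15:8 and the LED state in bits 31:16.
 * As a rough illustration (the real message is built by ahci_led_store()
 * and sent by ahci_transmit_led_message() below), turning on the LED for
 * HBA port 2 would use a state word along the lines of:
 *
 *	state = (2 & EM_MSG_LED_HBA_PORT) | EM_MSG_LED_VALUE_ON;
 */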
static int ahci_skip_host_reset;
static int ahci_ignore_sss;
module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
static int ahci_enable_alpm(struct ata_port *ap,
enum link_pm policy);
static void ahci_disable_alpm(struct ata_port *ap);
static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size);
#define MAX_SLOTS 8
#define MAX_RETRY 15
enum {
AHCI_PCI_BAR = 5,
AHCI_MAX_PORTS = 32,
AHCI_MAX_SG = 168, /* hardware max is 64K */
AHCI_DMA_BOUNDARY = 0xffffffff,
AHCI_MAX_CMDS = 32,
AHCI_CMD_SZ = 32,
AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
AHCI_RX_FIS_SZ = 256,
AHCI_CMD_TBL_CDB = 0x40,
AHCI_CMD_TBL_HDR_SZ = 0x80,
AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
AHCI_RX_FIS_SZ,
AHCI_IRQ_ON_SG = (1 << 31),
AHCI_CMD_ATAPI = (1 << 5),
AHCI_CMD_WRITE = (1 << 6),
AHCI_CMD_PREFETCH = (1 << 7),
AHCI_CMD_RESET = (1 << 8),
AHCI_CMD_CLR_BUSY = (1 << 10),
RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
board_ahci = 0,
board_ahci_vt8251 = 1,
board_ahci_ign_iferr = 2,
board_ahci_sb600 = 3,
board_ahci_mv = 4,
board_ahci_sb700 = 5, /* for SB700 and SB800 */
board_ahci_mcp65 = 6,
board_ahci_nopmp = 7,
board_ahci_yesncq = 8,
/* global controller registers */
HOST_CAP = 0x00, /* host capabilities */
HOST_CTL = 0x04, /* global host control */
HOST_IRQ_STAT = 0x08, /* interrupt status */
HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
HOST_EM_LOC = 0x1c, /* Enclosure Management location */
HOST_EM_CTL = 0x20, /* Enclosure Management Control */
/* HOST_CTL bits */
HOST_RESET = (1 << 0), /* reset controller; self-clear */
HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
/* HOST_CAP bits */
HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
HOST_CAP_SSC = (1 << 14), /* Slumber capable */
HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
HOST_CAP_CLO = (1 << 24), /* Command List Override support */
HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
HOST_CAP_SNTF = (1 << 29), /* SNotification register */
HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
/* registers for each SATA port */
PORT_LST_ADDR = 0x00, /* command list DMA addr */
PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
PORT_IRQ_STAT = 0x10, /* interrupt status */
PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
PORT_CMD = 0x18, /* port command */
PORT_TFDATA = 0x20, /* taskfile data */
PORT_SIG = 0x24, /* device TF signature */
PORT_CMD_ISSUE = 0x38, /* command issue */
PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
/* PORT_IRQ_{STAT,MASK} bits */
PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
PORT_IRQ_IF_ERR |
PORT_IRQ_CONNECT |
PORT_IRQ_PHYRDY |
PORT_IRQ_UNK_FIS |
PORT_IRQ_BAD_PMP,
PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
PORT_IRQ_TF_ERR |
PORT_IRQ_HBUS_DATA_ERR,
DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
/* PORT_CMD bits */
PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
PORT_CMD_PMP = (1 << 17), /* PMP attached */
PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
PORT_CMD_CLO = (1 << 3), /* Command list override */
PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
/* hpriv->flags bits */
AHCI_HFLAG_NO_NCQ = (1 << 0),
AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
/* ap->flags bits */
AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
ATA_FLAG_IPM,
ICH_MAP = 0x90, /* ICH MAP register */
/* em_ctl bits */
EM_CTL_RST = (1 << 9), /* Reset */
EM_CTL_TM = (1 << 8), /* Transmit Message */
EM_CTL_ALHD = (1 << 26), /* Activity LED */
};
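/*
 * With the sizes above, the per-port DMA area (AHCI_PORT_PRIV_DMA_SZ)
 * allocated by ahci_port_start() works out to:
 *
 *	command list:   AHCI_MAX_CMDS * AHCI_CMD_SZ   =  1024 bytes
 *	command tables: 32 * (0x80 + 168 * 16)         = 90112 bytes
 *	received FIS:   AHCI_RX_FIS_SZ                 =   256 bytes
 *
 * i.e. roughly 89 KiB of coherent DMA memory per port.
 */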
struct ahci_cmd_hdr {
__le32 opts;
__le32 status;
__le32 tbl_addr;
__le32 tbl_addr_hi;
__le32 reserved[4];
};
struct ahci_sg {
__le32 addr;
__le32 addr_hi;
__le32 reserved;
__le32 flags_size;
};
struct ahci_em_priv {
enum sw_activity blink_policy;
struct timer_list timer;
unsigned long saved_activity;
unsigned long activity;
unsigned long led_state;
};
struct ahci_host_priv {
unsigned int flags; /* AHCI_HFLAG_* */
u32 cap; /* cap to use */
u32 port_map; /* port map to use */
u32 saved_cap; /* saved initial cap */
u32 saved_port_map; /* saved initial port_map */
u32 em_loc; /* enclosure management location */
};
struct ahci_port_priv {
struct ata_link *active_link;
struct ahci_cmd_hdr *cmd_slot;
dma_addr_t cmd_slot_dma;
void *cmd_tbl;
dma_addr_t cmd_tbl_dma;
void *rx_fis;
dma_addr_t rx_fis_dma;
/* for NCQ spurious interrupt analysis */
unsigned int ncq_saw_d2h:1;
unsigned int ncq_saw_dmas:1;
unsigned int ncq_saw_sdb:1;
u32 intr_mask; /* interrupts to enable */
struct ahci_em_priv em_priv[MAX_SLOTS];/* enclosure management info
* per PM slot */
};
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
static ssize_t ahci_activity_store(struct ata_device *dev,
enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);
static struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy,
&dev_attr_em_message_type,
&dev_attr_em_message,
NULL
};
static struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
&dev_attr_unload_heads,
NULL
};
static struct scsi_host_template ahci_sht = {
ATA_NCQ_SHT(DRV_NAME),
.can_queue = AHCI_MAX_CMDS - 1,
.sg_tablesize = AHCI_MAX_SG,
.dma_boundary = AHCI_DMA_BOUNDARY,
.shost_attrs = ahci_shost_attrs,
.sdev_attrs = ahci_sdev_attrs,
};
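/*
 * can_queue is AHCI_MAX_CMDS - 1 rather than 32: libata keeps one tag
 * (ATA_TAG_INTERNAL) for its own internal/EH commands, which accounts
 * for the -1, so only 31 slots are exposed to the block layer for NCQ.
 */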
static struct ata_port_operations ahci_ops = {
.inherits = &sata_pmp_port_ops,
.qc_defer = sata_pmp_qc_defer_cmd_switch,
.qc_prep = ahci_qc_prep,
.qc_issue = ahci_qc_issue,
.qc_fill_rtf = ahci_qc_fill_rtf,
.freeze = ahci_freeze,
.thaw = ahci_thaw,
.softreset = ahci_softreset,
.hardreset = ahci_hardreset,
.postreset = ahci_postreset,
.pmp_softreset = ahci_softreset,
.error_handler = ahci_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
.dev_config = ahci_dev_config,
.scr_read = ahci_scr_read,
.scr_write = ahci_scr_write,
.pmp_attach = ahci_pmp_attach,
.pmp_detach = ahci_pmp_detach,
.enable_pm = ahci_enable_alpm,
.disable_pm = ahci_disable_alpm,
.em_show = ahci_led_show,
.em_store = ahci_led_store,
.sw_activity_show = ahci_activity_show,
.sw_activity_store = ahci_activity_store,
#ifdef CONFIG_PM
.port_suspend = ahci_port_suspend,
.port_resume = ahci_port_resume,
#endif
.port_start = ahci_port_start,
.port_stop = ahci_port_stop,
};
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_vt8251_hardreset,
};
static struct ata_port_operations ahci_p5wdh_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_p5wdh_hardreset,
};
static struct ata_port_operations ahci_sb600_ops = {
.inherits = &ahci_ops,
.softreset = ahci_sb600_softreset,
.pmp_softreset = ahci_sb600_softreset,
};
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info ahci_port_info[] = {
/* board_ahci */
{
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
/* board_ahci_vt8251 */
{
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
/* board_ahci_ign_iferr */
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
/* board_ahci_sb600 */
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
AHCI_HFLAG_SECT255),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_sb600_ops,
},
/* board_ahci_mv */
{
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
/* board_ahci_sb700, for SB700 and SB800 */
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_sb600_ops,
},
/* board_ahci_mcp65 */
{
AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
/* board_ahci_nopmp */
{
AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
/* board_ahci_yesncq */
{
AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
};
static const struct pci_device_id ahci_pci_tbl[] = {
/* Intel */
{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
/* ATI */
{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
/* NVIDIA */
{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
{ PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
{ PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
/* SiS */
{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
/* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
{ } /* terminate list */
};
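/*
 * The final non-terminator entry above is a class-code match: with
 * PCI_ANY_ID for vendor/device and class PCI_CLASS_STORAGE_SATA_AHCI
 * masked with 0xffffff, any controller advertising the standard AHCI
 * programming interface binds to this driver even without an explicit
 * device ID in the table.
 */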
static struct pci_driver ahci_pci_driver = {
.name = DRV_NAME,
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ahci_pci_device_suspend,
.resume = ahci_pci_device_resume,
#endif
};
static int ahci_em_messages = 1;
module_param(ahci_em_messages, int, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
static int marvell_enable;
#else
static int marvell_enable = 1;
#endif
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
static inline int ahci_nr_ports(u32 cap)
{
return (cap & 0x1f) + 1;
}
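/*
 * CAP.NP (bits 4:0) encodes the number of ports minus one, hence the +1
 * above; e.g. (cap & 0x1f) == 5 means the HBA implements six ports.
 */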
static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no)
{
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
return mmio + 0x100 + (port_no * 0x80);
}
static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
return __ahci_port_base(ap->host, ap->port_no);
}
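/*
 * MMIO layout behind the computation above: the generic host control
 * registers occupy the first 0x100 bytes of BAR 5, followed by one
 * 0x80-byte register block per port, so port 0 starts at offset 0x100,
 * port 1 at 0x180 and port 31 at 0x1080.
 */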
static void ahci_enable_ahci(void __iomem *mmio)
{
int i;
u32 tmp;
/* turn on AHCI_EN */
tmp = readl(mmio + HOST_CTL);
if (tmp & HOST_AHCI_EN)
return;
/* Some controllers need AHCI_EN to be written multiple times.
* Try a few times before giving up.
*/
for (i = 0; i < 5; i++) {
tmp |= HOST_AHCI_EN;
writel(tmp, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
if (tmp & HOST_AHCI_EN)
return;
msleep(10);
}
WARN_ON(1);
}
/**
* ahci_save_initial_config - Save and fixup initial config values
* @pdev: target PCI device
* @hpriv: host private area to store config values
*
* Some registers containing configuration info might be setup by
* BIOS and might be cleared on reset. This function saves the
* initial values of those registers into @hpriv such that they
* can be restored after controller reset.
*
* If inconsistent, config values are fixed up by this function.
*
* LOCKING:
* None.
*/
static void ahci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
u32 cap, port_map;
int i;
int mv;
/* make sure AHCI mode is enabled before accessing CAP */
ahci_enable_ahci(mmio);
/* Values prefixed with saved_ are written back to host after
* reset. Values without are used for driver operation.
*/
hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
/* some chips have errata preventing 64bit use */
if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
dev_printk(KERN_INFO, &pdev->dev,
"controller can't do 64bit DMA, forcing 32bit\n");
cap &= ~HOST_CAP_64;
}
if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
dev_printk(KERN_INFO, &pdev->dev,
"controller can't do NCQ, turning off CAP_NCQ\n");
cap &= ~HOST_CAP_NCQ;
}
if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
dev_printk(KERN_INFO, &pdev->dev,
"controller can do NCQ, turning on CAP_NCQ\n");
cap |= HOST_CAP_NCQ;
}
if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
dev_printk(KERN_INFO, &pdev->dev,
"controller can't do PMP, turning off CAP_PMP\n");
cap &= ~HOST_CAP_PMP;
}
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
port_map != 1) {
dev_printk(KERN_INFO, &pdev->dev,
"JMB361 has only one port, port_map 0x%x -> 0x%x\n",
port_map, 1);
port_map = 1;
}
/*
* Temporary Marvell 6145 hack: PATA port presence
* is asserted through the standard AHCI port
* presence register, as bit 4 (counting from 0)
*/
if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
if (pdev->device == 0x6121)
mv = 0x3;
else
mv = 0xf;
dev_printk(KERN_ERR, &pdev->dev,
"MV_AHCI HACK: port_map %x -> %x\n",
port_map,
port_map & mv);
dev_printk(KERN_ERR, &pdev->dev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
port_map &= mv;
}
/* cross check port_map and cap.n_ports */
if (port_map) {
int map_ports = 0;
for (i = 0; i < AHCI_MAX_PORTS; i++)
if (port_map & (1 << i))
map_ports++;
/* If PI has more ports than n_ports, whine, clear
* port_map and let it be generated from n_ports.
*/
if (map_ports > ahci_nr_ports(cap)) {
dev_printk(KERN_WARNING, &pdev->dev,
"implemented port map (0x%x) contains more "
"ports than nr_ports (%u), using nr_ports\n",
port_map, ahci_nr_ports(cap));
port_map = 0;
}
}
/* fabricate port_map from cap.nr_ports */
if (!port_map) {
port_map = (1 << ahci_nr_ports(cap)) - 1;
dev_printk(KERN_WARNING, &pdev->dev,
"forcing PORTS_IMPL to 0x%x\n", port_map);
/* write the fixed up value to the PI register */
hpriv->saved_port_map = port_map;
}
/* record values to use during operation */
hpriv->cap = cap;
hpriv->port_map = port_map;
}
/**
* ahci_restore_initial_config - Restore initial config
* @host: target ATA host
*
* Restore initial config stored by ahci_save_initial_config().
*
* LOCKING:
* None.
*/
static void ahci_restore_initial_config(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
writel(hpriv->saved_cap, mmio + HOST_CAP);
writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
(void) readl(mmio + HOST_PORTS_IMPL); /* flush */
}
static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
static const int offset[] = {
[SCR_STATUS] = PORT_SCR_STAT,
[SCR_CONTROL] = PORT_SCR_CTL,
[SCR_ERROR] = PORT_SCR_ERR,
[SCR_ACTIVE] = PORT_SCR_ACT,
[SCR_NOTIFICATION] = PORT_SCR_NTF,
};
struct ahci_host_priv *hpriv = ap->host->private_data;
if (sc_reg < ARRAY_SIZE(offset) &&
(sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
return offset[sc_reg];
return 0;
}
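/*
 * A return value of 0 from ahci_scr_offset() means "not available":
 * offset 0 in a port register block is PORT_LST_ADDR, never an SCR, so
 * the read/write helpers below translate it to -EINVAL.  SNotification
 * is only mapped when the HBA reports HOST_CAP_SNTF.
 */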
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
int offset = ahci_scr_offset(link->ap, sc_reg);
if (offset) {
*val = readl(port_mmio + offset);
return 0;
}
return -EINVAL;
}
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
int offset = ahci_scr_offset(link->ap, sc_reg);
if (offset) {
writel(val, port_mmio + offset);
return 0;
}
return -EINVAL;
}
static void ahci_start_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
/* start DMA */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
}
static int ahci_stop_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
tmp = readl(port_mmio + PORT_CMD);
/* check if the HBA is idle */
if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
return 0;
/* setting HBA to idle */
tmp &= ~PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
/* wait for engine to stop. This could be as long as 500 msec */
tmp = ata_wait_register(port_mmio + PORT_CMD,
PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
if (tmp & PORT_CMD_LIST_ON)
return -EIO;
return 0;
}
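/*
 * Per the AHCI spec, software clears PxCMD.ST and then waits for
 * PxCMD.CR (PORT_CMD_LIST_ON here) to drop before touching the command
 * list or FIS receive area.  A rough usage sketch:
 *
 *	rc = ahci_stop_engine(ap);	(idle the port DMA engine)
 *	if (rc == 0) {
 *		... reprogram PORT_LST_ADDR, issue CLO, etc ...
 *		ahci_start_engine(ap);	(set PxCMD.ST again)
 *	}
 */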
static void ahci_start_fis_rx(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
u32 tmp;
/* set FIS registers */
if (hpriv->cap & HOST_CAP_64)
writel((pp->cmd_slot_dma >> 16) >> 16,
port_mmio + PORT_LST_ADDR_HI);
writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
if (hpriv->cap & HOST_CAP_64)
writel((pp->rx_fis_dma >> 16) >> 16,
port_mmio + PORT_FIS_ADDR_HI);
writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
/* enable FIS reception */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_FIS_RX;
writel(tmp, port_mmio + PORT_CMD);
/* flush */
readl(port_mmio + PORT_CMD);
}
static int ahci_stop_fis_rx(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
/* disable FIS reception */
tmp = readl(port_mmio + PORT_CMD);
tmp &= ~PORT_CMD_FIS_RX;
writel(tmp, port_mmio + PORT_CMD);
/* wait for completion, spec says 500ms, give it 1000 */
tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
PORT_CMD_FIS_ON, 10, 1000);
if (tmp & PORT_CMD_FIS_ON)
return -EBUSY;
return 0;
}
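/* Power up the port: assert PORT_CMD_SPIN_UP only when the HBA supports
 * staggered spin-up (HOST_CAP_SSS), then set the interface communication
 * control field to "active" to wake the link.
 */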
static void ahci_power_up(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 cmd;
cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
/* spin up device */
if (hpriv->cap & HOST_CAP_SSS) {
cmd |= PORT_CMD_SPIN_UP;
writel(cmd, port_mmio + PORT_CMD);
}
/* wake up link */
writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
static void ahci_disable_alpm(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 cmd;
struct ahci_port_priv *pp = ap->private_data;
/* IPM bits should be disabled by libata-core */
/* get the existing command bits */
cmd = readl(port_mmio + PORT_CMD);
/* disable ALPM and ASP */
cmd &= ~PORT_CMD_ASP;
cmd &= ~PORT_CMD_ALPE;
/* force the interface back to active */
cmd |= PORT_CMD_ICC_ACTIVE;
/* write out new cmd value */
writel(cmd, port_mmio + PORT_CMD);
cmd = readl(port_mmio + PORT_CMD);
/* wait 10ms to be sure we've come out of any low power state */
msleep(10);
/* clear out any PhyRdy stuff from interrupt status */
writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
/* go ahead and clean out PhyRdy Change from Serror too */
ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
/*
* Clear flag to indicate that we should ignore all PhyRdy
* state changes
*/
hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
/*
* Enable interrupts on Phy Ready.
*/
pp->intr_mask |= PORT_IRQ_PHYRDY;
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
/*
* don't change the link pm policy - we can be called
* just to turn off link pm temporarily
*/
}
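/* Enable aggressive link power management.  MIN_POWER selects SLUMBER
 * via PORT_CMD_ASP, MEDIUM_POWER selects PARTIAL (ASP cleared), and
 * MAX_PERFORMANCE / NOT_AVAILABLE fall back to ahci_disable_alpm().
 * PhyRdy interrupts are masked because link power state changes would
 * otherwise look like hotplug events.
 */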
static int ahci_enable_alpm(struct ata_port *ap,
enum link_pm policy)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 cmd;
struct ahci_port_priv *pp = ap->private_data;
u32 asp;
/* Make sure the host is capable of link power management */
if (!(hpriv->cap & HOST_CAP_ALPM))
return -EINVAL;
switch (policy) {
case MAX_PERFORMANCE:
case NOT_AVAILABLE:
/*
* if we came here with NOT_AVAILABLE,
* it just means this is the first time we
* have tried to enable - default to max performance,
* and let the user go to lower power modes on request.
*/
ahci_disable_alpm(ap);
return 0;
case MIN_POWER:
/* configure HBA to enter SLUMBER */
asp = PORT_CMD_ASP;
break;
case MEDIUM_POWER:
/* configure HBA to enter PARTIAL */
asp = 0;
break;
default:
return -EINVAL;
}
/*
* Disable interrupts on Phy Ready. This keeps us from
* getting woken up due to spurious phy ready interrupts
* TBD - Hot plug should be done via polling now, is
* that even supported?
*/
pp->intr_mask &= ~PORT_IRQ_PHYRDY;
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
/*
* Set a flag to indicate that we should ignore all PhyRdy
* state changes since these can happen now whenever we
* change link state
*/
hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
/* get the existing command bits */
cmd = readl(port_mmio + PORT_CMD);
/*
* Set ASP based on Policy
*/
cmd |= asp;
/*
* Setting this bit will instruct the HBA to aggressively
* enter a lower power link state when it's appropriate and
* based on the value set above for ASP
*/
cmd |= PORT_CMD_ALPE;
/* write out new cmd value */
writel(cmd, port_mmio + PORT_CMD);
cmd = readl(port_mmio + PORT_CMD);
/* IPM bits should be set by libata-core */
return 0;
}
#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 cmd, scontrol;
if (!(hpriv->cap & HOST_CAP_SSS))
return;
/* put device into listen mode, first set PxSCTL.DET to 0 */
scontrol = readl(port_mmio + PORT_SCR_CTL);
scontrol &= ~0xf;
writel(scontrol, port_mmio + PORT_SCR_CTL);
/* then set PxCMD.SUD to 0 */
cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
cmd &= ~PORT_CMD_SPIN_UP;
writel(cmd, port_mmio + PORT_CMD);
}
#endif
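/* Bring a port into operation: enable FIS reception, start the DMA
 * engine and, when enclosure management is supported, retransmit the
 * saved LED state (retrying since the EM transmit bit may still be
 * busy right after init).
 */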
static void ahci_start_port(struct ata_port *ap)
{
struct ahci_port_priv *pp = ap->private_data;
struct ata_link *link;
struct ahci_em_priv *emp;
ssize_t rc;
int i;
/* enable FIS reception */
ahci_start_fis_rx(ap);
/* enable DMA */
ahci_start_engine(ap);
/* turn on LEDs */
if (ap->flags & ATA_FLAG_EM) {
ata_for_each_link(link, ap, EDGE) {
emp = &pp->em_priv[link->pmp];
/* EM Transmit bit may be busy during init */
for (i = 0; i < MAX_RETRY; i++) {
rc = ahci_transmit_led_message(ap,
emp->led_state,
4);
if (rc == -EBUSY)
udelay(100);
else
break;
}
}
}
if (ap->flags & ATA_FLAG_SW_ACTIVITY)
ata_for_each_link(link, ap, EDGE)
ahci_init_sw_activity(link);
}
static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
int rc;
/* disable DMA */
rc = ahci_stop_engine(ap);
if (rc) {
*emsg = "failed to stop engine";
return rc;
}
/* disable FIS reception */
rc = ahci_stop_fis_rx(ap);
if (rc) {
*emsg = "failed stop FIS RX";
return rc;
}
return 0;
}
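/* Global controller reset: make sure the HBA is in AHCI mode, set
 * HOST_RESET (GHC.HR) and poll until the HBA clears it (up to 1 second),
 * then re-enable AHCI mode and restore the saved CAP/PORTS_IMPL values.
 * Intel controllers additionally get their PCS register (config offset
 * 0x92) patched so all implemented ports are enabled.
 */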
static int ahci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
u32 tmp;
/* we must be in AHCI mode before using anything
* AHCI-specific, such as HOST_RESET.
*/
ahci_enable_ahci(mmio);
/* global controller reset */
if (!ahci_skip_host_reset) {
tmp = readl(mmio + HOST_CTL);
if ((tmp & HOST_RESET) == 0) {
writel(tmp | HOST_RESET, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
}
/*
* to perform host reset, OS should set HOST_RESET
* and poll until this bit is read to be "0".
* reset must complete within 1 second, or
* the hardware should be considered fried.
*/
tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
HOST_RESET, 10, 1000);
if (tmp & HOST_RESET) {
dev_printk(KERN_ERR, host->dev,
"controller reset failed (0x%x)\n", tmp);
return -EIO;
}
/* turn on AHCI mode */
ahci_enable_ahci(mmio);
/* Some registers might be cleared on reset. Restore
* initial values.
*/
ahci_restore_initial_config(host);
} else
dev_printk(KERN_INFO, host->dev,
"skipping global host reset\n");
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
u16 tmp16;
/* configure PCS */
pci_read_config_word(pdev, 0x92, &tmp16);
if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
tmp16 |= hpriv->port_map;
pci_write_config_word(pdev, 0x92, tmp16);
}
}
return 0;
}
static void ahci_sw_activity(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
return;
emp->activity++;
if (!timer_pending(&emp->timer))
mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
}
static void ahci_sw_activity_blink(unsigned long arg)
{
struct ata_link *link = (struct ata_link *)arg;
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
unsigned long led_message = emp->led_state;
u32 activity_led_state;
unsigned long flags;
led_message &= EM_MSG_LED_VALUE;
led_message |= ap->port_no | (link->pmp << 8);
/* check to see if we've had activity. If so,
* toggle state of LED and reset timer. If not,
* turn LED to desired idle state.
*/
spin_lock_irqsave(ap->lock, flags);
if (emp->saved_activity != emp->activity) {
emp->saved_activity = emp->activity;
/* get the current LED state */
activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
if (activity_led_state)
activity_led_state = 0;
else
activity_led_state = 1;
/* clear old state */
led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
/* toggle state */
led_message |= (activity_led_state << 16);
mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
} else {
/* switch to idle */
led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
if (emp->blink_policy == BLINK_OFF)
led_message |= (1 << 16);
}
spin_unlock_irqrestore(ap->lock, flags);
ahci_transmit_led_message(ap, led_message, 4);
}
static void ahci_init_sw_activity(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
/* init activity stats, setup timer */
emp->saved_activity = emp->activity = 0;
setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
/* check our blink policy and set flag for link if it's enabled */
if (emp->blink_policy)
link->flags |= ATA_LFLAG_SW_ACTIVITY;
}
static int ahci_reset_em(struct ata_host *host)
{
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
u32 em_ctl;
em_ctl = readl(mmio + HOST_EM_CTL);
if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
return -EINVAL;
writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
return 0;
}
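/* Send a two-dword LED message through the enclosure management
 * transmit buffer at em_loc: dword 0 is the header (message size in
 * bits 8-15), dword 1 carries the LED state with the HBA port number
 * filled in, then EM_CTL_TM kicks off transmission.  Returns -EBUSY if
 * a previous message is still being transmitted.
 */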
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
ssize_t size)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
u32 em_ctl;
u32 message[] = {0, 0};
unsigned long flags;
int pmp;
struct ahci_em_priv *emp;
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
if (pmp < MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
return -EINVAL;
spin_lock_irqsave(ap->lock, flags);
/*
* if we are still busy transmitting a previous message,
* do not allow another to be sent
*/
em_ctl = readl(mmio + HOST_EM_CTL);
if (em_ctl & EM_CTL_TM) {
spin_unlock_irqrestore(ap->lock, flags);
return -EBUSY;
}
/*
* create message header - this is all zero except for
* the message size, which is 4 bytes.
*/
message[0] |= (4 << 8);
/* ignore 0:4 of byte zero, fill in port info yourself */
message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
/* write message to EM_LOC */
writel(message[0], mmio + hpriv->em_loc);
writel(message[1], mmio + hpriv->em_loc+4);
/* save off new led state for port/slot */
emp->led_state = state;
/*
* tell hardware to transmit the message
*/
writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
spin_unlock_irqrestore(ap->lock, flags);
return size;
}
static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
{
struct ahci_port_priv *pp = ap->private_data;
struct ata_link *link;
struct ahci_em_priv *emp;
int rc = 0;
ata_for_each_link(link, ap, EDGE) {
emp = &pp->em_priv[link->pmp];
rc += sprintf(buf, "%lx\n", emp->led_state);
}
return rc;
}
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
size_t size)
{
int state;
int pmp;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp;
state = simple_strtoul(buf, NULL, 0);
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
if (pmp < MAX_SLOTS)
emp = &pp->em_priv[pmp];
else
return -EINVAL;
/* mask off the activity bits if we are in sw_activity
* mode, user should turn off sw_activity before setting
* activity led through em_message
*/
if (emp->blink_policy)
state &= ~EM_MSG_LED_VALUE_ACTIVITY;
return ahci_transmit_led_message(ap, state, size);
}
static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
u32 port_led_state = emp->led_state;
/* save the desired Activity LED behavior */
if (val == OFF) {
/* clear LFLAG */
link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
/* set the LED to OFF */
port_led_state &= EM_MSG_LED_VALUE_OFF;
port_led_state |= (ap->port_no | (link->pmp << 8));
ahci_transmit_led_message(ap, port_led_state, 4);
} else {
link->flags |= ATA_LFLAG_SW_ACTIVITY;
if (val == BLINK_OFF) {
/* set LED to ON for idle */
port_led_state &= EM_MSG_LED_VALUE_OFF;
port_led_state |= (ap->port_no | (link->pmp << 8));
port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
ahci_transmit_led_message(ap, port_led_state, 4);
}
}
emp->blink_policy = val;
return 0;
}
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
/* display the saved value of activity behavior for this
* disk.
*/
return sprintf(buf, "%d\n", emp->blink_policy);
}
static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
{
const char *emsg = NULL;
int rc;
u32 tmp;
/* make sure port is not active */
rc = ahci_deinit_port(ap, &emsg);
if (rc)
dev_printk(KERN_WARNING, &pdev->dev,
"%s (%d)\n", emsg, rc);
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << port_no, mmio + HOST_IRQ_STAT);
}
static void ahci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
int i;
void __iomem *port_mmio;
u32 tmp;
int mv;
if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
if (pdev->device == 0x6121)
mv = 2;
else
mv = 4;
port_mmio = __ahci_port_base(host, mv);
writel(0, port_mmio + PORT_IRQ_MASK);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
port_mmio = ahci_port_base(ap);
if (ata_port_is_dummy(ap))
continue;
ahci_port_init(pdev, ap, i, mmio, port_mmio);
}
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
}
static void ahci_dev_config(struct ata_device *dev)
{
struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
if (hpriv->flags & AHCI_HFLAG_SECT255) {
dev->max_sectors = 255;
ata_dev_printk(dev, KERN_INFO,
"SB600 AHCI: limiting to 255 sectors per cmd\n");
}
}
static unsigned int ahci_dev_classify(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_taskfile tf;
u32 tmp;
tmp = readl(port_mmio + PORT_SIG);
tf.lbah = (tmp >> 24) & 0xff;
tf.lbam = (tmp >> 16) & 0xff;
tf.lbal = (tmp >> 8) & 0xff;
tf.nsect = (tmp) & 0xff;
return ata_dev_classify(&tf);
}
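/* Fill one entry in the command list: opts carries the command FIS
 * length, flags and PMP port (and, for data commands, the PRDT entry
 * count), while tbl_addr/tbl_addr_hi point at that tag's command table.
 */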
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts)
{
dma_addr_t cmd_tbl_dma;
cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
pp->cmd_slot[tag].opts = cpu_to_le32(opts);
pp->cmd_slot[tag].status = 0;
pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
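/* Kick the port out of a stuck state: stop the DMA engine and, if the
 * device is still busy and the HBA supports it (HOST_CAP_CLO), issue a
 * Command List Override to clear BSY/DRQ before restarting the engine.
 */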
static int ahci_kick_engine(struct ata_port *ap, int force_restart)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
u32 tmp;
int busy, rc;
/* do we need to kick the port? */
busy = status & (ATA_BUSY | ATA_DRQ);
if (!busy && !force_restart)
return 0;
/* stop engine */
rc = ahci_stop_engine(ap);
if (rc)
goto out_restart;
/* need to do CLO? */
if (!busy) {
rc = 0;
goto out_restart;
}
if (!(hpriv->cap & HOST_CAP_CLO)) {
rc = -EOPNOTSUPP;
goto out_restart;
}
/* perform CLO */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_CLO;
writel(tmp, port_mmio + PORT_CMD);
rc = 0;
tmp = ata_wait_register(port_mmio + PORT_CMD,
PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
if (tmp & PORT_CMD_CLO)
rc = -EIO;
/* restart engine */
out_restart:
ahci_start_engine(ap);
return rc;
}
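/* Issue a single polled command through slot 0: build the FIS in the
 * command table, fill command slot 0 and poll PORT_CMD_ISSUE until the
 * bit clears or timeout_msec expires (kicking the engine on timeout).
 */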
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
struct ata_taskfile *tf, int is_cmd, u16 flags,
unsigned long timeout_msec)
{
const u32 cmd_fis_len = 5; /* five dwords */
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u8 *fis = pp->cmd_tbl;
u32 tmp;
/* prep the command */
ata_tf_to_fis(tf, pmp, is_cmd, fis);
ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
/* issue & wait */
writel(1, port_mmio + PORT_CMD_ISSUE);
if (timeout_msec) {
tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1, timeout_msec);
if (tmp & 0x1) {
ahci_kick_engine(ap, 1);
return -EBUSY;
}
} else
readl(port_mmio + PORT_CMD_ISSUE); /* flush */
return 0;
}
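/* Software reset per AHCI: kick the engine, then send two Register
 * FISes through the command list - the first with SRST set (and the
 * RESET / CLR_BUSY flags in the command header), the second with SRST
 * cleared - and finally wait for the link to report ready.
 */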
static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
int pmp, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
struct ata_port *ap = link->ap;
const char *reason = NULL;
unsigned long now, msecs;
struct ata_taskfile tf;
int rc;
DPRINTK("ENTER\n");
/* prepare for SRST (AHCI-1.1 10.4.1) */
rc = ahci_kick_engine(ap, 1);
if (rc && rc != -EOPNOTSUPP)
ata_link_printk(link, KERN_WARNING,
"failed to reset engine (errno=%d)\n", rc);
ata_tf_init(link->device, &tf);
/* issue the first H2D Register FIS */
msecs = 0;
now = jiffies;
if (time_after(deadline, now))
msecs = jiffies_to_msecs(deadline - now);
tf.ctl |= ATA_SRST;
if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
rc = -EIO;
reason = "1st FIS failed";
goto fail;
}
/* spec says at least 5us, but be generous and sleep for 1ms */
msleep(1);
/* issue the second H2D Register FIS */
tf.ctl &= ~ATA_SRST;
ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
/* wait for link to become ready */
rc = ata_wait_after_reset(link, deadline, check_ready);
/* link occupied, -ENODEV too is an error */
if (rc) {
reason = "device not ready";
goto fail;
}
*class = ahci_dev_classify(ap);
DPRINTK("EXIT, class=%u\n", *class);
return 0;
fail:
ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
return rc;
}
static int ahci_check_ready(struct ata_link *link)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
return ata_check_ready(status);
}
static int ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
int pmp = sata_srst_pmp(link);
DPRINTK("ENTER\n");
return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
static int ahci_sb600_check_ready(struct ata_link *link)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
/*
* There is no need to check TFDATA if BAD PMP is found due to HW bug,
* which can save timeout delay.
*/
if (irq_status & PORT_IRQ_BAD_PMP)
return -EIO;
return ata_check_ready(status);
}
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
void __iomem *port_mmio = ahci_port_base(ap);
int pmp = sata_srst_pmp(link);
int rc;
u32 irq_sts;
DPRINTK("ENTER\n");
rc = ahci_do_softreset(link, class, pmp, deadline,
ahci_sb600_check_ready);
/*
* Soft reset fails on some ATI chips with IPMS set when PMP
* is enabled but SATA HDD/ODD is connected to SATA port,
* do soft reset again to port 0.
*/
if (rc == -EIO) {
irq_sts = readl(port_mmio + PORT_IRQ_STAT);
if (irq_sts & PORT_IRQ_BAD_PMP) {
ata_link_printk(link, KERN_WARNING,
"failed due to HW bug, retry pmp=0\n");
rc = ahci_do_softreset(link, class, 0, deadline,
ahci_check_ready);
}
}
return rc;
}
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
int rc;
DPRINTK("ENTER\n");
ahci_stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
tf.command = 0x80;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, timing, deadline, &online,
ahci_check_ready);
ahci_start_engine(ap);
if (online)
*class = ahci_dev_classify(ap);
DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
return rc;
}
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
bool online;
int rc;
DPRINTK("ENTER\n");
ahci_stop_engine(ap);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
ahci_start_engine(ap);
DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
/* vt8251 doesn't clear BSY on signature FIS reception,
* request follow-up softreset.
*/
return online ? -EAGAIN : rc;
}
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
bool online;
int rc;
ahci_stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
tf.command = 0x80;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
ahci_start_engine(ap);
/* The pseudo configuration device on SIMG4726 attached to
* ASUS P5W-DH Deluxe doesn't send signature FIS after
* hardreset if no device is attached to the first downstream
* port && the pseudo device locks up on SRST w/ PMP==0. To
* work around this, wait for !BSY only briefly. If BSY isn't
* cleared, perform CLO and proceed to IDENTIFY (achieved by
* ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
*
* Wait for two seconds. Devices attached to downstream port
* which can't process the following IDENTIFY after this will
* have to be reset again. For most cases, this should
* suffice while making probing snappish enough.
*/
if (online) {
rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
ahci_check_ready);
if (rc)
ahci_kick_engine(ap, 0);
}
return rc;
}
static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
struct ata_port *ap = link->ap;
void __iomem *port_mmio = ahci_port_base(ap);
u32 new_tmp, tmp;
ata_std_postreset(link, class);
/* Make sure port's ATAPI bit is set appropriately */
new_tmp = tmp = readl(port_mmio + PORT_CMD);
if (*class == ATA_DEV_ATAPI)
new_tmp |= PORT_CMD_ATAPI;
else
new_tmp &= ~PORT_CMD_ATAPI;
if (new_tmp != tmp) {
writel(new_tmp, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
}
}
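/* Build the PRDT that follows the command FIS in the command table.
 * Each entry takes a 32-bit split DMA address and flags_size, where the
 * size field is the byte count minus one.  Returns the number of
 * entries filled in.
 */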
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
struct scatterlist *sg;
struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
unsigned int si;
VPRINTK("ENTER\n");
/*
* Next, the S/G list.
*/
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
}
return si;
}
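/* Prepare a queued command: write the H2D command FIS (and, for ATAPI,
 * the CDB at AHCI_CMD_TBL_CDB) into this tag's command table, build the
 * PRDT if the command has DMA-mapped data, and fill the matching
 * command slot with the FIS length, write/ATAPI flags, PMP port and
 * PRDT count.
 */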
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ahci_port_priv *pp = ap->private_data;
int is_atapi = ata_is_atapi(qc->tf.protocol);
void *cmd_tbl;
u32 opts;
const u32 cmd_fis_len = 5; /* five dwords */
unsigned int n_elem;
/*
* Fill in command table information. First, the header,
* a SATA Register - Host to Device command FIS.
*/
cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
if (is_atapi) {
memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}
n_elem = 0;
if (qc->flags & ATA_QCFLAG_DMAMAP)
n_elem = ahci_fill_sg(qc, cmd_tbl);
/*
* Fill in command slot information.
*/
opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
if (qc->tf.flags & ATA_TFLAG_WRITE)
opts |= AHCI_CMD_WRITE;
if (is_atapi)
opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
ahci_fill_cmd_slot(pp, qc->tag, opts);
}
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
struct ata_eh_info *host_ehi = &ap->link.eh_info;
struct ata_link *link = NULL;
struct ata_queued_cmd *active_qc;
struct ata_eh_info *active_ehi;
u32 serror;
/* determine active link */
ata_for_each_link(link, ap, EDGE)
if (ata_link_active(link))
break;
if (!link)
link = &ap->link;
active_qc = ata_qc_from_tag(ap, link->active_tag);
active_ehi = &link->eh_info;
/* record irq stat */
ata_ehi_clear_desc(host_ehi);
ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
/* AHCI needs SError cleared; otherwise, it might lock up */
ahci_scr_read(&ap->link, SCR_ERROR, &serror);
ahci_scr_write(&ap->link, SCR_ERROR, serror);
host_ehi->serror |= serror;
/* some controllers set IRQ_IF_ERR on device errors, ignore it */
if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
irq_stat &= ~PORT_IRQ_IF_ERR;
if (irq_stat & PORT_IRQ_TF_ERR) {
/* If qc is active, charge it; otherwise, the active
* link. There's no active qc on NCQ errors. It will
* be determined by EH by reading log page 10h.
*/
if (active_qc)
active_qc->err_mask |= AC_ERR_DEV;
else
active_ehi->err_mask |= AC_ERR_DEV;
if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
host_ehi->serror &= ~SERR_INTERNAL;
}
if (irq_stat & PORT_IRQ_UNK_FIS) {
u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
active_ehi->err_mask |= AC_ERR_HSM;
active_ehi->action |= ATA_EH_RESET;
ata_ehi_push_desc(active_ehi,
"unknown FIS %08x %08x %08x %08x" ,
unk[0], unk[1], unk[2], unk[3]);
}
if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
active_ehi->err_mask |= AC_ERR_HSM;
active_ehi->action |= ATA_EH_RESET;
ata_ehi_push_desc(active_ehi, "incorrect PMP");
}
if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
host_ehi->err_mask |= AC_ERR_HOST_BUS;
host_ehi->action |= ATA_EH_RESET;
ata_ehi_push_desc(host_ehi, "host bus error");
}
if (irq_stat & PORT_IRQ_IF_ERR) {
host_ehi->err_mask |= AC_ERR_ATA_BUS;
host_ehi->action |= ATA_EH_RESET;
ata_ehi_push_desc(host_ehi, "interface fatal error");
}
if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
ata_ehi_hotplugged(host_ehi);
ata_ehi_push_desc(host_ehi, "%s",
irq_stat & PORT_IRQ_CONNECT ?
"connection status changed" : "PHY RDY changed");
}
/* okay, let's hand over to EH */
if (irq_stat & PORT_IRQ_FREEZE)
ata_port_freeze(ap);
else
ata_port_abort(ap);
}
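/* Per-port interrupt handler: ack PORT_IRQ_STAT, hand error bits
 * (PORT_IRQ_ERROR) to ahci_error_intr(), emulate SNotification by
 * snooping the SDB FIS area when the HBA lacks HOST_CAP_SNTF, and
 * complete commands against PORT_SCR_ACT (NCQ phase) or PORT_CMD_ISSUE.
 */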
static void ahci_port_intr(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
u32 status, qc_active;
int rc;
status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);
/* ignore BAD_PMP while resetting */
if (unlikely(resetting))
status &= ~PORT_IRQ_BAD_PMP;
/* If we are getting PhyRdy, this is
* just a power state change; clear it
* out, plus the PhyRdy/Comm Wake bits
* from SError
*/
if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
(status & PORT_IRQ_PHYRDY)) {
status &= ~PORT_IRQ_PHYRDY;
ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
}
if (unlikely(status & PORT_IRQ_ERROR)) {
ahci_error_intr(ap, status);
return;
}
if (status & PORT_IRQ_SDB_FIS) {
/* If SNotification is available, leave notification
* handling to sata_async_notification(). If not,
* emulate it by snooping SDB FIS RX area.
*
* Snooping FIS RX area is probably cheaper than
* poking SNotification but some controllers which
* implement SNotification, ICH9 for example, don't
* store AN SDB FIS into receive area.
*/
if (hpriv->cap & HOST_CAP_SNTF)
sata_async_notification(ap);
else {
/* If the 'N' bit in word 0 of the FIS is set,
* we just received asynchronous notification.
* Tell libata about it.
*/
const __le32 *f = pp->rx_fis + RX_FIS_SDB;
u32 f0 = le32_to_cpu(f[0]);
if (f0 & (1 << 15))
sata_async_notification(ap);
}
}
/* pp->active_link is valid iff any command is in flight */
if (ap->qc_active && pp->active_link->sactive)
qc_active = readl(port_mmio + PORT_SCR_ACT);
else
qc_active = readl(port_mmio + PORT_CMD_ISSUE);
rc = ata_qc_complete_multiple(ap, qc_active);
/* while resetting, invalid completions are expected */
if (unlikely(rc < 0 && !resetting)) {
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
}
}
static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int i, handled = 0;
void __iomem *mmio;
u32 irq_stat, irq_masked;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = host->iomap[AHCI_PCI_BAR];
/* sigh. 0xffffffff is a valid return from h/w */
irq_stat = readl(mmio + HOST_IRQ_STAT);
if (!irq_stat)
return IRQ_NONE;
irq_masked = irq_stat & hpriv->port_map;
spin_lock(&host->lock);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
if (!(irq_masked & (1 << i)))
continue;
ap = host->ports[i];
if (ap) {
ahci_port_intr(ap);
VPRINTK("port %u\n", i);
} else {
VPRINTK("port %u (no irq)\n", i);
if (ata_ratelimit())
dev_printk(KERN_WARNING, host->dev,
"interrupt on disabled port %u\n", i);
}
handled = 1;
}
/* HOST_IRQ_STAT behaves as level triggered latch meaning that
* it should be cleared after all the port events are cleared;
* otherwise, it will raise a spurious interrupt after each
* valid one. Please read section 10.6.2 of ahci 1.1 for more
* information.
*
* Also, use the unmasked value to clear interrupt as spurious
* pending event on a dummy port might cause screaming IRQ.
*/
writel(irq_stat, mmio + HOST_IRQ_STAT);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_port_priv *pp = ap->private_data;
/* Keep track of the currently active link. It will be used
* in completion path to determine whether NCQ phase is in
* progress.
*/
pp->active_link = qc->dev->link;
if (qc->tf.protocol == ATA_PROT_NCQ)
writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
ahci_sw_activity(qc->dev->link);
return 0;
}
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
ata_tf_from_fis(d2h_fis, &qc->result_tf);
return true;
}
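/* EH freeze/thaw: freezing simply masks all port interrupts; thawing
 * clears anything pending at both the port and host level and restores
 * the saved interrupt mask.
 */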
static void ahci_freeze(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
/* turn IRQ off */
writel(0, port_mmio + PORT_IRQ_MASK);
}
static void ahci_thaw(struct ata_port *ap)
{
void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
struct ahci_port_priv *pp = ap->private_data;
/* clear IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
writel(tmp, port_mmio + PORT_IRQ_STAT);
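	/* ack this port's bit in the host-wide interrupt status register */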
writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
/* turn IRQ back on */
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
static void ahci_error_handler(struct ata_port *ap)
{
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
/* restart engine */
ahci_stop_engine(ap);
ahci_start_engine(ap);
}
sata_pmp_error_handler(ap);
}
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
if (qc->flags & ATA_QCFLAG_FAILED)
ahci_kick_engine(ap, 1);
}
static void ahci_pmp_attach(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_port_priv *pp = ap->private_data;
u32 cmd;
cmd = readl(port_mmio + PORT_CMD);
cmd |= PORT_CMD_PMP;
writel(cmd, port_mmio + PORT_CMD);
pp->intr_mask |= PORT_IRQ_BAD_PMP;
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
static void ahci_pmp_detach(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_port_priv *pp = ap->private_data;
u32 cmd;
cmd = readl(port_mmio + PORT_CMD);
cmd &= ~PORT_CMD_PMP;
writel(cmd, port_mmio + PORT_CMD);
pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
static int ahci_port_resume(struct ata_port *ap)
{
ahci_power_up(ap);
ahci_start_port(ap);
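	/* (re)program the port for the presence or absence of a PMP */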
if (sata_pmp_attached(ap))
ahci_pmp_attach(ap);
else
ahci_pmp_detach(ap);
return 0;
}
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
const char *emsg = NULL;
int rc;
rc = ahci_deinit_port(ap, &emsg);
if (rc == 0)
ahci_power_down(ap);
else {
ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
ahci_start_port(ap);
}
return rc;
}
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
u32 ctl;
if (mesg.event & PM_EVENT_SLEEP) {
/* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
* transition of the HBA to D3 state.
*/
ctl = readl(mmio + HOST_CTL);
ctl &= ~HOST_IRQ_EN;
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
}
return ata_pci_device_suspend(pdev, mesg);
}
static int ahci_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
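	/* the controller only needs a reset and re-init if it actually
	 * went through a suspend; a freeze leaves its state intact
	 */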
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_init_controller(host);
}
ata_host_resume(host);
return 0;
}
#endif
static int ahci_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct ahci_port_priv *pp;
void *mem;
dma_addr_t mem_dma;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
GFP_KERNEL);
if (!mem)
return -ENOMEM;
memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
/*
* First item in chunk of DMA memory: 32-slot command table,
* 32 bytes each in size
*/
pp->cmd_slot = mem;
pp->cmd_slot_dma = mem_dma;
mem += AHCI_CMD_SLOT_SZ;
mem_dma += AHCI_CMD_SLOT_SZ;
/*
* Second item: Received-FIS area
*/
pp->rx_fis = mem;
pp->rx_fis_dma = mem_dma;
mem += AHCI_RX_FIS_SZ;
mem_dma += AHCI_RX_FIS_SZ;
/*
* Third item: data area for storing a single command
* and its scatter-gather table
*/
pp->cmd_tbl = mem;
pp->cmd_tbl_dma = mem_dma;
/*
* Save off initial list of interrupts to be enabled.
	 * This could be changed later.
*/
pp->intr_mask = DEF_PORT_IRQ;
ap->private_data = pp;
/* engage engines, captain */
return ahci_port_resume(ap);
}
static void ahci_port_stop(struct ata_port *ap)
{
const char *emsg = NULL;
int rc;
/* de-initialize port */
rc = ahci_deinit_port(ap, &emsg);
if (rc)
ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}
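/* Pick DMA masks for the controller: use 64-bit addressing when the HBA
 * advertises CAP.S64A (using_dac), falling back to a 32-bit consistent
 * mask if a 64-bit one can't be set; otherwise use 32-bit masks for
 * both streaming and consistent DMA.
 */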
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
if (using_dac &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit consistent DMA enable failed\n");
return rc;
}
}
return 0;
}
static void ahci_print_info(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
u32 vers, cap, impl, speed;
const char *speed_s;
u16 cc;
const char *scc_s;
vers = readl(mmio + HOST_VERSION);
cap = hpriv->cap;
impl = hpriv->port_map;
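	/* CAP.ISS (bits 23:20) encodes the maximum supported interface speed */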
speed = (cap >> 20) & 0xf;
if (speed == 1)
speed_s = "1.5";
else if (speed == 2)
speed_s = "3";
else if (speed == 3)
speed_s = "6";
else
speed_s = "?";
pci_read_config_word(pdev, 0x0a, &cc);
if (cc == PCI_CLASS_STORAGE_IDE)
scc_s = "IDE";
else if (cc == PCI_CLASS_STORAGE_SATA)
scc_s = "SATA";
else if (cc == PCI_CLASS_STORAGE_RAID)
scc_s = "RAID";
else
scc_s = "unknown";
dev_printk(KERN_INFO, &pdev->dev,
"AHCI %02x%02x.%02x%02x "
"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
,
(vers >> 24) & 0xff,
(vers >> 16) & 0xff,
(vers >> 8) & 0xff,
vers & 0xff,
((cap >> 8) & 0x1f) + 1,
(cap & 0x1f) + 1,
speed_s,
impl,
scc_s);
dev_printk(KERN_INFO, &pdev->dev,
"flags: "
"%s%s%s%s%s%s%s"
"%s%s%s%s%s%s%s"
"%s\n"
,
cap & (1 << 31) ? "64bit " : "",
cap & (1 << 30) ? "ncq " : "",
cap & (1 << 29) ? "sntf " : "",
cap & (1 << 28) ? "ilck " : "",
cap & (1 << 27) ? "stag " : "",
cap & (1 << 26) ? "pm " : "",
cap & (1 << 25) ? "led " : "",
cap & (1 << 24) ? "clo " : "",
cap & (1 << 19) ? "nz " : "",
cap & (1 << 18) ? "only " : "",
cap & (1 << 17) ? "pmp " : "",
cap & (1 << 15) ? "pio " : "",
cap & (1 << 14) ? "slum " : "",
cap & (1 << 13) ? "part " : "",
cap & (1 << 6) ? "ems ": ""
);
}
/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
* hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
* support PMP and the 4726 either directly exports the device
* attached to the first downstream port or acts as a hardware storage
 * controller and emulates a single ATA device (can be RAID 0/1 or some
* other configuration).
*
* When there's no device attached to the first downstream port of the
* 4726, "Config Disk" appears, which is a pseudo ATA device to
* configure the 4726. However, ATA emulation of the device is very
* lame. It doesn't send signature D2H Reg FIS after the initial
 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
*
* The following function works around the problem by always using
* hardreset on the port and not depending on receiving signature FIS
* afterward. If signature FIS isn't received soon, ATA class is
* assumed without follow-up softreset.
*/
static void ahci_p5wdh_workaround(struct ata_host *host)
{
static struct dmi_system_id sysids[] = {
{
.ident = "P5W DH Deluxe",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"ASUSTEK COMPUTER INC"),
DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
},
},
{ }
};
struct pci_dev *pdev = to_pci_dev(host->dev);
if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
dmi_check_system(sysids)) {
struct ata_port *ap = host->ports[1];
dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
"Deluxe on-board SIMG4726 workaround\n");
ap->ops = &ahci_p5wdh_ops;
ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
}
}
static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
{
static const struct dmi_system_id broken_systems[] = {
{
.ident = "HP Compaq nx6310",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
},
/* PCI slot number of the controller */
.driver_data = (void *)0x1FUL,
},
{
.ident = "HP Compaq 6720s",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
},
/* PCI slot number of the controller */
.driver_data = (void *)0x1FUL,
},
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
if (dmi) {
unsigned long slot = (unsigned long)dmi->driver_data;
/* apply the quirk only to on-board controllers */
return slot == PCI_SLOT(pdev->devfn);
}
return false;
}
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
unsigned int board_id = ent->driver_data;
struct ata_port_info pi = ahci_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ata_host *host;
int n_ports, i, rc;
VPRINTK("ENTER\n");
WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
	/* The AHCI driver can only drive the SATA ports; the PATA driver
	   can drive them all, so if both drivers are selected make sure
	   AHCI stays out of the way */
if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
return -ENODEV;
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == 0x2652 || pdev->device == 0x2653)) {
u8 map;
/* ICH6s share the same PCI ID for both piix and ahci
* modes. Enabling ahci mode while MAP indicates
* combined mode is a bad idea. Yield to ata_piix.
*/
pci_read_config_byte(pdev, ICH_MAP, &map);
if (map & 0x3) {
dev_printk(KERN_INFO, &pdev->dev, "controller is in "
"combined mode, can't enable AHCI mode\n");
return -ENODEV;
}
}
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
hpriv->flags |= (unsigned long)pi.private_data;
/* MCP65 revision A1 and A2 can't do MSI */
if (board_id == board_ahci_mcp65 &&
(pdev->revision == 0xa1 || pdev->revision == 0xa2))
hpriv->flags |= AHCI_HFLAG_NO_MSI;
/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
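	/* MSI is optional; if enabling it fails we fall back to legacy INTx */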
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
pci_enable_msi(pdev);
/* save initial config */
ahci_save_initial_config(pdev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ)
pi.flags |= ATA_FLAG_NCQ;
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
u8 messages;
void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
u32 em_loc = readl(mmio + HOST_EM_LOC);
u32 em_ctl = readl(mmio + HOST_EM_CTL);
messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
/* we only support LED message type right now */
if ((messages & 0x01) && (ahci_em_messages == 1)) {
/* store em_loc */
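			/* EM_LOC bits 31:16 give the message buffer offset in
			 * DWords from ABAR; convert it to a byte offset.
			 */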
hpriv->em_loc = ((em_loc >> 16) * 4);
pi.flags |= ATA_FLAG_EM;
if (!(em_ctl & EM_CTL_ALHD))
pi.flags |= ATA_FLAG_SW_ACTIVITY;
}
}
if (ahci_broken_system_poweroff(pdev)) {
pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
dev_info(&pdev->dev,
"quirky BIOS, skipping spindown on poweroff\n");
}
	/* CAP.NP sometimes indicates the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
* both CAP.NP and port_map.
*/
n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
host->iomap = pcim_iomap_table(pdev);
host->private_data = hpriv;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
else
printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
ata_port_pbar_desc(ap, AHCI_PCI_BAR,
0x100 + ap->port_no * 0x80, "port");
/* set initial link pm policy */
ap->pm_policy = NOT_AVAILABLE;
/* set enclosure management message type */
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = ahci_em_messages;
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
ap->ops = &ata_dummy_port_ops;
}
/* apply workaround for ASUS P5W DH Deluxe mainboard */
ahci_p5wdh_workaround(host);
/* initialize adapter */
rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
if (rc)
return rc;
rc = ahci_reset_controller(host);
if (rc)
return rc;
ahci_init_controller(host);
ahci_print_info(host);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
&ahci_sht);
}
static int __init ahci_init(void)
{
return pci_register_driver(&ahci_pci_driver);
}
static void __exit ahci_exit(void)
{
pci_unregister_driver(&ahci_pci_driver);
}
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);
module_init(ahci_init);
module_exit(ahci_exit);