Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (97 commits)
  [SCSI] zfcp: removed wrong comment
  [SCSI] zfcp: use of uninitialized variable
  [SCSI] zfcp: Invalid locking order
  [SCSI] aic79xx: use dma_get_required_mask()
  [SCSI] aic79xx: fix bracket mismatch in unused macro
  [SCSI] BusLogic: Replace 'boolean' by 'bool'
  [SCSI] advansys: clean up warnings
  [SCSI] 53c7xx: brackets fix in uncompiled code
  [SCSI] nsp_cs: remove old scsi code
  [SCSI] aic79xx: make ahd_match_scb() static
  [SCSI] DAC960: kmalloc->kzalloc/Casting cleanups
  [SCSI] scsi_kmap_atomic_sg(): check that local irqs are disabled
  [SCSI] Buslogic: local_irq_disable() is redundant after local_irq_save()
  [SCSI] aic94xx: update for v28 firmware
  [SCSI] scsi_error: Fix lost EH commands
  [SCSI] aic94xx: Add default bus reset handler
  [SCSI] aic94xx: Remove TMF result code munging
  [SCSI] libsas: Add an LU reset mechanism to the error handler
  [SCSI] libsas: Don't BUG when connecting two expanders via wide port
  [SCSI] st: fix Tape dies if wrong block size used, bug 7919
  ...
Linus Torvalds 2007-02-11 11:44:25 -08:00
commit 5f0b1437e0
104 changed files with 5851 additions and 3159 deletions

@@ -1,3 +1,19 @@
+Release Date	: Thu Nov 16 15:32:35 EST 2006 -
+			Sumant Patro <sumant.patro@lsi.com>
+Current Version : 2.20.5.1 (scsi module), 2.20.2.6 (cmm module)
+Older Version	: 2.20.4.9 (scsi module), 2.20.2.6 (cmm module)
+1. Changes in Initialization to fix kdump failure.
+   Send SYNC command on loading.
+   This command clears the pending commands in the adapter
+   and re-initialize its internal RAID structure.
+   Without this change, megaraid driver either panics or fails to
+   initialize the adapter during kdump's second kernel boot
+   if there are pending commands or interrupts from other devices
+   sharing the same IRQ.
+2. Authors email-id domain name changed from lsil.com to lsi.com.
+   Also modified the MODULE_AUTHOR to megaraidlinux@lsi.com
+
 Release Date	: Fri May 19 09:31:45 EST 2006 - Seokmann Ju <sju@lsil.com>
 Current Version : 2.20.4.9 (scsi module), 2.20.2.6 (cmm module)
 Older Version	: 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
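
The kdump fix described in the changelog entry above comes down to an ordering rule: quiesce the adapter's leftover state before normal initialization, because in a kdump (second) kernel the firmware may still hold commands and interrupts raised under the crashed kernel. A rough sketch of that idea in kernel-style C follows; every name below is hypothetical and illustrative only, not the actual megaraid_sas code.

/* Illustrative only: all symbols here are made up for the sketch. */
struct example_adapter { int dummy; };

static int example_send_sync_cmd(struct example_adapter *a)
{
	/* Stand-in for issuing a SYNC/flush command to the firmware. */
	return 0;
}

static int example_setup_irq_and_io(struct example_adapter *a)
{
	/* Stand-in for requesting the (possibly shared) IRQ and starting I/O. */
	return 0;
}

static int example_adapter_init(struct example_adapter *adapter)
{
	int err;

	/*
	 * Send the SYNC-style command first: the firmware drops any commands
	 * still pending from the crashed kernel and rebuilds its internal
	 * RAID state before the driver touches it.
	 */
	err = example_send_sync_cmd(adapter);
	if (err)
		return err;

	/* Only then enable interrupts and normal command submission. */
	return example_setup_irq_and_io(adapter);
}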

@@ -1373,8 +1373,7 @@ static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
   Controller->BounceBufferLimit = DAC690_V2_PciDmaMask;
   /* This is a temporary dma mapping, used only in the scope of this function */
-  CommandMailbox =
-	  (DAC960_V2_CommandMailbox_T *)pci_alloc_consistent( PCI_Device,
+  CommandMailbox = pci_alloc_consistent(PCI_Device,
 		sizeof(DAC960_V2_CommandMailbox_T), &CommandMailboxDMA);
   if (CommandMailbox == NULL)
 	  return false;
@@ -1879,8 +1878,8 @@ static bool DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T
       if (NewLogicalDeviceInfo->LogicalDeviceState !=
 	  DAC960_V2_LogicalDevice_Offline)
 	Controller->LogicalDriveInitiallyAccessible[LogicalDeviceNumber] = true;
-      LogicalDeviceInfo = (DAC960_V2_LogicalDeviceInfo_T *)
-	kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T), GFP_ATOMIC);
+      LogicalDeviceInfo = kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T),
+				  GFP_ATOMIC);
       if (LogicalDeviceInfo == NULL)
 	return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION");
       Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
@@ -2113,8 +2112,8 @@ static bool DAC960_V2_ReadDeviceConfiguration(DAC960_Controller_T
       if (!DAC960_V2_NewPhysicalDeviceInfo(Controller, Channel, TargetID, LogicalUnit))
 	  break;
-      PhysicalDeviceInfo = (DAC960_V2_PhysicalDeviceInfo_T *)
-	kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T), GFP_ATOMIC);
+      PhysicalDeviceInfo = kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T),
+				   GFP_ATOMIC);
       if (PhysicalDeviceInfo == NULL)
 	  return DAC960_Failure(Controller, "PHYSICAL DEVICE ALLOCATION");
       Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex] =
@@ -2122,8 +2121,8 @@ static bool DAC960_V2_ReadDeviceConfiguration(DAC960_Controller_T
       memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo,
 	     sizeof(DAC960_V2_PhysicalDeviceInfo_T));
-      InquiryUnitSerialNumber = (DAC960_SCSI_Inquiry_UnitSerialNumber_T *)
-	kmalloc(sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), GFP_ATOMIC);
+      InquiryUnitSerialNumber = kmalloc(
+	      sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), GFP_ATOMIC);
       if (InquiryUnitSerialNumber == NULL) {
 	kfree(PhysicalDeviceInfo);
 	return DAC960_Failure(Controller, "SERIAL NUMBER ALLOCATION");
@@ -4949,8 +4948,8 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
 	  PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit;
 	  Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] =
 	    PhysicalDevice;
-	  LogicalDeviceInfo = (DAC960_V2_LogicalDeviceInfo_T *)
-	    kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T), GFP_ATOMIC);
+	  LogicalDeviceInfo = kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T),
+				      GFP_ATOMIC);
 	  Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
 	    LogicalDeviceInfo;
 	  DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
@@ -5709,14 +5708,14 @@ static bool DAC960_CheckStatusBuffer(DAC960_Controller_T *Controller,
       unsigned int NewStatusBufferLength = DAC960_InitialStatusBufferSize;
       while (NewStatusBufferLength < ByteCount)
 	NewStatusBufferLength *= 2;
-      Controller->CombinedStatusBuffer =
-	(unsigned char *) kmalloc(NewStatusBufferLength, GFP_ATOMIC);
+      Controller->CombinedStatusBuffer = kmalloc(NewStatusBufferLength,
+						 GFP_ATOMIC);
       if (Controller->CombinedStatusBuffer == NULL) return false;
       Controller->CombinedStatusBufferLength = NewStatusBufferLength;
       return true;
     }
-  NewStatusBuffer = (unsigned char *)
-    kmalloc(2 * Controller->CombinedStatusBufferLength, GFP_ATOMIC);
+  NewStatusBuffer = kmalloc(2 * Controller->CombinedStatusBufferLength,
+			    GFP_ATOMIC);
   if (NewStatusBuffer == NULL)
     {
       DAC960_Warning("Unable to expand Combined Status Buffer - Truncating\n",
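
The DAC960 hunks above are all the same mechanical cleanup: kmalloc() returns void *, so casting its result is redundant in kernel C, and the call reads better on one line once the cast goes. A minimal illustration of the preferred shape (the struct name is made up for the example, not taken from the driver):

#include <linux/slab.h>

struct example_info {
	int data;
};

static struct example_info *example_alloc(void)
{
	struct example_info *info;

	/*
	 * kmalloc() returns void *, so no cast is needed.  The old code in
	 * the hunks above wrote
	 *     info = (struct example_info *) kmalloc(sizeof(...), GFP_ATOMIC);
	 * which is equivalent but noisier.
	 */
	info = kmalloc(sizeof(*info), GFP_ATOMIC);
	if (info == NULL)
		return NULL;

	info->data = 0;
	return info;		/* caller kfree()s this when done */
}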

@@ -66,7 +66,7 @@ config FUSION_MAX_SGE
 config FUSION_CTL
 	tristate "Fusion MPT misc device (ioctl) driver"
-	depends on FUSION_SPI || FUSION_FC
+	depends on FUSION_SPI || FUSION_FC || FUSION_SAS
 	---help---
 	  The Fusion MPT misc device driver provides specialized control
 	  of MPT adapters via system ioctl calls.  Use of ioctl calls to

@@ -8,6 +8,9 @@
 #EXTRA_CFLAGS += -DMPT_DEBUG_INIT
 #EXTRA_CFLAGS += -DMPT_DEBUG_EXIT
 #EXTRA_CFLAGS += -DMPT_DEBUG_FAIL
+#EXTRA_CFLAGS += -DMPT_DEBUG_DV
+#EXTRA_CFLAGS += -DMPT_DEBUG_TM
+#EXTRA_CFLAGS += -DMPT_DEBUG_REPLY
 #
 # driver/module specifics...
@@ -20,11 +23,7 @@
 #CFLAGS_mptbase.o += -DMPT_DEBUG_RESET
 #
 # For mptscsih:
-#CFLAGS_mptscsih.o += -DMPT_DEBUG_DV
-#CFLAGS_mptscsih.o += -DMPT_DEBUG_NEGO
-#CFLAGS_mptscsih.o += -DMPT_DEBUG_TM
 #CFLAGS_mptscsih.o += -DMPT_DEBUG_SCSI
-#CFLAGS_mptscsih.o += -DMPT_DEBUG_REPLY
 #
 # For mptctl:
 #CFLAGS_mptctl.o += -DMPT_DEBUG_IOCTL

@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2005 LSI Logic Corporation.
+ *  Copyright (c) 2000-2006 LSI Logic Corporation.
  *
  *
  *           Name:  mpi.h
  *          Title:  MPI Message independent structures and definitions
  *  Creation Date:  July 27, 2000
  *
- *    mpi.h Version:  01.05.11
+ *    mpi.h Version:  01.05.12
  *
  *  Version History
  *  ---------------
@@ -77,6 +77,7 @@
 *  08-03-05  01.05.09  Bumped MPI_HEADER_VERSION_UNIT.
 *  08-30-05  01.05.10  Added 2 new IOCStatus codes for Target.
 *  03-27-06  01.05.11  Bumped MPI_HEADER_VERSION_UNIT.
+*  10-11-06  01.05.12  Bumped MPI_HEADER_VERSION_UNIT.
 *  --------------------------------------------------------------------------
 */
@@ -107,7 +108,7 @@
 /* Note: The major versions of 0xe0 through 0xff are reserved */
 /* versioning for this MPI header set */
-#define MPI_HEADER_VERSION_UNIT             (0x0D)
+#define MPI_HEADER_VERSION_UNIT             (0x0E)
 #define MPI_HEADER_VERSION_DEV              (0x00)
 #define MPI_HEADER_VERSION_UNIT_MASK        (0xFF00)
 #define MPI_HEADER_VERSION_UNIT_SHIFT       (8)

@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2005 LSI Logic Corporation.
+ *  Copyright (c) 2000-2006 LSI Logic Corporation.
  *
  *
  *           Name:  mpi_cnfg.h
  *          Title:  MPI Config message, structures, and Pages
 *  Creation Date:  July 27, 2000
 *
- *    mpi_cnfg.h Version:  01.05.12
+ *    mpi_cnfg.h Version:  01.05.13
 *
 *  Version History
 *  ---------------
@@ -276,6 +276,23 @@
 *                      Added AdditionalControlFlags, MaxTargetPortConnectTime,
 *                      ReportDeviceMissingDelay, and IODeviceMissingDelay
 *                      fields to SAS IO Unit Page 1.
+*  10-11-06  01.05.13  Added NumForceWWID field and ForceWWID array to
+*                      Manufacturing Page 5.
+*                      Added Manufacturing pages 8 through 10.
+*                      Added defines for supported metadata size bits in
+*                      CapabilitiesFlags field of IOC Page 6.
+*                      Added defines for metadata size bits in VolumeSettings
+*                      field of RAID Volume Page 0.
+*                      Added SATA Link Reset settings, Enable SATA Asynchronous
+*                      Notification bit, and HideNonZeroAttachedPhyIdentifiers
+*                      bit to AdditionalControlFlags field of SAS IO Unit
+*                      Page 1.
+*                      Added defines for Enclosure Devices Unmapped and
+*                      Device Limit Exceeded bits in Status field of SAS IO
+*                      Unit Page 2.
+*                      Added more AccessStatus values for SAS Device Page 0.
+*                      Added bit for SATA Asynchronous Notification Support in
+*                      Flags field of SAS Device Page 0.
 *  --------------------------------------------------------------------------
 */
@@ -654,17 +671,24 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
 #define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA         (0x01)
+#ifndef MPI_MANPAGE5_NUM_FORCEWWID
+#define MPI_MANPAGE5_NUM_FORCEWWID      (1)
+#endif
 typedef struct _CONFIG_PAGE_MANUFACTURING_5
 {
     CONFIG_PAGE_HEADER      Header;             /* 00h */
     U64                     BaseWWID;           /* 04h */
     U8                      Flags;              /* 0Ch */
-    U8                      Reserved1;          /* 0Dh */
+    U8                      NumForceWWID;       /* 0Dh */
     U16                     Reserved2;          /* 0Eh */
+    U32                     Reserved3;          /* 10h */
+    U32                     Reserved4;          /* 14h */
+    U64                     ForceWWID[MPI_MANPAGE5_NUM_FORCEWWID]; /* 18h */
 } CONFIG_PAGE_MANUFACTURING_5, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_5,
   ManufacturingPage5_t, MPI_POINTER pManufacturingPage5_t;
-#define MPI_MANUFACTURING5_PAGEVERSION      (0x01)
+#define MPI_MANUFACTURING5_PAGEVERSION      (0x02)
 /* defines for the Flags field */
 #define MPI_MANPAGE5_TWO_WWID_PER_PHY           (0x01)
@@ -740,6 +764,36 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_7
 #define MPI_MANPAGE7_FLAG_USE_SLOT_INFO         (0x00000001)
+typedef struct _CONFIG_PAGE_MANUFACTURING_8
+{
+    CONFIG_PAGE_HEADER      Header;             /* 00h */
+    U32                     ProductSpecificInfo;/* 04h */
+} CONFIG_PAGE_MANUFACTURING_8, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_8,
+  ManufacturingPage8_t, MPI_POINTER pManufacturingPage8_t;
+#define MPI_MANUFACTURING8_PAGEVERSION      (0x00)
+typedef struct _CONFIG_PAGE_MANUFACTURING_9
+{
+    CONFIG_PAGE_HEADER      Header;             /* 00h */
+    U32                     ProductSpecificInfo;/* 04h */
+} CONFIG_PAGE_MANUFACTURING_9, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_9,
+  ManufacturingPage9_t, MPI_POINTER pManufacturingPage9_t;
+#define MPI_MANUFACTURING6_PAGEVERSION      (0x00)
+typedef struct _CONFIG_PAGE_MANUFACTURING_10
+{
+    CONFIG_PAGE_HEADER      Header;             /* 00h */
+    U32                     ProductSpecificInfo;/* 04h */
+} CONFIG_PAGE_MANUFACTURING_10, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_10,
+  ManufacturingPage10_t, MPI_POINTER pManufacturingPage10_t;
+#define MPI_MANUFACTURING10_PAGEVERSION     (0x00)
 /****************************************************************************
 *   IO Unit Config Pages
 ****************************************************************************/
@@ -1080,10 +1134,14 @@ typedef struct _CONFIG_PAGE_IOC_6
 } CONFIG_PAGE_IOC_6, MPI_POINTER PTR_CONFIG_PAGE_IOC_6,
   IOCPage6_t, MPI_POINTER pIOCPage6_t;
-#define MPI_IOCPAGE6_PAGEVERSION                    (0x00)
+#define MPI_IOCPAGE6_PAGEVERSION                    (0x01)
 /* IOC Page 6 Capabilities Flags */
+#define MPI_IOCPAGE6_CAP_FLAGS_MASK_METADATA_SIZE   (0x00000006)
+#define MPI_IOCPAGE6_CAP_FLAGS_64MB_METADATA_SIZE   (0x00000000)
+#define MPI_IOCPAGE6_CAP_FLAGS_512MB_METADATA_SIZE  (0x00000002)
 #define MPI_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE     (0x00000001)
@@ -2160,6 +2218,11 @@ typedef struct _RAID_VOL0_SETTINGS
 #define MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE             (0x0004)
 #define MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC            (0x0008)
 #define MPI_RAIDVOL0_SETTING_FAST_DATA_SCRUBBING_0102   (0x0020) /* obsolete */
+#define MPI_RAIDVOL0_SETTING_MASK_METADATA_SIZE         (0x00C0)
+#define MPI_RAIDVOL0_SETTING_64MB_METADATA_SIZE         (0x0000)
+#define MPI_RAIDVOL0_SETTING_512MB_METADATA_SIZE        (0x0040)
 #define MPI_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX      (0x0010)
 #define MPI_RAIDVOL0_SETTING_USE_DEFAULTS               (0x8000)
@@ -2203,7 +2266,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
 } CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
   RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
-#define MPI_RAIDVOLPAGE0_PAGEVERSION                    (0x06)
+#define MPI_RAIDVOLPAGE0_PAGEVERSION                    (0x07)
 /* values for RAID Volume Page 0 InactiveStatus field */
 #define MPI_RAIDVOLPAGE0_UNKNOWN_INACTIVE               (0x00)
@@ -2518,7 +2581,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
 } CONFIG_PAGE_SAS_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_1,
   SasIOUnitPage1_t, MPI_POINTER pSasIOUnitPage1_t;
-#define MPI_SASIOUNITPAGE1_PAGEVERSION      (0x06)
+#define MPI_SASIOUNITPAGE1_PAGEVERSION      (0x07)
 /* values for SAS IO Unit Page 1 ControlFlags */
 #define MPI_SAS_IOUNIT1_CONTROL_DEVICE_SELF_TEST            (0x8000)
@@ -2544,7 +2607,13 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
 #define MPI_SAS_IOUNIT1_CONTROL_CLEAR_AFFILIATION           (0x0001)
 /* values for SAS IO Unit Page 1 AdditionalControlFlags */
-#define MPI_SAS_IOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE               (0x0001)
+#define MPI_SAS_IOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION    (0x0040)
+#define MPI_SAS_IOUNIT1_ACONTROL_HIDE_NONZERO_ATTACHED_PHY_IDENT    (0x0020)
+#define MPI_SAS_IOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET   (0x0010)
+#define MPI_SAS_IOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET  (0x0008)
+#define MPI_SAS_IOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET   (0x0004)
+#define MPI_SAS_IOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET     (0x0002)
+#define MPI_SAS_IOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE               (0x0001)
 /* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
 #define MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK         (0x7F)
@@ -2585,9 +2654,11 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
 } CONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2,
   SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t;
-#define MPI_SASIOUNITPAGE2_PAGEVERSION      (0x05)
+#define MPI_SASIOUNITPAGE2_PAGEVERSION      (0x06)
 /* values for SAS IO Unit Page 2 Status field */
+#define MPI_SAS_IOUNIT2_STATUS_DEVICE_LIMIT_EXCEEDED        (0x08)
+#define MPI_SAS_IOUNIT2_STATUS_ENCLOSURE_DEVICES_UNMAPPED   (0x04)
 #define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02)
 #define MPI_SAS_IOUNIT2_STATUS_FULL_PERSISTENT_MAPPINGS     (0x01)
@@ -2739,24 +2810,38 @@ typedef struct _CONFIG_PAGE_SAS_DEVICE_0
 } CONFIG_PAGE_SAS_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_0,
   SasDevicePage0_t, MPI_POINTER pSasDevicePage0_t;
-#define MPI_SASDEVICE0_PAGEVERSION          (0x04)
+#define MPI_SASDEVICE0_PAGEVERSION          (0x05)
 /* values for SAS Device Page 0 AccessStatus field */
 #define MPI_SAS_DEVICE0_ASTATUS_NO_ERRORS                   (0x00)
 #define MPI_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED            (0x01)
 #define MPI_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED      (0x02)
+#define MPI_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT   (0x03)
+/* specific values for SATA Init failures */
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN                 (0x10)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT    (0x11)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_DIAG                    (0x12)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION          (0x13)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER             (0x14)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_PIO_SN                  (0x15)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN                 (0x16)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN                 (0x17)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION        (0x18)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE         (0x19)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_MAX                     (0x1F)
 /* values for SAS Device Page 0 Flags field */
-#define MPI_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE              (0x0200)
-#define MPI_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE            (0x0100)
-#define MPI_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED      (0x0080)
-#define MPI_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED          (0x0040)
-#define MPI_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED            (0x0020)
-#define MPI_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED            (0x0010)
-#define MPI_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH          (0x0008)
-#define MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT            (0x0004)
-#define MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED                 (0x0002)
-#define MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT                (0x0001)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY      (0x0400)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE              (0x0200)
+#define MPI_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE            (0x0100)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED      (0x0080)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED          (0x0040)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED            (0x0020)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED            (0x0010)
+#define MPI_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH          (0x0008)
+#define MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT            (0x0004)
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED                 (0x0002)
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT                (0x0001)
 /* see mpi_sas.h for values for SAS Device Page 0 DeviceInfo values */

@@ -3,28 +3,28 @@
                   MPI Header File Change History
                   ==============================
-  Copyright (c) 2000-2005 LSI Logic Corporation.
+  Copyright (c) 2000-2006 LSI Logic Corporation.
 ---------------------------------------
- Header Set Release Version:    01.05.13
- Header Set Release Date:       03-27-06
+ Header Set Release Version:    01.05.14
+ Header Set Release Date:       10-11-06
 ---------------------------------------
 Filename          Current version     Prior version
 ----------        ---------------     -------------
-mpi.h             01.05.11            01.05.10
-mpi_ioc.h         01.05.11            01.05.10
-mpi_cnfg.h        01.05.12            01.05.11
-mpi_init.h        01.05.07            01.05.06
-mpi_targ.h        01.05.06            01.05.05
+mpi.h             01.05.12            01.05.11
+mpi_ioc.h         01.05.12            01.05.11
+mpi_cnfg.h        01.05.13            01.05.12
+mpi_init.h        01.05.08            01.05.07
+mpi_targ.h        01.05.06            01.05.06
 mpi_fc.h          01.05.01            01.05.01
 mpi_lan.h         01.05.01            01.05.01
 mpi_raid.h        01.05.02            01.05.02
 mpi_tool.h        01.05.03            01.05.03
 mpi_inb.h         01.05.01            01.05.01
-mpi_sas.h         01.05.03            01.05.02
+mpi_sas.h         01.05.04            01.05.03
 mpi_type.h        01.05.02            01.05.02
-mpi_history.txt   01.05.13            01.05.12
+mpi_history.txt   01.05.14            01.05.13

 *  Date      Version   Description
@@ -94,6 +94,7 @@ mpi.h
 *  08-03-05  01.05.09  Bumped MPI_HEADER_VERSION_UNIT.
 *  08-30-05  01.05.10  Added 2 new IOCStatus codes for Target.
 *  03-27-06  01.05.11  Bumped MPI_HEADER_VERSION_UNIT.
+*  10-11-06  01.05.12  Bumped MPI_HEADER_VERSION_UNIT.
 *  --------------------------------------------------------------------------

 mpi_ioc.h
@@ -182,6 +183,14 @@ mpi_ioc.h
 *                      Added MPI_EVENT_SAS_INIT_TABLE_OVERFLOW and event
 *                      data structure.
 *                      Added MPI_EXT_IMAGE_TYPE_INITIALIZATION.
+*  10-11-06  01.05.12  Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
+*                      Added MaxInitiators field to PortFacts reply.
+*                      Added SAS Device Status Change ReasonCode for
+*                      asynchronous notificaiton.
+*                      Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
+*                      data structure.
+*                      Added new ImageType values for FWDownload and FWUpload
+*                      requests.
 *  --------------------------------------------------------------------------

 mpi_cnfg.h
@@ -447,6 +456,23 @@ mpi_cnfg.h
 *                      Added AdditionalControlFlags, MaxTargetPortConnectTime,
 *                      ReportDeviceMissingDelay, and IODeviceMissingDelay
 *                      fields to SAS IO Unit Page 1.
+*  10-11-06  01.05.13  Added NumForceWWID field and ForceWWID array to
+*                      Manufacturing Page 5.
+*                      Added Manufacturing pages 8 through 10.
+*                      Added defines for supported metadata size bits in
+*                      CapabilitiesFlags field of IOC Page 6.
+*                      Added defines for metadata size bits in VolumeSettings
+*                      field of RAID Volume Page 0.
+*                      Added SATA Link Reset settings, Enable SATA Asynchronous
+*                      Notification bit, and HideNonZeroAttachedPhyIdentifiers
+*                      bit to AdditionalControlFlags field of SAS IO Unit
+*                      Page 1.
+*                      Added defines for Enclosure Devices Unmapped and
+*                      Device Limit Exceeded bits in Status field of SAS IO
+*                      Unit Page 2.
+*                      Added more AccessStatus values for SAS Device Page 0.
+*                      Added bit for SATA Asynchronous Notification Support in
+*                      Flags field of SAS Device Page 0.
 *  --------------------------------------------------------------------------

 mpi_init.h
@@ -490,6 +516,7 @@ mpi_init.h
 *  08-03-05  01.05.06  Fixed some MPI_SCSIIO32_MSGFLGS_ defines to make them
 *                      unique in the first 32 characters.
 *  03-27-06  01.05.07  Added Task Management type of Clear ACA.
+*  10-11-06  01.05.08  Shortened define for Task Management type of Clear ACA.
 *  --------------------------------------------------------------------------

 mpi_targ.h
@@ -638,6 +665,8 @@ mpi_sas.h
 *                      and Remove Device operations to SAS IO Unit Control.
 *                      Added DevHandle field to SAS IO Unit Control request and
 *                      reply.
+*  10-11-06  01.05.04  Fixed the name of a define for Operation field of SAS IO
+*                      Unit Control request.
 *  --------------------------------------------------------------------------

 mpi_type.h
@@ -653,20 +682,20 @@ mpi_type.h
 mpi_history.txt         Parts list history

-Filename    01.05.13   01.05.12   01.05.11   01.05.10   01.05.09
-----------  --------   --------   --------   --------   --------
-mpi.h       01.05.11   01.05.10   01.05.09   01.05.08   01.05.07
-mpi_ioc.h   01.05.11   01.05.10   01.05.09   01.05.09   01.05.08
-mpi_cnfg.h  01.05.12   01.05.11   01.05.10   01.05.09   01.05.08
-mpi_init.h  01.05.07   01.05.06   01.05.06   01.05.05   01.05.04
-mpi_targ.h  01.05.06   01.05.05   01.05.05   01.05.05   01.05.04
-mpi_fc.h    01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
-mpi_lan.h   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
-mpi_raid.h  01.05.02   01.05.02   01.05.02   01.05.02   01.05.02
-mpi_tool.h  01.05.03   01.05.03   01.05.03   01.05.03   01.05.03
-mpi_inb.h   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
-mpi_sas.h   01.05.03   01.05.02   01.05.01   01.05.01   01.05.01
-mpi_type.h  01.05.02   01.05.02   01.05.01   01.05.01   01.05.01
+Filename    01.05.13   01.05.13   01.05.12   01.05.11   01.05.10   01.05.09
+----------  --------   --------   --------   --------   --------   --------
+mpi.h       01.05.12   01.05.11   01.05.10   01.05.09   01.05.08   01.05.07
+mpi_ioc.h   01.05.12   01.05.11   01.05.10   01.05.09   01.05.09   01.05.08
+mpi_cnfg.h  01.05.13   01.05.12   01.05.11   01.05.10   01.05.09   01.05.08
+mpi_init.h  01.05.08   01.05.07   01.05.06   01.05.06   01.05.05   01.05.04
+mpi_targ.h  01.05.06   01.05.06   01.05.05   01.05.05   01.05.05   01.05.04
+mpi_fc.h    01.05.01   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
+mpi_lan.h   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
+mpi_raid.h  01.05.02   01.05.02   01.05.02   01.05.02   01.05.02   01.05.02
+mpi_tool.h  01.05.03   01.05.03   01.05.03   01.05.03   01.05.03   01.05.03
+mpi_inb.h   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01   01.05.01
+mpi_sas.h   01.05.04   01.05.03   01.05.02   01.05.01   01.05.01   01.05.01
+mpi_type.h  01.05.02   01.05.02   01.05.02   01.05.01   01.05.01   01.05.01

 Filename    01.05.08   01.05.07   01.05.06   01.05.05   01.05.04   01.05.03
 ----------  --------   --------   --------   --------   --------   --------

@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2005 LSI Logic Corporation.
+ *  Copyright (c) 2000-2006 LSI Logic Corporation.
  *
  *
  *           Name:  mpi_init.h
  *          Title:  MPI initiator mode messages and structures
  *  Creation Date:  June 8, 2000
  *
- *    mpi_init.h Version:  01.05.07
+ *    mpi_init.h Version:  01.05.08
 *
 *  Version History
 *  ---------------
@@ -53,6 +53,7 @@
 *  08-03-05  01.05.06  Fixed some MPI_SCSIIO32_MSGFLGS_ defines to make them
 *                      unique in the first 32 characters.
 *  03-27-06  01.05.07  Added Task Management type of Clear ACA.
+*  10-11-06  01.05.08  Shortened define for Task Management type of Clear ACA.
 *  --------------------------------------------------------------------------
 */
@@ -428,7 +429,7 @@ typedef struct _MSG_SCSI_TASK_MGMT
 #define MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET    (0x05)
 #define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET        (0x06)
 #define MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK            (0x07)
-#define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_ACA             (0x08)
+#define MPI_SCSITASKMGMT_TASKTYPE_CLR_ACA               (0x08)
 /* MsgFlags bits */
 #define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION   (0x00)

@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2005 LSI Logic Corporation.
+ *  Copyright (c) 2000-2006 LSI Logic Corporation.
  *
  *
  *           Name:  mpi_ioc.h
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
 *  Creation Date:  August 11, 2000
 *
- *    mpi_ioc.h Version:  01.05.11
+ *    mpi_ioc.h Version:  01.05.12
 *
 *  Version History
 *  ---------------
@@ -98,6 +98,14 @@
 *                      Added MPI_EVENT_SAS_INIT_TABLE_OVERFLOW and event
 *                      data structure.
 *                      Added MPI_EXT_IMAGE_TYPE_INITIALIZATION.
+*  10-11-06  01.05.12  Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
+*                      Added MaxInitiators field to PortFacts reply.
+*                      Added SAS Device Status Change ReasonCode for
+*                      asynchronous notificaiton.
+*                      Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
+*                      data structure.
+*                      Added new ImageType values for FWDownload and FWUpload
+*                      requests.
 *  --------------------------------------------------------------------------
 */
@@ -264,6 +272,7 @@ typedef struct _MSG_IOC_FACTS_REPLY
 #define MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID         (0x0002)
 #define MPI_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL            (0x0004)
 #define MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL       (0x0008)
+#define MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED        (0x0010)
 #define MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT             (0x01)
 #define MPI_IOCFACTS_FLAGS_REPLY_FIFO_HOST_SIGNAL       (0x02)
@@ -328,7 +337,8 @@ typedef struct _MSG_PORT_FACTS_REPLY
     U16                     MaxPostedCmdBuffers;    /* 1Ch */
     U16                     MaxPersistentIDs;       /* 1Eh */
     U16                     MaxLanBuckets;          /* 20h */
-    U16                     Reserved4;              /* 22h */
+    U8                      MaxInitiators;          /* 22h */
+    U8                      Reserved4;              /* 23h */
     U32                     Reserved5;              /* 24h */
 } MSG_PORT_FACTS_REPLY, MPI_POINTER PTR_MSG_PORT_FACTS_REPLY,
   PortFactsReply_t, MPI_POINTER pPortFactsReply_t;
@@ -487,6 +497,7 @@ typedef struct _MSG_EVENT_ACK_REPLY
 #define MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x00000018)
 #define MPI_EVENT_SAS_INIT_TABLE_OVERFLOW       (0x00000019)
 #define MPI_EVENT_SAS_SMP_ERROR                 (0x0000001A)
+#define MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE    (0x0000001B)
 #define MPI_EVENT_LOG_ENTRY_ADDED               (0x00000021)
 /* AckRequired field values */
@@ -593,6 +604,7 @@ typedef struct _EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
 #define MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL   (0x0A)
 #define MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL   (0x0B)
 #define MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL       (0x0C)
+#define MPI_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION        (0x0D)
 /* SCSI Event data for Queue Full event */
@@ -895,6 +907,54 @@ typedef struct _EVENT_DATA_SAS_INIT_TABLE_OVERFLOW
     MpiEventDataSasInitTableOverflow_t,
     MPI_POINTER pMpiEventDataSasInitTableOverflow_t;
+/* SAS Expander Status Change Event data */
+typedef struct _EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE
+{
+    U8                      ReasonCode;             /* 00h */
+    U8                      Reserved1;              /* 01h */
+    U16                     Reserved2;              /* 02h */
+    U8                      PhysicalPort;           /* 04h */
+    U8                      Reserved3;              /* 05h */
+    U16                     EnclosureHandle;        /* 06h */
+    U64                     SASAddress;             /* 08h */
+    U32                     DiscoveryStatus;        /* 10h */
+    U16                     DevHandle;              /* 14h */
+    U16                     ParentDevHandle;        /* 16h */
+    U16                     ExpanderChangeCount;    /* 18h */
+    U16                     ExpanderRouteIndexes;   /* 1Ah */
+    U8                      NumPhys;                /* 1Ch */
+    U8                      SASLevel;               /* 1Dh */
+    U8                      Flags;                  /* 1Eh */
+    U8                      Reserved4;              /* 1Fh */
+} EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE,
+  MPI_POINTER PTR_EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE,
+  MpiEventDataSasExpanderStatusChange_t,
+  MPI_POINTER pMpiEventDataSasExpanderStatusChange_t;
+/* values for ReasonCode field of SAS Expander Status Change Event data */
+#define MPI_EVENT_SAS_EXP_RC_ADDED                      (0x00)
+#define MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING             (0x01)
+/* values for DiscoveryStatus field of SAS Expander Status Change Event data */
+#define MPI_EVENT_SAS_EXP_DS_LOOP_DETECTED              (0x00000001)
+#define MPI_EVENT_SAS_EXP_DS_UNADDRESSABLE_DEVICE       (0x00000002)
+#define MPI_EVENT_SAS_EXP_DS_MULTIPLE_PORTS             (0x00000004)
+#define MPI_EVENT_SAS_EXP_DS_EXPANDER_ERR               (0x00000008)
+#define MPI_EVENT_SAS_EXP_DS_SMP_TIMEOUT                (0x00000010)
+#define MPI_EVENT_SAS_EXP_DS_OUT_ROUTE_ENTRIES          (0x00000020)
+#define MPI_EVENT_SAS_EXP_DS_INDEX_NOT_EXIST            (0x00000040)
+#define MPI_EVENT_SAS_EXP_DS_SMP_FUNCTION_FAILED        (0x00000080)
+#define MPI_EVENT_SAS_EXP_DS_SMP_CRC_ERROR              (0x00000100)
+#define MPI_EVENT_SAS_EXP_DS_SUBTRACTIVE_LINK           (0x00000200)
+#define MPI_EVENT_SAS_EXP_DS_TABLE_LINK                 (0x00000400)
+#define MPI_EVENT_SAS_EXP_DS_UNSUPPORTED_DEVICE         (0x00000800)
+/* values for Flags field of SAS Expander Status Change Event data */
+#define MPI_EVENT_SAS_EXP_FLAGS_ROUTE_TABLE_CONFIG      (0x02)
+#define MPI_EVENT_SAS_EXP_FLAGS_CONFIG_IN_PROGRESS      (0x01)
 /*****************************************************************************
 *
@@ -926,6 +986,10 @@ typedef struct _MSG_FW_DOWNLOAD
 #define MPI_FW_DOWNLOAD_ITYPE_BIOS              (0x02)
 #define MPI_FW_DOWNLOAD_ITYPE_NVDATA            (0x03)
 #define MPI_FW_DOWNLOAD_ITYPE_BOOTLOADER        (0x04)
+#define MPI_FW_DOWNLOAD_ITYPE_MANUFACTURING     (0x06)
+#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_1          (0x07)
+#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_2          (0x08)
+#define MPI_FW_DOWNLOAD_ITYPE_MEGARAID          (0x09)
 typedef struct _FWDownloadTCSGE
@@ -980,6 +1044,11 @@ typedef struct _MSG_FW_UPLOAD
 #define MPI_FW_UPLOAD_ITYPE_NVDATA              (0x03)
 #define MPI_FW_UPLOAD_ITYPE_BOOTLOADER          (0x04)
 #define MPI_FW_UPLOAD_ITYPE_FW_BACKUP           (0x05)
+#define MPI_FW_UPLOAD_ITYPE_MANUFACTURING       (0x06)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_1            (0x07)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_2            (0x08)
+#define MPI_FW_UPLOAD_ITYPE_MEGARAID            (0x09)
+#define MPI_FW_UPLOAD_ITYPE_COMPLETE            (0x0A)
 typedef struct _FWUploadTCSGE
 {

@ -1,4 +1,3 @@
/*************************************************************************** /***************************************************************************
* * * *
* Copyright 2003 LSI Logic Corporation. All rights reserved. * * Copyright 2003 LSI Logic Corporation. All rights reserved. *
@ -14,7 +13,7 @@
#define IOPI_IOCLOGINFO_H_INCLUDED #define IOPI_IOCLOGINFO_H_INCLUDED
#define SAS_LOGINFO_NEXUS_LOSS 0x31170000 #define SAS_LOGINFO_NEXUS_LOSS 0x31170000
#define SAS_LOGINFO_MASK 0xFFFF0000 #define SAS_LOGINFO_MASK 0xFFFF0000
/****************************************************************************/ /****************************************************************************/
/* IOC LOGINFO defines, 0x00000000 - 0x0FFFFFFF */ /* IOC LOGINFO defines, 0x00000000 - 0x0FFFFFFF */
@ -43,129 +42,172 @@
/****************************************************************************/ /****************************************************************************/
/* IOP LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = IOP */ /* IOP LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = IOP */
/****************************************************************************/ /****************************************************************************/
#define IOP_LOGINFO_CODE_INVALID_SAS_ADDRESS (0x00010000) #define IOP_LOGINFO_CODE_INVALID_SAS_ADDRESS (0x00010000)
#define IOP_LOGINFO_CODE_UNUSED2 (0x00020000) #define IOP_LOGINFO_CODE_UNUSED2 (0x00020000)
#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x00030000) #define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x00030000)
#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_RT (0x00030100) /* Route Table Entry not found */ #define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_RT (0x00030100) /* Route Table Entry not found */
#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PN (0x00030200) /* Invalid Page Number */ #define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PN (0x00030200) /* Invalid Page Number */
#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x00030300) /* Invalid FORM */ #define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x00030300) /* Invalid FORM */
#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x00030400) /* Invalid Page Type */ #define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x00030400) /* Invalid Page Type */
#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DNM (0x00030500) /* Device Not Mapped */ #define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DNM (0x00030500) /* Device Not Mapped */
#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PERSIST (0x00030600) /* Persistent Page not found */ #define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PERSIST (0x00030600) /* Persistent Page not found */
#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DEFAULT (0x00030700) /* Default Page not found */ #define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DEFAULT (0x00030700) /* Default Page not found */
#define IOP_LOGINFO_CODE_DIAG_MSG_ERROR (0x00040000) /* Error handling diag msg - or'd with diag status */ #define IOP_LOGINFO_CODE_FWUPLOAD_NO_FLASH_AVAILABLE (0x0003E000) /* Tried to upload from flash, but there is none */
#define IOP_LOGINFO_CODE_FWUPLOAD_UNKNOWN_IMAGE_TYPE (0x0003E001) /* ImageType field contents were invalid */
#define IOP_LOGINFO_CODE_FWUPLOAD_WRONG_IMAGE_SIZE (0x0003E002) /* ImageSize field in TCSGE was bad/offset in MfgPg 4 was wrong */
#define IOP_LOGINFO_CODE_FWUPLOAD_ENTIRE_FLASH_UPLOAD_FAILED (0x0003E003) /* Error occured while attempting to upload the entire flash */
#define IOP_LOGINFO_CODE_FWUPLOAD_REGION_UPLOAD_FAILED (0x0003E004) /* Error occured while attempting to upload single flash region */
#define IOP_LOGINFO_CODE_FWUPLOAD_DMA_FAILURE (0x0003E005) /* Problem occured while DMAing FW to host memory */
#define IOP_LOGINFO_CODE_TASK_TERMINATED (0x00050000) #define IOP_LOGINFO_CODE_DIAG_MSG_ERROR (0x00040000) /* Error handling diag msg - or'd with diag status */
#define IOP_LOGINFO_CODE_ENCL_MGMT_READ_ACTION_ERR0R (0x00060001) /* Read Action not supported for SEP msg */ #define IOP_LOGINFO_CODE_TASK_TERMINATED (0x00050000)
#define IOP_LOGINFO_CODE_ENCL_MGMT_INVALID_BUS_ID_ERR0R (0x00060002) /* Invalid Bus/ID in SEP msg */
#define IOP_LOGINFO_CODE_TARGET_ASSIST_TERMINATED (0x00070001) #define IOP_LOGINFO_CODE_ENCL_MGMT_READ_ACTION_ERR0R (0x00060001) /* Read Action not supported for SEP msg */
#define IOP_LOGINFO_CODE_TARGET_STATUS_SEND_TERMINATED (0x00070002) #define IOP_LOGINFO_CODE_ENCL_MGMT_INVALID_BUS_ID_ERR0R (0x00060002) /* Invalid Bus/ID in SEP msg */
#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_ALL_IO (0x00070003)
#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO (0x00070004) #define IOP_LOGINFO_CODE_TARGET_ASSIST_TERMINATED (0x00070001)
#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO_REQ (0x00070005) #define IOP_LOGINFO_CODE_TARGET_STATUS_SEND_TERMINATED (0x00070002)
#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_ALL_IO (0x00070003)
#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO (0x00070004)
#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO_REQ (0x00070005)
/****************************************************************************/ /****************************************************************************/
/* PL LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = PL */ /* PL LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = PL */
/****************************************************************************/ /****************************************************************************/
#define PL_LOGINFO_CODE_OPEN_FAILURE (0x00010000) #define PL_LOGINFO_CODE_OPEN_FAILURE (0x00010000) /* see SUB_CODE_OPEN_FAIL_ below */
#define PL_LOG_INFO_CODE_OPEN_FAILURE_NO_DEST_TIME_OUT (0x00010001)
#define PL_LOGINFO_CODE_OPEN_FAILURE_BAD_DESTINATION (0x00010011)
#define PL_LOGINFO_CODE_OPEN_FAILURE_PROTOCOL_NOT_SUPPORTED (0x00010013)
#define PL_LOGINFO_CODE_OPEN_FAILURE_STP_RESOURCES_BSY (0x00010018)
#define PL_LOGINFO_CODE_OPEN_FAILURE_WRONG_DESTINATION (0x00010019)
#define PL_LOGINFO_CODE_OPEN_FAILURE_ORR_TIMEOUT (0X0001001A)
#define PL_LOGINFO_CODE_OPEN_FAILURE_PATHWAY_BLOCKED (0x0001001B)
#define PL_LOGINFO_CODE_OPEN_FAILURE_AWT_MAXED (0x0001001C)
#define PL_LOGINFO_CODE_INVALID_SGL (0x00020000)
#define PL_LOGINFO_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00030000)
#define PL_LOGINFO_CODE_FRAME_XFER_ERROR (0x00040000)
#define PL_LOGINFO_CODE_TX_FM_CONNECTED_LOW (0x00050000)
#define PL_LOGINFO_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00060000)
#define PL_LOGINFO_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00070000)
#define PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00080000)
#define PL_LOGINFO_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00090000)
#define PL_LOGINFO_CODE_RX_FM_INVALID_MESSAGE (0x000A0000)
#define PL_LOGINFO_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x000B0000)
#define PL_LOGINFO_CODE_RX_FM_CURRENT_FRAME_ERROR (0x000C0000)
#define PL_LOGINFO_CODE_SATA_LINK_DOWN (0x000D0000)
#define PL_LOGINFO_CODE_DISCOVERY_SATA_INIT_W_IOS (0x000E0000)
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x000F0000)
#define PL_LOGINFO_CODE_CONFIG_PL_NOT_INITIALIZED (0x000F0001) /* PL not yet initialized, can't do config page req. */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x000F0100) /* Invalid Page Type */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NUM_PHYS (0x000F0200) /* Invalid Number of Phys */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NOT_IMP (0x000F0300) /* Case Not Handled */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_DEV (0x000F0400) /* No Device Found */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x000F0500) /* Invalid FORM */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PHY (0x000F0600) /* Invalid Phy */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_OWNER (0x000F0700) /* No Owner Found */
#define PL_LOGINFO_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00100000)
#define PL_LOGINFO_CODE_RESET (0x00110000) /* See Sub-Codes below */
#define PL_LOGINFO_CODE_ABORT (0x00120000) /* See Sub-Codes below */
#define PL_LOGINFO_CODE_IO_NOT_YET_EXECUTED (0x00130000)
#define PL_LOGINFO_CODE_IO_EXECUTED (0x00140000)
#define PL_LOGINFO_CODE_PERS_RESV_OUT_NOT_AFFIL_OWNER (0x00150000)
#define PL_LOGINFO_CODE_OPEN_TXDMA_ABORT (0x00160000)
#define PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY (0x00170000)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE (0x00000100)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_NO_DEST_TIMEOUT (0x00000101)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ORR_TIMEOUT (0x0000011A) /* Open Reject (Retry) Timeout */
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_PATHWAY_BLOCKED (0x0000011B)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_AWT_MAXED (0x0000011C) /* Arbitration Wait Timer Maxed */
#define PL_LOGINFO_SUB_CODE_TARGET_BUS_RESET (0x00000120) #define PL_LOGINFO_SUB_CODE_OPEN_FAIL_NO_DEST_TIME_OUT (0x00000001)
#define PL_LOGINFO_SUB_CODE_TRANSPORT_LAYER (0x00000130) /* Leave lower nibble (1-f) reserved. */ #define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PATHWAY_BLOCKED (0x00000002)
#define PL_LOGINFO_SUB_CODE_PORT_LAYER (0x00000140) /* Leave lower nibble (1-f) reserved. */ #define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_CONTINUE0 (0x00000003)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_CONTINUE1 (0x00000004)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_INITIALIZE0 (0x00000005)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_INITIALIZE1 (0x00000006)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_STOP0 (0x00000007)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_STOP1 (0x00000008)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RETRY (0x00000009)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_BREAK (0x0000000A)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_UNUSED_0B (0x0000000B)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_OPEN_TIMEOUT_EXP (0x0000000C)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_UNUSED_0D (0x0000000D)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_DVTBLE_ACCSS_FAIL (0x0000000E)
#define PL_LOGINFO_SUB CODE_OPEN_FAIL_BAD_DEST (0x00000011)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RATE_NOT_SUPP (0x00000012)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PROT_NOT_SUPP (0x00000013)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON0 (0x00000014)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON1 (0x00000015)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON2 (0x00000016)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON3 (0x00000017)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_STP_RESOURCES_BSY (0x00000018)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_WRONG_DESTINATION (0x00000019)
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PATH_BLOCKED (0x0000001B) /* Retry Timeout */
#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_AWT_MAXED (0x0000001C) /* Retry Timeout */
#define PL_LOGINFO_SUB_CODE_INVALID_SGL (0x00000200)
#define PL_LOGINFO_SUB_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00000300) #define PL_LOGINFO_CODE_INVALID_SGL (0x00020000)
#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400) #define PL_LOGINFO_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00030000)
#define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW (0x00000500) #define PL_LOGINFO_CODE_FRAME_XFER_ERROR (0x00040000)
#define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00000600) #define PL_LOGINFO_CODE_TX_FM_CONNECTED_LOW (0x00050000)
#define PL_LOGINFO_SUB_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00000700) #define PL_LOGINFO_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00060000)
#define PL_LOGINFO_SUB_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00000800) #define PL_LOGINFO_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00070000)
#define PL_LOGINFO_SUB_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00000900) #define PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00080000)
#define PL_LOGINFO_SUB_CODE_RX_FM_INVALID_MESSAGE (0x00000A00) #define PL_LOGINFO_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00090000)
#define PL_LOGINFO_SUB_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x00000B00) #define PL_LOGINFO_CODE_RX_FM_INVALID_MESSAGE (0x000A0000)
#define PL_LOGINFO_SUB_CODE_RX_FM_CURRENT_FRAME_ERROR (0x00000C00) #define PL_LOGINFO_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x000B0000)
#define PL_LOGINFO_SUB_CODE_SATA_LINK_DOWN (0x00000D00) #define PL_LOGINFO_CODE_RX_FM_CURRENT_FRAME_ERROR (0x000C0000)
#define PL_LOGINFO_SUB_CODE_DISCOVERY_SATA_INIT_W_IOS (0x00000E00) #define PL_LOGINFO_CODE_SATA_LINK_DOWN (0x000D0000)
#define PL_LOGINFO_SUB_CODE_DISCOVERY_REMOTE_SEP_RESET (0x00000E01) #define PL_LOGINFO_CODE_DISCOVERY_SATA_INIT_W_IOS (0x000E0000)
#define PL_LOGINFO_SUB_CODE_SECOND_OPEN (0x00000F00) #define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x000F0000)
#define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00001000) #define PL_LOGINFO_CODE_CONFIG_PL_NOT_INITIALIZED (0x000F0001) /* PL not yet initialized, can't do config page req. */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x000F0100) /* Invalid Page Type */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NUM_PHYS (0x000F0200) /* Invalid Number of Phys */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NOT_IMP (0x000F0300) /* Case Not Handled */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_DEV (0x000F0400) /* No Device Found */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x000F0500) /* Invalid FORM */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PHY (0x000F0600) /* Invalid Phy */
#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_OWNER (0x000F0700) /* No Owner Found */
#define PL_LOGINFO_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00100000)
#define PL_LOGINFO_CODE_RESET (0x00110000) /* See Sub-Codes below (PL_LOGINFO_SUB_CODE) */
#define PL_LOGINFO_CODE_ABORT (0x00120000) /* See Sub-Codes below (PL_LOGINFO_SUB_CODE)*/
#define PL_LOGINFO_CODE_IO_NOT_YET_EXECUTED (0x00130000)
#define PL_LOGINFO_CODE_IO_EXECUTED (0x00140000)
#define PL_LOGINFO_CODE_PERS_RESV_OUT_NOT_AFFIL_OWNER (0x00150000)
#define PL_LOGINFO_CODE_OPEN_TXDMA_ABORT (0x00160000)
#define PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY (0x00170000)
#define PL_LOGINFO_CODE_IO_CANCELLED_DUE_TO_R_ERR (0x00180000)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE (0x00000100)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_NO_DEST_TIMEOUT (0x00000101)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_SATA_NEG_RATE_2HI (0x00000102)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_RATE_NOT_SUPPORTED (0x00000103)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_BREAK (0x00000104)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ZONE_VIOLATION (0x00000114)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON0 (0x00000114) /* Open Reject (Zone Violation) - available on SAS-2 devices */
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON1 (0x00000115)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON2 (0x00000116)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON3 (0x00000117)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ORR_TIMEOUT (0x0000011A) /* Open Reject (Retry) Timeout */
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_PATH_BLOCKED (0x0000011B)
#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_AWT_MAXED (0x0000011C) /* Arbitration Wait Timer Maxed */
#define PL_LOGINFO_SUB_CODE_TARGET_BUS_RESET (0x00000120)
#define PL_LOGINFO_SUB_CODE_TRANSPORT_LAYER (0x00000130) /* Leave lower nibble (1-f) reserved. */
#define PL_LOGINFO_SUB_CODE_PORT_LAYER (0x00000140) /* Leave lower nibble (1-f) reserved. */
#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_FAILURE (0x00200000) /* Can't get SMP Frame */ #define PL_LOGINFO_SUB_CODE_INVALID_SGL (0x00000200)
#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_READ_ERROR (0x00200010) /* Error occured on SMP Read */ #define PL_LOGINFO_SUB_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00000300)
#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_WRITE_ERROR (0x00200020) /* Error occured on SMP Write */ #define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400) /* Bits 0-3 encode Transport Status Register (offset 0x08) */
#define PL_LOGINFO_CODE_ENCL_MGMT_NOT_SUPPORTED_ON_ENCL (0x00200040) /* Encl Mgmt services not available for this WWID */ /* Bit 0 is Status Bit 0: FrameXferErr */
#define PL_LOGINFO_CODE_ENCL_MGMT_ADDR_MODE_NOT_SUPPORTED (0x00200050) /* Address Mode not suppored */ /* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */
#define PL_LOGINFO_CODE_ENCL_MGMT_BAD_SLOT_NUM (0x00200060) /* Invalid Slot Number in SEP Msg */ /* Bit 3 is Status Bit 18 WriteDataLenghtGTDataLengthErr */
#define PL_LOGINFO_CODE_ENCL_MGMT_SGPIO_NOT_PRESENT (0x00200070) /* SGPIO not present/enabled */
#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_NOT_CONFIGURED (0x00200080) /* GPIO not configured */
#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_FRAME_ERROR (0x00200090) /* GPIO can't allocate a frame */
#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_CONFIG_PAGE_ERROR (0x002000A0) /* GPIO failed config page request */
#define PL_LOGINFO_CODE_ENCL_MGMT_SES_FRAME_ALLOC_ERROR (0x002000B0) /* Can't get frame for SES command */
#define PL_LOGINFO_CODE_ENCL_MGMT_SES_IO_ERROR (0x002000C0) /* I/O execution error */
#define PL_LOGINFO_CODE_ENCL_MGMT_SES_RETRIES_EXHAUSTED (0x002000D0) /* SEP I/O retries exhausted */
#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_ALLOC_ERROR (0x002000E0) /* Can't get frame for SMP command */
#define PL_LOGINFO_DA_SEP_NOT_PRESENT (0x00200100) /* SEP not present when msg received */ #define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW (0x00000500)
#define PL_LOGINFO_DA_SEP_SINGLE_THREAD_ERROR (0x00200101) /* Can only accept 1 msg at a time */ #define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00000600)
#define PL_LOGINFO_DA_SEP_ISTWI_INTR_IN_IDLE_STATE (0x00200102) /* ISTWI interrupt recvd. while IDLE */ #define PL_LOGINFO_SUB_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00000700)
#define PL_LOGINFO_DA_SEP_RECEIVED_NACK_FROM_SLAVE (0x00200103) /* SEP NACK'd, it is busy */ #define PL_LOGINFO_SUB_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00000800)
#define PL_LOGINFO_DA_SEP_DID_NOT_RECEIVE_ACK (0x00200104) /* SEP didn't rcv. ACK (Last Rcvd Bit = 1) */ #define PL_LOGINFO_SUB_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00000900)
#define PL_LOGINFO_DA_SEP_BAD_STATUS_HDR_CHKSUM (0x00200105) /* SEP stopped or sent bad chksum in Hdr */ #define PL_LOGINFO_SUB_CODE_RX_FM_INVALID_MESSAGE (0x00000A00)
#define PL_LOGINFO_DA_SEP_STOP_ON_DATA (0x00200106) /* SEP stopped while transfering data */ #define PL_LOGINFO_SUB_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x00000B00)
#define PL_LOGINFO_DA_SEP_STOP_ON_SENSE_DATA (0x00200107) /* SEP stopped while transfering sense data */ #define PL_LOGINFO_SUB_CODE_RX_FM_CURRENT_FRAME_ERROR (0x00000C00)
#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_1 (0x00200108) /* SEP returned unknown scsi status */ #define PL_LOGINFO_SUB_CODE_SATA_LINK_DOWN (0x00000D00)
#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_2 (0x00200109) /* SEP returned unknown scsi status */ #define PL_LOGINFO_SUB_CODE_DISCOVERY_SATA_INIT_W_IOS (0x00000E00)
#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP (0x0020010A) /* SEP returned bad chksum after STOP */ #define PL_LOGINFO_SUB_CODE_DISCOVERY_REMOTE_SEP_RESET (0x00000E01)
#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP_GETDATA  (0x0020010B) /* SEP returned bad chksum after STOP while getting data */ #define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00001000)
#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND (0x0020010C) /* SEP doesn't support CDB opcode */ #define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00001000)
#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_FAILURE (0x00200000) /* Can't get SMP Frame */
#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_READ_ERROR           (0x00200010) /* Error occurred on SMP Read */
#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_WRITE_ERROR          (0x00200020) /* Error occurred on SMP Write */
#define PL_LOGINFO_CODE_ENCL_MGMT_NOT_SUPPORTED_ON_ENCL (0x00200040) /* Encl Mgmt services not available for this WWID */
#define PL_LOGINFO_CODE_ENCL_MGMT_ADDR_MODE_NOT_SUPPORTED  (0x00200050) /* Address Mode not supported */
#define PL_LOGINFO_CODE_ENCL_MGMT_BAD_SLOT_NUM (0x00200060) /* Invalid Slot Number in SEP Msg */
#define PL_LOGINFO_CODE_ENCL_MGMT_SGPIO_NOT_PRESENT (0x00200070) /* SGPIO not present/enabled */
#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_NOT_CONFIGURED (0x00200080) /* GPIO not configured */
#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_FRAME_ERROR (0x00200090) /* GPIO can't allocate a frame */
#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_CONFIG_PAGE_ERROR (0x002000A0) /* GPIO failed config page request */
#define PL_LOGINFO_CODE_ENCL_MGMT_SES_FRAME_ALLOC_ERROR (0x002000B0) /* Can't get frame for SES command */
#define PL_LOGINFO_CODE_ENCL_MGMT_SES_IO_ERROR (0x002000C0) /* I/O execution error */
#define PL_LOGINFO_CODE_ENCL_MGMT_SES_RETRIES_EXHAUSTED (0x002000D0) /* SEP I/O retries exhausted */
#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_ALLOC_ERROR (0x002000E0) /* Can't get frame for SMP command */
#define PL_LOGINFO_DA_SEP_NOT_PRESENT (0x00200100) /* SEP not present when msg received */
#define PL_LOGINFO_DA_SEP_SINGLE_THREAD_ERROR (0x00200101) /* Can only accept 1 msg at a time */
#define PL_LOGINFO_DA_SEP_ISTWI_INTR_IN_IDLE_STATE (0x00200102) /* ISTWI interrupt recvd. while IDLE */
#define PL_LOGINFO_DA_SEP_RECEIVED_NACK_FROM_SLAVE (0x00200103) /* SEP NACK'd, it is busy */
#define PL_LOGINFO_DA_SEP_DID_NOT_RECEIVE_ACK (0x00200104) /* SEP didn't rcv. ACK (Last Rcvd Bit = 1) */
#define PL_LOGINFO_DA_SEP_BAD_STATUS_HDR_CHKSUM (0x00200105) /* SEP stopped or sent bad chksum in Hdr */
#define PL_LOGINFO_DA_SEP_STOP_ON_DATA                     (0x00200106) /* SEP stopped while transferring data */
#define PL_LOGINFO_DA_SEP_STOP_ON_SENSE_DATA               (0x00200107) /* SEP stopped while transferring sense data */
#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_1 (0x00200108) /* SEP returned unknown scsi status */
#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_2 (0x00200109) /* SEP returned unknown scsi status */
#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP (0x0020010A) /* SEP returned bad chksum after STOP */
#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP_GETDATA  (0x0020010B) /* SEP returned bad chksum after STOP while getting data */
#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND (0x0020010C) /* SEP doesn't support CDB opcode f/w location 1 */
#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND_2 (0x0020010D) /* SEP doesn't support CDB opcode f/w location 2 */
#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND_3 (0x0020010E) /* SEP doesn't support CDB opcode f/w location 3 */
/****************************************************************************/ /****************************************************************************/
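The PL_LOGINFO_* values above pack a code and a sub-code into one 32-bit LogInfo word. A minimal standalone decoder sketch follows; the bit layout assumed here (4-bit bus type, 4-bit originator, 8-bit code, 16-bit sub-code) and the sample value are assumptions inferred from the define values, not something stated in this diff.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* sample value only; any LogInfo dword reported by the firmware works */
	uint32_t loginfo = 0x31120101;

	/* assumed layout: [31:28] bus type, [27:24] originator,
	 * [23:16] code, [15:0] sub-code */
	printf("bus_type=0x%x originator=0x%x code=0x%02x sub_code=0x%04x\n",
	       (loginfo >> 28) & 0xf, (loginfo >> 24) & 0xf,
	       (loginfo >> 16) & 0xff, loginfo & 0xffff);
	return 0;
}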


@ -1,12 +1,12 @@
/* /*
* Copyright (c) 2004 LSI Logic Corporation. * Copyright (c) 2004-2006 LSI Logic Corporation.
* *
* *
* Name: mpi_sas.h * Name: mpi_sas.h
* Title: MPI Serial Attached SCSI structures and definitions * Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: August 19, 2004 * Creation Date: August 19, 2004
* *
* mpi_sas.h Version: 01.05.03 * mpi_sas.h Version: 01.05.04
* *
* Version History * Version History
* --------------- * ---------------
@ -21,6 +21,8 @@
* and Remove Device operations to SAS IO Unit Control. * and Remove Device operations to SAS IO Unit Control.
* Added DevHandle field to SAS IO Unit Control request and * Added DevHandle field to SAS IO Unit Control request and
* reply. * reply.
* 10-11-06 01.05.04 Fixed the name of a define for Operation field of SAS IO
* Unit Control request.
* -------------------------------------------------------------------------- * --------------------------------------------------------------------------
*/ */
@ -237,7 +239,8 @@ typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
#define MPI_SAS_OP_SEND_PRIMITIVE (0x0A) #define MPI_SAS_OP_SEND_PRIMITIVE (0x0A)
#define MPI_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) #define MPI_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
#define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) #define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
#define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE (0x0D) #define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE (0x0D) /* obsolete name */
#define MPI_SAS_OP_REMOVE_DEVICE (0x0D)
/* values for the PrimFlags field */ /* values for the PrimFlags field */
#define MPI_SAS_PRIMFLAGS_SINGLE (0x08) #define MPI_SAS_PRIMFLAGS_SINGLE (0x08)

File diff suppressed because it is too large.


@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware. * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
* *
* Copyright (c) 1999-2007 LSI Logic Corporation * Copyright (c) 1999-2007 LSI Logic Corporation
* (mailto:mpt_linux_developer@lsil.com) * (mailto:mpt_linux_developer@lsi.com)
* *
*/ */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -75,8 +75,8 @@
#define COPYRIGHT "Copyright (c) 1999-2007 " MODULEAUTHOR #define COPYRIGHT "Copyright (c) 1999-2007 " MODULEAUTHOR
#endif #endif
#define MPT_LINUX_VERSION_COMMON "3.04.03" #define MPT_LINUX_VERSION_COMMON "3.04.04"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.03" #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.04"
#define WHAT_MAGIC_STRING "@" "(" "#" ")" #define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \ #define show_mptmod_ver(s,ver) \
@ -172,6 +172,9 @@
#define MPT_SCSI_SG_DEPTH 40 #define MPT_SCSI_SG_DEPTH 40
#endif #endif
/* debug print string length used for events and iocstatus */
# define EVENT_DESCR_STR_SZ 100
#ifdef __KERNEL__ /* { */ #ifdef __KERNEL__ /* { */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -334,8 +337,8 @@ typedef struct _VirtTarget {
struct scsi_target *starget; struct scsi_target *starget;
u8 tflags; u8 tflags;
u8 ioc_id; u8 ioc_id;
u8 target_id; u8 id;
u8 bus_id; u8 channel;
u8 minSyncFactor; /* 0xFF is async */ u8 minSyncFactor; /* 0xFF is async */
u8 maxOffset; /* 0 if async */ u8 maxOffset; /* 0 if async */
u8 maxWidth; /* 0 if narrow, 1 if wide */ u8 maxWidth; /* 0 if narrow, 1 if wide */
@ -344,13 +347,12 @@ typedef struct _VirtTarget {
u8 type; /* byte 0 of Inquiry data */ u8 type; /* byte 0 of Inquiry data */
u8 deleted; /* target in process of being removed */ u8 deleted; /* target in process of being removed */
u32 num_luns; u32 num_luns;
u32 luns[8]; /* Max LUNs is 256 */
} VirtTarget; } VirtTarget;
typedef struct _VirtDevice { typedef struct _VirtDevice {
VirtTarget *vtarget; VirtTarget *vtarget;
u8 configured_lun; u8 configured_lun;
u32 lun; int lun;
} VirtDevice; } VirtDevice;
/* /*
@ -412,7 +414,7 @@ typedef struct _MPT_IOCTL {
u8 rsvd; u8 rsvd;
u8 status; /* current command status */ u8 status; /* current command status */
u8 reset; /* 1 if bus reset allowed */ u8 reset; /* 1 if bus reset allowed */
u8 target; /* target for reset */ u8 id; /* target for reset */
struct mutex ioctl_mutex; struct mutex ioctl_mutex;
} MPT_IOCTL; } MPT_IOCTL;
@ -483,10 +485,24 @@ typedef struct _SasCfgData {
*/ */
}SasCfgData; }SasCfgData;
/*
 * Inactive volume linked list of raid component data
* @inactive_list
*/
struct inactive_raid_component_info {
struct list_head list;
u8 volumeID; /* volume target id */
u8 volumeBus; /* volume channel */
IOC_3_PHYS_DISK d; /* phys disk info */
};
typedef struct _RaidCfgData { typedef struct _RaidCfgData {
IOCPage2_t *pIocPg2; /* table of Raid Volumes */ IOCPage2_t *pIocPg2; /* table of Raid Volumes */
IOCPage3_t *pIocPg3; /* table of physical disks */ IOCPage3_t *pIocPg3; /* table of physical disks */
int isRaid; /* bit field, 1 if RAID */ struct semaphore inactive_list_mutex;
struct list_head	inactive_list; /* linked list of physical
disks that belong to
inactive volumes */
}RaidCfgData; }RaidCfgData;
typedef struct _FcCfgData { typedef struct _FcCfgData {
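The new inactive_list added to RaidCfgData above is meant to be walked under inactive_list_mutex. A minimal kernel-context sketch, using only the member names visible in this hunk (the helper name and the exact check are illustrative assumptions, not part of the commit):

/* returns 1 if (channel, id) addresses a volume recorded as inactive */
static int sketch_volume_is_inactive(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
	struct inactive_raid_component_info *component;
	int rc = 0;

	down(&ioc->raid_data.inactive_list_mutex);
	list_for_each_entry(component, &ioc->raid_data.inactive_list, list) {
		if (component->volumeBus == channel &&
		    component->volumeID == id) {
			rc = 1;
			break;
		}
	}
	up(&ioc->raid_data.inactive_list_mutex);
	return rc;
}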
@ -528,6 +544,8 @@ typedef struct _MPT_ADAPTER
u32 mem_phys; /* == f4020000 (mmap) */ u32 mem_phys; /* == f4020000 (mmap) */
u32 pio_mem_phys; /* Programmed IO (downloadboot) */ u32 pio_mem_phys; /* Programmed IO (downloadboot) */
int mem_size; /* mmap memory size */ int mem_size; /* mmap memory size */
int number_of_buses;
int devices_per_bus;
int alloc_total; int alloc_total;
u32 last_state; u32 last_state;
int active; int active;
@ -607,6 +625,8 @@ typedef struct _MPT_ADAPTER
u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */ u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
LANPage0_t lan_cnfg_page0; LANPage0_t lan_cnfg_page0;
LANPage1_t lan_cnfg_page1; LANPage1_t lan_cnfg_page1;
u8 ir_firmware; /* =1 if IR firmware detected */
/* /*
* Description: errata_flag_1064 * Description: errata_flag_1064
* If a PCIX read occurs within 1 or 2 cycles after the chip receives * If a PCIX read occurs within 1 or 2 cycles after the chip receives
@ -790,12 +810,6 @@ typedef struct _mpt_sge {
#define ddvprintk(x) #define ddvprintk(x)
#endif #endif
#ifdef MPT_DEBUG_NEGO
#define dnegoprintk(x) printk x
#else
#define dnegoprintk(x)
#endif
#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) #if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
#define ddvtprintk(x) printk x #define ddvtprintk(x) printk x
#else #else
@ -957,7 +971,6 @@ typedef struct _MPT_SCSI_HOST {
int port; int port;
u32 pad0; u32 pad0;
struct scsi_cmnd **ScsiLookup; struct scsi_cmnd **ScsiLookup;
VirtTarget **Targets;
MPT_LOCAL_REPLY *pLocal; /* used for internal commands */ MPT_LOCAL_REPLY *pLocal; /* used for internal commands */
struct timer_list timer; struct timer_list timer;
/* Pool of memory for holding SCpnts before doing /* Pool of memory for holding SCpnts before doing
@ -981,6 +994,7 @@ typedef struct _MPT_SCSI_HOST {
int scandv_wait_done; int scandv_wait_done;
long last_queue_full; long last_queue_full;
u16 tm_iocstatus; u16 tm_iocstatus;
struct list_head target_reset_list;
} MPT_SCSI_HOST; } MPT_SCSI_HOST;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -1046,6 +1060,7 @@ extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
extern int mpt_findImVolumes(MPT_ADAPTER *ioc); extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
/* /*
* Public data decl's... * Public data decl's...


@ -5,7 +5,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware. * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
* *
* Copyright (c) 1999-2007 LSI Logic Corporation * Copyright (c) 1999-2007 LSI Logic Corporation
* (mailto:mpt_linux_developer@lsil.com) * (mailto:mpt_linux_developer@lsi.com)
* *
*/ */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -313,7 +313,7 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
*/ */
dctlprintk((MYIOC_s_INFO_FMT "Calling HardReset! \n", dctlprintk((MYIOC_s_INFO_FMT "Calling HardReset! \n",
ioctl->ioc->name)); ioctl->ioc->name));
mpt_HardResetHandler(ioctl->ioc, NO_SLEEP); mpt_HardResetHandler(ioctl->ioc, CAN_SLEEP);
} }
return; return;
@ -361,7 +361,7 @@ static int mptctl_bus_reset(MPT_IOCTL *ioctl)
ioctl->ioc->name, mf)); ioctl->ioc->name, mf));
pScsiTm = (SCSITaskMgmt_t *) mf; pScsiTm = (SCSITaskMgmt_t *) mf;
pScsiTm->TargetID = ioctl->target; pScsiTm->TargetID = ioctl->id;
pScsiTm->Bus = hd->port; /* 0 */ pScsiTm->Bus = hd->port; /* 0 */
pScsiTm->ChainOffset = 0; pScsiTm->ChainOffset = 0;
pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
@ -1159,15 +1159,12 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
struct mpt_ioctl_iocinfo *karg; struct mpt_ioctl_iocinfo *karg;
MPT_ADAPTER *ioc; MPT_ADAPTER *ioc;
struct pci_dev *pdev; struct pci_dev *pdev;
struct Scsi_Host *sh;
MPT_SCSI_HOST *hd;
int iocnum; int iocnum;
int numDevices = 0;
unsigned int max_id;
int ii;
unsigned int port; unsigned int port;
int cim_rev; int cim_rev;
u8 revision; u8 revision;
struct scsi_device *sdev;
VirtDevice *vdev;
dctlprintk((": mptctl_getiocinfo called.\n")); dctlprintk((": mptctl_getiocinfo called.\n"));
/* Add of PCI INFO results in unaligned access for /* Add of PCI INFO results in unaligned access for
@ -1257,23 +1254,16 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
/* Get number of devices /* Get number of devices
*/ */
if ((sh = ioc->sh) != NULL) { karg->numDevices = 0;
/* sh->max_id = maximum target ID + 1 if (ioc->sh) {
*/ shost_for_each_device(sdev, ioc->sh) {
max_id = sh->max_id - 1; vdev = sdev->hostdata;
hd = (MPT_SCSI_HOST *) sh->hostdata; if (vdev->vtarget->tflags &
MPT_TARGET_FLAGS_RAID_COMPONENT)
/* Check all of the target structures and continue;
* keep a counter. karg->numDevices++;
*/
if (hd && hd->Targets) {
for (ii = 0; ii <= max_id; ii++) {
if (hd->Targets[ii])
numDevices++;
}
} }
} }
karg->numDevices = numDevices;
/* Set the BIOS and FW Version /* Set the BIOS and FW Version
*/ */
@ -1319,21 +1309,16 @@ mptctl_gettargetinfo (unsigned long arg)
struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_targetinfo karg; struct mpt_ioctl_targetinfo karg;
MPT_ADAPTER *ioc; MPT_ADAPTER *ioc;
struct Scsi_Host *sh; VirtDevice *vdev;
MPT_SCSI_HOST *hd;
VirtTarget *vdev;
char *pmem; char *pmem;
int *pdata; int *pdata;
IOCPage2_t *pIoc2;
IOCPage3_t *pIoc3;
int iocnum; int iocnum;
int numDevices = 0; int numDevices = 0;
unsigned int max_id; int lun;
int id, jj, indexed_lun, lun_index;
u32 lun;
int maxWordsLeft; int maxWordsLeft;
int numBytes; int numBytes;
u8 port, devType, bus_id; u8 port;
struct scsi_device *sdev;
dctlprintk(("mptctl_gettargetinfo called.\n")); dctlprintk(("mptctl_gettargetinfo called.\n"));
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) { if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) {
@ -1389,74 +1374,22 @@ mptctl_gettargetinfo (unsigned long arg)
/* Get number of devices /* Get number of devices
*/ */
if ((sh = ioc->sh) != NULL) { if (ioc->sh){
shost_for_each_device(sdev, ioc->sh) {
max_id = sh->max_id - 1; if (!maxWordsLeft)
hd = (MPT_SCSI_HOST *) sh->hostdata; continue;
vdev = sdev->hostdata;
/* Check all of the target structures. if (vdev->vtarget->tflags &
* Save the Id and increment the counter, MPT_TARGET_FLAGS_RAID_COMPONENT)
* if ptr non-null. continue;
* sh->max_id = maximum target ID + 1 lun = (vdev->vtarget->raidVolume) ? 0x80 : vdev->lun;
*/ *pdata = (((u8)lun << 16) + (vdev->vtarget->channel << 8) +
if (hd && hd->Targets) { (vdev->vtarget->id ));
mpt_findImVolumes(ioc); pdata++;
pIoc2 = ioc->raid_data.pIocPg2; numDevices++;
for ( id = 0; id <= max_id; ) { --maxWordsLeft;
if ( pIoc2 && pIoc2->NumActiveVolumes ) {
if ( id == pIoc2->RaidVolume[0].VolumeID ) {
if (maxWordsLeft <= 0) {
printk(KERN_ERR "mptctl_gettargetinfo - "
"buffer is full but volume is available on ioc %d\n, numDevices=%d", iocnum, numDevices);
goto data_space_full;
}
if ( ( pIoc2->RaidVolume[0].Flags & MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE ) == 0 )
devType = 0x80;
else
devType = 0xC0;
bus_id = pIoc2->RaidVolume[0].VolumeBus;
numDevices++;
*pdata = ( (devType << 24) | (bus_id << 8) | id );
dctlprintk((KERN_ERR "mptctl_gettargetinfo - "
"volume ioc=%d target=%x numDevices=%d pdata=%p\n", iocnum, *pdata, numDevices, pdata));
pdata++;
--maxWordsLeft;
goto next_id;
} else {
pIoc3 = ioc->raid_data.pIocPg3;
for ( jj = 0; jj < pIoc3->NumPhysDisks; jj++ ) {
if ( pIoc3->PhysDisk[jj].PhysDiskID == id )
goto next_id;
}
}
}
if ( (vdev = hd->Targets[id]) ) {
for (jj = 0; jj <= MPT_LAST_LUN; jj++) {
lun_index = (jj >> 5);
indexed_lun = (jj % 32);
lun = (1 << indexed_lun);
if (vdev->luns[lun_index] & lun) {
if (maxWordsLeft <= 0) {
printk(KERN_ERR "mptctl_gettargetinfo - "
"buffer is full but more targets are available on ioc %d numDevices=%d\n", iocnum, numDevices);
goto data_space_full;
}
bus_id = vdev->bus_id;
numDevices++;
*pdata = ( (jj << 16) | (bus_id << 8) | id );
dctlprintk((KERN_ERR "mptctl_gettargetinfo - "
"target ioc=%d target=%x numDevices=%d pdata=%p\n", iocnum, *pdata, numDevices, pdata));
pdata++;
--maxWordsLeft;
}
}
}
next_id:
id++;
}
} }
} }
data_space_full:
karg.numDevices = numDevices; karg.numDevices = numDevices;
/* Copy part of the data from kernel memory to user memory /* Copy part of the data from kernel memory to user memory
@ -1821,6 +1754,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
int msgContext; int msgContext;
u16 req_idx; u16 req_idx;
ulong timeout; ulong timeout;
struct scsi_device *sdev;
dctlprintk(("mptctl_do_mpt_command called.\n")); dctlprintk(("mptctl_do_mpt_command called.\n"));
bufIn.kptr = bufOut.kptr = NULL; bufIn.kptr = bufOut.kptr = NULL;
@ -1902,14 +1836,13 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_SCSI_IO_REQUEST:
if (ioc->sh) { if (ioc->sh) {
SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
VirtTarget *pTarget = NULL;
MPT_SCSI_HOST *hd = NULL;
int qtag = MPI_SCSIIO_CONTROL_UNTAGGED; int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
int scsidir = 0; int scsidir = 0;
int target = (int) pScsiReq->TargetID;
int dataSize; int dataSize;
u32 id;
if ((target < 0) || (target >= ioc->sh->max_id)) { id = (ioc->devices_per_bus == 0) ? 256 : ioc->devices_per_bus;
if (pScsiReq->TargetID > id) {
printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
"Target ID out of bounds. \n", "Target ID out of bounds. \n",
__FILE__, __LINE__); __FILE__, __LINE__);
@ -1917,6 +1850,14 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
goto done_free_mem; goto done_free_mem;
} }
if (pScsiReq->Bus >= ioc->number_of_buses) {
printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
"Target Bus out of bounds. \n",
__FILE__, __LINE__);
rc = -ENODEV;
goto done_free_mem;
}
pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
pScsiReq->MsgFlags |= mpt_msg_flags(); pScsiReq->MsgFlags |= mpt_msg_flags();
@ -1936,13 +1877,15 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
cpu_to_le32(ioc->sense_buf_low_dma cpu_to_le32(ioc->sense_buf_low_dma
+ (req_idx * MPT_SENSE_BUFFER_ALLOC)); + (req_idx * MPT_SENSE_BUFFER_ALLOC));
if ((hd = (MPT_SCSI_HOST *) ioc->sh->hostdata)) { shost_for_each_device(sdev, ioc->sh) {
if (hd->Targets) struct scsi_target *starget = scsi_target(sdev);
pTarget = hd->Targets[target]; VirtTarget *vtarget = starget->hostdata;
}
if (pTarget &&(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)) if ((pScsiReq->TargetID == vtarget->id) &&
qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; (pScsiReq->Bus == vtarget->channel) &&
(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
}
/* Have the IOCTL driver set the direction based /* Have the IOCTL driver set the direction based
* on the dataOutSize (ordering issue with Sparc). * on the dataOutSize (ordering issue with Sparc).
@ -1959,7 +1902,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
pScsiReq->DataLength = cpu_to_le32(dataSize); pScsiReq->DataLength = cpu_to_le32(dataSize);
ioc->ioctl->reset = MPTCTL_RESET_OK; ioc->ioctl->reset = MPTCTL_RESET_OK;
ioc->ioctl->target = target; ioc->ioctl->id = pScsiReq->TargetID;
} else { } else {
printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
@ -2038,7 +1981,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
pScsiReq->DataLength = cpu_to_le32(dataSize); pScsiReq->DataLength = cpu_to_le32(dataSize);
ioc->ioctl->reset = MPTCTL_RESET_OK; ioc->ioctl->reset = MPTCTL_RESET_OK;
ioc->ioctl->target = pScsiReq->TargetID; ioc->ioctl->id = pScsiReq->TargetID;
} else { } else {
printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - " printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
"SCSI driver is not loaded. \n", "SCSI driver is not loaded. \n",


@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware. * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
* *
* Copyright (c) 1999-2007 LSI Logic Corporation * Copyright (c) 1999-2007 LSI Logic Corporation
* (mailto:mpt_linux_developer@lsil.com) * (mailto:mpt_linux_developer@lsi.com)
* *
*/ */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/


@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware. * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
* *
* Copyright (c) 1999-2007 LSI Logic Corporation * Copyright (c) 1999-2007 LSI Logic Corporation
* (mailto:mpt_linux_developer@lsil.com) * (mailto:mpt_linux_developer@lsi.com)
* *
*/ */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -86,6 +86,12 @@ MODULE_PARM_DESC(mptfc_dev_loss_tmo, " Initial time the driver programs the "
" return following a device loss event." " return following a device loss event."
" Default=60."); " Default=60.");
/* SCSI mid-layer global parameter is max_report_luns, which is 511 */
#define MPTFC_MAX_LUN (16895)
static int max_lun = MPTFC_MAX_LUN;
module_param(max_lun, int, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
static int mptfcDoneCtx = -1; static int mptfcDoneCtx = -1;
static int mptfcTaskCtx = -1; static int mptfcTaskCtx = -1;
static int mptfcInternalCtx = -1; /* Used only for internal commands */ static int mptfcInternalCtx = -1; /* Used only for internal commands */
@ -292,10 +298,9 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
U32 port_id = 0xffffff; U32 port_id = 0xffffff;
int num_targ = 0; int num_targ = 0;
int max_bus = ioc->facts.MaxBuses; int max_bus = ioc->facts.MaxBuses;
int max_targ = ioc->facts.MaxDevices; int max_targ;
if (max_bus == 0 || max_targ == 0) max_targ = (ioc->facts.MaxDevices == 0) ? 256 : ioc->facts.MaxDevices;
goto out;
data_sz = sizeof(FCDevicePage0_t) * max_bus * max_targ; data_sz = sizeof(FCDevicePage0_t) * max_bus * max_targ;
p_p0 = p0_array = kzalloc(data_sz, GFP_KERNEL); p_p0 = p0_array = kzalloc(data_sz, GFP_KERNEL);
@ -467,8 +472,8 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
if (ri->starget) { if (ri->starget) {
vtarget = ri->starget->hostdata; vtarget = ri->starget->hostdata;
if (vtarget) { if (vtarget) {
vtarget->target_id = pg0->CurrentTargetID; vtarget->id = pg0->CurrentTargetID;
vtarget->bus_id = pg0->CurrentBus; vtarget->channel = pg0->CurrentBus;
} }
} }
*((struct mptfc_rport_info **)rport->dd_data) = ri; *((struct mptfc_rport_info **)rport->dd_data) = ri;
@ -540,8 +545,8 @@ mptfc_target_alloc(struct scsi_target *starget)
if (rport) { if (rport) {
ri = *((struct mptfc_rport_info **)rport->dd_data); ri = *((struct mptfc_rport_info **)rport->dd_data);
if (ri) { /* better be! */ if (ri) { /* better be! */
vtarget->target_id = ri->pg0.CurrentTargetID; vtarget->id = ri->pg0.CurrentTargetID;
vtarget->bus_id = ri->pg0.CurrentBus; vtarget->channel = ri->pg0.CurrentBus;
ri->starget = starget; ri->starget = starget;
rc = 0; rc = 0;
} }
@ -592,7 +597,6 @@ mptfc_slave_alloc(struct scsi_device *sdev)
if (vtarget->num_luns == 0) { if (vtarget->num_luns == 0) {
vtarget->ioc_id = hd->ioc->id; vtarget->ioc_id = hd->ioc->id;
vtarget->tflags = MPT_TARGET_FLAGS_Q_YES; vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
hd->Targets[sdev->id] = vtarget;
} }
vdev->vtarget = vtarget; vdev->vtarget = vtarget;
@ -630,16 +634,17 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
struct mptfc_rport_info *ri; struct mptfc_rport_info *ri;
struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
int err; int err;
VirtDevice *vdev = SCpnt->device->hostdata;
err = fc_remote_port_chkready(rport); if (!vdev || !vdev->vtarget) {
if (unlikely(err)) { SCpnt->result = DID_NO_CONNECT << 16;
SCpnt->result = err;
done(SCpnt); done(SCpnt);
return 0; return 0;
} }
if (!SCpnt->device->hostdata) { /* vdev */ err = fc_remote_port_chkready(rport);
SCpnt->result = DID_NO_CONNECT << 16; if (unlikely(err)) {
SCpnt->result = err;
done(SCpnt); done(SCpnt);
return 0; return 0;
} }
@ -1143,7 +1148,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MYIOC_s_WARN_FMT printk(MYIOC_s_WARN_FMT
"Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
ioc->name, ioc); ioc->name, ioc);
return -ENODEV; return 0;
} }
sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST)); sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST));
@ -1173,10 +1178,9 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* set 16 byte cdb's */ /* set 16 byte cdb's */
sh->max_cmd_len = 16; sh->max_cmd_len = 16;
sh->max_id = MPT_MAX_FC_DEVICES<256 ? MPT_MAX_FC_DEVICES : 255; sh->max_id = ioc->pfacts->MaxDevices;
sh->max_lun = max_lun;
sh->max_lun = MPT_LAST_LUN + 1;
sh->max_channel = 0;
sh->this_id = ioc->pfacts[0].PortSCSIID; sh->this_id = ioc->pfacts[0].PortSCSIID;
/* Required entry. /* Required entry.
@ -1230,19 +1234,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p\n", dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p\n",
ioc->name, hd->ScsiLookup)); ioc->name, hd->ScsiLookup));
/* Allocate memory for the device structures.
* A non-Null pointer at an offset
* indicates a device exists.
* max_id = 1 + maximum id (hosts.h)
*/
hd->Targets = kcalloc(sh->max_id, sizeof(void *), GFP_ATOMIC);
if (!hd->Targets) {
error = -ENOMEM;
goto out_mptfc_probe;
}
dprintk((KERN_INFO " vdev @ %p\n", hd->Targets));
/* Clear the TM flags /* Clear the TM flags
*/ */
hd->tmPending = 0; hd->tmPending = 0;


@ -5,6 +5,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware. * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
* *
* Copyright (c) 2000-2007 LSI Logic Corporation * Copyright (c) 2000-2007 LSI Logic Corporation
* (mailto:mpt_linux_developer@lsi.com)
* *
*/ */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/


@ -5,6 +5,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware. * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
* *
* Copyright (c) 2000-2007 LSI Logic Corporation * Copyright (c) 2000-2007 LSI Logic Corporation
* (mailto:mpt_linux_developer@lsi.com)
* *
*/ */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware. * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
* *
* Copyright (c) 1999-2007 LSI Logic Corporation * Copyright (c) 1999-2007 LSI Logic Corporation
* (mailto:mpt_linux_developer@lsil.com) * (mailto:mpt_linux_developer@lsi.com)
* *
*/ */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -53,6 +53,24 @@
* SCSI Public stuff... * SCSI Public stuff...
*/ */
#define MPT_SCANDV_GOOD (0x00000000) /* must be 0 */
#define MPT_SCANDV_DID_RESET (0x00000001)
#define MPT_SCANDV_SENSE (0x00000002)
#define MPT_SCANDV_SOME_ERROR (0x00000004)
#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008)
#define MPT_SCANDV_ISSUE_SENSE (0x00000010)
#define MPT_SCANDV_FALLBACK (0x00000020)
#define MPT_SCANDV_MAX_RETRIES (10)
#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */
#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */
#define MPT_ICFLAG_EBOS 0x04 /* ReadBuffer Echo buffer has EBOS */
#define MPT_ICFLAG_PHYS_DISK 0x08 /* Any SCSI IO but do Phys Disk Format */
#define MPT_ICFLAG_TAGGED_CMD 0x10 /* Do tagged IO */
#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occurred with this command */
#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */
#define MPT_SCSI_CMD_PER_DEV_HIGH 64 #define MPT_SCSI_CMD_PER_DEV_HIGH 64
#define MPT_SCSI_CMD_PER_DEV_LOW 32 #define MPT_SCSI_CMD_PER_DEV_LOW 32
@ -69,9 +87,22 @@
#define MPTSCSIH_SAF_TE 0 #define MPTSCSIH_SAF_TE 0
#define MPTSCSIH_PT_CLEAR 0 #define MPTSCSIH_PT_CLEAR 0
#endif #endif
typedef struct _internal_cmd {
char *data; /* data pointer */
dma_addr_t data_dma; /* data dma address */
int size; /* transfer size */
u8 cmd; /* SCSI Op Code */
u8 channel; /* bus number */
u8 id; /* SCSI ID (virtual) */
int lun;
u8 flags; /* Bit Field - See above */
u8 physDiskNum; /* Phys disk number, -1 else */
u8 rsvd2;
u8 rsvd;
} INTERNAL_CMD;
extern void mptscsih_remove(struct pci_dev *); extern void mptscsih_remove(struct pci_dev *);
extern void mptscsih_shutdown(struct pci_dev *); extern void mptscsih_shutdown(struct pci_dev *);
#ifdef CONFIG_PM #ifdef CONFIG_PM
@ -81,9 +112,6 @@ extern int mptscsih_resume(struct pci_dev *pdev);
extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func); extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
extern const char * mptscsih_info(struct Scsi_Host *SChost); extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)); extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
extern int mptscsih_target_alloc(struct scsi_target *starget);
extern int mptscsih_slave_alloc(struct scsi_device *device);
extern void mptscsih_target_destroy(struct scsi_target *starget);
extern void mptscsih_slave_destroy(struct scsi_device *device); extern void mptscsih_slave_destroy(struct scsi_device *device);
extern int mptscsih_slave_configure(struct scsi_device *device); extern int mptscsih_slave_configure(struct scsi_device *device);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt); extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@ -98,6 +126,6 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth); extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
extern void mptscsih_timer_expired(unsigned long data); extern void mptscsih_timer_expired(unsigned long data);
extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout); extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
extern int mptscsih_raid_id_to_num(MPT_SCSI_HOST *hd, uint physdiskid); extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id); extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
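The INTERNAL_CMD structure above describes one driver-internal request (domain validation, SEP I/O and the like). A hedged sketch of filling it in for a standard INQUIRY follows; the helper name sketch_fill_inquiry_cmd is made up for illustration, and the actual submit path (e.g. mptscsih_do_cmd()) stays private to mptscsih.c.

/* kernel context: needs mptscsih.h and <scsi/scsi.h> for INQUIRY */
static void sketch_fill_inquiry_cmd(INTERNAL_CMD *iocmd, char *buf,
				    dma_addr_t buf_dma)
{
	memset(iocmd, 0, sizeof(*iocmd));
	iocmd->cmd = INQUIRY;		/* SCSI opcode 0x12 */
	iocmd->channel = 0;		/* bus number */
	iocmd->id = 1;			/* virtual SCSI ID */
	iocmd->lun = 0;
	iocmd->flags = 0;		/* untagged, not phys-disk format */
	iocmd->physDiskNum = -1;	/* not a RAID passthrough */
	iocmd->data = buf;		/* DMA-able buffer (assumed) */
	iocmd->data_dma = buf_dma;	/* its bus address (assumed) */
	iocmd->size = 36;		/* standard INQUIRY allocation length */
	/* submission itself stays inside mptscsih.c,
	 * e.g. via mptscsih_do_cmd(hd, iocmd) */
}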


@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware. * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
* *
* Copyright (c) 1999-2007 LSI Logic Corporation * Copyright (c) 1999-2007 LSI Logic Corporation
* (mailto:mpt_linux_developer@lsil.com) * (mailto:mpt_linux_developer@lsi.com)
* *
*/ */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -65,6 +65,7 @@
#include <scsi/scsi_tcq.h> #include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h> #include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h> #include <scsi/scsi_transport_spi.h>
#include <scsi/scsi_dbg.h>
#include "mptbase.h" #include "mptbase.h"
#include "mptscsih.h" #include "mptscsih.h"
@ -95,25 +96,339 @@ static int mptspiDoneCtx = -1;
static int mptspiTaskCtx = -1; static int mptspiTaskCtx = -1;
static int mptspiInternalCtx = -1; /* Used only for internal commands */ static int mptspiInternalCtx = -1; /* Used only for internal commands */
/**
* mptspi_setTargetNegoParms - Update the target negotiation
 * parameters based on the Inquiry data, adapter capabilities,
* and NVRAM settings
*
* @hd: Pointer to a SCSI Host Structure
* @vtarget: per target private data
* @sdev: SCSI device
*
**/
static void
mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
struct scsi_device *sdev)
{
SpiCfgData *pspi_data = &hd->ioc->spi_data;
int id = (int) target->id;
int nvram;
u8 width = MPT_NARROW;
u8 factor = MPT_ASYNC;
u8 offset = 0;
u8 nfactor;
u8 noQas = 1;
target->negoFlags = pspi_data->noQas;
if (sdev->scsi_level < SCSI_2) {
width = 0;
factor = MPT_ULTRA2;
offset = pspi_data->maxSyncOffset;
target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
} else {
if (scsi_device_wide(sdev))
width = 1;
if (scsi_device_sync(sdev)) {
factor = pspi_data->minSyncFactor;
if (!scsi_device_dt(sdev))
factor = MPT_ULTRA2;
else {
if (!scsi_device_ius(sdev) &&
!scsi_device_qas(sdev))
factor = MPT_ULTRA160;
else {
factor = MPT_ULTRA320;
if (scsi_device_qas(sdev)) {
ddvprintk((KERN_INFO "Enabling QAS due to byte56=%02x on id=%d!\n", scsi_device_qas(sdev), id));
noQas = 0;
}
if (sdev->type == TYPE_TAPE &&
scsi_device_ius(sdev))
target->negoFlags |= MPT_TAPE_NEGO_IDP;
}
}
offset = pspi_data->maxSyncOffset;
/* If RAID, never disable QAS
* else if non RAID, do not disable
* QAS if bit 1 is set
* bit 1 QAS support, non-raid only
* bit 0 IU support
*/
if (target->raidVolume == 1)
noQas = 0;
} else {
factor = MPT_ASYNC;
offset = 0;
}
}
if (!sdev->tagged_supported)
target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
/* Update tflags based on NVRAM settings. (SCSI only)
*/
if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
nvram = pspi_data->nvram[id];
nfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8;
if (width)
width = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
if (offset > 0) {
/* Ensure factor is set to the
* maximum of: adapter, nvram, inquiry
*/
if (nfactor) {
if (nfactor < pspi_data->minSyncFactor )
nfactor = pspi_data->minSyncFactor;
factor = max(factor, nfactor);
if (factor == MPT_ASYNC)
offset = 0;
} else {
offset = 0;
factor = MPT_ASYNC;
}
} else {
factor = MPT_ASYNC;
}
}
/* Make sure data is consistent
*/
if ((!width) && (factor < MPT_ULTRA2))
factor = MPT_ULTRA2;
/* Save the data to the target structure.
*/
target->minSyncFactor = factor;
target->maxOffset = offset;
target->maxWidth = width;
target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
/* Disable unused features.
*/
if (!width)
target->negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
if (!offset)
target->negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
if ( factor > MPT_ULTRA320 )
noQas = 0;
if (noQas && (pspi_data->noQas == 0)) {
pspi_data->noQas |= MPT_TARGET_NO_NEGO_QAS;
target->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
/* Disable QAS in a mixed configuration case
*/
ddvprintk((KERN_INFO "Disabling QAS due to noQas=%02x on id=%d!\n", noQas, id));
}
}
/**
* mptspi_writeIOCPage4 - write IOC Page 4
* @hd: Pointer to a SCSI Host Structure
 * @channel: bus number of the SEP being recorded in IOC Page 4
 * @id: write IOC Page4 for this ID & Bus
 *
 * Return: -EAGAIN if unable to obtain a Message Frame
 * or 0 on success.
*
* Remark: We do not wait for a return, write pages sequentially.
**/
static int
mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
{
MPT_ADAPTER *ioc = hd->ioc;
Config_t *pReq;
IOCPage4_t *IOCPage4Ptr;
MPT_FRAME_HDR *mf;
dma_addr_t dataDma;
u16 req_idx;
u32 frameOffset;
u32 flagsLength;
int ii;
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
dfailprintk((MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n",
ioc->name));
return -EAGAIN;
}
/* Set the request and the data pointers.
* Place data at end of MF.
*/
pReq = (Config_t *)mf;
req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
frameOffset = ioc->req_sz - sizeof(IOCPage4_t);
/* Complete the request frame (same for all requests).
*/
pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
pReq->Reserved = 0;
pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_CONFIG;
pReq->ExtPageLength = 0;
pReq->ExtPageType = 0;
pReq->MsgFlags = 0;
for (ii=0; ii < 8; ii++) {
pReq->Reserved2[ii] = 0;
}
IOCPage4Ptr = ioc->spi_data.pIocPg4;
dataDma = ioc->spi_data.IocPg4_dma;
ii = IOCPage4Ptr->ActiveSEP++;
IOCPage4Ptr->SEP[ii].SEPTargetID = id;
IOCPage4Ptr->SEP[ii].SEPBus = channel;
pReq->Header = IOCPage4Ptr->Header;
pReq->PageAddress = cpu_to_le32(id | (channel << 8 ));
/* Add a SGE to the config request.
*/
flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
(IOCPage4Ptr->Header.PageLength + ii) * 4;
mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
ddvprintk((MYIOC_s_INFO_FMT
"writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
ioc->name, IOCPage4Ptr->MaxSEP, IOCPage4Ptr->ActiveSEP, id, channel));
mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
return 0;
}
/**
 * mptspi_initTarget - per-target initialization (negotiation parameters, SAF-TE detection)
* @hd: Pointer to MPT_SCSI_HOST structure
* @vtarget: per target private data
* @sdev: SCSI device
*
* NOTE: It's only SAFE to call this routine if data points to
* sane & valid STANDARD INQUIRY data!
*
* Allocate and initialize memory for this target.
* Save inquiry data.
*
**/
static void
mptspi_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget,
struct scsi_device *sdev)
{
/* Is LUN supported? If so, upper 2 bits will be 0
* in first byte of inquiry data.
*/
if (sdev->inq_periph_qual != 0)
return;
if (vtarget == NULL)
return;
vtarget->type = sdev->type;
if ((sdev->type == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
/* Treat all Processors as SAF-TE if
* command line option is set */
vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id);
}else if ((sdev->type == TYPE_PROCESSOR) &&
!(vtarget->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) {
if (sdev->inquiry_len > 49 ) {
if (sdev->inquiry[44] == 'S' &&
sdev->inquiry[45] == 'A' &&
sdev->inquiry[46] == 'F' &&
sdev->inquiry[47] == '-' &&
sdev->inquiry[48] == 'T' &&
sdev->inquiry[49] == 'E' ) {
vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id);
}
}
}
mptspi_setTargetNegoParms(hd, vtarget, sdev);
}
/**
 * mptspi_is_raid - Determines whether the target belongs to a RAID volume
* @hd: Pointer to a SCSI HOST structure
* @id: target device id
*
* Return:
* non-zero = true
* zero = false
*
*/
static int
mptspi_is_raid(struct _MPT_SCSI_HOST *hd, u32 id)
{
int i, rc = 0;
if (!hd->ioc->raid_data.pIocPg2)
goto out;
if (!hd->ioc->raid_data.pIocPg2->NumActiveVolumes)
goto out;
for (i=0; i < hd->ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
if (hd->ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id) {
rc = 1;
goto out;
}
}
out:
return rc;
}
static int mptspi_target_alloc(struct scsi_target *starget) static int mptspi_target_alloc(struct scsi_target *starget)
{ {
struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)shost->hostdata; struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)shost->hostdata;
int ret; VirtTarget *vtarget;
if (hd == NULL) if (hd == NULL)
return -ENODEV; return -ENODEV;
ret = mptscsih_target_alloc(starget); vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
if (ret) if (!vtarget)
return ret; return -ENOMEM;
/* if we're a device on virtual channel 1 and we're not part vtarget->ioc_id = hd->ioc->id;
* of an array, just return here (otherwise the setup below vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
* may actually affect a real physical device on channel 0 */ vtarget->id = (u8)starget->id;
if (starget->channel == 1 && vtarget->channel = (u8)starget->channel;
mptscsih_raid_id_to_num(hd, starget->id) < 0) vtarget->starget = starget;
return 0; starget->hostdata = vtarget;
if (starget->channel == 1) {
if (mptscsih_is_phys_disk(hd->ioc, 0, starget->id) == 0)
return 0;
vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
/* The real channel for this device is zero */
vtarget->channel = 0;
/* The actual physdisknum (for RAID passthrough) */
vtarget->id = mptscsih_raid_id_to_num(hd->ioc, 0,
starget->id);
}
if (starget->channel == 0 &&
mptspi_is_raid(hd, starget->id)) {
vtarget->raidVolume = 1;
ddvprintk((KERN_INFO
"RAID Volume @ channel=%d id=%d\n", starget->channel,
starget->id));
}
if (hd->ioc->spi_data.nvram && if (hd->ioc->spi_data.nvram &&
hd->ioc->spi_data.nvram[starget->id] != MPT_HOST_NVRAM_INVALID) { hd->ioc->spi_data.nvram[starget->id] != MPT_HOST_NVRAM_INVALID) {
@ -132,6 +447,64 @@ static int mptspi_target_alloc(struct scsi_target *starget)
return 0; return 0;
} }
void
mptspi_target_destroy(struct scsi_target *starget)
{
if (starget->hostdata)
kfree(starget->hostdata);
starget->hostdata = NULL;
}
/**
 * mptspi_print_write_nego - debug print of the negotiation parameters being sent
* @hd: Pointer to a SCSI HOST structure
* @starget: SCSI target
* @ii: negotiation parameters
*
*/
static void
mptspi_print_write_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii)
{
ddvprintk((MYIOC_s_INFO_FMT "id=%d Requested = 0x%08x"
" ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n",
hd->ioc->name, starget->id, ii,
ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "",
((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF),
ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "",
ii & MPI_SCSIDEVPAGE0_NP_DT ? "DT ": "",
ii & MPI_SCSIDEVPAGE0_NP_QAS ? "QAS ": "",
ii & MPI_SCSIDEVPAGE0_NP_HOLD_MCS ? "HOLDMCS ": "",
ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "",
ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "",
ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "",
ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": ""));
}
/**
 * mptspi_print_read_nego - debug print of the negotiation parameters being read
* @hd: Pointer to a SCSI HOST structure
* @starget: SCSI target
* @ii: negotiation parameters
*
*/
static void
mptspi_print_read_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii)
{
ddvprintk((MYIOC_s_INFO_FMT "id=%d Read = 0x%08x"
" ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n",
hd->ioc->name, starget->id, ii,
ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "",
((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF),
ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "",
ii & MPI_SCSIDEVPAGE0_NP_DT ? "DT ": "",
ii & MPI_SCSIDEVPAGE0_NP_QAS ? "QAS ": "",
ii & MPI_SCSIDEVPAGE0_NP_HOLD_MCS ? "HOLDMCS ": "",
ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "",
ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "",
ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "",
ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": ""));
}
static int mptspi_read_spi_device_pg0(struct scsi_target *starget, static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
struct _CONFIG_PAGE_SCSI_DEVICE_0 *pass_pg0) struct _CONFIG_PAGE_SCSI_DEVICE_0 *pass_pg0)
{ {
@ -147,7 +520,7 @@ static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
/* No SPI parameters for RAID devices */ /* No SPI parameters for RAID devices */
if (starget->channel == 0 && if (starget->channel == 0 &&
(hd->ioc->raid_data.isRaid & (1 << starget->id))) mptspi_is_raid(hd, starget->id))
return -1; return -1;
size = ioc->spi_data.sdp0length * 4; size = ioc->spi_data.sdp0length * 4;
@ -185,6 +558,8 @@ static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
err = 0; err = 0;
memcpy(pass_pg0, pg0, size); memcpy(pass_pg0, pg0, size);
mptspi_print_read_nego(hd, starget, le32_to_cpu(pg0->NegotiatedParameters));
out_free: out_free:
dma_free_coherent(&ioc->pcidev->dev, size, pg0, pg0_dma); dma_free_coherent(&ioc->pcidev->dev, size, pg0, pg0_dma);
return err; return err;
@ -233,7 +608,7 @@ static void mptspi_read_parameters(struct scsi_target *starget)
} }
static int static int
mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, int disk) mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
{ {
MpiRaidActionRequest_t *pReq; MpiRaidActionRequest_t *pReq;
MPT_FRAME_HDR *mf; MPT_FRAME_HDR *mf;
@ -253,8 +628,8 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, int disk)
pReq->Reserved1 = 0; pReq->Reserved1 = 0;
pReq->ChainOffset = 0; pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_RAID_ACTION; pReq->Function = MPI_FUNCTION_RAID_ACTION;
pReq->VolumeID = disk; pReq->VolumeID = id;
pReq->VolumeBus = 0; pReq->VolumeBus = channel;
pReq->PhysDiskNum = 0; pReq->PhysDiskNum = 0;
pReq->MsgFlags = 0; pReq->MsgFlags = 0;
pReq->Reserved2 = 0; pReq->Reserved2 = 0;
@ -263,8 +638,8 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, int disk)
mpt_add_sge((char *)&pReq->ActionDataSGE, mpt_add_sge((char *)&pReq->ActionDataSGE,
MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
ddvprintk((MYIOC_s_INFO_FMT "RAID Volume action %x id %d\n", ddvprintk((MYIOC_s_INFO_FMT "RAID Volume action=%x channel=%d id=%d\n",
hd->ioc->name, action, io->id)); hd->ioc->name, pReq->Action, channel, id));
hd->pLocal = NULL; hd->pLocal = NULL;
hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */ hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
@ -292,12 +667,12 @@ static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
/* no DV on RAID devices */ /* no DV on RAID devices */
if (sdev->channel == 0 && if (sdev->channel == 0 &&
(hd->ioc->raid_data.isRaid & (1 << sdev->id))) mptspi_is_raid(hd, sdev->id))
return; return;
/* If this is a piece of a RAID, then quiesce first */ /* If this is a piece of a RAID, then quiesce first */
if (sdev->channel == 1 && if (sdev->channel == 1 &&
mptscsih_quiesce_raid(hd, 1, vtarget->target_id) < 0) { mptscsih_quiesce_raid(hd, 1, vtarget->channel, vtarget->id) < 0) {
starget_printk(KERN_ERR, scsi_target(sdev), starget_printk(KERN_ERR, scsi_target(sdev),
"Integrated RAID quiesce failed\n"); "Integrated RAID quiesce failed\n");
return; return;
@ -306,7 +681,7 @@ static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
spi_dv_device(sdev); spi_dv_device(sdev);
if (sdev->channel == 1 && if (sdev->channel == 1 &&
mptscsih_quiesce_raid(hd, 0, vtarget->target_id) < 0) mptscsih_quiesce_raid(hd, 0, vtarget->channel, vtarget->id) < 0)
starget_printk(KERN_ERR, scsi_target(sdev), starget_printk(KERN_ERR, scsi_target(sdev),
"Integrated RAID resume failed\n"); "Integrated RAID resume failed\n");
@ -317,54 +692,89 @@ static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
static int mptspi_slave_alloc(struct scsi_device *sdev) static int mptspi_slave_alloc(struct scsi_device *sdev)
{ {
int ret;
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)sdev->host->hostdata; MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)sdev->host->hostdata;
/* gcc doesn't see that all uses of this variable occur within VirtTarget *vtarget;
* the if() statements, so stop it from whining */ VirtDevice *vdev;
int physdisknum = 0; struct scsi_target *starget;
if (sdev->channel == 1) { if (sdev->channel == 1 &&
physdisknum = mptscsih_raid_id_to_num(hd, sdev->id); mptscsih_is_phys_disk(hd->ioc, 0, sdev->id) == 0)
return -ENXIO;
if (physdisknum < 0) vdev = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
return physdisknum; if (!vdev) {
printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
hd->ioc->name, sizeof(VirtDevice));
return -ENOMEM;
} }
ret = mptscsih_slave_alloc(sdev); vdev->lun = sdev->lun;
sdev->hostdata = vdev;
if (ret) starget = scsi_target(sdev);
return ret; vtarget = starget->hostdata;
vdev->vtarget = vtarget;
vtarget->num_luns++;
if (sdev->channel == 1) { if (sdev->channel == 1)
VirtDevice *vdev = sdev->hostdata;
sdev->no_uld_attach = 1; sdev->no_uld_attach = 1;
vdev->vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
/* The real channel for this device is zero */
vdev->vtarget->bus_id = 0;
/* The actual physdisknum (for RAID passthrough) */
vdev->vtarget->target_id = physdisknum;
}
return 0; return 0;
} }
static int mptspi_slave_configure(struct scsi_device *sdev) static int mptspi_slave_configure(struct scsi_device *sdev)
{ {
int ret = mptscsih_slave_configure(sdev);
struct _MPT_SCSI_HOST *hd = struct _MPT_SCSI_HOST *hd =
(struct _MPT_SCSI_HOST *)sdev->host->hostdata; (struct _MPT_SCSI_HOST *)sdev->host->hostdata;
VirtTarget *vtarget = scsi_target(sdev)->hostdata;
int ret = mptscsih_slave_configure(sdev);
if (ret) if (ret)
return ret; return ret;
mptspi_initTarget(hd, vtarget, sdev);
ddvprintk((MYIOC_s_INFO_FMT "id=%d min_period=0x%02x"
" max_offset=0x%02x max_width=%d\n", hd->ioc->name,
sdev->id, spi_min_period(scsi_target(sdev)),
spi_max_offset(scsi_target(sdev)),
spi_max_width(scsi_target(sdev))));
if ((sdev->channel == 1 || if ((sdev->channel == 1 ||
!(hd->ioc->raid_data.isRaid & (1 << sdev->id))) && !(mptspi_is_raid(hd, sdev->id))) &&
!spi_initial_dv(sdev->sdev_target)) !spi_initial_dv(sdev->sdev_target))
mptspi_dv_device(hd, sdev); mptspi_dv_device(hd, sdev);
return 0; return 0;
} }
static int
mptspi_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
struct _MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata;
VirtDevice *vdev = SCpnt->device->hostdata;
if (!vdev || !vdev->vtarget) {
SCpnt->result = DID_NO_CONNECT << 16;
done(SCpnt);
return 0;
}
if (SCpnt->device->channel == 1 &&
mptscsih_is_phys_disk(hd->ioc, 0, SCpnt->device->id) == 0) {
SCpnt->result = DID_NO_CONNECT << 16;
done(SCpnt);
return 0;
}
#ifdef MPT_DEBUG_DV
if (spi_dv_pending(scsi_target(SCpnt->device)))
scsi_print_command(SCpnt);
#endif
return mptscsih_qcmd(SCpnt,done);
}
static void mptspi_slave_destroy(struct scsi_device *sdev) static void mptspi_slave_destroy(struct scsi_device *sdev)
{ {
struct scsi_target *starget = scsi_target(sdev); struct scsi_target *starget = scsi_target(sdev);
@ -392,11 +802,11 @@ static struct scsi_host_template mptspi_driver_template = {
.proc_info = mptscsih_proc_info, .proc_info = mptscsih_proc_info,
.name = "MPT SPI Host", .name = "MPT SPI Host",
.info = mptscsih_info, .info = mptscsih_info,
.queuecommand = mptscsih_qcmd, .queuecommand = mptspi_qcmd,
.target_alloc = mptspi_target_alloc, .target_alloc = mptspi_target_alloc,
.slave_alloc = mptspi_slave_alloc, .slave_alloc = mptspi_slave_alloc,
.slave_configure = mptspi_slave_configure, .slave_configure = mptspi_slave_configure,
.target_destroy = mptscsih_target_destroy, .target_destroy = mptspi_target_destroy,
.slave_destroy = mptspi_slave_destroy, .slave_destroy = mptspi_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth, .change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort, .eh_abort_handler = mptscsih_abort,
@ -427,7 +837,7 @@ static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
/* don't allow updating nego parameters on RAID devices */ /* don't allow updating nego parameters on RAID devices */
if (starget->channel == 0 && if (starget->channel == 0 &&
(hd->ioc->raid_data.isRaid & (1 << starget->id))) mptspi_is_raid(hd, starget->id))
return -1; return -1;
size = ioc->spi_data.sdp1length * 4; size = ioc->spi_data.sdp1length * 4;
@ -460,6 +870,8 @@ static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
pg1->Header.PageNumber = hdr.PageNumber; pg1->Header.PageNumber = hdr.PageNumber;
pg1->Header.PageType = hdr.PageType; pg1->Header.PageType = hdr.PageType;
mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters));
if (mpt_config(ioc, &cfg)) { if (mpt_config(ioc, &cfg)) {
starget_printk(KERN_ERR, starget, "mpt_config failed\n"); starget_printk(KERN_ERR, starget, "mpt_config failed\n");
goto out_free; goto out_free;
@ -672,9 +1084,9 @@ static void mpt_work_wrapper(struct work_struct *work)
if (sdev->channel != 1) if (sdev->channel != 1)
continue; continue;
/* The target_id is the raid PhysDiskNum, even if /* The id is the raid PhysDiskNum, even if
* starget->id is the actual target address */ * starget->id is the actual target address */
if(vtarget->target_id != disk) if(vtarget->id != disk)
continue; continue;
starget_printk(KERN_INFO, vtarget->starget, starget_printk(KERN_INFO, vtarget->starget,
@ -727,7 +1139,7 @@ mptspi_deny_binding(struct scsi_target *starget)
{ {
struct _MPT_SCSI_HOST *hd = struct _MPT_SCSI_HOST *hd =
(struct _MPT_SCSI_HOST *)dev_to_shost(starget->dev.parent)->hostdata; (struct _MPT_SCSI_HOST *)dev_to_shost(starget->dev.parent)->hostdata;
return ((hd->ioc->raid_data.isRaid & (1 << starget->id)) && return ((mptspi_is_raid(hd, starget->id)) &&
starget->channel == 0) ? 1 : 0; starget->channel == 0) ? 1 : 0;
} }
@ -945,14 +1357,13 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* max_lun = 1 + actual last lun, * max_lun = 1 + actual last lun,
* see hosts.h :o( * see hosts.h :o(
*/ */
sh->max_id = MPT_MAX_SCSI_DEVICES; sh->max_id = ioc->devices_per_bus;
sh->max_lun = MPT_LAST_LUN + 1; sh->max_lun = MPT_LAST_LUN + 1;
/* /*
* If RAID Firmware Detected, setup virtual channel * If RAID Firmware Detected, setup virtual channel
*/ */
if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK) if (ioc->ir_firmware)
> MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
sh->max_channel = 1; sh->max_channel = 1;
else else
sh->max_channel = 0; sh->max_channel = 0;
@ -1009,20 +1420,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p\n", dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p\n",
ioc->name, hd->ScsiLookup)); ioc->name, hd->ScsiLookup));
/* Allocate memory for the device structures.
* A non-Null pointer at an offset
* indicates a device exists.
* max_id = 1 + maximum id (hosts.h)
*/
hd->Targets = kcalloc(sh->max_id * (sh->max_channel + 1),
sizeof(void *), GFP_ATOMIC);
if (!hd->Targets) {
error = -ENOMEM;
goto out_mptspi_probe;
}
dprintk((KERN_INFO " vdev @ %p\n", hd->Targets));
/* Clear the TM flags /* Clear the TM flags
*/ */
hd->tmPending = 0; hd->tmPending = 0;


@ -838,32 +838,28 @@ zfcp_erp_action_exists(struct zfcp_erp_action *erp_action)
* and does appropriate preparations (dismiss fsf request, ...) * and does appropriate preparations (dismiss fsf request, ...)
* *
* locks: called under erp_lock (disabled interrupts) * locks: called under erp_lock (disabled interrupts)
*
* returns: 0
*/ */
static int static void
zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
{ {
int retval = 0;
struct zfcp_fsf_req *fsf_req = NULL;
struct zfcp_adapter *adapter = erp_action->adapter; struct zfcp_adapter *adapter = erp_action->adapter;
if (erp_action->fsf_req) { if (erp_action->fsf_req) {
/* take lock to ensure that request is not deleted meanwhile */ /* take lock to ensure that request is not deleted meanwhile */
spin_lock(&adapter->req_list_lock); spin_lock(&adapter->req_list_lock);
if ((!zfcp_reqlist_ismember(adapter, if (zfcp_reqlist_ismember(adapter,
erp_action->fsf_req->req_id)) && erp_action->fsf_req->req_id)) {
(fsf_req->erp_action == erp_action)) {
/* fsf_req still exists */ /* fsf_req still exists */
debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
debug_event(adapter->erp_dbf, 3, &fsf_req, debug_event(adapter->erp_dbf, 3, &erp_action->fsf_req,
sizeof (unsigned long)); sizeof (unsigned long));
/* dismiss fsf_req of timed out/dismissed erp_action */ /* dismiss fsf_req of timed out/dismissed erp_action */
if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) { ZFCP_STATUS_ERP_TIMEDOUT)) {
debug_text_event(adapter->erp_dbf, 3, debug_text_event(adapter->erp_dbf, 3,
"a_ca_disreq"); "a_ca_disreq");
fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; erp_action->fsf_req->status |=
ZFCP_STATUS_FSFREQ_DISMISSED;
} }
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) { if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
ZFCP_LOG_NORMAL("error: erp step timed out " ZFCP_LOG_NORMAL("error: erp step timed out "
@ -876,11 +872,11 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
* then keep it running asynchronously and don't mess * then keep it running asynchronously and don't mess
* with the association of erp_action and fsf_req. * with the association of erp_action and fsf_req.
*/ */
if (fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED | if (erp_action->fsf_req->status &
(ZFCP_STATUS_FSFREQ_COMPLETED |
ZFCP_STATUS_FSFREQ_DISMISSED)) { ZFCP_STATUS_FSFREQ_DISMISSED)) {
/* forget about association between fsf_req /* forget about association between fsf_req
and erp_action */ and erp_action */
fsf_req->erp_action = NULL;
erp_action->fsf_req = NULL; erp_action->fsf_req = NULL;
} }
} else { } else {
@ -894,8 +890,6 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
spin_unlock(&adapter->req_list_lock); spin_unlock(&adapter->req_list_lock);
} else } else
debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
return retval;
} }
/** /**
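
Note on the zfcp_erp hunk above: the fix stops dereferencing a never-assigned local fsf_req and only touches erp_action->fsf_req after zfcp_reqlist_ismember() has confirmed, under req_list_lock, that the request is still registered. The sketch below is a minimal userspace illustration of that "validate under the lock before dereferencing" pattern, with invented types and a pthread mutex standing in for the zfcp spinlock:

/* Sketch only: look a request up in a registry under its lock before
 * touching it; invented types, pthread mutex instead of a spinlock. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define REG_SIZE 8

struct request { unsigned long id; int status; };

static struct request *registry[REG_SIZE];
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

static bool is_member(unsigned long id)
{
	for (size_t i = 0; i < REG_SIZE; i++)
		if (registry[i] && registry[i]->id == id)
			return true;
	return false;
}

static void mark_dismissed(struct request *req)
{
	pthread_mutex_lock(&registry_lock);
	if (req && is_member(req->id))   /* still registered: safe to touch */
		req->status |= 0x1;      /* e.g. a DISMISSED flag */
	pthread_mutex_unlock(&registry_lock);
}

int main(void)
{
	struct request r = { .id = 42, .status = 0 };

	registry[0] = &r;
	mark_dismissed(&r);
	printf("status after dismiss: %d\n", r.status);
	return 0;
}
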

View file

@ -89,7 +89,7 @@ extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
u32, u32, struct zfcp_sg_list *); u32, u32, struct zfcp_sg_list *);
extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long); extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long);
extern void zfcp_erp_start_timer(struct zfcp_fsf_req *); extern void zfcp_erp_start_timer(struct zfcp_fsf_req *);
extern int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_adapter *, int); extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *, extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
unsigned long *, struct zfcp_fsf_req **); unsigned long *, struct zfcp_fsf_req **);

View file

@ -176,28 +176,25 @@ static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter,
/** /**
* zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests
*/ */
int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{ {
struct zfcp_fsf_req *request, *tmp; struct zfcp_fsf_req *request, *tmp;
unsigned long flags; unsigned long flags;
LIST_HEAD(remove_queue);
unsigned int i, counter; unsigned int i, counter;
spin_lock_irqsave(&adapter->req_list_lock, flags); spin_lock_irqsave(&adapter->req_list_lock, flags);
atomic_set(&adapter->reqs_active, 0); atomic_set(&adapter->reqs_active, 0);
for (i=0; i<REQUEST_LIST_SIZE; i++) { for (i=0; i<REQUEST_LIST_SIZE; i++)
if (list_empty(&adapter->req_list[i])) list_splice_init(&adapter->req_list[i], &remove_queue);
continue;
counter = 0;
list_for_each_entry_safe(request, tmp,
&adapter->req_list[i], list) {
zfcp_fsf_req_dismiss(adapter, request, counter);
counter++;
}
}
spin_unlock_irqrestore(&adapter->req_list_lock, flags); spin_unlock_irqrestore(&adapter->req_list_lock, flags);
return 0; counter = 0;
list_for_each_entry_safe(request, tmp, &remove_queue, list) {
zfcp_fsf_req_dismiss(adapter, request, counter);
counter++;
}
} }
/* /*
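
Note on zfcp_fsf_req_dismiss_all() above: the rewrite splices every per-bucket request list onto a private remove_queue while holding req_list_lock, then walks that private list after dropping the lock, so the dismiss work no longer runs with the lock held and interrupts disabled. A minimal userspace sketch of the same "detach under the lock, process outside it" idea, using a simplified singly linked list and a pthread mutex rather than the kernel's list_head/spinlock:

/* Sketch of "splice under the lock, process after unlocking";
 * simplified singly linked list, not the kernel list API. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct node { int id; struct node *next; };

static struct node *pending;                 /* shared, protected by lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void dismiss_all(void)
{
	struct node *grabbed;

	pthread_mutex_lock(&lock);
	grabbed = pending;                   /* detach the whole list ... */
	pending = NULL;
	pthread_mutex_unlock(&lock);         /* ... then work outside the lock */

	for (struct node *n = grabbed; n; n = n->next)
		printf("dismissing request %d\n", n->id);
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	pending = &a;
	dismiss_all();
	return 0;
}
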

View file

@ -667,12 +667,30 @@ NCR_700_chip_setup(struct Scsi_Host *host)
__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP); __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
if(hostdata->chip710) { if(hostdata->chip710) {
__u8 burst_disable = hostdata->burst_disable __u8 burst_disable = 0;
? BURST_DISABLE : 0; __u8 burst_length = 0;
switch (hostdata->burst_length) {
case 1:
burst_length = BURST_LENGTH_1;
break;
case 2:
burst_length = BURST_LENGTH_2;
break;
case 4:
burst_length = BURST_LENGTH_4;
break;
case 8:
burst_length = BURST_LENGTH_8;
break;
default:
burst_disable = BURST_DISABLE;
break;
}
dcntl_extra = COMPAT_700_MODE; dcntl_extra = COMPAT_700_MODE;
NCR_700_writeb(dcntl_extra, host, DCNTL_REG); NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra, NCR_700_writeb(burst_length | hostdata->dmode_extra,
host, DMODE_710_REG); host, DMODE_710_REG);
NCR_700_writeb(burst_disable | (hostdata->differential ? NCR_700_writeb(burst_disable | (hostdata->differential ?
DIFF : 0), host, CTEST7_REG); DIFF : 0), host, CTEST7_REG);
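
Note on the 53c700 hunk above: the single burst_disable flag becomes a 4-bit burst_length field, and the requested length (1, 2, 4 or 8 transfers) is mapped onto the chip's DMODE encoding, with any other value disabling bursting via CTEST7. The stand-alone sketch below shows only the shape of that mapping; the register constants are placeholders, not the real 53c710 bit values:

/* Stand-alone sketch of the burst-length -> register-bits mapping;
 * the encodings below are placeholders, not the real 53c710 values. */
#include <stdio.h>

#define FAKE_BURST_1       0x00
#define FAKE_BURST_2       0x40
#define FAKE_BURST_4       0x80
#define FAKE_BURST_8       0xc0
#define FAKE_BURST_DISABLE 0x80    /* would land in a CTEST7-like register */

static void pick_burst(unsigned int requested,
		       unsigned char *dmode_bits, unsigned char *ctest7_bits)
{
	*dmode_bits = 0;
	*ctest7_bits = 0;
	switch (requested) {
	case 1: *dmode_bits = FAKE_BURST_1; break;
	case 2: *dmode_bits = FAKE_BURST_2; break;
	case 4: *dmode_bits = FAKE_BURST_4; break;
	case 8: *dmode_bits = FAKE_BURST_8; break;
	default: *ctest7_bits = FAKE_BURST_DISABLE; break;  /* unsupported: disable bursting */
	}
}

int main(void)
{
	unsigned char dmode, ctest7;

	for (unsigned int len = 0; len <= 8; len++) {
		pick_burst(len, &dmode, &ctest7);
		printf("len %u -> dmode 0x%02x ctest7 0x%02x\n", len, dmode, ctest7);
	}
	return 0;
}
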

View file

@ -203,7 +203,7 @@ struct NCR_700_Host_Parameters {
__u32 force_le_on_be:1; __u32 force_le_on_be:1;
#endif #endif
__u32 chip710:1; /* set if really a 710 not 700 */ __u32 chip710:1; /* set if really a 710 not 700 */
__u32 burst_disable:1; /* set to 1 to disable 710 bursting */ __u32 burst_length:4; /* set to 0 to disable 710 bursting */
/* NOTHING BELOW HERE NEEDS ALTERING */ /* NOTHING BELOW HERE NEEDS ALTERING */
__u32 fast:1; /* if we can alter the SCSI bus clock __u32 fast:1; /* if we can alter the SCSI bus clock

View file

@ -4399,7 +4399,7 @@ abort_connected (struct Scsi_Host *host) {
* account the current synchronous offset) * account the current synchronous offset)
*/ */
sstat = (NCR53c8x0_read8 (SSTAT2_REG); sstat = NCR53c8x0_read8 (SSTAT2_REG);
offset = OFFSET (sstat & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT; offset = OFFSET (sstat & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
phase = sstat & SSTAT2_PHASE_MASK; phase = sstat & SSTAT2_PHASE_MASK;
@ -5422,7 +5422,7 @@ insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
--buffers, offset += segment->length, ++segment) --buffers, offset += segment->length, ++segment)
#if 0 #if 0
printk("scsi%d: comparing 0x%p to 0x%p\n", printk("scsi%d: comparing 0x%p to 0x%p\n",
cmd->device->host->host_no, saved, page_address(segment->page+segment->offset); cmd->device->host->host_no, saved, page_address(segment->page+segment->offset));
#else #else
; ;
#endif #endif

View file

@ -192,7 +192,7 @@ static void BusLogic_InitializeCCBs(struct BusLogic_HostAdapter *HostAdapter, vo
BusLogic_CreateInitialCCBs allocates the initial CCBs for Host Adapter. BusLogic_CreateInitialCCBs allocates the initial CCBs for Host Adapter.
*/ */
static boolean __init BusLogic_CreateInitialCCBs(struct BusLogic_HostAdapter *HostAdapter) static bool __init BusLogic_CreateInitialCCBs(struct BusLogic_HostAdapter *HostAdapter)
{ {
int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(struct BusLogic_CCB); int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(struct BusLogic_CCB);
void *BlockPointer; void *BlockPointer;
@ -238,7 +238,7 @@ static void BusLogic_DestroyCCBs(struct BusLogic_HostAdapter *HostAdapter)
multiple host adapters share the same IRQ Channel. multiple host adapters share the same IRQ Channel.
*/ */
static void BusLogic_CreateAdditionalCCBs(struct BusLogic_HostAdapter *HostAdapter, int AdditionalCCBs, boolean SuccessMessageP) static void BusLogic_CreateAdditionalCCBs(struct BusLogic_HostAdapter *HostAdapter, int AdditionalCCBs, bool SuccessMessageP)
{ {
int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(struct BusLogic_CCB); int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(struct BusLogic_CCB);
int PreviouslyAllocated = HostAdapter->AllocatedCCBs; int PreviouslyAllocated = HostAdapter->AllocatedCCBs;
@ -362,10 +362,8 @@ static int BusLogic_Command(struct BusLogic_HostAdapter *HostAdapter, enum BusLo
interrupt could occur if the IRQ Channel was previously enabled by another interrupt could occur if the IRQ Channel was previously enabled by another
BusLogic Host Adapter or another driver sharing the same IRQ Channel. BusLogic Host Adapter or another driver sharing the same IRQ Channel.
*/ */
if (!HostAdapter->IRQ_ChannelAcquired) { if (!HostAdapter->IRQ_ChannelAcquired)
local_irq_save(ProcessorFlags); local_irq_save(ProcessorFlags);
local_irq_disable();
}
/* /*
Wait for the Host Adapter Ready bit to be set and the Command/Parameter Wait for the Host Adapter Ready bit to be set and the Command/Parameter
Register Busy bit to be reset in the Status Register. Register Busy bit to be reset in the Status Register.
@ -639,9 +637,9 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
struct BusLogic_ProbeInfo *PrimaryProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount]; struct BusLogic_ProbeInfo *PrimaryProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount];
int NonPrimaryPCIMultiMasterIndex = BusLogic_ProbeInfoCount + 1; int NonPrimaryPCIMultiMasterIndex = BusLogic_ProbeInfoCount + 1;
int NonPrimaryPCIMultiMasterCount = 0, PCIMultiMasterCount = 0; int NonPrimaryPCIMultiMasterCount = 0, PCIMultiMasterCount = 0;
boolean ForceBusDeviceScanningOrder = false; bool ForceBusDeviceScanningOrder = false;
boolean ForceBusDeviceScanningOrderChecked = false; bool ForceBusDeviceScanningOrderChecked = false;
boolean StandardAddressSeen[6]; bool StandardAddressSeen[6];
struct pci_dev *PCI_Device = NULL; struct pci_dev *PCI_Device = NULL;
int i; int i;
if (BusLogic_ProbeInfoCount >= BusLogic_MaxHostAdapters) if (BusLogic_ProbeInfoCount >= BusLogic_MaxHostAdapters)
@ -1011,7 +1009,7 @@ static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
BusLogic_Failure prints a standardized error message, and then returns false. BusLogic_Failure prints a standardized error message, and then returns false.
*/ */
static boolean BusLogic_Failure(struct BusLogic_HostAdapter *HostAdapter, char *ErrorMessage) static bool BusLogic_Failure(struct BusLogic_HostAdapter *HostAdapter, char *ErrorMessage)
{ {
BusLogic_AnnounceDriver(HostAdapter); BusLogic_AnnounceDriver(HostAdapter);
if (HostAdapter->HostAdapterBusType == BusLogic_PCI_Bus) { if (HostAdapter->HostAdapterBusType == BusLogic_PCI_Bus) {
@ -1030,7 +1028,7 @@ static boolean BusLogic_Failure(struct BusLogic_HostAdapter *HostAdapter, char *
BusLogic_ProbeHostAdapter probes for a BusLogic Host Adapter. BusLogic_ProbeHostAdapter probes for a BusLogic Host Adapter.
*/ */
static boolean __init BusLogic_ProbeHostAdapter(struct BusLogic_HostAdapter *HostAdapter) static bool __init BusLogic_ProbeHostAdapter(struct BusLogic_HostAdapter *HostAdapter)
{ {
union BusLogic_StatusRegister StatusRegister; union BusLogic_StatusRegister StatusRegister;
union BusLogic_InterruptRegister InterruptRegister; union BusLogic_InterruptRegister InterruptRegister;
@ -1101,8 +1099,8 @@ static boolean __init BusLogic_ProbeHostAdapter(struct BusLogic_HostAdapter *Hos
SCSI Bus Reset. SCSI Bus Reset.
*/ */
static boolean BusLogic_HardwareResetHostAdapter(struct BusLogic_HostAdapter static bool BusLogic_HardwareResetHostAdapter(struct BusLogic_HostAdapter
*HostAdapter, boolean HardReset) *HostAdapter, bool HardReset)
{ {
union BusLogic_StatusRegister StatusRegister; union BusLogic_StatusRegister StatusRegister;
int TimeoutCounter; int TimeoutCounter;
@ -1205,11 +1203,11 @@ static boolean BusLogic_HardwareResetHostAdapter(struct BusLogic_HostAdapter
Host Adapter. Host Adapter.
*/ */
static boolean __init BusLogic_CheckHostAdapter(struct BusLogic_HostAdapter *HostAdapter) static bool __init BusLogic_CheckHostAdapter(struct BusLogic_HostAdapter *HostAdapter)
{ {
struct BusLogic_ExtendedSetupInformation ExtendedSetupInformation; struct BusLogic_ExtendedSetupInformation ExtendedSetupInformation;
unsigned char RequestedReplyLength; unsigned char RequestedReplyLength;
boolean Result = true; bool Result = true;
/* /*
FlashPoint Host Adapters do not require this protection. FlashPoint Host Adapters do not require this protection.
*/ */
@ -1239,7 +1237,7 @@ static boolean __init BusLogic_CheckHostAdapter(struct BusLogic_HostAdapter *Hos
from Host Adapter and initializes the Host Adapter structure. from Host Adapter and initializes the Host Adapter structure.
*/ */
static boolean __init BusLogic_ReadHostAdapterConfiguration(struct BusLogic_HostAdapter static bool __init BusLogic_ReadHostAdapterConfiguration(struct BusLogic_HostAdapter
*HostAdapter) *HostAdapter)
{ {
struct BusLogic_BoardID BoardID; struct BusLogic_BoardID BoardID;
@ -1686,14 +1684,14 @@ static boolean __init BusLogic_ReadHostAdapterConfiguration(struct BusLogic_Host
Host Adapter. Host Adapter.
*/ */
static boolean __init BusLogic_ReportHostAdapterConfiguration(struct BusLogic_HostAdapter static bool __init BusLogic_ReportHostAdapterConfiguration(struct BusLogic_HostAdapter
*HostAdapter) *HostAdapter)
{ {
unsigned short AllTargetsMask = (1 << HostAdapter->MaxTargetDevices) - 1; unsigned short AllTargetsMask = (1 << HostAdapter->MaxTargetDevices) - 1;
unsigned short SynchronousPermitted, FastPermitted; unsigned short SynchronousPermitted, FastPermitted;
unsigned short UltraPermitted, WidePermitted; unsigned short UltraPermitted, WidePermitted;
unsigned short DisconnectPermitted, TaggedQueuingPermitted; unsigned short DisconnectPermitted, TaggedQueuingPermitted;
boolean CommonSynchronousNegotiation, CommonTaggedQueueDepth; bool CommonSynchronousNegotiation, CommonTaggedQueueDepth;
char SynchronousString[BusLogic_MaxTargetDevices + 1]; char SynchronousString[BusLogic_MaxTargetDevices + 1];
char WideString[BusLogic_MaxTargetDevices + 1]; char WideString[BusLogic_MaxTargetDevices + 1];
char DisconnectString[BusLogic_MaxTargetDevices + 1]; char DisconnectString[BusLogic_MaxTargetDevices + 1];
@ -1835,7 +1833,7 @@ static boolean __init BusLogic_ReportHostAdapterConfiguration(struct BusLogic_Ho
Host Adapter. Host Adapter.
*/ */
static boolean __init BusLogic_AcquireResources(struct BusLogic_HostAdapter *HostAdapter) static bool __init BusLogic_AcquireResources(struct BusLogic_HostAdapter *HostAdapter)
{ {
if (HostAdapter->IRQ_Channel == 0) { if (HostAdapter->IRQ_Channel == 0) {
BusLogic_Error("NO LEGAL INTERRUPT CHANNEL ASSIGNED - DETACHING\n", HostAdapter); BusLogic_Error("NO LEGAL INTERRUPT CHANNEL ASSIGNED - DETACHING\n", HostAdapter);
@ -1903,7 +1901,7 @@ static void BusLogic_ReleaseResources(struct BusLogic_HostAdapter *HostAdapter)
of the Host Adapter from its initial power on or hard reset state. of the Host Adapter from its initial power on or hard reset state.
*/ */
static boolean BusLogic_InitializeHostAdapter(struct BusLogic_HostAdapter static bool BusLogic_InitializeHostAdapter(struct BusLogic_HostAdapter
*HostAdapter) *HostAdapter)
{ {
struct BusLogic_ExtendedMailboxRequest ExtendedMailboxRequest; struct BusLogic_ExtendedMailboxRequest ExtendedMailboxRequest;
@ -2002,7 +2000,7 @@ static boolean BusLogic_InitializeHostAdapter(struct BusLogic_HostAdapter
through Host Adapter. through Host Adapter.
*/ */
static boolean __init BusLogic_TargetDeviceInquiry(struct BusLogic_HostAdapter static bool __init BusLogic_TargetDeviceInquiry(struct BusLogic_HostAdapter
*HostAdapter) *HostAdapter)
{ {
u16 InstalledDevices; u16 InstalledDevices;
@ -2739,7 +2737,7 @@ static irqreturn_t BusLogic_InterruptHandler(int IRQ_Channel, void *DeviceIdenti
already have been acquired by the caller. already have been acquired by the caller.
*/ */
static boolean BusLogic_WriteOutgoingMailbox(struct BusLogic_HostAdapter static bool BusLogic_WriteOutgoingMailbox(struct BusLogic_HostAdapter
*HostAdapter, enum BusLogic_ActionCode ActionCode, struct BusLogic_CCB *CCB) *HostAdapter, enum BusLogic_ActionCode ActionCode, struct BusLogic_CCB *CCB)
{ {
struct BusLogic_OutgoingMailbox *NextOutgoingMailbox; struct BusLogic_OutgoingMailbox *NextOutgoingMailbox;
@ -3058,7 +3056,7 @@ static int BusLogic_AbortCommand(struct scsi_cmnd *Command)
currently executing SCSI Commands as having been Reset. currently executing SCSI Commands as having been Reset.
*/ */
static int BusLogic_ResetHostAdapter(struct BusLogic_HostAdapter *HostAdapter, boolean HardReset) static int BusLogic_ResetHostAdapter(struct BusLogic_HostAdapter *HostAdapter, bool HardReset)
{ {
struct BusLogic_CCB *CCB; struct BusLogic_CCB *CCB;
int TargetID; int TargetID;
@ -3309,7 +3307,7 @@ Target Requested Completed Requested Completed Requested Completed\n\
static void BusLogic_Message(enum BusLogic_MessageLevel MessageLevel, char *Format, struct BusLogic_HostAdapter *HostAdapter, ...) static void BusLogic_Message(enum BusLogic_MessageLevel MessageLevel, char *Format, struct BusLogic_HostAdapter *HostAdapter, ...)
{ {
static char Buffer[BusLogic_LineBufferSize]; static char Buffer[BusLogic_LineBufferSize];
static boolean BeginningOfLine = true; static bool BeginningOfLine = true;
va_list Arguments; va_list Arguments;
int Length = 0; int Length = 0;
va_start(Arguments, HostAdapter); va_start(Arguments, HostAdapter);
@ -3347,7 +3345,7 @@ static void BusLogic_Message(enum BusLogic_MessageLevel MessageLevel, char *Form
and updates the pointer if the keyword is recognized and false otherwise. and updates the pointer if the keyword is recognized and false otherwise.
*/ */
static boolean __init BusLogic_ParseKeyword(char **StringPointer, char *Keyword) static bool __init BusLogic_ParseKeyword(char **StringPointer, char *Keyword)
{ {
char *Pointer = *StringPointer; char *Pointer = *StringPointer;
while (*Keyword != '\0') { while (*Keyword != '\0') {
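
Note on the BusLogic_Command() change above: the explicit local_irq_disable() is dropped because local_irq_save() already records the flags and disables interrupts in one step. The userspace snippet below is only an analogy using POSIX signal masks, where the blocking call likewise saves the old mask at the same time, so a second "block" call would be redundant; it is not the kernel primitive:

/* Analogy only: SIG_BLOCK with an old-mask pointer both saves and blocks,
 * mirroring local_irq_save() vs. local_irq_save() + local_irq_disable(). */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t all, saved;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &saved);   /* save old mask AND block */

	/* ... critical section that must not be interrupted by signals ... */
	puts("inside the protected region");

	sigprocmask(SIG_SETMASK, &saved, NULL); /* restore, like local_irq_restore() */
	return 0;
}
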

View file

@ -233,12 +233,6 @@ enum BusLogic_BIOS_DiskGeometryTranslation {
} PACKED; } PACKED;
/*
Define a Boolean data type.
*/
typedef bool boolean;
/* /*
Define a 10^18 Statistics Byte Counter data type. Define a 10^18 Statistics Byte Counter data type.
*/ */
@ -269,19 +263,19 @@ struct BusLogic_ProbeInfo {
*/ */
struct BusLogic_ProbeOptions { struct BusLogic_ProbeOptions {
boolean NoProbe:1; /* Bit 0 */ bool NoProbe:1; /* Bit 0 */
boolean NoProbeISA:1; /* Bit 1 */ bool NoProbeISA:1; /* Bit 1 */
boolean NoProbePCI:1; /* Bit 2 */ bool NoProbePCI:1; /* Bit 2 */
boolean NoSortPCI:1; /* Bit 3 */ bool NoSortPCI:1; /* Bit 3 */
boolean MultiMasterFirst:1; /* Bit 4 */ bool MultiMasterFirst:1;/* Bit 4 */
boolean FlashPointFirst:1; /* Bit 5 */ bool FlashPointFirst:1; /* Bit 5 */
boolean LimitedProbeISA:1; /* Bit 6 */ bool LimitedProbeISA:1; /* Bit 6 */
boolean Probe330:1; /* Bit 7 */ bool Probe330:1; /* Bit 7 */
boolean Probe334:1; /* Bit 8 */ bool Probe334:1; /* Bit 8 */
boolean Probe230:1; /* Bit 9 */ bool Probe230:1; /* Bit 9 */
boolean Probe234:1; /* Bit 10 */ bool Probe234:1; /* Bit 10 */
boolean Probe130:1; /* Bit 11 */ bool Probe130:1; /* Bit 11 */
boolean Probe134:1; /* Bit 12 */ bool Probe134:1; /* Bit 12 */
}; };
/* /*
@ -289,10 +283,10 @@ struct BusLogic_ProbeOptions {
*/ */
struct BusLogic_GlobalOptions { struct BusLogic_GlobalOptions {
boolean TraceProbe:1; /* Bit 0 */ bool TraceProbe:1; /* Bit 0 */
boolean TraceHardwareReset:1; /* Bit 1 */ bool TraceHardwareReset:1; /* Bit 1 */
boolean TraceConfiguration:1; /* Bit 2 */ bool TraceConfiguration:1; /* Bit 2 */
boolean TraceErrors:1; /* Bit 3 */ bool TraceErrors:1; /* Bit 3 */
}; };
/* /*
@ -300,7 +294,7 @@ struct BusLogic_GlobalOptions {
*/ */
struct BusLogic_LocalOptions { struct BusLogic_LocalOptions {
boolean InhibitTargetInquiry:1; /* Bit 0 */ bool InhibitTargetInquiry:1; /* Bit 0 */
}; };
/* /*
@ -322,10 +316,10 @@ union BusLogic_ControlRegister {
unsigned char All; unsigned char All;
struct { struct {
unsigned char:4; /* Bits 0-3 */ unsigned char:4; /* Bits 0-3 */
boolean SCSIBusReset:1; /* Bit 4 */ bool SCSIBusReset:1; /* Bit 4 */
boolean InterruptReset:1; /* Bit 5 */ bool InterruptReset:1; /* Bit 5 */
boolean SoftReset:1; /* Bit 6 */ bool SoftReset:1; /* Bit 6 */
boolean HardReset:1; /* Bit 7 */ bool HardReset:1; /* Bit 7 */
} cr; } cr;
}; };
@ -336,14 +330,14 @@ union BusLogic_ControlRegister {
union BusLogic_StatusRegister { union BusLogic_StatusRegister {
unsigned char All; unsigned char All;
struct { struct {
boolean CommandInvalid:1; /* Bit 0 */ bool CommandInvalid:1; /* Bit 0 */
boolean Reserved:1; /* Bit 1 */ bool Reserved:1; /* Bit 1 */
boolean DataInRegisterReady:1; /* Bit 2 */ bool DataInRegisterReady:1; /* Bit 2 */
boolean CommandParameterRegisterBusy:1; /* Bit 3 */ bool CommandParameterRegisterBusy:1; /* Bit 3 */
boolean HostAdapterReady:1; /* Bit 4 */ bool HostAdapterReady:1; /* Bit 4 */
boolean InitializationRequired:1; /* Bit 5 */ bool InitializationRequired:1; /* Bit 5 */
boolean DiagnosticFailure:1; /* Bit 6 */ bool DiagnosticFailure:1; /* Bit 6 */
boolean DiagnosticActive:1; /* Bit 7 */ bool DiagnosticActive:1; /* Bit 7 */
} sr; } sr;
}; };
@ -354,12 +348,12 @@ union BusLogic_StatusRegister {
union BusLogic_InterruptRegister { union BusLogic_InterruptRegister {
unsigned char All; unsigned char All;
struct { struct {
boolean IncomingMailboxLoaded:1; /* Bit 0 */ bool IncomingMailboxLoaded:1; /* Bit 0 */
boolean OutgoingMailboxAvailable:1; /* Bit 1 */ bool OutgoingMailboxAvailable:1;/* Bit 1 */
boolean CommandComplete:1; /* Bit 2 */ bool CommandComplete:1; /* Bit 2 */
boolean ExternalBusReset:1; /* Bit 3 */ bool ExternalBusReset:1; /* Bit 3 */
unsigned char Reserved:3; /* Bits 4-6 */ unsigned char Reserved:3; /* Bits 4-6 */
boolean InterruptValid:1; /* Bit 7 */ bool InterruptValid:1; /* Bit 7 */
} ir; } ir;
}; };
@ -373,7 +367,7 @@ union BusLogic_GeometryRegister {
enum BusLogic_BIOS_DiskGeometryTranslation Drive0Geometry:2; /* Bits 0-1 */ enum BusLogic_BIOS_DiskGeometryTranslation Drive0Geometry:2; /* Bits 0-1 */
enum BusLogic_BIOS_DiskGeometryTranslation Drive1Geometry:2; /* Bits 2-3 */ enum BusLogic_BIOS_DiskGeometryTranslation Drive1Geometry:2; /* Bits 2-3 */
unsigned char:3; /* Bits 4-6 */ unsigned char:3; /* Bits 4-6 */
boolean ExtendedTranslationEnabled:1; /* Bit 7 */ bool ExtendedTranslationEnabled:1; /* Bit 7 */
} gr; } gr;
}; };
@ -445,16 +439,16 @@ struct BusLogic_BoardID {
struct BusLogic_Configuration { struct BusLogic_Configuration {
unsigned char:5; /* Byte 0 Bits 0-4 */ unsigned char:5; /* Byte 0 Bits 0-4 */
boolean DMA_Channel5:1; /* Byte 0 Bit 5 */ bool DMA_Channel5:1; /* Byte 0 Bit 5 */
boolean DMA_Channel6:1; /* Byte 0 Bit 6 */ bool DMA_Channel6:1; /* Byte 0 Bit 6 */
boolean DMA_Channel7:1; /* Byte 0 Bit 7 */ bool DMA_Channel7:1; /* Byte 0 Bit 7 */
boolean IRQ_Channel9:1; /* Byte 1 Bit 0 */ bool IRQ_Channel9:1; /* Byte 1 Bit 0 */
boolean IRQ_Channel10:1; /* Byte 1 Bit 1 */ bool IRQ_Channel10:1; /* Byte 1 Bit 1 */
boolean IRQ_Channel11:1; /* Byte 1 Bit 2 */ bool IRQ_Channel11:1; /* Byte 1 Bit 2 */
boolean IRQ_Channel12:1; /* Byte 1 Bit 3 */ bool IRQ_Channel12:1; /* Byte 1 Bit 3 */
unsigned char:1; /* Byte 1 Bit 4 */ unsigned char:1; /* Byte 1 Bit 4 */
boolean IRQ_Channel14:1; /* Byte 1 Bit 5 */ bool IRQ_Channel14:1; /* Byte 1 Bit 5 */
boolean IRQ_Channel15:1; /* Byte 1 Bit 6 */ bool IRQ_Channel15:1; /* Byte 1 Bit 6 */
unsigned char:1; /* Byte 1 Bit 7 */ unsigned char:1; /* Byte 1 Bit 7 */
unsigned char HostAdapterID:4; /* Byte 2 Bits 0-3 */ unsigned char HostAdapterID:4; /* Byte 2 Bits 0-3 */
unsigned char:4; /* Byte 2 Bits 4-7 */ unsigned char:4; /* Byte 2 Bits 4-7 */
@ -467,12 +461,12 @@ struct BusLogic_Configuration {
struct BusLogic_SynchronousValue { struct BusLogic_SynchronousValue {
unsigned char Offset:4; /* Bits 0-3 */ unsigned char Offset:4; /* Bits 0-3 */
unsigned char TransferPeriod:3; /* Bits 4-6 */ unsigned char TransferPeriod:3; /* Bits 4-6 */
boolean Synchronous:1; /* Bit 7 */ bool Synchronous:1; /* Bit 7 */
}; };
struct BusLogic_SetupInformation { struct BusLogic_SetupInformation {
boolean SynchronousInitiationEnabled:1; /* Byte 0 Bit 0 */ bool SynchronousInitiationEnabled:1; /* Byte 0 Bit 0 */
boolean ParityCheckingEnabled:1; /* Byte 0 Bit 1 */ bool ParityCheckingEnabled:1; /* Byte 0 Bit 1 */
unsigned char:6; /* Byte 0 Bits 2-7 */ unsigned char:6; /* Byte 0 Bits 2-7 */
unsigned char BusTransferRate; /* Byte 1 */ unsigned char BusTransferRate; /* Byte 1 */
unsigned char PreemptTimeOnBus; /* Byte 2 */ unsigned char PreemptTimeOnBus; /* Byte 2 */
@ -523,13 +517,13 @@ enum BusLogic_ISACompatibleIOPort {
struct BusLogic_PCIHostAdapterInformation { struct BusLogic_PCIHostAdapterInformation {
enum BusLogic_ISACompatibleIOPort ISACompatibleIOPort; /* Byte 0 */ enum BusLogic_ISACompatibleIOPort ISACompatibleIOPort; /* Byte 0 */
unsigned char PCIAssignedIRQChannel; /* Byte 1 */ unsigned char PCIAssignedIRQChannel; /* Byte 1 */
boolean LowByteTerminated:1; /* Byte 2 Bit 0 */ bool LowByteTerminated:1; /* Byte 2 Bit 0 */
boolean HighByteTerminated:1; /* Byte 2 Bit 1 */ bool HighByteTerminated:1; /* Byte 2 Bit 1 */
unsigned char:2; /* Byte 2 Bits 2-3 */ unsigned char:2; /* Byte 2 Bits 2-3 */
boolean JP1:1; /* Byte 2 Bit 4 */ bool JP1:1; /* Byte 2 Bit 4 */
boolean JP2:1; /* Byte 2 Bit 5 */ bool JP2:1; /* Byte 2 Bit 5 */
boolean JP3:1; /* Byte 2 Bit 6 */ bool JP3:1; /* Byte 2 Bit 6 */
boolean GenericInfoValid:1; /* Byte 2 Bit 7 */ bool GenericInfoValid:1;/* Byte 2 Bit 7 */
unsigned char:8; /* Byte 3 */ unsigned char:8; /* Byte 3 */
}; };
@ -545,17 +539,17 @@ struct BusLogic_ExtendedSetupInformation {
u32 BaseMailboxAddress; /* Bytes 5-8 */ u32 BaseMailboxAddress; /* Bytes 5-8 */
struct { struct {
unsigned char:2; /* Byte 9 Bits 0-1 */ unsigned char:2; /* Byte 9 Bits 0-1 */
boolean FastOnEISA:1; /* Byte 9 Bit 2 */ bool FastOnEISA:1; /* Byte 9 Bit 2 */
unsigned char:3; /* Byte 9 Bits 3-5 */ unsigned char:3; /* Byte 9 Bits 3-5 */
boolean LevelSensitiveInterrupt:1; /* Byte 9 Bit 6 */ bool LevelSensitiveInterrupt:1; /* Byte 9 Bit 6 */
unsigned char:1; /* Byte 9 Bit 7 */ unsigned char:1; /* Byte 9 Bit 7 */
} Misc; } Misc;
unsigned char FirmwareRevision[3]; /* Bytes 10-12 */ unsigned char FirmwareRevision[3]; /* Bytes 10-12 */
boolean HostWideSCSI:1; /* Byte 13 Bit 0 */ bool HostWideSCSI:1; /* Byte 13 Bit 0 */
boolean HostDifferentialSCSI:1; /* Byte 13 Bit 1 */ bool HostDifferentialSCSI:1; /* Byte 13 Bit 1 */
boolean HostSupportsSCAM:1; /* Byte 13 Bit 2 */ bool HostSupportsSCAM:1; /* Byte 13 Bit 2 */
boolean HostUltraSCSI:1; /* Byte 13 Bit 3 */ bool HostUltraSCSI:1; /* Byte 13 Bit 3 */
boolean HostSmartTermination:1; /* Byte 13 Bit 4 */ bool HostSmartTermination:1; /* Byte 13 Bit 4 */
unsigned char:3; /* Byte 13 Bits 5-7 */ unsigned char:3; /* Byte 13 Bits 5-7 */
} PACKED; } PACKED;
@ -590,35 +584,35 @@ struct BusLogic_AutoSCSIData {
unsigned char InformationByteCount; /* Byte 2 */ unsigned char InformationByteCount; /* Byte 2 */
unsigned char HostAdapterType[6]; /* Bytes 3-8 */ unsigned char HostAdapterType[6]; /* Bytes 3-8 */
unsigned char:8; /* Byte 9 */ unsigned char:8; /* Byte 9 */
boolean FloppyEnabled:1; /* Byte 10 Bit 0 */ bool FloppyEnabled:1; /* Byte 10 Bit 0 */
boolean FloppySecondary:1; /* Byte 10 Bit 1 */ bool FloppySecondary:1; /* Byte 10 Bit 1 */
boolean LevelSensitiveInterrupt:1; /* Byte 10 Bit 2 */ bool LevelSensitiveInterrupt:1; /* Byte 10 Bit 2 */
unsigned char:2; /* Byte 10 Bits 3-4 */ unsigned char:2; /* Byte 10 Bits 3-4 */
unsigned char SystemRAMAreaForBIOS:3; /* Byte 10 Bits 5-7 */ unsigned char SystemRAMAreaForBIOS:3; /* Byte 10 Bits 5-7 */
unsigned char DMA_Channel:7; /* Byte 11 Bits 0-6 */ unsigned char DMA_Channel:7; /* Byte 11 Bits 0-6 */
boolean DMA_AutoConfiguration:1; /* Byte 11 Bit 7 */ bool DMA_AutoConfiguration:1; /* Byte 11 Bit 7 */
unsigned char IRQ_Channel:7; /* Byte 12 Bits 0-6 */ unsigned char IRQ_Channel:7; /* Byte 12 Bits 0-6 */
boolean IRQ_AutoConfiguration:1; /* Byte 12 Bit 7 */ bool IRQ_AutoConfiguration:1; /* Byte 12 Bit 7 */
unsigned char DMA_TransferRate; /* Byte 13 */ unsigned char DMA_TransferRate; /* Byte 13 */
unsigned char SCSI_ID; /* Byte 14 */ unsigned char SCSI_ID; /* Byte 14 */
boolean LowByteTerminated:1; /* Byte 15 Bit 0 */ bool LowByteTerminated:1; /* Byte 15 Bit 0 */
boolean ParityCheckingEnabled:1; /* Byte 15 Bit 1 */ bool ParityCheckingEnabled:1; /* Byte 15 Bit 1 */
boolean HighByteTerminated:1; /* Byte 15 Bit 2 */ bool HighByteTerminated:1; /* Byte 15 Bit 2 */
boolean NoisyCablingEnvironment:1; /* Byte 15 Bit 3 */ bool NoisyCablingEnvironment:1; /* Byte 15 Bit 3 */
boolean FastSynchronousNegotiation:1; /* Byte 15 Bit 4 */ bool FastSynchronousNegotiation:1; /* Byte 15 Bit 4 */
boolean BusResetEnabled:1; /* Byte 15 Bit 5 */ bool BusResetEnabled:1; /* Byte 15 Bit 5 */
boolean:1; /* Byte 15 Bit 6 */ bool:1; /* Byte 15 Bit 6 */
boolean ActiveNegationEnabled:1; /* Byte 15 Bit 7 */ bool ActiveNegationEnabled:1; /* Byte 15 Bit 7 */
unsigned char BusOnDelay; /* Byte 16 */ unsigned char BusOnDelay; /* Byte 16 */
unsigned char BusOffDelay; /* Byte 17 */ unsigned char BusOffDelay; /* Byte 17 */
boolean HostAdapterBIOSEnabled:1; /* Byte 18 Bit 0 */ bool HostAdapterBIOSEnabled:1; /* Byte 18 Bit 0 */
boolean BIOSRedirectionOfINT19Enabled:1; /* Byte 18 Bit 1 */ bool BIOSRedirectionOfINT19Enabled:1; /* Byte 18 Bit 1 */
boolean ExtendedTranslationEnabled:1; /* Byte 18 Bit 2 */ bool ExtendedTranslationEnabled:1; /* Byte 18 Bit 2 */
boolean MapRemovableAsFixedEnabled:1; /* Byte 18 Bit 3 */ bool MapRemovableAsFixedEnabled:1; /* Byte 18 Bit 3 */
boolean:1; /* Byte 18 Bit 4 */ bool:1; /* Byte 18 Bit 4 */
boolean BIOSSupportsMoreThan2DrivesEnabled:1; /* Byte 18 Bit 5 */ bool BIOSSupportsMoreThan2DrivesEnabled:1; /* Byte 18 Bit 5 */
boolean BIOSInterruptModeEnabled:1; /* Byte 18 Bit 6 */ bool BIOSInterruptModeEnabled:1; /* Byte 18 Bit 6 */
boolean FlopticalSupportEnabled:1; /* Byte 19 Bit 7 */ bool FlopticalSupportEnabled:1; /* Byte 19 Bit 7 */
unsigned short DeviceEnabled; /* Bytes 19-20 */ unsigned short DeviceEnabled; /* Bytes 19-20 */
unsigned short WidePermitted; /* Bytes 21-22 */ unsigned short WidePermitted; /* Bytes 21-22 */
unsigned short FastPermitted; /* Bytes 23-24 */ unsigned short FastPermitted; /* Bytes 23-24 */
@ -628,22 +622,22 @@ struct BusLogic_AutoSCSIData {
unsigned short IgnoreInBIOSScan; /* Bytes 31-32 */ unsigned short IgnoreInBIOSScan; /* Bytes 31-32 */
unsigned char PCIInterruptPin:2; /* Byte 33 Bits 0-1 */ unsigned char PCIInterruptPin:2; /* Byte 33 Bits 0-1 */
unsigned char HostAdapterIOPortAddress:2; /* Byte 33 Bits 2-3 */ unsigned char HostAdapterIOPortAddress:2; /* Byte 33 Bits 2-3 */
boolean StrictRoundRobinModeEnabled:1; /* Byte 33 Bit 4 */ bool StrictRoundRobinModeEnabled:1; /* Byte 33 Bit 4 */
boolean VESABusSpeedGreaterThan33MHz:1; /* Byte 33 Bit 5 */ bool VESABusSpeedGreaterThan33MHz:1; /* Byte 33 Bit 5 */
boolean VESABurstWriteEnabled:1; /* Byte 33 Bit 6 */ bool VESABurstWriteEnabled:1; /* Byte 33 Bit 6 */
boolean VESABurstReadEnabled:1; /* Byte 33 Bit 7 */ bool VESABurstReadEnabled:1; /* Byte 33 Bit 7 */
unsigned short UltraPermitted; /* Bytes 34-35 */ unsigned short UltraPermitted; /* Bytes 34-35 */
unsigned int:32; /* Bytes 36-39 */ unsigned int:32; /* Bytes 36-39 */
unsigned char:8; /* Byte 40 */ unsigned char:8; /* Byte 40 */
unsigned char AutoSCSIMaximumLUN; /* Byte 41 */ unsigned char AutoSCSIMaximumLUN; /* Byte 41 */
boolean:1; /* Byte 42 Bit 0 */ bool:1; /* Byte 42 Bit 0 */
boolean SCAM_Dominant:1; /* Byte 42 Bit 1 */ bool SCAM_Dominant:1; /* Byte 42 Bit 1 */
boolean SCAM_Enabled:1; /* Byte 42 Bit 2 */ bool SCAM_Enabled:1; /* Byte 42 Bit 2 */
boolean SCAM_Level2:1; /* Byte 42 Bit 3 */ bool SCAM_Level2:1; /* Byte 42 Bit 3 */
unsigned char:4; /* Byte 42 Bits 4-7 */ unsigned char:4; /* Byte 42 Bits 4-7 */
boolean INT13ExtensionEnabled:1; /* Byte 43 Bit 0 */ bool INT13ExtensionEnabled:1; /* Byte 43 Bit 0 */
boolean:1; /* Byte 43 Bit 1 */ bool:1; /* Byte 43 Bit 1 */
boolean CDROMBootEnabled:1; /* Byte 43 Bit 2 */ bool CDROMBootEnabled:1; /* Byte 43 Bit 2 */
unsigned char:5; /* Byte 43 Bits 3-7 */ unsigned char:5; /* Byte 43 Bits 3-7 */
unsigned char BootTargetID:4; /* Byte 44 Bits 0-3 */ unsigned char BootTargetID:4; /* Byte 44 Bits 0-3 */
unsigned char BootChannel:4; /* Byte 44 Bits 4-7 */ unsigned char BootChannel:4; /* Byte 44 Bits 4-7 */
@ -852,7 +846,7 @@ struct BusLogic_CCB {
enum BusLogic_CCB_Opcode Opcode; /* Byte 0 */ enum BusLogic_CCB_Opcode Opcode; /* Byte 0 */
unsigned char:3; /* Byte 1 Bits 0-2 */ unsigned char:3; /* Byte 1 Bits 0-2 */
enum BusLogic_DataDirection DataDirection:2; /* Byte 1 Bits 3-4 */ enum BusLogic_DataDirection DataDirection:2; /* Byte 1 Bits 3-4 */
boolean TagEnable:1; /* Byte 1 Bit 5 */ bool TagEnable:1; /* Byte 1 Bit 5 */
enum BusLogic_QueueTag QueueTag:2; /* Byte 1 Bits 6-7 */ enum BusLogic_QueueTag QueueTag:2; /* Byte 1 Bits 6-7 */
unsigned char CDB_Length; /* Byte 2 */ unsigned char CDB_Length; /* Byte 2 */
unsigned char SenseDataLength; /* Byte 3 */ unsigned char SenseDataLength; /* Byte 3 */
@ -864,7 +858,7 @@ struct BusLogic_CCB {
enum BusLogic_TargetDeviceStatus TargetDeviceStatus; /* Byte 15 */ enum BusLogic_TargetDeviceStatus TargetDeviceStatus; /* Byte 15 */
unsigned char TargetID; /* Byte 16 */ unsigned char TargetID; /* Byte 16 */
unsigned char LogicalUnit:5; /* Byte 17 Bits 0-4 */ unsigned char LogicalUnit:5; /* Byte 17 Bits 0-4 */
boolean LegacyTagEnable:1; /* Byte 17 Bit 5 */ bool LegacyTagEnable:1; /* Byte 17 Bit 5 */
enum BusLogic_QueueTag LegacyQueueTag:2; /* Byte 17 Bits 6-7 */ enum BusLogic_QueueTag LegacyQueueTag:2; /* Byte 17 Bits 6-7 */
SCSI_CDB_T CDB; /* Bytes 18-29 */ SCSI_CDB_T CDB; /* Bytes 18-29 */
unsigned char:8; /* Byte 30 */ unsigned char:8; /* Byte 30 */
@ -939,13 +933,13 @@ struct BusLogic_DriverOptions {
*/ */
struct BusLogic_TargetFlags { struct BusLogic_TargetFlags {
boolean TargetExists:1; bool TargetExists:1;
boolean TaggedQueuingSupported:1; bool TaggedQueuingSupported:1;
boolean WideTransfersSupported:1; bool WideTransfersSupported:1;
boolean TaggedQueuingActive:1; bool TaggedQueuingActive:1;
boolean WideTransfersActive:1; bool WideTransfersActive:1;
boolean CommandSuccessfulFlag:1; bool CommandSuccessfulFlag:1;
boolean TargetInfoReported:1; bool TargetInfoReported:1;
}; };
/* /*
@ -992,7 +986,7 @@ typedef unsigned int FlashPoint_CardHandle_T;
struct FlashPoint_Info { struct FlashPoint_Info {
u32 BaseAddress; /* Bytes 0-3 */ u32 BaseAddress; /* Bytes 0-3 */
boolean Present; /* Byte 4 */ bool Present; /* Byte 4 */
unsigned char IRQ_Channel; /* Byte 5 */ unsigned char IRQ_Channel; /* Byte 5 */
unsigned char SCSI_ID; /* Byte 6 */ unsigned char SCSI_ID; /* Byte 6 */
unsigned char SCSI_LUN; /* Byte 7 */ unsigned char SCSI_LUN; /* Byte 7 */
@ -1002,15 +996,15 @@ struct FlashPoint_Info {
unsigned short UltraPermitted; /* Bytes 14-15 */ unsigned short UltraPermitted; /* Bytes 14-15 */
unsigned short DisconnectPermitted; /* Bytes 16-17 */ unsigned short DisconnectPermitted; /* Bytes 16-17 */
unsigned short WidePermitted; /* Bytes 18-19 */ unsigned short WidePermitted; /* Bytes 18-19 */
boolean ParityCheckingEnabled:1; /* Byte 20 Bit 0 */ bool ParityCheckingEnabled:1; /* Byte 20 Bit 0 */
boolean HostWideSCSI:1; /* Byte 20 Bit 1 */ bool HostWideSCSI:1; /* Byte 20 Bit 1 */
boolean HostSoftReset:1; /* Byte 20 Bit 2 */ bool HostSoftReset:1; /* Byte 20 Bit 2 */
boolean ExtendedTranslationEnabled:1; /* Byte 20 Bit 3 */ bool ExtendedTranslationEnabled:1; /* Byte 20 Bit 3 */
boolean LowByteTerminated:1; /* Byte 20 Bit 4 */ bool LowByteTerminated:1; /* Byte 20 Bit 4 */
boolean HighByteTerminated:1; /* Byte 20 Bit 5 */ bool HighByteTerminated:1; /* Byte 20 Bit 5 */
boolean ReportDataUnderrun:1; /* Byte 20 Bit 6 */ bool ReportDataUnderrun:1; /* Byte 20 Bit 6 */
boolean SCAM_Enabled:1; /* Byte 20 Bit 7 */ bool SCAM_Enabled:1; /* Byte 20 Bit 7 */
boolean SCAM_Level2:1; /* Byte 21 Bit 0 */ bool SCAM_Level2:1; /* Byte 21 Bit 0 */
unsigned char:7; /* Byte 21 Bits 1-7 */ unsigned char:7; /* Byte 21 Bits 1-7 */
unsigned char Family; /* Byte 22 */ unsigned char Family; /* Byte 22 */
unsigned char BusType; /* Byte 23 */ unsigned char BusType; /* Byte 23 */
@ -1044,29 +1038,29 @@ struct BusLogic_HostAdapter {
unsigned char IRQ_Channel; unsigned char IRQ_Channel;
unsigned char DMA_Channel; unsigned char DMA_Channel;
unsigned char SCSI_ID; unsigned char SCSI_ID;
boolean IRQ_ChannelAcquired:1; bool IRQ_ChannelAcquired:1;
boolean DMA_ChannelAcquired:1; bool DMA_ChannelAcquired:1;
boolean ExtendedTranslationEnabled:1; bool ExtendedTranslationEnabled:1;
boolean ParityCheckingEnabled:1; bool ParityCheckingEnabled:1;
boolean BusResetEnabled:1; bool BusResetEnabled:1;
boolean LevelSensitiveInterrupt:1; bool LevelSensitiveInterrupt:1;
boolean HostWideSCSI:1; bool HostWideSCSI:1;
boolean HostDifferentialSCSI:1; bool HostDifferentialSCSI:1;
boolean HostSupportsSCAM:1; bool HostSupportsSCAM:1;
boolean HostUltraSCSI:1; bool HostUltraSCSI:1;
boolean ExtendedLUNSupport:1; bool ExtendedLUNSupport:1;
boolean TerminationInfoValid:1; bool TerminationInfoValid:1;
boolean LowByteTerminated:1; bool LowByteTerminated:1;
boolean HighByteTerminated:1; bool HighByteTerminated:1;
boolean BounceBuffersRequired:1; bool BounceBuffersRequired:1;
boolean StrictRoundRobinModeSupport:1; bool StrictRoundRobinModeSupport:1;
boolean SCAM_Enabled:1; bool SCAM_Enabled:1;
boolean SCAM_Level2:1; bool SCAM_Level2:1;
boolean HostAdapterInitialized:1; bool HostAdapterInitialized:1;
boolean HostAdapterExternalReset:1; bool HostAdapterExternalReset:1;
boolean HostAdapterInternalError:1; bool HostAdapterInternalError:1;
boolean ProcessCompletedCCBsActive; bool ProcessCompletedCCBsActive;
volatile boolean HostAdapterCommandCompleted; volatile bool HostAdapterCommandCompleted;
unsigned short HostAdapterScatterGatherLimit; unsigned short HostAdapterScatterGatherLimit;
unsigned short DriverScatterGatherLimit; unsigned short DriverScatterGatherLimit;
unsigned short MaxTargetDevices; unsigned short MaxTargetDevices;
@ -1141,25 +1135,25 @@ struct SCSI_Inquiry {
unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */ unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */
unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */ unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */
unsigned char DeviceTypeModifier:7; /* Byte 1 Bits 0-6 */ unsigned char DeviceTypeModifier:7; /* Byte 1 Bits 0-6 */
boolean RMB:1; /* Byte 1 Bit 7 */ bool RMB:1; /* Byte 1 Bit 7 */
unsigned char ANSI_ApprovedVersion:3; /* Byte 2 Bits 0-2 */ unsigned char ANSI_ApprovedVersion:3; /* Byte 2 Bits 0-2 */
unsigned char ECMA_Version:3; /* Byte 2 Bits 3-5 */ unsigned char ECMA_Version:3; /* Byte 2 Bits 3-5 */
unsigned char ISO_Version:2; /* Byte 2 Bits 6-7 */ unsigned char ISO_Version:2; /* Byte 2 Bits 6-7 */
unsigned char ResponseDataFormat:4; /* Byte 3 Bits 0-3 */ unsigned char ResponseDataFormat:4; /* Byte 3 Bits 0-3 */
unsigned char:2; /* Byte 3 Bits 4-5 */ unsigned char:2; /* Byte 3 Bits 4-5 */
boolean TrmIOP:1; /* Byte 3 Bit 6 */ bool TrmIOP:1; /* Byte 3 Bit 6 */
boolean AENC:1; /* Byte 3 Bit 7 */ bool AENC:1; /* Byte 3 Bit 7 */
unsigned char AdditionalLength; /* Byte 4 */ unsigned char AdditionalLength; /* Byte 4 */
unsigned char:8; /* Byte 5 */ unsigned char:8; /* Byte 5 */
unsigned char:8; /* Byte 6 */ unsigned char:8; /* Byte 6 */
boolean SftRe:1; /* Byte 7 Bit 0 */ bool SftRe:1; /* Byte 7 Bit 0 */
boolean CmdQue:1; /* Byte 7 Bit 1 */ bool CmdQue:1; /* Byte 7 Bit 1 */
boolean:1; /* Byte 7 Bit 2 */ bool:1; /* Byte 7 Bit 2 */
boolean Linked:1; /* Byte 7 Bit 3 */ bool Linked:1; /* Byte 7 Bit 3 */
boolean Sync:1; /* Byte 7 Bit 4 */ bool Sync:1; /* Byte 7 Bit 4 */
boolean WBus16:1; /* Byte 7 Bit 5 */ bool WBus16:1; /* Byte 7 Bit 5 */
boolean WBus32:1; /* Byte 7 Bit 6 */ bool WBus32:1; /* Byte 7 Bit 6 */
boolean RelAdr:1; /* Byte 7 Bit 7 */ bool RelAdr:1; /* Byte 7 Bit 7 */
unsigned char VendorIdentification[8]; /* Bytes 8-15 */ unsigned char VendorIdentification[8]; /* Bytes 8-15 */
unsigned char ProductIdentification[16]; /* Bytes 16-31 */ unsigned char ProductIdentification[16]; /* Bytes 16-31 */
unsigned char ProductRevisionLevel[4]; /* Bytes 32-35 */ unsigned char ProductRevisionLevel[4]; /* Bytes 32-35 */
@ -1348,7 +1342,7 @@ static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *, char *, char **, off_t
static int BusLogic_SlaveConfigure(struct scsi_device *); static int BusLogic_SlaveConfigure(struct scsi_device *);
static void BusLogic_QueueCompletedCCB(struct BusLogic_CCB *); static void BusLogic_QueueCompletedCCB(struct BusLogic_CCB *);
static irqreturn_t BusLogic_InterruptHandler(int, void *); static irqreturn_t BusLogic_InterruptHandler(int, void *);
static int BusLogic_ResetHostAdapter(struct BusLogic_HostAdapter *, boolean HardReset); static int BusLogic_ResetHostAdapter(struct BusLogic_HostAdapter *, bool HardReset);
static void BusLogic_Message(enum BusLogic_MessageLevel, char *, struct BusLogic_HostAdapter *, ...); static void BusLogic_Message(enum BusLogic_MessageLevel, char *, struct BusLogic_HostAdapter *, ...);
static int __init BusLogic_Setup(char *); static int __init BusLogic_Setup(char *);
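
Note on the BusLogic.h conversion above: the private `typedef bool boolean` is removed and every one-bit flag is declared directly as a C99 `bool` bitfield. The toy struct below (invented flag names, not the driver's layout) just shows that `bool name:1` fields pack into the same byte and read back as clean 0/1 values:

/* Toy example of bool:1 bitfields (invented flag names). */
#include <stdbool.h>
#include <stdio.h>

struct flags {
	bool tagged_queuing:1;
	bool wide_transfers:1;
	bool synchronous:1;
	unsigned char reserved:5;   /* pad the byte out, as the driver does */
};

int main(void)
{
	struct flags f = { .tagged_queuing = true, .synchronous = true };

	printf("sizeof(struct flags) = %zu\n", sizeof(f));
	printf("tagged=%d wide=%d sync=%d\n",
	       f.tagged_queuing, f.wide_transfers, f.synchronous);
	return 0;
}
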

View file

@ -7609,7 +7609,7 @@ FlashPoint__AbortCCB(FlashPoint_CardHandle_T CardHandle,
FlashPoint_AbortCCB(CardHandle, (struct sccb *)CCB); FlashPoint_AbortCCB(CardHandle, (struct sccb *)CCB);
} }
static inline boolean static inline bool
FlashPoint__InterruptPending(FlashPoint_CardHandle_T CardHandle) FlashPoint__InterruptPending(FlashPoint_CardHandle_T CardHandle)
{ {
return FlashPoint_InterruptPending(CardHandle); return FlashPoint_InterruptPending(CardHandle);
@ -7640,7 +7640,7 @@ extern FlashPoint_CardHandle_T
FlashPoint_HardwareResetHostAdapter(struct FlashPoint_Info *); FlashPoint_HardwareResetHostAdapter(struct FlashPoint_Info *);
extern void FlashPoint_StartCCB(FlashPoint_CardHandle_T, struct BusLogic_CCB *); extern void FlashPoint_StartCCB(FlashPoint_CardHandle_T, struct BusLogic_CCB *);
extern int FlashPoint_AbortCCB(FlashPoint_CardHandle_T, struct BusLogic_CCB *); extern int FlashPoint_AbortCCB(FlashPoint_CardHandle_T, struct BusLogic_CCB *);
extern boolean FlashPoint_InterruptPending(FlashPoint_CardHandle_T); extern bool FlashPoint_InterruptPending(FlashPoint_CardHandle_T);
extern int FlashPoint_HandleInterrupt(FlashPoint_CardHandle_T); extern int FlashPoint_HandleInterrupt(FlashPoint_CardHandle_T);
extern void FlashPoint_ReleaseHostAdapter(FlashPoint_CardHandle_T); extern void FlashPoint_ReleaseHostAdapter(FlashPoint_CardHandle_T);

View file

@ -973,6 +973,15 @@ config SCSI_LASI700
many PA-RISC workstations & servers. If you do not know whether you many PA-RISC workstations & servers. If you do not know whether you
have a Lasi chip, it is safe to say "Y" here. have a Lasi chip, it is safe to say "Y" here.
config SCSI_SNI_53C710
tristate "SNI RM SCSI support for 53c710"
depends on SNI_RM && SCSI
select SCSI_SPI_ATTRS
select 53C700_LE_ON_BE
help
This is a driver for the onboard SCSI controller found in older
SNI RM workstations & servers.
config 53C700_LE_ON_BE config 53C700_LE_ON_BE
bool bool
depends on SCSI_LASI700 depends on SCSI_LASI700

View file

@ -124,6 +124,7 @@ obj-$(CONFIG_JAZZ_ESP) += NCR53C9x.o jazz_esp.o
obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o
obj-$(CONFIG_SCSI_FCAL) += fcal.o obj-$(CONFIG_SCSI_FCAL) += fcal.o
obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o obj-$(CONFIG_SCSI_NSP32) += nsp32.o
obj-$(CONFIG_SCSI_IPR) += ipr.o obj-$(CONFIG_SCSI_IPR) += ipr.o
obj-$(CONFIG_SCSI_SRP) += libsrp.o obj-$(CONFIG_SCSI_SRP) += libsrp.o

View file

@ -200,6 +200,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
hostdata->base = ioport_map(region, 64); hostdata->base = ioport_map(region, 64);
hostdata->differential = (((1<<siop) & differential) != 0); hostdata->differential = (((1<<siop) & differential) != 0);
hostdata->clock = NCR_D700_CLOCK_MHZ; hostdata->clock = NCR_D700_CLOCK_MHZ;
hostdata->burst_length = 8;
/* and register the siop */ /* and register the siop */
host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev); host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);

View file

@ -3,6 +3,6 @@
obj-$(CONFIG_SCSI_AACRAID) := aacraid.o obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
dpcsup.o rx.o sa.o rkt.o dpcsup.o rx.o sa.o rkt.o nark.o
EXTRA_CFLAGS := -Idrivers/scsi EXTRA_CFLAGS := -Idrivers/scsi

View file

@ -170,9 +170,9 @@ int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR); module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
int expose_physicals = 0; int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR); module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. 0=off, 1=on"); MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
/** /**
* aac_get_config_status - check the adapter configuration * aac_get_config_status - check the adapter configuration
* @common: adapter to query * @common: adapter to query
@ -706,6 +706,309 @@ static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
} }
} }
static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
{
if (lba & 0xffffffff00000000LL) {
int cid = scmd_id(cmd);
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
(sizeof(dev->fsa_dev[cid].sense_data) > sizeof(cmd->sense_buffer))
? sizeof(cmd->sense_buffer)
: sizeof(dev->fsa_dev[cid].sense_data));
cmd->scsi_done(cmd);
return 1;
}
return 0;
}
static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
{
return 0;
}
static void io_callback(void *context, struct fib * fibptr);
static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
u16 fibsize;
struct aac_raw_io *readcmd;
aac_fib_init(fib);
readcmd = (struct aac_raw_io *) fib_data(fib);
readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
readcmd->count = cpu_to_le32(count<<9);
readcmd->cid = cpu_to_le16(scmd_id(cmd));
readcmd->flags = cpu_to_le16(1);
readcmd->bpTotal = 0;
readcmd->bpComplete = 0;
aac_build_sgraw(cmd, &readcmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
return aac_fib_send(ContainerRawIo,
fib,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) cmd);
}
static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
u16 fibsize;
struct aac_read64 *readcmd;
aac_fib_init(fib);
readcmd = (struct aac_read64 *) fib_data(fib);
readcmd->command = cpu_to_le32(VM_CtHostRead64);
readcmd->cid = cpu_to_le16(scmd_id(cmd));
readcmd->sector_count = cpu_to_le16(count);
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->pad = 0;
readcmd->flags = 0;
aac_build_sg64(cmd, &readcmd->sg);
fibsize = sizeof(struct aac_read64) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
return aac_fib_send(ContainerCommand64,
fib,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) cmd);
}
static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
u16 fibsize;
struct aac_read *readcmd;
aac_fib_init(fib);
readcmd = (struct aac_read *) fib_data(fib);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
readcmd->cid = cpu_to_le16(scmd_id(cmd));
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);
aac_build_sg(cmd, &readcmd->sg);
fibsize = sizeof(struct aac_read) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
return aac_fib_send(ContainerCommand,
fib,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) cmd);
}
static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
u16 fibsize;
struct aac_raw_io *writecmd;
aac_fib_init(fib);
writecmd = (struct aac_raw_io *) fib_data(fib);
writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd->count = cpu_to_le32(count<<9);
writecmd->cid = cpu_to_le16(scmd_id(cmd));
writecmd->flags = 0;
writecmd->bpTotal = 0;
writecmd->bpComplete = 0;
aac_build_sgraw(cmd, &writecmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
return aac_fib_send(ContainerRawIo,
fib,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) cmd);
}
static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
u16 fibsize;
struct aac_write64 *writecmd;
aac_fib_init(fib);
writecmd = (struct aac_write64 *) fib_data(fib);
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
writecmd->cid = cpu_to_le16(scmd_id(cmd));
writecmd->sector_count = cpu_to_le16(count);
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->pad = 0;
writecmd->flags = 0;
aac_build_sg64(cmd, &writecmd->sg);
fibsize = sizeof(struct aac_write64) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
return aac_fib_send(ContainerCommand64,
fib,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) cmd);
}
static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
u16 fibsize;
struct aac_write *writecmd;
aac_fib_init(fib);
writecmd = (struct aac_write *) fib_data(fib);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
writecmd->cid = cpu_to_le16(scmd_id(cmd));
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->count = cpu_to_le32(count * 512);
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
aac_build_sg(cmd, &writecmd->sg);
fibsize = sizeof(struct aac_write) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
return aac_fib_send(ContainerCommand,
fib,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) cmd);
}
static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
{
struct aac_srb * srbcmd;
u32 flag;
u32 timeout;
aac_fib_init(fib);
switch(cmd->sc_data_direction){
case DMA_TO_DEVICE:
flag = SRB_DataOut;
break;
case DMA_BIDIRECTIONAL:
flag = SRB_DataIn | SRB_DataOut;
break;
case DMA_FROM_DEVICE:
flag = SRB_DataIn;
break;
case DMA_NONE:
default: /* shuts up some versions of gcc */
flag = SRB_NoDataXfer;
break;
}
srbcmd = (struct aac_srb*) fib_data(fib);
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
srbcmd->id = cpu_to_le32(scmd_id(cmd));
srbcmd->lun = cpu_to_le32(cmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
timeout = cmd->timeout_per_command/HZ;
if (timeout == 0)
timeout = 1;
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
srbcmd->retry_limit = 0; /* Obsolete parameter */
srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
return srbcmd;
}
static void aac_srb_callback(void *context, struct fib * fibptr);
static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
{
u16 fibsize;
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
srbcmd->count = cpu_to_le32(cmd->request_bufflen);
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
/*
* Build Scatter/Gather list
*/
fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
((le32_to_cpu(srbcmd->sg.count) & 0xff) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
return aac_fib_send(ScsiPortCommand64, fib,
fibsize, FsaNormal, 0, 1,
(fib_callback) aac_srb_callback,
(void *) cmd);
}
static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
{
u16 fibsize;
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
srbcmd->count = cpu_to_le32(cmd->request_bufflen);
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
/*
* Build Scatter/Gather list
*/
fibsize = sizeof (struct aac_srb) +
(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
sizeof (struct sgentry));
BUG_ON (fibsize > (fib->dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
(fib_callback) aac_srb_callback, (void *) cmd);
}
int aac_get_adapter_info(struct aac_dev* dev) int aac_get_adapter_info(struct aac_dev* dev)
{ {
struct fib* fibptr; struct fib* fibptr;
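
Note on the new aac_bounds_32()/aac_bounds_64() helpers above: the 32-bit variant rejects any LBA that needs more than 32 bits by testing the upper half of the 64-bit value and completing the command with a check condition, while the 64-bit variant accepts everything. A trivial stand-alone illustration of the same mask test:

/* Minimal illustration of the 32-bit LBA bound test used by aac_bounds_32(). */
#include <stdint.h>
#include <stdio.h>

static int lba_needs_64bit(uint64_t lba)
{
	return (lba & 0xffffffff00000000ULL) != 0;
}

int main(void)
{
	uint64_t ok = 0xfffffffeULL;          /* fits in 32 bits */
	uint64_t too_big = 1ULL << 33;        /* needs 64-bit addressing */

	printf("%#llx -> %s\n", (unsigned long long)ok,
	       lba_needs_64bit(ok) ? "reject on 32-bit firmware" : "accept");
	printf("%#llx -> %s\n", (unsigned long long)too_big,
	       lba_needs_64bit(too_big) ? "reject on 32-bit firmware" : "accept");
	return 0;
}
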
@ -874,14 +1177,27 @@ int aac_get_adapter_info(struct aac_dev* dev)
} }
} }
/* /*
* 57 scatter gather elements * Deal with configuring for the individualized limits of each packet
* interface.
*/ */
if (!(dev->raw_io_interface)) { dev->a_ops.adapter_scsi = (dev->dac_support)
? aac_scsi_64
: aac_scsi_32;
if (dev->raw_io_interface) {
dev->a_ops.adapter_bounds = (dev->raw_io_64)
? aac_bounds_64
: aac_bounds_32;
dev->a_ops.adapter_read = aac_read_raw_io;
dev->a_ops.adapter_write = aac_write_raw_io;
} else {
dev->a_ops.adapter_bounds = aac_bounds_32;
dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
sizeof(struct aac_fibhdr) - sizeof(struct aac_fibhdr) -
sizeof(struct aac_write) + sizeof(struct sgentry)) / sizeof(struct aac_write) + sizeof(struct sgentry)) /
sizeof(struct sgentry); sizeof(struct sgentry);
if (dev->dac_support) { if (dev->dac_support) {
dev->a_ops.adapter_read = aac_read_block64;
dev->a_ops.adapter_write = aac_write_block64;
/* /*
* 38 scatter gather elements * 38 scatter gather elements
*/ */
@ -891,6 +1207,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
sizeof(struct aac_write64) + sizeof(struct aac_write64) +
sizeof(struct sgentry64)) / sizeof(struct sgentry64)) /
sizeof(struct sgentry64); sizeof(struct sgentry64);
} else {
dev->a_ops.adapter_read = aac_read_block;
dev->a_ops.adapter_write = aac_write_block;
} }
dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
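
Note on the hunk above: aac_get_adapter_info() now selects the bounds/read/write/SCSI routines once, based on the adapter's raw-I/O and DAC capabilities, and stores them as function pointers in dev->a_ops, so aac_read()/aac_write() collapse to a single aac_adapter_bounds()/aac_adapter_read()/aac_adapter_write() call. The stand-alone sketch below shows the same dispatch-table idea with invented capability flags and stub handlers, not the aacraid structures:

/* Sketch of capability-based function-pointer dispatch (invented names,
 * stub handlers) mirroring the dev->a_ops selection above. */
#include <stdio.h>

struct dev;
struct dev_ops {
	int (*read)(struct dev *d, unsigned long long lba, unsigned int count);
};

struct dev {
	int raw_io;          /* pretend capability flags */
	int dac_support;
	struct dev_ops ops;
};

static int read_raw(struct dev *d, unsigned long long lba, unsigned int c)
{ (void)d; printf("raw read lba=%llu count=%u\n", lba, c); return 0; }
static int read_block64(struct dev *d, unsigned long long lba, unsigned int c)
{ (void)d; printf("64-bit block read lba=%llu count=%u\n", lba, c); return 0; }
static int read_block(struct dev *d, unsigned long long lba, unsigned int c)
{ (void)d; printf("32-bit block read lba=%llu count=%u\n", lba, c); return 0; }

static void pick_ops(struct dev *d)
{
	if (d->raw_io)
		d->ops.read = read_raw;
	else if (d->dac_support)
		d->ops.read = read_block64;
	else
		d->ops.read = read_block;
}

int main(void)
{
	struct dev d = { .raw_io = 0, .dac_support = 1 };

	pick_ops(&d);
	return d.ops.read(&d, 1234, 8);   /* callers never branch on capabilities again */
}
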
@ -1004,8 +1323,6 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
u64 lba; u64 lba;
u32 count; u32 count;
int status; int status;
u16 fibsize;
struct aac_dev *dev; struct aac_dev *dev;
struct fib * cmd_fibcontext; struct fib * cmd_fibcontext;
@ -1059,23 +1376,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
} }
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies)); smp_processor_id(), (unsigned long long)lba, jiffies));
if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) && if (aac_adapter_bounds(dev,scsicmd,lba))
(lba & 0xffffffff00000000LL)) {
dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
(sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
? sizeof(scsicmd->sense_buffer)
: sizeof(dev->fsa_dev[cid].sense_data));
scsicmd->scsi_done(scsicmd);
return 0;
}
/*
* Alocate and initialize a Fib
*/
@@ -1083,85 +1385,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
return -1;
}
aac_fib_init(cmd_fibcontext);
status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
if (dev->raw_io_interface) {
struct aac_raw_io *readcmd;
readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
readcmd->count = cpu_to_le32(count<<9);
readcmd->cid = cpu_to_le16(cid);
readcmd->flags = cpu_to_le16(1);
readcmd->bpTotal = 0;
readcmd->bpComplete = 0;
aac_build_sgraw(scsicmd, &readcmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
status = aac_fib_send(ContainerRawIo,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) scsicmd);
} else if (dev->dac_support == 1) {
struct aac_read64 *readcmd;
readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
readcmd->command = cpu_to_le32(VM_CtHostRead64);
readcmd->cid = cpu_to_le16(cid);
readcmd->sector_count = cpu_to_le16(count);
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->pad = 0;
readcmd->flags = 0;
aac_build_sg64(scsicmd, &readcmd->sg);
fibsize = sizeof(struct aac_read64) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
status = aac_fib_send(ContainerCommand64,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) scsicmd);
} else {
struct aac_read *readcmd;
readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
readcmd->cid = cpu_to_le32(cid);
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);
aac_build_sg(scsicmd, &readcmd->sg);
fibsize = sizeof(struct aac_read) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry));
BUG_ON (fibsize > (dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
status = aac_fib_send(ContainerCommand,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) scsicmd);
}
/*
* Check that the command queued to the controller
@@ -1187,7 +1411,6 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
u64 lba;
u32 count;
int status;
u16 fibsize;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
@@ -1227,22 +1450,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
if ((!(dev->raw_io_interface) || !(dev->raw_io_64))
if (aac_adapter_bounds(dev,scsicmd,lba))
&& (lba & 0xffffffff00000000LL)) {
dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
(sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
? sizeof(scsicmd->sense_buffer)
: sizeof(dev->fsa_dev[cid].sense_data));
scsicmd->scsi_done(scsicmd);
return 0;
}
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
@@ -1251,85 +1460,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
scsicmd->scsi_done(scsicmd);
return 0;
}
aac_fib_init(cmd_fibcontext);
if (dev->raw_io_interface) {
status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count);
struct aac_raw_io *writecmd;
writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd->count = cpu_to_le32(count<<9);
writecmd->cid = cpu_to_le16(cid);
writecmd->flags = 0;
writecmd->bpTotal = 0;
writecmd->bpComplete = 0;
aac_build_sgraw(scsicmd, &writecmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
status = aac_fib_send(ContainerRawIo,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) scsicmd);
} else if (dev->dac_support == 1) {
struct aac_write64 *writecmd;
writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
writecmd->cid = cpu_to_le16(cid);
writecmd->sector_count = cpu_to_le16(count);
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->pad = 0;
writecmd->flags = 0;
aac_build_sg64(scsicmd, &writecmd->sg);
fibsize = sizeof(struct aac_write64) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
status = aac_fib_send(ContainerCommand64,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) scsicmd);
} else {
struct aac_write *writecmd;
writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
writecmd->cid = cpu_to_le32(cid);
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
writecmd->count = cpu_to_le32(count * 512);
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
aac_build_sg(scsicmd, &writecmd->sg);
fibsize = sizeof(struct aac_write) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry));
BUG_ON (fibsize > (dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
status = aac_fib_send(ContainerCommand,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) scsicmd);
}
/*
* Check that the command queued to the controller
@@ -2099,10 +2231,6 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
struct fib* cmd_fibcontext;
struct aac_dev* dev;
int status;
struct aac_srb *srbcmd;
u16 fibsize;
u32 flag;
u32 timeout;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
@@ -2112,88 +2240,14 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
return 0;
}
switch(scsicmd->sc_data_direction){
case DMA_TO_DEVICE:
flag = SRB_DataOut;
break;
case DMA_BIDIRECTIONAL:
flag = SRB_DataIn | SRB_DataOut;
break;
case DMA_FROM_DEVICE:
flag = SRB_DataIn;
break;
case DMA_NONE:
default: /* shuts up some versions of gcc */
flag = SRB_NoDataXfer;
break;
}
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
return -1;
}
aac_fib_init(cmd_fibcontext);
status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(scsicmd)));
srbcmd->id = cpu_to_le32(scmd_id(scsicmd));
srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
timeout = scsicmd->timeout_per_command/HZ;
if(timeout == 0){
timeout = 1;
}
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
srbcmd->retry_limit = 0; /* Obsolete parameter */
srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
if( dev->dac_support == 1 ) {
aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
/*
* Build Scatter/Gather list
*/
fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
((le32_to_cpu(srbcmd->sg.count) & 0xff) *
sizeof (struct sgentry64));
BUG_ON (fibsize > (dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
status = aac_fib_send(ScsiPortCommand64, cmd_fibcontext,
fibsize, FsaNormal, 0, 1,
(fib_callback) aac_srb_callback,
(void *) scsicmd);
} else {
aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
/*
* Build Scatter/Gather list
*/
fibsize = sizeof (struct aac_srb) +
(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
sizeof (struct sgentry));
BUG_ON (fibsize > (dev->max_fib_size -
sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
status = aac_fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
(fib_callback) aac_srb_callback, (void *) scsicmd);
}
/* /*
* Check that the command queued to the controller * Check that the command queued to the controller
*/ */

View file

@@ -5,6 +5,7 @@
#define _nblank(x) #x
#define nblank(x) _nblank(x)[0]
#include <linux/interrupt.h>
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -485,16 +486,28 @@ enum aac_log_level {
struct aac_dev;
struct fib;
struct scsi_cmnd;
struct adapter_ops
{
/* Low level operations */
void (*adapter_interrupt)(struct aac_dev *dev);
void (*adapter_notify)(struct aac_dev *dev, u32 event);
void (*adapter_disable_int)(struct aac_dev *dev);
void (*adapter_enable_int)(struct aac_dev *dev);
int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
int (*adapter_check_health)(struct aac_dev *dev);
int (*adapter_send)(struct fib * fib);
/* Transport operations */
int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
irqreturn_t (*adapter_intr)(int irq, void *dev_id);
/* Packet operations */
int (*adapter_deliver)(struct fib * fib);
int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
/* Administrative operations */
int (*adapter_comm)(struct aac_dev * dev, int comm);
};
/*
@@ -1018,7 +1031,9 @@ struct aac_dev
u8 nondasd_support;
u8 dac_support;
u8 raid_scsi_mode;
u8 new_comm_interface;
u8 comm_interface;
# define AAC_COMM_PRODUCER 0
# define AAC_COMM_MESSAGE 1
/* macro side-effects BEWARE */
# define raw_io_interface \
init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
@ -1036,18 +1051,36 @@ struct aac_dev
#define aac_adapter_disable_int(dev) \ #define aac_adapter_disable_int(dev) \
(dev)->a_ops.adapter_disable_int(dev) (dev)->a_ops.adapter_disable_int(dev)
#define aac_adapter_enable_int(dev) \
(dev)->a_ops.adapter_enable_int(dev)
#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
#define aac_adapter_check_health(dev) \ #define aac_adapter_check_health(dev) \
(dev)->a_ops.adapter_check_health(dev) (dev)->a_ops.adapter_check_health(dev)
#define aac_adapter_send(fib) \
((fib)->dev)->a_ops.adapter_send(fib)
#define aac_adapter_ioremap(dev, size) \ #define aac_adapter_ioremap(dev, size) \
(dev)->a_ops.adapter_ioremap(dev, size) (dev)->a_ops.adapter_ioremap(dev, size)
#define aac_adapter_deliver(fib) \
((fib)->dev)->a_ops.adapter_deliver(fib)
#define aac_adapter_bounds(dev,cmd,lba) \
dev->a_ops.adapter_bounds(dev,cmd,lba)
#define aac_adapter_read(fib,cmd,lba,count) \
((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
#define aac_adapter_write(fib,cmd,lba,count) \
((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count)
#define aac_adapter_scsi(fib,cmd) \
((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
#define aac_adapter_comm(dev,comm) \
(dev)->a_ops.adapter_comm(dev, comm)
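The wrapper macros above are what let the I/O paths in aachba.c shrink to single calls such as aac_adapter_read(). Not part of the patch, a toy, compilable model of the dispatch pattern; the toy_* names are invented for illustration and do not exist in the driver:

    #include <stdio.h>

    /* Toy stand-ins for struct aac_dev and its a_ops table -- just enough
     * to show how init-time selection of a function pointer gives the I/O
     * path a single call site behind a wrapper macro. */
    struct toy_dev;

    struct toy_ops {
            int (*adapter_read)(struct toy_dev *dev, unsigned long long lba);
    };

    struct toy_dev {
            struct toy_ops a_ops;
            int raw_io_interface;
    };

    #define toy_adapter_read(dev, lba) ((dev)->a_ops.adapter_read(dev, lba))

    static int read_block(struct toy_dev *dev, unsigned long long lba)
    {
            (void)dev;
            printf("block read at lba %llu\n", lba);
            return 0;
    }

    static int read_raw_io(struct toy_dev *dev, unsigned long long lba)
    {
            (void)dev;
            printf("raw-io read at lba %llu\n", lba);
            return 0;
    }

    int main(void)
    {
            struct toy_dev dev = { .raw_io_interface = 1 };

            /* init-time selection, as aac_get_adapter_info() does for a_ops */
            dev.a_ops.adapter_read = dev.raw_io_interface ? read_raw_io
                                                          : read_block;

            /* the I/O path then has one call site for every adapter type */
            return toy_adapter_read(&dev, 0x12345678ULL);
    }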
#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001) #define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
/* /*
@ -1767,7 +1800,6 @@ static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
return (u32)capacity; return (u32)capacity;
} }
struct scsi_cmnd;
/* SCp.phase values */ /* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101 #define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102 #define AAC_OWNER_LOWLEVEL 0x102
@ -1794,7 +1826,9 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg); int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
int aac_rx_init(struct aac_dev *dev); int aac_rx_init(struct aac_dev *dev);
int aac_rkt_init(struct aac_dev *dev); int aac_rkt_init(struct aac_dev *dev);
int aac_nark_init(struct aac_dev *dev);
int aac_sa_init(struct aac_dev *dev); int aac_sa_init(struct aac_dev *dev);
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
unsigned int aac_response_normal(struct aac_queue * q); unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q); unsigned int aac_command_normal(struct aac_queue * q);
unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);

View file

@@ -95,7 +95,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
init->InitFlags = 0;
if (dev->new_comm_interface) {
if (dev->comm_interface == AAC_COMM_MESSAGE) {
init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
}
@@ -297,21 +297,23 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
- sizeof(struct aac_fibhdr)
- sizeof(struct aac_write) + sizeof(struct sgentry))
/ sizeof(struct sgentry);
dev->new_comm_interface = 0;
dev->comm_interface = AAC_COMM_PRODUCER;
dev->raw_io_64 = 0;
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
(status[0] == 0x00000001)) {
if (status[1] & AAC_OPT_NEW_COMM_64)
dev->raw_io_64 = 1;
if (status[1] & AAC_OPT_NEW_COMM)
if (dev->a_ops.adapter_comm &&
dev->new_comm_interface = dev->a_ops.adapter_send != 0;
(status[1] & AAC_OPT_NEW_COMM))
if (dev->new_comm_interface && (status[2] > dev->base_size)) {
dev->comm_interface = AAC_COMM_MESSAGE;
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
(status[2] > dev->base_size)) {
aac_adapter_ioremap(dev, 0);
dev->base_size = status[2];
if (aac_adapter_ioremap(dev, status[2])) {
/* remap failed, go back ... */
dev->new_comm_interface = 0;
dev->comm_interface = AAC_COMM_PRODUCER;
if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
printk(KERN_WARNING
"aacraid: unable to map adapter.\n");

View file

@@ -317,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
* success.
*/
static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
struct aac_entry * entry = NULL;
int map = 0;
@@ -387,7 +387,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
{
struct aac_dev * dev = fibptr->dev;
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_queue * q;
unsigned long flags = 0;
unsigned long qflags;
@@ -469,38 +468,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
if (!dev->queues)
return -EBUSY;
q = &dev->queues->queue[AdapNormCmdQueue];
if(wait)
spin_lock_irqsave(&fibptr->event_lock, flags);
spin_lock_irqsave(q->lock, qflags);
aac_adapter_deliver(fibptr);
if (dev->new_comm_interface) {
unsigned long count = 10000000L; /* 50 seconds */
q->numpending++;
spin_unlock_irqrestore(q->lock, qflags);
while (aac_adapter_send(fibptr) != 0) {
if (--count == 0) {
if (wait)
spin_unlock_irqrestore(&fibptr->event_lock, flags);
spin_lock_irqsave(q->lock, qflags);
q->numpending--;
spin_unlock_irqrestore(q->lock, qflags);
return -ETIMEDOUT;
}
udelay(5);
}
} else {
u32 index;
unsigned long nointr = 0;
aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
q->numpending++;
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, qflags);
dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
if (!(nointr & aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormCmdQueue);
}
/* /*
* If the caller wanted us to wait for response wait now. * If the caller wanted us to wait for response wait now.
@ -520,6 +491,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
while (down_trylock(&fibptr->event_wait)) { while (down_trylock(&fibptr->event_wait)) {
int blink; int blink;
if (--count == 0) { if (--count == 0) {
struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
spin_lock_irqsave(q->lock, qflags); spin_lock_irqsave(q->lock, qflags);
q->numpending--; q->numpending--;
spin_unlock_irqrestore(q->lock, qflags); spin_unlock_irqrestore(q->lock, qflags);
@@ -659,7 +631,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
unsigned long qflags;
if (hw_fib->header.XferState == 0) {
if (dev->new_comm_interface)
if (dev->comm_interface == AAC_COMM_MESSAGE)
kfree (hw_fib);
return 0;
}
@@ -667,7 +639,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
* If we plan to do anything check the structure type first.
*/
if ( hw_fib->header.StructType != FIB_MAGIC ) {
if (dev->new_comm_interface)
if (dev->comm_interface == AAC_COMM_MESSAGE)
kfree (hw_fib);
return -EINVAL;
}
@@ -679,7 +651,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
* send the completed cdb to the adapter.
*/
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
if (dev->new_comm_interface) {
if (dev->comm_interface == AAC_COMM_MESSAGE) {
kfree (hw_fib);
} else {
u32 index;

View file

@@ -157,6 +157,7 @@ static struct pci_device_id aac_pci_tbl[] = {
{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -230,7 +231,8 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec Rocket Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
};
/**
@@ -396,11 +398,15 @@ static int aac_slave_configure(struct scsi_device *sdev)
sdev->skip_ms_page_3f = 1;
}
if ((sdev->type == TYPE_DISK) &&
!expose_physicals &&
(sdev_channel(sdev) != CONTAINER_CHANNEL)) {
struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
if (expose_physicals == 0)
if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
return -ENXIO;
sdev->no_uld_attach = 1;
if (expose_physicals < 0) {
struct aac_dev *aac =
(struct aac_dev *)sdev->host->hostdata;
if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
sdev->no_uld_attach = 1;
}
}
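The rewritten slave_configure hunk turns expose_physicals into a three-way switch. Not part of the patch, a compilable sketch of the resulting policy; the channel number and default mirror the driver but should be read as an illustration only:

    #include <stdio.h>

    #define ENXIO 6

    /* Rough model of the expose_physicals handling:
     *   0  -> physical disks are rejected outright (-ENXIO)
     *  <0  -> disks stay reachable via sg/ioctls but are hidden from sd
     *         (no_uld_attach), unless the controller runs in RAID/SCSI
     *         mode and the disk sits on the pass-through channel (2)
     *  >0  -> disks are exposed normally */
    static int configure_physical_disk(int expose_physicals, int raid_scsi_mode,
                                       int channel, int *no_uld_attach)
    {
            *no_uld_attach = 0;
            if (expose_physicals == 0)
                    return -ENXIO;
            if (expose_physicals < 0 && (!raid_scsi_mode || channel != 2))
                    *no_uld_attach = 1;
            return 0;
    }

    int main(void)
    {
            int hide;

            printf("%d\n", configure_physical_disk(0, 0, 1, &hide));          /* -6 */
            printf("%d %d\n", configure_physical_disk(-1, 0, 1, &hide), hide); /* 0 1 */
            printf("%d %d\n", configure_physical_disk(1, 0, 1, &hide), hide);  /* 0 0 */
            return 0;
    }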
if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
(sdev_channel(sdev) == CONTAINER_CHANNEL)) { (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
@ -804,7 +810,6 @@ static struct scsi_host_template aac_driver_template = {
.emulated = 1, .emulated = 1,
}; };
static int __devinit aac_probe_one(struct pci_dev *pdev, static int __devinit aac_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id) const struct pci_device_id *id)
{ {

View file

@ -0,0 +1,87 @@
/*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* nark.c
*
* Abstract: Hardware Device Interface for NEMER/ARK
*
*/
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
/**
* aac_nark_ioremap
* @size: mapping resize request
*
*/
static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
{
if (!size) {
iounmap(dev->regs.rx);
dev->regs.rx = NULL;
iounmap(dev->base);
dev->base = NULL;
return 0;
}
dev->scsi_host_ptr->base = pci_resource_start(dev->pdev, 2);
dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) |
((u64)pci_resource_start(dev->pdev, 1) << 32),
sizeof(struct rx_registers) - sizeof(struct rx_inbound));
dev->base = NULL;
if (dev->regs.rx == NULL)
return -1;
dev->base = ioremap(dev->scsi_host_ptr->base, size);
if (dev->base == NULL) {
iounmap(dev->regs.rx);
dev->regs.rx = NULL;
return -1;
}
dev->IndexRegs = &((struct rx_registers __iomem *)dev->base)->IndexRegs;
return 0;
}
/**
* aac_nark_init - initialize an NEMER/ARK Split Bar card
* @dev: device to configure
*
*/
int aac_nark_init(struct aac_dev * dev)
{
extern int _aac_rx_init(struct aac_dev *dev);
extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
/*
* Fill in the function dispatch table.
*/
dev->a_ops.adapter_ioremap = aac_nark_ioremap;
dev->a_ops.adapter_comm = aac_rx_select_comm;
return _aac_rx_init(dev);
}

View file

@ -34,6 +34,40 @@
#include "aacraid.h" #include "aacraid.h"
#define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB)
/**
* aac_rkt_select_comm - Select communications method
* @dev: Adapter
* @comm: communications method
*/
static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
{
int retval;
extern int aac_rx_select_comm(struct aac_dev *dev, int comm);
retval = aac_rx_select_comm(dev, comm);
if (comm == AAC_COMM_MESSAGE) {
/*
* FIB Setup has already been done, but we can minimize the
* damage by at least ensuring the OS never issues more
* commands than we can handle. The Rocket adapters currently
* can only handle 246 commands and 8 AIFs at the same time,
* and in fact do notify us accordingly if we negotiate the
* FIB size. The problem that causes us to add this check is
* to ensure that we do not overdo it with the adapter when a
* hard coded FIB override is being utilized. This special
* case warrants this half baked, but convenient, check here.
*/
if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
dev->init->MaxIoCommands =
cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
}
}
return retval;
}
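The clamp above keeps the negotiated FIB count within what the Rocket firmware can track. Not part of the patch, a tiny worked example of the arithmetic; the management-FIB count used here is an assumption, only the total of 246 comes from the comment above:

    #include <stdio.h>

    #define AAC_NUM_MGT_FIB     8          /* placeholder; real value is in aacraid.h */
    #define AAC_NUM_IO_FIB_RKT  (246 - AAC_NUM_MGT_FIB)

    /* The Rocket firmware can keep 246 FIBs in flight in message mode, so
     * MaxIoCommands is pinned to that total and the SCSI midlayer queue
     * depth to the I/O share of it. */
    int main(void)
    {
            int can_queue = 512;           /* whatever was negotiated earlier */

            if (can_queue > AAC_NUM_IO_FIB_RKT)
                    can_queue = AAC_NUM_IO_FIB_RKT;

            printf("MaxIoCommands = %d, can_queue = %d\n",
                   AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB, can_queue);
            return 0;
    }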
/**
* aac_rkt_ioremap
* @size: mapping resize request
@@ -63,39 +97,13 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
int aac_rkt_init(struct aac_dev *dev)
{
int retval;
extern int _aac_rx_init(struct aac_dev *dev);
extern void aac_rx_start_adapter(struct aac_dev *dev);
/*
* Fill in the function dispatch table.
*/
dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
dev->a_ops.adapter_comm = aac_rkt_select_comm;
retval = _aac_rx_init(dev);
return _aac_rx_init(dev);
if (retval)
return retval;
if (dev->new_comm_interface) {
/*
* FIB Setup has already been done, but we can minimize the
* damage by at least ensuring the OS never issues more
* commands than we can handle. The Rocket adapters currently
* can only handle 246 commands and 8 AIFs at the same time,
* and in fact do notify us accordingly if we negotiate the
* FIB size. The problem that causes us to add this check is
* to ensure that we do not overdo it with the adapter when a
* hard coded FIB override is being utilized. This special
* case warrants this half baked, but convenient, check here.
*/
if (dev->scsi_host_ptr->can_queue > (246 - AAC_NUM_MGT_FIB)) {
dev->init->MaxIoCommands = cpu_to_le32(246);
dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB;
}
}
/*
* Tell the adapter that all is configured, and it can start
* accepting requests
*/
aac_rx_start_adapter(dev);
return 0;
} }

View file

@@ -46,60 +46,60 @@
#include "aacraid.h"
static irqreturn_t aac_rx_intr(int irq, void *dev_id)
static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
struct aac_dev *dev = dev_id;
unsigned long bellbits;
u8 intstat = rx_readb(dev, MUnit.OISR);
dprintk((KERN_DEBUG "aac_rx_intr(%d,%p)\n", irq, dev_id)); /*
if (dev->new_comm_interface) { * Read mask and invert because drawbridge is reversed.
u32 Index = rx_readl(dev, MUnit.OutboundQueue); * This allows us to only service interrupts that have
if (Index == 0xFFFFFFFFL) * been enabled.
* Check to see if this is our interrupt. If it isn't just return
*/
if (intstat & ~(dev->OIMR)) {
bellbits = rx_readl(dev, OutboundDoorbellReg);
if (bellbits & DoorBellPrintfReady) {
aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
}
else if (bellbits & DoorBellAdapterNormCmdReady) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
}
else if (bellbits & DoorBellAdapterNormRespReady) {
rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
}
else if (bellbits & DoorBellAdapterNormCmdNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
}
else if (bellbits & DoorBellAdapterNormRespNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
struct aac_dev *dev = dev_id;
u32 Index = rx_readl(dev, MUnit.OutboundQueue);
if (Index == 0xFFFFFFFFL)
Index = rx_readl(dev, MUnit.OutboundQueue);
if (Index != 0xFFFFFFFFL) {
do {
if (aac_intr_normal(dev, Index)) {
rx_writel(dev, MUnit.OutboundQueue, Index);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
}
Index = rx_readl(dev, MUnit.OutboundQueue); Index = rx_readl(dev, MUnit.OutboundQueue);
if (Index != 0xFFFFFFFFL) { } while (Index != 0xFFFFFFFFL);
do { return IRQ_HANDLED;
if (aac_intr_normal(dev, Index)) {
rx_writel(dev, MUnit.OutboundQueue, Index);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
}
Index = rx_readl(dev, MUnit.OutboundQueue);
} while (Index != 0xFFFFFFFFL);
return IRQ_HANDLED;
}
} else {
unsigned long bellbits;
u8 intstat;
intstat = rx_readb(dev, MUnit.OISR);
/*
* Read mask and invert because drawbridge is reversed.
* This allows us to only service interrupts that have
* been enabled.
* Check to see if this is our interrupt. If it isn't just return
*/
if (intstat & ~(dev->OIMR))
{
bellbits = rx_readl(dev, OutboundDoorbellReg);
if (bellbits & DoorBellPrintfReady) {
aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
}
else if (bellbits & DoorBellAdapterNormCmdReady) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
}
else if (bellbits & DoorBellAdapterNormRespReady) {
rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
}
else if (bellbits & DoorBellAdapterNormCmdNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
}
else if (bellbits & DoorBellAdapterNormRespNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
}
return IRQ_HANDLED;
}
} }
return IRQ_NONE; return IRQ_NONE;
} }
@ -114,6 +114,26 @@ static void aac_rx_disable_interrupt(struct aac_dev *dev)
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff); rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
} }
/**
* aac_rx_enable_interrupt_producer - Enable interrupts
* @dev: Adapter
*/
static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
{
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
}
/**
* aac_rx_enable_interrupt_message - Enable interrupts
* @dev: Adapter
*/
static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
{
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
}
/** /**
* rx_sync_cmd - send a command and wait * rx_sync_cmd - send a command and wait
* @dev: Adapter * @dev: Adapter
@@ -189,10 +209,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Restore interrupt mask even though we timed out
*/
if (dev->new_comm_interface)
aac_adapter_enable_int(dev);
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
else
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
return -ETIMEDOUT;
} }
/* /*
@@ -215,10 +232,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Restore interrupt mask
*/
if (dev->new_comm_interface)
aac_adapter_enable_int(dev);
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
else
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
return 0;
} }
@@ -360,35 +374,72 @@ static int aac_rx_check_health(struct aac_dev *dev)
}
/**
* aac_rx_send
* aac_rx_deliver_producer
* @fib: fib to issue
*
* Will send a fib, returning 0 if successful.
*/
static int aac_rx_send(struct fib * fib)
static int aac_rx_deliver_producer(struct fib * fib)
{
u64 addr = fib->hw_fib_pa;
struct aac_dev *dev = fib->dev;
volatile void __iomem *device = dev->regs.rx;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
unsigned long qflags;
u32 Index; u32 Index;
unsigned long nointr = 0;
dprintk((KERN_DEBUG "%p->aac_rx_send(%p->%llx)\n", dev, fib, addr)); spin_lock_irqsave(q->lock, qflags);
Index = rx_readl(dev, MUnit.InboundQueue); aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib, 1, fib, &nointr);
if (Index == 0xFFFFFFFFL)
q->numpending++;
*(q->headers.producer) = cpu_to_le32(Index + 1);
spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormCmdQueue);
return 0;
}
/**
* aac_rx_deliver_message
* @fib: fib to issue
*
* Will send a fib, returning 0 if successful.
*/
static int aac_rx_deliver_message(struct fib * fib)
{
struct aac_dev *dev = fib->dev;
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
unsigned long qflags;
u32 Index;
u64 addr;
volatile void __iomem *device;
unsigned long count = 10000000L; /* 50 seconds */
spin_lock_irqsave(q->lock, qflags);
q->numpending++;
spin_unlock_irqrestore(q->lock, qflags);
for(;;) {
Index = rx_readl(dev, MUnit.InboundQueue); Index = rx_readl(dev, MUnit.InboundQueue);
dprintk((KERN_DEBUG "Index = 0x%x\n", Index)); if (Index == 0xFFFFFFFFL)
if (Index == 0xFFFFFFFFL) Index = rx_readl(dev, MUnit.InboundQueue);
return Index; if (Index != 0xFFFFFFFFL)
break;
if (--count == 0) {
spin_lock_irqsave(q->lock, qflags);
q->numpending--;
spin_unlock_irqrestore(q->lock, qflags);
return -ETIMEDOUT;
}
udelay(5);
}
device = dev->base + Index; device = dev->base + Index;
dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff), addr = fib->hw_fib_pa;
(u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
writel((u32)(addr & 0xffffffff), device); writel((u32)(addr & 0xffffffff), device);
device += sizeof(u32); device += sizeof(u32);
writel((u32)(addr >> 32), device); writel((u32)(addr >> 32), device);
device += sizeof(u32); device += sizeof(u32);
writel(le16_to_cpu(fib->hw_fib->header.Size), device); writel(le16_to_cpu(fib->hw_fib->header.Size), device);
rx_writel(dev, MUnit.InboundQueue, Index); rx_writel(dev, MUnit.InboundQueue, Index);
dprintk((KERN_DEBUG "aac_rx_send - return 0\n"));
return 0; return 0;
} }
@ -429,6 +480,31 @@ static int aac_rx_restart_adapter(struct aac_dev *dev)
return 0; return 0;
} }
/**
* aac_rx_select_comm - Select communications method
* @dev: Adapter
* @comm: communications method
*/
int aac_rx_select_comm(struct aac_dev *dev, int comm)
{
switch (comm) {
case AAC_COMM_PRODUCER:
dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
dev->a_ops.adapter_intr = aac_rx_intr_producer;
dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
break;
case AAC_COMM_MESSAGE:
dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
dev->a_ops.adapter_intr = aac_rx_intr_message;
dev->a_ops.adapter_deliver = aac_rx_deliver_message;
break;
default:
return 1;
}
return 0;
}
/** /**
* aac_rx_init - initialize an i960 based AAC card * aac_rx_init - initialize an i960 based AAC card
* @dev: device to configure * @dev: device to configure
@ -489,40 +565,42 @@ int _aac_rx_init(struct aac_dev *dev)
} }
msleep(1); msleep(1);
} }
if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev)<0)
{
printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
goto error_iounmap;
}
/* /*
* Fill in the function dispatch table. * Fill in the common function dispatch table.
*/ */
dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt; dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
dev->a_ops.adapter_notify = aac_rx_notify_adapter; dev->a_ops.adapter_notify = aac_rx_notify_adapter;
dev->a_ops.adapter_sync_cmd = rx_sync_cmd; dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
dev->a_ops.adapter_check_health = aac_rx_check_health; dev->a_ops.adapter_check_health = aac_rx_check_health;
dev->a_ops.adapter_send = aac_rx_send;
/* /*
* First clear out all interrupts. Then enable the one's that we * First clear out all interrupts. Then enable the one's that we
* can handle. * can handle.
*/ */
rx_writeb(dev, MUnit.OIMR, 0xff); aac_adapter_comm(dev, AAC_COMM_PRODUCER);
aac_adapter_disable_int(dev);
rx_writel(dev, MUnit.ODR, 0xffffffff); rx_writel(dev, MUnit.ODR, 0xffffffff);
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb); aac_adapter_enable_int(dev);
if (aac_init_adapter(dev) == NULL) if (aac_init_adapter(dev) == NULL)
goto error_irq; goto error_iounmap;
if (dev->new_comm_interface) aac_adapter_comm(dev, dev->comm_interface);
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
}
aac_adapter_enable_int(dev);
/*
* Tell the adapter that all is configured, and it can
* start accepting requests
*/
aac_rx_start_adapter(dev);
return 0; return 0;
error_irq:
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
free_irq(dev->scsi_host_ptr->irq, (void *)dev);
error_iounmap: error_iounmap:
return -1; return -1;
@@ -530,20 +608,11 @@ int _aac_rx_init(struct aac_dev *dev)
int aac_rx_init(struct aac_dev *dev)
{
int retval;
/*
* Fill in the function dispatch table.
*/
dev->a_ops.adapter_ioremap = aac_rx_ioremap;
dev->a_ops.adapter_comm = aac_rx_select_comm;
retval = _aac_rx_init(dev);
return _aac_rx_init(dev);
if (!retval) {
/*
* Tell the adapter that all is configured, and it can
* start accepting requests
*/
aac_rx_start_adapter(dev);
}
return retval;
} }

View file

@ -91,6 +91,17 @@ static void aac_sa_disable_interrupt (struct aac_dev *dev)
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff); sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
} }
/**
* aac_sa_enable_interrupt - enable interrupt
* @dev: Which adapter to enable.
*/
static void aac_sa_enable_interrupt (struct aac_dev *dev)
{
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
}
/** /**
* aac_sa_notify_adapter - handle adapter notification * aac_sa_notify_adapter - handle adapter notification
* @dev: Adapter that notification is for * @dev: Adapter that notification is for
@ -347,32 +358,36 @@ int aac_sa_init(struct aac_dev *dev)
msleep(1); msleep(1);
} }
if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev ) < 0) {
printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
goto error_iounmap;
}
/* /*
* Fill in the function dispatch table. * Fill in the function dispatch table.
*/ */
dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt; dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
dev->a_ops.adapter_notify = aac_sa_notify_adapter; dev->a_ops.adapter_notify = aac_sa_notify_adapter;
dev->a_ops.adapter_sync_cmd = sa_sync_cmd; dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dev->a_ops.adapter_check_health = aac_sa_check_health; dev->a_ops.adapter_check_health = aac_sa_check_health;
dev->a_ops.adapter_intr = aac_sa_intr;
dev->a_ops.adapter_ioremap = aac_sa_ioremap; dev->a_ops.adapter_ioremap = aac_sa_ioremap;
/* /*
* First clear out all interrupts. Then enable the one's that * First clear out all interrupts. Then enable the one's that
* we can handle. * we can handle.
*/ */
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
aac_adapter_disable_int(dev);
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
aac_adapter_enable_int(dev);
DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
if(aac_init_adapter(dev) == NULL)
goto error_irq;
goto error_iounmap;
if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED,
"aacraid", (void *)dev ) < 0) {
printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
}
aac_adapter_enable_int(dev);
/* /*
* Tell the adapter that all is configure, and it can start * Tell the adapter that all is configure, and it can start
@ -382,7 +397,7 @@ int aac_sa_init(struct aac_dev *dev)
return 0; return 0;
error_irq:
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
aac_sa_disable_interrupt(dev);
free_irq(dev->scsi_host_ptr->irq, (void *)dev);
error_iounmap: error_iounmap:

View file

@@ -4403,7 +4403,7 @@ advansys_detect(struct scsi_host_template *tpnt)
ASC_DBG1(1,
"advansys_detect: probing I/O port 0x%x...\n",
iop);
if (check_region(iop, ASC_IOADR_GAP) != 0) {
if (!request_region(iop, ASC_IOADR_GAP, "advansys")){
printk(
"AdvanSys SCSI: specified I/O Port 0x%X is busy\n", iop);
/* Don't try this I/O port twice. */
@ -4413,6 +4413,7 @@ advansys_detect(struct scsi_host_template *tpnt)
printk( printk(
"AdvanSys SCSI: specified I/O Port 0x%X has no adapter\n", iop); "AdvanSys SCSI: specified I/O Port 0x%X has no adapter\n", iop);
/* Don't try this I/O port twice. */ /* Don't try this I/O port twice. */
release_region(iop, ASC_IOADR_GAP);
asc_ioport[ioport] = 0; asc_ioport[ioport] = 0;
goto ioport_try_again; goto ioport_try_again;
} else { } else {
@ -4431,6 +4432,7 @@ advansys_detect(struct scsi_host_template *tpnt)
* 'ioport' past this board. * 'ioport' past this board.
*/ */
ioport++; ioport++;
release_region(iop, ASC_IOADR_GAP);
goto ioport_try_again; goto ioport_try_again;
} }
} }
@ -9740,13 +9742,14 @@ AscSearchIOPortAddr11(
} }
for (; i < ASC_IOADR_TABLE_MAX_IX; i++) { for (; i < ASC_IOADR_TABLE_MAX_IX; i++) {
iop_base = _asc_def_iop_base[i]; iop_base = _asc_def_iop_base[i];
if (check_region(iop_base, ASC_IOADR_GAP) != 0) {
if (!request_region(iop_base, ASC_IOADR_GAP, "advansys")){
ASC_DBG1(1, ASC_DBG1(1,
"AscSearchIOPortAddr11: check_region() failed I/O port 0x%x\n", "AscSearchIOPortAddr11: check_region() failed I/O port 0x%x\n",
iop_base); iop_base);
continue; continue;
} }
ASC_DBG1(1, "AscSearchIOPortAddr11: probing I/O port 0x%x\n", iop_base); ASC_DBG1(1, "AscSearchIOPortAddr11: probing I/O port 0x%x\n", iop_base);
release_region(iop_base, ASC_IOADR_GAP);
if (AscFindSignature(iop_base)) { if (AscFindSignature(iop_base)) {
return (iop_base); return (iop_base);
} }

View file

@ -1337,9 +1337,6 @@ int ahd_pci_test_register_access(struct ahd_softc *);
/************************** SCB and SCB queue management **********************/ /************************** SCB and SCB queue management **********************/
void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, void ahd_qinfifo_requeue_tail(struct ahd_softc *ahd,
struct scb *scb); struct scb *scb);
int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
int target, char channel, int lun,
u_int tag, role_t role);
/****************************** Initialization ********************************/ /****************************** Initialization ********************************/
struct ahd_softc *ahd_alloc(void *platform_arg, char *name); struct ahd_softc *ahd_alloc(void *platform_arg, char *name);

View file

@ -262,6 +262,9 @@ static void ahd_update_coalescing_values(struct ahd_softc *ahd,
u_int mincmds); u_int mincmds);
static int ahd_verify_vpd_cksum(struct vpd_config *vpd); static int ahd_verify_vpd_cksum(struct vpd_config *vpd);
static int ahd_wait_seeprom(struct ahd_softc *ahd); static int ahd_wait_seeprom(struct ahd_softc *ahd);
static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
int target, char channel, int lun,
u_int tag, role_t role);
/******************************** Private Inlines *****************************/ /******************************** Private Inlines *****************************/
@ -7256,7 +7259,7 @@ ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
} }
/************************** SCB and SCB queue management **********************/ /************************** SCB and SCB queue management **********************/
int
static int
ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
char channel, int lun, u_int tag, role_t role) char channel, int lun, u_int tag, role_t role)
{ {

View file

@ -1126,15 +1126,6 @@ ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *templa
return 0; return 0;
} }
uint64_t
ahd_linux_get_memsize(void)
{
struct sysinfo si;
si_meminfo(&si);
return ((uint64_t)si.totalram << PAGE_SHIFT);
}
/* /*
* Place the SCSI bus into a known state by either resetting it, * Place the SCSI bus into a known state by either resetting it,
* or forcing transfer negotiations on the next command to any * or forcing transfer negotiations on the next command to any

View file

@ -496,8 +496,6 @@ ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
int ahd_linux_register_host(struct ahd_softc *, int ahd_linux_register_host(struct ahd_softc *,
struct scsi_host_template *); struct scsi_host_template *);
uint64_t ahd_linux_get_memsize(void);
/*************************** Pretty Printing **********************************/ /*************************** Pretty Printing **********************************/
struct info_str { struct info_str {
char *buffer; char *buffer;

View file

@@ -132,6 +132,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ahd_pci_identity *entry;
char *name;
int error;
struct device *dev = &pdev->dev;
pci = pdev;
entry = ahd_find_pci_device(pci);
@@ -161,20 +162,18 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
if (sizeof(dma_addr_t) > 4) {
uint64_t memsize;
const u64 required_mask = dma_get_required_mask(dev);
const uint64_t mask_39bit = 0x7FFFFFFFFFULL;
memsize = ahd_linux_get_memsize();
if (required_mask > DMA_39BIT_MASK &&
dma_set_mask(dev, DMA_64BIT_MASK) == 0)
if (memsize >= 0x8000000000ULL
&& pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
ahd->flags |= AHD_64BIT_ADDRESSING;
} else if (memsize > 0x80000000
else if (required_mask > DMA_32BIT_MASK &&
&& pci_set_dma_mask(pdev, mask_39bit) == 0) {
dma_set_mask(dev, DMA_39BIT_MASK) == 0)
ahd->flags |= AHD_39BIT_ADDRESSING;
}
else
dma_set_mask(dev, DMA_32BIT_MASK);
} else {
pci_set_dma_mask(pdev, DMA_32BIT_MASK);
dma_set_mask(dev, DMA_32BIT_MASK);
}
ahd->dev_softc = pci;
error = ahd_pci_config(ahd, entry);
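The probe hunk above replaces the host-memory-size heuristic with dma_get_required_mask(). Not part of the patch, a user-space sketch of the selection order; the single set_mask_ok flag stands in for the return values of dma_set_mask(), which the real code checks once per mask:

    #include <stdio.h>
    #include <stdint.h>

    #define DMA_32BIT_MASK 0x00000000ffffffffULL
    #define DMA_39BIT_MASK 0x0000007fffffffffULL
    #define DMA_64BIT_MASK 0xffffffffffffffffULL

    /* Pick the widest addressing mode the platform actually needs, falling
     * back a level when setting the mask would fail. */
    static const char *pick_mode(uint64_t required_mask, int set_mask_ok)
    {
            if (required_mask > DMA_39BIT_MASK && set_mask_ok)
                    return "64-bit addressing";
            if (required_mask > DMA_32BIT_MASK && set_mask_ok)
                    return "39-bit addressing";
            return "32-bit addressing";
    }

    int main(void)
    {
            printf("%s\n", pick_mode(0xffffffffULL, 1));     /* fits in 32 bits */
            printf("%s\n", pick_mode(0x1fffffffffULL, 1));   /* above 4GB: 39-bit */
            printf("%s\n", pick_mode(0xffffffffffffULL, 1)); /* very large: 64-bit */
            return 0;
    }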

View file

@@ -88,7 +88,7 @@ ahd_compose_id(u_int device, u_int vendor, u_int subdevice, u_int subvendor)
#define SUBID_9005_LEGACYCONN_FUNC(id) ((id) & 0x20)
#define SUBID_9005_SEEPTYPE(id) ((id) & 0x0C0) >> 6)
#define SUBID_9005_SEEPTYPE(id) (((id) & 0x0C0) >> 6)
#define SUBID_9005_SEEPTYPE_NONE 0x0
#define SUBID_9005_SEEPTYPE_4K 0x1 #define SUBID_9005_SEEPTYPE_4K 0x1

View file

@ -37,18 +37,14 @@
static inline int asd_get_ddb(struct asd_ha_struct *asd_ha) static inline int asd_get_ddb(struct asd_ha_struct *asd_ha)
{ {
unsigned long flags;
int ddb, i; int ddb, i;
spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
ddb = FIND_FREE_DDB(asd_ha); ddb = FIND_FREE_DDB(asd_ha);
if (ddb >= asd_ha->hw_prof.max_ddbs) { if (ddb >= asd_ha->hw_prof.max_ddbs) {
ddb = -ENOMEM; ddb = -ENOMEM;
spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
goto out; goto out;
} }
SET_DDB(ddb, asd_ha); SET_DDB(ddb, asd_ha);
spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4)
asd_ddbsite_write_dword(asd_ha, ddb, i, 0); asd_ddbsite_write_dword(asd_ha, ddb, i, 0);
@ -77,14 +73,10 @@ static inline int asd_get_ddb(struct asd_ha_struct *asd_ha)
static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb)
{ {
unsigned long flags;
if (!ddb || ddb >= 0xFFFF) if (!ddb || ddb >= 0xFFFF)
return; return;
asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED); asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED);
spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
CLEAR_DDB(ddb, asd_ha); CLEAR_DDB(ddb, asd_ha);
spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
} }
static inline void asd_set_ddb_type(struct domain_device *dev) static inline void asd_set_ddb_type(struct domain_device *dev)
@ -320,8 +312,11 @@ static int asd_init_sata_pm_ddb(struct domain_device *dev)
int asd_dev_found(struct domain_device *dev) int asd_dev_found(struct domain_device *dev)
{ {
unsigned long flags;
int res = 0; int res = 0;
struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
switch (dev->dev_type) { switch (dev->dev_type) {
case SATA_PM: case SATA_PM:
res = asd_init_sata_pm_ddb(dev); res = asd_init_sata_pm_ddb(dev);
@ -335,14 +330,18 @@ int asd_dev_found(struct domain_device *dev)
else else
res = asd_init_initiator_ddb(dev); res = asd_init_initiator_ddb(dev);
} }
spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
return res; return res;
} }
void asd_dev_gone(struct domain_device *dev) void asd_dev_gone(struct domain_device *dev)
{ {
int ddb, sister_ddb; int ddb, sister_ddb;
unsigned long flags;
struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
ddb = (int) (unsigned long) dev->lldd_dev; ddb = (int) (unsigned long) dev->lldd_dev;
sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB); sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB);
@ -350,4 +349,5 @@ void asd_dev_gone(struct domain_device *dev)
asd_free_ddb(asd_ha, sister_ddb); asd_free_ddb(asd_ha, sister_ddb);
asd_free_ddb(asd_ha, ddb); asd_free_ddb(asd_ha, ddb);
dev->lldd_dev = NULL; dev->lldd_dev = NULL;
spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
} }

View file

@ -556,7 +556,7 @@ static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq)
PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_TAIL); PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_TAIL);
PRINT_LMIP_byte(asd_ha, lseq, LINK_NUMBER); PRINT_LMIP_byte(asd_ha, lseq, LINK_NUMBER);
PRINT_LMIP_byte(asd_ha, lseq, SCRATCH_FLAGS); PRINT_LMIP_byte(asd_ha, lseq, SCRATCH_FLAGS);
PRINT_LMIP_qword(asd_ha, lseq, CONNECTION_STATE);
PRINT_LMIP_dword(asd_ha, lseq, CONNECTION_STATE);
PRINT_LMIP_word(asd_ha, lseq, CONCTL); PRINT_LMIP_word(asd_ha, lseq, CONCTL);
PRINT_LMIP_byte(asd_ha, lseq, CONSTAT); PRINT_LMIP_byte(asd_ha, lseq, CONSTAT);
PRINT_LMIP_byte(asd_ha, lseq, CONNECTION_MODES); PRINT_LMIP_byte(asd_ha, lseq, CONNECTION_MODES);

View file

@ -38,7 +38,7 @@
#include "aic94xx_seq.h" #include "aic94xx_seq.h"
/* The format is "version.release.patchlevel" */ /* The format is "version.release.patchlevel" */
#define ASD_DRIVER_VERSION "1.0.2" #define ASD_DRIVER_VERSION "1.0.3"
static int use_msi = 0; static int use_msi = 0;
module_param_named(use_msi, use_msi, int, S_IRUGO); module_param_named(use_msi, use_msi, int, S_IRUGO);
@ -57,6 +57,8 @@ MODULE_PARM_DESC(collector, "\n"
char sas_addr_str[2*SAS_ADDR_SIZE + 1] = ""; char sas_addr_str[2*SAS_ADDR_SIZE + 1] = "";
static struct scsi_transport_template *aic94xx_transport_template; static struct scsi_transport_template *aic94xx_transport_template;
static int asd_scan_finished(struct Scsi_Host *, unsigned long);
static void asd_scan_start(struct Scsi_Host *);
static struct scsi_host_template aic94xx_sht = { static struct scsi_host_template aic94xx_sht = {
.module = THIS_MODULE, .module = THIS_MODULE,
@ -66,6 +68,8 @@ static struct scsi_host_template aic94xx_sht = {
.target_alloc = sas_target_alloc, .target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure, .slave_configure = sas_slave_configure,
.slave_destroy = sas_slave_destroy, .slave_destroy = sas_slave_destroy,
.scan_finished = asd_scan_finished,
.scan_start = asd_scan_start,
.change_queue_depth = sas_change_queue_depth, .change_queue_depth = sas_change_queue_depth,
.change_queue_type = sas_change_queue_type, .change_queue_type = sas_change_queue_type,
.bios_param = sas_bios_param, .bios_param = sas_bios_param,
@ -75,6 +79,8 @@ static struct scsi_host_template aic94xx_sht = {
.sg_tablesize = SG_ALL, .sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS, .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
}; };
static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha) static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)
@ -234,7 +240,7 @@ static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha)
} }
/* Provide some sane default values. */ /* Provide some sane default values. */
asd_ha->hw_prof.max_scbs = 512;
asd_ha->hw_prof.max_ddbs = 128;
asd_ha->hw_prof.max_ddbs = ASD_MAX_DDBS;
asd_ha->hw_prof.num_phys = ASD_MAX_PHYS; asd_ha->hw_prof.num_phys = ASD_MAX_PHYS;
/* All phys are enabled, by default. */ /* All phys are enabled, by default. */
asd_ha->hw_prof.enabled_phys = 0xFF; asd_ha->hw_prof.enabled_phys = 0xFF;
@ -526,6 +532,7 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue; asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue;
asd_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num;
return sas_register_ha(&asd_ha->sas_ha); return sas_register_ha(&asd_ha->sas_ha);
} }
@ -671,21 +678,10 @@ static int __devinit asd_pci_probe(struct pci_dev *dev,
if (err) if (err)
goto Err_reg_sas; goto Err_reg_sas;
err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys); scsi_scan_host(shost);
if (err) {
asd_printk("coudln't enable phys, err:%d\n", err);
goto Err_en_phys;
}
ASD_DPRINTK("enabled phys\n");
/* give the phy enabling interrupt event time to come in (1s
* is empirically about all it takes) */
ssleep(1);
/* Wait for discovery to finish */
scsi_flush_work(asd_ha->sas_ha.core.shost);
return 0; return 0;
Err_en_phys:
asd_unregister_sas_ha(asd_ha);
Err_reg_sas: Err_reg_sas:
asd_remove_dev_attrs(asd_ha); asd_remove_dev_attrs(asd_ha);
Err_dev_attrs: Err_dev_attrs:
@ -778,6 +774,28 @@ static void __devexit asd_pci_remove(struct pci_dev *dev)
return; return;
} }
static void asd_scan_start(struct Scsi_Host *shost)
{
struct asd_ha_struct *asd_ha;
int err;
asd_ha = SHOST_TO_SAS_HA(shost)->lldd_ha;
err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys);
if (err)
asd_printk("Couldn't enable phys, err:%d\n", err);
}
static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
/* give the phy enabling interrupt event time to come in (1s
* is empirically about all it takes) */
if (time < HZ)
return 0;
/* Wait for discovery to finish */
scsi_flush_work(shost);
return 1;
}
static ssize_t asd_version_show(struct device_driver *driver, char *buf) static ssize_t asd_version_show(struct device_driver *driver, char *buf)
{ {
return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION); return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
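The probe path above no longer enables phys and sleeps inline; it defers that work to the midlayer's asynchronous scan hooks, where scan_finished() is polled until roughly a second of jiffies has elapsed and the discovery work has been flushed. Below is a minimal userspace sketch of that polling contract, not the driver code itself; HZ, the poll step, and the fake_* names are illustrative stand-ins.

#include <stdio.h>

#define HZ 100                       /* illustrative tick rate, not the kernel's */

static int work_flushed;

static void fake_scan_start(void)
{
    /* stand-in for asd_enable_phys(): kick off the asynchronous phy bring-up */
    printf("scan_start: enabling phys\n");
}

static int fake_scan_finished(unsigned long time)
{
    if (time < HZ)                   /* give the phy-up events about a second */
        return 0;
    work_flushed = 1;                /* stand-in for scsi_flush_work(shost) */
    return 1;
}

int main(void)
{
    unsigned long elapsed = 0;

    fake_scan_start();
    while (!fake_scan_finished(elapsed))
        elapsed += 10;               /* the midlayer re-polls after a short sleep */

    printf("scan finished after %lu ticks, work flushed: %d\n",
           elapsed, work_flushed);
    return 0;
}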
@ -885,6 +903,7 @@ static void __exit aic94xx_exit(void)
asd_remove_driver_attrs(&aic94xx_pci_driver.driver); asd_remove_driver_attrs(&aic94xx_pci_driver.driver);
pci_unregister_driver(&aic94xx_pci_driver); pci_unregister_driver(&aic94xx_pci_driver);
sas_release_transport(aic94xx_transport_template); sas_release_transport(aic94xx_transport_template);
asd_release_firmware();
asd_destroy_global_caches(); asd_destroy_global_caches();
asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION, asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION,
ASD_DRIVER_VERSION); ASD_DRIVER_VERSION);

View file

@ -2226,9 +2226,10 @@
#define LmSEQ_SAS_RESET_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0074) #define LmSEQ_SAS_RESET_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0074)
#define LmSEQ_LINK_RESET_RETRY_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0075) #define LmSEQ_LINK_RESET_RETRY_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0075)
#define LmSEQ_NUM_LINK_RESET_RETRIES(LinkNum) (LmSCRATCH(LinkNum) + 0x0076) #define LmSEQ_NUM_LINK_RESET_RETRIES(LinkNum) (LmSCRATCH(LinkNum) + 0x0076)
#define LmSEQ_OOB_INT_ENABLES(LinkNum) (LmSCRATCH(LinkNum) + 0x007A) #define LmSEQ_OOB_INT_ENABLES(LinkNum) (LmSCRATCH(LinkNum) + 0x0078)
#define LmSEQ_NOTIFY_TIMER_DOWN_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007A)
#define LmSEQ_NOTIFY_TIMER_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x007C) #define LmSEQ_NOTIFY_TIMER_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x007C)
#define LmSEQ_NOTIFY_TIMER_DOWN_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007E) #define LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007E)
/* Mode dependent scratch page 1, mode 0 and mode 1 */ /* Mode dependent scratch page 1, mode 0 and mode 1 */
#define LmSEQ_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x0020) #define LmSEQ_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x0020)

View file

@ -34,6 +34,7 @@
* domain that this sequencer can maintain low-level connections for * domain that this sequencer can maintain low-level connections for
* us. They are be 64 bytes. * us. They are be 64 bytes.
*/ */
#define ASD_MAX_DDBS 128
struct asd_ddb_ssp_smp_target_port { struct asd_ddb_ssp_smp_target_port {
u8 conn_type; /* byte 0 */ u8 conn_type; /* byte 0 */

View file

@ -413,40 +413,6 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
} }
} }
/* hard reset a phy later */
static void do_phy_reset_later(struct work_struct *work)
{
struct sas_phy *sas_phy =
container_of(work, struct sas_phy, reset_work);
int error;
ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
sas_phy->identify.phy_identifier);
/* Reset device port */
error = sas_phy_reset(sas_phy, 1);
if (error)
ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n",
__FUNCTION__, sas_phy->identify.phy_identifier, error);
}
static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
{
INIT_WORK(&sas_phy->reset_work, do_phy_reset_later);
queue_work(shost->work_q, &sas_phy->reset_work);
}
/* start up the ABORT TASK tmf... */
static void task_kill_later(struct asd_ascb *ascb)
{
struct asd_ha_struct *asd_ha = ascb->ha;
struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_task *task = ascb->uldd_task;
INIT_WORK(&task->abort_work, sas_task_abort);
queue_work(shost->work_q, &task->abort_work);
}
static void escb_tasklet_complete(struct asd_ascb *ascb, static void escb_tasklet_complete(struct asd_ascb *ascb,
struct done_list_struct *dl) struct done_list_struct *dl)
{ {
@ -479,26 +445,55 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
case REQ_TASK_ABORT: { case REQ_TASK_ABORT: {
struct asd_ascb *a, *b; struct asd_ascb *a, *b;
u16 tc_abort; u16 tc_abort;
struct domain_device *failed_dev = NULL;
tc_abort = *((u16*)(&dl->status_block[1]));
tc_abort = le16_to_cpu(tc_abort);
ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n", ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
__FUNCTION__, dl->status_block[3]); __FUNCTION__, dl->status_block[3]);
/* Find the pending task and abort it. */ /*
list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) * Find the task that caused the abort and abort it first.
if (a->tc_index == tc_abort) { * The sequencer won't put anything on the done list until
task_kill_later(a); * that happens.
*/
tc_abort = *((u16*)(&dl->status_block[1]));
tc_abort = le16_to_cpu(tc_abort);
list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
struct sas_task *task = ascb->uldd_task;
if (task && a->tc_index == tc_abort) {
failed_dev = task->dev;
sas_task_abort(task);
break; break;
} }
}
if (!failed_dev) {
ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
__FUNCTION__, tc_abort);
goto out;
}
/*
* Now abort everything else for that device (hba?) so
* that the EH will wake up and do something.
*/
list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
struct sas_task *task = ascb->uldd_task;
if (task &&
task->dev == failed_dev &&
a->tc_index != tc_abort)
sas_task_abort(task);
}
goto out; goto out;
} }
case REQ_DEVICE_RESET: { case REQ_DEVICE_RESET: {
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_phy *dev_phy;
struct asd_ascb *a; struct asd_ascb *a;
u16 conn_handle; u16 conn_handle;
unsigned long flags;
struct sas_task *last_dev_task = NULL;
conn_handle = *((u16*)(&dl->status_block[1])); conn_handle = *((u16*)(&dl->status_block[1]));
conn_handle = le16_to_cpu(conn_handle); conn_handle = le16_to_cpu(conn_handle);
@ -506,32 +501,47 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__, ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
dl->status_block[3]); dl->status_block[3]);
/* Kill all pending tasks and reset the device */ /* Find the last pending task for the device... */
dev_phy = NULL;
list_for_each_entry(a, &asd_ha->seq.pend_q, list) { list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
struct sas_task *task;
struct domain_device *dev;
u16 x; u16 x;
struct domain_device *dev;
struct sas_task *task = a->uldd_task;
task = a->uldd_task;
if (!task) if (!task)
continue; continue;
dev = task->dev; dev = task->dev;
x = (unsigned long)dev->lldd_dev; x = (unsigned long)dev->lldd_dev;
if (x == conn_handle) { if (x == conn_handle)
dev_phy = dev->port->phy; last_dev_task = task;
task_kill_later(a);
}
} }
/* Reset device port */ if (!last_dev_task) {
if (!dev_phy) { ASD_DPRINTK("%s: Device reset for idle device %d?\n",
ASD_DPRINTK("%s: No pending commands; can't reset.\n", __FUNCTION__, conn_handle);
__FUNCTION__);
goto out; goto out;
} }
phy_reset_later(dev_phy, shost);
/* ...and set the reset flag */
spin_lock_irqsave(&last_dev_task->task_state_lock, flags);
last_dev_task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
spin_unlock_irqrestore(&last_dev_task->task_state_lock, flags);
/* Kill all pending tasks for the device */
list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
u16 x;
struct domain_device *dev;
struct sas_task *task = a->uldd_task;
if (!task)
continue;
dev = task->dev;
x = (unsigned long)dev->lldd_dev;
if (x == conn_handle)
sas_task_abort(task);
}
goto out; goto out;
} }
case SIGNAL_NCQ_ERROR: case SIGNAL_NCQ_ERROR:

View file

@ -427,7 +427,7 @@ struct asd_manuf_sec {
struct asd_manuf_phy_desc { struct asd_manuf_phy_desc {
u8 state; /* low 4 bits */ u8 state; /* low 4 bits */
#define MS_PHY_STATE_ENABLEABLE 0 #define MS_PHY_STATE_ENABLED 0
#define MS_PHY_STATE_REPORTED 1 #define MS_PHY_STATE_REPORTED 1
#define MS_PHY_STATE_HIDDEN 2 #define MS_PHY_STATE_HIDDEN 2
u8 phy_id; u8 phy_id;
@ -756,11 +756,11 @@ static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1)
* *
* HIDDEN phys do not count in the total count. REPORTED phys cannot * HIDDEN phys do not count in the total count. REPORTED phys cannot
* be enabled but are reported and counted towards the total. * be enabled but are reported and counted towards the total.
* ENEBLEABLE phys are enabled by default and count towards the total. * ENABLED phys are enabled by default and count towards the total.
* The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys
* merely specifies the number of phys the host adapter decided to * merely specifies the number of phys the host adapter decided to
* report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN, * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN,
* phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENEBLEABLE. * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLED.
* In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2 * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2
* are actually enabled (enabled by default, max number of phys * are actually enabled (enabled by default, max number of phys
* enableable in this case). * enableable in this case).
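The phy-state rules in the comment above are easiest to check against the worked example it gives. The following standalone sketch is a simplified model (states and field names reduced, not the real descriptor layout) that reproduces that case: eight phys, three HIDDEN, three REPORTED, two ENABLED.

#include <stdio.h>

enum { PHY_HIDDEN, PHY_REPORTED, PHY_ENABLED };   /* simplified states */

int main(void)
{
    /* phys 0-2 HIDDEN, 3-5 REPORTED, 6-7 ENABLED, as in the comment */
    int state[8] = { PHY_HIDDEN, PHY_HIDDEN, PHY_HIDDEN,
                     PHY_REPORTED, PHY_REPORTED, PHY_REPORTED,
                     PHY_ENABLED, PHY_ENABLED };
    unsigned enabled_mask = 0;
    int num_phys = 0, i;

    for (i = 0; i < 8; i++) {
        switch (state[i]) {
        case PHY_HIDDEN:                 /* not counted at all */
            break;
        case PHY_REPORTED:               /* counted, but never enabled */
            num_phys++;
            break;
        case PHY_ENABLED:                /* counted and enabled by default */
            num_phys++;
            enabled_mask |= 1u << i;
            break;
        }
    }
    printf("num_phys=%d enabled_mask=0x%02x\n", num_phys, enabled_mask);
    /* prints: num_phys=5 enabled_mask=0xc0 */
    return 0;
}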
@ -816,8 +816,8 @@ static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha,
asd_ha->hw_prof.enabled_phys &= ~(1 << i); asd_ha->hw_prof.enabled_phys &= ~(1 << i);
rep_phys++; rep_phys++;
continue; continue;
case MS_PHY_STATE_ENABLEABLE: case MS_PHY_STATE_ENABLED:
ASD_DPRINTK("ms: phy%d: ENEBLEABLE\n", i); ASD_DPRINTK("ms: phy%d: ENABLED\n", i);
asd_ha->hw_prof.enabled_phys |= (1 << i); asd_ha->hw_prof.enabled_phys |= (1 << i);
en_phys++; en_phys++;
break; break;

View file

@ -810,6 +810,8 @@ static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
/* No delay for the first NOTIFY to be sent to the attached target. */ /* No delay for the first NOTIFY to be sent to the attached target. */
asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq), asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
ASD_NOTIFY_DOWN_COUNT); ASD_NOTIFY_DOWN_COUNT);
asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(lseq),
ASD_NOTIFY_DOWN_COUNT);
/* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */ /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
@ -907,6 +909,16 @@ static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
for (i = 0; i < ASD_SCB_SIZE; i += 4) for (i = 0; i < ASD_SCB_SIZE; i += 4)
asd_scbsite_write_dword(asd_ha, site_no, i, 0); asd_scbsite_write_dword(asd_ha, site_no, i, 0);
/* Initialize SCB Site Opcode field to invalid. */
asd_scbsite_write_byte(asd_ha, site_no,
offsetof(struct scb_header, opcode),
0xFF);
/* Initialize SCB Site Flags field to mean a response
* frame has been received, so that inadvertently
* received frames are dropped. */
asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);
/* Workaround needed by SEQ to fix a SATA issue is to exclude /* Workaround needed by SEQ to fix a SATA issue is to exclude
* certain SCB sites from the free list. */ * certain SCB sites from the free list. */
if (!SCB_SITE_VALID(site_no)) if (!SCB_SITE_VALID(site_no))
@ -922,16 +934,6 @@ static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
/* Q_NEXT field of the last SCB is invalidated. */ /* Q_NEXT field of the last SCB is invalidated. */
asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no); asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);
/* Initialize SCB Site Opcode field to invalid. */
asd_scbsite_write_byte(asd_ha, site_no,
offsetof(struct scb_header, opcode),
0xFF);
/* Initialize SCB Site Flags field to mean a response
* frame has been received, so that inadvertently
* received frames are dropped. */
asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);
first_scb_site_no = site_no; first_scb_site_no = site_no;
max_scbs++; max_scbs++;
} }
@ -1173,6 +1175,16 @@ static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
set_bit(0, asd_ha->hw_prof.ddb_bitmap); set_bit(0, asd_ha->hw_prof.ddb_bitmap);
} }
static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha)
{
unsigned int i;
unsigned int ddb_site;
for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++)
for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4)
asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0);
}
/** /**
* asd_seq_setup_seqs -- setup and initialize central and link sequencers * asd_seq_setup_seqs -- setup and initialize central and link sequencers
* @asd_ha: pointer to host adapter structure * @asd_ha: pointer to host adapter structure
@ -1182,6 +1194,9 @@ static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
int lseq; int lseq;
u8 lseq_mask; u8 lseq_mask;
/* Initialize DDB sites */
asd_seq_init_ddb_sites(asd_ha);
/* Initialize SCB sites. Done first to compute some values which /* Initialize SCB sites. Done first to compute some values which
* the rest of the init code depends on. */ * the rest of the init code depends on. */
asd_init_scb_sites(asd_ha); asd_init_scb_sites(asd_ha);
@ -1232,6 +1247,13 @@ static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
return asd_seq_unpause_lseq(asd_ha, lseq); return asd_seq_unpause_lseq(asd_ha, lseq);
} }
int asd_release_firmware(void)
{
if (sequencer_fw)
release_firmware(sequencer_fw);
return 0;
}
static int asd_request_firmware(struct asd_ha_struct *asd_ha) static int asd_request_firmware(struct asd_ha_struct *asd_ha)
{ {
int err, i; int err, i;
@ -1375,7 +1397,9 @@ void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
u8 phy_is_up; u8 phy_is_up;
u8 mask; u8 mask;
int i, err; int i, err;
unsigned long flags;
spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
for_each_phy(phy_mask, mask, i) for_each_phy(phy_mask, mask, i)
asd_ddbsite_write_byte(asd_ha, 0, asd_ddbsite_write_byte(asd_ha, 0,
offsetof(struct asd_ddb_seq_shared, offsetof(struct asd_ddb_seq_shared,
@ -1395,6 +1419,7 @@ void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
break; break;
} }
} }
spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
if (err) if (err)
asd_printk("couldn't update DDB 0:error:%d\n", err); asd_printk("couldn't update DDB 0:error:%d\n", err);

View file

@ -63,6 +63,7 @@ int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
int asd_init_seqs(struct asd_ha_struct *asd_ha); int asd_init_seqs(struct asd_ha_struct *asd_ha);
int asd_start_seqs(struct asd_ha_struct *asd_ha); int asd_start_seqs(struct asd_ha_struct *asd_ha);
int asd_release_firmware(void);
void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy); void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy);
#endif #endif

View file

@ -349,6 +349,7 @@ static void asd_task_tasklet_complete(struct asd_ascb *ascb,
spin_lock_irqsave(&task->task_state_lock, flags); spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING; task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE; task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
@ -557,6 +558,7 @@ int asd_execute_task(struct sas_task *task, const int num,
struct sas_task *t = task; struct sas_task *t = task;
struct asd_ascb *ascb = NULL, *a; struct asd_ascb *ascb = NULL, *a;
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
unsigned long flags;
res = asd_can_queue(asd_ha, num); res = asd_can_queue(asd_ha, num);
if (res) if (res)
@ -599,6 +601,10 @@ int asd_execute_task(struct sas_task *task, const int num,
} }
if (res) if (res)
goto out_err_unmap; goto out_err_unmap;
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&t->task_state_lock, flags);
} }
list_del_init(&alist); list_del_init(&alist);
@ -617,6 +623,9 @@ int asd_execute_task(struct sas_task *task, const int num,
if (a == b) if (a == b)
break; break;
t = a->uldd_task; t = a->uldd_task;
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&t->task_state_lock, flags);
switch (t->task_proto) { switch (t->task_proto) {
case SATA_PROTO: case SATA_PROTO:
case SAS_PROTO_STP: case SAS_PROTO_STP:

View file

@ -566,9 +566,7 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
res = TMF_RESP_FUNC_ESUPP; res = TMF_RESP_FUNC_ESUPP;
break; break;
default: default:
ASD_DPRINTK("%s: converting result 0x%x to TMF_RESP_FUNC_FAILED\n", /* Allow TMF response codes to propagate upwards */
__FUNCTION__, res);
res = TMF_RESP_FUNC_FAILED;
break; break;
} }
out_err: out_err:

View file

@ -595,10 +595,8 @@ static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{ {
int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
if (pcix_cmd_reg == 0) { if (pcix_cmd_reg == 0)
dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); return 0;
return -EIO;
}
if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
&ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
@ -627,10 +625,6 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
return -EIO; return -EIO;
} }
} else {
dev_err(&ioa_cfg->pdev->dev,
"Failed to setup PCI-X command register\n");
return -EIO;
} }
return 0; return 0;
@ -6314,7 +6308,6 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
int rc; int rc;
ENTER; ENTER;
pci_unblock_user_cfg_access(ioa_cfg->pdev);
rc = pci_restore_state(ioa_cfg->pdev); rc = pci_restore_state(ioa_cfg->pdev);
if (rc != PCIBIOS_SUCCESSFUL) { if (rc != PCIBIOS_SUCCESSFUL) {
@ -6354,6 +6347,24 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
return IPR_RC_JOB_CONTINUE; return IPR_RC_JOB_CONTINUE;
} }
/**
* ipr_reset_bist_done - BIST has completed on the adapter.
* @ipr_cmd: ipr command struct
*
* Description: Unblock config space and resume the reset process.
*
* Return value:
* IPR_RC_JOB_CONTINUE
**/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
ENTER;
pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
ipr_cmd->job_step = ipr_reset_restore_cfg_space;
LEAVE;
return IPR_RC_JOB_CONTINUE;
}
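ipr_reset_bist_done above is a new link in ipr's job_step chain: ipr_reset_start_bist arms a timer and points job_step at it, and when the timer fires it unblocks config space and hands off to ipr_reset_restore_cfg_space. A toy sketch of that chaining pattern follows; the step names, return codes, and the inline "timer" are illustrative stand-ins, not the driver's reset engine.

#include <stdio.h>

enum { JOB_CONTINUE, JOB_RETURN };       /* illustrative return codes */

struct cmd;
typedef int (*job_step_t)(struct cmd *);

struct cmd { job_step_t job_step; };

static int restore_cfg_space(struct cmd *c)
{
    (void)c;
    printf("restore config space\n");
    return JOB_RETURN;                   /* chain ends here in this sketch */
}

static int bist_done(struct cmd *c)
{
    printf("BIST done: unblock config space\n");
    c->job_step = restore_cfg_space;     /* arm the next stage */
    return JOB_CONTINUE;
}

static int start_bist(struct cmd *c)
{
    printf("start BIST, wait for timeout\n");
    c->job_step = bist_done;
    return JOB_RETURN;                   /* real driver waits on a timer here */
}

int main(void)
{
    struct cmd c = { .job_step = start_bist };

    /* reset engine: run steps back-to-back while they return JOB_CONTINUE */
    while (c.job_step(&c) == JOB_CONTINUE)
        ;
    /* a JOB_RETURN step armed a timer; when it fires, resume the chain */
    while (c.job_step(&c) == JOB_CONTINUE)
        ;
    return 0;
}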
/** /**
* ipr_reset_start_bist - Run BIST on the adapter. * ipr_reset_start_bist - Run BIST on the adapter.
* @ipr_cmd: ipr command struct * @ipr_cmd: ipr command struct
@ -6376,7 +6387,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
rc = IPR_RC_JOB_CONTINUE; rc = IPR_RC_JOB_CONTINUE;
} else { } else {
ipr_cmd->job_step = ipr_reset_restore_cfg_space; ipr_cmd->job_step = ipr_reset_bist_done;
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
rc = IPR_RC_JOB_RETURN; rc = IPR_RC_JOB_RETURN;
} }
@ -7166,9 +7177,6 @@ ipr_get_chip_cfg(const struct pci_device_id *dev_id)
{ {
int i; int i;
if (dev_id->driver_data)
return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
for (i = 0; i < ARRAY_SIZE(ipr_chip); i++) for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
if (ipr_chip[i].vendor == dev_id->vendor && if (ipr_chip[i].vendor == dev_id->vendor &&
ipr_chip[i].device == dev_id->device) ipr_chip[i].device == dev_id->device)
@ -7517,62 +7525,43 @@ static void ipr_shutdown(struct pci_dev *pdev)
static struct pci_device_id ipr_pci_table[] __devinitdata = { static struct pci_device_id ipr_pci_table[] __devinitdata = {
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, 0 },
0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
{ } { }
}; };
MODULE_DEVICE_TABLE(pci, ipr_pci_table); MODULE_DEVICE_TABLE(pci, ipr_pci_table);

View file

@ -37,8 +37,8 @@
/* /*
* Literals * Literals
*/ */
#define IPR_DRIVER_VERSION "2.3.0" #define IPR_DRIVER_VERSION "2.3.1"
#define IPR_DRIVER_DATE "(November 8, 2006)" #define IPR_DRIVER_DATE "(January 23, 2007)"
/* /*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding

View file

@ -123,6 +123,7 @@ lasi700_probe(struct parisc_device *dev)
hostdata->force_le_on_be = 0; hostdata->force_le_on_be = 0;
hostdata->chip710 = 1; hostdata->chip710 = 1;
hostdata->dmode_extra = DMODE_FC2; hostdata->dmode_extra = DMODE_FC2;
hostdata->burst_length = 8;
} }
host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev); host = NCR_700_detect(&lasi700_template, hostdata, &dev->dev);

View file

@ -548,7 +548,7 @@ int sas_discover_sata(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev); res = sas_notify_lldd_dev_found(dev);
if (res) if (res)
return res; goto out_err2;
switch (dev->dev_type) { switch (dev->dev_type) {
case SATA_DEV: case SATA_DEV:
@ -560,11 +560,23 @@ int sas_discover_sata(struct domain_device *dev)
default: default:
break; break;
} }
if (res)
goto out_err;
sas_notify_lldd_dev_gone(dev); sas_notify_lldd_dev_gone(dev);
if (!res) { res = sas_notify_lldd_dev_found(dev);
sas_notify_lldd_dev_found(dev); if (res)
} goto out_err2;
res = sas_rphy_add(dev->rphy);
if (res)
goto out_err;
return res;
out_err:
sas_notify_lldd_dev_gone(dev);
out_err2:
return res; return res;
} }
@ -580,21 +592,17 @@ int sas_discover_end_dev(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev); res = sas_notify_lldd_dev_found(dev);
if (res) if (res)
return res; goto out_err2;
res = sas_rphy_add(dev->rphy); res = sas_rphy_add(dev->rphy);
if (res) if (res)
goto out_err; goto out_err;
/* do this to get the end device port attributes which will have
* been scanned in sas_rphy_add */
sas_notify_lldd_dev_gone(dev);
sas_notify_lldd_dev_found(dev);
return 0; return 0;
out_err: out_err:
sas_notify_lldd_dev_gone(dev); sas_notify_lldd_dev_gone(dev);
out_err2:
return res; return res;
} }
@ -649,6 +657,7 @@ void sas_unregister_domain_devices(struct asd_sas_port *port)
*/ */
static void sas_discover_domain(struct work_struct *work) static void sas_discover_domain(struct work_struct *work)
{ {
struct domain_device *dev;
int error = 0; int error = 0;
struct sas_discovery_event *ev = struct sas_discovery_event *ev =
container_of(work, struct sas_discovery_event, work); container_of(work, struct sas_discovery_event, work);
@ -658,35 +667,42 @@ static void sas_discover_domain(struct work_struct *work)
&port->disc.pending); &port->disc.pending);
if (port->port_dev) if (port->port_dev)
return ; return;
else {
error = sas_get_port_device(port); error = sas_get_port_device(port);
if (error) if (error)
return; return;
} dev = port->port_dev;
SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id, SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id,
current->pid); current->pid);
switch (port->port_dev->dev_type) { switch (dev->dev_type) {
case SAS_END_DEV: case SAS_END_DEV:
error = sas_discover_end_dev(port->port_dev); error = sas_discover_end_dev(dev);
break; break;
case EDGE_DEV: case EDGE_DEV:
case FANOUT_DEV: case FANOUT_DEV:
error = sas_discover_root_expander(port->port_dev); error = sas_discover_root_expander(dev);
break; break;
case SATA_DEV: case SATA_DEV:
case SATA_PM: case SATA_PM:
error = sas_discover_sata(port->port_dev); error = sas_discover_sata(dev);
break; break;
default: default:
SAS_DPRINTK("unhandled device %d\n", port->port_dev->dev_type); SAS_DPRINTK("unhandled device %d\n", dev->dev_type);
break; break;
} }
if (error) { if (error) {
kfree(port->port_dev); /* not kobject_register-ed yet */ sas_rphy_free(dev->rphy);
dev->rphy = NULL;
spin_lock(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
spin_unlock(&port->dev_list_lock);
kfree(dev); /* not kobject_register-ed yet */
port->port_dev = NULL; port->port_dev = NULL;
} }
@ -726,7 +742,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
BUG_ON(ev >= DISC_NUM_EVENTS); BUG_ON(ev >= DISC_NUM_EVENTS);
sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
&disc->disc_work[ev].work, port->ha->core.shost); &disc->disc_work[ev].work, port->ha);
return 0; return 0;
} }

View file

@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
BUG_ON(event >= HA_NUM_EVENTS); BUG_ON(event >= HA_NUM_EVENTS);
sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
&sas_ha->ha_events[event].work, sas_ha->core.shost); &sas_ha->ha_events[event].work, sas_ha);
} }
static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
BUG_ON(event >= PORT_NUM_EVENTS); BUG_ON(event >= PORT_NUM_EVENTS);
sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
&phy->port_events[event].work, ha->core.shost); &phy->port_events[event].work, ha);
} }
static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
@ -51,7 +51,7 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
BUG_ON(event >= PHY_NUM_EVENTS); BUG_ON(event >= PHY_NUM_EVENTS);
sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
&phy->phy_events[event].work, ha->core.shost); &phy->phy_events[event].work, ha);
} }
int sas_init_events(struct sas_ha_struct *sas_ha) int sas_init_events(struct sas_ha_struct *sas_ha)

View file

@ -667,8 +667,9 @@ static struct domain_device *sas_ex_discover_end_dev(
return child; return child;
out_list_del: out_list_del:
sas_rphy_free(child->rphy);
child->rphy = NULL;
list_del(&child->dev_list_node); list_del(&child->dev_list_node);
sas_rphy_free(rphy);
out_free: out_free:
sas_port_delete(phy->port); sas_port_delete(phy->port);
out_err: out_err:
@ -677,6 +678,29 @@ static struct domain_device *sas_ex_discover_end_dev(
return NULL; return NULL;
} }
/* See if this phy is part of a wide port */
static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
{
struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
int i;
for (i = 0; i < parent->ex_dev.num_phys; i++) {
struct ex_phy *ephy = &parent->ex_dev.ex_phy[i];
if (ephy == phy)
continue;
if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr,
SAS_ADDR_SIZE) && ephy->port) {
sas_port_add_phy(ephy->port, phy->phy);
phy->phy_state = PHY_DEVICE_DISCOVERED;
return 0;
}
}
return -ENODEV;
}
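sas_ex_join_wide_port above implements the wide-port rule: an expander phy whose attached SAS address matches that of a sibling phy that already owns a port simply joins that port instead of being discovered as a new device. A standalone sketch of that grouping rule, with the types cut down to the essentials and made-up addresses:

#include <stdio.h>
#include <string.h>

#define SAS_ADDR_SIZE 8
#define NPHYS 4

struct ex_phy {
    unsigned char attached_sas_addr[SAS_ADDR_SIZE];
    int port_id;                      /* -1: no port yet */
};

/* return the port of a sibling phy attached to the same SAS address */
static int join_wide_port(struct ex_phy *phys, int nphys, int phy_id)
{
    int i;

    for (i = 0; i < nphys; i++) {
        if (i == phy_id)
            continue;
        if (phys[i].port_id >= 0 &&
            !memcmp(phys[i].attached_sas_addr,
                    phys[phy_id].attached_sas_addr, SAS_ADDR_SIZE))
            return phys[i].port_id;
    }
    return -1;                        /* no wide port to join */
}

int main(void)
{
    struct ex_phy phys[NPHYS] = {
        { "devA...", 0 },             /* phy0 already formed port 0 */
        { "devA...", -1 },            /* phy1 attaches to the same device */
        { "devB...", -1 },
        { "devB...", -1 },
    };
    int p = join_wide_port(phys, NPHYS, 1);

    if (p >= 0)
        phys[1].port_id = p;          /* phy1 joins the wide port */
    printf("phy1 -> port %d\n", phys[1].port_id);
    return 0;
}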
static struct domain_device *sas_ex_discover_expander( static struct domain_device *sas_ex_discover_expander(
struct domain_device *parent, int phy_id) struct domain_device *parent, int phy_id)
{ {
@ -809,6 +833,13 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
return res; return res;
} }
res = sas_ex_join_wide_port(dev, phy_id);
if (!res) {
SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
return res;
}
switch (ex_phy->attached_dev_type) { switch (ex_phy->attached_dev_type) {
case SAS_END_DEV: case SAS_END_DEV:
child = sas_ex_discover_end_dev(dev, phy_id); child = sas_ex_discover_end_dev(dev, phy_id);
@ -1431,13 +1462,22 @@ int sas_discover_root_expander(struct domain_device *dev)
int res; int res;
struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
sas_rphy_add(dev->rphy); res = sas_rphy_add(dev->rphy);
if (res)
goto out_err;
ex->level = dev->port->disc.max_level; /* 0 */ ex->level = dev->port->disc.max_level; /* 0 */
res = sas_discover_expander(dev); res = sas_discover_expander(dev);
if (!res) if (res)
sas_ex_bfs_disc(dev->port); goto out_err2;
sas_ex_bfs_disc(dev->port);
return res;
out_err2:
sas_rphy_remove(dev->rphy);
out_err:
return res; return res;
} }

View file

@ -87,6 +87,9 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
else if (sas_ha->lldd_queue_size == -1) else if (sas_ha->lldd_queue_size == -1)
sas_ha->lldd_queue_size = 128; /* Sanity */ sas_ha->lldd_queue_size = 128; /* Sanity */
sas_ha->state = SAS_HA_REGISTERED;
spin_lock_init(&sas_ha->state_lock);
error = sas_register_phys(sas_ha); error = sas_register_phys(sas_ha);
if (error) { if (error) {
printk(KERN_NOTICE "couldn't register sas phys:%d\n", error); printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
@ -127,12 +130,22 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
int sas_unregister_ha(struct sas_ha_struct *sas_ha) int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{ {
if (sas_ha->lldd_max_execute_num > 1) { unsigned long flags;
sas_shutdown_queue(sas_ha);
} /* Set the state to unregistered to avoid further
* events to be queued */
spin_lock_irqsave(&sas_ha->state_lock, flags);
sas_ha->state = SAS_HA_UNREGISTERED;
spin_unlock_irqrestore(&sas_ha->state_lock, flags);
scsi_flush_work(sas_ha->core.shost);
sas_unregister_ports(sas_ha); sas_unregister_ports(sas_ha);
if (sas_ha->lldd_max_execute_num > 1) {
sas_shutdown_queue(sas_ha);
sas_ha->lldd_max_execute_num = 1;
}
return 0; return 0;
} }
@ -146,6 +159,36 @@ static int sas_get_linkerrors(struct sas_phy *phy)
return sas_smp_get_phy_events(phy); return sas_smp_get_phy_events(phy);
} }
int sas_phy_enable(struct sas_phy *phy, int enable)
{
int ret;
enum phy_func command;
if (enable)
command = PHY_FUNC_LINK_RESET;
else
command = PHY_FUNC_DISABLE;
if (scsi_is_sas_phy_local(phy)) {
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
if (!enable) {
sas_phy_disconnected(asd_phy);
sas_ha->notify_phy_event(asd_phy, PHYE_LOSS_OF_SIGNAL);
}
ret = i->dft->lldd_control_phy(asd_phy, command, NULL);
} else {
struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
ret = sas_smp_phy_control(ddev, phy->number, command, NULL);
}
return ret;
}
int sas_phy_reset(struct sas_phy *phy, int hard_reset) int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{ {
int ret; int ret;
@ -172,8 +215,8 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
return ret; return ret;
} }
static int sas_set_phy_speed(struct sas_phy *phy, int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates) struct sas_phy_linkrates *rates)
{ {
int ret; int ret;
@ -212,6 +255,7 @@ static int sas_set_phy_speed(struct sas_phy *phy,
} }
static struct sas_function_template sft = { static struct sas_function_template sft = {
.phy_enable = sas_phy_enable,
.phy_reset = sas_phy_reset, .phy_reset = sas_phy_reset,
.set_phy_speed = sas_set_phy_speed, .set_phy_speed = sas_set_phy_speed,
.get_linkerrors = sas_get_linkerrors, .get_linkerrors = sas_get_linkerrors,

View file

@ -80,7 +80,7 @@ void sas_hae_reset(struct work_struct *work);
static inline void sas_queue_event(int event, spinlock_t *lock, static inline void sas_queue_event(int event, spinlock_t *lock,
unsigned long *pending, unsigned long *pending,
struct work_struct *work, struct work_struct *work,
struct Scsi_Host *shost) struct sas_ha_struct *sas_ha)
{ {
unsigned long flags; unsigned long flags;
@ -91,7 +91,12 @@ static inline void sas_queue_event(int event, spinlock_t *lock,
} }
__set_bit(event, pending); __set_bit(event, pending);
spin_unlock_irqrestore(lock, flags); spin_unlock_irqrestore(lock, flags);
scsi_queue_work(shost, work);
spin_lock_irqsave(&sas_ha->state_lock, flags);
if (sas_ha->state != SAS_HA_UNREGISTERED) {
scsi_queue_work(sas_ha->core.shost, work);
}
spin_unlock_irqrestore(&sas_ha->state_lock, flags);
} }
static inline void sas_begin_event(int event, spinlock_t *lock, static inline void sas_begin_event(int event, spinlock_t *lock,
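sas_queue_event now receives the sas_ha_struct rather than the Scsi_Host so it can check the new state field under state_lock and silently drop events raised after sas_unregister_ha has marked the HA SAS_HA_UNREGISTERED. A single-threaded sketch of that guard (locking omitted, structure and event names simplified):

#include <stdio.h>

enum ha_state { SAS_HA_REGISTERED, SAS_HA_UNREGISTERED };

struct ha { enum ha_state state; int queued; };

/* only queue work while the HA is still registered */
static void queue_event(struct ha *ha, const char *ev)
{
    /* the real code holds ha->state_lock around this check */
    if (ha->state != SAS_HA_UNREGISTERED) {
        ha->queued++;
        printf("queued %s\n", ev);
    } else {
        printf("dropped %s (HA unregistered)\n", ev);
    }
}

int main(void)
{
    struct ha ha = { .state = SAS_HA_REGISTERED };

    queue_event(&ha, "PORTE_BYTES_DMAED");
    ha.state = SAS_HA_UNREGISTERED;      /* what sas_unregister_ha() does first */
    queue_event(&ha, "PHYE_LOSS_OF_SIGNAL");
    return 0;
}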

View file

@ -42,10 +42,11 @@ static void sas_form_port(struct asd_sas_phy *phy)
struct asd_sas_port *port = phy->port; struct asd_sas_port *port = phy->port;
struct sas_internal *si = struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt); to_sas_internal(sas_ha->core.shost->transportt);
unsigned long flags;
if (port) { if (port) {
if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
SAS_ADDR_SIZE) == 0) SAS_ADDR_SIZE) != 0)
sas_deform_port(phy); sas_deform_port(phy);
else { else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
@ -56,7 +57,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
} }
/* find a port */ /* find a port */
spin_lock(&sas_ha->phy_port_lock); spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
for (i = 0; i < sas_ha->num_phys; i++) { for (i = 0; i < sas_ha->num_phys; i++) {
port = sas_ha->sas_port[i]; port = sas_ha->sas_port[i];
spin_lock(&port->phy_list_lock); spin_lock(&port->phy_list_lock);
@ -78,7 +79,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (i >= sas_ha->num_phys) { if (i >= sas_ha->num_phys) {
printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
__FUNCTION__); __FUNCTION__);
spin_unlock(&sas_ha->phy_port_lock); spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
return; return;
} }
@ -105,7 +106,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
} else } else
port->linkrate = max(port->linkrate, phy->linkrate); port->linkrate = max(port->linkrate, phy->linkrate);
spin_unlock(&port->phy_list_lock); spin_unlock(&port->phy_list_lock);
spin_unlock(&sas_ha->phy_port_lock); spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
if (!port->port) { if (!port->port) {
port->port = sas_port_alloc(phy->phy->dev.parent, port->id); port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
@ -137,6 +138,7 @@ void sas_deform_port(struct asd_sas_phy *phy)
struct asd_sas_port *port = phy->port; struct asd_sas_port *port = phy->port;
struct sas_internal *si = struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt); to_sas_internal(sas_ha->core.shost->transportt);
unsigned long flags;
if (!port) if (!port)
return; /* done by a phy event */ return; /* done by a phy event */
@ -155,7 +157,7 @@ void sas_deform_port(struct asd_sas_phy *phy)
if (si->dft->lldd_port_deformed) if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy); si->dft->lldd_port_deformed(phy);
spin_lock(&sas_ha->phy_port_lock); spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
spin_lock(&port->phy_list_lock); spin_lock(&port->phy_list_lock);
list_del_init(&phy->port_phy_el); list_del_init(&phy->port_phy_el);
@ -174,7 +176,7 @@ void sas_deform_port(struct asd_sas_phy *phy)
port->phy_mask = 0; port->phy_mask = 0;
} }
spin_unlock(&port->phy_list_lock); spin_unlock(&port->phy_list_lock);
spin_unlock(&sas_ha->phy_port_lock); spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
return; return;
} }

View file

@ -34,6 +34,7 @@
#include <scsi/scsi_transport_sas.h> #include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h" #include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h" #include "../scsi_transport_api.h"
#include "../scsi_priv.h"
#include <linux/err.h> #include <linux/err.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
@ -130,7 +131,7 @@ static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
if (cmd->request && blk_rq_tagged(cmd->request)) { if (cmd->request && blk_rq_tagged(cmd->request)) {
if (cmd->device->ordered_tags && if (cmd->device->ordered_tags &&
(cmd->request->cmd_flags & REQ_HARDBARRIER)) (cmd->request->cmd_flags & REQ_HARDBARRIER))
ta = TASK_ATTR_HOQ; ta = TASK_ATTR_ORDERED;
} }
return ta; return ta;
} }
@ -281,6 +282,7 @@ enum task_disposition {
TASK_IS_ABORTED, TASK_IS_ABORTED,
TASK_IS_AT_LU, TASK_IS_AT_LU,
TASK_IS_NOT_AT_LU, TASK_IS_NOT_AT_LU,
TASK_ABORT_FAILED,
}; };
static enum task_disposition sas_scsi_find_task(struct sas_task *task) static enum task_disposition sas_scsi_find_task(struct sas_task *task)
@ -310,15 +312,6 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
spin_unlock_irqrestore(&core->task_queue_lock, flags); spin_unlock_irqrestore(&core->task_queue_lock, flags);
} }
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: task 0x%p already aborted\n",
__FUNCTION__, task);
return TASK_IS_ABORTED;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
for (i = 0; i < 5; i++) { for (i = 0; i < 5; i++) {
SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
res = si->dft->lldd_abort_task(task); res = si->dft->lldd_abort_task(task);
@ -340,15 +333,21 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
SAS_DPRINTK("%s: querying task 0x%p\n", SAS_DPRINTK("%s: querying task 0x%p\n",
__FUNCTION__, task); __FUNCTION__, task);
res = si->dft->lldd_query_task(task); res = si->dft->lldd_query_task(task);
if (res == TMF_RESP_FUNC_SUCC) { switch (res) {
case TMF_RESP_FUNC_SUCC:
SAS_DPRINTK("%s: task 0x%p at LU\n", SAS_DPRINTK("%s: task 0x%p at LU\n",
__FUNCTION__, task); __FUNCTION__, task);
return TASK_IS_AT_LU; return TASK_IS_AT_LU;
} else if (res == TMF_RESP_FUNC_COMPLETE) { case TMF_RESP_FUNC_COMPLETE:
SAS_DPRINTK("%s: task 0x%p not at LU\n", SAS_DPRINTK("%s: task 0x%p not at LU\n",
__FUNCTION__, task); __FUNCTION__, task);
return TASK_IS_NOT_AT_LU; return TASK_IS_NOT_AT_LU;
} case TMF_RESP_FUNC_FAILED:
SAS_DPRINTK("%s: task 0x%p failed to abort\n",
__FUNCTION__, task);
return TASK_ABORT_FAILED;
}
} }
} }
return res; return res;
@ -398,35 +397,113 @@ static int sas_recover_I_T(struct domain_device *dev)
return res; return res;
} }
void sas_scsi_recover_host(struct Scsi_Host *shost) /* Find the sas_phy that's attached to this device */
struct sas_phy *find_local_sas_phy(struct domain_device *dev)
{
struct domain_device *pdev = dev->parent;
struct ex_phy *exphy = NULL;
int i;
/* Directly attached device */
if (!pdev)
return dev->port->phy;
/* Otherwise look in the expander */
for (i = 0; i < pdev->ex_dev.num_phys; i++)
if (!memcmp(dev->sas_addr,
pdev->ex_dev.ex_phy[i].attached_sas_addr,
SAS_ADDR_SIZE)) {
exphy = &pdev->ex_dev.ex_phy[i];
break;
}
BUG_ON(!exphy);
return exphy->phy;
}
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
struct scsi_lun lun;
int res;
int_to_scsilun(cmd->device->lun, &lun);
if (!i->dft->lldd_lu_reset)
return FAILED;
res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
return FAILED;
}
/* Attempt to send a phy (bus) reset */
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_phy *phy = find_local_sas_phy(dev);
int res;
res = sas_phy_reset(phy, 1);
if (res)
SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
phy->dev.kobj.k_name,
res);
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
return FAILED;
}
/* Try to reset a device */
static int try_to_reset_cmd_device(struct Scsi_Host *shost,
struct scsi_cmnd *cmd)
{
int res;
if (!shost->hostt->eh_device_reset_handler)
goto try_bus_reset;
res = shost->hostt->eh_device_reset_handler(cmd);
if (res == SUCCESS)
return res;
try_bus_reset:
if (shost->hostt->eh_bus_reset_handler)
return shost->hostt->eh_bus_reset_handler(cmd);
return FAILED;
}
static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
struct list_head *work_q,
struct list_head *done_q)
{ {
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
unsigned long flags;
LIST_HEAD(error_q);
struct scsi_cmnd *cmd, *n; struct scsi_cmnd *cmd, *n;
enum task_disposition res = TASK_IS_DONE; enum task_disposition res = TASK_IS_DONE;
int tmf_resp; int tmf_resp, need_reset;
struct sas_internal *i = to_sas_internal(shost->transportt); struct sas_internal *i = to_sas_internal(shost->transportt);
unsigned long flags;
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->eh_cmd_q, &error_q);
spin_unlock_irqrestore(shost->host_lock, flags);
SAS_DPRINTK("Enter %s\n", __FUNCTION__);
/* All tasks on this list were marked SAS_TASK_STATE_ABORTED
* by sas_scsi_timed_out() callback.
*/
Again: Again:
SAS_DPRINTK("going over list...\n"); list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
struct sas_task *task = TO_SAS_TASK(cmd); struct sas_task *task = TO_SAS_TASK(cmd);
if (!task)
continue;
list_del_init(&cmd->eh_entry); list_del_init(&cmd->eh_entry);
if (!task) { spin_lock_irqsave(&task->task_state_lock, flags);
SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__); need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
continue; spin_unlock_irqrestore(&task->task_state_lock, flags);
}
SAS_DPRINTK("trying to find task 0x%p\n", task); SAS_DPRINTK("trying to find task 0x%p\n", task);
res = sas_scsi_find_task(task); res = sas_scsi_find_task(task);
@ -437,11 +514,15 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
task); task);
task->task_done(task); task->task_done(task);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
continue; continue;
case TASK_IS_ABORTED: case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n", SAS_DPRINTK("%s: task 0x%p is aborted\n",
__FUNCTION__, task); __FUNCTION__, task);
task->task_done(task); task->task_done(task);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
continue; continue;
case TASK_IS_AT_LU: case TASK_IS_AT_LU:
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@ -452,11 +533,14 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
SAS_ADDR(task->dev), SAS_ADDR(task->dev),
cmd->device->lun); cmd->device->lun);
task->task_done(task); task->task_done(task);
sas_scsi_clear_queue_lu(&error_q, cmd); if (need_reset)
try_to_reset_cmd_device(shost, cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again; goto Again;
} }
/* fallthrough */ /* fallthrough */
case TASK_IS_NOT_AT_LU: case TASK_IS_NOT_AT_LU:
case TASK_ABORT_FAILED:
SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
task); task);
tmf_resp = sas_recover_I_T(task->dev); tmf_resp = sas_recover_I_T(task->dev);
@ -464,7 +548,9 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
SAS_DPRINTK("I_T %016llx recovered\n", SAS_DPRINTK("I_T %016llx recovered\n",
SAS_ADDR(task->dev->sas_addr)); SAS_ADDR(task->dev->sas_addr));
task->task_done(task); task->task_done(task);
sas_scsi_clear_queue_I_T(&error_q, task->dev); if (need_reset)
try_to_reset_cmd_device(shost, cmd);
sas_scsi_clear_queue_I_T(work_q, task->dev);
goto Again; goto Again;
} }
/* Hammer time :-) */ /* Hammer time :-) */
@ -477,7 +563,9 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
SAS_DPRINTK("clear nexus port:%d " SAS_DPRINTK("clear nexus port:%d "
"succeeded\n", port->id); "succeeded\n", port->id);
task->task_done(task); task->task_done(task);
sas_scsi_clear_queue_port(&error_q, if (need_reset)
try_to_reset_cmd_device(shost, cmd);
sas_scsi_clear_queue_port(work_q,
port); port);
goto Again; goto Again;
} }
@ -489,6 +577,8 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
SAS_DPRINTK("clear nexus ha " SAS_DPRINTK("clear nexus ha "
"succeeded\n"); "succeeded\n");
task->task_done(task); task->task_done(task);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
goto out; goto out;
} }
} }
@ -502,20 +592,54 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
cmd->device->lun); cmd->device->lun);
task->task_done(task); task->task_done(task);
if (need_reset)
try_to_reset_cmd_device(shost, cmd);
goto clear_q; goto clear_q;
} }
} }
out: out:
scsi_eh_flush_done_q(&ha->eh_done_q); return list_empty(work_q);
SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
return;
clear_q: clear_q:
SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct sas_task *task = TO_SAS_TASK(cmd); struct sas_task *task = TO_SAS_TASK(cmd);
list_del_init(&cmd->eh_entry); list_del_init(&cmd->eh_entry);
task->task_done(task); task->task_done(task);
} }
return list_empty(work_q);
}
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
unsigned long flags;
LIST_HEAD(eh_work_q);
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
spin_unlock_irqrestore(shost->host_lock, flags);
SAS_DPRINTK("Enter %s\n", __FUNCTION__);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism)
*/
if (sas_eh_handle_sas_errors(shost, &eh_work_q, &ha->eh_done_q))
goto out;
/*
* Now deal with SCSI commands that completed ok but have an error
* code (and hopefully sense data) attached. This is roughly what
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
out:
scsi_eh_flush_done_q(&ha->eh_done_q);
SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
return;
} }
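sas_scsi_recover_host is now two-phased: commands that still own a sas_task go through sas_eh_handle_sas_errors, and whatever is left (commands that completed with a check condition and have no task) falls through to the generic scsi_eh_get_sense/scsi_eh_ready_devs path. A minimal sketch of that split over a work list, with the list reduced to an array and the recovery itself reduced to a printout:

#include <stdio.h>

struct cmd { const char *name; int has_task; int done; };

/* phase 1: consume commands that still have a SAS task attached */
static int handle_sas_errors(struct cmd *q, int n)
{
    int remaining = 0, i;

    for (i = 0; i < n; i++) {
        if (q[i].done)
            continue;
        if (q[i].has_task) {
            printf("phase 1: recovering %s via its sas_task\n", q[i].name);
            q[i].done = 1;
        } else {
            remaining++;              /* leave it for the generic EH */
        }
    }
    return remaining == 0;            /* like "return list_empty(work_q)" */
}

int main(void)
{
    struct cmd q[] = {
        { "cmd0", 1, 0 },
        { "cmd1", 0, 0 },             /* completed with sense data, no task */
        { "cmd2", 1, 0 },
    };
    int n = 3, i;

    if (!handle_sas_errors(q, n)) {
        for (i = 0; i < n; i++) {     /* phase 2: generic SCSI EH path */
            if (!q[i].done)
                printf("phase 2: generic EH handles %s\n", q[i].name);
        }
    }
    return 0;
}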
enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
@ -524,24 +648,30 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
unsigned long flags; unsigned long flags;
if (!task) { if (!task) {
SAS_DPRINTK("command 0x%p, task 0x%p, gone: EH_HANDLED\n", cmd->timeout_per_command /= 2;
cmd, task); SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
return EH_HANDLED; cmd, task, (cmd->timeout_per_command ?
"EH_RESET_TIMER" : "EH_NOT_HANDLED"));
if (!cmd->timeout_per_command)
return EH_NOT_HANDLED;
return EH_RESET_TIMER;
} }
spin_lock_irqsave(&task->task_state_lock, flags); spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) { BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, aborted by initiator: "
"EH_NOT_HANDLED\n", cmd, task);
return EH_NOT_HANDLED;
}
if (task->task_state_flags & SAS_TASK_STATE_DONE) { if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
cmd, task); cmd, task);
return EH_HANDLED; return EH_HANDLED;
} }
if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
"EH_RESET_TIMER\n",
cmd, task);
return EH_RESET_TIMER;
}
task->task_state_flags |= SAS_TASK_STATE_ABORTED; task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
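The reworked timeout handler keeps resetting the timer while a task is still at the initiator, and for commands whose task is already gone it halves timeout_per_command on each expiry so the command is eventually handed to the error handler instead of being re-armed forever. A standalone sketch of that back-off, with an arbitrary starting value:

#include <stdio.h>

enum { EH_NOT_HANDLED, EH_RESET_TIMER };

/* decide what to do when the timer fires for a command whose task is gone */
static int timed_out_task_gone(unsigned int *timeout)
{
    *timeout /= 2;                      /* halve the remaining budget */
    return *timeout ? EH_RESET_TIMER : EH_NOT_HANDLED;
}

int main(void)
{
    unsigned int timeout = 30;          /* arbitrary starting budget */
    int rearms = 0;

    while (timed_out_task_gone(&timeout) == EH_RESET_TIMER)
        rearms++;

    /* 30 -> 15 -> 7 -> 3 -> 1 -> 0: four re-arms, then escalate to the EH */
    printf("re-armed %d times before escalating to the error handler\n",
           rearms);
    return 0;
}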
@ -557,8 +687,9 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct domain_device *found_dev = NULL; struct domain_device *found_dev = NULL;
int i; int i;
unsigned long flags;
spin_lock(&ha->phy_port_lock); spin_lock_irqsave(&ha->phy_port_lock, flags);
for (i = 0; i < ha->num_phys; i++) { for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i]; struct asd_sas_port *port = ha->sas_port[i];
struct domain_device *dev; struct domain_device *dev;
@ -574,7 +705,7 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
spin_unlock(&port->dev_list_lock); spin_unlock(&port->dev_list_lock);
} }
found: found:
spin_unlock(&ha->phy_port_lock); spin_unlock_irqrestore(&ha->phy_port_lock, flags);
return found_dev; return found_dev;
} }
@ -623,6 +754,8 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
scsi_deactivate_tcq(scsi_dev, 1); scsi_deactivate_tcq(scsi_dev, 1);
} }
scsi_dev->allow_restart = 1;
return 0; return 0;
} }
@ -799,46 +932,42 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
spin_unlock_irqrestore(&core->task_queue_lock, flags); spin_unlock_irqrestore(&core->task_queue_lock, flags);
} }
static int do_sas_task_abort(struct sas_task *task) /*
* Call the LLDD task abort routine directly. This function is intended for
* use by upper layers that need to tell the LLDD to abort a task.
*/
int __sas_task_abort(struct sas_task *task)
{ {
struct scsi_cmnd *sc = task->uldd_task;
struct sas_internal *si = struct sas_internal *si =
to_sas_internal(task->dev->port->ha->core.shost->transportt); to_sas_internal(task->dev->port->ha->core.shost->transportt);
unsigned long flags; unsigned long flags;
int res; int res;
spin_lock_irqsave(&task->task_state_lock, flags); spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: Task %p already aborted.\n", __FUNCTION__, SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__,
task); task);
return 0; return 0;
} }
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
task->task_state_flags |= SAS_TASK_INITIATOR_ABORTED;
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
if (!si->dft->lldd_abort_task) if (!si->dft->lldd_abort_task)
return -ENODEV; return -ENODEV;
res = si->dft->lldd_abort_task(task); res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if ((task->task_state_flags & SAS_TASK_STATE_DONE) || if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
(res == TMF_RESP_FUNC_COMPLETE)) (res == TMF_RESP_FUNC_COMPLETE))
{ {
/* SMP commands don't have scsi_cmds(?) */ spin_unlock_irqrestore(&task->task_state_lock, flags);
if (!sc) { task->task_done(task);
task->task_done(task);
return 0;
}
scsi_req_abort_cmd(sc);
scsi_schedule_eh(sc->device->host);
return 0; return 0;
} }
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &= ~SAS_TASK_INITIATOR_ABORTED;
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags &= ~SAS_TASK_STATE_ABORTED; task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
@ -846,17 +975,24 @@ static int do_sas_task_abort(struct sas_task *task)
return -EAGAIN; return -EAGAIN;
} }
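A minimal sketch of an upper-layer caller for __sas_task_abort(), assuming only the return codes visible in this hunk (0 when the task has finished or the abort completed, -ENODEV when the LLDD provides no lldd_abort_task, -EAGAIN otherwise) and the up-to-five retries the old do_sas_task_abort() wrapper used; the helper name and retry policy are illustrative, not part of libsas:

#include <scsi/libsas.h>

/* Hypothetical upper-layer helper: retry the direct abort a few times,
 * mirroring the retry loop the old abort_work handler used above. */
static int example_abort_with_retry(struct sas_task *task)
{
	int tries, res = -EAGAIN;

	for (tries = 0; tries < 5 && res == -EAGAIN; tries++)
		res = __sas_task_abort(task);

	return res;	/* 0, -ENODEV or -EAGAIN */
}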
void sas_task_abort(struct work_struct *work) /*
* Tell an upper layer that it needs to initiate an abort for a given task.
* This should only ever be called by an LLDD.
*/
void sas_task_abort(struct sas_task *task)
{ {
struct sas_task *task = struct scsi_cmnd *sc = task->uldd_task;
container_of(work, struct sas_task, abort_work);
int i;
for (i = 0; i < 5; i++) /* Escape for libsas internal commands */
if (!do_sas_task_abort(task)) if (!sc) {
if (!del_timer(&task->timer))
return; return;
task->timer.function(task->timer.data);
return;
}
SAS_DPRINTK("%s: Could not kill task!\n", __FUNCTION__); scsi_req_abort_cmd(sc);
scsi_schedule_eh(sc->device->host);
} }
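A short sketch of the LLDD side, assuming only the new sas_task_abort(struct sas_task *) signature shown above; the function name below is hypothetical. With the work-struct indirection gone, an LLDD error path can hand a failing task straight to libsas instead of queueing the old abort_work item:

#include <scsi/libsas.h>

/* Hypothetical LLDD error path: do not complete the task here.
 * sas_task_abort() routes SCSI-backed tasks to the midlayer error
 * handler and, for libsas-internal commands, fires the task timer,
 * as the body above shows. */
static void example_lldd_fail_task(struct sas_task *task)
{
	sas_task_abort(task);
}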
EXPORT_SYMBOL_GPL(sas_queuecommand); EXPORT_SYMBOL_GPL(sas_queuecommand);
@ -866,5 +1002,9 @@ EXPORT_SYMBOL_GPL(sas_slave_destroy);
EXPORT_SYMBOL_GPL(sas_change_queue_depth); EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type); EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param); EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(__sas_task_abort);
EXPORT_SYMBOL_GPL(sas_task_abort); EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset); EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_phy_enable);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);

View file

@ -748,7 +748,7 @@ typedef struct {
/** /**
* private_bios_data - bios private data for boot devices * struct private_bios_data - bios private data for boot devices
* @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB, * @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB,
* 0x1000 - 8GB, Others values are invalid * 0x1000 - 8GB, Others values are invalid
* @unused : bits 4-7 are unused * @unused : bits 4-7 are unused

View file

@ -46,17 +46,17 @@
/** /**
* scb_t - scsi command control block * scb_t - scsi command control block
* @param ccb : command control block for individual driver * @ccb : command control block for individual driver
* @param list : list of control blocks * @list : list of control blocks
* @param gp : general purpose field for LLDs * @gp : general purpose field for LLDs
* @param sno : all SCBs have a serial number * @sno : all SCBs have a serial number
* @param scp : associated scsi command * @scp : associated scsi command
* @param state : current state of scb * @state : current state of scb
* @param dma_dir : direction of data transfer * @dma_dir : direction of data transfer
* @param dma_type : transfer with sg list, buffer, or no data transfer * @dma_type : transfer with sg list, buffer, or no data transfer
* @param dev_channel : actual channel on the device * @dev_channel : actual channel on the device
* @param dev_target : actual target on the device * @dev_target : actual target on the device
* @param status : completion status * @status : completion status
* *
* This is our central data structure to issue commands to each driver. * This is our central data structure to issue commands to each driver.
* Driver specific data structures are maintained in the ccb field. * Driver specific data structures are maintained in the ccb field.
@ -99,42 +99,42 @@ typedef struct {
/** /**
* struct adapter_t - driver's initialization structure * struct adapter_t - driver's initialization structure
* @param dpc_h : tasklet handle * @dpc_h : tasklet handle
* @param pdev : pci configuration pointer for kernel * @pdev : pci configuration pointer for kernel
* @param host : pointer to host structure of mid-layer * @host : pointer to host structure of mid-layer
* @param lock : synchronization lock for mid-layer and driver * @lock : synchronization lock for mid-layer and driver
* @param quiescent : driver is quiescent for now. * @quiescent : driver is quiescent for now.
* @param outstanding_cmds : number of commands pending in the driver * @outstanding_cmds : number of commands pending in the driver
* @param kscb_list : pointer to the bulk of SCBs pointers for IO * @kscb_list : pointer to the bulk of SCBs pointers for IO
* @param kscb_pool : pool of free scbs for IO * @kscb_pool : pool of free scbs for IO
* @param kscb_pool_lock : lock for pool of free scbs * @kscb_pool_lock : lock for pool of free scbs
* @param pend_list : pending commands list * @pend_list : pending commands list
* @param pend_list_lock : exlusion lock for pending commands list * @pend_list_lock : exclusion lock for pending commands list
* @param completed_list : list of completed commands * @completed_list : list of completed commands
* @param completed_list_lock : exclusion lock for list of completed commands * @completed_list_lock : exclusion lock for list of completed commands
* @param sglen : max sg elements supported * @sglen : max sg elements supported
* @param device_ids : to convert kernel device addr to our devices. * @device_ids : to convert kernel device addr to our devices.
* @param raid_device : raid adapter specific pointer * @raid_device : raid adapter specific pointer
* @param max_channel : maximum channel number supported - inclusive * @max_channel : maximum channel number supported - inclusive
* @param max_target : max target supported - inclusive * @max_target : max target supported - inclusive
* @param max_lun : max lun supported - inclusive * @max_lun : max lun supported - inclusive
* @param unique_id : unique identifier for each adapter * @unique_id : unique identifier for each adapter
* @param irq : IRQ for this adapter * @irq : IRQ for this adapter
* @param ito : internal timeout value, (-1) means no timeout * @ito : internal timeout value, (-1) means no timeout
* @param ibuf : buffer to issue internal commands * @ibuf : buffer to issue internal commands
* @param ibuf_dma_h : dma handle for the above buffer * @ibuf_dma_h : dma handle for the above buffer
* @param uscb_list : SCB pointers for user cmds, common mgmt module * @uscb_list : SCB pointers for user cmds, common mgmt module
* @param uscb_pool : pool of SCBs for user commands * @uscb_pool : pool of SCBs for user commands
* @param uscb_pool_lock : exclusion lock for these SCBs * @uscb_pool_lock : exclusion lock for these SCBs
* @param max_cmds : max outstanding commands * @max_cmds : max outstanding commands
* @param fw_version : firmware version * @fw_version : firmware version
* @param bios_version : bios version * @bios_version : bios version
* @param max_cdb_sz : biggest CDB size supported. * @max_cdb_sz : biggest CDB size supported.
* @param ha : is high availability present - clustering * @ha : is high availability present - clustering
* @param init_id : initiator ID, the default value should be 7 * @init_id : initiator ID, the default value should be 7
* @param max_sectors : max sectors per request * @max_sectors : max sectors per request
* @param cmd_per_lun : max outstanding commands per LUN * @cmd_per_lun : max outstanding commands per LUN
* @param being_detached : set when unloading, no more mgmt calls * @being_detached : set when unloading, no more mgmt calls
* *
* *
* mraid_setup_device_map() can be called anytime after the device map is * mraid_setup_device_map() can be called anytime after the device map is
@ -211,23 +211,23 @@ typedef struct {
#define SCP2ADAPTER(scp) (adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp)) #define SCP2ADAPTER(scp) (adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp))
/**
* MRAID_GET_DEVICE_MAP - device ids
* @param adp - Adapter's soft state
* @param scp - mid-layer scsi command pointer
* @param p_chan - physical channel on the controller
* @param target - target id of the device or logical drive number
* @param islogical - set if the command is for the logical drive
*
* Macro to retrieve information about device class, logical or physical and
* the corresponding physical channel and target or logical drive number
**/
#define MRAID_IS_LOGICAL(adp, scp) \ #define MRAID_IS_LOGICAL(adp, scp) \
(SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0 (SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0
#define MRAID_IS_LOGICAL_SDEV(adp, sdev) \ #define MRAID_IS_LOGICAL_SDEV(adp, sdev) \
(sdev->channel == (adp)->max_channel) ? 1 : 0 (sdev->channel == (adp)->max_channel) ? 1 : 0
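A usage sketch for the macros above, assuming the surrounding megaraid mailbox definitions (adapter_t, SCP2CHANNEL) are in scope; the helper name is hypothetical. Commands addressed to the adapter's virtual channel (max_channel) are logical-drive I/O, everything else is physical passthru:

/* Hypothetical helper: classify a mid-layer command with MRAID_IS_LOGICAL. */
static int example_is_ld_command(adapter_t *adapter, struct scsi_cmnd *scp)
{
	if (MRAID_IS_LOGICAL(adapter, scp))
		return 1;	/* route via the virtual RAID channel */

	return 0;		/* physical device passthru */
}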
/**
* MRAID_GET_DEVICE_MAP - device ids
* @adp : adapter's soft state
* @scp : mid-layer scsi command pointer
* @p_chan : physical channel on the controller
* @target : target id of the device or logical drive number
* @islogical : set if the command is for the logical drive
*
* Macro to retrieve information about device class, logical or physical and
* the corresponding physical channel and target or logical drive number
*/
#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \ #define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \
/* \ /* \
* Is the request coming for the virtual channel \ * Is the request coming for the virtual channel \
@ -271,10 +271,10 @@ typedef struct {
#define ASSERT(expression) #define ASSERT(expression)
#endif #endif
/* /**
* struct mraid_pci_blk - structure holds DMA memory block info * struct mraid_pci_blk - structure holds DMA memory block info
* @param vaddr : virtual address to a memory block * @vaddr : virtual address to a memory block
* @param dma_addr : DMA handle to a memory block * @dma_addr : DMA handle to a memory block
* *
* This structure is filled up for the caller. It is the responsibility of the * This structure is filled up for the caller. It is the responsibility of the
* caller to allocate this array big enough to store addresses for all * caller to allocate this array big enough to store addresses for all

View file

@ -22,23 +22,23 @@
#include "mbox_defs.h" #include "mbox_defs.h"
/** /*
* con_log() - console log routine * console messages debug levels
* @param level : indicates the severity of the message.
* @fparam mt : format string
*
* con_log displays the error messages on the console based on the current
* debug level. Also it attaches the appropriate kernel severity level with
* the message.
*
*
* consolge messages debug levels
*/ */
#define CL_ANN 0 /* print unconditionally, announcements */ #define CL_ANN 0 /* print unconditionally, announcements */
#define CL_DLEVEL1 1 /* debug level 1, informative */ #define CL_DLEVEL1 1 /* debug level 1, informative */
#define CL_DLEVEL2 2 /* debug level 2, verbose */ #define CL_DLEVEL2 2 /* debug level 2, verbose */
#define CL_DLEVEL3 3 /* debug level 3, very verbose */ #define CL_DLEVEL3 3 /* debug level 3, very verbose */
/**
* con_log() - console log routine
* @level : indicates the severity of the message.
* @fmt : format string
*
* con_log displays the error messages on the console based on the current
* debug level. Also it attaches the appropriate kernel severity level with
* the message.
*/
#define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt; #define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt;
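A usage sketch for con_log(): the first call mirrors one that appears later in this patch (it assumes an adapter_t *adapter in scope, as at that call site); the second message is purely illustrative. Because the macro expands to an unbraced if plus printk, the second argument must be a fully parenthesized printk() argument list, with any format arguments riding inside those inner parentheses:

con_log(CL_ANN, (KERN_WARNING
	"megaraid: Couldn't register IRQ %d!\n", adapter->irq));

con_log(CL_DLEVEL1, ("megaraid: debug checkpoint reached\n"));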
/* /*
@ -157,14 +157,14 @@ typedef struct uioc {
/** /**
* struct mraid_hba_info - information about the controller * struct mraid_hba_info - information about the controller
* *
* @param pci_vendor_id : PCI vendor id * @pci_vendor_id : PCI vendor id
* @param pci_device_id : PCI device id * @pci_device_id : PCI device id
* @param subsystem_vendor_id : PCI subsystem vendor id * @subsystem_vendor_id : PCI subsystem vendor id
* @param subsystem_device_id : PCI subsystem device id * @subsystem_device_id : PCI subsystem device id
* @param baseport : base port of hba memory * @baseport : base port of hba memory
* @param pci_bus : PCI bus * @pci_bus : PCI bus
* @param pci_dev_fn : PCI device/function values * @pci_dev_fn : PCI device/function values
* @param irq : interrupt vector for the device * @irq : interrupt vector for the device
* *
* Extended information of 256 bytes about the controller. Align on the single * Extended information of 256 bytes about the controller. Align on the single
* byte boundary so that 32-bit applications can be run on 64-bit platform * byte boundary so that 32-bit applications can be run on 64-bit platform

View file

@ -10,13 +10,13 @@
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
* *
* FILE : megaraid_mbox.c * FILE : megaraid_mbox.c
* Version : v2.20.4.9 (Jul 16 2006) * Version : v2.20.5.1 (Nov 16 2006)
* *
* Authors: * Authors:
* Atul Mukker <Atul.Mukker@lsil.com> * Atul Mukker <Atul.Mukker@lsi.com>
* Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsi.com>
* Manoj Jose <Manoj.Jose@lsil.com> * Manoj Jose <Manoj.Jose@lsi.com>
* Seokmann Ju <Seokmann.Ju@lsil.com> * Seokmann Ju
* *
* List of supported controllers * List of supported controllers
* *
@ -107,6 +107,7 @@ static int megaraid_mbox_support_random_del(adapter_t *);
static int megaraid_mbox_get_max_sg(adapter_t *); static int megaraid_mbox_get_max_sg(adapter_t *);
static void megaraid_mbox_enum_raid_scsi(adapter_t *); static void megaraid_mbox_enum_raid_scsi(adapter_t *);
static void megaraid_mbox_flush_cache(adapter_t *); static void megaraid_mbox_flush_cache(adapter_t *);
static int megaraid_mbox_fire_sync_cmd(adapter_t *);
static void megaraid_mbox_display_scb(adapter_t *, scb_t *); static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
static void megaraid_mbox_setup_device_map(adapter_t *); static void megaraid_mbox_setup_device_map(adapter_t *);
@ -137,7 +138,7 @@ static int wait_till_fw_empty(adapter_t *);
MODULE_AUTHOR("sju@lsil.com"); MODULE_AUTHOR("megaraidlinux@lsi.com");
MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver"); MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_VERSION(MEGARAID_VERSION); MODULE_VERSION(MEGARAID_VERSION);
@ -146,7 +147,7 @@ MODULE_VERSION(MEGARAID_VERSION);
* ### modules parameters for driver ### * ### modules parameters for driver ###
*/ */
/** /*
* Set to enable driver to expose unconfigured disk to kernel * Set to enable driver to expose unconfigured disk to kernel
*/ */
static int megaraid_expose_unconf_disks = 0; static int megaraid_expose_unconf_disks = 0;
@ -154,7 +155,7 @@ module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
MODULE_PARM_DESC(unconf_disks, MODULE_PARM_DESC(unconf_disks,
"Set to expose unconfigured disks to kernel (default=0)"); "Set to expose unconfigured disks to kernel (default=0)");
/** /*
* driver wait time if the adapter's mailbox is busy * driver wait time if the adapter's mailbox is busy
*/ */
static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT; static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
@ -162,7 +163,7 @@ module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
MODULE_PARM_DESC(busy_wait, MODULE_PARM_DESC(busy_wait,
"Max wait for mailbox in microseconds if busy (default=10)"); "Max wait for mailbox in microseconds if busy (default=10)");
/** /*
* number of sectors per IO command * number of sectors per IO command
*/ */
static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS; static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
@ -170,7 +171,7 @@ module_param_named(max_sectors, megaraid_max_sectors, int, 0);
MODULE_PARM_DESC(max_sectors, MODULE_PARM_DESC(max_sectors,
"Maximum number of sectors per IO command (default=128)"); "Maximum number of sectors per IO command (default=128)");
/** /*
* number of commands per logical unit * number of commands per logical unit
*/ */
static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN; static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
@ -179,7 +180,7 @@ MODULE_PARM_DESC(cmd_per_lun,
"Maximum number of commands per logical unit (default=64)"); "Maximum number of commands per logical unit (default=64)");
/** /*
* Fast driver load option, skip scanning for physical devices during load. * Fast driver load option, skip scanning for physical devices during load.
* This would result in non-disk devices being skipped during driver load * This would result in non-disk devices being skipped during driver load
* time. These can be later added though, using /proc/scsi/scsi * time. These can be later added though, using /proc/scsi/scsi
@ -190,7 +191,7 @@ MODULE_PARM_DESC(fast_load,
"Faster loading of the driver, skips physical devices! (default=0)"); "Faster loading of the driver, skips physical devices! (default=0)");
/** /*
* mraid_debug level - threshold for amount of information to be displayed by * mraid_debug level - threshold for amount of information to be displayed by
* the driver. This level can be changed through module parameters, ioctl or * the driver. This level can be changed through module parameters, ioctl or

* sysfs/proc interface. By default, print the announcement messages only. * sysfs/proc interface. By default, print the announcement messages only.
@ -337,7 +338,7 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
* *
* Return value: * Return value:
* actual depth set * actual depth set
**/ */
static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth) static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
{ {
if (qdepth > MBOX_MAX_SCSI_CMDS) if (qdepth > MBOX_MAX_SCSI_CMDS)
@ -369,8 +370,8 @@ static struct scsi_host_template megaraid_template_g = {
* megaraid_init - module load hook * megaraid_init - module load hook
* *
* We register ourselves as hotplug enabled module and let PCI subsystem * We register ourselves as hotplug enabled module and let PCI subsystem
* discover our adaters * discover our adapters.
**/ */
static int __init static int __init
megaraid_init(void) megaraid_init(void)
{ {
@ -405,7 +406,7 @@ megaraid_init(void)
/** /**
* megaraid_exit - driver unload entry point * megaraid_exit - driver unload entry point
* *
* We simply unwrap the megaraid_init routine here * We simply unwrap the megaraid_init routine here.
*/ */
static void __exit static void __exit
megaraid_exit(void) megaraid_exit(void)
@ -421,12 +422,12 @@ megaraid_exit(void)
/** /**
* megaraid_probe_one - PCI hotplug entry point * megaraid_probe_one - PCI hotplug entry point
* @param pdev : handle to this controller's PCI configuration space * @pdev : handle to this controller's PCI configuration space
* @param id : pci device id of the class of controllers * @id : pci device id of the class of controllers
* *
* This routine should be called whenever a new adapter is detected by the * This routine should be called whenever a new adapter is detected by the
* PCI hotplug subsystem. * PCI hotplug subsystem.
**/ */
static int __devinit static int __devinit
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{ {
@ -542,16 +543,15 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/** /**
* megaraid_detach_one - release the framework resources and call LLD release * megaraid_detach_one - release framework resources and call LLD release routine
* routine * @pdev : handle for our PCI configuration space
* @param pdev : handle for our PCI cofiguration space
* *
* This routine is called during driver unload. We free all the allocated * This routine is called during driver unload. We free all the allocated
* resources and call the corresponding LLD so that it can also release all * resources and call the corresponding LLD so that it can also release all
* its resources. * its resources.
* *
* This routine is also called from the PCI hotplug system * This routine is also called from the PCI hotplug system.
**/ */
static void static void
megaraid_detach_one(struct pci_dev *pdev) megaraid_detach_one(struct pci_dev *pdev)
{ {
@ -615,9 +615,9 @@ megaraid_detach_one(struct pci_dev *pdev)
/** /**
* megaraid_mbox_shutdown - PCI shutdown for megaraid HBA * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA
* @param device : generice driver model device * @pdev : generic driver model device
* *
* Shutdown notification, perform flush cache * Shutdown notification, perform flush cache.
*/ */
static void static void
megaraid_mbox_shutdown(struct pci_dev *pdev) megaraid_mbox_shutdown(struct pci_dev *pdev)
@ -643,10 +643,10 @@ megaraid_mbox_shutdown(struct pci_dev *pdev)
/** /**
* megaraid_io_attach - attach a device with the IO subsystem * megaraid_io_attach - attach a device with the IO subsystem
* @param adapter : controller's soft state * @adapter : controller's soft state
* *
* Attach this device with the IO subsystem * Attach this device with the IO subsystem.
**/ */
static int static int
megaraid_io_attach(adapter_t *adapter) megaraid_io_attach(adapter_t *adapter)
{ {
@ -695,10 +695,10 @@ megaraid_io_attach(adapter_t *adapter)
/** /**
* megaraid_io_detach - detach a device from the IO subsystem * megaraid_io_detach - detach a device from the IO subsystem
* @param adapter : controller's soft state * @adapter : controller's soft state
* *
* Detach this device from the IO subsystem * Detach this device from the IO subsystem.
**/ */
static void static void
megaraid_io_detach(adapter_t *adapter) megaraid_io_detach(adapter_t *adapter)
{ {
@ -722,13 +722,13 @@ megaraid_io_detach(adapter_t *adapter)
/** /**
* megaraid_init_mbox - initialize controller * megaraid_init_mbox - initialize controller
* @param adapter - our soft state * @adapter : our soft state
* *
* . Allocate 16-byte aligned mailbox memory for firmware handshake * - Allocate 16-byte aligned mailbox memory for firmware handshake
* . Allocate controller's memory resources * - Allocate controller's memory resources
* . Find out all initialization data * - Find out all initialization data
* . Allocate memory required for all the commands * - Allocate memory required for all the commands
* . Use internal library of FW routines, build up complete soft state * - Use internal library of FW routines, build up complete soft state
*/ */
static int __devinit static int __devinit
megaraid_init_mbox(adapter_t *adapter) megaraid_init_mbox(adapter_t *adapter)
@ -779,33 +779,39 @@ megaraid_init_mbox(adapter_t *adapter)
goto out_release_regions; goto out_release_regions;
} }
// /* initialize the mutual exclusion lock for the mailbox */
// Setup the rest of the soft state using the library of FW routines spin_lock_init(&raid_dev->mailbox_lock);
//
// request IRQ and register the interrupt service routine /* allocate memory required for commands */
if (megaraid_alloc_cmd_packets(adapter) != 0)
goto out_iounmap;
/*
* Issue SYNC cmd to flush the pending cmds in the adapter
* and initialize its internal state
*/
if (megaraid_mbox_fire_sync_cmd(adapter))
con_log(CL_ANN, ("megaraid: sync cmd failed\n"));
/*
* Setup the rest of the soft state using the library of
* FW routines
*/
/* request IRQ and register the interrupt service routine */
if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid", if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
adapter)) { adapter)) {
con_log(CL_ANN, (KERN_WARNING con_log(CL_ANN, (KERN_WARNING
"megaraid: Couldn't register IRQ %d!\n", adapter->irq)); "megaraid: Couldn't register IRQ %d!\n", adapter->irq));
goto out_alloc_cmds;
goto out_iounmap;
}
// initialize the mutual exclusion lock for the mailbox
spin_lock_init(&raid_dev->mailbox_lock);
// allocate memory required for commands
if (megaraid_alloc_cmd_packets(adapter) != 0) {
goto out_free_irq;
} }
// Product info // Product info
if (megaraid_mbox_product_info(adapter) != 0) { if (megaraid_mbox_product_info(adapter) != 0)
goto out_alloc_cmds; goto out_free_irq;
}
// Do we support extended CDBs // Do we support extended CDBs
adapter->max_cdb_sz = 10; adapter->max_cdb_sz = 10;
@ -874,9 +880,8 @@ megaraid_init_mbox(adapter_t *adapter)
* Allocate resources required to issue FW calls, when sysfs is * Allocate resources required to issue FW calls, when sysfs is
* accessed * accessed
*/ */
if (megaraid_sysfs_alloc_resources(adapter) != 0) { if (megaraid_sysfs_alloc_resources(adapter) != 0)
goto out_alloc_cmds; goto out_free_irq;
}
// Set the DMA mask to 64-bit. All supported controllers are capable of // Set the DMA mask to 64-bit. All supported controllers are capable of
// DMA in this range // DMA in this range
@ -920,10 +925,10 @@ megaraid_init_mbox(adapter_t *adapter)
out_free_sysfs_res: out_free_sysfs_res:
megaraid_sysfs_free_resources(adapter); megaraid_sysfs_free_resources(adapter);
out_alloc_cmds:
megaraid_free_cmd_packets(adapter);
out_free_irq: out_free_irq:
free_irq(adapter->irq, adapter); free_irq(adapter->irq, adapter);
out_alloc_cmds:
megaraid_free_cmd_packets(adapter);
out_iounmap: out_iounmap:
iounmap(raid_dev->baseaddr); iounmap(raid_dev->baseaddr);
out_release_regions: out_release_regions:
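A compile-standalone illustration (plain user-space C; every function is a stand-in, not driver code) of the unwind ordering the reordered labels above establish: command packets are now allocated before the IRQ is requested, so a later failure jumps to out_free_irq, releases the IRQ, then falls through to out_alloc_cmds to release the packets:

#include <stdio.h>

static int alloc_cmd_packets(void) { return 0; }
static void free_cmd_packets(void) { puts("freed command packets"); }
static int request_irq_stub(void) { return 0; }
static void free_irq_stub(void) { puts("freed irq"); }
static int product_info_stub(void) { return -1; }	/* force the unwind */

static int init_sketch(void)
{
	if (alloc_cmd_packets() != 0)
		goto out;
	if (request_irq_stub() != 0)
		goto out_alloc_cmds;
	if (product_info_stub() != 0)
		goto out_free_irq;
	return 0;

out_free_irq:
	free_irq_stub();
out_alloc_cmds:
	free_cmd_packets();
out:
	return -1;
}

int main(void)
{
	return init_sketch() ? 1 : 0;
}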
@ -937,7 +942,7 @@ megaraid_init_mbox(adapter_t *adapter)
/** /**
* megaraid_fini_mbox - undo controller initialization * megaraid_fini_mbox - undo controller initialization
* @param adapter : our soft state * @adapter : our soft state
*/ */
static void static void
megaraid_fini_mbox(adapter_t *adapter) megaraid_fini_mbox(adapter_t *adapter)
@ -967,12 +972,12 @@ megaraid_fini_mbox(adapter_t *adapter)
/** /**
* megaraid_alloc_cmd_packets - allocate shared mailbox * megaraid_alloc_cmd_packets - allocate shared mailbox
* @param adapter : soft state of the raid controller * @adapter : soft state of the raid controller
* *
* Allocate and align the shared mailbox. This mailbox is used to issue * Allocate and align the shared mailbox. This mailbox is used to issue
* all the commands. For IO based controllers, the mailbox is also registered * all the commands. For IO based controllers, the mailbox is also registered
* with the FW. Allocate memory for all commands as well. * with the FW. Allocate memory for all commands as well.
* This is our big allocator * This is our big allocator.
*/ */
static int static int
megaraid_alloc_cmd_packets(adapter_t *adapter) megaraid_alloc_cmd_packets(adapter_t *adapter)
@ -1132,9 +1137,9 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
/** /**
* megaraid_free_cmd_packets - free memory * megaraid_free_cmd_packets - free memory
* @param adapter : soft state of the raid controller * @adapter : soft state of the raid controller
* *
* Release memory resources allocated for commands * Release memory resources allocated for commands.
*/ */
static void static void
megaraid_free_cmd_packets(adapter_t *adapter) megaraid_free_cmd_packets(adapter_t *adapter)
@ -1156,10 +1161,10 @@ megaraid_free_cmd_packets(adapter_t *adapter)
/** /**
* megaraid_mbox_setup_dma_pools - setup dma pool for command packets * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
* @param adapter : HBA soft state * @adapter : HBA soft state
* *
* setup the dma pools for mailbox, passthru and extended passthru structures, * Setup the dma pools for mailbox, passthru and extended passthru structures,
* and scatter-gather lists * and scatter-gather lists.
*/ */
static int static int
megaraid_mbox_setup_dma_pools(adapter_t *adapter) megaraid_mbox_setup_dma_pools(adapter_t *adapter)
@ -1252,10 +1257,10 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
/** /**
* megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
* @param adapter : HBA soft state * @adapter : HBA soft state
* *
* teardown the dma pool for mailbox, passthru and extended passthru * Teardown the dma pool for mailbox, passthru and extended passthru
* structures, and scatter-gather lists * structures, and scatter-gather lists.
*/ */
static void static void
megaraid_mbox_teardown_dma_pools(adapter_t *adapter) megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
@ -1300,10 +1305,11 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
/** /**
* megaraid_alloc_scb - detach and return a scb from the free list * megaraid_alloc_scb - detach and return a scb from the free list
* @adapter : controller's soft state * @adapter : controller's soft state
* @scp : pointer to the scsi command to be executed
* *
* return the scb from the head of the free list. NULL if there are none * Return the scb from the head of the free list. %NULL if there are none
* available * available.
**/ */
static scb_t * static scb_t *
megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
{ {
@ -1337,11 +1343,11 @@ megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
* @adapter : controller's soft state * @adapter : controller's soft state
* @scb : scb to be freed * @scb : scb to be freed
* *
* return the scb back to the free list of scbs. The caller must 'flush' the * Return the scb back to the free list of scbs. The caller must 'flush' the
* SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc. * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc.
* NOTE NOTE: Make sure the scb is not on any list before calling this * NOTE NOTE: Make sure the scb is not on any list before calling this
* routine. * routine.
**/ */
static inline void static inline void
megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb) megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
{ {
@ -1362,10 +1368,10 @@ megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
/** /**
* megaraid_mbox_mksgl - make the scatter-gather list * megaraid_mbox_mksgl - make the scatter-gather list
* @adapter - controller's soft state * @adapter : controller's soft state
* @scb - scsi control block * @scb : scsi control block
* *
* prepare the scatter-gather list * Prepare the scatter-gather list.
*/ */
static int static int
megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
@ -1435,10 +1441,10 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
/** /**
* mbox_post_cmd - issue a mailbox command * mbox_post_cmd - issue a mailbox command
* @adapter - controller's soft state * @adapter : controller's soft state
* @scb - command to be issued * @scb : command to be issued
* *
* post the command to the controller if mailbox is availble. * Post the command to the controller if mailbox is available.
*/ */
static int static int
mbox_post_cmd(adapter_t *adapter, scb_t *scb) mbox_post_cmd(adapter_t *adapter, scb_t *scb)
@ -1518,7 +1524,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
* Queue entry point for mailbox based controllers. * Queue entry point for mailbox based controllers.
*/ */
static int static int
megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *)) megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
{ {
adapter_t *adapter; adapter_t *adapter;
scb_t *scb; scb_t *scb;
@ -1548,15 +1554,15 @@ megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *))
} }
/** /**
* megaraid_mbox_build_cmd - transform the mid-layer scsi command to megaraid * megaraid_mbox_build_cmd - transform the mid-layer scsi commands
* firmware lingua * @adapter : controller's soft state
* @adapter - controller's soft state * @scp : mid-layer scsi command pointer
* @scp - mid-layer scsi command pointer * @busy : set if request could not be completed because of lack of
* @busy - set if request could not be completed because of lack of
* resources * resources
* *
* convert the command issued by mid-layer to format understood by megaraid * Transform the mid-layer scsi command to megaraid firmware lingua.
* firmware. We also complete certain command without sending them to firmware * Convert the command issued by mid-layer to format understood by megaraid
* firmware. We also complete certain commands without sending them to firmware.
*/ */
static scb_t * static scb_t *
megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
@ -1937,9 +1943,9 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
/** /**
* megaraid_mbox_runpendq - execute commands queued in the pending queue * megaraid_mbox_runpendq - execute commands queued in the pending queue
* @adapter : controller's soft state * @adapter : controller's soft state
* @scb : SCB to be queued in the pending list * @scb_q : SCB to be queued in the pending list
* *
* scan the pending list for commands which are not yet issued and try to * Scan the pending list for commands which are not yet issued and try to
* post to the controller. The SCB can be a null pointer, which would indicate * post to the controller. The SCB can be a null pointer, which would indicate
* no SCB to be queued, just try to execute the ones in the pending list. * no SCB to be queued, just try to execute the ones in the pending list.
* *
@ -2012,11 +2018,11 @@ megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
/** /**
* megaraid_mbox_prepare_pthru - prepare a command for physical devices * megaraid_mbox_prepare_pthru - prepare a command for physical devices
* @adapter - pointer to controller's soft state * @adapter : pointer to controller's soft state
* @scb - scsi control block * @scb : scsi control block
* @scp - scsi command from the mid-layer * @scp : scsi command from the mid-layer
* *
* prepare a command for the scsi physical devices * Prepare a command for the scsi physical devices.
*/ */
static void static void
megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb, megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
@ -2060,12 +2066,12 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
/** /**
* megaraid_mbox_prepare_epthru - prepare a command for physical devices * megaraid_mbox_prepare_epthru - prepare a command for physical devices
* @adapter - pointer to controller's soft state * @adapter : pointer to controller's soft state
* @scb - scsi control block * @scb : scsi control block
* @scp - scsi command from the mid-layer * @scp : scsi command from the mid-layer
* *
* prepare a command for the scsi physical devices. This rountine prepares * Prepare a command for the scsi physical devices. This routine prepares
* commands for devices which can take extended CDBs (>10 bytes) * commands for devices which can take extended CDBs (>10 bytes).
*/ */
static void static void
megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb, megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
@ -2109,9 +2115,9 @@ megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
/** /**
* megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
* @adapter - controller's soft state * @adapter : controller's soft state
* *
* Interrupt ackrowledgement sequence for memory mapped HBAs. Find out the * Interrupt acknowledgement sequence for memory mapped HBAs. Find out the
* completed command and put them on the completed list for later processing. * completed command and put them on the completed list for later processing.
* *
* Returns: 1 if the interrupt is valid, 0 otherwise * Returns: 1 if the interrupt is valid, 0 otherwise
@ -2224,9 +2230,8 @@ megaraid_ack_sequence(adapter_t *adapter)
/** /**
* megaraid_isr - isr for memory based mailbox based controllers * megaraid_isr - isr for memory based mailbox based controllers
* @irq - irq * @irq : irq
* @devp - pointer to our soft state * @devp : pointer to our soft state
* @regs - unused
* *
* Interrupt service routine for memory-mapped mailbox controllers. * Interrupt service routine for memory-mapped mailbox controllers.
*/ */
@ -2671,7 +2676,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
* the FW is still live, in which case the outstanding commands counter must go * the FW is still live, in which case the outstanding commands counter must go
* down to 0. If that happens, also issue the reservation reset command to * down to 0. If that happens, also issue the reservation reset command to
* relinquish (possible) reservations on the logical drives connected to this * relinquish (possible) reservations on the logical drives connected to this
* host * host.
**/ **/
static int static int
megaraid_reset_handler(struct scsi_cmnd *scp) megaraid_reset_handler(struct scsi_cmnd *scp)
@ -2823,11 +2828,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
/** /**
* mbox_post_sync_cmd() - blocking command to the mailbox based controllers * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
* @adapter - controller's soft state * @adapter : controller's soft state
* @raw_mbox - the mailbox * @raw_mbox : the mailbox
* *
* Issue a scb in synchronous and non-interrupt mode for mailbox based * Issue a scb in synchronous and non-interrupt mode for mailbox based
* controllers * controllers.
*/ */
static int static int
mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[]) mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
@ -2955,12 +2960,12 @@ mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
/** /**
* mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
* @adapter - controller's soft state * @adapter : controller's soft state
* @raw_mbox - the mailbox * @raw_mbox : the mailbox
* *
* Issue a scb in synchronous and non-interrupt mode for mailbox based * Issue a scb in synchronous and non-interrupt mode for mailbox based
* controllers. This is a faster version of the synchronous command and * controllers. This is a faster version of the synchronous command and
* therefore can be called in interrupt-context as well * therefore can be called in interrupt-context as well.
*/ */
static int static int
mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
@ -3008,10 +3013,10 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
/** /**
* megaraid_busywait_mbox() - Wait until the controller's mailbox is available * megaraid_busywait_mbox() - Wait until the controller's mailbox is available
* @raid_dev - RAID device (HBA) soft state * @raid_dev : RAID device (HBA) soft state
* *
* wait until the controller's mailbox is available to accept more commands. * Wait until the controller's mailbox is available to accept more commands.
* wait for at most 1 second * Wait for at most 1 second.
*/ */
static int static int
megaraid_busywait_mbox(mraid_device_t *raid_dev) megaraid_busywait_mbox(mraid_device_t *raid_dev)
@ -3032,9 +3037,9 @@ megaraid_busywait_mbox(mraid_device_t *raid_dev)
/** /**
* megaraid_mbox_product_info - some static information about the controller * megaraid_mbox_product_info - some static information about the controller
* @adapter - our soft state * @adapter : our soft state
* *
* issue commands to the controller to grab some parameters required by our * Issue commands to the controller to grab some parameters required by our
* caller. * caller.
*/ */
static int static int
@ -3157,10 +3162,10 @@ megaraid_mbox_product_info(adapter_t *adapter)
/** /**
* megaraid_mbox_extended_cdb - check for support for extended CDBs * megaraid_mbox_extended_cdb - check for support for extended CDBs
* @adapter - soft state for the controller * @adapter : soft state for the controller
* *
* this routine check whether the controller in question supports extended * This routine checks whether the controller in question supports extended
* ( > 10 bytes ) CDBs * ( > 10 bytes ) CDBs.
*/ */
static int static int
megaraid_mbox_extended_cdb(adapter_t *adapter) megaraid_mbox_extended_cdb(adapter_t *adapter)
@ -3193,8 +3198,8 @@ megaraid_mbox_extended_cdb(adapter_t *adapter)
/** /**
* megaraid_mbox_support_ha - Do we support clustering * megaraid_mbox_support_ha - Do we support clustering
* @adapter - soft state for the controller * @adapter : soft state for the controller
* @init_id - ID of the initiator * @init_id : ID of the initiator
* *
* Determine if the firmware supports clustering and the ID of the initiator. * Determine if the firmware supports clustering and the ID of the initiator.
*/ */
@ -3236,9 +3241,9 @@ megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
/** /**
* megaraid_mbox_support_random_del - Do we support random deletion * megaraid_mbox_support_random_del - Do we support random deletion
* @adapter - soft state for the controller * @adapter : soft state for the controller
* *
* Determine if the firmware supports random deletion * Determine if the firmware supports random deletion.
* Return: 1 if operation is supported, 0 otherwise * Return: 1 if operation is supported, 0 otherwise
*/ */
static int static int
@ -3271,10 +3276,10 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
/** /**
* megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
* @adapter - soft state for the controller * @adapter : soft state for the controller
* *
* Find out the maximum number of scatter-gather elements supported by the * Find out the maximum number of scatter-gather elements supported by the
* firmware * firmware.
*/ */
static int static int
megaraid_mbox_get_max_sg(adapter_t *adapter) megaraid_mbox_get_max_sg(adapter_t *adapter)
@ -3311,10 +3316,10 @@ megaraid_mbox_get_max_sg(adapter_t *adapter)
/** /**
* megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
* @adapter - soft state for the controller * @adapter : soft state for the controller
* *
* Enumerate the RAID and SCSI channels for ROMB platoforms so that channels * Enumerate the RAID and SCSI channels for ROMB platforms so that channels
* can be exported as regular SCSI channels * can be exported as regular SCSI channels.
*/ */
static void static void
megaraid_mbox_enum_raid_scsi(adapter_t *adapter) megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
@ -3348,9 +3353,9 @@ megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
/** /**
* megaraid_mbox_flush_cache - flush adapter and disks cache * megaraid_mbox_flush_cache - flush adapter and disks cache
* @param adapter : soft state for the controller * @adapter : soft state for the controller
* *
* Flush adapter cache followed by disks cache * Flush adapter cache followed by disks cache.
*/ */
static void static void
megaraid_mbox_flush_cache(adapter_t *adapter) megaraid_mbox_flush_cache(adapter_t *adapter)
@ -3379,14 +3384,92 @@ megaraid_mbox_flush_cache(adapter_t *adapter)
} }
/**
* megaraid_mbox_fire_sync_cmd - fire the sync cmd
* @adapter : soft state for the controller
*
* Clears the pending cmds in FW and reinits its RAID structs.
*/
static int
megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
{
mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
mbox64_t *mbox64;
int status = 0;
int i;
uint32_t dword;
mbox = (mbox_t *)raw_mbox;
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = 0xFF;
mbox64 = raid_dev->mbox64;
mbox = raid_dev->mbox;
/* Wait until mailbox is free */
if (megaraid_busywait_mbox(raid_dev) != 0) {
status = 1;
goto blocked_mailbox;
}
/* Copy mailbox data into host structure */
memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
mbox->cmdid = 0xFE;
mbox->busy = 1;
mbox->poll = 0;
mbox->ack = 0;
mbox->numstatus = 0;
mbox->status = 0;
wmb();
WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
/* Wait for maximum 1 min for status to post.
* If the Firmware SUPPORTS the ABOVE COMMAND,
* mbox->cmd will be set to 0
* else
* the firmware will reject the command with
* mbox->numstatus set to 1
*/
i = 0;
status = 0;
while (!mbox->numstatus && mbox->cmd == 0xFF) {
rmb();
msleep(1);
i++;
if (i > 1000 * 60) {
status = 1;
break;
}
}
if (mbox->numstatus == 1)
status = 1; /*cmd not supported*/
/* Check for interrupt line */
dword = RDOUTDOOR(raid_dev);
WROUTDOOR(raid_dev, dword);
WRINDOOR(raid_dev,2);
return status;
blocked_mailbox:
con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n"));
return status;
}
/** /**
* megaraid_mbox_display_scb - display SCB information, mostly debug purposes * megaraid_mbox_display_scb - display SCB information, mostly debug purposes
* @param adapter : controllers' soft state * @adapter : controller's soft state
* @param scb : SCB to be displayed * @scb : SCB to be displayed
* @param level : debug level for console print * @level : debug level for console print
* *
* Display information about the given SCB iff the current debug level is * Display information about the given SCB iff the current debug level is
* verbose * verbose.
*/ */
static void static void
megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb) megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
@ -3434,7 +3517,7 @@ megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
* scsi addresses and megaraid scsi and logical drive addresses. We export * scsi addresses and megaraid scsi and logical drive addresses. We export
* scsi devices on their actual addresses, whereas the logical drives are * scsi devices on their actual addresses, whereas the logical drives are
* exported on a virtual scsi channel. * exported on a virtual scsi channel.
**/ */
static void static void
megaraid_mbox_setup_device_map(adapter_t *adapter) megaraid_mbox_setup_device_map(adapter_t *adapter)
{ {
@ -3472,7 +3555,7 @@ megaraid_mbox_setup_device_map(adapter_t *adapter)
/** /**
* megaraid_cmm_register - register with the management module * megaraid_cmm_register - register with the management module
* @param adapter : HBA soft state * @adapter : HBA soft state
* *
* Register with the management module, which allows applications to issue * Register with the management module, which allows applications to issue
* ioctl calls to the drivers. This interface is used by the management module * ioctl calls to the drivers. This interface is used by the management module
@ -3562,11 +3645,11 @@ megaraid_cmm_register(adapter_t *adapter)
/** /**
* megaraid_cmm_unregister - un-register with the management module * megaraid_cmm_unregister - un-register with the management module
* @param adapter : HBA soft state * @adapter : HBA soft state
* *
* Un-register with the management module. * Un-register with the management module.
* FIXME: mgmt module must return failure for unregister if it has pending * FIXME: mgmt module must return failure for unregister if it has pending
* commands in LLD * commands in LLD.
*/ */
static int static int
megaraid_cmm_unregister(adapter_t *adapter) megaraid_cmm_unregister(adapter_t *adapter)
@ -3579,9 +3662,9 @@ megaraid_cmm_unregister(adapter_t *adapter)
/** /**
* megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
* @param drvr_data : LLD specific data * @drvr_data : LLD specific data
* @param kioc : CMM interface packet * @kioc : CMM interface packet
* @param action : command action * @action : command action
* *
* This routine is invoked whenever the Common Management Module (CMM) has a * This routine is invoked whenever the Common Management Module (CMM) has a
* command for us. The 'action' parameter specifies if this is a new command * command for us. The 'action' parameter specifies if this is a new command
@ -3634,8 +3717,8 @@ megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
/** /**
* megaraid_mbox_mm_command - issues commands routed through CMM * megaraid_mbox_mm_command - issues commands routed through CMM
* @param adapter : HBA soft state * @adapter : HBA soft state
* @param kioc : management command packet * @kioc : management command packet
* *
* Issues commands, which are routed through the management module. * Issues commands, which are routed through the management module.
*/ */
@ -3804,8 +3887,8 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
/** /**
* gather_hbainfo - HBA characteristics for the applications * gather_hbainfo - HBA characteristics for the applications
* @param adapter : HBA soft state * @adapter : HBA soft state
* @param hinfo : pointer to the caller's host info strucuture * @hinfo : pointer to the caller's host info structure
*/ */
static int static int
gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo) gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
@ -3839,16 +3922,15 @@ gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
/** /**
* megaraid_sysfs_alloc_resources - allocate sysfs related resources * megaraid_sysfs_alloc_resources - allocate sysfs related resources
* @adapter : controller's soft state
* *
* Allocate packets required to issue FW calls whenever the sysfs attributes * Allocate packets required to issue FW calls whenever the sysfs attributes
* are read. These attributes would require up-to-date information from the * are read. These attributes would require up-to-date information from the
* FW. Also set up resources for mutual exclusion to share these resources and * FW. Also set up resources for mutual exclusion to share these resources and
* the wait queue. * the wait queue.
* *
* @param adapter : controller's soft state * Return 0 on success.
* * Return -ERROR_CODE on failure.
* @return 0 on success
* @return -ERROR_CODE on failure
*/ */
static int static int
megaraid_sysfs_alloc_resources(adapter_t *adapter) megaraid_sysfs_alloc_resources(adapter_t *adapter)
@ -3885,10 +3967,9 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
/** /**
* megaraid_sysfs_free_resources - free sysfs related resources * megaraid_sysfs_free_resources - free sysfs related resources
* @adapter : controller's soft state
* *
* Free packets allocated for sysfs FW commands * Free packets allocated for sysfs FW commands
*
* @param adapter : controller's soft state
*/ */
static void static void
megaraid_sysfs_free_resources(adapter_t *adapter) megaraid_sysfs_free_resources(adapter_t *adapter)
@ -3907,10 +3988,9 @@ megaraid_sysfs_free_resources(adapter_t *adapter)
/** /**
* megaraid_sysfs_get_ldmap_done - callback for get ldmap * megaraid_sysfs_get_ldmap_done - callback for get ldmap
* @uioc : completed packet
* *
* Callback routine called in the ISR/tasklet context for get ldmap call * Callback routine called in the ISR/tasklet context for get ldmap call
*
* @param uioc : completed packet
*/ */
static void static void
megaraid_sysfs_get_ldmap_done(uioc_t *uioc) megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
@ -3926,12 +4006,11 @@ megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
/** /**
* megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
* @data : timed out packet
* *
* Timeout routine to recover and return to application, in case the adapter * Timeout routine to recover and return to application, in case the adapter
* has stopped responding. A timeout of 60 seconds for this command seem like * has stopped responding. A timeout of 60 seconds for this command seems like
* a good value * a good value.
*
* @param uioc : timed out packet
*/ */
static void static void
megaraid_sysfs_get_ldmap_timeout(unsigned long data) megaraid_sysfs_get_ldmap_timeout(unsigned long data)
@ -3948,6 +4027,7 @@ megaraid_sysfs_get_ldmap_timeout(unsigned long data)
/** /**
* megaraid_sysfs_get_ldmap - get update logical drive map * megaraid_sysfs_get_ldmap - get update logical drive map
* @adapter : controller's soft state
* *
* This routine will be called whenever user reads the logical drive * This routine will be called whenever user reads the logical drive
* attributes, go get the current logical drive mapping table from the * attributes, go get the current logical drive mapping table from the
@ -3959,10 +4039,8 @@ megaraid_sysfs_get_ldmap_timeout(unsigned long data)
* standalone library. For now, this should suffice since there is no other * standalone library. For now, this should suffice since there is no other
* user of this interface. * user of this interface.
* *
* @param adapter : controller's soft state * Return 0 on success.
* * Return -1 on failure.
* @return 0 on success
* @return -1 on failure
*/ */
static int static int
megaraid_sysfs_get_ldmap(adapter_t *adapter) megaraid_sysfs_get_ldmap(adapter_t *adapter)
@ -4064,13 +4142,12 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
/** /**
* megaraid_sysfs_show_app_hndl - display application handle for this adapter * megaraid_sysfs_show_app_hndl - display application handle for this adapter
* @cdev : class device object representation for the host
* @buf : buffer to send data to
* *
* Display the handle used by the applications while executing management * Display the handle used by the applications while executing management
* tasks on the adapter. We invoke a management module API to get the adapter * tasks on the adapter. We invoke a management module API to get the adapter
* handle, since we do not interface with applications directly. * handle, since we do not interface with applications directly.
*
* @param cdev : class device object representation for the host
* @param buf : buffer to send data to
*/ */
static ssize_t static ssize_t
megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf) megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf)
@ -4087,16 +4164,18 @@ megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf)
/** /**
* megaraid_sysfs_show_ldnum - display the logical drive number for this device * megaraid_sysfs_show_ldnum - display the logical drive number for this device
* @dev : device object representation for the scsi device
* @attr : device attribute to show
* @buf : buffer to send data to
* *
* Display the logical drive number for the device in question, if it is a valid * Display the logical drive number for the device in question, if it is a valid
* logical drive. For physical devices, "-1" is returned * logical drive. For physical devices, "-1" is returned.
* The logical drive number is displayed in following format *
* The logical drive number is displayed in following format:
* *
* <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE> * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>
* <int> <int> <int> <int>
* *
* @param dev : device object representation for the scsi device * <int> <int> <int> <int>
* @param buf : buffer to send data to
*/ */
static ssize_t static ssize_t
megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf) megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)

View file

@ -21,8 +21,8 @@
#include "megaraid_ioctl.h" #include "megaraid_ioctl.h"
#define MEGARAID_VERSION "2.20.4.9" #define MEGARAID_VERSION "2.20.5.1"
#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)" #define MEGARAID_EXT_VERSION "(Release Date: Thu Nov 16 15:32:35 EST 2006)"
/* /*
@ -146,27 +146,27 @@ typedef struct {
/** /**
* mraid_device_t - adapter soft state structure for mailbox controllers * mraid_device_t - adapter soft state structure for mailbox controllers
* @param una_mbox64 : 64-bit mbox - unaligned * @una_mbox64 : 64-bit mbox - unaligned
* @param una_mbox64_dma : mbox dma addr - unaligned * @una_mbox64_dma : mbox dma addr - unaligned
* @param mbox : 32-bit mbox - aligned * @mbox : 32-bit mbox - aligned
* @param mbox64 : 64-bit mbox - aligned * @mbox64 : 64-bit mbox - aligned
* @param mbox_dma : mbox dma addr - aligned * @mbox_dma : mbox dma addr - aligned
* @param mailbox_lock : exclusion lock for the mailbox * @mailbox_lock : exclusion lock for the mailbox
* @param baseport : base port of hba memory * @baseport : base port of hba memory
* @param baseaddr : mapped addr of hba memory * @baseaddr : mapped addr of hba memory
* @param mbox_pool : pool of mailboxes * @mbox_pool : pool of mailboxes
* @param mbox_pool_handle : handle for the mailbox pool memory * @mbox_pool_handle : handle for the mailbox pool memory
* @param epthru_pool : a pool for extended passthru commands * @epthru_pool : a pool for extended passthru commands
* @param epthru_pool_handle : handle to the pool above * @epthru_pool_handle : handle to the pool above
* @param sg_pool : pool of scatter-gather lists for this driver * @sg_pool : pool of scatter-gather lists for this driver
* @param sg_pool_handle : handle to the pool above * @sg_pool_handle : handle to the pool above
* @param ccb_list : list of our command control blocks * @ccb_list : list of our command control blocks
* @param uccb_list : list of cmd control blocks for mgmt module * @uccb_list : list of cmd control blocks for mgmt module
* @param umbox64 : array of mailbox for user commands (cmm) * @umbox64 : array of mailbox for user commands (cmm)
* @param pdrv_state : array for state of each physical drive. * @pdrv_state : array for state of each physical drive.
* @param last_disp : flag used to show device scanning * @last_disp : flag used to show device scanning
* @param hw_error : set if FW not responding * @hw_error : set if FW not responding
* @param fast_load : If set, skip physical device scanning * @fast_load : If set, skip physical device scanning
* @channel_class : channel class, RAID or SCSI * @channel_class : channel class, RAID or SCSI
* @sysfs_sem : semaphore to serialize access to sysfs res. * @sysfs_sem : semaphore to serialize access to sysfs res.
* @sysfs_uioc : management packet to issue FW calls from sysfs * @sysfs_uioc : management packet to issue FW calls from sysfs

View file

@ -78,10 +78,10 @@ static struct file_operations lsi_fops = {
/** /**
* mraid_mm_open - open routine for char node interface * mraid_mm_open - open routine for char node interface
* @inod : unused * @inode : unused
* @filep : unused * @filep : unused
* *
* allow ioctl operations by apps only if they superuser privilege * Allow ioctl operations by apps only if they have superuser privilege.
*/ */
static int static int
mraid_mm_open(struct inode *inode, struct file *filep) mraid_mm_open(struct inode *inode, struct file *filep)
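The body of the open routine is omitted by this hunk. A hedged sketch of a root-only char-node open, using the standard capable()/CAP_SYS_ADMIN kernel facilities and not claiming to be the driver's verbatim code:

	#include <linux/capability.h>
	#include <linux/errno.h>
	#include <linux/fs.h>

	/* Hedged sketch, not the driver's actual body. */
	static int example_mm_open(struct inode *inode, struct file *filep)
	{
		/* Management ioctls are root-only: reject unprivileged callers. */
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;

		return 0;
	}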
@ -214,7 +214,9 @@ mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
/** /**
* mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet
* @umimd : User space mimd_t ioctl packet * @umimd : User space mimd_t ioctl packet
* @adapter : pointer to the adapter (OUT) * @rval : returned success/error status
*
* The function return value is a pointer to the located @adapter.
*/ */
static mraid_mmadp_t * static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
@ -252,11 +254,11 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
return adapter; return adapter;
} }
/* /**
* handle_drvrcmd - This routine checks if the opcode is a driver * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
* cmd and if it is, handles it.
* @arg : packet sent by the user app * @arg : packet sent by the user app
* @old_ioctl : mimd if 1; uioc otherwise * @old_ioctl : mimd if 1; uioc otherwise
* @rval : pointer for command's returned value (not function status)
*/ */
static int static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval) handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
@ -322,8 +324,8 @@ handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
/** /**
* mimd_to_kioc - Converter from old to new ioctl format * mimd_to_kioc - Converter from old to new ioctl format
*
* @umimd : user space old MIMD IOCTL * @umimd : user space old MIMD IOCTL
* @adp : adapter softstate
* @kioc : kernel space new format IOCTL * @kioc : kernel space new format IOCTL
* *
* Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
@ -474,7 +476,6 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
/** /**
* mraid_mm_attch_buf - Attach a free dma buffer for required size * mraid_mm_attch_buf - Attach a free dma buffer for required size
*
* @adp : Adapter softstate * @adp : Adapter softstate
* @kioc : kioc that the buffer needs to be attached to * @kioc : kioc that the buffer needs to be attached to
* @xferlen : required length for buffer * @xferlen : required length for buffer
@ -607,7 +608,6 @@ mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
/** /**
* mraid_mm_dealloc_kioc - Return kioc to free pool * mraid_mm_dealloc_kioc - Return kioc to free pool
*
* @adp : Adapter softstate * @adp : Adapter softstate
* @kioc : uioc_t node to be returned to free pool * @kioc : uioc_t node to be returned to free pool
*/ */
@ -652,7 +652,6 @@ mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
/** /**
* lld_ioctl - Routine to issue ioctl to low level drvr * lld_ioctl - Routine to issue ioctl to low level drvr
*
* @adp : The adapter handle * @adp : The adapter handle
* @kioc : The ioctl packet with kernel addresses * @kioc : The ioctl packet with kernel addresses
*/ */
@ -705,7 +704,6 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
/** /**
* ioctl_done - callback from the low level driver * ioctl_done - callback from the low level driver
*
* @kioc : completed ioctl packet * @kioc : completed ioctl packet
*/ */
static void static void
@ -756,9 +754,8 @@ ioctl_done(uioc_t *kioc)
} }
/* /**
* lld_timedout : callback from the expired timer * lld_timedout - callback from the expired timer
*
* @ptr : ioctl packet that timed out * @ptr : ioctl packet that timed out
*/ */
static void static void
@ -776,8 +773,7 @@ lld_timedout(unsigned long ptr)
/** /**
* kioc_to_mimd : Converter from new back to old format * kioc_to_mimd - Converter from new back to old format
*
* @kioc : Kernel space IOCTL packet (successfully issued) * @kioc : Kernel space IOCTL packet (successfully issued)
* @mimd : User space MIMD packet * @mimd : User space MIMD packet
*/ */
@ -855,7 +851,6 @@ kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
/** /**
* hinfo_to_cinfo - Convert new format hba info into old format * hinfo_to_cinfo - Convert new format hba info into old format
*
* @hinfo : New format, more comprehensive adapter info * @hinfo : New format, more comprehensive adapter info
* @cinfo : Old format adapter info to support mimd_t apps * @cinfo : Old format adapter info to support mimd_t apps
*/ */
@ -878,10 +873,9 @@ hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
} }
/* /**
* mraid_mm_register_adp - Registration routine for low level drvrs * mraid_mm_register_adp - Registration routine for low level drivers
* * @lld_adp : Adapter object
* @adp : Adapter objejct
*/ */
int int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp) mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
@ -1007,15 +1001,14 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
/** /**
* mraid_mm_adapter_app_handle - return the application handle for this adapter * mraid_mm_adapter_app_handle - return the application handle for this adapter
* @unique_id : adapter unique identifier
* *
* For the given driver data, locate the adadpter in our global list and * For the given driver data, locate the adapter in our global list and
* return the corresponding handle, which is also used by applications to * return the corresponding handle, which is also used by applications to
* uniquely identify an adapter. * uniquely identify an adapter.
* *
* @param unique_id : adapter unique identifier * Return adapter handle if found in the list.
* * Return 0 if adapter could not be located, should never happen though.
* @return adapter handle if found in the list
* @return 0 if adapter could not be located, should never happen though
*/ */
uint32_t uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id) mraid_mm_adapter_app_handle(uint32_t unique_id)
@ -1040,7 +1033,6 @@ mraid_mm_adapter_app_handle(uint32_t unique_id)
/** /**
* mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
*
* @adp : Adapter softstate * @adp : Adapter softstate
* *
* We maintain a pool of dma buffers per each adapter. Each pool has one * We maintain a pool of dma buffers per each adapter. Each pool has one
@ -1093,11 +1085,11 @@ mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
} }
/* /**
* mraid_mm_unregister_adp - Unregister routine for low level drivers * mraid_mm_unregister_adp - Unregister routine for low level drivers
* Assume no outstanding ioctls to llds.
*
* @unique_id : UID of the adapter * @unique_id : UID of the adapter
*
* Assumes no outstanding ioctls to llds.
*/ */
int int
mraid_mm_unregister_adp(uint32_t unique_id) mraid_mm_unregister_adp(uint32_t unique_id)
@ -1131,7 +1123,6 @@ mraid_mm_unregister_adp(uint32_t unique_id)
/** /**
* mraid_mm_free_adp_resources - Free adapter softstate * mraid_mm_free_adp_resources - Free adapter softstate
*
* @adp : Adapter softstate * @adp : Adapter softstate
*/ */
static void static void
@ -1162,7 +1153,6 @@ mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
/** /**
* mraid_mm_teardown_dma_pools - Free all per adapter dma buffers * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
*
* @adp : Adapter softstate * @adp : Adapter softstate
*/ */
static void static void
@ -1190,7 +1180,7 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
} }
/** /**
* mraid_mm_init : Module entry point * mraid_mm_init - Module entry point
*/ */
static int __init static int __init
mraid_mm_init(void) mraid_mm_init(void)
@ -1214,10 +1204,13 @@ mraid_mm_init(void)
} }
/**
* mraid_mm_compat_ioctl : 32bit to 64bit ioctl conversion routine
*/
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
/**
* mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine
* @filep : file operations pointer (ignored)
* @cmd : ioctl command
* @arg : user ioctl packet
*/
static long static long
mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd, mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg) unsigned long arg)
@ -1231,7 +1224,7 @@ mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
#endif #endif
/** /**
* mraid_mm_exit : Module exit point * mraid_mm_exit - Module exit point
*/ */
static void __exit static void __exit
mraid_mm_exit(void) mraid_mm_exit(void)

View file

@ -15,7 +15,7 @@
#ifndef LSI_MEGARAID_SAS_H #ifndef LSI_MEGARAID_SAS_H
#define LSI_MEGARAID_SAS_H #define LSI_MEGARAID_SAS_H
/** /*
* MegaRAID SAS Driver meta data * MegaRAID SAS Driver meta data
*/ */
#define MEGASAS_VERSION "00.00.03.05" #define MEGASAS_VERSION "00.00.03.05"
@ -40,7 +40,7 @@
* "message frames" * "message frames"
*/ */
/** /*
* FW posts its state in upper 4 bits of outbound_msg_0 register * FW posts its state in upper 4 bits of outbound_msg_0 register
*/ */
#define MFI_STATE_MASK 0xF0000000 #define MFI_STATE_MASK 0xF0000000
@ -58,7 +58,7 @@
#define MEGAMFI_FRAME_SIZE 64 #define MEGAMFI_FRAME_SIZE 64
/** /*
* During FW init, clear pending cmds & reset state using inbound_msg_0 * During FW init, clear pending cmds & reset state using inbound_msg_0
* *
* ABORT : Abort all pending cmds * ABORT : Abort all pending cmds
@ -78,7 +78,7 @@
MFI_INIT_MFIMODE| \ MFI_INIT_MFIMODE| \
MFI_INIT_ABORT MFI_INIT_ABORT
/** /*
* MFI frame flags * MFI frame flags
*/ */
#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000 #define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
@ -92,12 +92,12 @@
#define MFI_FRAME_DIR_READ 0x0010 #define MFI_FRAME_DIR_READ 0x0010
#define MFI_FRAME_DIR_BOTH 0x0018 #define MFI_FRAME_DIR_BOTH 0x0018
/** /*
* Definition for cmd_status * Definition for cmd_status
*/ */
#define MFI_CMD_STATUS_POLL_MODE 0xFF #define MFI_CMD_STATUS_POLL_MODE 0xFF
/** /*
* MFI command opcodes * MFI command opcodes
*/ */
#define MFI_CMD_INIT 0x00 #define MFI_CMD_INIT 0x00
@ -128,7 +128,7 @@
#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 #define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
#define MR_DCMD_CLUSTER_RESET_LD 0x08010200 #define MR_DCMD_CLUSTER_RESET_LD 0x08010200
/** /*
* MFI command completion codes * MFI command completion codes
*/ */
enum MFI_STAT { enum MFI_STAT {

View file

@ -290,7 +290,6 @@ typedef struct _nsp_hw_data {
#endif #endif
} nsp_hw_data; } nsp_hw_data;
/**************************************************************************** /****************************************************************************
* *
*/ */
@ -302,22 +301,13 @@ static int nsp_cs_config (struct pcmcia_device *link);
/* Linux SCSI subsystem specific functions */ /* Linux SCSI subsystem specific functions */
static struct Scsi_Host *nsp_detect (struct scsi_host_template *sht); static struct Scsi_Host *nsp_detect (struct scsi_host_template *sht);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
static int nsp_detect_old (struct scsi_host_template *sht);
static int nsp_release_old(struct Scsi_Host *shpnt);
#endif
static const char *nsp_info (struct Scsi_Host *shpnt); static const char *nsp_info (struct Scsi_Host *shpnt);
static int nsp_proc_info ( static int nsp_proc_info (
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
struct Scsi_Host *host, struct Scsi_Host *host,
#endif
char *buffer, char *buffer,
char **start, char **start,
off_t offset, off_t offset,
int length, int length,
#if !(LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
int hostno,
#endif
int inout); int inout);
static int nsp_queuecommand(struct scsi_cmnd *SCpnt, static int nsp_queuecommand(struct scsi_cmnd *SCpnt,
void (* done)(struct scsi_cmnd *SCpnt)); void (* done)(struct scsi_cmnd *SCpnt));
@ -356,7 +346,6 @@ static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht);
static int __init nsp_cs_init(void); static int __init nsp_cs_init(void);
static void __exit nsp_cs_exit(void); static void __exit nsp_cs_exit(void);
/* Debug */ /* Debug */
#ifdef NSP_DEBUG #ifdef NSP_DEBUG
static void show_command (struct scsi_cmnd *SCpnt); static void show_command (struct scsi_cmnd *SCpnt);
@ -401,7 +390,6 @@ enum _burst_mode {
BURST_MEM32 = 2, BURST_MEM32 = 2,
}; };
/************************************************************************** /**************************************************************************
* SCSI message * SCSI message
*/ */
@ -413,62 +401,8 @@ enum _burst_mode {
#define MSG_EXT_SDTR 0x01 #define MSG_EXT_SDTR 0x01
/**************************************************************************
* Compatibility functions
*/
/* for Kernel 2.4 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
# define scsi_register_host(template) scsi_register_module(MODULE_SCSI_HA, template)
# define scsi_unregister_host(template) scsi_unregister_module(MODULE_SCSI_HA, template)
# define scsi_host_put(host) scsi_unregister(host)
typedef void irqreturn_t;
# define IRQ_NONE /* */
# define IRQ_HANDLED /* */
# define IRQ_RETVAL(x) /* */
/* This is ad-hoc version of scsi_host_get_next() */
static inline struct Scsi_Host *scsi_host_get_next(struct Scsi_Host *host)
{
if (host == NULL) {
return scsi_hostlist;
} else {
return host->next;
}
}
/* This is ad-hoc version of scsi_host_hn_get() */
static inline struct Scsi_Host *scsi_host_hn_get(unsigned short hostno)
{
struct Scsi_Host *host;
for (host = scsi_host_get_next(NULL); host != NULL;
host = scsi_host_get_next(host)) {
if (host->host_no == hostno) {
break;
}
}
return host;
}
static void cs_error(struct pcmcia_device *handle, int func, int ret)
{
error_info_t err = { func, ret };
pcmcia_report_error(handle, &err);
}
/* scatter-gather table */
# define BUFFER_ADDR (SCpnt->SCp.buffer->address)
#endif
/* for Kernel 2.6 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
/* scatter-gather table */ /* scatter-gather table */
# define BUFFER_ADDR ((char *)((unsigned int)(SCpnt->SCp.buffer->page) + SCpnt->SCp.buffer->offset)) # define BUFFER_ADDR ((char *)((unsigned int)(SCpnt->SCp.buffer->page) + SCpnt->SCp.buffer->offset))
#endif
#endif /*__nsp_cs__*/ #endif /*__nsp_cs__*/
/* end */ /* end */

View file

@ -140,6 +140,8 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf, loff_t off,
ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
return (count); return (count);
} }
@ -653,6 +655,43 @@ qla2x00_beacon_store(struct class_device *cdev, const char *buf,
return count; return count;
} }
static ssize_t
qla2x00_optrom_bios_version_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
ha->bios_revision[0]);
}
static ssize_t
qla2x00_optrom_efi_version_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
ha->efi_revision[0]);
}
static ssize_t
qla2x00_optrom_fcode_version_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
ha->fcode_revision[0]);
}
static ssize_t
qla2x00_optrom_fw_version_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
ha->fw_revision[3]);
}
static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show,
NULL); NULL);
static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
@ -669,6 +708,14 @@ static CLASS_DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
qla2x00_zio_timer_store); qla2x00_zio_timer_store);
static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, static CLASS_DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
qla2x00_beacon_store); qla2x00_beacon_store);
static CLASS_DEVICE_ATTR(optrom_bios_version, S_IRUGO,
qla2x00_optrom_bios_version_show, NULL);
static CLASS_DEVICE_ATTR(optrom_efi_version, S_IRUGO,
qla2x00_optrom_efi_version_show, NULL);
static CLASS_DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
qla2x00_optrom_fcode_version_show, NULL);
static CLASS_DEVICE_ATTR(optrom_fw_version, S_IRUGO,
qla2x00_optrom_fw_version_show, NULL);
struct class_device_attribute *qla2x00_host_attrs[] = { struct class_device_attribute *qla2x00_host_attrs[] = {
&class_device_attr_driver_version, &class_device_attr_driver_version,
@ -683,6 +730,10 @@ struct class_device_attribute *qla2x00_host_attrs[] = {
&class_device_attr_zio, &class_device_attr_zio,
&class_device_attr_zio_timer, &class_device_attr_zio_timer,
&class_device_attr_beacon, &class_device_attr_beacon,
&class_device_attr_optrom_bios_version,
&class_device_attr_optrom_efi_version,
&class_device_attr_optrom_fcode_version,
&class_device_attr_optrom_fw_version,
NULL, NULL,
}; };
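The optrom_* attributes registered above are read-only host attributes. A hedged userspace illustration of reading one of them (the host number and sysfs path are assumptions for the example, not taken from this patch):

	#include <stdio.h>

	int main(void)
	{
		char line[64];
		FILE *f = fopen("/sys/class/scsi_host/host0/optrom_bios_version", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			printf("BIOS option ROM version: %s", line);
		fclose(f);
		return 0;
	}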
@ -836,21 +887,24 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
link_stat_t stat_buf; link_stat_t stat_buf;
struct fc_host_statistics *pfc_host_stat; struct fc_host_statistics *pfc_host_stat;
rval = QLA_FUNCTION_FAILED;
pfc_host_stat = &ha->fc_host_stat; pfc_host_stat = &ha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf, rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
sizeof(stat_buf) / 4, mb_stat); sizeof(stat_buf) / 4, mb_stat);
} else { } else if (atomic_read(&ha->loop_state) == LOOP_READY &&
!test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) &&
!test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) &&
!ha->dpc_active) {
/* Must be in a 'READY' state for statistics retrieval. */
rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf, rval = qla2x00_get_link_status(ha, ha->loop_id, &stat_buf,
mb_stat); mb_stat);
} }
if (rval != 0) {
qla_printk(KERN_WARNING, ha, if (rval != QLA_SUCCESS)
"Unable to retrieve host statistics (%d).\n", mb_stat[0]); goto done;
return pfc_host_stat;
}
pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt; pfc_host_stat->link_failure_count = stat_buf.link_fail_cnt;
pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt; pfc_host_stat->loss_of_sync_count = stat_buf.loss_sync_cnt;
@ -858,7 +912,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt; pfc_host_stat->prim_seq_protocol_err_count = stat_buf.prim_seq_err_cnt;
pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt; pfc_host_stat->invalid_tx_word_count = stat_buf.inval_xmit_word_cnt;
pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt; pfc_host_stat->invalid_crc_count = stat_buf.inval_crc_cnt;
done:
return pfc_host_stat; return pfc_host_stat;
} }

View file

@ -2045,6 +2045,29 @@ struct isp_operations {
uint32_t, uint32_t); uint32_t, uint32_t);
int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t, int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t,
uint32_t); uint32_t);
int (*get_flash_version) (struct scsi_qla_host *, void *);
};
/* MSI-X Support *************************************************************/
#define QLA_MSIX_CHIP_REV_24XX 3
#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
#define QLA_MSIX_DEFAULT 0x00
#define QLA_MSIX_RSP_Q 0x01
#define QLA_MSIX_ENTRIES 2
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
struct scsi_qla_host;
struct qla_msix_entry {
int have_irq;
uint16_t msix_vector;
uint16_t msix_entry;
}; };
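A brief, hedged illustration of the QLA_MSIX_FW_MODE macros added above: the firmware mode is the 3-bit field at bits 7..9 of the firmware attributes word, and mode 1 indicates MSI-X capable firmware (BIT_n is assumed to expand to 1 << n, as elsewhere in the driver):

	#include <stdio.h>

	#define BIT_7 (1u << 7)
	#define BIT_8 (1u << 8)
	#define BIT_9 (1u << 9)

	/* Mirrors the macros above; standalone so it compiles outside the driver. */
	#define QLA_MSIX_FW_MODE(m)	(((m) & (BIT_7 | BIT_8 | BIT_9)) >> 7)
	#define QLA_MSIX_FW_MODE_1(m)	(QLA_MSIX_FW_MODE(m) == 1)

	int main(void)
	{
		unsigned int fw_attributes = BIT_7;	/* mode field == 1 */

		printf("fw mode=%u msix_mode_1=%d\n",
		    QLA_MSIX_FW_MODE(fw_attributes),
		    QLA_MSIX_FW_MODE_1(fw_attributes));
		return 0;
	}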
/* /*
@ -2077,6 +2100,7 @@ typedef struct scsi_qla_host {
uint32_t enable_lip_full_login :1; uint32_t enable_lip_full_login :1;
uint32_t enable_target_reset :1; uint32_t enable_target_reset :1;
uint32_t enable_led_scheme :1; uint32_t enable_led_scheme :1;
uint32_t inta_enabled :1;
uint32_t msi_enabled :1; uint32_t msi_enabled :1;
uint32_t msix_enabled :1; uint32_t msix_enabled :1;
uint32_t disable_serdes :1; uint32_t disable_serdes :1;
@ -2316,8 +2340,6 @@ typedef struct scsi_qla_host {
#define MBX_INTR_WAIT 2 #define MBX_INTR_WAIT 2
#define MBX_UPDATE_FLASH_ACTIVE 3 #define MBX_UPDATE_FLASH_ACTIVE 3
spinlock_t mbx_reg_lock; /* Mbx Cmd Register Lock */
struct semaphore mbx_cmd_sem; /* Serialize mbx access */ struct semaphore mbx_cmd_sem; /* Serialize mbx access */
struct semaphore mbx_intr_sem; /* Used for completion notification */ struct semaphore mbx_intr_sem; /* Used for completion notification */
@ -2358,6 +2380,7 @@ typedef struct scsi_qla_host {
uint8_t host_str[16]; uint8_t host_str[16];
uint32_t pci_attr; uint32_t pci_attr;
uint16_t chip_revision;
uint16_t product_id[4]; uint16_t product_id[4];
@ -2379,6 +2402,15 @@ typedef struct scsi_qla_host {
#define QLA_SREADING 1 #define QLA_SREADING 1
#define QLA_SWRITING 2 #define QLA_SWRITING 2
/* PCI expansion ROM image information. */
#define ROM_CODE_TYPE_BIOS 0
#define ROM_CODE_TYPE_FCODE 1
#define ROM_CODE_TYPE_EFI 3
uint8_t bios_revision[2];
uint8_t efi_revision[2];
uint8_t fcode_revision[16];
uint32_t fw_revision[4];
/* Needed for BEACON */ /* Needed for BEACON */
uint16_t beacon_blink_led; uint16_t beacon_blink_led;
uint8_t beacon_color_state; uint8_t beacon_color_state;
@ -2391,6 +2423,8 @@ typedef struct scsi_qla_host {
uint16_t zio_mode; uint16_t zio_mode;
uint16_t zio_timer; uint16_t zio_timer;
struct fc_host_statistics fc_host_stat; struct fc_host_statistics fc_host_stat;
struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES];
} scsi_qla_host_t; } scsi_qla_host_t;

View file

@ -224,6 +224,9 @@ extern irqreturn_t qla24xx_intr_handler(int, void *);
extern void qla2x00_process_response_queue(struct scsi_qla_host *); extern void qla2x00_process_response_queue(struct scsi_qla_host *);
extern void qla24xx_process_response_queue(struct scsi_qla_host *); extern void qla24xx_process_response_queue(struct scsi_qla_host *);
extern int qla2x00_request_irqs(scsi_qla_host_t *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
/* /*
* Global Function Prototypes in qla_sup.c source file. * Global Function Prototypes in qla_sup.c source file.
*/ */
@ -259,6 +262,9 @@ extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
uint32_t, uint32_t); uint32_t, uint32_t);
extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
/* /*
* Global Function Prototypes in qla_dbg.c source file. * Global Function Prototypes in qla_dbg.c source file.
*/ */

View file

@ -65,7 +65,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
ha->flags.reset_active = 0; ha->flags.reset_active = 0;
atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&ha->loop_state, LOOP_DOWN); atomic_set(&ha->loop_state, LOOP_DOWN);
ha->device_flags = 0; ha->device_flags = DFLG_NO_CABLE;
ha->dpc_flags = 0; ha->dpc_flags = 0;
ha->flags.management_server_logged_in = 0; ha->flags.management_server_logged_in = 0;
ha->marker_needed = 0; ha->marker_needed = 0;
@ -77,16 +77,23 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
rval = ha->isp_ops.pci_config(ha); rval = ha->isp_ops.pci_config(ha);
if (rval) { if (rval) {
DEBUG2(printk("scsi(%ld): Unable to configure PCI space=n", DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
ha->host_no)); ha->host_no));
return (rval); return (rval);
} }
ha->isp_ops.reset_chip(ha); ha->isp_ops.reset_chip(ha);
ha->isp_ops.get_flash_version(ha, ha->request_ring);
qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
ha->isp_ops.nvram_config(ha); rval = ha->isp_ops.nvram_config(ha);
if (rval) {
DEBUG2(printk("scsi(%ld): Unable to verify NVRAM data.\n",
ha->host_no));
return rval;
}
if (ha->flags.disable_serdes) { if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */ /* Mask HBA via NVRAM settings? */
@ -293,6 +300,8 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
d &= ~PCI_ROM_ADDRESS_ENABLE; d &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d); pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
pci_read_config_word(ha->pdev, PCI_REVISION_ID, &ha->chip_revision);
/* Get PCI bus information. */ /* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags); spin_lock_irqsave(&ha->hardware_lock, flags);
ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status); ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
@ -1351,6 +1360,39 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
return(rval); return(rval);
} }
static inline void
qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *def)
{
char *st, *en;
uint16_t index;
if (memcmp(model, BINZERO, len) != 0) {
strncpy(ha->model_number, model, len);
st = en = ha->model_number;
en += len - 1;
while (en > st) {
if (*en != 0x20 && *en != 0x00)
break;
*en-- = '\0';
}
index = (ha->pdev->subsystem_device & 0xff);
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES)
ha->model_desc = qla2x00_model_name[index * 2 + 1];
} else {
index = (ha->pdev->subsystem_device & 0xff);
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES) {
strcpy(ha->model_number,
qla2x00_model_name[index * 2]);
ha->model_desc = qla2x00_model_name[index * 2 + 1];
} else {
strcpy(ha->model_number, def);
}
}
}
/* /*
* NVRAM configuration for ISP 2xxx * NVRAM configuration for ISP 2xxx
* *
@ -1367,7 +1409,6 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
int int
qla2x00_nvram_config(scsi_qla_host_t *ha) qla2x00_nvram_config(scsi_qla_host_t *ha)
{ {
int rval;
uint8_t chksum = 0; uint8_t chksum = 0;
uint16_t cnt; uint16_t cnt;
uint8_t *dptr1, *dptr2; uint8_t *dptr1, *dptr2;
@ -1376,8 +1417,6 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
uint8_t *ptr = (uint8_t *)ha->request_ring; uint8_t *ptr = (uint8_t *)ha->request_ring;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
rval = QLA_SUCCESS;
/* Determine NVRAM starting address. */ /* Determine NVRAM starting address. */
ha->nvram_size = sizeof(nvram_t); ha->nvram_size = sizeof(nvram_t);
ha->nvram_base = 0; ha->nvram_base = 0;
@ -1401,55 +1440,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
"checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
nv->nvram_version); nv->nvram_version);
qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " return QLA_FUNCTION_FAILED;
"invalid -- WWPN) defaults.\n");
/*
* Set default initialization control block.
*/
memset(nv, 0, ha->nvram_size);
nv->parameter_block_version = ICB_VERSION;
if (IS_QLA23XX(ha)) {
nv->firmware_options[0] = BIT_2 | BIT_1;
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
nv->frame_payload_size = __constant_cpu_to_le16(2048);
nv->special_options[1] = BIT_7;
} else if (IS_QLA2200(ha)) {
nv->firmware_options[0] = BIT_2 | BIT_1;
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
nv->frame_payload_size = __constant_cpu_to_le16(1024);
} else if (IS_QLA2100(ha)) {
nv->firmware_options[0] = BIT_3 | BIT_1;
nv->firmware_options[1] = BIT_5;
nv->frame_payload_size = __constant_cpu_to_le16(1024);
}
nv->max_iocb_allocation = __constant_cpu_to_le16(256);
nv->execution_throttle = __constant_cpu_to_le16(16);
nv->retry_count = 8;
nv->retry_delay = 1;
nv->port_name[0] = 33;
nv->port_name[3] = 224;
nv->port_name[4] = 139;
nv->login_timeout = 4;
/*
* Set default host adapter parameters
*/
nv->host_p[1] = BIT_2;
nv->reset_delay = 5;
nv->port_down_retry_count = 8;
nv->max_luns_per_target = __constant_cpu_to_le16(8);
nv->link_down_timeout = 60;
rval = 1;
} }
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
@ -1489,33 +1480,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
strcpy(ha->model_number, "QLA2300"); strcpy(ha->model_number, "QLA2300");
} }
} else { } else {
if (rval == 0 && qla2x00_set_model_info(ha, nv->model_number,
memcmp(nv->model_number, BINZERO, sizeof(nv->model_number), "QLA23xx");
sizeof(nv->model_number)) != 0) {
char *st, *en;
strncpy(ha->model_number, nv->model_number,
sizeof(nv->model_number));
st = en = ha->model_number;
en += sizeof(nv->model_number) - 1;
while (en > st) {
if (*en != 0x20 && *en != 0x00)
break;
*en-- = '\0';
}
} else {
uint16_t index;
index = (ha->pdev->subsystem_device & 0xff);
if (index < QLA_MODEL_NAMES) {
strcpy(ha->model_number,
qla2x00_model_name[index * 2]);
ha->model_desc =
qla2x00_model_name[index * 2 + 1];
} else {
strcpy(ha->model_number, "QLA23xx");
}
}
} }
} else if (IS_QLA2200(ha)) { } else if (IS_QLA2200(ha)) {
nv->firmware_options[0] |= BIT_2; nv->firmware_options[0] |= BIT_2;
@ -1687,11 +1653,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
} }
} }
if (rval) { return QLA_SUCCESS;
DEBUG2_3(printk(KERN_WARNING
"scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
}
return (rval);
} }
static void static void
@ -3107,7 +3069,11 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
} }
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
ha->isp_ops.nvram_config(ha); ha->isp_ops.get_flash_version(ha, ha->request_ring);
rval = ha->isp_ops.nvram_config(ha);
if (rval)
goto isp_abort_retry;
if (!qla2x00_restart_isp(ha)) { if (!qla2x00_restart_isp(ha)) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
@ -3137,6 +3103,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
} }
} }
} else { /* failed the ISP abort */ } else { /* failed the ISP abort */
isp_abort_retry:
ha->flags.online = 1; ha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
if (ha->isp_abort_cnt == 0) { if (ha->isp_abort_cnt == 0) {
@ -3326,7 +3293,6 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
int int
qla24xx_nvram_config(scsi_qla_host_t *ha) qla24xx_nvram_config(scsi_qla_host_t *ha)
{ {
int rval;
struct init_cb_24xx *icb; struct init_cb_24xx *icb;
struct nvram_24xx *nv; struct nvram_24xx *nv;
uint32_t *dptr; uint32_t *dptr;
@ -3334,7 +3300,6 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
uint32_t chksum; uint32_t chksum;
uint16_t cnt; uint16_t cnt;
rval = QLA_SUCCESS;
icb = (struct init_cb_24xx *)ha->init_cb; icb = (struct init_cb_24xx *)ha->init_cb;
nv = (struct nvram_24xx *)ha->request_ring; nv = (struct nvram_24xx *)ha->request_ring;
@ -3367,51 +3332,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: " qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
"checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0], "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
le16_to_cpu(nv->nvram_version)); le16_to_cpu(nv->nvram_version));
qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " return QLA_FUNCTION_FAILED;
"invalid -- WWPN) defaults.\n");
/*
* Set default initialization control block.
*/
memset(nv, 0, ha->nvram_size);
nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
nv->version = __constant_cpu_to_le16(ICB_VERSION);
nv->frame_payload_size = __constant_cpu_to_le16(2048);
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
nv->exchange_count = __constant_cpu_to_le16(0);
nv->hard_address = __constant_cpu_to_le16(124);
nv->port_name[0] = 0x21;
nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
nv->port_name[2] = 0x00;
nv->port_name[3] = 0xe0;
nv->port_name[4] = 0x8b;
nv->port_name[5] = 0x1c;
nv->port_name[6] = 0x55;
nv->port_name[7] = 0x86;
nv->node_name[0] = 0x20;
nv->node_name[1] = 0x00;
nv->node_name[2] = 0x00;
nv->node_name[3] = 0xe0;
nv->node_name[4] = 0x8b;
nv->node_name[5] = 0x1c;
nv->node_name[6] = 0x55;
nv->node_name[7] = 0x86;
nv->login_retry_count = __constant_cpu_to_le16(8);
nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
nv->login_timeout = __constant_cpu_to_le16(0);
nv->firmware_options_1 =
__constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
nv->efi_parameters = __constant_cpu_to_le32(0);
nv->reset_delay = 5;
nv->max_luns_per_target = __constant_cpu_to_le16(128);
nv->port_down_retry_count = __constant_cpu_to_le16(30);
nv->link_down_timeout = __constant_cpu_to_le16(30);
rval = 1;
} }
/* Reset Initialization control block */ /* Reset Initialization control block */
@ -3438,25 +3359,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
/* /*
* Setup driver NVRAM options. * Setup driver NVRAM options.
*/ */
if (memcmp(nv->model_name, BINZERO, sizeof(nv->model_name)) != 0) { qla2x00_set_model_info(ha, nv->model_name, sizeof(nv->model_name),
char *st, *en; "QLA2462");
uint16_t index;
strncpy(ha->model_number, nv->model_name,
sizeof(nv->model_name));
st = en = ha->model_number;
en += sizeof(nv->model_name) - 1;
while (en > st) {
if (*en != 0x20 && *en != 0x00)
break;
*en-- = '\0';
}
index = (ha->pdev->subsystem_device & 0xff);
if (index < QLA_MODEL_NAMES)
ha->model_desc = qla2x00_model_name[index * 2 + 1];
} else
strcpy(ha->model_number, "QLA2462");
/* Use alternate WWN? */ /* Use alternate WWN? */
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
@ -3575,11 +3479,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
ha->flags.process_response_queue = 1; ha->flags.process_response_queue = 1;
} }
if (rval) { return QLA_SUCCESS;
DEBUG2_3(printk(KERN_WARNING
"scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
}
return (rval);
} }
static int static int

View file

@ -86,12 +86,8 @@ qla2100_intr_handler(int irq, void *dev_id)
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) { (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
spin_lock_irqsave(&ha->mbx_reg_lock, flags);
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
up(&ha->mbx_intr_sem); up(&ha->mbx_intr_sem);
spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
} }
return (IRQ_HANDLED); return (IRQ_HANDLED);
@ -199,12 +195,8 @@ qla2300_intr_handler(int irq, void *dev_id)
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) { (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
spin_lock_irqsave(&ha->mbx_reg_lock, flags);
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
up(&ha->mbx_intr_sem); up(&ha->mbx_intr_sem);
spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
} }
return (IRQ_HANDLED); return (IRQ_HANDLED);
@ -654,10 +646,8 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
fcport->last_queue_full + ql2xqfullrampup * HZ)) fcport->last_queue_full + ql2xqfullrampup * HZ))
return; return;
spin_unlock_irq(&ha->hardware_lock);
starget_for_each_device(sdev->sdev_target, fcport, starget_for_each_device(sdev->sdev_target, fcport,
qla2x00_adjust_sdev_qdepth_up); qla2x00_adjust_sdev_qdepth_up);
spin_lock_irq(&ha->hardware_lock);
} }
/** /**
@ -927,10 +917,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
/* Adjust queue depth for all luns on the port. */ /* Adjust queue depth for all luns on the port. */
fcport->last_queue_full = jiffies; fcport->last_queue_full = jiffies;
spin_unlock_irq(&ha->hardware_lock);
starget_for_each_device(cp->device->sdev_target, starget_for_each_device(cp->device->sdev_target,
fcport, qla2x00_adjust_sdev_qdepth_down); fcport, qla2x00_adjust_sdev_qdepth_down);
spin_lock_irq(&ha->hardware_lock);
break; break;
} }
if (lscsi_status != SS_CHECK_CONDITION) if (lscsi_status != SS_CHECK_CONDITION)
@ -995,6 +983,22 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
if (lscsi_status != 0) { if (lscsi_status != 0) {
cp->result = DID_OK << 16 | lscsi_status; cp->result = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
DEBUG2(printk(KERN_INFO
"scsi(%ld): QUEUE FULL status detected "
"0x%x-0x%x.\n", ha->host_no, comp_status,
scsi_status));
/*
* Adjust queue depth for all luns on the
* port.
*/
fcport->last_queue_full = jiffies;
starget_for_each_device(
cp->device->sdev_target, fcport,
qla2x00_adjust_sdev_qdepth_down);
break;
}
if (lscsi_status != SS_CHECK_CONDITION) if (lscsi_status != SS_CHECK_CONDITION)
break; break;
@ -1482,12 +1486,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) { (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
spin_lock_irqsave(&ha->mbx_reg_lock, flags);
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
up(&ha->mbx_intr_sem); up(&ha->mbx_intr_sem);
spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
} }
return IRQ_HANDLED; return IRQ_HANDLED;
@ -1536,3 +1536,216 @@ qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
qla2x00_sp_compl(ha, sp); qla2x00_sp_compl(ha, sp);
} }
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
scsi_qla_host_t *ha;
struct device_reg_24xx __iomem *reg;
unsigned long flags;
ha = dev_id;
reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
qla24xx_process_response_queue(ha);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
}
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
scsi_qla_host_t *ha;
struct device_reg_24xx __iomem *reg;
int status;
unsigned long flags;
unsigned long iter;
uint32_t stat;
uint32_t hccr;
uint16_t mb[4];
ha = dev_id;
reg = &ha->iobase->isp24;
status = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
hccr = RD_REG_DWORD(&reg->hccr);
qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
"Dumping firmware!\n", hccr);
ha->isp_ops.fw_dump(ha, 1);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
break;
switch (stat & 0xff) {
case 0x1:
case 0x2:
case 0x10:
case 0x11:
qla24xx_mbx_completion(ha, MSW(stat));
status |= MBX_INTERRUPT;
break;
case 0x12:
mb[0] = MSW(stat);
mb[1] = RD_REG_WORD(&reg->mailbox1);
mb[2] = RD_REG_WORD(&reg->mailbox2);
mb[3] = RD_REG_WORD(&reg->mailbox3);
qla2x00_async_event(ha, mb);
break;
case 0x13:
qla24xx_process_response_queue(ha);
break;
default:
DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
"(%d).\n",
ha->host_no, stat & 0xff));
break;
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
up(&ha->mbx_intr_sem);
}
return IRQ_HANDLED;
}
/* Interrupt handling helpers. */
struct qla_init_msix_entry {
uint16_t entry;
uint16_t index;
const char *name;
irqreturn_t (*handler)(int, void *);
};
static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
"qla2xxx (default)", qla24xx_msix_default },
{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
"qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
};
static void
qla24xx_disable_msix(scsi_qla_host_t *ha)
{
int i;
struct qla_msix_entry *qentry;
for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
qentry = &ha->msix_entries[imsix_entries[i].index];
if (qentry->have_irq)
free_irq(qentry->msix_vector, ha);
}
pci_disable_msix(ha->pdev);
}
static int
qla24xx_enable_msix(scsi_qla_host_t *ha)
{
int i, ret;
struct msix_entry entries[QLA_MSIX_ENTRIES];
struct qla_msix_entry *qentry;
for (i = 0; i < QLA_MSIX_ENTRIES; i++)
entries[i].entry = imsix_entries[i].entry;
ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
if (ret) {
qla_printk(KERN_WARNING, ha,
"MSI-X: Failed to enable support -- %d/%d\n",
QLA_MSIX_ENTRIES, ret);
goto msix_out;
}
ha->flags.msix_enabled = 1;
for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
qentry = &ha->msix_entries[imsix_entries[i].index];
qentry->msix_vector = entries[i].vector;
qentry->msix_entry = entries[i].entry;
qentry->have_irq = 0;
ret = request_irq(qentry->msix_vector,
imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
if (ret) {
qla_printk(KERN_WARNING, ha,
"MSI-X: Unable to register handler -- %x/%d.\n",
imsix_entries[i].index, ret);
qla24xx_disable_msix(ha);
goto msix_out;
}
qentry->have_irq = 1;
}
msix_out:
return ret;
}
int
qla2x00_request_irqs(scsi_qla_host_t *ha)
{
int ret;
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha))
goto skip_msix;
if (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
!QLA_MSIX_FW_MODE_1(ha->fw_attributes)) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
ha->chip_revision, ha->fw_attributes));
goto skip_msix;
}
ret = qla24xx_enable_msix(ha);
if (!ret) {
DEBUG2(qla_printk(KERN_INFO, ha,
"MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
ha->fw_attributes));
return ret;
}
qla_printk(KERN_WARNING, ha,
"MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msix:
ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler,
IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
if (!ret) {
ha->flags.inta_enabled = 1;
ha->host->irq = ha->pdev->irq;
} else {
qla_printk(KERN_WARNING, ha,
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
}
return ret;
}
void
qla2x00_free_irqs(scsi_qla_host_t *ha)
{
if (ha->flags.msix_enabled)
qla24xx_disable_msix(ha);
else if (ha->flags.inta_enabled)
free_irq(ha->host->irq, ha);
}

View file

@ -55,7 +55,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
uint16_t __iomem *optr; uint16_t __iomem *optr;
uint32_t cnt; uint32_t cnt;
uint32_t mboxes; uint32_t mboxes;
unsigned long mbx_flags = 0;
unsigned long wait_time; unsigned long wait_time;
rval = QLA_SUCCESS; rval = QLA_SUCCESS;
@ -81,10 +80,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
/* Save mailbox command for debug */ /* Save mailbox command for debug */
ha->mcp = mcp; ha->mcp = mcp;
/* Try to get mailbox register access */
if (!abort_active)
spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags);
DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
ha->host_no, mcp->mb[0])); ha->host_no, mcp->mb[0]));
@ -161,9 +156,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!abort_active)
spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags);
/* Wait for either the timer to expire /* Wait for either the timer to expire
* or the mbox completion interrupt * or the mbox completion interrupt
*/ */
@ -184,8 +176,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
else else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT); WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!abort_active)
spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags);
wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
while (!ha->flags.mbox_int) { while (!ha->flags.mbox_int) {
@ -201,9 +191,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
} /* while */ } /* while */
} }
if (!abort_active)
spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags);
/* Check whether we timed out */ /* Check whether we timed out */
if (ha->flags.mbox_int) { if (ha->flags.mbox_int) {
uint16_t *iptr2; uint16_t *iptr2;
@ -256,9 +243,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
rval = QLA_FUNCTION_TIMEOUT; rval = QLA_FUNCTION_TIMEOUT;
} }
if (!abort_active)
spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags);
ha->flags.mbox_busy = 0; ha->flags.mbox_busy = 0;
/* Clean up */ /* Clean up */
@ -1713,7 +1697,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->entry_count = 1; lg->entry_count = 1;
lg->nport_handle = cpu_to_le16(loop_id); lg->nport_handle = cpu_to_le16(loop_id);
lg->control_flags = lg->control_flags =
__constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_EXPL_LOGO); __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
lg->port_id[0] = al_pa; lg->port_id[0] = al_pa;
lg->port_id[1] = area; lg->port_id[1] = area;
lg->port_id[2] = domain; lg->port_id[2] = domain;

View file

@ -1485,6 +1485,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->isp_ops.fw_dump = qla2100_fw_dump; ha->isp_ops.fw_dump = qla2100_fw_dump;
ha->isp_ops.read_optrom = qla2x00_read_optrom_data; ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
ha->isp_ops.write_optrom = qla2x00_write_optrom_data; ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
ha->isp_ops.get_flash_version = qla2x00_get_flash_version;
if (IS_QLA2100(ha)) { if (IS_QLA2100(ha)) {
host->max_id = MAX_TARGETS_2100; host->max_id = MAX_TARGETS_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
@ -1550,6 +1551,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->isp_ops.beacon_on = qla24xx_beacon_on; ha->isp_ops.beacon_on = qla24xx_beacon_on;
ha->isp_ops.beacon_off = qla24xx_beacon_off; ha->isp_ops.beacon_off = qla24xx_beacon_off;
ha->isp_ops.beacon_blink = qla24xx_beacon_blink; ha->isp_ops.beacon_blink = qla24xx_beacon_blink;
ha->isp_ops.get_flash_version = qla24xx_get_flash_version;
ha->gid_list_info_size = 8; ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_24XX; ha->optrom_size = OPTROM_SIZE_24XX;
} }
@ -1564,14 +1566,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ha->list); INIT_LIST_HEAD(&ha->list);
INIT_LIST_HEAD(&ha->fcports); INIT_LIST_HEAD(&ha->fcports);
/*
* These locks are used to prevent more than one CPU
* from modifying the queue at the same time. The
* higher level "host_lock" will reduce most
* contention for these locks.
*/
spin_lock_init(&ha->mbx_reg_lock);
qla2x00_config_dma_addressing(ha); qla2x00_config_dma_addressing(ha);
if (qla2x00_mem_alloc(ha)) { if (qla2x00_mem_alloc(ha)) {
qla_printk(KERN_WARNING, ha, qla_printk(KERN_WARNING, ha,
@ -1615,15 +1609,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_lun = MAX_LUNS; host->max_lun = MAX_LUNS;
host->transportt = qla2xxx_transport_template; host->transportt = qla2xxx_transport_template;
ret = request_irq(pdev->irq, ha->isp_ops.intr_handler, ret = qla2x00_request_irqs(ha);
IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); if (ret)
if (ret) {
qla_printk(KERN_WARNING, ha,
"Failed to reserve interrupt %d already in use.\n",
pdev->irq);
goto probe_failed; goto probe_failed;
}
host->irq = pdev->irq;
/* Initialized the timer */ /* Initialized the timer */
qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL);
@ -1753,9 +1741,7 @@ qla2x00_free_device(scsi_qla_host_t *ha)
qla2x00_mem_free(ha); qla2x00_mem_free(ha);
/* Detach interrupts */ qla2x00_free_irqs(ha);
if (ha->host->irq)
free_irq(ha->host->irq, ha);
/* release io space registers */ /* release io space registers */
if (ha->iobase) if (ha->iobase)

View file

@ -611,7 +611,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
flash_conf_to_access_addr(0x0339), flash_conf_to_access_addr(0x0339),
(fdata & 0xff00) | ((fdata << 16) & (fdata & 0xff00) | ((fdata << 16) &
0xff0000) | ((fdata >> 16) & 0xff)); 0xff0000) | ((fdata >> 16) & 0xff));
fdata = (faddr & sec_mask) << 2;
ret = qla24xx_write_flash_dword(ha, conf_addr, ret = qla24xx_write_flash_dword(ha, conf_addr,
(fdata & 0xff00) |((fdata << 16) & (fdata & 0xff00) |((fdata << 16) &
0xff0000) | ((fdata >> 16) & 0xff)); 0xff0000) | ((fdata >> 16) & 0xff));
@ -1383,6 +1382,29 @@ qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
qla2x00_write_flash_byte(ha, 0x5555, 0xf0); qla2x00_write_flash_byte(ha, 0x5555, 0xf0);
} }
static void
qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
uint32_t length)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t midpoint, ilength;
uint8_t data;
midpoint = length / 2;
WRT_REG_WORD(&reg->nvram, 0);
RD_REG_WORD(&reg->nvram);
for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) {
if (ilength == midpoint) {
WRT_REG_WORD(&reg->nvram, NVR_SELECT);
RD_REG_WORD(&reg->nvram);
}
data = qla2x00_read_flash_byte(ha, saddr);
if (saddr % 100)
udelay(10);
*tmp_buf = data;
}
}
static inline void static inline void
qla2x00_suspend_hba(struct scsi_qla_host *ha) qla2x00_suspend_hba(struct scsi_qla_host *ha)
@ -1722,3 +1744,327 @@ qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
return rval; return rval;
} }
/**
* qla2x00_get_fcode_version() - Determine an FCODE image's version.
* @ha: HA context
* @pcids: Pointer to the FCODE PCI data structure
*
* The process of retrieving the FCODE version information is at best
* described as interesting.
*
* Within the first 100h bytes of the image an ASCII string is present
* which contains several pieces of information including the FCODE
* version. Unfortunately it seems the only reliable way to retrieve
* the version is by scanning for another sentinel within the string,
* the FCODE build date:
*
* ... 2.00.02 10/17/02 ...
*
* Returns QLA_SUCCESS on successful retrieval of version.
*/
static void
qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids)
{
int ret = QLA_FUNCTION_FAILED;
uint32_t istart, iend, iter, vend;
uint8_t do_next, rbyte, *vbyte;
memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
/* Skip the PCI data structure. */
istart = pcids +
((qla2x00_read_flash_byte(ha, pcids + 0x0B) << 8) |
qla2x00_read_flash_byte(ha, pcids + 0x0A));
iend = istart + 0x100;
do {
/* Scan for the sentinel date string...eeewww. */
do_next = 0;
iter = istart;
while ((iter < iend) && !do_next) {
iter++;
if (qla2x00_read_flash_byte(ha, iter) == '/') {
if (qla2x00_read_flash_byte(ha, iter + 2) ==
'/')
do_next++;
else if (qla2x00_read_flash_byte(ha,
iter + 3) == '/')
do_next++;
}
}
if (!do_next)
break;
/* Backtrack to previous ' ' (space). */
do_next = 0;
while ((iter > istart) && !do_next) {
iter--;
if (qla2x00_read_flash_byte(ha, iter) == ' ')
do_next++;
}
if (!do_next)
break;
/*
* Mark end of version tag, and find previous ' ' (space) or
* string length (recent FCODE images -- major hack ahead!!!).
*/
vend = iter - 1;
do_next = 0;
while ((iter > istart) && !do_next) {
iter--;
rbyte = qla2x00_read_flash_byte(ha, iter);
if (rbyte == ' ' || rbyte == 0xd || rbyte == 0x10)
do_next++;
}
if (!do_next)
break;
/* Mark beginning of version tag, and copy data. */
iter++;
if ((vend - iter) &&
((vend - iter) < sizeof(ha->fcode_revision))) {
vbyte = ha->fcode_revision;
while (iter <= vend) {
*vbyte++ = qla2x00_read_flash_byte(ha, iter);
iter++;
}
ret = QLA_SUCCESS;
}
} while (0);
if (ret != QLA_SUCCESS)
memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
}
int
qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
{
int ret = QLA_SUCCESS;
uint8_t code_type, last_image;
uint32_t pcihdr, pcids;
uint8_t *dbyte;
uint16_t *dcode;
if (!ha->pio_address || !mbuf)
return QLA_FUNCTION_FAILED;
memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
qla2x00_flash_enable(ha);
/* Begin with first PCI expansion ROM header. */
pcihdr = 0;
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
/* No signature */
DEBUG2(printk("scsi(%ld): No matching ROM "
"signature.\n", ha->host_no));
ret = QLA_FUNCTION_FAILED;
break;
}
/* Locate PCI data structure. */
pcids = pcihdr +
((qla2x00_read_flash_byte(ha, pcihdr + 0x19) << 8) |
qla2x00_read_flash_byte(ha, pcihdr + 0x18));
/* Validate signature of PCI data structure. */
if (qla2x00_read_flash_byte(ha, pcids) != 'P' ||
qla2x00_read_flash_byte(ha, pcids + 0x1) != 'C' ||
qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
/* Incorrect header. */
DEBUG2(printk("%s(): PCI data struct not found "
"pcir_adr=%x.\n", __func__, pcids));
ret = QLA_FUNCTION_FAILED;
break;
}
/* Read version */
code_type = qla2x00_read_flash_byte(ha, pcids + 0x14);
switch (code_type) {
case ROM_CODE_TYPE_BIOS:
/* Intel x86, PC-AT compatible. */
ha->bios_revision[0] =
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->bios_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__,
ha->bios_revision[1], ha->bios_revision[0]));
break;
case ROM_CODE_TYPE_FCODE:
/* Open Firmware standard for PCI (FCode). */
/* Eeeewww... */
qla2x00_get_fcode_version(ha, pcids);
break;
case ROM_CODE_TYPE_EFI:
/* Extensible Firmware Interface (EFI). */
ha->efi_revision[0] =
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->efi_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__,
ha->efi_revision[1], ha->efi_revision[0]));
break;
default:
DEBUG2(printk("%s(): Unrecognized code type %x at "
"pcids %x.\n", __func__, code_type, pcids));
break;
}
last_image = qla2x00_read_flash_byte(ha, pcids + 0x15) & BIT_7;
/* Locate next PCI expansion ROM. */
pcihdr += ((qla2x00_read_flash_byte(ha, pcids + 0x11) << 8) |
qla2x00_read_flash_byte(ha, pcids + 0x10)) * 512;
} while (!last_image);
if (IS_QLA2322(ha)) {
/* Read firmware image information. */
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dbyte = mbuf;
memset(dbyte, 0, 8);
dcode = (uint16_t *)dbyte;
qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10,
8);
DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
__func__, ha->host_no));
DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
dcode[2] == 0xffff && dcode[3] == 0xffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(printk("%s(): Unrecognized fw revision at "
"%x.\n", __func__, FA_RISC_CODE_ADDR * 4));
} else {
/* values are in big endian */
ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
}
}
qla2x00_flash_disable(ha);
return ret;
}
int
qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
{
int ret = QLA_SUCCESS;
uint32_t pcihdr, pcids;
uint32_t *dcode;
uint8_t *bcode;
uint8_t code_type, last_image;
int i;
if (!mbuf)
return QLA_FUNCTION_FAILED;
memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
/* Begin with first PCI expansion ROM header. */
pcihdr = 0;
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
bcode = mbuf + (pcihdr % 4);
if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
/* No signature */
DEBUG2(printk("scsi(%ld): No matching ROM "
"signature.\n", ha->host_no));
ret = QLA_FUNCTION_FAILED;
break;
}
/* Locate PCI data structure. */
pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
bcode = mbuf + (pcihdr % 4);
/* Validate signature of PCI data structure. */
if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
bcode[0x2] != 'I' || bcode[0x3] != 'R') {
/* Incorrect header. */
DEBUG2(printk("%s(): PCI data struct not found "
"pcir_adr=%x.\n", __func__, pcids));
ret = QLA_FUNCTION_FAILED;
break;
}
/* Read version */
code_type = bcode[0x14];
switch (code_type) {
case ROM_CODE_TYPE_BIOS:
/* Intel x86, PC-AT compatible. */
ha->bios_revision[0] = bcode[0x12];
ha->bios_revision[1] = bcode[0x13];
DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__,
ha->bios_revision[1], ha->bios_revision[0]));
break;
case ROM_CODE_TYPE_FCODE:
/* Open Firmware standard for PCI (FCode). */
ha->fcode_revision[0] = bcode[0x12];
ha->fcode_revision[1] = bcode[0x13];
DEBUG3(printk("%s(): read FCODE %d.%d.\n", __func__,
ha->fcode_revision[1], ha->fcode_revision[0]));
break;
case ROM_CODE_TYPE_EFI:
/* Extensible Firmware Interface (EFI). */
ha->efi_revision[0] = bcode[0x12];
ha->efi_revision[1] = bcode[0x13];
DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__,
ha->efi_revision[1], ha->efi_revision[0]));
break;
default:
DEBUG2(printk("%s(): Unrecognized code type %x at "
"pcids %x.\n", __func__, code_type, pcids));
break;
}
last_image = bcode[0x15] & BIT_7;
/* Locate next PCI expansion ROM. */
pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
} while (!last_image);
/* Read firmware image information. */
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4);
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(dcode[i]);
if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
__func__, FA_RISC_CODE_ADDR));
} else {
ha->fw_revision[0] = dcode[0];
ha->fw_revision[1] = dcode[1];
ha->fw_revision[2] = dcode[2];
ha->fw_revision[3] = dcode[3];
}
return ret;
}
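Both qla2x00_get_flash_version() and qla24xx_get_flash_version() above walk the standard PCI expansion ROM chain: each image starts with the 0x55AA signature, its "PCIR" data structure carries the code type at offset 0x14 and the revision at 0x12-0x13, the image length in 512-byte units sits at 0x10, and bit 7 of the indicator byte at 0x15 marks the last image. The following stand-alone sketch is not driver code; the helper name and file handling are made up for illustration, and it performs the same walk over a ROM image already read into memory:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative helper (not part of qla2xxx): scan a PCI expansion ROM image
 * in memory and print each image's code type and revision, the same fields
 * the driver reads byte by byte from flash. */
static void walk_pci_rom(const uint8_t *rom, size_t len)
{
	size_t hdr = 0;
	int last_image = 0;

	do {
		size_t pcids;

		/* Each image begins with the 0x55AA ROM signature. */
		if (hdr + 0x1a > len || rom[hdr] != 0x55 || rom[hdr + 1] != 0xaa) {
			printf("no ROM signature at 0x%zx\n", hdr);
			return;
		}
		/* Little-endian pointer to the PCI data structure. */
		pcids = hdr + (rom[hdr + 0x18] | (rom[hdr + 0x19] << 8));
		if (pcids + 0x18 > len || memcmp(rom + pcids, "PCIR", 4) != 0) {
			printf("PCI data struct not found for image at 0x%zx\n", hdr);
			return;
		}
		printf("image 0x%zx: code type %u, revision %u.%u\n", hdr,
		       rom[pcids + 0x14], rom[pcids + 0x13], rom[pcids + 0x12]);

		last_image = rom[pcids + 0x15] & 0x80;	/* bit 7: last image */
		/* Image length at 0x10, in units of 512 bytes. */
		hdr += (size_t)(rom[pcids + 0x10] | (rom[pcids + 0x11] << 8)) * 512;
	} while (!last_image && hdr < len);
}

int main(int argc, char **argv)
{
	static uint8_t rom[1 << 20];
	size_t len;
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb"))) {
		fprintf(stderr, "usage: %s <rom-image>\n", argv[0]);
		return 1;
	}
	len = fread(rom, 1, sizeof(rom), f);
	fclose(f);
	walk_pci_rom(rom, len);
	return 0;
}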


@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.01.07-k4"
+#define QLA2XXX_VERSION      "8.01.07-k5"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	1


@@ -672,27 +672,6 @@ void __scsi_done(struct scsi_cmnd *cmd)
 	blk_complete_request(rq);
 }
 
-/*
- * Function:    scsi_retry_command
- *
- * Purpose:     Send a command back to the low level to be retried.
- *
- * Notes:       This command is always executed in the context of the
- *              bottom half handler, or the error handler thread. Low
- *              level drivers should not become re-entrant as a result of
- *              this.
- */
-int scsi_retry_command(struct scsi_cmnd *cmd)
-{
-	/*
-	 * Zero the sense information from the last time we tried
-	 * this command.
-	 */
-	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
-
-	return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
-}
-
 /*
  * Function:    scsi_finish_command
  *


@@ -51,10 +51,10 @@
 #include "scsi_logging.h"
 #include "scsi_debug.h"
 
-#define SCSI_DEBUG_VERSION "1.80"
-static const char * scsi_debug_version_date = "20061018";
+#define SCSI_DEBUG_VERSION "1.81"
+static const char * scsi_debug_version_date = "20070104";
 
-/* Additional Sense Code (ASC) used */
+/* Additional Sense Code (ASC) */
 #define NO_ADDITIONAL_SENSE 0x0
 #define LOGICAL_UNIT_NOT_READY 0x4
 #define UNRECOVERED_READ_ERR 0x11
@@ -65,9 +65,13 @@ static const char * scsi_debug_version_date = "20061018";
 #define INVALID_FIELD_IN_PARAM_LIST 0x26
 #define POWERON_RESET 0x29
 #define SAVING_PARAMS_UNSUP 0x39
+#define TRANSPORT_PROBLEM 0x4b
 #define THRESHOLD_EXCEEDED 0x5d
 #define LOW_POWER_COND_ON 0x5e
 
+/* Additional Sense Code Qualifier (ASCQ) */
+#define ACK_NAK_TO 0x3
+
 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
 
 /* Default values for driver parameters */
@@ -95,15 +99,20 @@ static const char * scsi_debug_version_date = "20061018";
 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
 #define SCSI_DEBUG_OPT_TIMEOUT 4
 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
+#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
 /* When "every_nth" > 0 then modulo "every_nth" commands:
  *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
  *   - a RECOVERED_ERROR is simulated on successful read and write
  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
+ *   - a TRANSPORT_ERROR is simulated on successful read and write
+ *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
  *
  * When "every_nth" < 0 then after "- every_nth" commands:
  *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
  *   - a RECOVERED_ERROR is simulated on successful read and write
  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
+ *   - a TRANSPORT_ERROR is simulated on successful read and write
+ *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
  * This will continue until some other action occurs (e.g. the user
  * writing a new value (other than -1 or 1) to every_nth via sysfs).
  */
@@ -315,6 +324,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
 	int target = SCpnt->device->id;
 	struct sdebug_dev_info * devip = NULL;
 	int inj_recovered = 0;
+	int inj_transport = 0;
 	int delay_override = 0;
 
 	if (done == NULL)
@@ -352,6 +362,8 @@
 			return 0; /* ignore command causing timeout */
 		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
 			inj_recovered = 1; /* to reads and writes below */
+		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
+			inj_transport = 1; /* to reads and writes below */
 	}
 
 	if (devip->wlun) {
@@ -468,7 +480,11 @@
 			mk_sense_buffer(devip, RECOVERED_ERROR,
 					THRESHOLD_EXCEEDED, 0);
 			errsts = check_condition_result;
-		}
+		} else if (inj_transport && (0 == errsts)) {
+			mk_sense_buffer(devip, ABORTED_COMMAND,
+					TRANSPORT_PROBLEM, ACK_NAK_TO);
+			errsts = check_condition_result;
+		}
 		break;
 	case REPORT_LUNS:	/* mandatory, ignore unit attention */
 		delay_override = 1;
@@ -531,6 +547,9 @@
 		delay_override = 1;
 		errsts = check_readiness(SCpnt, 0, devip);
 		break;
+	case WRITE_BUFFER:
+		errsts = check_readiness(SCpnt, 1, devip);
+		break;
 	default:
 		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
 			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
@@ -954,7 +973,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
 	int alloc_len, n, ret;
 
 	alloc_len = (cmd[3] << 8) + cmd[4];
-	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_KERNEL);
+	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
+	if (! arr)
+		return DID_REQUEUE << 16;
 	if (devip->wlun)
 		pq_pdt = 0x1e;	/* present, wlun */
 	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
@@ -1217,7 +1238,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp,
 	alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
 		+ cmd[9]);
 
-	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_KERNEL);
+	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
+	if (! arr)
+		return DID_REQUEUE << 16;
 	/*
 	 * EVPD page 0x88 states we have two ports, one
 	 * real and a fake port with no device connected.
@@ -1996,6 +2019,8 @@ static int scsi_debug_slave_configure(struct scsi_device * sdp)
 	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
 		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
 	devip = devInfoReg(sdp);
+	if (NULL == devip)
+		return 1;	/* no resources, will be marked offline */
 	sdp->hostdata = devip;
 	if (sdp->host->cmd_per_lun)
 		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
@@ -2044,7 +2069,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
 		}
 	}
 	if (NULL == open_devip) { /* try and make a new one */
-		open_devip = kzalloc(sizeof(*open_devip),GFP_KERNEL);
+		open_devip = kzalloc(sizeof(*open_devip),GFP_ATOMIC);
 		if (NULL == open_devip) {
 			printk(KERN_ERR "%s: out of memory at line %d\n",
 				__FUNCTION__, __LINE__);
@@ -2388,7 +2413,7 @@ MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
-MODULE_PARM_DESC(opts, "1->noise, 2->medium_error, 4->... (def=0)");
+MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
@@ -2943,7 +2968,6 @@ static int sdebug_add_adapter(void)
 	struct list_head *lh, *lh_sf;
 
 	sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
-
 	if (NULL == sdbg_host) {
 		printk(KERN_ERR "%s: out of memory at line %d\n",
 			__FUNCTION__, __LINE__);
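For reference, the new SCSI_DEBUG_OPT_TRANSPORT_ERR bit (value 16) is selected through the same opts parameter as the existing error-injection bits, and the comment block above notes that every_nth can be changed at runtime via sysfs. A minimal userspace sketch, assuming the usual module-parameter location under /sys/module/scsi_debug/parameters/ (the path and permissions may differ on a given system):

#include <stdio.h>

/* Sketch only: enable transport-error injection (opts bit 16) for every
 * 100th read/write handled by scsi_debug.  Paths are assumptions; adjust
 * them if the module is not loaded or exposes its parameters elsewhere. */
static int write_param(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	if (write_param("/sys/module/scsi_debug/parameters/every_nth", "100"))
		return 1;
	return write_param("/sys/module/scsi_debug/parameters/opts", "16") ? 1 : 0;
}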


@@ -359,6 +359,11 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 		return SUCCESS;
 
 	case MEDIUM_ERROR:
+		if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
+		    sshdr.asc == 0x13 || /* AMNF DATA FIELD */
+		    sshdr.asc == 0x14) { /* RECORD NOT FOUND */
+			return SUCCESS;
+		}
 		return NEEDS_RETRY;
 
 	case HARDWARE_ERROR:
@@ -452,6 +457,128 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
 	complete(eh_action);
 }
 
+/**
+ * scsi_try_host_reset - ask host adapter to reset itself
+ * @scmd:	SCSI cmd to send hsot reset.
+ **/
+static int scsi_try_host_reset(struct scsi_cmnd *scmd)
+{
+	unsigned long flags;
+	int rtn;
+
+	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
+					  __FUNCTION__));
+
+	if (!scmd->device->host->hostt->eh_host_reset_handler)
+		return FAILED;
+
+	rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);
+
+	if (rtn == SUCCESS) {
+		if (!scmd->device->host->hostt->skip_settle_delay)
+			ssleep(HOST_RESET_SETTLE_TIME);
+		spin_lock_irqsave(scmd->device->host->host_lock, flags);
+		scsi_report_bus_reset(scmd->device->host,
+				      scmd_channel(scmd));
+		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
+	}
+
+	return rtn;
+}
+
+/**
+ * scsi_try_bus_reset - ask host to perform a bus reset
+ * @scmd:	SCSI cmd to send bus reset.
+ **/
+static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
+{
+	unsigned long flags;
+	int rtn;
+
+	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
+					  __FUNCTION__));
+
+	if (!scmd->device->host->hostt->eh_bus_reset_handler)
+		return FAILED;
+
+	rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);
+
+	if (rtn == SUCCESS) {
+		if (!scmd->device->host->hostt->skip_settle_delay)
+			ssleep(BUS_RESET_SETTLE_TIME);
+		spin_lock_irqsave(scmd->device->host->host_lock, flags);
+		scsi_report_bus_reset(scmd->device->host,
+				      scmd_channel(scmd));
+		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
+	}
+
+	return rtn;
+}
+
+/**
+ * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
+ * @scmd:	SCSI cmd used to send BDR
+ *
+ * Notes:
+ *    There is no timeout for this operation.  if this operation is
+ *    unreliable for a given host, then the host itself needs to put a
+ *    timer on it, and set the host back to a consistent state prior to
+ *    returning.
+ **/
+static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
+{
+	int rtn;
+
+	if (!scmd->device->host->hostt->eh_device_reset_handler)
+		return FAILED;
+
+	rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
+	if (rtn == SUCCESS) {
+		scmd->device->was_reset = 1;
+		scmd->device->expecting_cc_ua = 1;
+	}
+
+	return rtn;
+}
+
+static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
+{
+	if (!scmd->device->host->hostt->eh_abort_handler)
+		return FAILED;
+
+	return scmd->device->host->hostt->eh_abort_handler(scmd);
+}
+
+/**
+ * scsi_try_to_abort_cmd - Ask host to abort a running command.
+ * @scmd:	SCSI cmd to abort from Lower Level.
+ *
+ * Notes:
+ *    This function will not return until the user's completion function
+ *    has been called.  there is no timeout on this operation.  if the
+ *    author of the low-level driver wishes this operation to be timed,
+ *    they can provide this facility themselves.  helper functions in
+ *    scsi_error.c can be supplied to make this easier to do.
+ **/
+static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
+{
+	/*
+	 * scsi_done was called just after the command timed out and before
+	 * we had a chance to process it. (db)
+	 */
+	if (scmd->serial_number == 0)
+		return SUCCESS;
+	return __scsi_try_to_abort_cmd(scmd);
+}
+
+static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
+{
+	if (__scsi_try_to_abort_cmd(scmd) != SUCCESS)
+		if (scsi_try_bus_device_reset(scmd) != SUCCESS)
+			if (scsi_try_bus_reset(scmd) != SUCCESS)
+				scsi_try_host_reset(scmd);
+}
+
 /**
  * scsi_send_eh_cmnd  - submit a scsi command as part of error recory
  * @scmd:       SCSI command structure to hijack
@@ -579,13 +706,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
 			break;
 		}
 	} else {
-		/*
-		 * FIXME(eric) - we are not tracking whether we could
-		 * abort a timed out command or not.  not sure how
-		 * we should treat them differently anyways.
-		 */
-		if (shost->hostt->eh_abort_handler)
-			shost->hostt->eh_abort_handler(scmd);
+		scsi_abort_eh_cmnd(scmd);
 		rtn = FAILED;
 	}
 
@@ -672,8 +793,8 @@ EXPORT_SYMBOL(scsi_eh_finish_cmd);
  * XXX: Long term this code should go away, but that needs an audit of
  *      all LLDDs first.
  **/
-static int scsi_eh_get_sense(struct list_head *work_q,
-			     struct list_head *done_q)
+int scsi_eh_get_sense(struct list_head *work_q,
+		      struct list_head *done_q)
 {
 	struct scsi_cmnd *scmd, *next;
 	int rtn;
@@ -715,31 +836,7 @@ static int scsi_eh_get_sense(struct list_head *work_q,
 	return list_empty(work_q);
 }
+EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
 
-/**
- * scsi_try_to_abort_cmd - Ask host to abort a running command.
- * @scmd:	SCSI cmd to abort from Lower Level.
- *
- * Notes:
- *    This function will not return until the user's completion function
- *    has been called.  there is no timeout on this operation.  if the
- *    author of the low-level driver wishes this operation to be timed,
- *    they can provide this facility themselves.  helper functions in
- *    scsi_error.c can be supplied to make this easier to do.
- **/
-static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
-{
-	if (!scmd->device->host->hostt->eh_abort_handler)
-		return FAILED;
-
-	/*
-	 * scsi_done was called just after the command timed out and before
-	 * we had a chance to process it. (db)
-	 */
-	if (scmd->serial_number == 0)
-		return SUCCESS;
-	return scmd->device->host->hostt->eh_abort_handler(scmd);
-}
 
 /**
  * scsi_eh_tur - Send TUR to device.
@@ -814,32 +911,6 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
 	return list_empty(work_q);
 }
 
-/**
- * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
- * @scmd:	SCSI cmd used to send BDR
- *
- * Notes:
- *    There is no timeout for this operation.  if this operation is
- *    unreliable for a given host, then the host itself needs to put a
- *    timer on it, and set the host back to a consistent state prior to
- *    returning.
- **/
-static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
-{
-	int rtn;
-
-	if (!scmd->device->host->hostt->eh_device_reset_handler)
-		return FAILED;
-
-	rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
-	if (rtn == SUCCESS) {
-		scmd->device->was_reset = 1;
-		scmd->device->expecting_cc_ua = 1;
-	}
-
-	return rtn;
-}
-
 /**
  * scsi_eh_try_stu - Send START_UNIT to device.
  * @scmd:	Scsi cmd to send START_UNIT
@@ -970,64 +1041,6 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
 	return list_empty(work_q);
 }
 
-/**
- * scsi_try_bus_reset - ask host to perform a bus reset
- * @scmd:	SCSI cmd to send bus reset.
- **/
-static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
-{
-	unsigned long flags;
-	int rtn;
-
-	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
-					  __FUNCTION__));
-
-	if (!scmd->device->host->hostt->eh_bus_reset_handler)
-		return FAILED;
-
-	rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);
-	if (rtn == SUCCESS) {
-		if (!scmd->device->host->hostt->skip_settle_delay)
-			ssleep(BUS_RESET_SETTLE_TIME);
-		spin_lock_irqsave(scmd->device->host->host_lock, flags);
-		scsi_report_bus_reset(scmd->device->host,
-				      scmd_channel(scmd));
-		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
-	}
-
-	return rtn;
-}
-
-/**
- * scsi_try_host_reset - ask host adapter to reset itself
- * @scmd:	SCSI cmd to send hsot reset.
- **/
-static int scsi_try_host_reset(struct scsi_cmnd *scmd)
-{
-	unsigned long flags;
-	int rtn;
-
-	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
-					  __FUNCTION__));
-
-	if (!scmd->device->host->hostt->eh_host_reset_handler)
-		return FAILED;
-
-	rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);
-	if (rtn == SUCCESS) {
-		if (!scmd->device->host->hostt->skip_settle_delay)
-			ssleep(HOST_RESET_SETTLE_TIME);
-		spin_lock_irqsave(scmd->device->host->host_lock, flags);
-		scsi_report_bus_reset(scmd->device->host,
-				      scmd_channel(scmd));
-		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
-	}
-
-	return rtn;
-}
-
 /**
  * scsi_eh_bus_reset - send a bus reset
  * @shost:	scsi host being recovered.
@@ -1411,9 +1424,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
  * @eh_done_q:	list_head for processed commands.
  *
  **/
-static void scsi_eh_ready_devs(struct Scsi_Host *shost,
-			       struct list_head *work_q,
-			       struct list_head *done_q)
+void scsi_eh_ready_devs(struct Scsi_Host *shost,
+			struct list_head *work_q,
+			struct list_head *done_q)
 {
 	if (!scsi_eh_stu(shost, work_q, done_q))
 		if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
@@ -1421,6 +1434,7 @@ static void scsi_eh_ready_devs(struct Scsi_Host *shost,
 			if (!scsi_eh_host_reset(work_q, done_q))
 				scsi_eh_offline_sdevs(work_q, done_q);
 }
+EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
 
 /**
  * scsi_eh_flush_done_q - finish processed commands or retry them.


@@ -1399,7 +1399,7 @@ static void scsi_softirq_done(struct request *rq)
 			scsi_finish_command(cmd);
 			break;
 		case NEEDS_RETRY:
-			scsi_retry_command(cmd);
+			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
 			break;
 		case ADD_TO_MLQUEUE:
 			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
@@ -2249,6 +2249,8 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 	size_t sg_len = 0, len_complete = 0;
 	struct page *page;
 
+	WARN_ON(!irqs_disabled());
+
 	for (i = 0; i < sg_count; i++) {
 		len_complete = sg_len;	/* Complete sg-entries */
 		sg_len += sg[i].length;


@@ -28,7 +28,6 @@ extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
 extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
 extern void __scsi_done(struct scsi_cmnd *cmd);
-extern int scsi_retry_command(struct scsi_cmnd *cmd);
 #ifdef CONFIG_SCSI_LOGGING
 void scsi_log_send(struct scsi_cmnd *cmd);
 void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -58,6 +57,11 @@ extern int scsi_error_handler(void *host);
 extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
 extern void scsi_eh_wakeup(struct Scsi_Host *shost);
 extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
+void scsi_eh_ready_devs(struct Scsi_Host *shost,
+			struct list_head *work_q,
+			struct list_head *done_q);
+int scsi_eh_get_sense(struct list_head *work_q,
+		      struct list_head *done_q);
 
 /* scsi_lib.c */
 extern int scsi_maybe_unblock_host(struct scsi_device *sdev);


@@ -1029,7 +1029,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 			sdev_printk(KERN_INFO, sdev,
 				"scsi scan: consider passing scsi_mod."
-				"dev_flags=%s:%s:0x240 or 0x800240\n",
+				"dev_flags=%s:%s:0x240 or 0x1000240\n",
 				scsi_inq_str(vend, result, 8, 16),
 				scsi_inq_str(mod, result, 16, 32));
 		});


@@ -336,6 +336,51 @@ show_sas_device_type(struct class_device *cdev, char *buf)
 }
 static CLASS_DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL);
 
+static ssize_t do_sas_phy_enable(struct class_device *cdev,
+		size_t count, int enable)
+{
+	struct sas_phy *phy = transport_class_to_phy(cdev);
+	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+	struct sas_internal *i = to_sas_internal(shost->transportt);
+	int error;
+
+	error = i->f->phy_enable(phy, enable);
+	if (error)
+		return error;
+	phy->enabled = enable;
+	return count;
+};
+
+static ssize_t store_sas_phy_enable(struct class_device *cdev,
+		const char *buf, size_t count)
+{
+	if (count < 1)
+		return -EINVAL;
+
+	switch (buf[0]) {
+	case '0':
+		do_sas_phy_enable(cdev, count, 0);
+		break;
+	case '1':
+		do_sas_phy_enable(cdev, count, 1);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t show_sas_phy_enable(struct class_device *cdev, char *buf)
+{
+	struct sas_phy *phy = transport_class_to_phy(cdev);
+
+	return snprintf(buf, 20, "%d", phy->enabled);
+}
+
+static CLASS_DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, show_sas_phy_enable,
+			 store_sas_phy_enable);
+
 static ssize_t do_sas_phy_reset(struct class_device *cdev,
 		size_t count, int hard_reset)
 {
@@ -435,6 +480,7 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
 		return NULL;
 
 	phy->number = number;
+	phy->enabled = 1;
 
 	device_initialize(&phy->dev);
 	phy->dev.parent = get_device(parent);
@@ -579,8 +625,19 @@ static void sas_port_release(struct device *dev)
 static void sas_port_create_link(struct sas_port *port,
 				 struct sas_phy *phy)
 {
-	sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, phy->dev.bus_id);
-	sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
+	int res;
+
+	res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj,
+				phy->dev.bus_id);
+	if (res)
+		goto err;
+	res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
+	if (res)
+		goto err;
+	return;
+err:
+	printk(KERN_ERR "%s: Cannot create port links, err=%d\n",
+	       __FUNCTION__, res);
 }
 
 static void sas_port_delete_link(struct sas_port *port,
@@ -818,13 +875,20 @@ EXPORT_SYMBOL(sas_port_delete_phy);
 
 void sas_port_mark_backlink(struct sas_port *port)
 {
+	int res;
 	struct device *parent = port->dev.parent->parent->parent;
 
 	if (port->is_backlink)
 		return;
 	port->is_backlink = 1;
-	sysfs_create_link(&port->dev.kobj, &parent->kobj,
-			  parent->bus_id);
+	res = sysfs_create_link(&port->dev.kobj, &parent->kobj,
+				parent->bus_id);
+	if (res)
+		goto err;
+	return;
+err:
+	printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n",
+	       __FUNCTION__, res);
 }
 EXPORT_SYMBOL(sas_port_mark_backlink);
 
@@ -1237,7 +1301,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
 	if (identify->device_type == SAS_END_DEVICE &&
 	    rphy->scsi_target_id != -1) {
 		scsi_scan_target(&rphy->dev, 0,
-			rphy->scsi_target_id, ~0, 0);
+			rphy->scsi_target_id, SCAN_WILD_CARD, 0);
 	}
 
 	return 0;
@@ -1253,7 +1317,7 @@ EXPORT_SYMBOL(sas_rphy_add);
  * Note:
  *   This function must only be called on a remote
  *   PHY that has not sucessfully been added using
- *   sas_rphy_add().
+ *   sas_rphy_add() (or has been sas_rphy_remove()'d)
  */
 void sas_rphy_free(struct sas_rphy *rphy)
 {
@@ -1272,18 +1336,30 @@ void sas_rphy_free(struct sas_rphy *rphy)
 EXPORT_SYMBOL(sas_rphy_free);
 
 /**
- * sas_rphy_delete  --  remove SAS remote PHY
- * @rphy:	SAS remote PHY to remove
+ * sas_rphy_delete  --  remove and free SAS remote PHY
+ * @rphy:	SAS remote PHY to remove and free
  *
- * Removes the specified SAS remote PHY.
+ * Removes the specified SAS remote PHY and frees it.
  */
 void
 sas_rphy_delete(struct sas_rphy *rphy)
+{
+	sas_rphy_remove(rphy);
+	sas_rphy_free(rphy);
+}
+EXPORT_SYMBOL(sas_rphy_delete);
+
+/**
+ * sas_rphy_remove  --  remove SAS remote PHY
+ * @rphy:	SAS remote phy to remove
+ *
+ * Removes the specified SAS remote PHY.
+ */
+void
+sas_rphy_remove(struct sas_rphy *rphy)
 {
 	struct device *dev = &rphy->dev;
 	struct sas_port *parent = dev_to_sas_port(dev->parent);
-	struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
-	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
 
 	switch (rphy->identify.device_type) {
 	case SAS_END_DEVICE:
@@ -1299,17 +1375,10 @@ sas_rphy_delete(struct sas_rphy *rphy)
 
 	transport_remove_device(dev);
 	device_del(dev);
-	transport_destroy_device(dev);
-
-	mutex_lock(&sas_host->lock);
-	list_del(&rphy->list);
-	mutex_unlock(&sas_host->lock);
 
 	parent->rphy = NULL;
 
-	put_device(dev);
 }
-EXPORT_SYMBOL(sas_rphy_delete);
+EXPORT_SYMBOL(sas_rphy_remove);
 
 /**
  * scsi_is_sas_rphy  --  check if a struct device represents a SAS remote PHY
@@ -1389,6 +1458,10 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
 	SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1,	\
 		!i->f->set_phy_speed, S_IRUGO)
 
+#define SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(field, func)			\
+	SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1,	\
+		!i->f->func, S_IRUGO)
+
 #define SETUP_PORT_ATTRIBUTE(field)					\
 	SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
 
@@ -1396,10 +1469,10 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
 	SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func)
 
 #define SETUP_PHY_ATTRIBUTE_WRONLY(field)				\
-	SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, 1)
+	SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, 1)
 
 #define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func)		\
-	SETUP_TEMPLATE(phy_attrs, field, S_IWUGO, i->f->func)
+	SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, i->f->func)
 
 #define SETUP_END_DEV_ATTRIBUTE(field)					\
 	SETUP_TEMPLATE(end_dev_attrs, field, S_IRUGO, 1)
@@ -1479,6 +1552,7 @@ sas_attach_transport(struct sas_function_template *ft)
 	SETUP_PHY_ATTRIBUTE(phy_reset_problem_count);
 	SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset);
 	SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset);
+	SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(enable, phy_enable);
 	i->phy_attrs[count] = NULL;
 	count = 0;
 
@@ -1587,7 +1661,7 @@ static void __exit sas_transport_exit(void)
 }
 
 MODULE_AUTHOR("Christoph Hellwig");
-MODULE_DESCRIPTION("SAS Transphy Attributes");
+MODULE_DESCRIPTION("SAS Transport Attributes");
 MODULE_LICENSE("GPL");
 
 module_init(sas_transport_init);
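The new enable attribute is registered next to the existing per-phy attributes, so when an LLDD supplies phy_enable() a phy can be taken down and brought back from userspace. A rough sketch of such a toggle, assuming a phy named phy-0:0 under /sys/class/sas_phy/ (the phy name is only an example; real systems enumerate phys differently):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch: disable and re-enable a SAS phy through the new sysfs attribute.
 * "phy-0:0" is a placeholder name for illustration. */
static int set_phy_enable(const char *phy, int enable)
{
	char path[128];
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "/sys/class/sas_phy/%s/enable", phy);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, enable ? "1" : "0", 1);
	close(fd);
	return n == 1 ? 0 : -1;
}

int main(void)
{
	if (set_phy_enable("phy-0:0", 0))	/* take the phy down */
		return 1;
	sleep(1);
	return set_phy_enable("phy-0:0", 1);	/* bring it back */
}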


@@ -46,7 +46,6 @@
 				 * two cc/ua clears */
 
 /* Private data accessors (keep these out of the header file) */
-#define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending)
 #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress)
 #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex)


@@ -123,6 +123,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
 	hostdata->differential = differential;
 	hostdata->clock = clock;
 	hostdata->chip710 = 1;
+	hostdata->burst_length = 8;
 
 	/* and register the chip */
 	if((host = NCR_700_detect(&sim710_driver_template, hostdata, dev))

drivers/scsi/sni_53c710.c (new file, 159 lines)

@@ -0,0 +1,159 @@
/* -*- mode: c; c-basic-offset: 8 -*- */
/* SNI RM driver
*
* Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
/*
* Based on lasi700.c
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>
#include "53c700.h"
MODULE_AUTHOR("Thomas Bogendörfer");
MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
MODULE_LICENSE("GPL");
#define SNIRM710_CLOCK 32
static struct scsi_host_template snirm710_template = {
.name = "SNI RM SCSI 53c710",
.proc_name = "snirm_53c710",
.this_id = 7,
.module = THIS_MODULE,
};
static int __init snirm710_probe(struct platform_device *dev)
{
unsigned long base;
struct NCR_700_Host_Parameters *hostdata;
struct Scsi_Host *host;
struct resource *res;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
base = res->start;
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
if (!hostdata) {
printk(KERN_ERR "%s: Failed to allocate host data\n",
dev->dev.bus_id);
return -ENOMEM;
}
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_32BIT_MASK);
hostdata->base = ioremap_nocache(CPHYSADDR(base), 0x100);
hostdata->differential = 0;
hostdata->clock = SNIRM710_CLOCK;
hostdata->force_le_on_be = 1;
hostdata->chip710 = 1;
hostdata->burst_length = 4;
host = NCR_700_detect(&snirm710_template, hostdata, &dev->dev);
if (!host)
goto out_kfree;
host->this_id = 7;
host->base = base;
host->irq = platform_get_irq(dev, 0);
if(request_irq(host->irq, NCR_700_intr, SA_SHIRQ, "snirm710", host)) {
printk(KERN_ERR "snirm710: request_irq failed!\n");
goto out_put_host;
}
dev_set_drvdata(&dev->dev, host);
scsi_scan_host(host);
return 0;
out_put_host:
scsi_host_put(host);
out_kfree:
iounmap(hostdata->base);
kfree(hostdata);
return -ENODEV;
}
static int __exit snirm710_driver_remove(struct platform_device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];
scsi_remove_host(host);
NCR_700_release(host);
free_irq(host->irq, host);
iounmap(hostdata->base);
kfree(hostdata);
return 0;
}
static struct platform_driver snirm710_driver = {
.probe = snirm710_probe,
.remove = __devexit_p(snirm710_driver_remove),
.driver = {
.name = "snirm_53c710",
},
};
static int __init snirm710_init(void)
{
int err;
if ((err = platform_driver_register(&snirm710_driver))) {
printk(KERN_ERR "Driver registration failed\n");
return err;
}
return 0;
}
static void __exit snirm710_exit(void)
{
platform_driver_unregister(&snirm710_driver);
}
module_init(snirm710_init);
module_exit(snirm710_exit);


@@ -9,7 +9,7 @@
    Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
    Michael Schaefer, J"org Weule, and Eric Youngdale.
 
-   Copyright 1992 - 2006 Kai Makisara
+   Copyright 1992 - 2007 Kai Makisara
    email Kai.Makisara@kolumbus.fi
 
    Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
    Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
  */
 
-static const char *verstr = "20061107";
+static const char *verstr = "20070203";
 
 #include <linux/module.h>
@@ -1168,6 +1168,7 @@ static int st_open(struct inode *inode, struct file *filp)
 		STps = &(STp->ps[i]);
 		STps->rw = ST_IDLE;
 	}
+	STp->try_dio_now = STp->try_dio;
 	STp->recover_count = 0;
 	DEB( STp->nbr_waits = STp->nbr_finished = 0;
 	     STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = STp->nbr_combinable = 0; )
@@ -1400,9 +1401,9 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
 	struct st_buffer *STbp = STp->buffer;
 
 	if (is_read)
-		i = STp->try_dio && try_rdio;
+		i = STp->try_dio_now && try_rdio;
 	else
-		i = STp->try_dio && try_wdio;
+		i = STp->try_dio_now && try_wdio;
 
 	if (i && ((unsigned long)buf & queue_dma_alignment(
 					STp->device->request_queue)) == 0) {
@@ -1599,7 +1600,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
 		STm->do_async_writes && STps->eof < ST_EOM_OK;
 
 	if (STp->block_size != 0 && STm->do_buffer_writes &&
-	    !(STp->try_dio && try_wdio) && STps->eof < ST_EOM_OK &&
+	    !(STp->try_dio_now && try_wdio) && STps->eof < ST_EOM_OK &&
 	    STbp->buffer_bytes < STbp->buffer_size) {
 		STp->dirty = 1;
 		/* Don't write a buffer that is not full enough. */
@@ -1769,7 +1770,7 @@ static long read_tape(struct scsi_tape *STp, long count,
 	if (STp->block_size == 0)
 		blks = bytes = count;
 	else {
-		if (!(STp->try_dio && try_rdio) && STm->do_read_ahead) {
+		if (!(STp->try_dio_now && try_rdio) && STm->do_read_ahead) {
 			blks = (STp->buffer)->buffer_blocks;
 			bytes = blks * STp->block_size;
 		} else {
@@ -1948,10 +1949,12 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
 		goto out;
 
 	STm = &(STp->modes[STp->current_mode]);
-	if (!(STm->do_read_ahead) && STp->block_size != 0 &&
-	    (count % STp->block_size) != 0) {
-		retval = (-EINVAL);	/* Read must be integral number of blocks */
-		goto out;
+	if (STp->block_size != 0 && (count % STp->block_size) != 0) {
+		if (!STm->do_read_ahead) {
+			retval = (-EINVAL);	/* Read must be integral number of blocks */
+			goto out;
+		}
+		STp->try_dio_now = 0;	/* Direct i/o can't handle split blocks */
 	}
 
 	STps = &(STp->ps[STp->partition]);
Some files were not shown because too many files have changed in this diff.