- Move tagged queueing control from ADA to ATA XPT. This allows the device
  command queue length to be controlled correctly, and is a first step toward
  supporting fewer than 32 tags.
- Limit the queue for non-tagged devices to 2 slots for ahci(4) and siis(4).
- Implement quirk matching for ATA devices.
- Move xpt_schedule_dev_sendq() from header to source file.
- Move delayed queue shrinking to a more expected place: element freeing.
- Remove some SCSIsms in ATA.
Alexander Motin 2009-11-11 11:10:36 +00:00
parent b426306074
commit 30a4094f86
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=199178
11 changed files with 154 additions and 160 deletions

View file

@ -509,3 +509,38 @@ ata_max_mode(struct ata_params *ap, int mode, int maxmode)
return (mode);
}
int
ata_identify_match(caddr_t identbuffer, caddr_t table_entry)
{
struct scsi_inquiry_pattern *entry;
struct ata_params *ident;
entry = (struct scsi_inquiry_pattern *)table_entry;
ident = (struct ata_params *)identbuffer;
if ((cam_strmatch(ident->model, entry->product,
sizeof(ident->model)) == 0)
&& (cam_strmatch(ident->revision, entry->revision,
sizeof(ident->revision)) == 0)) {
return (0);
}
return (-1);
}
int
ata_static_identify_match(caddr_t identbuffer, caddr_t table_entry)
{
struct scsi_static_inquiry_pattern *entry;
struct ata_params *ident;
entry = (struct scsi_static_inquiry_pattern *)table_entry;
ident = (struct ata_params *)identbuffer;
if ((cam_strmatch(ident->model, entry->product,
sizeof(ident->model)) == 0)
&& (cam_strmatch(ident->revision, entry->revision,
sizeof(ident->revision)) == 0)) {
return (0);
}
return (-1);
}
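
For context, ata_identify_match() compares the ATA IDENTIFY model string against the pattern's product field and the firmware revision against its revision field; the vendor field is unused, since ATA IDENTIFY data has no separate vendor string. Below is a minimal, purely illustrative sketch (not part of this commit) of driving the new matcher through cam_quirkmatch(); the real tables appear in the ada and ATA XPT hunks further down.

/*
 * Illustrative quirk table: wildcards follow cam_strmatch() rules and a
 * match returns 0.  The "HYPOTHETICAL DISK*" entry is made up.
 */
static struct ada_quirk_entry example_quirk_table[] =
{
	{
		/* Hypothetical device-specific entry */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			/*vendor*/"*", /*product*/"HYPOTHETICAL DISK*", /*revision*/"*"
		},
		/*quirks*/0
	},
	{
		/* Default wildcard entry, as in the ada hunk below */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			/*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0
	},
};

/* cgd is a filled-in struct ccb_getdev (XPT_GDEV_TYPE); softc is the
 * periph's softc, as in adaregister() below. */
caddr_t match;

match = cam_quirkmatch((caddr_t)&cgd->ident_data,
    (caddr_t)example_quirk_table,
    sizeof(example_quirk_table)/sizeof(*example_quirk_table),
    sizeof(*example_quirk_table), ata_identify_match);
if (match != NULL)
	softc->quirks = ((struct ada_quirk_entry *)match)->quirks;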

View file

@ -114,4 +114,7 @@ int ata_max_wmode(struct ata_params *ap);
int ata_max_umode(struct ata_params *ap);
int ata_max_mode(struct ata_params *ap, int mode, int maxmode);
int ata_identify_match(caddr_t identbuffer, caddr_t table_entry);
int ata_static_identify_match(caddr_t identbuffer, caddr_t table_entry);
#endif

View file

@ -122,9 +122,17 @@ struct ada_quirk_entry {
ada_quirks quirks;
};
//static struct ada_quirk_entry ada_quirk_table[] =
//{
//};
static struct ada_quirk_entry ada_quirk_table[] =
{
{
/* Default */
{
T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
/*vendor*/"*", /*product*/"*", /*revision*/"*"
},
/*quirks*/0
},
};
static disk_strategy_t adastrategy;
static dumper_t adadump;
@ -618,7 +626,7 @@ adaregister(struct cam_periph *periph, void *arg)
if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
cgd->ident_data.queue >= 31)
cgd->inq_flags & SID_CmdQue)
softc->flags |= ADA_FLAG_CAN_NCQ;
softc->state = ADA_STATE_NORMAL;
@ -627,12 +635,10 @@ adaregister(struct cam_periph *periph, void *arg)
/*
* See if this device has any quirks.
*/
// match = cam_quirkmatch((caddr_t)&cgd->inq_data,
// (caddr_t)ada_quirk_table,
// sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
// sizeof(*ada_quirk_table), scsi_inquiry_match);
match = NULL;
match = cam_quirkmatch((caddr_t)&cgd->ident_data,
(caddr_t)ada_quirk_table,
sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
sizeof(*ada_quirk_table), ata_identify_match);
if (match != NULL)
softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
else
@ -700,11 +706,6 @@ adaregister(struct cam_periph *periph, void *arg)
dp->secsize, dp->heads,
dp->secs_per_track, dp->cylinders);
xpt_announce_periph(periph, announce_buf);
if (softc->flags & ADA_FLAG_CAN_NCQ) {
printf("%s%d: Native Command Queueing enabled\n",
periph->periph_name, periph->unit_number);
}
/*
* Add async callbacks for bus reset and
* bus device reset calls. I don't bother

View file

@ -66,17 +66,12 @@ __FBSDID("$FreeBSD$");
#include <machine/stdarg.h> /* for xpt_print below */
#include "opt_cam.h"
struct scsi_quirk_entry {
struct ata_quirk_entry {
struct scsi_inquiry_pattern inq_pat;
u_int8_t quirks;
#define CAM_QUIRK_NOLUNS 0x01
#define CAM_QUIRK_NOSERIAL 0x02
#define CAM_QUIRK_HILUNS 0x04
#define CAM_QUIRK_NOHILUNS 0x08
u_int mintags;
#define CAM_QUIRK_MAXTAGS 0x01
u_int maxtags;
};
#define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk))
static periph_init_t probe_periph_init;
@ -138,7 +133,7 @@ typedef struct {
struct cam_periph *periph;
} probe_softc;
static struct scsi_quirk_entry scsi_quirk_table[] =
static struct ata_quirk_entry ata_quirk_table[] =
{
{
/* Default tagged queuing parameters for all devices */
@ -146,12 +141,12 @@ static struct scsi_quirk_entry scsi_quirk_table[] =
T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
/*vendor*/"*", /*product*/"*", /*revision*/"*"
},
/*quirks*/0, /*mintags*/2, /*maxtags*/32
/*quirks*/0, /*maxtags*/0
},
};
static const int scsi_quirk_table_size =
sizeof(scsi_quirk_table) / sizeof(*scsi_quirk_table);
static const int ata_quirk_table_size =
sizeof(ata_quirk_table) / sizeof(*ata_quirk_table);
static cam_status proberegister(struct cam_periph *periph,
void *arg);
@ -162,7 +157,7 @@ static void probestart(struct cam_periph *periph, union ccb *start_ccb);
// struct cam_ed *device);
static void probedone(struct cam_periph *periph, union ccb *done_ccb);
static void probecleanup(struct cam_periph *periph);
static void scsi_find_quirk(struct cam_ed *device);
static void ata_find_quirk(struct cam_ed *device);
static void ata_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void ata_scan_lun(struct cam_periph *periph,
struct cam_path *path, cam_flags flags,
@ -172,10 +167,9 @@ static struct cam_ed *
ata_alloc_device(struct cam_eb *bus, struct cam_et *target,
lun_id_t lun_id);
static void ata_device_transport(struct cam_path *path);
static void scsi_set_transfer_settings(struct ccb_trans_settings *cts,
static void ata_set_transfer_settings(struct ccb_trans_settings *cts,
struct cam_ed *device,
int async_update);
static void scsi_toggle_tags(struct cam_path *path);
static void ata_dev_async(u_int32_t async_code,
struct cam_eb *bus,
struct cam_et *target,
@ -717,6 +711,17 @@ device_fail: if (cam_periph_error(done_ccb, 0, 0,
path->device->flags |= CAM_DEV_IDENTIFY_DATA_VALID;
}
if (ident_buf->satacapabilities & ATA_SUPPORT_NCQ) {
path->device->mintags = path->device->maxtags =
ATA_QUEUE_LEN(ident_buf->queue) + 1;
}
ata_find_quirk(path->device);
/* XXX: If not all tags allowed, we must to tell SIM which are. */
if (path->device->mintags < path->bus->sim->max_tagged_dev_openings)
path->device->mintags = path->device->maxtags = 0;
if (path->device->mintags != 0) {
xpt_start_tags(path);
}
ata_device_transport(path);
PROBE_SET_ACTION(softc, PROBE_SETMODE);
xpt_release_ccb(done_ccb);
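
A note on the tag count set above: word 75 of the ATA IDENTIFY data (ident_buf->queue) encodes the maximum NCQ queue depth minus one in its low five bits, which is why the ATA_QUEUE_LEN() result is incremented. A minimal sketch, assuming the usual <sys/ata.h> definition of the macro:

/* Assumed definition: low 5 bits of IDENTIFY word 75. */
#define ATA_QUEUE_LEN(x)	((x) & 0x1f)

/* A drive with a 32-deep NCQ queue reports 31, so this yields 32 tags. */
if (ident_buf->satacapabilities & ATA_SUPPORT_NCQ)
	path->device->mintags = path->device->maxtags =
	    ATA_QUEUE_LEN(ident_buf->queue) + 1;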
@ -776,7 +781,6 @@ device_fail: if (cam_periph_error(done_ccb, 0, 0,
return;
}
scsi_find_quirk(path->device);
ata_device_transport(path);
if (periph->path->device->flags & CAM_DEV_UNCONFIGURED) {
path->device->flags &= ~CAM_DEV_UNCONFIGURED;
@ -853,24 +857,23 @@ probecleanup(struct cam_periph *periph)
}
static void
scsi_find_quirk(struct cam_ed *device)
ata_find_quirk(struct cam_ed *device)
{
struct scsi_quirk_entry *quirk;
struct ata_quirk_entry *quirk;
caddr_t match;
match = cam_quirkmatch((caddr_t)&device->inq_data,
(caddr_t)scsi_quirk_table,
sizeof(scsi_quirk_table) /
sizeof(*scsi_quirk_table),
sizeof(*scsi_quirk_table), scsi_inquiry_match);
match = cam_quirkmatch((caddr_t)&device->ident_data,
(caddr_t)ata_quirk_table,
ata_quirk_table_size,
sizeof(*ata_quirk_table), ata_identify_match);
if (match == NULL)
panic("xpt_find_quirk: device didn't match wildcard entry!!");
quirk = (struct scsi_quirk_entry *)match;
quirk = (struct ata_quirk_entry *)match;
device->quirk = quirk;
device->mintags = quirk->mintags;
device->maxtags = quirk->maxtags;
if (quirk->quirks & CAM_QUIRK_MAXTAGS)
device->mintags = device->maxtags = quirk->maxtags;
}
typedef struct {
@ -1101,7 +1104,7 @@ static struct cam_ed *
ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
struct cam_path path;
struct scsi_quirk_entry *quirk;
struct ata_quirk_entry *quirk;
struct cam_ed *device;
struct cam_ed *cur_device;
@ -1113,10 +1116,10 @@ ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
* Take the default quirk entry until we have inquiry
* data and can determine a better quirk to use.
*/
quirk = &scsi_quirk_table[scsi_quirk_table_size - 1];
quirk = &ata_quirk_table[ata_quirk_table_size - 1];
device->quirk = (void *)quirk;
device->mintags = quirk->mintags;
device->maxtags = quirk->maxtags;
device->mintags = 0;
device->maxtags = 0;
bzero(&device->inq_data, sizeof(device->inq_data));
device->inq_flags = 0;
device->queue_flags = 0;
@ -1199,7 +1202,7 @@ ata_action(union ccb *start_ccb)
switch (start_ccb->ccb_h.func_code) {
case XPT_SET_TRAN_SETTINGS:
{
scsi_set_transfer_settings(&start_ccb->cts,
ata_set_transfer_settings(&start_ccb->cts,
start_ccb->ccb_h.path->device,
/*async_update*/FALSE);
break;
@ -1227,7 +1230,7 @@ ata_action(union ccb *start_ccb)
}
static void
scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
ata_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
int async_update)
{
struct ccb_pathinq cpi;
@ -1379,24 +1382,7 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device
device->tag_delay_count = CAM_TAG_DELAY_COUNT;
device->flags |= CAM_DEV_TAG_AFTER_COUNT;
} else {
struct ccb_relsim crs;
xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
device->inq_flags &= ~SID_CmdQue;
xpt_dev_ccbq_resize(cts->ccb_h.path,
sim->max_dev_openings);
device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
device->tag_delay_count = 0;
xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
CAM_PRIORITY_NORMAL);
crs.ccb_h.func_code = XPT_REL_SIMQ;
crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
crs.openings
= crs.release_timeout
= crs.qfrozen_cnt
= 0;
xpt_action((union ccb *)&crs);
xpt_stop_tags(cts->ccb_h.path);
}
}
}
@ -1404,39 +1390,6 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device
(*(sim->sim_action))(sim, (union ccb *)cts);
}
static void
scsi_toggle_tags(struct cam_path *path)
{
struct cam_ed *dev;
/*
* Give controllers a chance to renegotiate
* before starting tag operations. We
* "toggle" tagged queuing off then on
* which causes the tag enable command delay
* counter to come into effect.
*/
dev = path->device;
if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
|| ((dev->inq_flags & SID_CmdQue) != 0
&& (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
struct ccb_trans_settings cts;
xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
cts.protocol = PROTO_SCSI;
cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
cts.transport = XPORT_UNSPECIFIED;
cts.transport_version = XPORT_VERSION_UNSPECIFIED;
cts.proto_specific.scsi.flags = 0;
cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
scsi_set_transfer_settings(&cts, path->device,
/*async_update*/TRUE);
cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
scsi_set_transfer_settings(&cts, path->device,
/*async_update*/TRUE);
}
}
/*
* Handle any per-device event notifications that require action by the XPT.
*/
@ -1469,15 +1422,6 @@ ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
status = CAM_REQ_CMP_ERR;
if (status == CAM_REQ_CMP) {
/*
* Allow transfer negotiation to occur in a
* tag free environment.
*/
if (async_code == AC_SENT_BDR
|| async_code == AC_BUS_RESET)
scsi_toggle_tags(&newpath);
if (async_code == AC_INQ_CHANGED) {
/*
* We've sent a start unit command, or
@ -1498,7 +1442,7 @@ ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
struct ccb_trans_settings *settings;
settings = (struct ccb_trans_settings *)async_arg;
scsi_set_transfer_settings(settings, device,
ata_set_transfer_settings(settings, device,
/*async_update*/TRUE);
}
}

View file

@ -165,8 +165,12 @@ cam_strmatch(const u_int8_t *str, const u_int8_t *pattern, int str_len)
str++;
str_len--;
}
while (str_len > 0 && *str++ == ' ')
while (str_len > 0 && *str == ' ') {
str++;
str_len--;
}
if (str_len > 0 && *str == 0)
str_len = 0;
return (str_len);
}
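
The cam_strmatch() change above fixes trailing-pad handling: the old loop advanced past spaces with *str++ but never decremented str_len, so a space-padded fixed-width field (such as an ATA model string) could not fully match a shorter pattern; a NUL terminator in the remainder now also ends the comparison. A small illustrative call with made-up values (cam_strmatch() returns 0 on a match):

/*
 * Illustrative only: a fixed-width, space-padded field versus an exact
 * pattern.  With the fix, the trailing blanks are skipped and the NUL
 * fill ends the comparison, so the call reports a match.
 */
u_int8_t model[40] = "HYPOTHETICAL DISK 1000    ";	/* rest is NUL filled */

if (cam_strmatch(model, (const u_int8_t *)"HYPOTHETICAL DISK 1000",
    sizeof(model)) == 0)
	printf("matched\n");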

View file

@ -307,7 +307,7 @@ struct ccb_getdev {
struct scsi_inquiry_data inq_data;
struct ata_params ident_data;
u_int8_t serial_num[252];
u_int8_t reserved;
u_int8_t inq_flags;
u_int8_t serial_num_len;
};

View file

@ -285,7 +285,6 @@ static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t xptsetasyncbusfunc;
static cam_status xptregister(struct cam_periph *periph,
void *arg);
static void xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
@ -299,12 +298,6 @@ xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
int retval;
if (dev->ccbq.devq_openings > 0) {
if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
cam_ccbq_resize(&dev->ccbq,
dev->ccbq.dev_openings
+ dev->ccbq.dev_active);
dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
}
/*
* The priority of a device waiting for CCB resources
* is that of the the highest priority peripheral driver
@ -320,6 +313,27 @@ xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
return (retval);
}
static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
int retval;
if (dev->ccbq.dev_openings > 0) {
/*
* The priority of a device waiting for controller
* resources is that of the the highest priority CCB
* enqueued.
*/
retval =
xpt_schedule_dev(&bus->sim->devq->send_queue,
&dev->send_ccb_entry.pinfo,
CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
} else {
retval = 0;
}
return (retval);
}
static __inline int
periph_is_queued(struct cam_periph *periph)
{
@ -2657,6 +2671,7 @@ xpt_action_default(union ccb *start_ccb)
cgd->protocol = dev->protocol;
cgd->inq_data = dev->inq_data;
cgd->ident_data = dev->ident_data;
cgd->inq_flags = dev->inq_flags;
cgd->ccb_h.status = CAM_REQ_CMP;
cgd->serial_num_len = dev->serial_num_len;
if ((dev->serial_num_len > 0)
@ -3747,6 +3762,11 @@ xpt_release_ccb(union ccb *free_ccb)
mtx_assert(sim->mtx, MA_OWNED);
cam_ccbq_release_opening(&device->ccbq);
if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) {
device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
cam_ccbq_resize(&device->ccbq,
device->ccbq.dev_openings + device->ccbq.dev_active);
}
if (sim->ccb_count > sim->max_ccbs) {
xpt_free_ccb(free_ccb);
sim->ccb_count--;
@ -4573,7 +4593,7 @@ xpt_find_device(struct cam_et *target, lun_id_t lun_id)
return (device);
}
static void
void
xpt_start_tags(struct cam_path *path)
{
struct ccb_relsim crs;
@ -4602,6 +4622,30 @@ xpt_start_tags(struct cam_path *path)
xpt_action((union ccb *)&crs);
}
void
xpt_stop_tags(struct cam_path *path)
{
struct ccb_relsim crs;
struct cam_ed *device;
struct cam_sim *sim;
device = path->device;
sim = path->bus->sim;
device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
device->tag_delay_count = 0;
xpt_freeze_devq(path, /*count*/1);
device->inq_flags &= ~SID_CmdQue;
xpt_dev_ccbq_resize(path, sim->max_dev_openings);
xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
crs.ccb_h.func_code = XPT_REL_SIMQ;
crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
crs.openings
= crs.release_timeout
= crs.qfrozen_cnt
= 0;
xpt_action((union ccb *)&crs);
}
static int busses_to_config;
static int busses_to_reset;

View file

@ -176,29 +176,8 @@ void xpt_run_dev_sendq(struct cam_eb *bus);
int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
u_int32_t new_priority);
u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
int retval;
if (dev->ccbq.dev_openings > 0) {
/*
* The priority of a device waiting for controller
* resources is that of the the highest priority CCB
* enqueued.
*/
retval =
xpt_schedule_dev(&bus->sim->devq->send_queue,
&dev->send_ccb_entry.pinfo,
CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
} else {
retval = 0;
}
return (retval);
}
void xpt_start_tags(struct cam_path *path);
void xpt_stop_tags(struct cam_path *path);
MALLOC_DECLARE(M_CAMXPT);

View file

@ -2274,24 +2274,7 @@ scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device
device->tag_delay_count = CAM_TAG_DELAY_COUNT;
device->flags |= CAM_DEV_TAG_AFTER_COUNT;
} else {
struct ccb_relsim crs;
xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
device->inq_flags &= ~SID_CmdQue;
xpt_dev_ccbq_resize(cts->ccb_h.path,
sim->max_dev_openings);
device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
device->tag_delay_count = 0;
xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
CAM_PRIORITY_NORMAL);
crs.ccb_h.func_code = XPT_REL_SIMQ;
crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
crs.openings
= crs.release_timeout
= crs.qfrozen_cnt
= 0;
xpt_action((union ccb *)&crs);
xpt_stop_tags(cts->ccb_h.path);
}
}
}

View file

@ -733,7 +733,8 @@ ahci_ch_attach(device_t dev)
}
/* Construct SIM entry */
ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch,
device_get_unit(dev), &ch->mtx, ch->numslots, 0, devq);
device_get_unit(dev), &ch->mtx,
min(2, ch->numslots), ch->numslots, devq);
if (ch->sim == NULL) {
device_printf(dev, "unable to allocate sim\n");
error = ENOMEM;
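
For reference, the two integer arguments changed here are cam_sim_alloc()'s per-device openings: the first is the queue depth offered to untagged (non-NCQ) devices, the second the depth for tagged ones. The driver previously advertised all slots for untagged use and 0 for tagged; it now caps untagged devices at 2 outstanding commands while still offering every slot to NCQ-capable devices, and siis(4) below gets the same treatment. A sketch of the call, assuming the stock prototype (parameter names shown for illustration):

/*
 * Assumed prototype:
 * struct cam_sim *cam_sim_alloc(sim_action_func, sim_poll_func,
 *     const char *sim_name, void *softc, u_int32_t unit, struct mtx *mtx,
 *     int max_dev_transactions, int max_tagged_dev_transactions,
 *     struct cam_devq *queue);
 */
ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch,
    device_get_unit(dev), &ch->mtx,
    /*max_dev_transactions*/min(2, ch->numslots),
    /*max_tagged_dev_transactions*/ch->numslots, devq);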

View file

@ -454,7 +454,7 @@ siis_ch_attach(device_t dev)
}
/* Construct SIM entry */
ch->sim = cam_sim_alloc(siisaction, siispoll, "siisch", ch,
device_get_unit(dev), &ch->mtx, SIIS_MAX_SLOTS, 0, devq);
device_get_unit(dev), &ch->mtx, 2, SIIS_MAX_SLOTS, devq);
if (ch->sim == NULL) {
device_printf(dev, "unable to allocate sim\n");
error = ENOMEM;