Merge tag 'for-usb-next-2014-03-06' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-next

Sarah writes:

xhci: Streams and UAS cleanups, misc cleanups for 3.15

Hi Greg,

Here's 76 patches to queue to usb-next for 3.15.

The bulk of this rather large pull request is the UAS driver cleanup, the
xHCI streams fixes, and the new userspace API for usbfs to be able to use
and alloc/free bulk streams.  I've hammered on these changes, and the UAS
driver seems solid.  The performance numbers are pretty spiffy too:

root@xanatos:~# echo 3 > /proc/sys/vm/drop_caches; dd if=/dev/sdb of=/dev/null bs=4k count=1000M iflag=count_bytes
256000+0 records in
256000+0 records out
1048576000 bytes (1.0 GB) copied, 3.28557 s, 319 MB/s

That's about 100 MB/s faster than my fastest Bulk-only-Transport mass
storage drive.

There's a couple of miscellaneous cleanup patches and non-urgent bug fixes
in here as well:

7969943789 xhci: add the meaningful IRQ description if it is empty
bcffae7708 xhci: Prevent runtime pm from autosuspending during initialization
e587b8b270 xhci: make warnings greppable
25cd2882e2 usb/xhci: Change how we indicate a host supports Link PM.

Sarah Sharp
Greg Kroah-Hartman 2014-03-07 12:53:41 -08:00
commit c9050b6494
26 changed files with 1186 additions and 435 deletions


@ -9063,8 +9063,7 @@ S: Maintained
F: drivers/net/wireless/ath/ar5523/
USB ATTACHED SCSI
M: Matthew Wilcox <willy@linux.intel.com>
M: Sarah Sharp <sarah.a.sharp@linux.intel.com>
M: Hans de Goede <hdegoede@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
L: linux-usb@vger.kernel.org
L: linux-scsi@vger.kernel.org


@ -10,7 +10,6 @@
#define USB_MAXALTSETTING 128 /* Hard limit */
#define USB_MAXENDPOINTS 30 /* Hard limit */
#define USB_MAXCONFIG 8 /* Arbitrary limit */


@ -769,6 +769,88 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
return ret;
}
static struct usb_host_endpoint *ep_to_host_endpoint(struct usb_device *dev,
unsigned char ep)
{
if (ep & USB_ENDPOINT_DIR_MASK)
return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK];
else
return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK];
}
static int parse_usbdevfs_streams(struct dev_state *ps,
struct usbdevfs_streams __user *streams,
unsigned int *num_streams_ret,
unsigned int *num_eps_ret,
struct usb_host_endpoint ***eps_ret,
struct usb_interface **intf_ret)
{
unsigned int i, num_streams, num_eps;
struct usb_host_endpoint **eps;
struct usb_interface *intf = NULL;
unsigned char ep;
int ifnum, ret;
if (get_user(num_streams, &streams->num_streams) ||
get_user(num_eps, &streams->num_eps))
return -EFAULT;
if (num_eps < 1 || num_eps > USB_MAXENDPOINTS)
return -EINVAL;
/* The XHCI controller allows max 2 ^ 16 streams */
if (num_streams_ret && (num_streams < 2 || num_streams > 65536))
return -EINVAL;
eps = kmalloc(num_eps * sizeof(*eps), GFP_KERNEL);
if (!eps)
return -ENOMEM;
for (i = 0; i < num_eps; i++) {
if (get_user(ep, &streams->eps[i])) {
ret = -EFAULT;
goto error;
}
eps[i] = ep_to_host_endpoint(ps->dev, ep);
if (!eps[i]) {
ret = -EINVAL;
goto error;
}
/* usb_alloc/free_streams operate on an usb_interface */
ifnum = findintfep(ps->dev, ep);
if (ifnum < 0) {
ret = ifnum;
goto error;
}
if (i == 0) {
ret = checkintf(ps, ifnum);
if (ret < 0)
goto error;
intf = usb_ifnum_to_if(ps->dev, ifnum);
} else {
/* Verify all eps belong to the same interface */
if (ifnum != intf->altsetting->desc.bInterfaceNumber) {
ret = -EINVAL;
goto error;
}
}
}
if (num_streams_ret)
*num_streams_ret = num_streams;
*num_eps_ret = num_eps;
*eps_ret = eps;
*intf_ret = intf;
return 0;
error:
kfree(eps);
return ret;
}
static int match_devt(struct device *dev, void *data)
{
return dev->devt == (dev_t) (unsigned long) data;
@ -1143,6 +1225,9 @@ static int proc_setintf(struct dev_state *ps, void __user *arg)
return -EFAULT;
if ((ret = checkintf(ps, setintf.interface)))
return ret;
destroy_async_on_interface(ps, setintf.interface);
return usb_set_interface(ps->dev, setintf.interface,
setintf.altsetting);
}
@ -1205,6 +1290,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int i, ret, is_in, num_sgs = 0, ifnum = -1;
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
@ -1225,15 +1312,10 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (ret)
return ret;
}
if ((uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0) {
is_in = 1;
ep = ps->dev->ep_in[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
} else {
is_in = 0;
ep = ps->dev->ep_out[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
}
ep = ep_to_host_endpoint(ps->dev, uurb->endpoint);
if (!ep)
return -ENOENT;
is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0;
u = 0;
switch(uurb->type) {
@ -1258,7 +1340,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
le16_to_cpup(&dr->wIndex));
if (ret)
goto error;
uurb->number_of_packets = 0;
uurb->buffer_length = le16_to_cpup(&dr->wLength);
uurb->buffer += 8;
if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
@ -1288,17 +1369,17 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
goto interrupt_urb;
}
uurb->number_of_packets = 0;
num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE);
if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize)
num_sgs = 0;
if (ep->streams)
stream_id = uurb->stream_id;
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
uurb->number_of_packets = 0;
break;
case USBDEVFS_URB_TYPE_ISO:
@ -1308,15 +1389,16 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
if (!usb_endpoint_xfer_isoc(&ep->desc))
return -EINVAL;
number_of_packets = uurb->number_of_packets;
isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
uurb->number_of_packets;
number_of_packets;
if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
ret = -EFAULT;
goto error;
}
for (totlen = u = 0; u < uurb->number_of_packets; u++) {
for (totlen = u = 0; u < number_of_packets; u++) {
/*
* arbitrary limit need for USB 3.0
* bMaxBurst (0~15 allowed, 1~16 packets)
@ -1347,7 +1429,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
ret = -EFAULT;
goto error;
}
as = alloc_async(uurb->number_of_packets);
as = alloc_async(number_of_packets);
if (!as) {
ret = -ENOMEM;
goto error;
@ -1441,7 +1523,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = uurb->number_of_packets;
as->urb->number_of_packets = number_of_packets;
as->urb->stream_id = stream_id;
if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
ps->dev->speed == USB_SPEED_HIGH)
as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
@ -1449,7 +1532,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->interval = ep->desc.bInterval;
as->urb->context = as;
as->urb->complete = async_completed;
for (totlen = u = 0; u < uurb->number_of_packets; u++) {
for (totlen = u = 0; u < number_of_packets; u++) {
as->urb->iso_frame_desc[u].offset = totlen;
as->urb->iso_frame_desc[u].length = isopkt[u].length;
totlen += isopkt[u].length;
@ -1999,6 +2082,45 @@ static int proc_disconnect_claim(struct dev_state *ps, void __user *arg)
return claimintf(ps, dc.interface);
}
static int proc_alloc_streams(struct dev_state *ps, void __user *arg)
{
unsigned num_streams, num_eps;
struct usb_host_endpoint **eps;
struct usb_interface *intf;
int r;
r = parse_usbdevfs_streams(ps, arg, &num_streams, &num_eps,
&eps, &intf);
if (r)
return r;
destroy_async_on_interface(ps,
intf->altsetting[0].desc.bInterfaceNumber);
r = usb_alloc_streams(intf, eps, num_eps, num_streams, GFP_KERNEL);
kfree(eps);
return r;
}
static int proc_free_streams(struct dev_state *ps, void __user *arg)
{
unsigned num_eps;
struct usb_host_endpoint **eps;
struct usb_interface *intf;
int r;
r = parse_usbdevfs_streams(ps, arg, NULL, &num_eps, &eps, &intf);
if (r)
return r;
destroy_async_on_interface(ps,
intf->altsetting[0].desc.bInterfaceNumber);
r = usb_free_streams(intf, eps, num_eps, GFP_KERNEL);
kfree(eps);
return r;
}
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
@ -2175,6 +2297,12 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
case USBDEVFS_DISCONNECT_CLAIM:
ret = proc_disconnect_claim(ps, p);
break;
case USBDEVFS_ALLOC_STREAMS:
ret = proc_alloc_streams(ps, p);
break;
case USBDEVFS_FREE_STREAMS:
ret = proc_free_streams(ps, p);
break;
}
usb_unlock_device(dev);
if (ret >= 0)


@ -400,8 +400,9 @@ static int usb_unbind_interface(struct device *dev)
{
struct usb_driver *driver = to_usb_driver(dev->driver);
struct usb_interface *intf = to_usb_interface(dev);
struct usb_host_endpoint *ep, **eps = NULL;
struct usb_device *udev;
int error, r, lpm_disable_error;
int i, j, error, r, lpm_disable_error;
intf->condition = USB_INTERFACE_UNBINDING;
@ -425,6 +426,26 @@ static int usb_unbind_interface(struct device *dev)
driver->disconnect(intf);
usb_cancel_queued_reset(intf);
/* Free streams */
for (i = 0, j = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
ep = &intf->cur_altsetting->endpoint[i];
if (ep->streams == 0)
continue;
if (j == 0) {
eps = kmalloc(USB_MAXENDPOINTS * sizeof(void *),
GFP_KERNEL);
if (!eps) {
dev_warn(dev, "oom, leaking streams\n");
break;
}
}
eps[j++] = ep;
}
if (j) {
usb_free_streams(intf, eps, j, GFP_KERNEL);
kfree(eps);
}
/* Reset other interface state.
* We cannot do a Set-Interface if the device is suspended or
* if it is prepared for a system sleep (since installing a new


@ -2049,7 +2049,7 @@ int usb_alloc_streams(struct usb_interface *interface,
{
struct usb_hcd *hcd;
struct usb_device *dev;
int i;
int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
@ -2058,13 +2058,24 @@ int usb_alloc_streams(struct usb_interface *interface,
if (dev->speed != USB_SPEED_SUPER)
return -EINVAL;
for (i = 0; i < num_eps; i++) {
/* Streams only apply to bulk endpoints. */
for (i = 0; i < num_eps; i++)
if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
return -EINVAL;
/* Re-alloc is not allowed */
if (eps[i]->streams)
return -EINVAL;
}
return hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
ret = hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
num_streams, mem_flags);
if (ret < 0)
return ret;
for (i = 0; i < num_eps; i++)
eps[i]->streams = ret;
return ret;
}
EXPORT_SYMBOL_GPL(usb_alloc_streams);
@ -2078,8 +2089,7 @@ EXPORT_SYMBOL_GPL(usb_alloc_streams);
* Reverts a group of bulk endpoints back to not using stream IDs.
* Can fail if we are given bad arguments, or HCD is broken.
*
* Return: On success, the number of allocated streams. On failure, a negative
* error code.
* Return: 0 on success. On failure, a negative error code.
*/
int usb_free_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
@ -2087,19 +2097,26 @@ int usb_free_streams(struct usb_interface *interface,
{
struct usb_hcd *hcd;
struct usb_device *dev;
int i;
int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
if (dev->speed != USB_SPEED_SUPER)
return -EINVAL;
/* Streams only apply to bulk endpoints. */
/* Double-free is not allowed */
for (i = 0; i < num_eps; i++)
if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
if (!eps[i] || !eps[i]->streams)
return -EINVAL;
return hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
ret = hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
if (ret < 0)
return ret;
for (i = 0; i < num_eps; i++)
eps[i]->streams = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_free_streams);
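
For orientation, here is a minimal sketch of how a class driver might use the usb_alloc_streams()/usb_free_streams() helpers changed above, together with urb->stream_id. The function names, endpoint pointers, and stream count are illustrative, not taken from this series, and error handling is trimmed.

/*
 * Illustrative only: a hypothetical driver allocating bulk streams on
 * two endpoints of an interface, tagging an URB with a stream ID, and
 * freeing the streams again.
 */
#include <linux/kernel.h>
#include <linux/usb.h>

static int example_enable_streams(struct usb_interface *intf,
				  struct usb_host_endpoint *ep_in,
				  struct usb_host_endpoint *ep_out)
{
	struct usb_host_endpoint *eps[] = { ep_in, ep_out };
	int num_streams;

	/*
	 * Both endpoints must be bulk and the device SuperSpeed; on
	 * success this returns the number of streams actually
	 * allocated, on failure a negative error code.
	 */
	num_streams = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps),
					16, GFP_KERNEL);
	return num_streams < 0 ? num_streams : 0;
}

static int example_queue_on_stream(struct urb *urb, unsigned int stream_id)
{
	/* Direct this bulk URB to one of the allocated streams. */
	urb->stream_id = stream_id;
	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void example_disable_streams(struct usb_interface *intf,
				    struct usb_host_endpoint *ep_in,
				    struct usb_host_endpoint *ep_out)
{
	struct usb_host_endpoint *eps[] = { ep_in, ep_out };

	/* Returns 0 on success, per the updated kerneldoc above. */
	usb_free_streams(intf, eps, ARRAY_SIZE(eps), GFP_KERNEL);
}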


@ -141,19 +141,27 @@ static int usb_device_supports_lpm(struct usb_device *udev)
return 0;
}
/* All USB 3.0 must support LPM, but we need their max exit latency
* information from the SuperSpeed Extended Capabilities BOS descriptor.
/*
* According to the USB 3.0 spec, all USB 3.0 devices must support LPM.
* However, there are some that don't, and they set the U1/U2 exit
* latencies to zero.
*/
if (!udev->bos->ss_cap) {
dev_warn(&udev->dev, "No LPM exit latency info found. "
"Power management will be impacted.\n");
dev_info(&udev->dev, "No LPM exit latency info found, disabling LPM.\n");
return 0;
}
if (udev->parent->lpm_capable)
return 1;
dev_warn(&udev->dev, "Parent hub missing LPM exit latency info. "
"Power management will be impacted.\n");
if (udev->bos->ss_cap->bU1devExitLat == 0 &&
udev->bos->ss_cap->bU2DevExitLat == 0) {
if (udev->parent)
dev_info(&udev->dev, "LPM exit latency is zeroed, disabling LPM.\n");
else
dev_info(&udev->dev, "We don't know the algorithms for LPM for this host, disabling LPM.\n");
return 0;
}
if (!udev->parent || udev->parent->lpm_capable)
return 1;
return 0;
}
@ -4750,6 +4758,8 @@ static void hub_events(void)
/* deal with port status changes */
for (i = 1; i <= hdev->maxchild; i++) {
struct usb_device *udev = hub->ports[i - 1]->child;
if (test_bit(i, hub->busy_bits))
continue;
connect_change = test_bit(i, hub->change_bits);
@ -4848,8 +4858,6 @@ static void hub_events(void)
*/
if (hub_port_warm_reset_required(hub, portstatus)) {
int status;
struct usb_device *udev =
hub->ports[i - 1]->child;
dev_dbg(hub_dev, "warm reset port %d\n", i);
if (!udev ||
@ -4866,6 +4874,24 @@ static void hub_events(void)
usb_unlock_device(udev);
connect_change = 0;
}
/*
* On disconnect USB3 protocol ports transit from U0 to
* SS.Inactive to Rx.Detect. If this happens a warm-
* reset is not needed, but a (re)connect may happen
* before khubd runs and sees the disconnect, and the
* device may be an unknown state.
*
* If the port went through SS.Inactive without khubd
* seeing it the C_LINK_STATE change flag will be set,
* and we reset the dev to put it in a known state.
*/
} else if (udev && hub_is_superspeed(hub->hdev) &&
(portchange & USB_PORT_STAT_C_LINK_STATE) &&
(portstatus & USB_PORT_STAT_CONNECTION)) {
usb_lock_device(udev);
usb_reset_device(udev);
usb_unlock_device(udev);
connect_change = 0;
}
if (connect_change)
@ -5123,7 +5149,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct usb_device_descriptor descriptor = udev->descriptor;
struct usb_host_bos *bos;
int i, ret = 0;
int i, j, ret = 0;
int port1 = udev->portnum;
if (udev->state == USB_STATE_NOTATTACHED ||
@ -5249,6 +5275,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
ret);
goto re_enumerate;
}
/* Resetting also frees any allocated streams */
for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++)
intf->cur_altsetting->endpoint[j].streams = 0;
}
done:


@ -1293,8 +1293,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
struct usb_interface *iface;
struct usb_host_interface *alt;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int ret;
int manual = 0;
int i, ret, manual = 0;
unsigned int epaddr;
unsigned int pipe;
@ -1329,6 +1328,10 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
/* Changing alt-setting also frees any allocated streams */
for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++)
iface->cur_altsetting->endpoint[i].streams = 0;
ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
if (ret < 0) {
dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",


@ -732,9 +732,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* Set the U1 and U2 exit latencies. */
memcpy(buf, &usb_bos_descriptor,
USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
temp = readl(&xhci->cap_regs->hcs_params3);
buf[12] = HCS_U1_LATENCY(temp);
put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
}
/* Indicate whether the host has LTM support. */
temp = readl(&xhci->cap_regs->hcc_params);


@ -149,14 +149,140 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
}
}
/*
* We need a radix tree for mapping physical addresses of TRBs to which stream
* ID they belong to. We need to do this because the host controller won't tell
* us which stream ring the TRB came from. We could store the stream ID in an
* event data TRB, but that doesn't help us for the cancellation case, since the
* endpoint may stop before it reaches that event data TRB.
*
* The radix tree maps the upper portion of the TRB DMA address to a ring
* segment that has the same upper portion of DMA addresses. For example, say I
* have segments of size 1KB, that are always 1KB aligned. A segment may
* start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
* key to the stream ID is 0x43244. I can use the DMA address of the TRB to
* pass the radix tree a key to get the right stream ID:
*
* 0x10c90fff >> 10 = 0x43243
* 0x10c912c0 >> 10 = 0x43244
* 0x10c91400 >> 10 = 0x43245
*
* Obviously, only those TRBs with DMA addresses that are within the segment
* will make the radix tree return the stream ID for that ring.
*
* Caveats for the radix tree:
*
* The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
* unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
* 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
* key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
* PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
* extended systems (where the DMA address can be bigger than 32-bits),
* if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
struct xhci_ring *ring,
struct xhci_segment *seg,
gfp_t mem_flags)
{
unsigned long key;
int ret;
key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
/* Skip any segments that were already added. */
if (radix_tree_lookup(trb_address_map, key))
return 0;
ret = radix_tree_maybe_preload(mem_flags);
if (ret)
return ret;
ret = radix_tree_insert(trb_address_map,
key, ring);
radix_tree_preload_end();
return ret;
}
static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
struct xhci_segment *seg)
{
unsigned long key;
key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
if (radix_tree_lookup(trb_address_map, key))
radix_tree_delete(trb_address_map, key);
}
static int xhci_update_stream_segment_mapping(
struct radix_tree_root *trb_address_map,
struct xhci_ring *ring,
struct xhci_segment *first_seg,
struct xhci_segment *last_seg,
gfp_t mem_flags)
{
struct xhci_segment *seg;
struct xhci_segment *failed_seg;
int ret;
if (WARN_ON_ONCE(trb_address_map == NULL))
return 0;
seg = first_seg;
do {
ret = xhci_insert_segment_mapping(trb_address_map,
ring, seg, mem_flags);
if (ret)
goto remove_streams;
if (seg == last_seg)
return 0;
seg = seg->next;
} while (seg != first_seg);
return 0;
remove_streams:
failed_seg = seg;
seg = first_seg;
do {
xhci_remove_segment_mapping(trb_address_map, seg);
if (seg == failed_seg)
return ret;
seg = seg->next;
} while (seg != first_seg);
return ret;
}
static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
struct xhci_segment *seg;
if (WARN_ON_ONCE(ring->trb_address_map == NULL))
return;
seg = ring->first_seg;
do {
xhci_remove_segment_mapping(ring->trb_address_map, seg);
seg = seg->next;
} while (seg != ring->first_seg);
}
static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
ring->first_seg, ring->last_seg, mem_flags);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
if (!ring)
return;
if (ring->first_seg)
if (ring->first_seg) {
if (ring->type == TYPE_STREAM)
xhci_remove_stream_mapping(ring);
xhci_free_segments_for_ring(xhci, ring->first_seg);
}
kfree(ring);
}
@ -349,6 +475,21 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (ret)
return -ENOMEM;
if (ring->type == TYPE_STREAM)
ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
ring, first, last, flags);
if (ret) {
struct xhci_segment *next;
do {
next = first->next;
xhci_segment_free(xhci, first);
if (first == last)
break;
first = next;
} while (true);
return ret;
}
xhci_link_rings(xhci, ring, first, last, num_segs);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
@ -434,12 +575,12 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
dma_free_coherent(dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
if (size > MEDIUM_STREAM_ARRAY_SIZE)
dma_free_coherent(dev, size,
stream_ctx, dma);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_free(xhci->small_streams_pool,
stream_ctx, dma);
else
@ -462,12 +603,12 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
gfp_t mem_flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
return dma_alloc_coherent(dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
if (size > MEDIUM_STREAM_ARRAY_SIZE)
return dma_alloc_coherent(dev, size,
dma, mem_flags);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
else if (size <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_alloc(xhci->small_streams_pool,
mem_flags, dma);
else
@ -510,36 +651,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
* The number of stream contexts in the stream context array may be bigger than
* the number of streams the driver wants to use. This is because the number of
* stream context array entries must be a power of two.
*
* We need a radix tree for mapping physical addresses of TRBs to which stream
* ID they belong to. We need to do this because the host controller won't tell
* us which stream ring the TRB came from. We could store the stream ID in an
* event data TRB, but that doesn't help us for the cancellation case, since the
* endpoint may stop before it reaches that event data TRB.
*
* The radix tree maps the upper portion of the TRB DMA address to a ring
* segment that has the same upper portion of DMA addresses. For example, say I
* have segments of size 1KB, that are always 64-byte aligned. A segment may
* start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
* key to the stream ID is 0x43244. I can use the DMA address of the TRB to
* pass the radix tree a key to get the right stream ID:
*
* 0x10c90fff >> 10 = 0x43243
* 0x10c912c0 >> 10 = 0x43244
* 0x10c91400 >> 10 = 0x43245
*
* Obviously, only those TRBs with DMA addresses that are within the segment
* will make the radix tree return the stream ID for that ring.
*
* Caveats for the radix tree:
*
* The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
* unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
* 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
* key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
* PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
* extended systems (where the DMA address can be bigger than 32-bits),
* if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
@ -548,7 +659,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info;
u32 cur_stream;
struct xhci_ring *cur_ring;
unsigned long key;
u64 addr;
int ret;
@ -603,6 +713,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
if (!cur_ring)
goto cleanup_rings;
cur_ring->stream_id = cur_stream;
cur_ring->trb_address_map = &stream_info->trb_address_map;
/* Set deq ptr, cycle bit, and stream context type */
addr = cur_ring->first_seg->dma |
SCT_FOR_CTX(SCT_PRI_TR) |
@ -612,10 +723,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
key = (unsigned long)
(cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
ret = radix_tree_insert(&stream_info->trb_address_map,
key, cur_ring);
ret = xhci_update_stream_mapping(cur_ring, mem_flags);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
@ -635,9 +743,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
@ -698,7 +803,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
{
int cur_stream;
struct xhci_ring *cur_ring;
dma_addr_t addr;
if (!stream_info)
return;
@ -707,9 +811,6 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
@ -1711,7 +1812,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
xhci->cmd_ring_reserved_trbs = 0;
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
@ -1776,6 +1876,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
no_bw:
xhci->cmd_ring_reserved_trbs = 0;
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
xhci->num_active_eps = 0;
@ -2274,11 +2375,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
* however, the command ring segment needs 64-byte aligned segments,
* so we pick the greater alignment need.
* however, the command ring segment needs 64-byte aligned segments
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE, 64, xhci->page_size);
TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,


@ -190,6 +190,10 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct usb_hcd *hcd;
driver = (struct hc_driver *)id->driver_data;
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);
/* Register the USB 2.0 roothub.
* FIXME: USB core must know to register the USB 2.0 roothub first.
* This is sort of silly, because we could just set the HCD driver flags
@ -199,7 +203,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
retval = usb_hcd_pci_probe(dev, id);
if (retval)
return retval;
goto put_runtime_pm;
/* USB 2.0 roothub is stored in the PCI device now. */
hcd = dev_get_drvdata(&dev->dev);
@ -222,11 +226,11 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto put_usb3_hcd;
/* Roothub already marked as USB 3.0 speed */
/* We know the LPM timeout algorithms for this host, let the USB core
* enable and disable LPM for devices under the USB 3.0 roothub.
*/
if (xhci->quirks & XHCI_LPM_SUPPORT)
hcd_to_bus(xhci->shared_hcd)->root_hub->lpm_capable = 1;
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
return 0;
@ -234,6 +238,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
usb_put_hcd(xhci->shared_hcd);
dealloc_usb2_hcd:
usb_hcd_pci_remove(dev);
put_runtime_pm:
pm_runtime_put_noidle(&dev->dev);
return retval;
}


@ -158,6 +158,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
*/
*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;


@ -546,9 +546,9 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
@ -573,8 +573,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding endpoint context");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
/* 4.6.9 the css flag is written to the stream context for streams */
if (ep->ep_state & EP_HAS_STREAMS) {
struct xhci_stream_ctx *ctx =
&ep->stream_info->stream_ctx_array[stream_id];
state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
} else {
struct xhci_ep_ctx *ep_ctx
= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
}
state->new_deq_ptr = cur_td->last_trb;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@ -892,6 +900,57 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
/* Return to the event handler with xhci->lock re-acquired */
}
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
struct xhci_td *cur_td;
while (!list_empty(&ring->td_list)) {
cur_td = list_first_entry(&ring->td_list,
struct xhci_td, td_list);
list_del_init(&cur_td->td_list);
if (!list_empty(&cur_td->cancelled_td_list))
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
int slot_id, int ep_index)
{
struct xhci_td *cur_td;
struct xhci_virt_ep *ep;
struct xhci_ring *ring;
ep = &xhci->devs[slot_id]->eps[ep_index];
if ((ep->ep_state & EP_HAS_STREAMS) ||
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
for (stream_id = 0; stream_id < ep->stream_info->num_streams;
stream_id++) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u, stream %u",
slot_id, ep_index, stream_id + 1);
xhci_kill_ring_urbs(xhci,
ep->stream_info->stream_rings[stream_id]);
}
} else {
ring = ep->ring;
if (!ring)
return;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u",
slot_id, ep_index);
xhci_kill_ring_urbs(xhci, ring);
}
while (!list_empty(&ep->cancelled_td_list)) {
cur_td = list_first_entry(&ep->cancelled_td_list,
struct xhci_td, cancelled_td_list);
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@ -915,9 +974,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
struct xhci_hcd *xhci;
struct xhci_virt_ep *ep;
struct xhci_virt_ep *temp_ep;
struct xhci_ring *ring;
struct xhci_td *cur_td;
int ret, i, j;
unsigned long flags;
@ -974,34 +1030,8 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
for (i = 0; i < MAX_HC_SLOTS; i++) {
if (!xhci->devs[i])
continue;
for (j = 0; j < 31; j++) {
temp_ep = &xhci->devs[i]->eps[j];
ring = temp_ep->ring;
if (!ring)
continue;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, "
"ep index %u", i, j);
while (!list_empty(&ring->td_list)) {
cur_td = list_first_entry(&ring->td_list,
struct xhci_td,
td_list);
list_del_init(&cur_td->td_list);
if (!list_empty(&cur_td->cancelled_td_list))
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
-ESHUTDOWN);
}
while (!list_empty(&temp_ep->cancelled_td_list)) {
cur_td = list_first_entry(
&temp_ep->cancelled_td_list,
struct xhci_td,
cancelled_td_list);
list_del_init(&cur_td->cancelled_td_list);
xhci_giveback_urb_in_irq(xhci, cur_td,
-ESHUTDOWN);
}
}
for (j = 0; j < 31; j++)
xhci_kill_endpoint_urbs(xhci, i, j);
}
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@ -1073,17 +1103,18 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int stream_id;
struct xhci_ring *ep_ring;
struct xhci_virt_device *dev;
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
ep = &dev->eps[ep_index];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN Set TR deq ptr command for "
"freed stream ID %u\n",
xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
/* XXX: Harmless??? */
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
@ -1099,12 +1130,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
switch (cmd_comp_code) {
case COMP_TRB_ERR:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
"of stream ID configuration\n");
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
break;
case COMP_CTX_STATE:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
"to incorrect slot or ep state.\n");
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
ep_state = le32_to_cpu(ep_ctx->ep_info);
ep_state &= EP_STATE_MASK;
slot_state = le32_to_cpu(slot_ctx->dev_state);
@ -1114,12 +1143,11 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
slot_state, ep_state);
break;
case COMP_EBADSLT:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
"slot %u was not enabled.\n", slot_id);
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
slot_id);
break;
default:
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
"completion code of %u.\n",
xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
cmd_comp_code);
break;
}
@ -1130,23 +1158,28 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
* cancelling URBs, which might not be an error...
*/
} else {
u64 deq;
/* 4.6.10 deq ptr is written to the stream ctx for streams */
if (ep->ep_state & EP_HAS_STREAMS) {
struct xhci_stream_ctx *ctx =
&ep->stream_info->stream_ctx_array[stream_id];
deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
} else {
deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Successful Set TR Deq Ptr cmd, deq = @%08llx",
le64_to_cpu(ep_ctx->deq));
if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
dev->eps[ep_index].queued_deq_ptr) ==
(le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
"Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
ep->queued_deq_ptr) == deq) {
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
update_ring_for_set_deq_completion(xhci, dev,
ep_ring, ep_index);
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq "
"Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
dev->eps[ep_index].queued_deq_seg,
dev->eps[ep_index].queued_deq_ptr);
ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
@ -4070,6 +4103,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
u32 trb_sct = 0;
u32 type = TRB_TYPE(TRB_SET_DEQ);
struct xhci_virt_ep *ep;
@ -4088,7 +4122,9 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
}
ep->queued_deq_seg = deq_seg;
ep->queued_deq_ptr = deq_ptr;
return queue_command(xhci, lower_32_bits(addr) | cycle_state,
if (stream_id)
trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
}


@ -390,6 +390,10 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
}
legacy_irq:
if (!strlen(hcd->irq_descr))
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
/* fall back to legacy interrupt*/
ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
hcd->irq_descr, hcd);
@ -2678,6 +2682,20 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
return ret;
}
static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
struct xhci_virt_device *vdev, int i)
{
struct xhci_virt_ep *ep = &vdev->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
xhci_get_endpoint_address(i));
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;
}
}
/* Called after one or more calls to xhci_add_endpoint() or
* xhci_drop_endpoint(). If this call fails, the USB core is expected
* to call xhci_reset_bandwidth().
@ -2742,8 +2760,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
/* Free any rings that were dropped, but not changed. */
for (i = 1; i < 31; ++i) {
if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
!(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
!(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
}
}
xhci_zero_in_ctx(xhci, virt_dev);
/*
@ -2759,6 +2779,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (virt_dev->eps[i].ring) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
}
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
}
@ -2954,7 +2975,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
return -EINVAL;
if (ep->ss_ep_comp.bmAttributes == 0) {
if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",
ep->desc.bEndpointAddress);
@ -3121,6 +3142,12 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
num_streams);
/* MaxPSASize value 0 (2 streams) means streams are not supported */
if (HCC_MAX_PSA(xhci->hcc_params) < 4) {
xhci_dbg(xhci, "xHCI controller does not support streams.\n");
return -ENOSYS;
}
config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
@ -3519,6 +3546,8 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_virt_ep *ep = &virt_dev->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
xhci_get_endpoint_address(i));
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;


@ -703,6 +703,7 @@ struct xhci_ep_ctx {
/* deq bitmasks */
#define EP_CTX_CYCLE_MASK (1 << 0)
#define SCTX_DEQ_MASK (~0xfL)
/**
@ -1118,9 +1119,10 @@ enum xhci_setup_dev {
#define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23)
#define LAST_EP_INDEX 30
/* Set TR Dequeue Pointer command TRB fields */
/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
#define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16)
#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
/* Port Status Change Event TRB fields */
@ -1341,6 +1343,7 @@ struct xhci_ring {
unsigned int num_trbs_free_temp;
enum xhci_ring_type type;
bool last_td_was_short;
struct radix_tree_root *trb_address_map;
};
struct xhci_erst_entry {


@ -204,7 +204,7 @@ config USB_STORAGE_ENE_UB6250
config USB_UAS
tristate "USB Attached SCSI"
depends on SCSI && BROKEN
depends on SCSI && USB_STORAGE
help
The USB Attached SCSI protocol is supported by some USB
storage devices. It permits higher performance by supporting


@ -0,0 +1,96 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "usb.h"
static int uas_is_interface(struct usb_host_interface *intf)
{
return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE &&
intf->desc.bInterfaceSubClass == USB_SC_SCSI &&
intf->desc.bInterfaceProtocol == USB_PR_UAS);
}
static int uas_isnt_supported(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
dev_warn(&udev->dev, "The driver for the USB controller %s does not "
"support scatter-gather which is\n",
hcd->driver->description);
dev_warn(&udev->dev, "required by the UAS driver. Please try an"
"alternative USB controller if you wish to use UAS.\n");
return -ENODEV;
}
static int uas_find_uas_alt_setting(struct usb_interface *intf)
{
int i;
struct usb_device *udev = interface_to_usbdev(intf);
int sg_supported = udev->bus->sg_tablesize != 0;
for (i = 0; i < intf->num_altsetting; i++) {
struct usb_host_interface *alt = &intf->altsetting[i];
if (uas_is_interface(alt)) {
if (!sg_supported)
return uas_isnt_supported(udev);
return alt->desc.bAlternateSetting;
}
}
return -ENODEV;
}
static int uas_find_endpoints(struct usb_host_interface *alt,
struct usb_host_endpoint *eps[])
{
struct usb_host_endpoint *endpoint = alt->endpoint;
unsigned i, n_endpoints = alt->desc.bNumEndpoints;
for (i = 0; i < n_endpoints; i++) {
unsigned char *extra = endpoint[i].extra;
int len = endpoint[i].extralen;
while (len >= 3) {
if (extra[1] == USB_DT_PIPE_USAGE) {
unsigned pipe_id = extra[2];
if (pipe_id > 0 && pipe_id < 5)
eps[pipe_id - 1] = &endpoint[i];
break;
}
len -= extra[0];
extra += extra[0];
}
}
if (!eps[0] || !eps[1] || !eps[2] || !eps[3])
return -ENODEV;
return 0;
}
static int uas_use_uas_driver(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_host_endpoint *eps[4] = { };
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
unsigned long flags = id->driver_info;
int r, alt;
usb_stor_adjust_quirks(udev, &flags);
if (flags & US_FL_IGNORE_UAS)
return 0;
if (udev->speed >= USB_SPEED_SUPER && !hcd->can_do_streams)
return 0;
alt = uas_find_uas_alt_setting(intf);
if (alt < 0)
return 0;
r = uas_find_endpoints(&intf->altsetting[alt], eps);
if (r < 0)
return 0;
return 1;
}

File diff suppressed because it is too large.


@ -2086,6 +2086,11 @@ UNUSUAL_DEV( 0xed10, 0x7636, 0x0001, 0x0001,
"Digital MP3 Audio Player",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
/* Unusual uas devices */
#if IS_ENABLED(CONFIG_USB_UAS)
#include "unusual_uas.h"
#endif
/* Control/Bulk transport for all SubClass values */
USUAL_DEV(USB_SC_RBC, USB_PR_CB),
USUAL_DEV(USB_SC_8020, USB_PR_CB),


@ -0,0 +1,52 @@
/* Driver for USB Attached SCSI devices - Unusual Devices File
*
* (c) 2013 Hans de Goede <hdegoede@redhat.com>
*
* Based on the same file for the usb-storage driver, which is:
* (c) 2000-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
* (c) 2000 Adam J. Richter (adam@yggdrasil.com), Yggdrasil Computing, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* IMPORTANT NOTE: This file must be included in another file which defines
* a UNUSUAL_DEV macro before this file is included.
*/
/*
* If you edit this file, please try to keep it sorted first by VendorID,
* then by ProductID.
*
* If you want to add an entry for this file, be sure to include the
* following information:
* - a patch that adds the entry for your device, including your
* email address right above the entry (plus maybe a brief
* explanation of the reason for the entry),
* - lsusb -v output for the device
* Send your submission to Hans de Goede <hdegoede@redhat.com>
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/
/*
* This is an example entry for the US_FL_IGNORE_UAS flag. Once we have an
* actual entry using US_FL_IGNORE_UAS this entry should be removed.
*
* UNUSUAL_DEV( 0xabcd, 0x1234, 0x0100, 0x0100,
* "Example",
* "Storage with broken UAS",
* USB_SC_DEVICE, USB_PR_DEVICE, NULL,
* US_FL_IGNORE_UAS),
*/


@ -72,6 +72,10 @@
#include "sierra_ms.h"
#include "option_ms.h"
#if IS_ENABLED(CONFIG_USB_UAS)
#include "uas-detect.h"
#endif
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
@ -459,14 +463,14 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
#define TOLOWER(x) ((x) | 0x20)
/* Adjust device flags based on the "quirks=" module parameter */
static void adjust_quirks(struct us_data *us)
void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
{
char *p;
u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
u16 vid = le16_to_cpu(udev->descriptor.idVendor);
u16 pid = le16_to_cpu(udev->descriptor.idProduct);
unsigned f = 0;
unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
US_FL_FIX_CAPACITY |
US_FL_FIX_CAPACITY | US_FL_IGNORE_UAS |
US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
@ -537,14 +541,18 @@ static void adjust_quirks(struct us_data *us)
case 's':
f |= US_FL_SINGLE_LUN;
break;
case 'u':
f |= US_FL_IGNORE_UAS;
break;
case 'w':
f |= US_FL_NO_WP_DETECT;
break;
/* Ignore unrecognized flag characters */
}
}
us->fflags = (us->fflags & ~mask) | f;
*fflags = (*fflags & ~mask) | f;
}
EXPORT_SYMBOL_GPL(usb_stor_adjust_quirks);
/* Get the unusual_devs entries and the string descriptors */
static int get_device_info(struct us_data *us, const struct usb_device_id *id,
@ -564,7 +572,7 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
idesc->bInterfaceProtocol :
unusual_dev->useTransport;
us->fflags = id->driver_info;
adjust_quirks(us);
usb_stor_adjust_quirks(us->pusb_dev, &us->fflags);
if (us->fflags & US_FL_IGNORE_DEVICE) {
dev_info(pdev, "device ignored\n");
@ -1035,6 +1043,12 @@ static int storage_probe(struct usb_interface *intf,
int result;
int size;
/* If uas is enabled and this device can do uas then ignore it. */
#if IS_ENABLED(CONFIG_USB_UAS)
if (uas_use_uas_driver(intf, id))
return -ENXIO;
#endif
/*
* If the device isn't standard (is handled by a subdriver
* module) then don't accept it.


@ -201,4 +201,7 @@ extern int usb_stor_probe1(struct us_data **pus,
extern int usb_stor_probe2(struct us_data *us);
extern void usb_stor_disconnect(struct usb_interface *intf);
extern void usb_stor_adjust_quirks(struct usb_device *dev,
unsigned long *fflags);
#endif


@ -57,6 +57,7 @@ struct ep_device;
* @extra: descriptors following this endpoint in the configuration
* @extralen: how many bytes of "extra" are valid
* @enabled: URBs may be submitted to this endpoint
* @streams: number of USB-3 streams allocated on the endpoint
*
* USB requests are always queued to a given endpoint, identified by a
* descriptor within an active interface in a given USB configuration.
@ -71,6 +72,7 @@ struct usb_host_endpoint {
unsigned char *extra; /* Extra descriptors */
int extralen;
int enabled;
int streams;
};
/* host-side wrapper for one interface setting's parsed descriptors */
@ -202,6 +204,8 @@ static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
struct usb_interface *usb_get_intf(struct usb_interface *intf);
void usb_put_intf(struct usb_interface *intf);
/* Hard limit */
#define USB_MAXENDPOINTS 30
/* this maximum is arbitrary */
#define USB_MAXINTERFACES 32
#define USB_MAXIADS (USB_MAXINTERFACES/2)


@ -143,6 +143,7 @@ struct usb_hcd {
unsigned authorized_default:1;
unsigned has_tt:1; /* Integrated TT in root hub */
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
unsigned int irq; /* irq allocated */
void __iomem *regs; /* device memory/io */


@ -9,7 +9,7 @@ struct iu {
__u8 iu_id;
__u8 rsvd1;
__be16 tag;
};
} __attribute__((__packed__));
enum {
IU_ID_COMMAND = 0x01,
@ -52,7 +52,7 @@ struct command_iu {
__u8 rsvd7;
struct scsi_lun lun;
__u8 cdb[16]; /* XXX: Overflow-checking tools may misunderstand */
};
} __attribute__((__packed__));
struct task_mgmt_iu {
__u8 iu_id;
@ -62,7 +62,7 @@ struct task_mgmt_iu {
__u8 rsvd2;
__be16 task_tag;
struct scsi_lun lun;
};
} __attribute__((__packed__));
/*
* Also used for the Read Ready and Write Ready IUs since they have the
@ -77,15 +77,15 @@ struct sense_iu {
__u8 rsvd7[7];
__be16 len;
__u8 sense[SCSI_SENSE_BUFFERSIZE];
};
} __attribute__((__packed__));
struct response_ui {
struct response_iu {
__u8 iu_id;
__u8 rsvd1;
__be16 tag;
__be16 add_response_info;
__u8 add_response_info[3];
__u8 response_code;
};
} __attribute__((__packed__));
struct usb_pipe_usage_descriptor {
__u8 bLength;


@ -67,8 +67,10 @@
/* Initial READ(10) (and others) must be retried */ \
US_FLAG(WRITE_CACHE, 0x00200000) \
/* Write Cache status is not available */ \
US_FLAG(NEEDS_CAP16, 0x00400000)
/* cannot handle READ_CAPACITY_10 */
US_FLAG(NEEDS_CAP16, 0x00400000) \
/* cannot handle READ_CAPACITY_10 */ \
US_FLAG(IGNORE_UAS, 0x00800000) \
/* Device advertises UAS but it is broken */
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };


@ -102,7 +102,10 @@ struct usbdevfs_urb {
int buffer_length;
int actual_length;
int start_frame;
int number_of_packets;
union {
int number_of_packets; /* Only used for isoc urbs */
unsigned int stream_id; /* Only used with bulk streams */
};
int error_count;
unsigned int signr; /* signal to be sent on completion,
or 0 if none should be sent. */
@ -144,6 +147,11 @@ struct usbdevfs_disconnect_claim {
char driver[USBDEVFS_MAXDRIVERNAME + 1];
};
struct usbdevfs_streams {
unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */
unsigned int num_eps;
unsigned char eps[0];
};
#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer)
#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
@ -176,5 +184,7 @@ struct usbdevfs_disconnect_claim {
#define USBDEVFS_RELEASE_PORT _IOR('U', 25, unsigned int)
#define USBDEVFS_GET_CAPABILITIES _IOR('U', 26, __u32)
#define USBDEVFS_DISCONNECT_CLAIM _IOR('U', 27, struct usbdevfs_disconnect_claim)
#define USBDEVFS_ALLOC_STREAMS _IOR('U', 28, struct usbdevfs_streams)
#define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams)
#endif /* _UAPI_LINUX_USBDEVICE_FS_H */
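
To illustrate the new usbfs interface added above, here is a minimal userspace sketch. The device node path, interface number, endpoint addresses, and stream count are made-up examples, and error checking is omitted.

/*
 * Illustrative use of USBDEVFS_ALLOC_STREAMS/USBDEVFS_FREE_STREAMS;
 * the device node, interface number and endpoints are examples.
 */
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	unsigned int ifnum = 0;
	struct usbdevfs_streams *streams;
	int fd = open("/dev/bus/usb/002/004", O_RDWR);

	ioctl(fd, USBDEVFS_CLAIMINTERFACE, &ifnum);

	/* Request 16 streams on bulk endpoints 0x81 (IN) and 0x02 (OUT). */
	streams = malloc(sizeof(*streams) + 2);
	streams->num_streams = 16;
	streams->num_eps = 2;
	streams->eps[0] = 0x81;
	streams->eps[1] = 0x02;
	ioctl(fd, USBDEVFS_ALLOC_STREAMS, streams);

	/*
	 * Bulk URBs submitted with USBDEVFS_SUBMITURB may now set the
	 * stream_id union member added above to pick a stream.
	 */

	/* num_streams is ignored when freeing. */
	ioctl(fd, USBDEVFS_FREE_STREAMS, streams);
	free(streams);
	return 0;
}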