USB: imx21-hcd: Fix isochronous endpoint idle

Release the hardware resources and reset the internal HCD state
associated with an isochronous endpoint when the last URB queued
for it completes.

Previously this was only done in the endpoint_disable() method,
causing usbtest 15 and 16 to hang when run twice in succession
without a disconnect.

Signed-off-by: Martin Fuzzey <mfuzzey@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
Martin Fuzzey 2010-10-01 00:21:48 +02:00 committed by Greg Kroah-Hartman
parent b2a068d058
commit 7a7e789642

View file

@ -390,15 +390,19 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
/* Endpoint now idle - release its ETD(s) or assign to queued request */ /* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv) static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{ {
int etd_num;
int i; int i;
for (i = 0; i < NUM_ISO_ETDS; i++) { for (i = 0; i < NUM_ISO_ETDS; i++) {
etd_num = ep_priv->etd[i]; int etd_num = ep_priv->etd[i];
struct etd_priv *etd;
if (etd_num < 0) if (etd_num < 0)
continue; continue;
etd = &imx21->etd[etd_num];
ep_priv->etd[i] = -1; ep_priv->etd[i] = -1;
free_dmem(imx21, etd); /* for isoc */
if (list_empty(&imx21->queue_for_etd)) { if (list_empty(&imx21->queue_for_etd)) {
free_etd(imx21, etd_num); free_etd(imx21, etd_num);
continue; continue;
@ -576,30 +580,43 @@ static struct ep_priv *alloc_isoc_ep(
int i; int i;
ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC); ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
if (ep_priv == NULL) if (!ep_priv)
return NULL; return NULL;
/* Allocate the ETDs */ for (i = 0; i < NUM_ISO_ETDS; i++)
for (i = 0; i < NUM_ISO_ETDS; i++) { ep_priv->etd[i] = -1;
ep_priv->etd[i] = alloc_etd(imx21);
if (ep_priv->etd[i] < 0) {
int j;
dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
for (j = 0; j < i; j++)
free_etd(imx21, ep_priv->etd[j]);
goto alloc_etd_failed;
}
imx21->etd[ep_priv->etd[i]].ep = ep;
}
INIT_LIST_HEAD(&ep_priv->td_list); INIT_LIST_HEAD(&ep_priv->td_list);
ep_priv->ep = ep; ep_priv->ep = ep;
ep->hcpriv = ep_priv; ep->hcpriv = ep_priv;
return ep_priv; return ep_priv;
}
static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
int i, j;
int etd_num;
/* Allocate the ETDs if required */
for (i = 0; i < NUM_ISO_ETDS; i++) {
if (ep_priv->etd[i] < 0) {
etd_num = alloc_etd(imx21);
if (etd_num < 0)
goto alloc_etd_failed;
ep_priv->etd[i] = etd_num;
imx21->etd[etd_num].ep = ep_priv->ep;
}
}
return 0;
alloc_etd_failed: alloc_etd_failed:
kfree(ep_priv); dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
return NULL; for (j = 0; j < i; j++) {
free_etd(imx21, ep_priv->etd[j]);
ep_priv->etd[j] = -1;
}
return -ENOMEM;
} }
static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd, static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
@ -639,6 +656,10 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
ep_priv = ep->hcpriv; ep_priv = ep->hcpriv;
} }
ret = alloc_isoc_etds(imx21, ep_priv);
if (ret)
goto alloc_etd_failed;
ret = usb_hcd_link_urb_to_ep(hcd, urb); ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret) if (ret)
goto link_failed; goto link_failed;
@ -718,6 +739,7 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
usb_hcd_unlink_urb_from_ep(hcd, urb); usb_hcd_unlink_urb_from_ep(hcd, urb);
link_failed: link_failed:
alloc_etd_failed:
alloc_ep_failed: alloc_ep_failed:
spin_unlock_irqrestore(&imx21->lock, flags); spin_unlock_irqrestore(&imx21->lock, flags);
kfree(urb_priv->isoc_td); kfree(urb_priv->isoc_td);