author    Bjørn Mork <bjorn@mork.no>  2013-12-20 14:07:24 +0100
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-12-20 12:06:46 -0800
commit    8dd5cd5395b90070d98149d0a94e5981a74cd2ec (patch)
tree      e4c61bc41c7b2c580fedc55056cfa74939500958 /drivers/usb/class/cdc-wdm.c
parent    bee53637e5836a7cc642ca036e44bd77caf7bc35 (diff)
download  talos-op-linux-8dd5cd5395b90070d98149d0a94e5981a74cd2ec.tar.gz
          talos-op-linux-8dd5cd5395b90070d98149d0a94e5981a74cd2ec.zip
usb: cdc-wdm: avoid hanging on zero length reads
commit 73e06865ead1 ("USB: cdc-wdm: support back-to-back
USB_CDC_NOTIFY_RESPONSE_AVAILABLE notifications") implemented queued
response handling. This added a new requirement: the read urb must be
resubmitted every time we clear the WDM_READ flag if the response
counter indicates that the device is waiting for a read.

Fix by factoring out the code handling the WDM_READ clearing and
possible urb submission, and calling it everywhere we clear the flag.

Without this fix, the driver ends up in a state where the read urb is
inactive, but the response counter is positive after a zero length
read. This prevents the read urb from ever being submitted again and
the driver appears to be hanging.

Fixes: 73e06865ead1 ("USB: cdc-wdm: support back-to-back USB_CDC_NOTIFY_RESPONSE_AVAILABLE notifications")
Cc: Greg Suarez <gsuarez@smithmicro.com>
Signed-off-by: Bjørn Mork <bjorn@mork.no>
Cc: stable <stable@vger.kernel.org> # 3.13
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
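A minimal userspace model of the bookkeeping described above may help illustrate the bug; the names below are hypothetical stand-ins (the real driver uses struct wdm_device, urbs and the iuspin spinlock), and this is only a sketch of the pattern the patch introduces, not the driver code itself:

/* Minimal userspace model of the cdc-wdm read bookkeeping (hypothetical
 * names; the real driver uses struct wdm_device, urbs and iuspin). */
#include <stdbool.h>
#include <stdio.h>

struct model {
	int  resp_count;   /* responses the device still wants us to read */
	bool read_flag;    /* models WDM_READ: buffered data available */
	bool responding;   /* models WDM_RESPONDING: read "urb" in flight */
};

/* the factored-out pattern: clear the flag, resubmit if work remains */
static void clear_read_flag(struct model *m)
{
	m->read_flag = false;
	if (--m->resp_count)
		m->responding = true;   /* stands in for usb_submit_urb() */
}

int main(void)
{
	/* device signalled two responses; the first arrives with zero length */
	struct model m = { .resp_count = 2, .read_flag = true, .responding = false };

	/* old zero length read path: only cleared the flag */
	m.read_flag = false;
	printf("old: resp_count=%d responding=%d -> stuck, no urb in flight\n",
	       m.resp_count, m.responding);

	/* patched path: the same helper is used on every clear */
	m = (struct model){ .resp_count = 2, .read_flag = true, .responding = false };
	clear_read_flag(&m);
	printf("new: resp_count=%d responding=%d -> urb resubmitted\n",
	       m.resp_count, m.responding);
	return 0;
}

The decrement-then-test in clear_read_flag() mirrors the "if (!--desc->resp_count) goto out;" in the patch below: the counter is consumed on every clear, and a new read urb is submitted only while responses remain outstanding.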
Diffstat (limited to 'drivers/usb/class/cdc-wdm.c')
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 70
1 file changed, 38 insertions(+), 32 deletions(-)
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4d387596f3f0..750afa9774b4 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -432,6 +432,38 @@ outnl:
return rv < 0 ? rv : count;
}
+/*
+ * clear WDM_READ flag and possibly submit the read urb if resp_count
+ * is non-zero.
+ *
+ * Called with desc->iuspin locked
+ */
+static int clear_wdm_read_flag(struct wdm_device *desc)
+{
+ int rv = 0;
+
+ clear_bit(WDM_READ, &desc->flags);
+
+ /* submit read urb only if the device is waiting for it */
+ if (!--desc->resp_count)
+ goto out;
+
+ set_bit(WDM_RESPONDING, &desc->flags);
+ spin_unlock_irq(&desc->iuspin);
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ spin_lock_irq(&desc->iuspin);
+ if (rv) {
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
+
+ /* make sure the next notification trigger a submit */
+ clear_bit(WDM_RESPONDING, &desc->flags);
+ desc->resp_count = 0;
+ }
+out:
+ return rv;
+}
+
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
@@ -503,8 +535,10 @@ retry:
if (!desc->reslength) { /* zero length read */
dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
- clear_bit(WDM_READ, &desc->flags);
+ rv = clear_wdm_read_flag(desc);
spin_unlock_irq(&desc->iuspin);
+ if (rv < 0)
+ goto err;
goto retry;
}
cntr = desc->length;
@@ -526,37 +560,9 @@ retry:
desc->length -= cntr;
/* in case we had outstanding data */
- if (!desc->length) {
- clear_bit(WDM_READ, &desc->flags);
-
- if (--desc->resp_count) {
- set_bit(WDM_RESPONDING, &desc->flags);
- spin_unlock_irq(&desc->iuspin);
-
- rv = usb_submit_urb(desc->response, GFP_KERNEL);
- if (rv) {
- dev_err(&desc->intf->dev,
- "%s: usb_submit_urb failed with result %d\n",
- __func__, rv);
- spin_lock_irq(&desc->iuspin);
- clear_bit(WDM_RESPONDING, &desc->flags);
- spin_unlock_irq(&desc->iuspin);
-
- if (rv == -ENOMEM) {
- rv = schedule_work(&desc->rxwork);
- if (rv)
- dev_err(&desc->intf->dev, "Cannot schedule work\n");
- } else {
- spin_lock_irq(&desc->iuspin);
- desc->resp_count = 0;
- spin_unlock_irq(&desc->iuspin);
- }
- }
- } else
- spin_unlock_irq(&desc->iuspin);
- } else
- spin_unlock_irq(&desc->iuspin);
-
+ if (!desc->length)
+ clear_wdm_read_flag(desc);
+ spin_unlock_irq(&desc->iuspin);
rv = cntr;
err: