Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/usb/core/hcd.c | 17
-rw-r--r-- | drivers/usb/gadget/Kconfig | 29
-rw-r--r-- | drivers/usb/gadget/Makefile | 1
-rw-r--r-- | drivers/usb/gadget/fusb300_udc.c | 5
-rw-r--r-- | drivers/usb/gadget/gadget_chips.h | 8
-rw-r--r-- | drivers/usb/gadget/net2272.c | 2718
-rw-r--r-- | drivers/usb/gadget/net2272.h | 601
-rw-r--r-- | drivers/usb/host/ehci-hcd.c | 2
-rw-r--r-- | drivers/usb/host/ehci-s5p.c | 95
-rw-r--r-- | drivers/usb/host/xhci-dbg.c | 22
-rw-r--r-- | drivers/usb/host/xhci-mem.c | 26
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 42
-rw-r--r-- | drivers/usb/host/xhci.c | 10
-rw-r--r-- | drivers/usb/host/xhci.h | 7
-rw-r--r-- | drivers/usb/renesas_usbhs/Makefile | 2
-rw-r--r-- | drivers/usb/renesas_usbhs/common.c | 11
-rw-r--r-- | drivers/usb/renesas_usbhs/common.h | 44
-rw-r--r-- | drivers/usb/renesas_usbhs/fifo.c | 994
-rw-r--r-- | drivers/usb/renesas_usbhs/fifo.h | 104
-rw-r--r-- | drivers/usb/renesas_usbhs/mod_gadget.c | 754
-rw-r--r-- | drivers/usb/renesas_usbhs/pipe.c | 293
-rw-r--r-- | drivers/usb/renesas_usbhs/pipe.h | 44
-rw-r--r-- | drivers/usb/wusbcore/cbaf.c | 4
23 files changed, 4915 insertions, 918 deletions
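The hcd.c hunk below adds an authorized_default module parameter to usbcore. As a usage sketch (not part of the patch): because the parameter is declared with S_IRUGO|S_IWUSR, it should be settable at boot and adjustable at runtime, roughly as follows, assuming usbcore is built into the kernel:

    # kernel command line
    usbcore.authorized_default=0
    # or at runtime
    echo 0 > /sys/module/usbcore/parameters/authorized_default

With 0, newly enumerated devices start out deauthorized and must be enabled through each device's sysfs "authorized" attribute; -1 keeps the old behaviour (wired devices authorized, wireless not).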
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index ace9f8442e5d..8669ba3fe794 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -337,6 +337,17 @@ static const u8 ss_rh_config_descriptor[] = { 0x02, 0x00 /* __le16 ss_wBytesPerInterval; 15 bits for max 15 ports */ }; +/* authorized_default behaviour: + * -1 is authorized for all devices except wireless (old behaviour) + * 0 is unauthorized for all devices + * 1 is authorized for all devices + */ +static int authorized_default = -1; +module_param(authorized_default, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(authorized_default, + "Default USB device authorization: 0 is not authorized, 1 is " + "authorized, -1 is authorized except for wireless USB (default, " + "old behaviour"); /*-------------------------------------------------------------------------*/ /** @@ -2371,7 +2382,11 @@ int usb_add_hcd(struct usb_hcd *hcd, dev_info(hcd->self.controller, "%s\n", hcd->product_desc); - hcd->authorized_default = hcd->wireless? 0 : 1; + /* Keep old behaviour if authorized_default is not in [0, 1]. */ + if (authorized_default < 0 || authorized_default > 1) + hcd->authorized_default = hcd->wireless? 0 : 1; + else + hcd->authorized_default = authorized_default; set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); /* HC is in reset state, but accessible. Now do the one-time init, diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 029e288805b6..9468adbe42bb 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -480,6 +480,35 @@ config USB_CI13XXX_PCI default USB_GADGET select USB_GADGET_SELECTED +config USB_GADGET_NET2272 + boolean "PLX NET2272" + select USB_GADGET_DUALSPEED + help + PLX NET2272 is a USB peripheral controller which supports + both full and high speed USB 2.0 data transfers. + + It has three configurable endpoints, as well as endpoint zero + (for control transfer). + Say "y" to link the driver statically, or "m" to build a + dynamically linked module called "net2272" and force all + gadget drivers to also be dynamically linked. + +config USB_GADGET_NET2272_DMA + boolean "Support external DMA controller" + depends on USB_GADGET_NET2272 + help + The NET2272 part can optionally support an external DMA + controller, but your board has to have support in the + driver itself. + + If unsure, say "N" here. The driver works fine in PIO mode. 
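Taken together with the USB_NET2272 entry that follows, a board could enable the new controller with a .config fragment roughly like this (a sketch based only on the symbols added in this patch; CONFIG_USB_GADGET=m is assumed so the tristate default resolves to a module):

    CONFIG_USB_GADGET=m
    CONFIG_USB_GADGET_NET2272=y
    # optional, only if the board wires up an external DMA controller
    CONFIG_USB_GADGET_NET2272_DMA=y
    CONFIG_USB_NET2272=m

which builds the driver as net2272.ko, matching the Makefile change below.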
+ +config USB_NET2272 + tristate + depends on USB_GADGET_NET2272 + default USB_GADGET + select USB_GADGET_SELECTED + config USB_GADGET_NET2280 boolean "NetChip 228x" depends on PCI diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile index 4fe92b18a055..345261738b13 100644 --- a/drivers/usb/gadget/Makefile +++ b/drivers/usb/gadget/Makefile @@ -4,6 +4,7 @@ ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o +obj-$(CONFIG_USB_NET2272) += net2272.o obj-$(CONFIG_USB_NET2280) += net2280.o obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o obj-$(CONFIG_USB_PXA25X) += pxa25x_udc.o diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c index 763d462454b9..b82a1149145a 100644 --- a/drivers/usb/gadget/fusb300_udc.c +++ b/drivers/usb/gadget/fusb300_udc.c @@ -980,11 +980,6 @@ static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) } \ } while (0) -static void fusb300_ep0_complete(struct usb_ep *ep, - struct usb_request *req) -{ -} - static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) { u8 *p = (u8 *)ctrl; diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h index bcdac7c73e89..13c2f9e94405 100644 --- a/drivers/usb/gadget/gadget_chips.h +++ b/drivers/usb/gadget/gadget_chips.h @@ -15,6 +15,12 @@ #ifndef __GADGET_CHIPS_H #define __GADGET_CHIPS_H +#ifdef CONFIG_USB_GADGET_NET2272 +#define gadget_is_net2272(g) !strcmp("net2272", (g)->name) +#else +#define gadget_is_net2272(g) 0 +#endif + #ifdef CONFIG_USB_GADGET_NET2280 #define gadget_is_net2280(g) !strcmp("net2280", (g)->name) #else @@ -223,6 +229,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget) return 0x29; else if (gadget_is_s3c_hsudc(gadget)) return 0x30; + else if (gadget_is_net2272(gadget)) + return 0x31; return -ENOENT; } diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c new file mode 100644 index 000000000000..29151c44f476 --- /dev/null +++ b/drivers/usb/gadget/net2272.c @@ -0,0 +1,2718 @@ +/* + * Driver for PLX NET2272 USB device controller + * + * Copyright (C) 2005-2006 PLX Technology, Inc. + * Copyright (C) 2006-2011 Analog Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/gpio.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/usb.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> + +#include <asm/byteorder.h> +#include <asm/system.h> +#include <asm/unaligned.h> + +#include "net2272.h" + +#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller" + +static const char driver_name[] = "net2272"; +static const char driver_vers[] = "2006 October 17/mainline"; +static const char driver_desc[] = DRIVER_DESC; + +static const char ep0name[] = "ep0"; +static const char * const ep_name[] = { + ep0name, + "ep-a", "ep-b", "ep-c", +}; + +#define DMA_ADDR_INVALID (~(dma_addr_t)0) +#ifdef CONFIG_USB_GADGET_NET2272_DMA +/* + * use_dma: the NET2272 can use an external DMA controller. + * Note that since there is no generic DMA api, some functions, + * notably request_dma, start_dma, and cancel_dma will need to be + * modified for your platform's particular dma controller. + * + * If use_dma is disabled, pio will be used instead. + */ +static int use_dma = 0; +module_param(use_dma, bool, 0644); + +/* + * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b) + * The NET2272 can only use dma for a single endpoint at a time. + * At some point this could be modified to allow either endpoint + * to take control of dma as it becomes available. + * + * Note that DMA should not be used on OUT endpoints unless it can + * be guaranteed that no short packets will arrive on an IN endpoint + * while the DMA operation is pending. Otherwise the OUT DMA will + * terminate prematurely (See NET2272 Errata 630-0213-0101) + */ +static ushort dma_ep = 1; +module_param(dma_ep, ushort, 0644); + +/* + * dma_mode: net2272 dma mode setting (see LOCCTL1 definiton): + * mode 0 == Slow DREQ mode + * mode 1 == Fast DREQ mode + * mode 2 == Burst mode + */ +static ushort dma_mode = 2; +module_param(dma_mode, ushort, 0644); +#else +#define use_dma 0 +#define dma_ep 1 +#define dma_mode 2 +#endif + +/* + * fifo_mode: net2272 buffer configuration: + * mode 0 == ep-{a,b,c} 512db each + * mode 1 == ep-a 1k, ep-{b,c} 512db + * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db + * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db + */ +static ushort fifo_mode = 0; +module_param(fifo_mode, ushort, 0644); + +/* + * enable_suspend: When enabled, the driver will respond to + * USB suspend requests by powering down the NET2272. Otherwise, + * USB suspend requests will be ignored. This is acceptible for + * self-powered devices. For bus powered devices set this to 1. 
+ */ +static ushort enable_suspend = 0; +module_param(enable_suspend, ushort, 0644); + +static void assert_out_naking(struct net2272_ep *ep, const char *where) +{ + u8 tmp; + +#ifndef DEBUG + return; +#endif + + tmp = net2272_ep_read(ep, EP_STAT0); + if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) { + dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n", + ep->ep.name, where, tmp); + net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); + } +} +#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__) + +static void stop_out_naking(struct net2272_ep *ep) +{ + u8 tmp = net2272_ep_read(ep, EP_STAT0); + + if ((tmp & (1 << NAK_OUT_PACKETS)) != 0) + net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); +} + +#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out") + +static char *type_string(u8 bmAttributes) +{ + switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { + case USB_ENDPOINT_XFER_BULK: return "bulk"; + case USB_ENDPOINT_XFER_ISOC: return "iso"; + case USB_ENDPOINT_XFER_INT: return "intr"; + default: return "control"; + } +} + +static char *buf_state_string(unsigned state) +{ + switch (state) { + case BUFF_FREE: return "free"; + case BUFF_VALID: return "valid"; + case BUFF_LCL: return "local"; + case BUFF_USB: return "usb"; + default: return "unknown"; + } +} + +static char *dma_mode_string(void) +{ + if (!use_dma) + return "PIO"; + switch (dma_mode) { + case 0: return "SLOW DREQ"; + case 1: return "FAST DREQ"; + case 2: return "BURST"; + default: return "invalid"; + } +} + +static void net2272_dequeue_all(struct net2272_ep *); +static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *); +static int net2272_fifo_status(struct usb_ep *); + +static struct usb_ep_ops net2272_ep_ops; + +/*---------------------------------------------------------------------------*/ + +static int +net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) +{ + struct net2272 *dev; + struct net2272_ep *ep; + u32 max; + u8 tmp; + unsigned long flags; + + ep = container_of(_ep, struct net2272_ep, ep); + if (!_ep || !desc || ep->desc || _ep->name == ep0name + || desc->bDescriptorType != USB_DT_ENDPOINT) + return -EINVAL; + dev = ep->dev; + if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) + return -ESHUTDOWN; + + max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff; + + spin_lock_irqsave(&dev->lock, flags); + _ep->maxpacket = max & 0x7fff; + ep->desc = desc; + + /* net2272_ep_reset() has already been called */ + ep->stopped = 0; + ep->wedged = 0; + + /* set speed-dependent max packet */ + net2272_ep_write(ep, EP_MAXPKT0, max & 0xff); + net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8); + + /* set type, direction, address; reset fifo counters */ + net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); + tmp = usb_endpoint_type(desc); + if (usb_endpoint_xfer_bulk(desc)) { + /* catch some particularly blatant driver bugs */ + if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) || + (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { + spin_unlock_irqrestore(&dev->lock, flags); + return -ERANGE; + } + } + ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 
1 : 0; + tmp <<= ENDPOINT_TYPE; + tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER); + tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION; + tmp |= (1 << ENDPOINT_ENABLE); + + /* for OUT transfers, block the rx fifo until a read is posted */ + ep->is_in = usb_endpoint_dir_in(desc); + if (!ep->is_in) + net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); + + net2272_ep_write(ep, EP_CFG, tmp); + + /* enable irqs */ + tmp = (1 << ep->num) | net2272_read(dev, IRQENB0); + net2272_write(dev, IRQENB0, tmp); + + tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) + | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) + | net2272_ep_read(ep, EP_IRQENB); + net2272_ep_write(ep, EP_IRQENB, tmp); + + tmp = desc->bEndpointAddress; + dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n", + _ep->name, tmp & 0x0f, PIPEDIR(tmp), + type_string(desc->bmAttributes), max, + net2272_ep_read(ep, EP_CFG)); + + spin_unlock_irqrestore(&dev->lock, flags); + return 0; +} + +static void net2272_ep_reset(struct net2272_ep *ep) +{ + u8 tmp; + + ep->desc = NULL; + INIT_LIST_HEAD(&ep->queue); + + ep->ep.maxpacket = ~0; + ep->ep.ops = &net2272_ep_ops; + + /* disable irqs, endpoint */ + net2272_ep_write(ep, EP_IRQENB, 0); + + /* init to our chosen defaults, notably so that we NAK OUT + * packets until the driver queues a read. + */ + tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS); + net2272_ep_write(ep, EP_RSPSET, tmp); + + tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE); + if (ep->num != 0) + tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT); + + net2272_ep_write(ep, EP_RSPCLR, tmp); + + /* scrub most status bits, and flush any fifo state */ + net2272_ep_write(ep, EP_STAT0, + (1 << DATA_IN_TOKEN_INTERRUPT) + | (1 << DATA_OUT_TOKEN_INTERRUPT) + | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) + | (1 << DATA_PACKET_RECEIVED_INTERRUPT) + | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)); + + net2272_ep_write(ep, EP_STAT1, + (1 << TIMEOUT) + | (1 << USB_OUT_ACK_SENT) + | (1 << USB_OUT_NAK_SENT) + | (1 << USB_IN_ACK_RCVD) + | (1 << USB_IN_NAK_SENT) + | (1 << USB_STALL_SENT) + | (1 << LOCAL_OUT_ZLP) + | (1 << BUFFER_FLUSH)); + + /* fifo size is handled seperately */ +} + +static int net2272_disable(struct usb_ep *_ep) +{ + struct net2272_ep *ep; + unsigned long flags; + + ep = container_of(_ep, struct net2272_ep, ep); + if (!_ep || !ep->desc || _ep->name == ep0name) + return -EINVAL; + + spin_lock_irqsave(&ep->dev->lock, flags); + net2272_dequeue_all(ep); + net2272_ep_reset(ep); + + dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name); + + spin_unlock_irqrestore(&ep->dev->lock, flags); + return 0; +} + +/*---------------------------------------------------------------------------*/ + +static struct usb_request * +net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) +{ + struct net2272_ep *ep; + struct net2272_request *req; + + if (!_ep) + return NULL; + ep = container_of(_ep, struct net2272_ep, ep); + + req = kzalloc(sizeof(*req), gfp_flags); + if (!req) + return NULL; + + req->req.dma = DMA_ADDR_INVALID; + INIT_LIST_HEAD(&req->queue); + + return &req->req; +} + +static void +net2272_free_request(struct usb_ep *_ep, struct usb_request *_req) +{ + struct net2272_ep *ep; + struct net2272_request *req; + + ep = container_of(_ep, struct net2272_ep, ep); + if (!_ep || !_req) + return; + + req = container_of(_req, struct net2272_request, req); + WARN_ON(!list_empty(&req->queue)); + kfree(req); +} + +static void +net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status) +{ + 
struct net2272 *dev; + unsigned stopped = ep->stopped; + + if (ep->num == 0) { + if (ep->dev->protocol_stall) { + ep->stopped = 1; + set_halt(ep); + } + allow_status(ep); + } + + list_del_init(&req->queue); + + if (req->req.status == -EINPROGRESS) + req->req.status = status; + else + status = req->req.status; + + dev = ep->dev; + if (use_dma && req->mapped) { + dma_unmap_single(dev->dev, req->req.dma, req->req.length, + ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + req->req.dma = DMA_ADDR_INVALID; + req->mapped = 0; + } + + if (status && status != -ESHUTDOWN) + dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n", + ep->ep.name, &req->req, status, + req->req.actual, req->req.length, req->req.buf); + + /* don't modify queue heads during completion callback */ + ep->stopped = 1; + spin_unlock(&dev->lock); + req->req.complete(&ep->ep, &req->req); + spin_lock(&dev->lock); + ep->stopped = stopped; +} + +static int +net2272_write_packet(struct net2272_ep *ep, u8 *buf, + struct net2272_request *req, unsigned max) +{ + u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); + u16 *bufp; + unsigned length, count; + u8 tmp; + + length = min(req->req.length - req->req.actual, max); + req->req.actual += length; + + dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n", + ep->ep.name, req, max, length, + (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); + + count = length; + bufp = (u16 *)buf; + + while (likely(count >= 2)) { + /* no byte-swap required; chip endian set during init */ + writew(*bufp++, ep_data); + count -= 2; + } + buf = (u8 *)bufp; + + /* write final byte by placing the NET2272 into 8-bit mode */ + if (unlikely(count)) { + tmp = net2272_read(ep->dev, LOCCTL); + net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH)); + writeb(*buf, ep_data); + net2272_write(ep->dev, LOCCTL, tmp); + } + return length; +} + +/* returns: 0: still running, 1: completed, negative: errno */ +static int +net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req) +{ + u8 *buf; + unsigned count, max; + int status; + + dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n", + ep->ep.name, req->req.actual, req->req.length); + + /* + * Keep loading the endpoint until the final packet is loaded, + * or the endpoint buffer is full. 
+ */ + top: + /* + * Clear interrupt status + * - Packet Transmitted interrupt will become set again when the + * host successfully takes another packet + */ + net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); + while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) { + buf = req->req.buf + req->req.actual; + prefetch(buf); + + /* force pagesel */ + net2272_ep_read(ep, EP_STAT0); + + max = (net2272_ep_read(ep, EP_AVAIL1) << 8) | + (net2272_ep_read(ep, EP_AVAIL0)); + + if (max < ep->ep.maxpacket) + max = (net2272_ep_read(ep, EP_AVAIL1) << 8) + | (net2272_ep_read(ep, EP_AVAIL0)); + + count = net2272_write_packet(ep, buf, req, max); + /* see if we are done */ + if (req->req.length == req->req.actual) { + /* validate short or zlp packet */ + if (count < ep->ep.maxpacket) + set_fifo_bytecount(ep, 0); + net2272_done(ep, req, 0); + + if (!list_empty(&ep->queue)) { + req = list_entry(ep->queue.next, + struct net2272_request, + queue); + status = net2272_kick_dma(ep, req); + + if (status < 0) + if ((net2272_ep_read(ep, EP_STAT0) + & (1 << BUFFER_EMPTY))) + goto top; + } + return 1; + } + net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); + } + return 0; +} + +static void +net2272_out_flush(struct net2272_ep *ep) +{ + ASSERT_OUT_NAKING(ep); + + net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT) + | (1 << DATA_PACKET_RECEIVED_INTERRUPT)); + net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); +} + +static int +net2272_read_packet(struct net2272_ep *ep, u8 *buf, + struct net2272_request *req, unsigned avail) +{ + u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); + unsigned is_short; + u16 *bufp; + + req->req.actual += avail; + + dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n", + ep->ep.name, req, avail, + (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); + + is_short = (avail < ep->ep.maxpacket); + + if (unlikely(avail == 0)) { + /* remove any zlp from the buffer */ + (void)readw(ep_data); + return is_short; + } + + /* Ensure we get the final byte */ + if (unlikely(avail % 2)) + avail++; + bufp = (u16 *)buf; + + do { + *bufp++ = readw(ep_data); + avail -= 2; + } while (avail); + + /* + * To avoid false endpoint available race condition must read + * ep stat0 twice in the case of a short transfer + */ + if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) + net2272_ep_read(ep, EP_STAT0); + + return is_short; +} + +static int +net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req) +{ + u8 *buf; + unsigned is_short; + int count; + int tmp; + int cleanup = 0; + int status = -1; + + dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n", + ep->ep.name, req->req.actual, req->req.length); + + top: + do { + buf = req->req.buf + req->req.actual; + prefetchw(buf); + + count = (net2272_ep_read(ep, EP_AVAIL1) << 8) + | net2272_ep_read(ep, EP_AVAIL0); + + net2272_ep_write(ep, EP_STAT0, + (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | + (1 << DATA_PACKET_RECEIVED_INTERRUPT)); + + tmp = req->req.length - req->req.actual; + + if (count > tmp) { + if ((tmp % ep->ep.maxpacket) != 0) { + dev_err(ep->dev->dev, + "%s out fifo %d bytes, expected %d\n", + ep->ep.name, count, tmp); + cleanup = 1; + } + count = (tmp > 0) ? 
tmp : 0; + } + + is_short = net2272_read_packet(ep, buf, req, count); + + /* completion */ + if (unlikely(cleanup || is_short || + ((req->req.actual == req->req.length) + && !req->req.zero))) { + + if (cleanup) { + net2272_out_flush(ep); + net2272_done(ep, req, -EOVERFLOW); + } else + net2272_done(ep, req, 0); + + /* re-initialize endpoint transfer registers + * otherwise they may result in erroneous pre-validation + * for subsequent control reads + */ + if (unlikely(ep->num == 0)) { + net2272_ep_write(ep, EP_TRANSFER2, 0); + net2272_ep_write(ep, EP_TRANSFER1, 0); + net2272_ep_write(ep, EP_TRANSFER0, 0); + } + + if (!list_empty(&ep->queue)) { + req = list_entry(ep->queue.next, + struct net2272_request, queue); + status = net2272_kick_dma(ep, req); + if ((status < 0) && + !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))) + goto top; + } + return 1; + } + } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))); + + return 0; +} + +static void +net2272_pio_advance(struct net2272_ep *ep) +{ + struct net2272_request *req; + + if (unlikely(list_empty(&ep->queue))) + return; + + req = list_entry(ep->queue.next, struct net2272_request, queue); + (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req); +} + +/* returns 0 on success, else negative errno */ +static int +net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf, + unsigned len, unsigned dir) +{ + dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n", + ep, buf, len, dir); + + /* The NET2272 only supports a single dma channel */ + if (dev->dma_busy) + return -EBUSY; + /* + * EP_TRANSFER (used to determine the number of bytes received + * in an OUT transfer) is 24 bits wide; don't ask for more than that. + */ + if ((dir == 1) && (len > 0x1000000)) + return -EINVAL; + + dev->dma_busy = 1; + + /* initialize platform's dma */ +#ifdef CONFIG_PCI + /* NET2272 addr, buffer addr, length, etc. 
*/ + switch (dev->dev_id) { + case PCI_DEVICE_ID_RDK1: + /* Setup PLX 9054 DMA mode */ + writel((1 << LOCAL_BUS_WIDTH) | + (1 << TA_READY_INPUT_ENABLE) | + (0 << LOCAL_BURST_ENABLE) | + (1 << DONE_INTERRUPT_ENABLE) | + (1 << LOCAL_ADDRESSING_MODE) | + (1 << DEMAND_MODE) | + (1 << DMA_EOT_ENABLE) | + (1 << FAST_SLOW_TERMINATE_MODE_SELECT) | + (1 << DMA_CHANNEL_INTERRUPT_SELECT), + dev->rdk1.plx9054_base_addr + DMAMODE0); + + writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0); + writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0); + writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0); + writel((dir << DIRECTION_OF_TRANSFER) | + (1 << INTERRUPT_AFTER_TERMINAL_COUNT), + dev->rdk1.plx9054_base_addr + DMADPR0); + writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) | + readl(dev->rdk1.plx9054_base_addr + INTCSR), + dev->rdk1.plx9054_base_addr + INTCSR); + + break; + } +#endif + + net2272_write(dev, DMAREQ, + (0 << DMA_BUFFER_VALID) | + (1 << DMA_REQUEST_ENABLE) | + (1 << DMA_CONTROL_DACK) | + (dev->dma_eot_polarity << EOT_POLARITY) | + (dev->dma_dack_polarity << DACK_POLARITY) | + (dev->dma_dreq_polarity << DREQ_POLARITY) | + ((ep >> 1) << DMA_ENDPOINT_SELECT)); + + (void) net2272_read(dev, SCRATCH); + + return 0; +} + +static void +net2272_start_dma(struct net2272 *dev) +{ + /* start platform's dma controller */ +#ifdef CONFIG_PCI + switch (dev->dev_id) { + case PCI_DEVICE_ID_RDK1: + writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START), + dev->rdk1.plx9054_base_addr + DMACSR0); + break; + } +#endif +} + +/* returns 0 on success, else negative errno */ +static int +net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req) +{ + unsigned size; + u8 tmp; + + if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma) + return -EINVAL; + + /* don't use dma for odd-length transfers + * otherwise, we'd need to deal with the last byte with pio + */ + if (req->req.length & 1) + return -EINVAL; + + dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08x\n", + ep->ep.name, req, req->req.dma); + + net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); + + /* The NET2272 can only use DMA on one endpoint at a time */ + if (ep->dev->dma_busy) + return -EBUSY; + + /* Make sure we only DMA an even number of bytes (we'll use + * pio to complete the transfer) + */ + size = req->req.length; + size &= ~1; + + /* device-to-host transfer */ + if (ep->is_in) { + /* initialize platform's dma controller */ + if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0)) + /* unable to obtain DMA channel; return error and use pio mode */ + return -EBUSY; + req->req.actual += size; + + /* host-to-device transfer */ + } else { + tmp = net2272_ep_read(ep, EP_STAT0); + + /* initialize platform's dma controller */ + if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1)) + /* unable to obtain DMA channel; return error and use pio mode */ + return -EBUSY; + + if (!(tmp & (1 << BUFFER_EMPTY))) + ep->not_empty = 1; + else + ep->not_empty = 0; + + + /* allow the endpoint's buffer to fill */ + net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); + + /* this transfer completed and data's already in the fifo + * return error so pio gets used. 
+ */ + if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) { + + /* deassert dreq */ + net2272_write(ep->dev, DMAREQ, + (0 << DMA_BUFFER_VALID) | + (0 << DMA_REQUEST_ENABLE) | + (1 << DMA_CONTROL_DACK) | + (ep->dev->dma_eot_polarity << EOT_POLARITY) | + (ep->dev->dma_dack_polarity << DACK_POLARITY) | + (ep->dev->dma_dreq_polarity << DREQ_POLARITY) | + ((ep->num >> 1) << DMA_ENDPOINT_SELECT)); + + return -EBUSY; + } + } + + /* Don't use per-packet interrupts: use dma interrupts only */ + net2272_ep_write(ep, EP_IRQENB, 0); + + net2272_start_dma(ep->dev); + + return 0; +} + +static void net2272_cancel_dma(struct net2272 *dev) +{ +#ifdef CONFIG_PCI + switch (dev->dev_id) { + case PCI_DEVICE_ID_RDK1: + writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0); + writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0); + while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) & + (1 << CHANNEL_DONE))) + continue; /* wait for dma to stabalize */ + + /* dma abort generates an interrupt */ + writeb(1 << CHANNEL_CLEAR_INTERRUPT, + dev->rdk1.plx9054_base_addr + DMACSR0); + break; + } +#endif + + dev->dma_busy = 0; +} + +/*---------------------------------------------------------------------------*/ + +static int +net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) +{ + struct net2272_request *req; + struct net2272_ep *ep; + struct net2272 *dev; + unsigned long flags; + int status = -1; + u8 s; + + req = container_of(_req, struct net2272_request, req); + if (!_req || !_req->complete || !_req->buf + || !list_empty(&req->queue)) + return -EINVAL; + ep = container_of(_ep, struct net2272_ep, ep); + if (!_ep || (!ep->desc && ep->num != 0)) + return -EINVAL; + dev = ep->dev; + if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) + return -ESHUTDOWN; + + /* set up dma mapping in case the caller didn't */ + if (use_dma && ep->dma && _req->dma == DMA_ADDR_INVALID) { + _req->dma = dma_map_single(dev->dev, _req->buf, _req->length, + ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + req->mapped = 1; + } + + dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08x %s\n", + _ep->name, _req, _req->length, _req->buf, + _req->dma, _req->zero ? "zero" : "!zero"); + + spin_lock_irqsave(&dev->lock, flags); + + _req->status = -EINPROGRESS; + _req->actual = 0; + + /* kickstart this i/o queue? */ + if (list_empty(&ep->queue) && !ep->stopped) { + /* maybe there's no control data, just status ack */ + if (ep->num == 0 && _req->length == 0) { + net2272_done(ep, req, 0); + dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name); + goto done; + } + + /* Return zlp, don't let it block subsequent packets */ + s = net2272_ep_read(ep, EP_STAT0); + if (s & (1 << BUFFER_EMPTY)) { + /* Buffer is empty check for a blocking zlp, handle it */ + if ((s & (1 << NAK_OUT_PACKETS)) && + net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) { + dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n"); + /* + * Request is going to terminate with a short packet ... + * hope the client is ready for it! 
+ */ + status = net2272_read_fifo(ep, req); + /* clear short packet naking */ + net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS)); + goto done; + } + } + + /* try dma first */ + status = net2272_kick_dma(ep, req); + + if (status < 0) { + /* dma failed (most likely in use by another endpoint) + * fallback to pio + */ + status = 0; + + if (ep->is_in) + status = net2272_write_fifo(ep, req); + else { + s = net2272_ep_read(ep, EP_STAT0); + if ((s & (1 << BUFFER_EMPTY)) == 0) + status = net2272_read_fifo(ep, req); + } + + if (unlikely(status != 0)) { + if (status > 0) + status = 0; + req = NULL; + } + } + } + if (likely(req != 0)) + list_add_tail(&req->queue, &ep->queue); + + if (likely(!list_empty(&ep->queue))) + net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); + done: + spin_unlock_irqrestore(&dev->lock, flags); + + return 0; +} + +/* dequeue ALL requests */ +static void +net2272_dequeue_all(struct net2272_ep *ep) +{ + struct net2272_request *req; + + /* called with spinlock held */ + ep->stopped = 1; + + while (!list_empty(&ep->queue)) { + req = list_entry(ep->queue.next, + struct net2272_request, + queue); + net2272_done(ep, req, -ESHUTDOWN); + } +} + +/* dequeue JUST ONE request */ +static int +net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req) +{ + struct net2272_ep *ep; + struct net2272_request *req; + unsigned long flags; + int stopped; + + ep = container_of(_ep, struct net2272_ep, ep); + if (!_ep || (!ep->desc && ep->num != 0) || !_req) + return -EINVAL; + + spin_lock_irqsave(&ep->dev->lock, flags); + stopped = ep->stopped; + ep->stopped = 1; + + /* make sure it's still queued on this endpoint */ + list_for_each_entry(req, &ep->queue, queue) { + if (&req->req == _req) + break; + } + if (&req->req != _req) { + spin_unlock_irqrestore(&ep->dev->lock, flags); + return -EINVAL; + } + + /* queue head may be partially complete */ + if (ep->queue.next == &req->queue) { + dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name); + net2272_done(ep, req, -ECONNRESET); + } + req = NULL; + ep->stopped = stopped; + + spin_unlock_irqrestore(&ep->dev->lock, flags); + return 0; +} + +/*---------------------------------------------------------------------------*/ + +static int +net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) +{ + struct net2272_ep *ep; + unsigned long flags; + int ret = 0; + + ep = container_of(_ep, struct net2272_ep, ep); + if (!_ep || (!ep->desc && ep->num != 0)) + return -EINVAL; + if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) + return -ESHUTDOWN; + if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc)) + return -EINVAL; + + spin_lock_irqsave(&ep->dev->lock, flags); + if (!list_empty(&ep->queue)) + ret = -EAGAIN; + else if (ep->is_in && value && net2272_fifo_status(_ep) != 0) + ret = -EAGAIN; + else { + dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name, + value ? "set" : "clear", + wedged ? 
"wedge" : "halt"); + /* set/clear */ + if (value) { + if (ep->num == 0) + ep->dev->protocol_stall = 1; + else + set_halt(ep); + if (wedged) + ep->wedged = 1; + } else { + clear_halt(ep); + ep->wedged = 0; + } + } + spin_unlock_irqrestore(&ep->dev->lock, flags); + + return ret; +} + +static int +net2272_set_halt(struct usb_ep *_ep, int value) +{ + return net2272_set_halt_and_wedge(_ep, value, 0); +} + +static int +net2272_set_wedge(struct usb_ep *_ep) +{ + if (!_ep || _ep->name == ep0name) + return -EINVAL; + return net2272_set_halt_and_wedge(_ep, 1, 1); +} + +static int +net2272_fifo_status(struct usb_ep *_ep) +{ + struct net2272_ep *ep; + u16 avail; + + ep = container_of(_ep, struct net2272_ep, ep); + if (!_ep || (!ep->desc && ep->num != 0)) + return -ENODEV; + if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) + return -ESHUTDOWN; + + avail = net2272_ep_read(ep, EP_AVAIL1) << 8; + avail |= net2272_ep_read(ep, EP_AVAIL0); + if (avail > ep->fifo_size) + return -EOVERFLOW; + if (ep->is_in) + avail = ep->fifo_size - avail; + return avail; +} + +static void +net2272_fifo_flush(struct usb_ep *_ep) +{ + struct net2272_ep *ep; + + ep = container_of(_ep, struct net2272_ep, ep); + if (!_ep || (!ep->desc && ep->num != 0)) + return; + if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) + return; + + net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); +} + +static struct usb_ep_ops net2272_ep_ops = { + .enable = net2272_enable, + .disable = net2272_disable, + + .alloc_request = net2272_alloc_request, + .free_request = net2272_free_request, + + .queue = net2272_queue, + .dequeue = net2272_dequeue, + + .set_halt = net2272_set_halt, + .set_wedge = net2272_set_wedge, + .fifo_status = net2272_fifo_status, + .fifo_flush = net2272_fifo_flush, +}; + +/*---------------------------------------------------------------------------*/ + +static int +net2272_get_frame(struct usb_gadget *_gadget) +{ + struct net2272 *dev; + unsigned long flags; + u16 ret; + + if (!_gadget) + return -ENODEV; + dev = container_of(_gadget, struct net2272, gadget); + spin_lock_irqsave(&dev->lock, flags); + + ret = net2272_read(dev, FRAME1) << 8; + ret |= net2272_read(dev, FRAME0); + + spin_unlock_irqrestore(&dev->lock, flags); + return ret; +} + +static int +net2272_wakeup(struct usb_gadget *_gadget) +{ + struct net2272 *dev; + u8 tmp; + unsigned long flags; + + if (!_gadget) + return 0; + dev = container_of(_gadget, struct net2272, gadget); + + spin_lock_irqsave(&dev->lock, flags); + tmp = net2272_read(dev, USBCTL0); + if (tmp & (1 << IO_WAKEUP_ENABLE)) + net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME)); + + spin_unlock_irqrestore(&dev->lock, flags); + + return 0; +} + +static int +net2272_set_selfpowered(struct usb_gadget *_gadget, int value) +{ + struct net2272 *dev; + + if (!_gadget) + return -ENODEV; + dev = container_of(_gadget, struct net2272, gadget); + + dev->is_selfpowered = value; + + return 0; +} + +static int +net2272_pullup(struct usb_gadget *_gadget, int is_on) +{ + struct net2272 *dev; + u8 tmp; + unsigned long flags; + + if (!_gadget) + return -ENODEV; + dev = container_of(_gadget, struct net2272, gadget); + + spin_lock_irqsave(&dev->lock, flags); + tmp = net2272_read(dev, USBCTL0); + dev->softconnect = (is_on != 0); + if (is_on) + tmp |= (1 << USB_DETECT_ENABLE); + else + tmp &= ~(1 << USB_DETECT_ENABLE); + net2272_write(dev, USBCTL0, tmp); + spin_unlock_irqrestore(&dev->lock, flags); + + return 0; +} + +static const struct usb_gadget_ops net2272_ops = { + .get_frame = 
net2272_get_frame, + .wakeup = net2272_wakeup, + .set_selfpowered = net2272_set_selfpowered, + .pullup = net2272_pullup +}; + +/*---------------------------------------------------------------------------*/ + +static ssize_t +net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf) +{ + struct net2272 *dev; + char *next; + unsigned size, t; + unsigned long flags; + u8 t1, t2; + int i; + const char *s; + + dev = dev_get_drvdata(_dev); + next = buf; + size = PAGE_SIZE; + spin_lock_irqsave(&dev->lock, flags); + + if (dev->driver) + s = dev->driver->driver.name; + else + s = "(none)"; + + /* Main Control Registers */ + t = scnprintf(next, size, "%s version %s," + "chiprev %02x, locctl %02x\n" + "irqenb0 %02x irqenb1 %02x " + "irqstat0 %02x irqstat1 %02x\n", + driver_name, driver_vers, dev->chiprev, + net2272_read(dev, LOCCTL), + net2272_read(dev, IRQENB0), + net2272_read(dev, IRQENB1), + net2272_read(dev, IRQSTAT0), + net2272_read(dev, IRQSTAT1)); + size -= t; + next += t; + + /* DMA */ + t1 = net2272_read(dev, DMAREQ); + t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n", + t1, ep_name[(t1 & 0x01) + 1], + t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "", + t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "", + t1 & (1 << DMA_REQUEST) ? "req " : "", + t1 & (1 << DMA_BUFFER_VALID) ? "valid " : ""); + size -= t; + next += t; + + /* USB Control Registers */ + t1 = net2272_read(dev, USBCTL1); + if (t1 & (1 << VBUS_PIN)) { + if (t1 & (1 << USB_HIGH_SPEED)) + s = "high speed"; + else if (dev->gadget.speed == USB_SPEED_UNKNOWN) + s = "powered"; + else + s = "full speed"; + } else + s = "not attached"; + t = scnprintf(next, size, + "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n", + net2272_read(dev, USBCTL0), t1, + net2272_read(dev, OURADDR), s); + size -= t; + next += t; + + /* Endpoint Registers */ + for (i = 0; i < 4; ++i) { + struct net2272_ep *ep; + + ep = &dev->ep[i]; + if (i && !ep->desc) + continue; + + t1 = net2272_ep_read(ep, EP_CFG); + t2 = net2272_ep_read(ep, EP_RSPSET); + t = scnprintf(next, size, + "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s" + "irqenb %02x\n", + ep->ep.name, t1, t2, + (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "", + (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "", + (t2 & (1 << AUTOVALIDATE)) ? "auto " : "", + (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "", + (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "", + (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "", + (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ", + (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "", + net2272_ep_read(ep, EP_IRQENB)); + size -= t; + next += t; + + t = scnprintf(next, size, + "\tstat0 %02x stat1 %02x avail %04x " + "(ep%d%s-%s)%s\n", + net2272_ep_read(ep, EP_STAT0), + net2272_ep_read(ep, EP_STAT1), + (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0), + t1 & 0x0f, + ep->is_in ? "in" : "out", + type_string(t1 >> 5), + ep->stopped ? 
"*" : ""); + size -= t; + next += t; + + t = scnprintf(next, size, + "\tep_transfer %06x\n", + ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) | + ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) | + ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff))); + size -= t; + next += t; + + t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03; + t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03; + t = scnprintf(next, size, + "\tbuf-a %s buf-b %s\n", + buf_state_string(t1), + buf_state_string(t2)); + size -= t; + next += t; + } + + spin_unlock_irqrestore(&dev->lock, flags); + + return PAGE_SIZE - size; +} +static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL); + +/*---------------------------------------------------------------------------*/ + +static void +net2272_set_fifo_mode(struct net2272 *dev, int mode) +{ + u8 tmp; + + tmp = net2272_read(dev, LOCCTL) & 0x3f; + tmp |= (mode << 6); + net2272_write(dev, LOCCTL, tmp); + + INIT_LIST_HEAD(&dev->gadget.ep_list); + + /* always ep-a, ep-c ... maybe not ep-b */ + list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); + + switch (mode) { + case 0: + list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); + dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512; + break; + case 1: + list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); + dev->ep[1].fifo_size = 1024; + dev->ep[2].fifo_size = 512; + break; + case 2: + list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); + dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; + break; + case 3: + dev->ep[1].fifo_size = 1024; + break; + } + + /* ep-c is always 2 512 byte buffers */ + list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); + dev->ep[3].fifo_size = 512; +} + +/*---------------------------------------------------------------------------*/ + +static struct net2272 *the_controller; + +static void +net2272_usb_reset(struct net2272 *dev) +{ + dev->gadget.speed = USB_SPEED_UNKNOWN; + + net2272_cancel_dma(dev); + + net2272_write(dev, IRQENB0, 0); + net2272_write(dev, IRQENB1, 0); + + /* clear irq state */ + net2272_write(dev, IRQSTAT0, 0xff); + net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT)); + + net2272_write(dev, DMAREQ, + (0 << DMA_BUFFER_VALID) | + (0 << DMA_REQUEST_ENABLE) | + (1 << DMA_CONTROL_DACK) | + (dev->dma_eot_polarity << EOT_POLARITY) | + (dev->dma_dack_polarity << DACK_POLARITY) | + (dev->dma_dreq_polarity << DREQ_POLARITY) | + ((dma_ep >> 1) << DMA_ENDPOINT_SELECT)); + + net2272_cancel_dma(dev); + net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0); + + /* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping + * note that the higher level gadget drivers are expected to convert data to little endian. 
+ * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here + */ + net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH)); + net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE)); +} + +static void +net2272_usb_reinit(struct net2272 *dev) +{ + int i; + + /* basic endpoint init */ + for (i = 0; i < 4; ++i) { + struct net2272_ep *ep = &dev->ep[i]; + + ep->ep.name = ep_name[i]; + ep->dev = dev; + ep->num = i; + ep->not_empty = 0; + + if (use_dma && ep->num == dma_ep) + ep->dma = 1; + + if (i > 0 && i <= 3) + ep->fifo_size = 512; + else + ep->fifo_size = 64; + net2272_ep_reset(ep); + } + dev->ep[0].ep.maxpacket = 64; + + dev->gadget.ep0 = &dev->ep[0].ep; + dev->ep[0].stopped = 0; + INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); +} + +static void +net2272_ep0_start(struct net2272 *dev) +{ + struct net2272_ep *ep0 = &dev->ep[0]; + + net2272_ep_write(ep0, EP_RSPSET, + (1 << NAK_OUT_PACKETS_MODE) | + (1 << ALT_NAK_OUT_PACKETS)); + net2272_ep_write(ep0, EP_RSPCLR, + (1 << HIDE_STATUS_PHASE) | + (1 << CONTROL_STATUS_PHASE_HANDSHAKE)); + net2272_write(dev, USBCTL0, + (dev->softconnect << USB_DETECT_ENABLE) | + (1 << USB_ROOT_PORT_WAKEUP_ENABLE) | + (1 << IO_WAKEUP_ENABLE)); + net2272_write(dev, IRQENB0, + (1 << SETUP_PACKET_INTERRUPT_ENABLE) | + (1 << ENDPOINT_0_INTERRUPT_ENABLE) | + (1 << DMA_DONE_INTERRUPT_ENABLE)); + net2272_write(dev, IRQENB1, + (1 << VBUS_INTERRUPT_ENABLE) | + (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) | + (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)); +} + +/* when a driver is successfully registered, it will receive + * control requests including set_configuration(), which enables + * non-control requests. then usb traffic follows until a + * disconnect is reported. then a host may connect again, or + * the driver might get unbound. + */ +int usb_gadget_probe_driver(struct usb_gadget_driver *driver, + int (*bind)(struct usb_gadget *)) +{ + struct net2272 *dev = the_controller; + int ret; + unsigned i; + + if (!driver || !bind || !driver->unbind || !driver->setup || + driver->speed != USB_SPEED_HIGH) + return -EINVAL; + if (!dev) + return -ENODEV; + if (dev->driver) + return -EBUSY; + + for (i = 0; i < 4; ++i) + dev->ep[i].irqs = 0; + /* hook up the driver ... */ + dev->softconnect = 1; + driver->driver.bus = NULL; + dev->driver = driver; + dev->gadget.dev.driver = &driver->driver; + ret = bind(&dev->gadget); + if (ret) { + dev_dbg(dev->dev, "bind to driver %s --> %d\n", + driver->driver.name, ret); + dev->driver = NULL; + dev->gadget.dev.driver = NULL; + return ret; + } + + /* ... then enable host detection and ep0; and we're ready + * for set_configuration as well as eventual disconnect. + */ + net2272_ep0_start(dev); + + dev_dbg(dev->dev, "%s ready\n", driver->driver.name); + + return 0; +} +EXPORT_SYMBOL(usb_gadget_probe_driver); + +static void +stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver) +{ + int i; + + /* don't disconnect if it's not connected */ + if (dev->gadget.speed == USB_SPEED_UNKNOWN) + driver = NULL; + + /* stop hardware; prevent new request submissions; + * and kill any outstanding requests. 
+ */ + net2272_usb_reset(dev); + for (i = 0; i < 4; ++i) + net2272_dequeue_all(&dev->ep[i]); + + /* report disconnect; the driver is already quiesced */ + if (driver) { + spin_unlock(&dev->lock); + driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + + } + net2272_usb_reinit(dev); +} + +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) +{ + struct net2272 *dev = the_controller; + unsigned long flags; + + if (!dev) + return -ENODEV; + if (!driver || driver != dev->driver) + return -EINVAL; + + spin_lock_irqsave(&dev->lock, flags); + stop_activity(dev, driver); + spin_unlock_irqrestore(&dev->lock, flags); + + net2272_pullup(&dev->gadget, 0); + + driver->unbind(&dev->gadget); + dev->gadget.dev.driver = NULL; + dev->driver = NULL; + + dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name); + return 0; +} +EXPORT_SYMBOL(usb_gadget_unregister_driver); + +/*---------------------------------------------------------------------------*/ +/* handle ep-a/ep-b dma completions */ +static void +net2272_handle_dma(struct net2272_ep *ep) +{ + struct net2272_request *req; + unsigned len; + int status; + + if (!list_empty(&ep->queue)) + req = list_entry(ep->queue.next, + struct net2272_request, queue); + else + req = NULL; + + dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req); + + /* Ensure DREQ is de-asserted */ + net2272_write(ep->dev, DMAREQ, + (0 << DMA_BUFFER_VALID) + | (0 << DMA_REQUEST_ENABLE) + | (1 << DMA_CONTROL_DACK) + | (ep->dev->dma_eot_polarity << EOT_POLARITY) + | (ep->dev->dma_dack_polarity << DACK_POLARITY) + | (ep->dev->dma_dreq_polarity << DREQ_POLARITY) + | ((ep->dma >> 1) << DMA_ENDPOINT_SELECT)); + + ep->dev->dma_busy = 0; + + net2272_ep_write(ep, EP_IRQENB, + (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) + | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) + | net2272_ep_read(ep, EP_IRQENB)); + + /* device-to-host transfer completed */ + if (ep->is_in) { + /* validate a short packet or zlp if necessary */ + if ((req->req.length % ep->ep.maxpacket != 0) || + req->req.zero) + set_fifo_bytecount(ep, 0); + + net2272_done(ep, req, 0); + if (!list_empty(&ep->queue)) { + req = list_entry(ep->queue.next, + struct net2272_request, queue); + status = net2272_kick_dma(ep, req); + if (status < 0) + net2272_pio_advance(ep); + } + + /* host-to-device transfer completed */ + } else { + /* terminated with a short packet? */ + if (net2272_read(ep->dev, IRQSTAT0) & + (1 << DMA_DONE_INTERRUPT)) { + /* abort system dma */ + net2272_cancel_dma(ep->dev); + } + + /* EP_TRANSFER will contain the number of bytes + * actually received. + * NOTE: There is no overflow detection on EP_TRANSFER: + * We can't deal with transfers larger than 2^24 bytes! 
+ */ + len = (net2272_ep_read(ep, EP_TRANSFER2) << 16) + | (net2272_ep_read(ep, EP_TRANSFER1) << 8) + | (net2272_ep_read(ep, EP_TRANSFER0)); + + if (ep->not_empty) + len += 4; + + req->req.actual += len; + + /* get any remaining data */ + net2272_pio_advance(ep); + } +} + +/*---------------------------------------------------------------------------*/ + +static void +net2272_handle_ep(struct net2272_ep *ep) +{ + struct net2272_request *req; + u8 stat0, stat1; + + if (!list_empty(&ep->queue)) + req = list_entry(ep->queue.next, + struct net2272_request, queue); + else + req = NULL; + + /* ack all, and handle what we care about */ + stat0 = net2272_ep_read(ep, EP_STAT0); + stat1 = net2272_ep_read(ep, EP_STAT1); + ep->irqs++; + + dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n", + ep->ep.name, stat0, stat1, req ? &req->req : 0); + + net2272_ep_write(ep, EP_STAT0, stat0 & + ~((1 << NAK_OUT_PACKETS) + | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))); + net2272_ep_write(ep, EP_STAT1, stat1); + + /* data packet(s) received (in the fifo, OUT) + * direction must be validated, otherwise control read status phase + * could be interpreted as a valid packet + */ + if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT))) + net2272_pio_advance(ep); + /* data packet(s) transmitted (IN) */ + else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) + net2272_pio_advance(ep); +} + +static struct net2272_ep * +net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex) +{ + struct net2272_ep *ep; + + if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) + return &dev->ep[0]; + + list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { + u8 bEndpointAddress; + + if (!ep->desc) + continue; + bEndpointAddress = ep->desc->bEndpointAddress; + if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) + continue; + if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f)) + return ep; + } + return NULL; +} + +/* + * USB Test Packet: + * JKJKJKJK * 9 + * JJKKJJKK * 8 + * JJJJKKKK * 8 + * JJJJJJJKKKKKKK * 8 + * JJJJJJJK * 8 + * {JKKKKKKK * 10}, JK + */ +static const u8 net2272_test_packet[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, + 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, + 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, + 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E +}; + +static void +net2272_set_test_mode(struct net2272 *dev, int mode) +{ + int i; + + /* Disable all net2272 interrupts: + * Nothing but a power cycle should stop the test. 
+ */ + net2272_write(dev, IRQENB0, 0x00); + net2272_write(dev, IRQENB1, 0x00); + + /* Force tranceiver to high-speed */ + net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED); + + net2272_write(dev, PAGESEL, 0); + net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT); + net2272_write(dev, EP_RSPCLR, + (1 << CONTROL_STATUS_PHASE_HANDSHAKE) + | (1 << HIDE_STATUS_PHASE)); + net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION); + net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH); + + /* wait for status phase to complete */ + while (!(net2272_read(dev, EP_STAT0) & + (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))) + ; + + /* Enable test mode */ + net2272_write(dev, USBTEST, mode); + + /* load test packet */ + if (mode == TEST_PACKET) { + /* switch to 8 bit mode */ + net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) & + ~(1 << DATA_WIDTH)); + + for (i = 0; i < sizeof(net2272_test_packet); ++i) + net2272_write(dev, EP_DATA, net2272_test_packet[i]); + + /* Validate test packet */ + net2272_write(dev, EP_TRANSFER0, 0); + } +} + +static void +net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat) +{ + struct net2272_ep *ep; + u8 num, scratch; + + /* starting a control request? */ + if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) { + union { + u8 raw[8]; + struct usb_ctrlrequest r; + } u; + int tmp = 0; + struct net2272_request *req; + + if (dev->gadget.speed == USB_SPEED_UNKNOWN) { + if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED)) + dev->gadget.speed = USB_SPEED_HIGH; + else + dev->gadget.speed = USB_SPEED_FULL; + dev_dbg(dev->dev, "%s speed\n", + (dev->gadget.speed == USB_SPEED_HIGH) ? "high" : "full"); + } + + ep = &dev->ep[0]; + ep->irqs++; + + /* make sure any leftover interrupt state is cleared */ + stat &= ~(1 << ENDPOINT_0_INTERRUPT); + while (!list_empty(&ep->queue)) { + req = list_entry(ep->queue.next, + struct net2272_request, queue); + net2272_done(ep, req, + (req->req.actual == req->req.length) ? 0 : -EPROTO); + } + ep->stopped = 0; + dev->protocol_stall = 0; + net2272_ep_write(ep, EP_STAT0, + (1 << DATA_IN_TOKEN_INTERRUPT) + | (1 << DATA_OUT_TOKEN_INTERRUPT) + | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) + | (1 << DATA_PACKET_RECEIVED_INTERRUPT) + | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)); + net2272_ep_write(ep, EP_STAT1, + (1 << TIMEOUT) + | (1 << USB_OUT_ACK_SENT) + | (1 << USB_OUT_NAK_SENT) + | (1 << USB_IN_ACK_RCVD) + | (1 << USB_IN_NAK_SENT) + | (1 << USB_STALL_SENT) + | (1 << LOCAL_OUT_ZLP)); + + /* + * Ensure Control Read pre-validation setting is beyond maximum size + * - Control Writes can leave non-zero values in EP_TRANSFER. If + * an EP0 transfer following the Control Write is a Control Read, + * the NET2272 sees the non-zero EP_TRANSFER as an unexpected + * pre-validation count. + * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures + * the pre-validation count cannot cause an unexpected validatation + */ + net2272_write(dev, PAGESEL, 0); + net2272_write(dev, EP_TRANSFER2, 0xff); + net2272_write(dev, EP_TRANSFER1, 0xff); + net2272_write(dev, EP_TRANSFER0, 0xff); + + u.raw[0] = net2272_read(dev, SETUP0); + u.raw[1] = net2272_read(dev, SETUP1); + u.raw[2] = net2272_read(dev, SETUP2); + u.raw[3] = net2272_read(dev, SETUP3); + u.raw[4] = net2272_read(dev, SETUP4); + u.raw[5] = net2272_read(dev, SETUP5); + u.raw[6] = net2272_read(dev, SETUP6); + u.raw[7] = net2272_read(dev, SETUP7); + /* + * If you have a big endian cpu make sure le16_to_cpus + * performs the proper byte swapping here... 
+ */ + le16_to_cpus(&u.r.wValue); + le16_to_cpus(&u.r.wIndex); + le16_to_cpus(&u.r.wLength); + + /* ack the irq */ + net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT); + stat ^= (1 << SETUP_PACKET_INTERRUPT); + + /* watch control traffic at the token level, and force + * synchronization before letting the status phase happen. + */ + ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; + if (ep->is_in) { + scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) + | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE) + | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE); + stop_out_naking(ep); + } else + scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) + | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE) + | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE); + net2272_ep_write(ep, EP_IRQENB, scratch); + + if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) + goto delegate; + switch (u.r.bRequest) { + case USB_REQ_GET_STATUS: { + struct net2272_ep *e; + u16 status = 0; + + switch (u.r.bRequestType & USB_RECIP_MASK) { + case USB_RECIP_ENDPOINT: + e = net2272_get_ep_by_addr(dev, u.r.wIndex); + if (!e || u.r.wLength > 2) + goto do_stall; + if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT)) + status = __constant_cpu_to_le16(1); + else + status = __constant_cpu_to_le16(0); + + /* don't bother with a request object! */ + net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); + writew(status, net2272_reg_addr(dev, EP_DATA)); + set_fifo_bytecount(&dev->ep[0], 0); + allow_status(ep); + dev_vdbg(dev->dev, "%s stat %02x\n", + ep->ep.name, status); + goto next_endpoints; + case USB_RECIP_DEVICE: + if (u.r.wLength > 2) + goto do_stall; + if (dev->is_selfpowered) + status = (1 << USB_DEVICE_SELF_POWERED); + + /* don't bother with a request object! */ + net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); + writew(status, net2272_reg_addr(dev, EP_DATA)); + set_fifo_bytecount(&dev->ep[0], 0); + allow_status(ep); + dev_vdbg(dev->dev, "device stat %02x\n", status); + goto next_endpoints; + case USB_RECIP_INTERFACE: + if (u.r.wLength > 2) + goto do_stall; + + /* don't bother with a request object! 
*/ + net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); + writew(status, net2272_reg_addr(dev, EP_DATA)); + set_fifo_bytecount(&dev->ep[0], 0); + allow_status(ep); + dev_vdbg(dev->dev, "interface status %02x\n", status); + goto next_endpoints; + } + + break; + } + case USB_REQ_CLEAR_FEATURE: { + struct net2272_ep *e; + + if (u.r.bRequestType != USB_RECIP_ENDPOINT) + goto delegate; + if (u.r.wValue != USB_ENDPOINT_HALT || + u.r.wLength != 0) + goto do_stall; + e = net2272_get_ep_by_addr(dev, u.r.wIndex); + if (!e) + goto do_stall; + if (e->wedged) { + dev_vdbg(dev->dev, "%s wedged, halt not cleared\n", + ep->ep.name); + } else { + dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name); + clear_halt(e); + } + allow_status(ep); + goto next_endpoints; + } + case USB_REQ_SET_FEATURE: { + struct net2272_ep *e; + + if (u.r.bRequestType == USB_RECIP_DEVICE) { + if (u.r.wIndex != NORMAL_OPERATION) + net2272_set_test_mode(dev, (u.r.wIndex >> 8)); + allow_status(ep); + dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex); + goto next_endpoints; + } else if (u.r.bRequestType != USB_RECIP_ENDPOINT) + goto delegate; + if (u.r.wValue != USB_ENDPOINT_HALT || + u.r.wLength != 0) + goto do_stall; + e = net2272_get_ep_by_addr(dev, u.r.wIndex); + if (!e) + goto do_stall; + set_halt(e); + allow_status(ep); + dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name); + goto next_endpoints; + } + case USB_REQ_SET_ADDRESS: { + net2272_write(dev, OURADDR, u.r.wValue & 0xff); + allow_status(ep); + break; + } + default: + delegate: + dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x " + "ep_cfg %08x\n", + u.r.bRequestType, u.r.bRequest, + u.r.wValue, u.r.wIndex, + net2272_ep_read(ep, EP_CFG)); + spin_unlock(&dev->lock); + tmp = dev->driver->setup(&dev->gadget, &u.r); + spin_lock(&dev->lock); + } + + /* stall ep0 on error */ + if (tmp < 0) { + do_stall: + dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n", + u.r.bRequestType, u.r.bRequest, tmp); + dev->protocol_stall = 1; + } + /* endpoint dma irq? */ + } else if (stat & (1 << DMA_DONE_INTERRUPT)) { + net2272_cancel_dma(dev); + net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT); + stat &= ~(1 << DMA_DONE_INTERRUPT); + num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT)) + ? 2 : 1; + + ep = &dev->ep[num]; + net2272_handle_dma(ep); + } + + next_endpoints: + /* endpoint data irq? */ + scratch = stat & 0x0f; + stat &= ~0x0f; + for (num = 0; scratch; num++) { + u8 t; + + /* does this endpoint's FIFO and queue need tending? */ + t = 1 << num; + if ((scratch & t) == 0) + continue; + scratch ^= t; + + ep = &dev->ep[num]; + net2272_handle_ep(ep); + } + + /* some interrupts we can just ignore */ + stat &= ~(1 << SOF_INTERRUPT); + + if (stat) + dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat); +} + +static void +net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat) +{ + u8 tmp, mask; + + /* after disconnect there's nothing else to do! 
 */
+ tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
+ mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
+
+ if (stat & tmp) {
+ net2272_write(dev, IRQSTAT1, tmp);
+ if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
+ ((net2272_read(dev, USBCTL1) & mask) == 0))
+ || ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
+ == 0))
+ && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
+ dev_dbg(dev->dev, "disconnect %s\n",
+ dev->driver->driver.name);
+ stop_activity(dev, dev->driver);
+ net2272_ep0_start(dev);
+ return;
+ }
+ stat &= ~tmp;
+
+ if (!stat)
+ return;
+ }
+
+ tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
+ if (stat & tmp) {
+ net2272_write(dev, IRQSTAT1, tmp);
+ if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
+ if (dev->driver->suspend)
+ dev->driver->suspend(&dev->gadget);
+ if (!enable_suspend) {
+ stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
+ dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
+ }
+ } else {
+ if (dev->driver->resume)
+ dev->driver->resume(&dev->gadget);
+ }
+ stat &= ~tmp;
+ }
+
+ /* clear any other status/irqs */
+ if (stat)
+ net2272_write(dev, IRQSTAT1, stat);
+
+ /* some status we can just ignore */
+ stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
+ | (1 << SUSPEND_REQUEST_INTERRUPT)
+ | (1 << RESUME_INTERRUPT));
+ if (!stat)
+ return;
+ else
+ dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
+}
+
+static irqreturn_t net2272_irq(int irq, void *_dev)
+{
+ struct net2272 *dev = _dev;
+#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
+ u32 intcsr;
+#endif
+#if defined(PLX_PCI_RDK)
+ u8 dmareq;
+#endif
+ spin_lock(&dev->lock);
+#if defined(PLX_PCI_RDK)
+ intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
+
+ if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
+ writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+ net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
+ net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
+ intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
+ writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+ }
+ if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
+ writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+
+ dmareq = net2272_read(dev, DMAREQ);
+ if (dmareq & 0x01)
+ net2272_handle_dma(&dev->ep[2]);
+ else
+ net2272_handle_dma(&dev->ep[1]);
+ }
+#endif
+#if defined(PLX_PCI_RDK2)
+ /* see if PCI int for us by checking irqstat */
+ intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
+ if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
+ spin_unlock(&dev->lock);
+ return IRQ_NONE;
+ }
+ /* check dma interrupts */
+#endif
+ /* Platform/device interrupt handler */
+#if !defined(PLX_PCI_RDK)
+ net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
+ net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
+#endif
+ spin_unlock(&dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int net2272_present(struct net2272 *dev)
+{
+ /*
+ * Quick test to see if CPU can communicate properly with the NET2272.
+ * Verifies connection using writes and reads to write/read and
+ * read-only registers.
+ *
+ * This routine is strongly recommended especially during early bring-up
+ * of new hardware; however, for designs that do not apply Power On System
+ * Tests (POST) it may be discarded (or perhaps minimized).
+ */ + unsigned int ii; + u8 val, refval; + + /* Verify NET2272 write/read SCRATCH register can write and read */ + refval = net2272_read(dev, SCRATCH); + for (ii = 0; ii < 0x100; ii += 7) { + net2272_write(dev, SCRATCH, ii); + val = net2272_read(dev, SCRATCH); + if (val != ii) { + dev_dbg(dev->dev, + "%s: write/read SCRATCH register test failed: " + "wrote:0x%2.2x, read:0x%2.2x\n", + __func__, ii, val); + return -EINVAL; + } + } + /* To be nice, we write the original SCRATCH value back: */ + net2272_write(dev, SCRATCH, refval); + + /* Verify NET2272 CHIPREV register is read-only: */ + refval = net2272_read(dev, CHIPREV_2272); + for (ii = 0; ii < 0x100; ii += 7) { + net2272_write(dev, CHIPREV_2272, ii); + val = net2272_read(dev, CHIPREV_2272); + if (val != refval) { + dev_dbg(dev->dev, + "%s: write/read CHIPREV register test failed: " + "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n", + __func__, ii, val, refval); + return -EINVAL; + } + } + + /* + * Verify NET2272's "NET2270 legacy revision" register + * - NET2272 has two revision registers. The NET2270 legacy revision + * register should read the same value, regardless of the NET2272 + * silicon revision. The legacy register applies to NET2270 + * firmware being applied to the NET2272. + */ + val = net2272_read(dev, CHIPREV_LEGACY); + if (val != NET2270_LEGACY_REV) { + /* + * Unexpected legacy revision value + * - Perhaps the chip is a NET2270? + */ + dev_dbg(dev->dev, + "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n" + " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n", + __func__, NET2270_LEGACY_REV, val); + return -EINVAL; + } + + /* + * Verify NET2272 silicon revision + * - This revision register is appropriate for the silicon version + * of the NET2272 + */ + val = net2272_read(dev, CHIPREV_2272); + switch (val) { + case CHIPREV_NET2272_R1: + /* + * NET2272 Rev 1 has DMA related errata: + * - Newer silicon (Rev 1A or better) required + */ + dev_dbg(dev->dev, + "%s: Rev 1 detected: newer silicon recommended for DMA support\n", + __func__); + break; + case CHIPREV_NET2272_R1A: + break; + default: + /* NET2272 silicon version *may* not work with this firmware */ + dev_dbg(dev->dev, + "%s: unexpected silicon revision register value: " + " CHIPREV_2272: 0x%2.2x\n", + __func__, val); + /* + * Return Success, even though the chip rev is not an expected value + * - Older, pre-built firmware can attempt to operate on newer silicon + * - Often, new silicon is perfectly compatible + */ + } + + /* Success: NET2272 checks out OK */ + return 0; +} + +static void +net2272_gadget_release(struct device *_dev) +{ + struct net2272 *dev = dev_get_drvdata(_dev); + kfree(dev); +} + +/*---------------------------------------------------------------------------*/ + +static void __devexit +net2272_remove(struct net2272 *dev) +{ + /* start with the driver above us */ + if (dev->driver) { + /* should have been done already by driver model core */ + dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n", + dev->driver->driver.name); + usb_gadget_unregister_driver(dev->driver); + } + + free_irq(dev->irq, dev); + iounmap(dev->base_addr); + + device_unregister(&dev->gadget.dev); + device_remove_file(dev->dev, &dev_attr_registers); + + dev_info(dev->dev, "unbind\n"); + the_controller = NULL; +} + +static struct net2272 * __devinit +net2272_probe_init(struct device *dev, unsigned int irq) +{ + struct net2272 *ret; + + if (the_controller) { + dev_warn(dev, "ignoring\n"); + return ERR_PTR(-EBUSY); + } + + if (!irq) { + 
dev_dbg(dev, "No IRQ!\n"); + return ERR_PTR(-ENODEV); + } + + /* alloc, and start init */ + ret = kzalloc(sizeof(*ret), GFP_KERNEL); + if (!ret) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&ret->lock); + ret->irq = irq; + ret->dev = dev; + ret->gadget.ops = &net2272_ops; + ret->gadget.is_dualspeed = 1; + + /* the "gadget" abstracts/virtualizes the controller */ + dev_set_name(&ret->gadget.dev, "gadget"); + ret->gadget.dev.parent = dev; + ret->gadget.dev.dma_mask = dev->dma_mask; + ret->gadget.dev.release = net2272_gadget_release; + ret->gadget.name = driver_name; + + return ret; +} + +static int __devinit +net2272_probe_fin(struct net2272 *dev, unsigned int irqflags) +{ + int ret; + + /* See if there... */ + if (net2272_present(dev)) { + dev_warn(dev->dev, "2272 not found!\n"); + ret = -ENODEV; + goto err; + } + + net2272_usb_reset(dev); + net2272_usb_reinit(dev); + + ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev); + if (ret) { + dev_err(dev->dev, "request interrupt %i failed\n", dev->irq); + goto err; + } + + dev->chiprev = net2272_read(dev, CHIPREV_2272); + + /* done */ + dev_info(dev->dev, "%s\n", driver_desc); + dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n", + dev->irq, dev->base_addr, dev->chiprev, + dma_mode_string()); + dev_info(dev->dev, "version: %s\n", driver_vers); + + the_controller = dev; + + ret = device_register(&dev->gadget.dev); + if (ret) + goto err_irq; + ret = device_create_file(dev->dev, &dev_attr_registers); + if (ret) + goto err_dev_reg; + + return 0; + + err_dev_reg: + device_unregister(&dev->gadget.dev); + err_irq: + free_irq(dev->irq, dev); + err: + return ret; +} + +#ifdef CONFIG_PCI + +/* + * wrap this driver around the specified device, but + * don't respond over USB until a gadget driver binds to us + */ + +static int __devinit +net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev) +{ + unsigned long resource, len, tmp; + void __iomem *mem_mapped_addr[4]; + int ret, i; + + /* + * BAR 0 holds PLX 9054 config registers + * BAR 1 is i/o memory; unused here + * BAR 2 holds EPLD config registers + * BAR 3 holds NET2272 registers + */ + + /* Find and map all address spaces */ + for (i = 0; i < 4; ++i) { + if (i == 1) + continue; /* BAR1 unused */ + + resource = pci_resource_start(pdev, i); + len = pci_resource_len(pdev, i); + + if (!request_mem_region(resource, len, driver_name)) { + dev_dbg(dev->dev, "controller already in use\n"); + ret = -EBUSY; + goto err; + } + + mem_mapped_addr[i] = ioremap_nocache(resource, len); + if (mem_mapped_addr[i] == NULL) { + release_mem_region(resource, len); + dev_dbg(dev->dev, "can't map memory\n"); + ret = -EFAULT; + goto err; + } + } + + dev->rdk1.plx9054_base_addr = mem_mapped_addr[0]; + dev->rdk1.epld_base_addr = mem_mapped_addr[2]; + dev->base_addr = mem_mapped_addr[3]; + + /* Set PLX 9054 bus width (16 bits) */ + tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1); + writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT, + dev->rdk1.plx9054_base_addr + LBRD1); + + /* Enable PLX 9054 Interrupts */ + writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) | + (1 << PCI_INTERRUPT_ENABLE) | + (1 << LOCAL_INTERRUPT_INPUT_ENABLE), + dev->rdk1.plx9054_base_addr + INTCSR); + + writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), + dev->rdk1.plx9054_base_addr + DMACSR0); + + /* reset */ + writeb((1 << EPLD_DMA_ENABLE) | + (1 << DMA_CTL_DACK) | + (1 << DMA_TIMEOUT_ENABLE) | + (1 << USER) | + (0 << MPX_MODE) | + (1 << BUSWIDTH) | + (1 << NET2272_RESET), + dev->base_addr + 
EPLD_IO_CONTROL_REGISTER);
+
+ mb();
+ writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
+ ~(1 << NET2272_RESET),
+ dev->base_addr + EPLD_IO_CONTROL_REGISTER);
+ udelay(200);
+
+ return 0;
+
+ err:
+ while (--i >= 0) {
+ iounmap(mem_mapped_addr[i]);
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ }
+
+ return ret;
+}
+
+static int __devinit
+net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
+{
+ unsigned long resource, len;
+ void __iomem *mem_mapped_addr[2];
+ int ret, i;
+
+ /*
+ * BAR 0 holds FPGA config registers
+ * BAR 1 holds NET2272 registers
+ */
+
+ /* Find and map all address spaces, bar2-3 unused in rdk 2 */
+ for (i = 0; i < 2; ++i) {
+ resource = pci_resource_start(pdev, i);
+ len = pci_resource_len(pdev, i);
+
+ if (!request_mem_region(resource, len, driver_name)) {
+ dev_dbg(dev->dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ mem_mapped_addr[i] = ioremap_nocache(resource, len);
+ if (mem_mapped_addr[i] == NULL) {
+ release_mem_region(resource, len);
+ dev_dbg(dev->dev, "can't map memory\n");
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+
+ dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
+ dev->base_addr = mem_mapped_addr[1];
+
+ mb();
+ /* Set 2272 bus width (16 bits) and reset */
+ writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
+ udelay(200);
+ writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
+ /* Print fpga version number */
+ dev_info(dev->dev, "RDK2 FPGA version %08x\n",
+ readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
+ /* Enable FPGA Interrupts */
+ writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
+
+ return 0;
+
+ err:
+ while (--i >= 0) {
+ iounmap(mem_mapped_addr[i]);
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ }
+
+ return ret;
+}
+
+static int __devinit
+net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net2272 *dev;
+ int ret;
+
+ dev = net2272_probe_init(&pdev->dev, pdev->irq);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ dev->dev_id = pdev->device;
+
+ if (pci_enable_device(pdev) < 0) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ pci_set_master(pdev);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
+ case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
+ default: BUG();
+ }
+ if (ret)
+ goto err_pci;
+
+ ret = net2272_probe_fin(dev, 0);
+ if (ret)
+ goto err_pci;
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+ err_pci:
+ pci_disable_device(pdev);
+ err_free:
+ kfree(dev);
+
+ return ret;
+}
+
+static void __devexit
+net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
+{
+ int i;
+
+ /* disable PLX 9054 interrupts */
+ writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
+ ~(1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+
+ /* clean up resources allocated during probe() */
+ iounmap(dev->rdk1.plx9054_base_addr);
+ iounmap(dev->rdk1.epld_base_addr);
+
+ for (i = 0; i < 4; ++i) {
+ if (i == 1)
+ continue; /* BAR1 unused */
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ }
+}
+
+static void __devexit
+net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
+{
+ int i;
+
+ /* disable fpga interrupts
+ writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
+ ~(1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+ */
+
+ /* clean up resources allocated during probe() */
+ iounmap(dev->rdk2.fpga_base_addr);
+ + for (i = 0; i < 2; ++i) + release_mem_region(pci_resource_start(pdev, i), + pci_resource_len(pdev, i)); +} + +static void __devexit +net2272_pci_remove(struct pci_dev *pdev) +{ + struct net2272 *dev = pci_get_drvdata(pdev); + + net2272_remove(dev); + + switch (pdev->device) { + case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break; + case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break; + default: BUG(); + } + + pci_disable_device(pdev); + + kfree(dev); +} + +/* Table of matching PCI IDs */ +static struct pci_device_id __devinitdata pci_ids[] = { + { /* RDK 1 card */ + .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe), + .class_mask = 0, + .vendor = PCI_VENDOR_ID_PLX, + .device = PCI_DEVICE_ID_RDK1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { /* RDK 2 card */ + .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe), + .class_mask = 0, + .vendor = PCI_VENDOR_ID_PLX, + .device = PCI_DEVICE_ID_RDK2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +static struct pci_driver net2272_pci_driver = { + .name = driver_name, + .id_table = pci_ids, + + .probe = net2272_pci_probe, + .remove = __devexit_p(net2272_pci_remove), +}; + +#else +# define pci_register_driver(x) 1 +# define pci_unregister_driver(x) 1 +#endif + +/*---------------------------------------------------------------------------*/ + +static int __devinit +net2272_plat_probe(struct platform_device *pdev) +{ + struct net2272 *dev; + int ret; + unsigned int irqflags; + resource_size_t base, len; + struct resource *iomem, *iomem_bus, *irq_res; + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0); + if (!irq_res || !iomem) { + dev_err(&pdev->dev, "must provide irq/base addr"); + return -EINVAL; + } + + dev = net2272_probe_init(&pdev->dev, irq_res->start); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + irqflags = 0; + if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE) + irqflags |= IRQF_TRIGGER_RISING; + if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE) + irqflags |= IRQF_TRIGGER_FALLING; + if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL) + irqflags |= IRQF_TRIGGER_HIGH; + if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL) + irqflags |= IRQF_TRIGGER_LOW; + + base = iomem->start; + len = resource_size(iomem); + if (iomem_bus) + dev->base_shift = iomem_bus->start; + + if (!request_mem_region(base, len, driver_name)) { + dev_dbg(dev->dev, "get request memory region!\n"); + ret = -EBUSY; + goto err; + } + dev->base_addr = ioremap_nocache(base, len); + if (!dev->base_addr) { + dev_dbg(dev->dev, "can't map memory\n"); + ret = -EFAULT; + goto err_req; + } + + ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW); + if (ret) + goto err_io; + + platform_set_drvdata(pdev, dev); + dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n", + (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? 
"" : "no "); + + the_controller = dev; + + return 0; + + err_io: + iounmap(dev->base_addr); + err_req: + release_mem_region(base, len); + err: + return ret; +} + +static int __devexit +net2272_plat_remove(struct platform_device *pdev) +{ + struct net2272 *dev = platform_get_drvdata(pdev); + + net2272_remove(dev); + + release_mem_region(pdev->resource[0].start, + resource_size(&pdev->resource[0])); + + kfree(dev); + + return 0; +} + +static struct platform_driver net2272_plat_driver = { + .probe = net2272_plat_probe, + .remove = __devexit_p(net2272_plat_remove), + .driver = { + .name = driver_name, + .owner = THIS_MODULE, + }, + /* FIXME .suspend, .resume */ +}; + +static int __init net2272_init(void) +{ + return pci_register_driver(&net2272_pci_driver) & + platform_driver_register(&net2272_plat_driver); +} +module_init(net2272_init); + +static void __exit net2272_cleanup(void) +{ + pci_unregister_driver(&net2272_pci_driver); + platform_driver_unregister(&net2272_plat_driver); +} +module_exit(net2272_cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("PLX Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/net2272.h b/drivers/usb/gadget/net2272.h new file mode 100644 index 000000000000..e59505789359 --- /dev/null +++ b/drivers/usb/gadget/net2272.h @@ -0,0 +1,601 @@ +/* + * PLX NET2272 high/full speed USB device controller + * + * Copyright (C) 2005-2006 PLX Technology, Inc. + * Copyright (C) 2006-2011 Analog Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __NET2272_H__ +#define __NET2272_H__ + +/* Main Registers */ +#define REGADDRPTR 0x00 +#define REGDATA 0x01 +#define IRQSTAT0 0x02 +#define ENDPOINT_0_INTERRUPT 0 +#define ENDPOINT_A_INTERRUPT 1 +#define ENDPOINT_B_INTERRUPT 2 +#define ENDPOINT_C_INTERRUPT 3 +#define VIRTUALIZED_ENDPOINT_INTERRUPT 4 +#define SETUP_PACKET_INTERRUPT 5 +#define DMA_DONE_INTERRUPT 6 +#define SOF_INTERRUPT 7 +#define IRQSTAT1 0x03 +#define CONTROL_STATUS_INTERRUPT 1 +#define VBUS_INTERRUPT 2 +#define SUSPEND_REQUEST_INTERRUPT 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT 4 +#define RESUME_INTERRUPT 5 +#define ROOT_PORT_RESET_INTERRUPT 6 +#define RESET_STATUS 7 +#define PAGESEL 0x04 +#define DMAREQ 0x1c +#define DMA_ENDPOINT_SELECT 0 +#define DREQ_POLARITY 1 +#define DACK_POLARITY 2 +#define EOT_POLARITY 3 +#define DMA_CONTROL_DACK 4 +#define DMA_REQUEST_ENABLE 5 +#define DMA_REQUEST 6 +#define DMA_BUFFER_VALID 7 +#define SCRATCH 0x1d +#define IRQENB0 0x20 +#define ENDPOINT_0_INTERRUPT_ENABLE 0 +#define ENDPOINT_A_INTERRUPT_ENABLE 1 +#define ENDPOINT_B_INTERRUPT_ENABLE 2 +#define ENDPOINT_C_INTERRUPT_ENABLE 3 +#define VIRTUALIZED_ENDPOINT_INTERRUPT_ENABLE 4 +#define SETUP_PACKET_INTERRUPT_ENABLE 5 +#define DMA_DONE_INTERRUPT_ENABLE 6 +#define SOF_INTERRUPT_ENABLE 7 +#define IRQENB1 0x21 +#define VBUS_INTERRUPT_ENABLE 2 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 4 +#define RESUME_INTERRUPT_ENABLE 5 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 6 +#define LOCCTL 0x22 +#define DATA_WIDTH 0 +#define LOCAL_CLOCK_OUTPUT 1 +#define LOCAL_CLOCK_OUTPUT_OFF 0 +#define LOCAL_CLOCK_OUTPUT_3_75MHZ 1 +#define LOCAL_CLOCK_OUTPUT_7_5MHZ 2 +#define LOCAL_CLOCK_OUTPUT_15MHZ 3 +#define LOCAL_CLOCK_OUTPUT_30MHZ 4 +#define LOCAL_CLOCK_OUTPUT_60MHZ 5 +#define DMA_SPLIT_BUS_MODE 4 +#define BYTE_SWAP 5 +#define BUFFER_CONFIGURATION 6 +#define BUFFER_CONFIGURATION_EPA512_EPB512 0 +#define BUFFER_CONFIGURATION_EPA1024_EPB512 1 +#define BUFFER_CONFIGURATION_EPA1024_EPB1024 2 +#define BUFFER_CONFIGURATION_EPA1024DB 3 +#define CHIPREV_LEGACY 0x23 +#define NET2270_LEGACY_REV 0x40 +#define LOCCTL1 0x24 +#define DMA_MODE 0 +#define SLOW_DREQ 0 +#define FAST_DREQ 1 +#define BURST_MODE 2 +#define DMA_DACK_ENABLE 2 +#define CHIPREV_2272 0x25 +#define CHIPREV_NET2272_R1 0x10 +#define CHIPREV_NET2272_R1A 0x11 +/* USB Registers */ +#define USBCTL0 0x18 +#define IO_WAKEUP_ENABLE 1 +#define USB_DETECT_ENABLE 3 +#define USB_ROOT_PORT_WAKEUP_ENABLE 5 +#define USBCTL1 0x19 +#define VBUS_PIN 0 +#define USB_FULL_SPEED 1 +#define USB_HIGH_SPEED 2 +#define GENERATE_RESUME 3 +#define VIRTUAL_ENDPOINT_ENABLE 4 +#define FRAME0 0x1a +#define FRAME1 0x1b +#define OURADDR 0x30 +#define FORCE_IMMEDIATE 7 +#define USBDIAG 0x31 +#define FORCE_TRANSMIT_CRC_ERROR 0 +#define PREVENT_TRANSMIT_BIT_STUFF 1 +#define FORCE_RECEIVE_ERROR 2 +#define FAST_TIMES 4 +#define USBTEST 0x32 +#define TEST_MODE_SELECT 0 +#define NORMAL_OPERATION 0 +#define TEST_J 1 +#define TEST_K 2 +#define TEST_SE0_NAK 3 +#define TEST_PACKET 4 +#define TEST_FORCE_ENABLE 5 +#define XCVRDIAG 0x33 +#define FORCE_FULL_SPEED 2 +#define FORCE_HIGH_SPEED 3 +#define OPMODE 4 +#define NORMAL_OPERATION 0 +#define NON_DRIVING 1 +#define DISABLE_BITSTUFF_AND_NRZI_ENCODE 2 +#define LINESTATE 6 +#define SE0_STATE 0 +#define J_STATE 1 +#define 
K_STATE 2 +#define SE1_STATE 3 +#define VIRTOUT0 0x34 +#define VIRTOUT1 0x35 +#define VIRTIN0 0x36 +#define VIRTIN1 0x37 +#define SETUP0 0x40 +#define SETUP1 0x41 +#define SETUP2 0x42 +#define SETUP3 0x43 +#define SETUP4 0x44 +#define SETUP5 0x45 +#define SETUP6 0x46 +#define SETUP7 0x47 +/* Endpoint Registers (Paged via PAGESEL) */ +#define EP_DATA 0x05 +#define EP_STAT0 0x06 +#define DATA_IN_TOKEN_INTERRUPT 0 +#define DATA_OUT_TOKEN_INTERRUPT 1 +#define DATA_PACKET_TRANSMITTED_INTERRUPT 2 +#define DATA_PACKET_RECEIVED_INTERRUPT 3 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT 4 +#define NAK_OUT_PACKETS 5 +#define BUFFER_EMPTY 6 +#define BUFFER_FULL 7 +#define EP_STAT1 0x07 +#define TIMEOUT 0 +#define USB_OUT_ACK_SENT 1 +#define USB_OUT_NAK_SENT 2 +#define USB_IN_ACK_RCVD 3 +#define USB_IN_NAK_SENT 4 +#define USB_STALL_SENT 5 +#define LOCAL_OUT_ZLP 6 +#define BUFFER_FLUSH 7 +#define EP_TRANSFER0 0x08 +#define EP_TRANSFER1 0x09 +#define EP_TRANSFER2 0x0a +#define EP_IRQENB 0x0b +#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0 +#define DATA_OUT_TOKEN_INTERRUPT_ENABLE 1 +#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2 +#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 4 +#define EP_AVAIL0 0x0c +#define EP_AVAIL1 0x0d +#define EP_RSPCLR 0x0e +#define EP_RSPSET 0x0f +#define ENDPOINT_HALT 0 +#define ENDPOINT_TOGGLE 1 +#define NAK_OUT_PACKETS_MODE 2 +#define CONTROL_STATUS_PHASE_HANDSHAKE 3 +#define INTERRUPT_MODE 4 +#define AUTOVALIDATE 5 +#define HIDE_STATUS_PHASE 6 +#define ALT_NAK_OUT_PACKETS 7 +#define EP_MAXPKT0 0x28 +#define EP_MAXPKT1 0x29 +#define ADDITIONAL_TRANSACTION_OPPORTUNITIES 3 +#define NONE_ADDITIONAL_TRANSACTION 0 +#define ONE_ADDITIONAL_TRANSACTION 1 +#define TWO_ADDITIONAL_TRANSACTION 2 +#define EP_CFG 0x2a +#define ENDPOINT_NUMBER 0 +#define ENDPOINT_DIRECTION 4 +#define ENDPOINT_TYPE 5 +#define ENDPOINT_ENABLE 7 +#define EP_HBW 0x2b +#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 0 +#define DATA0_PID 0 +#define DATA1_PID 1 +#define DATA2_PID 2 +#define MDATA_PID 3 +#define EP_BUFF_STATES 0x2c +#define BUFFER_A_STATE 0 +#define BUFFER_B_STATE 2 +#define BUFF_FREE 0 +#define BUFF_VALID 1 +#define BUFF_LCL 2 +#define BUFF_USB 3 + +/*---------------------------------------------------------------------------*/ + +#define PCI_DEVICE_ID_RDK1 0x9054 + +/* PCI-RDK EPLD Registers */ +#define RDK_EPLD_IO_REGISTER1 0x00000000 +#define RDK_EPLD_USB_RESET 0 +#define RDK_EPLD_USB_POWERDOWN 1 +#define RDK_EPLD_USB_WAKEUP 2 +#define RDK_EPLD_USB_EOT 3 +#define RDK_EPLD_DPPULL 4 +#define RDK_EPLD_IO_REGISTER2 0x00000004 +#define RDK_EPLD_BUSWIDTH 0 +#define RDK_EPLD_USER 2 +#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3 +#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4 +#define RDK_EPLD_STATUS_REGISTER 0x00000008 +#define RDK_EPLD_USB_LRESET 0 +#define RDK_EPLD_REVISION_REGISTER 0x0000000c + +/* PCI-RDK PLX 9054 Registers */ +#define INTCSR 0x68 +#define PCI_INTERRUPT_ENABLE 8 +#define LOCAL_INTERRUPT_INPUT_ENABLE 11 +#define LOCAL_INPUT_INTERRUPT_ACTIVE 15 +#define LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE 18 +#define LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE 19 +#define DMA_CHANNEL_0_INTERRUPT_ACTIVE 21 +#define DMA_CHANNEL_1_INTERRUPT_ACTIVE 22 +#define CNTRL 0x6C +#define RELOAD_CONFIGURATION_REGISTERS 29 +#define PCI_ADAPTER_SOFTWARE_RESET 30 +#define DMAMODE0 0x80 +#define LOCAL_BUS_WIDTH 0 +#define INTERNAL_WAIT_STATES 2 +#define TA_READY_INPUT_ENABLE 6 +#define LOCAL_BURST_ENABLE 8 +#define SCATTER_GATHER_MODE 9 +#define DONE_INTERRUPT_ENABLE 10 +#define 
LOCAL_ADDRESSING_MODE 11 +#define DEMAND_MODE 12 +#define DMA_EOT_ENABLE 14 +#define FAST_SLOW_TERMINATE_MODE_SELECT 15 +#define DMA_CHANNEL_INTERRUPT_SELECT 17 +#define DMAPADR0 0x84 +#define DMALADR0 0x88 +#define DMASIZ0 0x8c +#define DMADPR0 0x90 +#define DESCRIPTOR_LOCATION 0 +#define END_OF_CHAIN 1 +#define INTERRUPT_AFTER_TERMINAL_COUNT 2 +#define DIRECTION_OF_TRANSFER 3 +#define DMACSR0 0xa8 +#define CHANNEL_ENABLE 0 +#define CHANNEL_START 1 +#define CHANNEL_ABORT 2 +#define CHANNEL_CLEAR_INTERRUPT 3 +#define CHANNEL_DONE 4 +#define DMATHR 0xb0 +#define LBRD1 0xf8 +#define MEMORY_SPACE_LOCAL_BUS_WIDTH 0 +#define W8_BIT 0 +#define W16_BIT 1 + +/* Special OR'ing of INTCSR bits */ +#define LOCAL_INTERRUPT_TEST \ + ((1 << LOCAL_INPUT_INTERRUPT_ACTIVE) | \ + (1 << LOCAL_INTERRUPT_INPUT_ENABLE)) + +#define DMA_CHANNEL_0_TEST \ + ((1 << DMA_CHANNEL_0_INTERRUPT_ACTIVE) | \ + (1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE)) + +#define DMA_CHANNEL_1_TEST \ + ((1 << DMA_CHANNEL_1_INTERRUPT_ACTIVE) | \ + (1 << LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE)) + +/* EPLD Registers */ +#define RDK_EPLD_IO_REGISTER1 0x00000000 +#define RDK_EPLD_USB_RESET 0 +#define RDK_EPLD_USB_POWERDOWN 1 +#define RDK_EPLD_USB_WAKEUP 2 +#define RDK_EPLD_USB_EOT 3 +#define RDK_EPLD_DPPULL 4 +#define RDK_EPLD_IO_REGISTER2 0x00000004 +#define RDK_EPLD_BUSWIDTH 0 +#define RDK_EPLD_USER 2 +#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3 +#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4 +#define RDK_EPLD_STATUS_REGISTER 0x00000008 +#define RDK_EPLD_USB_LRESET 0 +#define RDK_EPLD_REVISION_REGISTER 0x0000000c + +#define EPLD_IO_CONTROL_REGISTER 0x400 +#define NET2272_RESET 0 +#define BUSWIDTH 1 +#define MPX_MODE 3 +#define USER 4 +#define DMA_TIMEOUT_ENABLE 5 +#define DMA_CTL_DACK 6 +#define EPLD_DMA_ENABLE 7 +#define EPLD_DMA_CONTROL_REGISTER 0x800 +#define SPLIT_DMA_MODE 0 +#define SPLIT_DMA_DIRECTION 1 +#define SPLIT_DMA_ENABLE 2 +#define SPLIT_DMA_INTERRUPT_ENABLE 3 +#define SPLIT_DMA_INTERRUPT 4 +#define EPLD_DMA_MODE 5 +#define EPLD_DMA_CONTROLLER_ENABLE 7 +#define SPLIT_DMA_ADDRESS_LOW 0xc00 +#define SPLIT_DMA_ADDRESS_HIGH 0x1000 +#define SPLIT_DMA_BYTE_COUNT_LOW 0x1400 +#define SPLIT_DMA_BYTE_COUNT_HIGH 0x1800 +#define EPLD_REVISION_REGISTER 0x1c00 +#define SPLIT_DMA_RAM 0x4000 +#define DMA_RAM_SIZE 0x1000 + +/*---------------------------------------------------------------------------*/ + +#define PCI_DEVICE_ID_RDK2 0x3272 + +/* PCI-RDK version 2 registers */ + +/* Main Control Registers */ + +#define RDK2_IRQENB 0x00 +#define RDK2_IRQSTAT 0x04 +#define PB7 23 +#define PB6 22 +#define PB5 21 +#define PB4 20 +#define PB3 19 +#define PB2 18 +#define PB1 17 +#define PB0 16 +#define GP3 23 +#define GP2 23 +#define GP1 23 +#define GP0 23 +#define DMA_RETRY_ABORT 6 +#define DMA_PAUSE_DONE 5 +#define DMA_ABORT_DONE 4 +#define DMA_OUT_FIFO_TRANSFER_DONE 3 +#define DMA_LOCAL_DONE 2 +#define DMA_PCI_DONE 1 +#define NET2272_PCI_IRQ 0 + +#define RDK2_LOCCTLRDK 0x08 +#define CHIP_RESET 3 +#define SPLIT_DMA 2 +#define MULTIPLEX_MODE 1 +#define BUS_WIDTH 0 + +#define RDK2_GPIOCTL 0x10 +#define GP3_OUT_ENABLE 7 +#define GP2_OUT_ENABLE 6 +#define GP1_OUT_ENABLE 5 +#define GP0_OUT_ENABLE 4 +#define GP3_DATA 3 +#define GP2_DATA 2 +#define GP1_DATA 1 +#define GP0_DATA 0 + +#define RDK2_LEDSW 0x14 +#define LED3 27 +#define LED2 26 +#define LED1 25 +#define LED0 24 +#define PBUTTON 16 +#define DIPSW 0 + +#define RDK2_DIAG 0x18 +#define RDK2_FAST_TIMES 2 +#define FORCE_PCI_SERR 1 +#define FORCE_PCI_INT 0 +#define RDK2_FPGAREV 0x1C + +/* Dma Control registers */ 
+#define RDK2_DMACTL 0x80 +#define ADDR_HOLD 24 +#define RETRY_COUNT 16 /* 23:16 */ +#define FIFO_THRESHOLD 11 /* 15:11 */ +#define MEM_WRITE_INVALIDATE 10 +#define READ_MULTIPLE 9 +#define READ_LINE 8 +#define RDK2_DMA_MODE 6 /* 7:6 */ +#define CONTROL_DACK 5 +#define EOT_ENABLE 4 +#define EOT_POLARITY 3 +#define DACK_POLARITY 2 +#define DREQ_POLARITY 1 +#define DMA_ENABLE 0 + +#define RDK2_DMASTAT 0x84 +#define GATHER_COUNT 12 /* 14:12 */ +#define FIFO_COUNT 6 /* 11:6 */ +#define FIFO_FLUSH 5 +#define FIFO_TRANSFER 4 +#define PAUSE_DONE 3 +#define ABORT_DONE 2 +#define DMA_ABORT 1 +#define DMA_START 0 + +#define RDK2_DMAPCICOUNT 0x88 +#define DMA_DIRECTION 31 +#define DMA_PCI_BYTE_COUNT 0 /* 0:23 */ + +#define RDK2_DMALOCCOUNT 0x8C /* 0:23 dma local byte count */ + +#define RDK2_DMAADDR 0x90 /* 2:31 PCI bus starting address */ + +/*---------------------------------------------------------------------------*/ + +#define REG_INDEXED_THRESHOLD (1 << 5) + +/* DRIVER DATA STRUCTURES and UTILITIES */ +struct net2272_ep { + struct usb_ep ep; + struct net2272 *dev; + unsigned long irqs; + + /* analogous to a host-side qh */ + struct list_head queue; + const struct usb_endpoint_descriptor *desc; + unsigned num:8, + fifo_size:12, + stopped:1, + wedged:1, + is_in:1, + is_iso:1, + dma:1, + not_empty:1; +}; + +struct net2272 { + /* each device provides one gadget, several endpoints */ + struct usb_gadget gadget; + struct device *dev; + unsigned short dev_id; + + spinlock_t lock; + struct net2272_ep ep[4]; + struct usb_gadget_driver *driver; + unsigned protocol_stall:1, + softconnect:1, + is_selfpowered:1, + wakeup:1, + dma_eot_polarity:1, + dma_dack_polarity:1, + dma_dreq_polarity:1, + dma_busy:1; + u16 chiprev; + u8 pagesel; + + unsigned int irq; + unsigned short fifo_mode; + + unsigned int base_shift; + u16 __iomem *base_addr; + union { +#ifdef CONFIG_PCI + struct { + void __iomem *plx9054_base_addr; + void __iomem *epld_base_addr; + } rdk1; + struct { + /* Bar0, Bar1 is base_addr both mem-mapped */ + void __iomem *fpga_base_addr; + } rdk2; +#endif + }; +}; + +static void __iomem * +net2272_reg_addr(struct net2272 *dev, unsigned int reg) +{ + return dev->base_addr + (reg << dev->base_shift); +} + +static void +net2272_write(struct net2272 *dev, unsigned int reg, u8 value) +{ + if (reg >= REG_INDEXED_THRESHOLD) { + /* + * Indexed register; use REGADDRPTR/REGDATA + * - Save and restore REGADDRPTR. This prevents REGADDRPTR from + * changes between other code sections, but it is time consuming. + * - Performance tips: either do not save and restore REGADDRPTR (if it + * is safe) or do save/restore operations only in critical sections. + u8 tmp = readb(dev->base_addr + REGADDRPTR); + */ + writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR)); + writeb(value, net2272_reg_addr(dev, REGDATA)); + /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */ + } else + writeb(value, net2272_reg_addr(dev, reg)); +} + +static u8 +net2272_read(struct net2272 *dev, unsigned int reg) +{ + u8 ret; + + if (reg >= REG_INDEXED_THRESHOLD) { + /* + * Indexed register; use REGADDRPTR/REGDATA + * - Save and restore REGADDRPTR. This prevents REGADDRPTR from + * changes between other code sections, but it is time consuming. + * - Performance tips: either do not save and restore REGADDRPTR (if it + * is safe) or do save/restore operations only in critical sections. 
+ u8 tmp = readb(dev->base_addr + REGADDRPTR); + */ + writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR)); + ret = readb(net2272_reg_addr(dev, REGDATA)); + /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */ + } else + ret = readb(net2272_reg_addr(dev, reg)); + + return ret; +} + +static void +net2272_ep_write(struct net2272_ep *ep, unsigned int reg, u8 value) +{ + struct net2272 *dev = ep->dev; + + if (dev->pagesel != ep->num) { + net2272_write(dev, PAGESEL, ep->num); + dev->pagesel = ep->num; + } + net2272_write(dev, reg, value); +} + +static u8 +net2272_ep_read(struct net2272_ep *ep, unsigned int reg) +{ + struct net2272 *dev = ep->dev; + + if (dev->pagesel != ep->num) { + net2272_write(dev, PAGESEL, ep->num); + dev->pagesel = ep->num; + } + return net2272_read(dev, reg); +} + +static void allow_status(struct net2272_ep *ep) +{ + /* ep0 only */ + net2272_ep_write(ep, EP_RSPCLR, + (1 << CONTROL_STATUS_PHASE_HANDSHAKE) | + (1 << ALT_NAK_OUT_PACKETS) | + (1 << NAK_OUT_PACKETS_MODE)); + ep->stopped = 1; +} + +static void set_halt(struct net2272_ep *ep) +{ + /* ep0 and bulk/intr endpoints */ + net2272_ep_write(ep, EP_RSPCLR, 1 << CONTROL_STATUS_PHASE_HANDSHAKE); + net2272_ep_write(ep, EP_RSPSET, 1 << ENDPOINT_HALT); +} + +static void clear_halt(struct net2272_ep *ep) +{ + /* ep0 and bulk/intr endpoints */ + net2272_ep_write(ep, EP_RSPCLR, + (1 << ENDPOINT_HALT) | (1 << ENDPOINT_TOGGLE)); +} + +/* count (<= 4) bytes in the next fifo write will be valid */ +static void set_fifo_bytecount(struct net2272_ep *ep, unsigned count) +{ + /* net2272_ep_write will truncate to u8 for us */ + net2272_ep_write(ep, EP_TRANSFER2, count >> 16); + net2272_ep_write(ep, EP_TRANSFER1, count >> 8); + net2272_ep_write(ep, EP_TRANSFER0, count); +} + +struct net2272_request { + struct usb_request req; + struct list_head queue; + unsigned mapped:1, + valid:1; +}; + +#endif diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index b435ed67dd5c..e18862c5b059 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -110,7 +110,7 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications"); /* for link power management(LPM) feature */ static unsigned int hird; module_param(hird, int, S_IRUGO); -MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n"); +MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us"); #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c index e3374c8f7b3f..b3958b3d3163 100644 --- a/drivers/usb/host/ehci-s5p.c +++ b/drivers/usb/host/ehci-s5p.c @@ -189,6 +189,100 @@ static void s5p_ehci_shutdown(struct platform_device *pdev) hcd->driver->shutdown(hcd); } +#ifdef CONFIG_PM +static int s5p_ehci_suspend(struct device *dev) +{ + struct s5p_ehci_hcd *s5p_ehci = dev_get_drvdata(dev); + struct usb_hcd *hcd = s5p_ehci->hcd; + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct platform_device *pdev = to_platform_device(dev); + struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; + unsigned long flags; + int rc = 0; + + if (time_before(jiffies, ehci->next_statechange)) + msleep(20); + + /* + * Root hub was already suspended. Disable irq emission and + * mark HW unaccessible. The PM and USB cores make sure that + * the root hub is either suspended or stopped. 
+ */ + ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev)); + spin_lock_irqsave(&ehci->lock, flags); + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + (void)ehci_readl(ehci, &ehci->regs->intr_enable); + + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + spin_unlock_irqrestore(&ehci->lock, flags); + + if (pdata && pdata->phy_exit) + pdata->phy_exit(pdev, S5P_USB_PHY_HOST); + + return rc; +} + +static int s5p_ehci_resume(struct device *dev) +{ + struct s5p_ehci_hcd *s5p_ehci = dev_get_drvdata(dev); + struct usb_hcd *hcd = s5p_ehci->hcd; + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct platform_device *pdev = to_platform_device(dev); + struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; + + if (pdata && pdata->phy_init) + pdata->phy_init(pdev, S5P_USB_PHY_HOST); + + if (time_before(jiffies, ehci->next_statechange)) + msleep(100); + + /* Mark hardware accessible again as we are out of D3 state by now */ + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + + if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) { + int mask = INTR_MASK; + + ehci_prepare_ports_for_controller_resume(ehci); + if (!hcd->self.root_hub->do_remote_wakeup) + mask &= ~STS_PCD; + ehci_writel(ehci, mask, &ehci->regs->intr_enable); + ehci_readl(ehci, &ehci->regs->intr_enable); + return 0; + } + + usb_root_hub_lost_power(hcd->self.root_hub); + + (void) ehci_halt(ehci); + (void) ehci_reset(ehci); + + /* emptying the schedule aborts any urbs */ + spin_lock_irq(&ehci->lock); + if (ehci->reclaim) + end_unlink_async(ehci); + ehci_work(ehci); + spin_unlock_irq(&ehci->lock); + + ehci_writel(ehci, ehci->command, &ehci->regs->command); + ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + + /* here we "know" root ports should always stay powered */ + ehci_port_power(ehci, 1); + + hcd->state = HC_STATE_SUSPENDED; + + return 0; +} +#else +#define s5p_ehci_suspend NULL +#define s5p_ehci_resume NULL +#endif + +static const struct dev_pm_ops s5p_ehci_pm_ops = { + .suspend = s5p_ehci_suspend, + .resume = s5p_ehci_resume, +}; + static struct platform_driver s5p_ehci_driver = { .probe = s5p_ehci_probe, .remove = __devexit_p(s5p_ehci_remove), @@ -196,6 +290,7 @@ static struct platform_driver s5p_ehci_driver = { .driver = { .name = "s5p-ehci", .owner = THIS_MODULE, + .pm = &s5p_ehci_pm_ops, } }; diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 1f50b4468e87..e9b0f043455d 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -266,11 +266,11 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) xhci_dbg(xhci, "Interrupter target = 0x%x\n", GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target))); xhci_dbg(xhci, "Cycle bit = %u\n", - (unsigned int) (le32_to_cpu(trb->link.control) & TRB_CYCLE)); + le32_to_cpu(trb->link.control) & TRB_CYCLE); xhci_dbg(xhci, "Toggle cycle bit = %u\n", - (unsigned int) (le32_to_cpu(trb->link.control) & LINK_TOGGLE)); + le32_to_cpu(trb->link.control) & LINK_TOGGLE); xhci_dbg(xhci, "No Snoop bit = %u\n", - (unsigned int) (le32_to_cpu(trb->link.control) & TRB_NO_SNOOP)); + le32_to_cpu(trb->link.control) & TRB_NO_SNOOP); break; case TRB_TYPE(TRB_TRANSFER): address = le64_to_cpu(trb->trans_event.buffer); @@ -284,9 +284,9 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb) address = le64_to_cpu(trb->event_cmd.cmd_trb); xhci_dbg(xhci, "Command TRB pointer = %llu\n", address); xhci_dbg(xhci, "Completion status = %u\n", - (unsigned 
int) GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status))); + GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status))); xhci_dbg(xhci, "Flags = 0x%x\n", - (unsigned int) le32_to_cpu(trb->event_cmd.flags)); + le32_to_cpu(trb->event_cmd.flags)); break; default: xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n", @@ -318,10 +318,10 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg) for (i = 0; i < TRBS_PER_SEGMENT; ++i) { trb = &seg->trbs[i]; xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr, - (u32)lower_32_bits(le64_to_cpu(trb->link.segment_ptr)), - (u32)upper_32_bits(le64_to_cpu(trb->link.segment_ptr)), - (unsigned int) le32_to_cpu(trb->link.intr_target), - (unsigned int) le32_to_cpu(trb->link.control)); + lower_32_bits(le64_to_cpu(trb->link.segment_ptr)), + upper_32_bits(le64_to_cpu(trb->link.segment_ptr)), + le32_to_cpu(trb->link.intr_target), + le32_to_cpu(trb->link.control)); addr += sizeof(*trb); } } @@ -402,8 +402,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) addr, lower_32_bits(le64_to_cpu(entry->seg_addr)), upper_32_bits(le64_to_cpu(entry->seg_addr)), - (unsigned int) le32_to_cpu(entry->seg_size), - (unsigned int) le32_to_cpu(entry->rsvd)); + le32_to_cpu(entry->seg_size), + le32_to_cpu(entry->rsvd)); addr += sizeof(*entry); } } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 0f8e1d29a858..1370db808fc6 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -89,8 +89,8 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, return; prev->next = next; if (link_trbs) { - prev->trbs[TRBS_PER_SEGMENT-1].link. - segment_ptr = cpu_to_le64(next->dma); + prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = + cpu_to_le64(next->dma); /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); @@ -187,8 +187,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, if (link_trbs) { /* See section 4.9.2.1 and 6.4.4.1 */ - prev->trbs[TRBS_PER_SEGMENT-1].link. - control |= cpu_to_le32(LINK_TOGGLE); + prev->trbs[TRBS_PER_SEGMENT-1].link.control |= + cpu_to_le32(LINK_TOGGLE); xhci_dbg(xhci, "Wrote link toggle flag to" " segment %p (virtual), 0x%llx (DMA)\n", prev, (unsigned long long)prev->dma); @@ -549,8 +549,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) | cur_ring->cycle_state; - stream_info->stream_ctx_array[cur_stream]. 
- stream_ring = cpu_to_le64(addr); + stream_info->stream_ctx_array[cur_stream].stream_ring = + cpu_to_le64(addr); xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, (unsigned long long) addr); @@ -786,7 +786,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", slot_id, &xhci->dcbaa->dev_context_ptrs[slot_id], - (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); + le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); return 1; fail: @@ -890,19 +890,19 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); /* 3) Only the control endpoint is valid - one endpoint context */ - slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route); + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); switch (udev->speed) { case USB_SPEED_SUPER: - slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS); + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS); break; case USB_SPEED_HIGH: - slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS); + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS); break; case USB_SPEED_FULL: - slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS); + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS); break; case USB_SPEED_LOW: - slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS); + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS); break; case USB_SPEED_WIRELESS: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); @@ -916,7 +916,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud port_num = xhci_find_real_port_number(xhci, udev); if (!port_num) return -EINVAL; - slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num)); + slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num)); /* Set the port number in the virtual_device to the faked port number */ for (top_dev = udev; top_dev->parent && top_dev->parent->parent; top_dev = top_dev->parent) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 800f417c7309..e5530181baa3 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -113,15 +113,13 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, if (ring == xhci->event_ring) return trb == &seg->trbs[TRBS_PER_SEGMENT]; else - return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK) - == TRB_TYPE(TRB_LINK); + return TRB_TYPE_LINK_LE32(trb->link.control); } static int enqueue_is_link_trb(struct xhci_ring *ring) { struct xhci_link_trb *link = &ring->enqueue->link; - return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) == - TRB_TYPE(TRB_LINK)); + return TRB_TYPE_LINK_LE32(link->control); } /* Updates trb to point to the next TRB in the ring, and updates seg if the next @@ -372,7 +370,7 @@ static struct xhci_segment *find_trb_seg( while (cur_seg->trbs > trb || &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; - if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE) + if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE)) *cycle_state ^= 0x1; cur_seg = cur_seg->next; if (cur_seg == start_seg) @@ -489,8 +487,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, } trb = &state->new_deq_ptr->generic; - if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) == - TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE)) + if (TRB_TYPE_LINK_LE32(trb->field[3]) && + (trb->field[3] & 
cpu_to_le32(LINK_TOGGLE))) state->new_cycle_state ^= 0x1; next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); @@ -525,8 +523,7 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb; true; next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK) - == TRB_TYPE(TRB_LINK)) { + if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) { /* Unchain any chained Link TRBs, but * leave the pointers intact. */ @@ -1000,7 +997,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, * but we don't care. */ xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n", - (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status))); + GET_COMP_CODE(le32_to_cpu(event->status))); /* HW with the reset endpoint quirk needs to have a configure endpoint * command complete before the endpoint can be used. Queue that here @@ -1458,7 +1455,8 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, * endpoint anyway. Check if a babble halted the * endpoint. */ - if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED) + if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == + cpu_to_le32(EP_STATE_HALTED)) return 1; return 0; @@ -1752,10 +1750,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; cur_trb != event_trb; next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if ((le32_to_cpu(cur_trb->generic.field[3]) & - TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && - (le32_to_cpu(cur_trb->generic.field[3]) & - TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) + if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && + !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); } len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - @@ -1888,10 +1884,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; cur_trb != event_trb; next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if ((le32_to_cpu(cur_trb->generic.field[3]) & - TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && - (le32_to_cpu(cur_trb->generic.field[3]) & - TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) + if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && + !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) td->urb->actual_length += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); } @@ -2046,8 +2040,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", - (unsigned int) (le32_to_cpu(event->flags) - & TRB_TYPE_BITMASK)>>10); + (le32_to_cpu(event->flags) & + TRB_TYPE_BITMASK)>>10); xhci_print_trb_offsets(xhci, (union xhci_trb *) event); if (ep->skip) { ep->skip = false; @@ -2104,9 +2098,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, * corresponding TD has been cancelled. Just ignore * the TD. */ - if ((le32_to_cpu(event_trb->generic.field[3]) - & TRB_TYPE_BITMASK) - == TRB_TYPE(TRB_TR_NOOP)) { + if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) { xhci_dbg(xhci, "event_trb is a no-op TRB. 
Skip it\n"); goto cleanup; @@ -2432,7 +2424,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, next->link.control |= cpu_to_le32(TRB_CHAIN); wmb(); - next->link.control ^= cpu_to_le32((u32) TRB_CYCLE); + next->link.control ^= cpu_to_le32(TRB_CYCLE); /* Toggle the cycle bit after the last ring segment. */ if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) { diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 06e7023258d0..d0a65401670b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1340,8 +1340,8 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, /* If the HC already knows the endpoint is disabled, * or the HCD has noted it is disabled, ignore this request */ - if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == - EP_STATE_DISABLED || + if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == + cpu_to_le32(EP_STATE_DISABLED)) || le32_to_cpu(ctrl_ctx->drop_flags) & xhci_get_endpoint_flag(&ep->desc)) { xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", @@ -1732,8 +1732,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, /* Enqueue pointer can be left pointing to the link TRB, * we must handle that */ - if ((le32_to_cpu(command->command_trb->link.control) - & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) + if (TRB_TYPE_LINK_LE32(command->command_trb->link.control)) command->command_trb = xhci->cmd_ring->enq_seg->next->trbs; @@ -2533,8 +2532,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Enqueue pointer can be left pointing to the link TRB, * we must handle that */ - if ((le32_to_cpu(reset_device_cmd->command_trb->link.control) - & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) + if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control)) reset_device_cmd->command_trb = xhci->cmd_ring->enq_seg->next->trbs; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7d1ea3bf5e1f..a2cc76741695 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1070,6 +1070,13 @@ union xhci_trb { /* Get NEC firmware revision. 
*/ #define TRB_NEC_GET_FW 49 +#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) +/* Above, but for __le32 types -- can avoid work by swapping constants: */ +#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_LINK))) +#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_TR_NOOP))) + #define NEC_FW_MINOR(p) (((p) >> 0) & 0xff) #define NEC_FW_MAJOR(p) (((p) >> 8) & 0xff) diff --git a/drivers/usb/renesas_usbhs/Makefile b/drivers/usb/renesas_usbhs/Makefile index b8798ad16278..ce08345fa15a 100644 --- a/drivers/usb/renesas_usbhs/Makefile +++ b/drivers/usb/renesas_usbhs/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs.o -renesas_usbhs-y := common.o mod.o pipe.o +renesas_usbhs-y := common.o mod.o pipe.o fifo.o renesas_usbhs-$(CONFIG_USB_RENESAS_USBHS_UDC) += mod_gadget.o diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index f3664d6af661..665259aec871 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -304,6 +304,8 @@ static int __devinit usbhs_probe(struct platform_device *pdev) priv->dparam->pipe_type = usbhsc_default_pipe_type; priv->dparam->pipe_size = ARRAY_SIZE(usbhsc_default_pipe_type); } + if (!priv->dparam->pio_dma_border) + priv->dparam->pio_dma_border = 64; /* 64byte */ /* FIXME */ /* runtime power control ? */ @@ -323,10 +325,14 @@ static int __devinit usbhs_probe(struct platform_device *pdev) if (ret < 0) goto probe_end_iounmap; - ret = usbhs_mod_probe(priv); + ret = usbhs_fifo_probe(priv); if (ret < 0) goto probe_end_pipe_exit; + ret = usbhs_mod_probe(priv); + if (ret < 0) + goto probe_end_fifo_exit; + /* dev_set_drvdata should be called after usbhs_mod_init */ dev_set_drvdata(&pdev->dev, priv); @@ -374,6 +380,8 @@ probe_end_call_remove: usbhs_platform_call(priv, hardware_exit, pdev); probe_end_mod_exit: usbhs_mod_remove(priv); +probe_end_fifo_exit: + usbhs_fifo_remove(priv); probe_end_pipe_exit: usbhs_pipe_remove(priv); probe_end_iounmap: @@ -404,6 +412,7 @@ static int __devexit usbhs_remove(struct platform_device *pdev) usbhs_platform_call(priv, hardware_exit, pdev); usbhs_mod_remove(priv); + usbhs_fifo_remove(priv); usbhs_pipe_remove(priv); iounmap(priv->base); kfree(priv); diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h index 0aadcb402764..b410463a1212 100644 --- a/drivers/usb/renesas_usbhs/common.h +++ b/drivers/usb/renesas_usbhs/common.h @@ -36,6 +36,12 @@ struct usbhs_priv; #define CFIFO 0x0014 #define CFIFOSEL 0x0020 #define CFIFOCTR 0x0022 +#define D0FIFO 0x0100 +#define D0FIFOSEL 0x0028 +#define D0FIFOCTR 0x002A +#define D1FIFO 0x0120 +#define D1FIFOSEL 0x002C +#define D1FIFOCTR 0x002E #define INTENB0 0x0030 #define INTENB1 0x0032 #define BRDYENB 0x0036 @@ -60,6 +66,30 @@ struct usbhs_priv; #define PIPEMAXP 0x006C #define PIPEPERI 0x006E #define PIPEnCTR 0x0070 +#define PIPE1TRE 0x0090 +#define PIPE1TRN 0x0092 +#define PIPE2TRE 0x0094 +#define PIPE2TRN 0x0096 +#define PIPE3TRE 0x0098 +#define PIPE3TRN 0x009A +#define PIPE4TRE 0x009C +#define PIPE4TRN 0x009E +#define PIPE5TRE 0x00A0 +#define PIPE5TRN 0x00A2 +#define PIPEBTRE 0x00A4 +#define PIPEBTRN 0x00A6 +#define PIPECTRE 0x00A8 +#define PIPECTRN 0x00AA +#define PIPEDTRE 0x00AC +#define PIPEDTRN 0x00AE +#define PIPEETRE 0x00B0 +#define PIPEETRN 0x00B2 +#define PIPEFTRE 0x00B4 +#define PIPEFTRN 0x00B6 +#define PIPE9TRE 0x00B8 +#define PIPE9TRN 0x00BA +#define PIPEATRE 
0x00BC +#define PIPEATRN 0x00BE /* SYSCFG */ #define SCKE (1 << 10) /* USB Module Clock Enable */ @@ -78,6 +108,7 @@ struct usbhs_priv; #define RHST_HIGH_SPEED 3 /* High-speed connection */ /* CFIFOSEL */ +#define DREQE (1 << 12) /* DMA Transfer Request Enable */ #define MBW_32 (0x2 << 10) /* CFIFO Port Access Bit Width */ /* CFIFOCTR */ @@ -164,6 +195,10 @@ struct usbhs_priv; #define CCPL (1 << 2) /* Control Transfer End Enable */ +/* PIPEnTRE */ +#define TRENB (1 << 9) /* Transaction Counter Enable */ +#define TRCLR (1 << 8) /* Transaction Counter Clear */ + /* FRMNUM */ #define FRNM_MASK (0x7FF) @@ -194,6 +229,11 @@ struct usbhs_priv { * pipe control */ struct usbhs_pipe_info pipe_info; + + /* + * fifo control + */ + struct usbhs_fifo_info fifo_info; }; /* @@ -204,6 +244,10 @@ void usbhs_write(struct usbhs_priv *priv, u32 reg, u16 data); void usbhs_bset(struct usbhs_priv *priv, u32 reg, u16 mask, u16 data); int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev); + +#define usbhs_lock(p, f) spin_lock_irqsave(usbhs_priv_to_lock(p), f) +#define usbhs_unlock(p, f) spin_unlock_irqrestore(usbhs_priv_to_lock(p), f) + /* * sysconfig */ diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c new file mode 100644 index 000000000000..2016a2448ccb --- /dev/null +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -0,0 +1,994 @@ +/* + * Renesas USB driver + * + * Copyright (C) 2011 Renesas Solutions Corp. + * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ +#include <linux/delay.h> +#include <linux/io.h> +#include "./common.h" +#include "./pipe.h" + +#define usbhsf_get_cfifo(p) (&((p)->fifo_info.cfifo)) +#define usbhsf_get_d0fifo(p) (&((p)->fifo_info.d0fifo)) +#define usbhsf_get_d1fifo(p) (&((p)->fifo_info.d1fifo)) + +#define usbhsf_fifo_is_busy(f) ((f)->pipe) /* see usbhs_pipe_select_fifo */ + +/* + * packet info function + */ +static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done) +{ + struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe); + struct device *dev = usbhs_priv_to_dev(priv); + + dev_err(dev, "null handler\n"); + + return -EINVAL; +} + +static struct usbhs_pkt_handle usbhsf_null_handler = { + .prepare = usbhsf_null_handle, + .try_run = usbhsf_null_handle, +}; + +void usbhs_pkt_init(struct usbhs_pkt *pkt) +{ + pkt->dma = DMA_ADDR_INVALID; + INIT_LIST_HEAD(&pkt->node); +} + +void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt, + struct usbhs_pkt_handle *handler, + void *buf, int len, int zero) +{ + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct device *dev = usbhs_priv_to_dev(priv); + unsigned long flags; + + /******************** spin lock ********************/ + usbhs_lock(priv, flags); + + if (!handler) { + dev_err(dev, "no handler function\n"); + handler = &usbhsf_null_handler; + } + + list_del_init(&pkt->node); + list_add_tail(&pkt->node, &pipe->list); + + pkt->pipe = pipe; + pkt->buf = buf; + pkt->handler = handler; + pkt->length = len; + pkt->zero = zero; + pkt->actual = 0; + + usbhs_unlock(priv, flags); + /******************** spin unlock 
******************/
+
+	usbhs_pkt_start(pipe);
+}
+
+static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
+{
+	list_del_init(&pkt->node);
+}
+
+static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+{
+	if (list_empty(&pipe->list))
+		return NULL;
+
+	return list_entry(pipe->list.next, struct usbhs_pkt, node);
+}
+
+struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
+{
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+	unsigned long flags;
+
+	/******************** spin lock ********************/
+	usbhs_lock(priv, flags);
+
+	if (!pkt)
+		pkt = __usbhsf_pkt_get(pipe);
+
+	if (pkt)
+		__usbhsf_pkt_del(pkt);
+
+	usbhs_unlock(priv, flags);
+	/******************** spin unlock ******************/
+
+	return pkt;
+}
+
+int __usbhs_pkt_handler(struct usbhs_pipe *pipe, int type)
+{
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
+	struct usbhs_pkt *pkt;
+	struct device *dev = usbhs_priv_to_dev(priv);
+	int (*func)(struct usbhs_pkt *pkt, int *is_done);
+	unsigned long flags;
+	int ret = 0;
+	int is_done = 0;
+
+	/******************** spin lock ********************/
+	usbhs_lock(priv, flags);
+
+	pkt = __usbhsf_pkt_get(pipe);
+	if (!pkt)
+		goto __usbhs_pkt_handler_end;
+
+	switch (type) {
+	case USBHSF_PKT_PREPARE:
+		func = pkt->handler->prepare;
+		break;
+	case USBHSF_PKT_TRY_RUN:
+		func = pkt->handler->try_run;
+		break;
+	case USBHSF_PKT_DMA_DONE:
+		func = pkt->handler->dma_done;
+		break;
+	default:
+		dev_err(dev, "unknown pkt handler\n");
+		goto __usbhs_pkt_handler_end;
+	}
+
+	ret = func(pkt, &is_done);
+
+	if (is_done)
+		__usbhsf_pkt_del(pkt);
+
+__usbhs_pkt_handler_end:
+	usbhs_unlock(priv, flags);
+	/******************** spin unlock ******************/
+
+	if (is_done) {
+		info->done(pkt);
+		usbhs_pkt_start(pipe);
+	}
+
+	return ret;
+}
+
+/*
+ * irq enable/disable function
+ */
+#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, bempsts, e)
+#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, brdysts, e)
+#define usbhsf_irq_callback_ctrl(pipe, status, enable) \
+	({ \
+		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); \
+		struct usbhs_mod *mod = usbhs_mod_get_current(priv); \
+		u16 status = (1 << usbhs_pipe_number(pipe)); \
+		if (!mod) \
+			return; \
+		if (enable) \
+			mod->irq_##status |= status; \
+		else \
+			mod->irq_##status &= ~status; \
+		usbhs_irq_callback_update(priv, mod); \
+	})
+
+static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
+{
+	/*
+	 * And DCP pipe can NOT use "ready interrupt" for "send"
+	 * it should use "empty" interrupt.
+ * see + * "Operation" - "Interrupt Function" - "BRDY Interrupt" + * + * on the other hand, normal pipe can use "ready interrupt" for "send" + * even though it is single/double buffer + */ + if (usbhs_pipe_is_dcp(pipe)) + usbhsf_irq_empty_ctrl(pipe, enable); + else + usbhsf_irq_ready_ctrl(pipe, enable); +} + +static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable) +{ + usbhsf_irq_ready_ctrl(pipe, enable); +} + +/* + * FIFO ctrl + */ +static void usbhsf_send_terminator(struct usbhs_pipe *pipe, + struct usbhs_fifo *fifo) +{ + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + + usbhs_bset(priv, fifo->ctr, BVAL, BVAL); +} + +static int usbhsf_fifo_barrier(struct usbhs_priv *priv, + struct usbhs_fifo *fifo) +{ + int timeout = 1024; + + do { + /* The FIFO port is accessible */ + if (usbhs_read(priv, fifo->ctr) & FRDY) + return 0; + + udelay(10); + } while (timeout--); + + return -EBUSY; +} + +static void usbhsf_fifo_clear(struct usbhs_pipe *pipe, + struct usbhs_fifo *fifo) +{ + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + + if (!usbhs_pipe_is_dcp(pipe)) + usbhsf_fifo_barrier(priv, fifo); + + usbhs_write(priv, fifo->ctr, BCLR); +} + +static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv, + struct usbhs_fifo *fifo) +{ + return usbhs_read(priv, fifo->ctr) & DTLN_MASK; +} + +static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe, + struct usbhs_fifo *fifo) +{ + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + + usbhs_pipe_select_fifo(pipe, NULL); + usbhs_write(priv, fifo->sel, 0); +} + +static int usbhsf_fifo_select(struct usbhs_pipe *pipe, + struct usbhs_fifo *fifo, + int write) +{ + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct device *dev = usbhs_priv_to_dev(priv); + int timeout = 1024; + u16 mask = ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */ + u16 base = usbhs_pipe_number(pipe); /* CURPIPE */ + + if (usbhs_pipe_is_busy(pipe) || + usbhsf_fifo_is_busy(fifo)) + return -EBUSY; + + if (usbhs_pipe_is_dcp(pipe)) + base |= (1 == write) << 5; /* ISEL */ + + /* "base" will be used below */ + usbhs_write(priv, fifo->sel, base | MBW_32); + + /* check ISEL and CURPIPE value */ + while (timeout--) { + if (base == (mask & usbhs_read(priv, fifo->sel))) { + usbhs_pipe_select_fifo(pipe, fifo); + return 0; + } + udelay(10); + } + + dev_err(dev, "fifo select error\n"); + + return -EIO; +} + +/* + * PIO fifo functions + */ +static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done) +{ + struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct device *dev = usbhs_priv_to_dev(priv); + struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */ + void __iomem *addr = priv->base + fifo->port; + u8 *buf; + int maxp = usbhs_pipe_get_maxpacket(pipe); + int total_len; + int i, ret, len; + int is_short; + + ret = usbhsf_fifo_select(pipe, fifo, 1); + if (ret < 0) + return 0; + + ret = usbhs_pipe_is_accessible(pipe); + if (ret < 0) + goto usbhs_fifo_write_busy; + + ret = usbhsf_fifo_barrier(priv, fifo); + if (ret < 0) + goto usbhs_fifo_write_busy; + + buf = pkt->buf + pkt->actual; + len = pkt->length - pkt->actual; + len = min(len, maxp); + total_len = len; + is_short = total_len < maxp; + + /* + * FIXME + * + * 32-bit access only + */ + if (len >= 4 && !((unsigned long)buf & 0x03)) { + iowrite32_rep(addr, buf, len / 4); + len %= 4; + buf += total_len - len; + } + + /* the rest operation */ + for (i = 0; i < len; i++) + iowrite8(buf[i], addr + (0x03 - (i & 0x03))); + + /* + * variable update + */ + pkt->actual += 
total_len; + + if (pkt->actual < pkt->length) + *is_done = 0; /* there are remainder data */ + else if (is_short) + *is_done = 1; /* short packet */ + else + *is_done = !pkt->zero; /* send zero packet ? */ + + /* + * pipe/irq handling + */ + if (is_short) + usbhsf_send_terminator(pipe, fifo); + + usbhsf_tx_irq_ctrl(pipe, !*is_done); + usbhs_pipe_enable(pipe); + + dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n", + usbhs_pipe_number(pipe), + pkt->length, pkt->actual, *is_done, pkt->zero); + + /* + * Transmission end + */ + if (*is_done) { + if (usbhs_pipe_is_dcp(pipe)) + usbhs_dcp_control_transfer_done(pipe); + } + + usbhsf_fifo_unselect(pipe, fifo); + + return 0; + +usbhs_fifo_write_busy: + usbhsf_fifo_unselect(pipe, fifo); + + /* + * pipe is busy. + * retry in interrupt + */ + usbhsf_tx_irq_ctrl(pipe, 1); + + return ret; +} + +struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = { + .prepare = usbhsf_pio_try_push, + .try_run = usbhsf_pio_try_push, +}; + +static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) +{ + struct usbhs_pipe *pipe = pkt->pipe; + + if (usbhs_pipe_is_busy(pipe)) + return 0; + + /* + * pipe enable to prepare packet receive + */ + + usbhs_pipe_enable(pipe); + usbhsf_rx_irq_ctrl(pipe, 1); + + return 0; +} + +static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done) +{ + struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct device *dev = usbhs_priv_to_dev(priv); + struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */ + void __iomem *addr = priv->base + fifo->port; + u8 *buf; + u32 data = 0; + int maxp = usbhs_pipe_get_maxpacket(pipe); + int rcv_len, len; + int i, ret; + int total_len = 0; + + ret = usbhsf_fifo_select(pipe, fifo, 0); + if (ret < 0) + return 0; + + ret = usbhsf_fifo_barrier(priv, fifo); + if (ret < 0) + goto usbhs_fifo_read_busy; + + rcv_len = usbhsf_fifo_rcv_len(priv, fifo); + + buf = pkt->buf + pkt->actual; + len = pkt->length - pkt->actual; + len = min(len, rcv_len); + total_len = len; + + /* + * Buffer clear if Zero-Length packet + * + * see + * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function" + */ + if (0 == rcv_len) { + usbhsf_fifo_clear(pipe, fifo); + goto usbhs_fifo_read_end; + } + + /* + * FIXME + * + * 32-bit access only + */ + if (len >= 4 && !((unsigned long)buf & 0x03)) { + ioread32_rep(addr, buf, len / 4); + len %= 4; + buf += total_len - len; + } + + /* the rest operation */ + for (i = 0; i < len; i++) { + if (!(i & 0x03)) + data = ioread32(addr); + + buf[i] = (data >> ((i & 0x03) * 8)) & 0xff; + } + + pkt->actual += total_len; + +usbhs_fifo_read_end: + if ((pkt->actual == pkt->length) || /* receive all data */ + (total_len < maxp)) { /* short packet */ + *is_done = 1; + usbhsf_rx_irq_ctrl(pipe, 0); + usbhs_pipe_disable(pipe); + } + + dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n", + usbhs_pipe_number(pipe), + pkt->length, pkt->actual, *is_done, pkt->zero); + +usbhs_fifo_read_busy: + usbhsf_fifo_unselect(pipe, fifo); + + return ret; +} + +struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = { + .prepare = usbhsf_prepare_pop, + .try_run = usbhsf_pio_try_pop, +}; + +/* + * handler function + */ +static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done) +{ + usbhs_dcp_control_transfer_done(pkt->pipe); + + *is_done = 1; + + return 0; +} + +struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = { + .prepare = usbhsf_ctrl_stage_end, + .try_run = usbhsf_ctrl_stage_end, +}; + +/* + * DMA fifo functions + */ +static struct dma_chan *usbhsf_dma_chan_get(struct 
usbhs_fifo *fifo, + struct usbhs_pkt *pkt) +{ + if (&usbhs_fifo_dma_push_handler == pkt->handler) + return fifo->tx_chan; + + if (&usbhs_fifo_dma_pop_handler == pkt->handler) + return fifo->rx_chan; + + return NULL; +} + +static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv, + struct usbhs_pkt *pkt) +{ + struct usbhs_fifo *fifo; + + /* DMA :: D0FIFO */ + fifo = usbhsf_get_d0fifo(priv); + if (usbhsf_dma_chan_get(fifo, pkt) && + !usbhsf_fifo_is_busy(fifo)) + return fifo; + + /* DMA :: D1FIFO */ + fifo = usbhsf_get_d1fifo(priv); + if (usbhsf_dma_chan_get(fifo, pkt) && + !usbhsf_fifo_is_busy(fifo)) + return fifo; + + return NULL; +} + +#define usbhsf_dma_start(p, f) __usbhsf_dma_ctrl(p, f, DREQE) +#define usbhsf_dma_stop(p, f) __usbhsf_dma_ctrl(p, f, 0) +static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe, + struct usbhs_fifo *fifo, + u16 dreqe) +{ + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + + usbhs_bset(priv, fifo->sel, DREQE, dreqe); +} + +#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) +#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) +static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) +{ + struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv); + + return info->dma_map_ctrl(pkt, map); +} + +static void usbhsf_dma_complete(void *arg); +static void usbhsf_dma_prepare_tasklet(unsigned long data) +{ + struct usbhs_pkt *pkt = (struct usbhs_pkt *)data; + struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe); + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct scatterlist sg; + struct dma_async_tx_descriptor *desc; + struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt); + struct device *dev = usbhs_priv_to_dev(priv); + enum dma_data_direction dir; + dma_cookie_t cookie; + + dir = usbhs_pipe_is_dir_in(pipe) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + + sg_init_table(&sg, 1); + sg_set_page(&sg, virt_to_page(pkt->dma), + pkt->length, offset_in_page(pkt->dma)); + sg_dma_address(&sg) = pkt->dma + pkt->actual; + sg_dma_len(&sg) = pkt->trans; + + desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir, + DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + if (!desc) + return; + + desc->callback = usbhsf_dma_complete; + desc->callback_param = pipe; + + cookie = desc->tx_submit(desc); + if (cookie < 0) { + dev_err(dev, "Failed to submit dma descriptor\n"); + return; + } + + dev_dbg(dev, " %s %d (%d/ %d)\n", + fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); + + usbhsf_dma_start(pipe, fifo); + dma_async_issue_pending(chan); +} + +static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) +{ + struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct usbhs_fifo *fifo; + int len = pkt->length - pkt->actual; + int ret; + + if (usbhs_pipe_is_busy(pipe)) + return 0; + + /* use PIO if packet is less than pio_dma_border or pipe is DCP */ + if ((len < usbhs_get_dparam(priv, pio_dma_border)) || + usbhs_pipe_is_dcp(pipe)) + goto usbhsf_pio_prepare_push; + + if (len % 4) /* 32bit alignment */ + goto usbhsf_pio_prepare_push; + + /* get enable DMA fifo */ + fifo = usbhsf_get_dma_fifo(priv, pkt); + if (!fifo) + goto usbhsf_pio_prepare_push; + + if (usbhsf_dma_map(pkt) < 0) + goto usbhsf_pio_prepare_push; + + ret = usbhsf_fifo_select(pipe, fifo, 0); + if (ret < 0) + goto usbhsf_pio_prepare_push_unmap; + + pkt->trans = len; + + tasklet_init(&fifo->tasklet, + usbhsf_dma_prepare_tasklet, + (unsigned long)pkt); + + tasklet_schedule(&fifo->tasklet); + + return 0; + +usbhsf_pio_prepare_push_unmap: + usbhsf_dma_unmap(pkt); +usbhsf_pio_prepare_push: + /* + * change handler to PIO + */ + pkt->handler = &usbhs_fifo_pio_push_handler; + + return pkt->handler->prepare(pkt, is_done); +} + +static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done) +{ + struct usbhs_pipe *pipe = pkt->pipe; + + pkt->actual = pkt->trans; + + *is_done = !pkt->zero; /* send zero packet ? */ + + usbhsf_dma_stop(pipe, pipe->fifo); + usbhsf_dma_unmap(pkt); + usbhsf_fifo_unselect(pipe, pipe->fifo); + + return 0; +} + +struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = { + .prepare = usbhsf_dma_prepare_push, + .dma_done = usbhsf_dma_push_done, +}; + +static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done) +{ + struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct usbhs_fifo *fifo; + int len, ret; + + if (usbhs_pipe_is_busy(pipe)) + return 0; + + if (usbhs_pipe_is_dcp(pipe)) + goto usbhsf_pio_prepare_pop; + + /* get enable DMA fifo */ + fifo = usbhsf_get_dma_fifo(priv, pkt); + if (!fifo) + goto usbhsf_pio_prepare_pop; + + ret = usbhsf_fifo_select(pipe, fifo, 0); + if (ret < 0) + goto usbhsf_pio_prepare_pop; + + /* use PIO if packet is less than pio_dma_border */ + len = usbhsf_fifo_rcv_len(priv, fifo); + len = min(pkt->length - pkt->actual, len); + if (len % 4) /* 32bit alignment */ + goto usbhsf_pio_prepare_pop_unselect; + + if (len < usbhs_get_dparam(priv, pio_dma_border)) + goto usbhsf_pio_prepare_pop_unselect; + + ret = usbhsf_fifo_barrier(priv, fifo); + if (ret < 0) + goto usbhsf_pio_prepare_pop_unselect; + + if (usbhsf_dma_map(pkt) < 0) + goto usbhsf_pio_prepare_pop_unselect; + + /* DMA */ + + /* + * usbhs_fifo_dma_pop_handler :: prepare + * enabled irq to come here. + * but it is no longer needed for DMA. disable it. 
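+	 * (the BRDY interrupt was only needed here to learn how much data
+	 *  had arrived so that DMA vs PIO could be chosen; from this point
+	 *  completion is reported by the DMAC callback instead)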
+ */ + usbhsf_rx_irq_ctrl(pipe, 0); + + pkt->trans = len; + + tasklet_init(&fifo->tasklet, + usbhsf_dma_prepare_tasklet, + (unsigned long)pkt); + + tasklet_schedule(&fifo->tasklet); + + return 0; + +usbhsf_pio_prepare_pop_unselect: + usbhsf_fifo_unselect(pipe, fifo); +usbhsf_pio_prepare_pop: + + /* + * change handler to PIO + */ + pkt->handler = &usbhs_fifo_pio_pop_handler; + + return pkt->handler->try_run(pkt, is_done); +} + +static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done) +{ + struct usbhs_pipe *pipe = pkt->pipe; + int maxp = usbhs_pipe_get_maxpacket(pipe); + + usbhsf_dma_stop(pipe, pipe->fifo); + usbhsf_dma_unmap(pkt); + usbhsf_fifo_unselect(pipe, pipe->fifo); + + pkt->actual += pkt->trans; + + if ((pkt->actual == pkt->length) || /* receive all data */ + (pkt->trans < maxp)) { /* short packet */ + *is_done = 1; + } else { + /* re-enable */ + usbhsf_prepare_pop(pkt, is_done); + } + + return 0; +} + +struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = { + .prepare = usbhsf_prepare_pop, + .try_run = usbhsf_dma_try_pop, + .dma_done = usbhsf_dma_pop_done +}; + +/* + * DMA setting + */ +static bool usbhsf_dma_filter(struct dma_chan *chan, void *param) +{ + struct sh_dmae_slave *slave = param; + + /* + * FIXME + * + * usbhs doesn't recognize id = 0 as valid DMA + */ + if (0 == slave->slave_id) + return false; + + chan->private = slave; + + return true; +} + +static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo) +{ + if (fifo->tx_chan) + dma_release_channel(fifo->tx_chan); + if (fifo->rx_chan) + dma_release_channel(fifo->rx_chan); + + fifo->tx_chan = NULL; + fifo->rx_chan = NULL; +} + +static void usbhsf_dma_init(struct usbhs_priv *priv, + struct usbhs_fifo *fifo) +{ + struct device *dev = usbhs_priv_to_dev(priv); + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter, + &fifo->tx_slave); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter, + &fifo->rx_slave); + + if (fifo->tx_chan || fifo->rx_chan) + dev_info(dev, "enable DMAEngine (%s%s%s)\n", + fifo->name, + fifo->tx_chan ? "[TX]" : " ", + fifo->rx_chan ? "[RX]" : " "); +} + +/* + * irq functions + */ +static int usbhsf_irq_empty(struct usbhs_priv *priv, + struct usbhs_irq_state *irq_state) +{ + struct usbhs_pipe *pipe; + struct device *dev = usbhs_priv_to_dev(priv); + int i, ret; + + if (!irq_state->bempsts) { + dev_err(dev, "debug %s !!\n", __func__); + return -EIO; + } + + dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts); + + /* + * search interrupted "pipe" + * not "uep". + */ + usbhs_for_each_pipe_with_dcp(pipe, priv, i) { + if (!(irq_state->bempsts & (1 << i))) + continue; + + ret = usbhs_pkt_run(pipe); + if (ret < 0) + dev_err(dev, "irq_empty run_error %d : %d\n", i, ret); + } + + return 0; +} + +static int usbhsf_irq_ready(struct usbhs_priv *priv, + struct usbhs_irq_state *irq_state) +{ + struct usbhs_pipe *pipe; + struct device *dev = usbhs_priv_to_dev(priv); + int i, ret; + + if (!irq_state->brdysts) { + dev_err(dev, "debug %s !!\n", __func__); + return -EIO; + } + + dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts); + + /* + * search interrupted "pipe" + * not "uep". 
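+	 * (each bit of BRDYSTS corresponds to one pipe number, DCP included,
+	 *  which is why the loop below walks every pipe)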
+ */ + usbhs_for_each_pipe_with_dcp(pipe, priv, i) { + if (!(irq_state->brdysts & (1 << i))) + continue; + + ret = usbhs_pkt_run(pipe); + if (ret < 0) + dev_err(dev, "irq_ready run_error %d : %d\n", i, ret); + } + + return 0; +} + +static void usbhsf_dma_complete(void *arg) +{ + struct usbhs_pipe *pipe = arg; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct device *dev = usbhs_priv_to_dev(priv); + int ret; + + ret = usbhs_pkt_dmadone(pipe); + if (ret < 0) + dev_err(dev, "dma_complete run_error %d : %d\n", + usbhs_pipe_number(pipe), ret); +} + +/* + * fifo init + */ +void usbhs_fifo_init(struct usbhs_priv *priv) +{ + struct usbhs_mod *mod = usbhs_mod_get_current(priv); + struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv); + struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv); + struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv); + + mod->irq_empty = usbhsf_irq_empty; + mod->irq_ready = usbhsf_irq_ready; + mod->irq_bempsts = 0; + mod->irq_brdysts = 0; + + cfifo->pipe = NULL; + cfifo->tx_chan = NULL; + cfifo->rx_chan = NULL; + + d0fifo->pipe = NULL; + d0fifo->tx_chan = NULL; + d0fifo->rx_chan = NULL; + + d1fifo->pipe = NULL; + d1fifo->tx_chan = NULL; + d1fifo->rx_chan = NULL; + + usbhsf_dma_init(priv, usbhsf_get_d0fifo(priv)); + usbhsf_dma_init(priv, usbhsf_get_d1fifo(priv)); +} + +void usbhs_fifo_quit(struct usbhs_priv *priv) +{ + struct usbhs_mod *mod = usbhs_mod_get_current(priv); + + mod->irq_empty = NULL; + mod->irq_ready = NULL; + mod->irq_bempsts = 0; + mod->irq_brdysts = 0; + + usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv)); + usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv)); +} + +int usbhs_fifo_probe(struct usbhs_priv *priv) +{ + struct usbhs_fifo *fifo; + + /* CFIFO */ + fifo = usbhsf_get_cfifo(priv); + fifo->name = "CFIFO"; + fifo->port = CFIFO; + fifo->sel = CFIFOSEL; + fifo->ctr = CFIFOCTR; + + /* D0FIFO */ + fifo = usbhsf_get_d0fifo(priv); + fifo->name = "D0FIFO"; + fifo->port = D0FIFO; + fifo->sel = D0FIFOSEL; + fifo->ctr = D0FIFOCTR; + fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id); + fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id); + + /* D1FIFO */ + fifo = usbhsf_get_d1fifo(priv); + fifo->name = "D1FIFO"; + fifo->port = D1FIFO; + fifo->sel = D1FIFOSEL; + fifo->ctr = D1FIFOCTR; + fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id); + fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id); + + return 0; +} + +void usbhs_fifo_remove(struct usbhs_priv *priv) +{ +} diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h new file mode 100644 index 000000000000..ed6d8e56c13c --- /dev/null +++ b/drivers/usb/renesas_usbhs/fifo.h @@ -0,0 +1,104 @@ +/* + * Renesas USB driver + * + * Copyright (C) 2011 Renesas Solutions Corp. + * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ +#ifndef RENESAS_USB_FIFO_H +#define RENESAS_USB_FIFO_H + +#include <linux/interrupt.h> +#include <linux/sh_dma.h> +#include <asm/dma.h> +#include "pipe.h" + +#define DMA_ADDR_INVALID (~(dma_addr_t)0) + +struct usbhs_fifo { + char *name; + u32 port; /* xFIFO */ + u32 sel; /* xFIFOSEL */ + u32 ctr; /* xFIFOCTR */ + + struct usbhs_pipe *pipe; + struct tasklet_struct tasklet; + + struct dma_chan *tx_chan; + struct dma_chan *rx_chan; + + struct sh_dmae_slave tx_slave; + struct sh_dmae_slave rx_slave; +}; + +struct usbhs_fifo_info { + struct usbhs_fifo cfifo; + struct usbhs_fifo d0fifo; + struct usbhs_fifo d1fifo; +}; + +struct usbhs_pkt_handle; +struct usbhs_pkt { + struct list_head node; + struct usbhs_pipe *pipe; + struct usbhs_pkt_handle *handler; + dma_addr_t dma; + void *buf; + int length; + int trans; + int actual; + int zero; +}; + +struct usbhs_pkt_handle { + int (*prepare)(struct usbhs_pkt *pkt, int *is_done); + int (*try_run)(struct usbhs_pkt *pkt, int *is_done); + int (*dma_done)(struct usbhs_pkt *pkt, int *is_done); +}; + +/* + * fifo + */ +int usbhs_fifo_probe(struct usbhs_priv *priv); +void usbhs_fifo_remove(struct usbhs_priv *priv); +void usbhs_fifo_init(struct usbhs_priv *priv); +void usbhs_fifo_quit(struct usbhs_priv *priv); + +/* + * packet info + */ +enum { + USBHSF_PKT_PREPARE, + USBHSF_PKT_TRY_RUN, + USBHSF_PKT_DMA_DONE, +}; + +extern struct usbhs_pkt_handle usbhs_fifo_pio_push_handler; +extern struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler; +extern struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler; + +extern struct usbhs_pkt_handle usbhs_fifo_dma_push_handler; +extern struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler; + + +void usbhs_pkt_init(struct usbhs_pkt *pkt); +void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt, + struct usbhs_pkt_handle *handler, + void *buf, int len, int zero); +struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt); +int __usbhs_pkt_handler(struct usbhs_pipe *pipe, int type); + +#define usbhs_pkt_start(p) __usbhs_pkt_handler(p, USBHSF_PKT_PREPARE) +#define usbhs_pkt_run(p) __usbhs_pkt_handler(p, USBHSF_PKT_TRY_RUN) +#define usbhs_pkt_dmadone(p) __usbhs_pkt_handler(p, USBHSF_PKT_DMA_DONE) + +#endif /* RENESAS_USB_FIFO_H */ diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 547486ccd059..46e247ad14f3 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -26,21 +26,19 @@ */ struct usbhsg_request { struct usb_request req; - struct list_head node; + struct usbhs_pkt pkt; }; #define EP_NAME_SIZE 8 struct usbhsg_gpriv; -struct usbhsg_pipe_handle; struct usbhsg_uep { struct usb_ep ep; struct usbhs_pipe *pipe; - struct list_head list; char ep_name[EP_NAME_SIZE]; struct usbhsg_gpriv *gpriv; - struct usbhsg_pipe_handle *handler; + struct usbhs_pkt_handle *handler; }; struct usbhsg_gpriv { @@ -58,12 +56,6 @@ struct usbhsg_gpriv { #define USBHSG_STATUS_WEDGE (1 << 2) }; -struct usbhsg_pipe_handle { - int (*prepare)(struct usbhsg_uep *uep, struct usbhsg_request *ureq); - int (*try_run)(struct usbhsg_uep *uep, struct usbhsg_request *ureq); - void (*irq_mask)(struct usbhsg_uep *uep, int enable); -}; - struct usbhsg_recip_handle { char *name; int (*device)(struct usbhs_priv *priv, struct usbhsg_uep 
*uep, @@ -100,7 +92,6 @@ struct usbhsg_recip_handle { container_of(r, struct usbhsg_request, req) #define usbhsg_ep_to_uep(e) container_of(e, struct usbhsg_uep, ep) -#define usbhsg_gpriv_to_lock(gp) usbhs_priv_to_lock((gp)->mod.priv) #define usbhsg_gpriv_to_dev(gp) usbhs_priv_to_dev((gp)->mod.priv) #define usbhsg_gpriv_to_priv(gp) ((gp)->mod.priv) #define usbhsg_gpriv_to_dcp(gp) ((gp)->uep) @@ -110,6 +101,10 @@ struct usbhsg_recip_handle { #define usbhsg_pipe_to_uep(p) ((p)->mod_private) #define usbhsg_is_dcp(u) ((u) == usbhsg_gpriv_to_dcp((u)->gpriv)) +#define usbhsg_ureq_to_pkt(u) (&(u)->pkt) +#define usbhsg_pkt_to_ureq(i) \ + container_of(i, struct usbhsg_request, pkt) + #define usbhsg_is_not_connected(gp) ((gp)->gadget.speed == USB_SPEED_UNKNOWN) /* status */ @@ -119,35 +114,6 @@ struct usbhsg_recip_handle { #define usbhsg_status_has(gp, b) (gp->status & b) /* - * usbhsg_trylock - * - * This driver don't use spin_try_lock - * to avoid warning of CONFIG_DEBUG_SPINLOCK - */ -static spinlock_t *usbhsg_trylock(struct usbhsg_gpriv *gpriv, - unsigned long *flags) -{ - spinlock_t *lock = usbhsg_gpriv_to_lock(gpriv); - - /* check spin lock status - * to avoid deadlock/nest */ - if (spin_is_locked(lock)) - return NULL; - - spin_lock_irqsave(lock, *flags); - - return lock; -} - -static void usbhsg_unlock(spinlock_t *lock, unsigned long *flags) -{ - if (!lock) - return; - - spin_unlock_irqrestore(lock, *flags); -} - -/* * list push/pop */ static void usbhsg_queue_push(struct usbhsg_uep *uep, @@ -156,79 +122,17 @@ static void usbhsg_queue_push(struct usbhsg_uep *uep, struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); + struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq); + struct usb_request *req = &ureq->req; - /* - ********* assume under spin lock ********* - */ - list_del_init(&ureq->node); - list_add_tail(&ureq->node, &uep->list); - ureq->req.actual = 0; - ureq->req.status = -EINPROGRESS; + req->actual = 0; + req->status = -EINPROGRESS; + usbhs_pkt_push(pipe, pkt, uep->handler, + req->buf, req->length, req->zero); dev_dbg(dev, "pipe %d : queue push (%d)\n", usbhs_pipe_number(pipe), - ureq->req.length); -} - -static struct usbhsg_request *usbhsg_queue_get(struct usbhsg_uep *uep) -{ - /* - ********* assume under spin lock ********* - */ - if (list_empty(&uep->list)) - return NULL; - - return list_entry(uep->list.next, struct usbhsg_request, node); -} - -#define usbhsg_queue_prepare(uep) __usbhsg_queue_handler(uep, 1); -#define usbhsg_queue_handle(uep) __usbhsg_queue_handler(uep, 0); -static int __usbhsg_queue_handler(struct usbhsg_uep *uep, int prepare) -{ - struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); - struct device *dev = usbhsg_gpriv_to_dev(gpriv); - struct usbhsg_request *ureq; - spinlock_t *lock; - unsigned long flags; - int ret = 0; - - if (!uep->handler) { - dev_err(dev, "no handler function\n"); - return -EIO; - } - - /* - * CAUTION [*queue handler*] - * - * This function will be called for start/restart queue operation. - * OTOH the most much worry for USB driver is spinlock nest. - * Specially it are - * - usb_ep_ops :: queue - * - usb_request :: complete - * - * But the caller of this function need not care about spinlock. - * This function is using usbhsg_trylock for it. - * if "is_locked" is 1, this mean this function lock it. - * but if it is 0, this mean it is already under spin lock. 
- * see also - * CAUTION [*endpoint queue*] - * CAUTION [*request complete*] - */ - - /****************** spin try lock *******************/ - lock = usbhsg_trylock(gpriv, &flags); - - ureq = usbhsg_queue_get(uep); - if (ureq) { - if (prepare) - ret = uep->handler->prepare(uep, ureq); - else - ret = uep->handler->try_run(uep, ureq); - } - usbhsg_unlock(lock, &flags); - /******************** spin unlock ******************/ - - return ret; + req->length); } static void usbhsg_queue_pop(struct usbhsg_uep *uep, @@ -239,289 +143,88 @@ static void usbhsg_queue_pop(struct usbhsg_uep *uep, struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct device *dev = usbhsg_gpriv_to_dev(gpriv); - /* - ********* assume under spin lock ********* - */ - - /* - * CAUTION [*request complete*] - * - * There is a possibility not to be called in correct order - * if "complete" is called without spinlock. - * - * So, this function assume it is under spinlock, - * and call usb_request :: complete. - * - * But this "complete" will push next usb_request. - * It mean "usb_ep_ops :: queue" which is using spinlock is called - * under spinlock. - * - * To avoid dead-lock, this driver is using usbhsg_trylock. - * CAUTION [*endpoint queue*] - * CAUTION [*queue handler*] - */ - dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe)); - list_del_init(&ureq->node); - ureq->req.status = status; ureq->req.complete(&uep->ep, &ureq->req); - - /* more request ? */ - if (0 == status) - usbhsg_queue_prepare(uep); } -/* - * irq enable/disable function - */ -#define usbhsg_irq_callback_ctrl(uep, status, enable) \ - ({ \ - struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); \ - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); \ - struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); \ - struct usbhs_mod *mod = usbhs_mod_get_current(priv); \ - if (!mod) \ - return; \ - if (enable) \ - mod->irq_##status |= (1 << usbhs_pipe_number(pipe)); \ - else \ - mod->irq_##status &= ~(1 << usbhs_pipe_number(pipe)); \ - usbhs_irq_callback_update(priv, mod); \ - }) - -static void usbhsg_irq_empty_ctrl(struct usbhsg_uep *uep, int enable) +static void usbhsg_queue_done(struct usbhs_pkt *pkt) { - usbhsg_irq_callback_ctrl(uep, bempsts, enable); -} + struct usbhs_pipe *pipe = pkt->pipe; + struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); + struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); -static void usbhsg_irq_ready_ctrl(struct usbhsg_uep *uep, int enable) -{ - usbhsg_irq_callback_ctrl(uep, brdysts, enable); -} + ureq->req.actual = pkt->actual; -/* - * handler function - */ -static int usbhsg_try_run_ctrl_stage_end(struct usbhsg_uep *uep, - struct usbhsg_request *ureq) -{ - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); - - /* - ********* assume under spin lock ********* - */ - - usbhs_dcp_control_transfer_done(pipe); usbhsg_queue_pop(uep, ureq, 0); - - return 0; } -static int usbhsg_try_run_send_packet(struct usbhsg_uep *uep, - struct usbhsg_request *ureq) +static int usbhsg_dma_map(struct device *dev, + struct usbhs_pkt *pkt, + enum dma_data_direction dir) { - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); + struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); struct usb_request *req = &ureq->req; - struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); - struct device *dev = usbhsg_gpriv_to_dev(gpriv); - void *buf; - int remainder, send; - int is_done = 0; - int enable; - int maxp; - - /* - ********* assume under spin lock ********* - */ - - maxp = usbhs_pipe_get_maxpacket(pipe); - buf = req->buf + req->actual; - 
remainder = req->length - req->actual; - - send = usbhs_fifo_write(pipe, buf, remainder); - - /* - * send < 0 : pipe busy - * send = 0 : send zero packet - * send > 0 : send data - * - * send <= max_packet - */ - if (send > 0) - req->actual += send; - - /* send all packet ? */ - if (send < remainder) - is_done = 0; /* there are remainder data */ - else if (send < maxp) - is_done = 1; /* short packet */ - else - is_done = !req->zero; /* send zero packet ? */ - - dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n", - usbhs_pipe_number(pipe), - remainder, send, is_done, req->zero); - /* - * enable interrupt and send again in irq handler - * if it still have remainder data which should be sent. - */ - enable = !is_done; - uep->handler->irq_mask(uep, enable); - - /* - * usbhs_fifo_enable execute - * - after callback_update, - * - before queue_pop / stage_end - */ - usbhs_fifo_enable(pipe); - - /* - * all data were sent ? - */ - if (is_done) { - /* it care below call in - "function mode" */ - if (usbhsg_is_dcp(uep)) - usbhs_dcp_control_transfer_done(pipe); - - usbhsg_queue_pop(uep, ureq, 0); + if (pkt->dma != DMA_ADDR_INVALID) { + dev_err(dev, "dma is already mapped\n"); + return -EIO; } - return 0; -} - -static int usbhsg_prepare_send_packet(struct usbhsg_uep *uep, - struct usbhsg_request *ureq) -{ - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); - - /* - ********* assume under spin lock ********* - */ + if (req->dma == DMA_ADDR_INVALID) { + pkt->dma = dma_map_single(dev, pkt->buf, pkt->length, dir); + } else { + dma_sync_single_for_device(dev, req->dma, req->length, dir); + pkt->dma = req->dma; + } - usbhs_fifo_prepare_write(pipe); - usbhsg_try_run_send_packet(uep, ureq); + if (dma_mapping_error(dev, pkt->dma)) { + dev_err(dev, "dma mapping error %x\n", pkt->dma); + return -EIO; + } return 0; } -static int usbhsg_try_run_receive_packet(struct usbhsg_uep *uep, - struct usbhsg_request *ureq) +static int usbhsg_dma_unmap(struct device *dev, + struct usbhs_pkt *pkt, + enum dma_data_direction dir) { - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); + struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); struct usb_request *req = &ureq->req; - struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); - struct device *dev = usbhsg_gpriv_to_dev(gpriv); - void *buf; - int maxp; - int remainder, recv; - int is_done = 0; - - /* - ********* assume under spin lock ********* - */ - - maxp = usbhs_pipe_get_maxpacket(pipe); - buf = req->buf + req->actual; - remainder = req->length - req->actual; - - recv = usbhs_fifo_read(pipe, buf, remainder); - /* - * recv < 0 : pipe busy - * recv >= 0 : receive data - * - * recv <= max_packet - */ - if (recv < 0) - return -EBUSY; - /* update parameters */ - req->actual += recv; - - if ((recv == remainder) || /* receive all data */ - (recv < maxp)) /* short packet */ - is_done = 1; - - dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n", - usbhs_pipe_number(pipe), - remainder, recv, is_done, req->zero); + if (pkt->dma == DMA_ADDR_INVALID) { + dev_err(dev, "dma is not mapped\n"); + return -EIO; + } - /* read all data ? 
*/ - if (is_done) { - int disable = 0; + if (req->dma == DMA_ADDR_INVALID) + dma_unmap_single(dev, pkt->dma, pkt->length, dir); + else + dma_sync_single_for_cpu(dev, req->dma, req->length, dir); - uep->handler->irq_mask(uep, disable); - usbhs_fifo_disable(pipe); - usbhsg_queue_pop(uep, ureq, 0); - } + pkt->dma = DMA_ADDR_INVALID; return 0; } -static int usbhsg_prepare_receive_packet(struct usbhsg_uep *uep, - struct usbhsg_request *ureq) +static int usbhsg_dma_map_ctrl(struct usbhs_pkt *pkt, int map) { - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); - int enable = 1; - int ret; - - /* - ********* assume under spin lock ********* - */ - - ret = usbhs_fifo_prepare_read(pipe); - if (ret < 0) - return ret; + struct usbhs_pipe *pipe = pkt->pipe; + struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); + struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); + struct device *dev = usbhsg_gpriv_to_dev(gpriv); + enum dma_data_direction dir; - /* - * data will be read in interrupt handler - */ - uep->handler->irq_mask(uep, enable); + dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; - return ret; + if (map) + return usbhsg_dma_map(dev, pkt, dir); + else + return usbhsg_dma_unmap(dev, pkt, dir); } -static struct usbhsg_pipe_handle usbhsg_handler_send_by_empty = { - .prepare = usbhsg_prepare_send_packet, - .try_run = usbhsg_try_run_send_packet, - .irq_mask = usbhsg_irq_empty_ctrl, -}; - -static struct usbhsg_pipe_handle usbhsg_handler_send_by_ready = { - .prepare = usbhsg_prepare_send_packet, - .try_run = usbhsg_try_run_send_packet, - .irq_mask = usbhsg_irq_ready_ctrl, -}; - -static struct usbhsg_pipe_handle usbhsg_handler_recv_by_ready = { - .prepare = usbhsg_prepare_receive_packet, - .try_run = usbhsg_try_run_receive_packet, - .irq_mask = usbhsg_irq_ready_ctrl, -}; - -static struct usbhsg_pipe_handle usbhsg_handler_ctrl_stage_end = { - .prepare = usbhsg_try_run_ctrl_stage_end, - .try_run = usbhsg_try_run_ctrl_stage_end, -}; - -/* - * DCP pipe can NOT use "ready interrupt" for "send" - * it should use "empty" interrupt. 
- * see - * "Operation" - "Interrupt Function" - "BRDY Interrupt" - * - * on the other hand, normal pipe can use "ready interrupt" for "send" - * even though it is single/double buffer - */ -#define usbhsg_handler_send_ctrl usbhsg_handler_send_by_empty -#define usbhsg_handler_recv_ctrl usbhsg_handler_recv_by_ready - -#define usbhsg_handler_send_packet usbhsg_handler_send_by_ready -#define usbhsg_handler_recv_packet usbhsg_handler_recv_by_ready - /* * USB_TYPE_STANDARD / clear feature functions */ @@ -546,15 +249,13 @@ static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv, struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); if (!usbhsg_status_has(gpriv, USBHSG_STATUS_WEDGE)) { - usbhs_fifo_disable(pipe); + usbhs_pipe_disable(pipe); usbhs_pipe_clear_sequence(pipe); - usbhs_fifo_enable(pipe); + usbhs_pipe_enable(pipe); } usbhsg_recip_handler_std_control_done(priv, uep, ctrl); - usbhsg_queue_prepare(uep); - return 0; } @@ -575,6 +276,7 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv, struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usbhsg_uep *uep; + struct usbhs_pipe *pipe; int recip = ctrl->bRequestType & USB_RECIP_MASK; int nth = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK; int ret; @@ -583,9 +285,11 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv, char *msg; uep = usbhsg_gpriv_to_nth_uep(gpriv, nth); - if (!usbhsg_uep_to_pipe(uep)) { + pipe = usbhsg_uep_to_pipe(uep); + if (!pipe) { dev_err(dev, "wrong recip request\n"); - return -EINVAL; + ret = -EINVAL; + goto usbhsg_recip_run_handle_end; } switch (recip) { @@ -608,10 +312,20 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv, } if (func) { + unsigned long flags; + dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg); + + /******************** spin lock ********************/ + usbhs_lock(priv, flags); ret = func(priv, uep, ctrl); + usbhs_unlock(priv, flags); + /******************** spin unlock ******************/ } +usbhsg_recip_run_handle_end: + usbhs_pkt_start(pipe); + return ret; } @@ -660,13 +374,13 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv, switch (stage) { case READ_DATA_STAGE: - dcp->handler = &usbhsg_handler_send_ctrl; + dcp->handler = &usbhs_fifo_pio_push_handler; break; case WRITE_DATA_STAGE: - dcp->handler = &usbhsg_handler_recv_ctrl; + dcp->handler = &usbhs_fifo_pio_pop_handler; break; case NODATA_STATUS_STAGE: - dcp->handler = &usbhsg_handler_ctrl_stage_end; + dcp->handler = &usbhs_ctrl_stage_end_handler; break; default: return ret; @@ -695,128 +409,27 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv, ret = gpriv->driver->setup(&gpriv->gadget, &ctrl); if (ret < 0) - usbhs_fifo_stall(pipe); + usbhs_pipe_stall(pipe); return ret; } -static int usbhsg_irq_empty(struct usbhs_priv *priv, - struct usbhs_irq_state *irq_state) -{ - struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); - struct usbhsg_uep *uep; - struct usbhs_pipe *pipe; - struct device *dev = usbhsg_gpriv_to_dev(gpriv); - int i, ret; - - if (!irq_state->bempsts) { - dev_err(dev, "debug %s !!\n", __func__); - return -EIO; - } - - dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts); - - /* - * search interrupted "pipe" - * not "uep". 
- */ - usbhs_for_each_pipe_with_dcp(pipe, priv, i) { - if (!(irq_state->bempsts & (1 << i))) - continue; - - uep = usbhsg_pipe_to_uep(pipe); - ret = usbhsg_queue_handle(uep); - if (ret < 0) - dev_err(dev, "send error %d : %d\n", i, ret); - } - - return 0; -} - -static int usbhsg_irq_ready(struct usbhs_priv *priv, - struct usbhs_irq_state *irq_state) -{ - struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); - struct usbhsg_uep *uep; - struct usbhs_pipe *pipe; - struct device *dev = usbhsg_gpriv_to_dev(gpriv); - int i, ret; - - if (!irq_state->brdysts) { - dev_err(dev, "debug %s !!\n", __func__); - return -EIO; - } - - dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts); - - /* - * search interrupted "pipe" - * not "uep". - */ - usbhs_for_each_pipe_with_dcp(pipe, priv, i) { - if (!(irq_state->brdysts & (1 << i))) - continue; - - uep = usbhsg_pipe_to_uep(pipe); - ret = usbhsg_queue_handle(uep); - if (ret < 0) - dev_err(dev, "receive error %d : %d\n", i, ret); - } - - return 0; -} - /* * * usb_dcp_ops * */ -static int usbhsg_dcp_enable(struct usbhsg_uep *uep) -{ - struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); - struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); - struct usbhs_pipe *pipe; - - /* - ********* assume under spin lock ********* - */ - - pipe = usbhs_dcp_malloc(priv); - if (!pipe) - return -EIO; - - uep->pipe = pipe; - uep->pipe->mod_private = uep; - INIT_LIST_HEAD(&uep->list); - - return 0; -} - -#define usbhsg_dcp_disable usbhsg_pipe_disable static int usbhsg_pipe_disable(struct usbhsg_uep *uep) { struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); - struct usbhsg_request *ureq; - int disable = 0; - - /* - ********* assume under spin lock ********* - */ - - usbhs_fifo_disable(pipe); + struct usbhs_pkt *pkt; - /* - * disable pipe irq - */ - usbhsg_irq_empty_ctrl(uep, disable); - usbhsg_irq_ready_ctrl(uep, disable); + usbhs_pipe_disable(pipe); while (1) { - ureq = usbhsg_queue_get(uep); - if (!ureq) + pkt = usbhs_pkt_pop(pipe, NULL); + if (!pkt) break; - - usbhsg_queue_pop(uep, ureq, -ECONNRESET); } return 0; @@ -843,8 +456,6 @@ static int usbhsg_ep_enable(struct usb_ep *ep, struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); struct usbhs_pipe *pipe; - spinlock_t *lock; - unsigned long flags; int ret = -EIO; /* @@ -854,46 +465,27 @@ static int usbhsg_ep_enable(struct usb_ep *ep, if (uep->pipe) return 0; - /******************** spin lock ********************/ - lock = usbhsg_trylock(gpriv, &flags); - pipe = usbhs_pipe_malloc(priv, desc); if (pipe) { uep->pipe = pipe; pipe->mod_private = uep; - INIT_LIST_HEAD(&uep->list); if (usb_endpoint_dir_in(desc)) - uep->handler = &usbhsg_handler_send_packet; + uep->handler = &usbhs_fifo_pio_push_handler; else - uep->handler = &usbhsg_handler_recv_packet; + uep->handler = &usbhs_fifo_pio_pop_handler; ret = 0; } - usbhsg_unlock(lock, &flags); - /******************** spin unlock ******************/ - return ret; } static int usbhsg_ep_disable(struct usb_ep *ep) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); - struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); - spinlock_t *lock; - unsigned long flags; - int ret; - /******************** spin lock ********************/ - lock = usbhsg_trylock(gpriv, &flags); - - ret = usbhsg_pipe_disable(uep); - - usbhsg_unlock(lock, &flags); - /******************** spin unlock ******************/ - - return ret; + return usbhsg_pipe_disable(uep); } static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep, @@ -905,7 
+497,10 @@ static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep, if (!ureq) return NULL; - INIT_LIST_HEAD(&ureq->node); + usbhs_pkt_init(usbhsg_ureq_to_pkt(ureq)); + + ureq->req.dma = DMA_ADDR_INVALID; + return &ureq->req; } @@ -914,7 +509,7 @@ static void usbhsg_ep_free_request(struct usb_ep *ep, { struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); - WARN_ON(!list_empty(&ureq->node)); + WARN_ON(!list_empty(&ureq->pkt.node)); kfree(ureq); } @@ -925,69 +520,27 @@ static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req, struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); - spinlock_t *lock; - unsigned long flags; - int ret = 0; - - /* - * CAUTION [*endpoint queue*] - * - * This function will be called from usb_request :: complete - * or usb driver timing. - * If this function is called from usb_request :: complete, - * it is already under spinlock on this driver. - * but it is called frm usb driver, this function should call spinlock. - * - * This function is using usbshg_trylock to solve this issue. - * if "is_locked" is 1, this mean this function lock it. - * but if it is 0, this mean it is already under spin lock. - * see also - * CAUTION [*queue handler*] - * CAUTION [*request complete*] - */ - - /******************** spin lock ********************/ - lock = usbhsg_trylock(gpriv, &flags); /* param check */ if (usbhsg_is_not_connected(gpriv) || unlikely(!gpriv->driver) || unlikely(!pipe)) - ret = -ESHUTDOWN; - else - usbhsg_queue_push(uep, ureq); - - usbhsg_unlock(lock, &flags); - /******************** spin unlock ******************/ + return -ESHUTDOWN; - usbhsg_queue_prepare(uep); + usbhsg_queue_push(uep, ureq); - return ret; + return 0; } static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); - struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); - spinlock_t *lock; - unsigned long flags; - - /* - * see - * CAUTION [*queue handler*] - * CAUTION [*endpoint queue*] - * CAUTION [*request complete*] - */ - - /******************** spin lock ********************/ - lock = usbhsg_trylock(gpriv, &flags); + struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); + usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); usbhsg_queue_pop(uep, ureq, -ECONNRESET); - usbhsg_unlock(lock, &flags); - /******************** spin unlock ******************/ - return 0; } @@ -996,42 +549,32 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); + struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); - spinlock_t *lock; unsigned long flags; - int ret = -EAGAIN; - /* - * see - * CAUTION [*queue handler*] - * CAUTION [*endpoint queue*] - * CAUTION [*request complete*] - */ + usbhsg_pipe_disable(uep); - /******************** spin lock ********************/ - lock = usbhsg_trylock(gpriv, &flags); - if (!usbhsg_queue_get(uep)) { - - dev_dbg(dev, "set halt %d (pipe %d)\n", - halt, usbhs_pipe_number(pipe)); + dev_dbg(dev, "set halt %d (pipe %d)\n", + halt, usbhs_pipe_number(pipe)); - if (halt) - usbhs_fifo_stall(pipe); - else - usbhs_fifo_disable(pipe); + /******************** spin lock ********************/ + usbhs_lock(priv, flags); - if 
(halt && wedge) - usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE); - else - usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE); + if (halt) + usbhs_pipe_stall(pipe); + else + usbhs_pipe_disable(pipe); - ret = 0; - } + if (halt && wedge) + usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE); + else + usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE); - usbhsg_unlock(lock, &flags); + usbhs_unlock(priv, flags); /******************** spin unlock ******************/ - return ret; + return 0; } static int usbhsg_ep_set_halt(struct usb_ep *ep, int value) @@ -1067,28 +610,40 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status) struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct usbhs_mod *mod = usbhs_mod_get_current(priv); struct device *dev = usbhs_priv_to_dev(priv); - spinlock_t *lock; unsigned long flags; + int ret = 0; /******************** spin lock ********************/ - lock = usbhsg_trylock(gpriv, &flags); + usbhs_lock(priv, flags); - /* - * enable interrupt and systems if ready - */ usbhsg_status_set(gpriv, status); if (!(usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) && usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD))) - goto usbhsg_try_start_unlock; + ret = -1; /* not ready */ + + usbhs_unlock(priv, flags); + /******************** spin unlock ********************/ + + if (ret < 0) + return 0; /* not ready is not error */ + /* + * enable interrupt and systems if ready + */ dev_dbg(dev, "start gadget\n"); /* * pipe initialize and enable DCP */ - usbhs_pipe_init(priv); + usbhs_pipe_init(priv, + usbhsg_queue_done, + usbhsg_dma_map_ctrl); + usbhs_fifo_init(priv); usbhsg_uep_init(gpriv); - usbhsg_dcp_enable(dcp); + + /* dcp init */ + dcp->pipe = usbhs_dcp_malloc(priv); + dcp->pipe->mod_private = dcp; /* * system config enble @@ -1105,16 +660,8 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status) */ mod->irq_dev_state = usbhsg_irq_dev_state; mod->irq_ctrl_stage = usbhsg_irq_ctrl_stage; - mod->irq_empty = usbhsg_irq_empty; - mod->irq_ready = usbhsg_irq_ready; - mod->irq_bempsts = 0; - mod->irq_brdysts = 0; usbhs_irq_callback_update(priv, mod); -usbhsg_try_start_unlock: - usbhsg_unlock(lock, &flags); - /******************** spin unlock ********************/ - return 0; } @@ -1124,31 +671,33 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) struct usbhs_mod *mod = usbhs_mod_get_current(priv); struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct device *dev = usbhs_priv_to_dev(priv); - spinlock_t *lock; unsigned long flags; + int ret = 0; /******************** spin lock ********************/ - lock = usbhsg_trylock(gpriv, &flags); + usbhs_lock(priv, flags); - /* - * disable interrupt and systems if 1st try - */ usbhsg_status_clr(gpriv, status); if (!usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) && !usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD)) - goto usbhsg_try_stop_unlock; + ret = -1; /* already done */ + + usbhs_unlock(priv, flags); + /******************** spin unlock ********************/ + + if (ret < 0) + return 0; /* already done is not error */ + + /* + * disable interrupt and systems if 1st try + */ + usbhs_fifo_quit(priv); /* disable all irq */ mod->irq_dev_state = NULL; mod->irq_ctrl_stage = NULL; - mod->irq_empty = NULL; - mod->irq_ready = NULL; - mod->irq_bempsts = 0; - mod->irq_brdysts = 0; usbhs_irq_callback_update(priv, mod); - usbhsg_dcp_disable(dcp); - gpriv->gadget.speed = USB_SPEED_UNKNOWN; /* disable sys */ @@ -1156,8 +705,7 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) usbhs_sys_function_ctrl(priv, 
0);
 	usbhs_sys_usb_ctrl(priv, 0);
 
-	usbhsg_unlock(lock, &flags);
-	/******************** spin unlock ********************/
+	usbhsg_pipe_disable(dcp);
 
 	if (gpriv->driver &&
 	    gpriv->driver->disconnect)
@@ -1166,11 +714,6 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
 	dev_dbg(dev, "stop gadget\n");
 
 	return 0;
-
-usbhsg_try_stop_unlock:
-	usbhsg_unlock(lock, &flags);
-
-	return 0;
 }
 
 /*
@@ -1350,7 +893,6 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 		uep->ep.name = uep->ep_name;
 		uep->ep.ops = &usbhsg_ep_ops;
 		INIT_LIST_HEAD(&uep->ep.ep_list);
-		INIT_LIST_HEAD(&uep->list);
 
 		/* init DCP */
 		if (usbhsg_is_dcp(uep)) {
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index bc4521c54261..d0ae846632cd 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -15,7 +15,6 @@
  *
  */
 #include <linux/delay.h>
-#include <linux/io.h>
 #include <linux/slab.h>
 #include "./common.h"
 #include "./pipe.h"
@@ -23,13 +22,8 @@
 /*
  * macros
  */
-#define usbhsp_priv_to_pipeinfo(pr)	(&(pr)->pipe_info)
-#define usbhsp_pipe_to_priv(p)		((p)->priv)
-
 #define usbhsp_addr_offset(p)	((usbhs_pipe_number(p) - 1) * 2)
 
-#define usbhsp_is_dcp(p)	((p)->priv->pipe_info.pipe == (p))
-
 #define usbhsp_flags_set(p, f)	((p)->flags |= USBHS_PIPE_FLAGS_##f)
 #define usbhsp_flags_clr(p, f)	((p)->flags &= ~USBHS_PIPE_FLAGS_##f)
 #define usbhsp_flags_has(p, f)	((p)->flags & USBHS_PIPE_FLAGS_##f)
@@ -77,10 +71,10 @@ void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
  */
 static void usbhsp_pipectrl_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
 {
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
 	int offset = usbhsp_addr_offset(pipe);
 
-	if (usbhsp_is_dcp(pipe))
+	if (usbhs_pipe_is_dcp(pipe))
 		usbhs_bset(priv, DCPCTR, mask, val);
 	else
 		usbhs_bset(priv, PIPEnCTR + offset, mask, val);
@@ -88,10 +82,10 @@ static void usbhsp_pipectrl_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
 
 static u16 usbhsp_pipectrl_get(struct usbhs_pipe *pipe)
 {
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
 	int offset = usbhsp_addr_offset(pipe);
 
-	if (usbhsp_is_dcp(pipe))
+	if (usbhs_pipe_is_dcp(pipe))
 		return usbhs_read(priv, DCPCTR);
 	else
 		return usbhs_read(priv, PIPEnCTR + offset);
@@ -104,9 +98,9 @@ static void __usbhsp_pipe_xxx_set(struct usbhs_pipe *pipe,
 				  u16 dcp_reg, u16 pipe_reg,
 				  u16 mask, u16 val)
 {
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
 
-	if (usbhsp_is_dcp(pipe))
+	if (usbhs_pipe_is_dcp(pipe))
 		usbhs_bset(priv, dcp_reg, mask, val);
 	else
 		usbhs_bset(priv, pipe_reg, mask, val);
@@ -115,9 +109,9 @@ static void __usbhsp_pipe_xxx_set(struct usbhs_pipe *pipe,
 static u16 __usbhsp_pipe_xxx_get(struct usbhs_pipe *pipe,
 				 u16 dcp_reg, u16 pipe_reg)
 {
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
 
-	if (usbhsp_is_dcp(pipe))
+	if (usbhs_pipe_is_dcp(pipe))
 		return usbhs_read(priv, dcp_reg);
 	else
 		return usbhs_read(priv, pipe_reg);
@@ -136,7 +130,7 @@ static void usbhsp_pipe_cfg_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
  */
 static void usbhsp_pipe_buf_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
 {
-	if (usbhsp_is_dcp(pipe))
+	if (usbhs_pipe_is_dcp(pipe))
 		return;
 
 	__usbhsp_pipe_xxx_set(pipe, 0, PIPEBUF, mask, val);
@@ -160,7 +154,7 @@ static u16 usbhsp_pipe_maxp_get(struct usbhs_pipe *pipe)
  */
 static void usbhsp_pipe_select(struct usbhs_pipe *pipe)
 {
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
 
 	/*
 	 * On pipe, this is necessary before
@@ -182,7 +176,7 @@ static void usbhsp_pipe_select(struct usbhs_pipe *pipe)
 
 static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
 {
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
 	int timeout = 1024;
 	u16 val;
 
@@ -205,7 +199,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
 	 *  - "Pipe Control Registers Switching Procedure"
 	 */
 	usbhs_write(priv, CFIFOSEL, 0);
-	usbhs_fifo_disable(pipe);
+	usbhs_pipe_disable(pipe);
 
 	do {
 		val = usbhsp_pipectrl_get(pipe);
@@ -220,7 +214,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
 	return -EBUSY;
 }
 
-static int usbhsp_pipe_is_accessible(struct usbhs_pipe *pipe)
+int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
 {
 	u16 val;
 
@@ -253,7 +247,7 @@ static void __usbhsp_pid_try_nak_if_stall(struct usbhs_pipe *pipe)
 	}
 }
 
-void usbhs_fifo_disable(struct usbhs_pipe *pipe)
+void usbhs_pipe_disable(struct usbhs_pipe *pipe)
 {
 	int timeout = 1024;
 	u16 val;
@@ -273,7 +267,7 @@ void usbhs_fifo_disable(struct usbhs_pipe *pipe)
 	} while (timeout--);
 }
 
-void usbhs_fifo_enable(struct usbhs_pipe *pipe)
+void usbhs_pipe_enable(struct usbhs_pipe *pipe)
 {
 	/* see "Pipe n Control Register" - "PID" */
 	__usbhsp_pid_try_nak_if_stall(pipe);
@@ -281,7 +275,7 @@ void usbhs_fifo_enable(struct usbhs_pipe *pipe)
 	usbhsp_pipectrl_set(pipe, PID_MASK, PID_BUF);
 }
 
-void usbhs_fifo_stall(struct usbhs_pipe *pipe)
+void usbhs_pipe_stall(struct usbhs_pipe *pipe)
 {
 	u16 pid = usbhsp_pipectrl_get(pipe);
 
@@ -302,191 +296,6 @@
 }
 
 /*
- * CFIFO ctrl
- */
-void usbhs_fifo_send_terminator(struct usbhs_pipe *pipe)
-{
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
-
-	usbhs_bset(priv, CFIFOCTR, BVAL, BVAL);
-}
-
-static void usbhsp_fifo_clear(struct usbhs_pipe *pipe)
-{
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
-
-	usbhs_write(priv, CFIFOCTR, BCLR);
-}
-
-static int usbhsp_fifo_barrier(struct usbhs_priv *priv)
-{
-	int timeout = 1024;
-
-	do {
-		/* The FIFO port is accessible */
-		if (usbhs_read(priv, CFIFOCTR) & FRDY)
-			return 0;
-
-		udelay(10);
-	} while (timeout--);
-
-	return -EBUSY;
-}
-
-static int usbhsp_fifo_rcv_len(struct usbhs_priv *priv)
-{
-	return usbhs_read(priv, CFIFOCTR) & DTLN_MASK;
-}
-
-static int usbhsp_fifo_select(struct usbhs_pipe *pipe, int write)
-{
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
-	struct device *dev = usbhs_priv_to_dev(priv);
-	int timeout = 1024;
-	u16 mask = ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */
-	u16 base = usbhs_pipe_number(pipe); /* CURPIPE */
-
-	if (usbhsp_is_dcp(pipe))
-		base |= (1 == write) << 5; /* ISEL */
-
-	/* "base" will be used below */
-	usbhs_write(priv, CFIFOSEL, base | MBW_32);
-
-	/* check ISEL and CURPIPE value */
-	while (timeout--) {
-		if (base == (mask & usbhs_read(priv, CFIFOSEL)))
-			return 0;
-		udelay(10);
-	}
-
-	dev_err(dev, "fifo select error\n");
-
-	return -EIO;
-}
-
-int usbhs_fifo_prepare_write(struct usbhs_pipe *pipe)
-{
-	return usbhsp_fifo_select(pipe, 1);
-}
-
-int usbhs_fifo_write(struct usbhs_pipe *pipe, u8 *buf, int len)
-{
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
-	void __iomem *addr = priv->base + CFIFO;
-	int maxp = usbhs_pipe_get_maxpacket(pipe);
-	int total_len;
-	int i, ret;
-
-	ret = usbhsp_pipe_is_accessible(pipe);
-	if (ret < 0)
-		return ret;
-
-	ret = usbhsp_fifo_select(pipe, 1);
-	if (ret < 0)
-		return ret;
-
-	ret = usbhsp_fifo_barrier(priv);
-	if (ret < 0)
-		return ret;
-
-	len = min(len, maxp);
-	total_len = len;
-
-	/*
-	 * FIXME
-	 *
-	 * 32-bit access only
-	 */
-	if (len >= 4 &&
-	    !((unsigned long)buf & 0x03)) {
-		iowrite32_rep(addr, buf, len / 4);
-		len %= 4;
-		buf += total_len - len;
-	}
-
-	/* the rest operation */
-	for (i = 0; i < len; i++)
-		iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
-
-	if (total_len < maxp)
-		usbhs_fifo_send_terminator(pipe);
-
-	return total_len;
-}
-
-int usbhs_fifo_prepare_read(struct usbhs_pipe *pipe)
-{
-	int ret;
-
-	/*
-	 * select pipe and enable it to prepare packet receive
-	 */
-	ret = usbhsp_fifo_select(pipe, 0);
-	if (ret < 0)
-		return ret;
-
-	usbhs_fifo_enable(pipe);
-
-	return ret;
-}
-
-int usbhs_fifo_read(struct usbhs_pipe *pipe, u8 *buf, int len)
-{
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
-	void __iomem *addr = priv->base + CFIFO;
-	int rcv_len;
-	int i, ret;
-	int total_len;
-	u32 data = 0;
-
-	ret = usbhsp_fifo_select(pipe, 0);
-	if (ret < 0)
-		return ret;
-
-	ret = usbhsp_fifo_barrier(priv);
-	if (ret < 0)
-		return ret;
-
-	rcv_len = usbhsp_fifo_rcv_len(priv);
-
-	/*
-	 * Buffer clear if Zero-Length packet
-	 *
-	 * see
-	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
-	 */
-	if (0 == rcv_len) {
-		usbhsp_fifo_clear(pipe);
-		return 0;
-	}
-
-	len = min(rcv_len, len);
-	total_len = len;
-
-	/*
-	 * FIXME
-	 *
-	 * 32-bit access only
-	 */
-	if (len >= 4 &&
-	    !((unsigned long)buf & 0x03)) {
-		ioread32_rep(addr, buf, len / 4);
-		len %= 4;
-		buf += rcv_len - len;
-	}
-
-	/* the rest operation */
-	for (i = 0; i < len; i++) {
-		if (!(i & 0x03))
-			data = ioread32(addr);
-
-		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
-	}
-
-	return total_len;
-}
-
-/*
  * pipe setup
  */
 static int usbhsp_possible_double_buffer(struct usbhs_pipe *pipe)
@@ -519,7 +328,7 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
 	};
 	int is_double = usbhsp_possible_double_buffer(pipe);
 
-	if (usbhsp_is_dcp(pipe))
+	if (usbhs_pipe_is_dcp(pipe))
 		return -EINVAL;
 
 	/*
@@ -550,12 +359,15 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
 
 	/* DIR */
 	if (usb_endpoint_dir_in(desc))
-		usbhsp_flags_set(pipe, IS_DIR_IN);
+		usbhsp_flags_set(pipe, IS_DIR_HOST);
 
 	if ((is_host && usb_endpoint_dir_out(desc)) ||
 	    (!is_host && usb_endpoint_dir_in(desc)))
 		dir |= DIR_OUT;
 
+	if (!dir)
+		usbhsp_flags_set(pipe, IS_DIR_IN);
+
 	/* SHTNAK */
 	if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_BULK) &&
 	    !dir)
@@ -587,8 +399,8 @@ static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe,
 				 const struct usb_endpoint_descriptor *desc,
 				 int is_host)
 {
-	struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
-	struct usbhs_pipe_info *info = usbhsp_priv_to_pipeinfo(priv);
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
 	struct device *dev = usbhs_priv_to_dev(priv);
 	int pipe_num = usbhs_pipe_number(pipe);
 	int is_double = usbhsp_possible_double_buffer(pipe);
@@ -666,7 +478,7 @@ static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe,
  */
 int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe)
 {
-	u16 mask = usbhsp_is_dcp(pipe) ? DCP_MAXP_MASK : PIPE_MAXP_MASK;
+	u16 mask = usbhs_pipe_is_dcp(pipe) ? DCP_MAXP_MASK : PIPE_MAXP_MASK;
 
 	usbhsp_pipe_select(pipe);
 
@@ -678,6 +490,11 @@ int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe)
 	return usbhsp_flags_has(pipe, IS_DIR_IN);
 }
 
+int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
+{
+	return usbhsp_flags_has(pipe, IS_DIR_HOST);
+}
+
 void usbhs_pipe_clear_sequence(struct usbhs_pipe *pipe)
 {
 	usbhsp_pipectrl_set(pipe, SQCLR, SQCLR);
@@ -714,12 +531,20 @@ static struct usbhs_pipe *usbhsp_get_pipe(struct usbhs_priv *priv, u32 type)
 	return pipe;
 }
 
-void usbhs_pipe_init(struct usbhs_priv *priv)
+void usbhs_pipe_init(struct usbhs_priv *priv,
+		     void (*done)(struct usbhs_pkt *pkt),
+		     int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map))
 {
-	struct usbhs_pipe_info *info = usbhsp_priv_to_pipeinfo(priv);
+	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
+	struct device *dev = usbhs_priv_to_dev(priv);
 	struct usbhs_pipe *pipe;
 	int i;
 
+	if (!done) {
+		dev_err(dev, "no done function\n");
+		return;
+	}
+
 	/*
 	 * FIXME
 	 *
@@ -738,10 +563,17 @@ void usbhs_pipe_init(struct usbhs_priv *priv)
 		info->bufnmb_last++;
 
 		usbhsp_flags_init(pipe);
+		pipe->fifo = NULL;
 		pipe->mod_private = NULL;
+		INIT_LIST_HEAD(&pipe->list);
 
-		usbhsp_fifo_clear(pipe);
+		/* pipe force init */
+		usbhsp_pipectrl_set(pipe, ACLRM, ACLRM);
+		usbhsp_pipectrl_set(pipe, ACLRM, 0);
 	}
+
+	info->done = done;
+	info->dma_map_ctrl = dma_map_ctrl;
 }
 
 struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
@@ -761,7 +593,9 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
 		return NULL;
 	}
 
-	usbhs_fifo_disable(pipe);
+	INIT_LIST_HEAD(&pipe->list);
+
+	usbhs_pipe_disable(pipe);
 
 	/* make sure pipe is not busy */
 	ret = usbhsp_pipe_barrier(pipe);
@@ -774,11 +608,6 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
 	pipebuf = usbhsp_setup_pipebuff(pipe, desc, is_host);
 	pipemaxp = usbhsp_setup_pipemaxp(pipe, desc, is_host);
 
-	/* buffer clear
-	 * see PIPECFG :: BFRE */
-	usbhsp_pipectrl_set(pipe, ACLRM, ACLRM);
-	usbhsp_pipectrl_set(pipe, ACLRM, 0);
-
 	usbhsp_pipe_select(pipe);
 	usbhsp_pipe_cfg_set(pipe, 0xFFFF, pipecfg);
 	usbhsp_pipe_buf_set(pipe, 0xFFFF, pipebuf);
@@ -794,6 +623,18 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
 	return pipe;
 }
 
+void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo)
+{
+	if (pipe->fifo)
+		pipe->fifo->pipe = NULL;
+
+	pipe->fifo = fifo;
+
+	if (fifo)
+		fifo->pipe = pipe;
+}
+
+
 /*
  * dcp control
  */
@@ -813,25 +654,25 @@ struct usbhs_pipe *usbhs_dcp_malloc(struct usbhs_priv *priv)
 	usbhsp_pipe_select(pipe);
 	usbhs_pipe_clear_sequence(pipe);
+	INIT_LIST_HEAD(&pipe->list);
 
 	return pipe;
 }
 
 void usbhs_dcp_control_transfer_done(struct usbhs_pipe *pipe)
 {
-	WARN_ON(!usbhsp_is_dcp(pipe));
+	WARN_ON(!usbhs_pipe_is_dcp(pipe));
 
-	usbhs_fifo_enable(pipe);
+	usbhs_pipe_enable(pipe);
 
 	usbhsp_pipectrl_set(pipe, CCPL, CCPL);
 }
 
-
 /*
  * pipe module function
  */
 int usbhs_pipe_probe(struct usbhs_priv *priv)
 {
-	struct usbhs_pipe_info *info = usbhsp_priv_to_pipeinfo(priv);
+	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
 	struct usbhs_pipe *pipe;
 	struct device *dev = usbhs_priv_to_dev(priv);
 	u32 *pipe_type = usbhs_get_dparam(priv, pipe_type);
@@ -868,7 +709,7 @@ int usbhs_pipe_probe(struct usbhs_priv *priv)
 
 void usbhs_pipe_remove(struct usbhs_priv *priv)
 {
-	struct usbhs_pipe_info *info = usbhsp_priv_to_pipeinfo(priv);
+	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
 
 	kfree(info->pipe);
 }
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 1cca9b7fb266..35e100477e55 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -18,6 +18,7 @@
 #define RENESAS_USB_PIPE_H
 
 #include "./common.h"
+#include "./fifo.h"
 
 /*
  * struct
@@ -26,10 +27,13 @@ struct usbhs_pipe {
 	u32 pipe_type; /* USB_ENDPOINT_XFER_xxx */
 
 	struct usbhs_priv *priv;
+	struct usbhs_fifo *fifo;
+	struct list_head list;
 
 	u32 flags;
#define USBHS_PIPE_FLAGS_IS_USED	(1 << 0)
#define USBHS_PIPE_FLAGS_IS_DIR_IN	(1 << 1)
+#define USBHS_PIPE_FLAGS_IS_DIR_HOST	(1 << 2)
 
 	void *mod_private;
 };
@@ -38,6 +42,9 @@ struct usbhs_pipe_info {
 	struct usbhs_pipe *pipe;
 	int size;	/* array size of "pipe" */
 	int bufnmb_last;	/* FIXME : driver needs good allocator */
+
+	void (*done)(struct usbhs_pkt *pkt);
+	int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map);
 };
 
 /*
@@ -55,25 +62,9 @@ struct usbhs_pipe_info {
 	__usbhs_for_each_pipe(0, pos, &((priv)->pipe_info), i)
 
 /*
- * pipe module probe / remove
- */
-int usbhs_pipe_probe(struct usbhs_priv *priv);
-void usbhs_pipe_remove(struct usbhs_priv *priv);
-
-/*
- * cfifo
+ * data
  */
-int usbhs_fifo_write(struct usbhs_pipe *pipe, u8 *buf, int len);
-int usbhs_fifo_read(struct usbhs_pipe *pipe, u8 *buf, int len);
-int usbhs_fifo_prepare_write(struct usbhs_pipe *pipe);
-int usbhs_fifo_prepare_read(struct usbhs_pipe *pipe);
-
-void usbhs_fifo_enable(struct usbhs_pipe *pipe);
-void usbhs_fifo_disable(struct usbhs_pipe *pipe);
-void usbhs_fifo_stall(struct usbhs_pipe *pipe);
-
-void usbhs_fifo_send_terminator(struct usbhs_pipe *pipe);
-
+#define usbhs_priv_to_pipeinfo(pr)	(&(pr)->pipe_info)
 
 /*
  * usb request
@@ -87,13 +78,26 @@ void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req);
 struct usbhs_pipe
 *usbhs_pipe_malloc(struct usbhs_priv *priv,
 		   const struct usb_endpoint_descriptor *desc);
-
+int usbhs_pipe_probe(struct usbhs_priv *priv);
+void usbhs_pipe_remove(struct usbhs_priv *priv);
 int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe);
-void usbhs_pipe_init(struct usbhs_priv *priv);
+int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe);
+void usbhs_pipe_init(struct usbhs_priv *priv,
+		     void (*done)(struct usbhs_pkt *pkt),
+		     int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
 int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
 void usbhs_pipe_clear_sequence(struct usbhs_pipe *pipe);
+int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+void usbhs_pipe_enable(struct usbhs_pipe *pipe);
+void usbhs_pipe_disable(struct usbhs_pipe *pipe);
+void usbhs_pipe_stall(struct usbhs_pipe *pipe);
+void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo);
 
+#define usbhs_pipe_to_priv(p)	((p)->priv)
 #define usbhs_pipe_number(p)	(int)((p) - (p)->priv->pipe_info.pipe)
+#define usbhs_pipe_is_dcp(p)	((p)->priv->pipe_info.pipe == (p))
+#define usbhs_pipe_to_fifo(p)	((p)->fifo)
+#define usbhs_pipe_is_busy(p)	usbhs_pipe_to_fifo(p)
 
 /*
  * dcp control
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index c0c5665e60a9..200fd7c6c7d5 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -298,7 +298,7 @@ static int cbaf_cdid_get(struct cbaf *cbaf)
 	if (result < needed) {
 		dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
 			"%zu bytes needed)\n", (size_t)result, needed);
-		return result;
+		return -ENOENT;
 	}
 
 	strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
@@ -350,7 +350,7 @@ static ssize_t cbaf_wusb_chid_store(struct device *dev,
 		return result;
 	result = cbaf_cdid_get(cbaf);
 	if (result < 0)
-		return -result;
+		return result;
 	return size;
 }
 static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
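
Note on the pipe.c/pipe.h interface change above: a mod no longer calls the old usbhs_fifo_* helpers exported by pipe.c; instead it hands its packet-completion and DMA-mapping hooks to usbhs_pipe_init(), which stores them in usbhs_pipe_info, and the FIFO code invokes them through info->done / info->dma_map_ctrl. The following is a minimal, standalone sketch of that registration pattern only; the struct layouts and helper names below are simplified stand-ins for illustration, not the kernel's definitions.

/* Minimal model of the done/dma_map_ctrl registration added to
 * usbhs_pipe_init(); all types here are simplified stand-ins. */
#include <stdio.h>
#include <stddef.h>

struct usbhs_pkt { int length; };

struct usbhs_pipe_info {
	void (*done)(struct usbhs_pkt *pkt);                  /* packet complete */
	int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map);  /* map/unmap hook */
};

struct usbhs_priv { struct usbhs_pipe_info pipe_info; };

/* mirrors the "no done function" guard the patch adds in pipe.c */
static int pipe_init(struct usbhs_priv *priv,
		     void (*done)(struct usbhs_pkt *pkt),
		     int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map))
{
	if (!done) {
		fprintf(stderr, "no done function\n");
		return -1;
	}
	priv->pipe_info.done = done;
	priv->pipe_info.dma_map_ctrl = dma_map_ctrl;
	return 0;
}

/* callbacks a mod (e.g. the gadget side) would supply */
static void gadget_done(struct usbhs_pkt *pkt)
{
	printf("packet done, %d bytes\n", pkt->length);
}

static int gadget_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	(void)pkt;
	(void)map;
	return 0;	/* PIO only in this model */
}

int main(void)
{
	struct usbhs_priv priv = { { NULL, NULL } };
	struct usbhs_pkt pkt = { 512 };

	if (pipe_init(&priv, gadget_done, gadget_dma_map_ctrl) == 0) {
		priv.pipe_info.dma_map_ctrl(&pkt, 1);
		priv.pipe_info.done(&pkt);	/* what the FIFO layer does on completion */
	}
	return 0;
}

Keeping the callbacks in usbhs_pipe_info is what lets the shared FIFO code stay mod-agnostic: each mod differs only in the callbacks it registers at init time.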