| | | |
| --- | --- | --- |
| author | Greg Kroah-Hartman <gregkh@suse.de> | 2011-01-24 08:11:18 +0900 |
| committer | Greg Kroah-Hartman <gregkh@suse.de> | 2011-01-24 08:14:07 +0900 |
| commit | fd96d0d8d8079b1ea7a7e8943a4da9dfc9621124 (patch) | |
| tree | 1d1314722847d56e3b703ec721f9534e93fa5580 | |
| parent | 3c47eb06f08eb970ea9d696bcdb57a175d37b470 (diff) | |
| parent | 50d64676d132a8a72a1a1657d7b3e6efa53da1ac (diff) | |
Merge branch 'for-usb-linus' of master.kernel.org:/pub/scm/linux/kernel/git/sarah/xhci into usb-linus
* 'for-usb-linus' of master.kernel.org:/pub/scm/linux/kernel/git/sarah/xhci:
xhci: Remove more doorbell-related reads
xHCI: fix printk_ratelimit() usage
xHCI: replace dev_dbg() with xhci_dbg()
xHCI: fix cycle bit set in giveback_first_trb()
xHCI: remove redundant parameter in giveback_first_trb()
xHCI: fix queue_trb in isoc transfer
xhci: Use GFP_NOIO during device reset.
usb: Realloc xHCI structures after a hub is verified.
xhci: Do not run xhci_cleanup_msix with irq disabled
xHCI: synchronize irq in xhci_suspend()
xhci: Resume bus on any port status change.
| mode | file | lines changed |
| --- | --- | --- |
| -rw-r--r-- | drivers/usb/core/hcd-pci.c | 7 |
| -rw-r--r-- | drivers/usb/core/hub.c | 21 |
| -rw-r--r-- | drivers/usb/host/xhci-ring.c | 91 |
| -rw-r--r-- | drivers/usb/host/xhci.c | 60 |
| -rw-r--r-- | drivers/usb/host/xhci.h | 16 |
| -rw-r--r-- | include/linux/usb/hcd.h | 1 |

6 files changed, 110 insertions, 86 deletions
```diff
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b55d46070a25..f71e8e307e0f 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -405,7 +405,12 @@ static int suspend_common(struct device *dev, bool do_wakeup)
         return retval;
     }
 
-    synchronize_irq(pci_dev->irq);
+    /* If MSI-X is enabled, the driver will have synchronized all vectors
+     * in pci_suspend(). If MSI or legacy PCI is enabled, that will be
+     * synchronized here.
+     */
+    if (!hcd->msix_enabled)
+        synchronize_irq(pci_dev->irq);
 
     /* Downstream ports from this root hub should already be quiesced, so
      * there will be no DMA activity.  Now we can shut down the upstream
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b98efae6a1cf..4310cc4b1cb5 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -676,6 +676,8 @@ static void hub_init_func3(struct work_struct *ws);
 static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 {
     struct usb_device *hdev = hub->hdev;
+    struct usb_hcd *hcd;
+    int ret;
     int port1;
     int status;
     bool need_debounce_delay = false;
@@ -714,6 +716,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
             usb_autopm_get_interface_no_resume(
                     to_usb_interface(hub->intfdev));
             return;     /* Continues at init2: below */
+        } else if (type == HUB_RESET_RESUME) {
+            /* The internal host controller state for the hub device
+             * may be gone after a host power loss on system resume.
+             * Update the device's info so the HW knows it's a hub.
+             */
+            hcd = bus_to_hcd(hdev->bus);
+            if (hcd->driver->update_hub_device) {
+                ret = hcd->driver->update_hub_device(hcd, hdev,
+                        &hub->tt, GFP_NOIO);
+                if (ret < 0) {
+                    dev_err(hub->intfdev, "Host not "
+                            "accepting hub info "
+                            "update.\n");
+                    dev_err(hub->intfdev, "LS/FS devices "
+                            "and hubs may not work "
+                            "under this hub\n.");
+                }
+            }
+            hub_power_on(hub, true);
         } else {
             hub_power_on(hub, true);
         }
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index df558f6f84e3..3e8211c1ce5a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -308,11 +308,8 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
-    u32 temp;
-
     xhci_dbg(xhci, "// Ding dong!\n");
-    temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
-    xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+    xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
     /* Flush PCI posted writes */
     xhci_readl(xhci, &xhci->dba->doorbell[0]);
 }
@@ -322,26 +319,24 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
         unsigned int ep_index,
         unsigned int stream_id)
 {
-    struct xhci_virt_ep *ep;
-    unsigned int ep_state;
-    u32 field;
     __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+    struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+    unsigned int ep_state = ep->ep_state;
 
-    ep = &xhci->devs[slot_id]->eps[ep_index];
-    ep_state = ep->ep_state;
     /* Don't ring the doorbell for this endpoint if there are pending
-     * cancellations because the we don't want to interrupt processing.
+     * cancellations because we don't want to interrupt processing.
      * We don't want to restart any stream rings if there's a set dequeue
      * pointer command pending because the device can choose to start any
      * stream once the endpoint is on the HW schedule.
      * FIXME - check all the stream rings for pending cancellations.
      */
-    if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
-            && !(ep_state & EP_HALTED)) {
-        field = xhci_readl(xhci, db_addr) & DB_MASK;
-        field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
-        xhci_writel(xhci, field, db_addr);
-    }
+    if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
+        (ep_state & EP_HALTED))
+        return;
+    xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+    /* The CPU has better things to do at this point than wait for a
+     * write-posting flush.  It'll get there soon enough.
+     */
 }
 
 /* Ring the doorbell for any rings with pending URBs */
@@ -1188,7 +1183,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
     addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
     temp = xhci_readl(xhci, addr);
-    if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
+    if (hcd->state == HC_STATE_SUSPENDED) {
         xhci_dbg(xhci, "resume root hub\n");
         usb_hcd_resume_root_hub(hcd);
     }
@@ -1710,8 +1705,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
         /* Others already handled above */
         break;
     }
-    dev_dbg(&td->urb->dev->dev,
-            "ep %#x - asked for %d bytes, "
+    xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
             "%d bytes untransferred\n",
             td->urb->ep->desc.bEndpointAddress,
             td->urb->transfer_buffer_length,
@@ -2389,7 +2383,8 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
     }
     xhci_dbg(xhci, "\n");
     if (!in_interrupt())
-        dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+        xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
+                "num_trbs = %d\n",
                 urb->ep->desc.bEndpointAddress,
                 urb->transfer_buffer_length,
                 num_trbs);
@@ -2414,14 +2409,17 @@ static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 
 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
         unsigned int ep_index, unsigned int stream_id, int start_cycle,
-        struct xhci_generic_trb *start_trb, struct xhci_td *td)
+        struct xhci_generic_trb *start_trb)
 {
     /*
     * Pass all the TRBs to the hardware at once and make sure this write
     * isn't reordered.
     */
     wmb();
-    start_trb->field[3] |= start_cycle;
+    if (start_cycle)
+        start_trb->field[3] |= start_cycle;
+    else
+        start_trb->field[3] &= ~0x1;
     xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
@@ -2449,7 +2447,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     * to set the polling interval (once the API is added).
     */
     if (xhci_interval != ep_interval) {
-        if (!printk_ratelimit())
+        if (printk_ratelimit())
             dev_dbg(&urb->dev->dev, "Driver uses different interval"
                     " (%d microframe%s) than xHCI "
                     "(%d microframe%s)\n",
@@ -2551,9 +2549,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         u32 remainder = 0;
 
         /* Don't change the cycle bit of the first TRB until later */
-        if (first_trb)
+        if (first_trb) {
             first_trb = false;
-        else
+            if (start_cycle == 0)
+                field |= 0x1;
+        } else
             field |= ep_ring->cycle_state;
 
         /* Chain all the TRBs together; clear the chain bit in the last
@@ -2625,7 +2625,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
     check_trb_math(urb, num_trbs, running_total);
     giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-            start_cycle, start_trb, td);
+            start_cycle, start_trb);
     return 0;
 }
@@ -2671,7 +2671,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
     /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
     if (!in_interrupt())
-        dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+        xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
+                "addr = %#llx, num_trbs = %d\n",
                 urb->ep->desc.bEndpointAddress,
                 urb->transfer_buffer_length,
                 urb->transfer_buffer_length,
@@ -2711,9 +2712,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         field = 0;
 
         /* Don't change the cycle bit of the first TRB until later */
-        if (first_trb)
+        if (first_trb) {
             first_trb = false;
-        else
+            if (start_cycle == 0)
+                field |= 0x1;
+        } else
             field |= ep_ring->cycle_state;
 
         /* Chain all the TRBs together; clear the chain bit in the last
@@ -2757,7 +2760,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
     check_trb_math(urb, num_trbs, running_total);
     giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-            start_cycle, start_trb, td);
+            start_cycle, start_trb);
     return 0;
 }
@@ -2818,13 +2821,17 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     /* Queue setup TRB - see section 6.4.1.2.1 */
     /* FIXME better way to translate setup_packet into two u32 fields? */
     setup = (struct usb_ctrlrequest *) urb->setup_packet;
+    field = 0;
+    field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
+    if (start_cycle == 0)
+        field |= 0x1;
     queue_trb(xhci, ep_ring, false, true,
             /* FIXME endianness is probably going to bite my ass here. */
             setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
             setup->wIndex | setup->wLength << 16,
             TRB_LEN(8) | TRB_INTR_TARGET(0),
             /* Immediate data in pointer */
-            TRB_IDT | TRB_TYPE(TRB_SETUP));
+            field);
 
     /* If there's data, queue data TRBs */
     field = 0;
@@ -2859,7 +2866,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
             field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
     giveback_first_trb(xhci, slot_id, ep_index, 0,
-            start_cycle, start_trb, td);
+            start_cycle, start_trb);
     return 0;
 }
@@ -2900,6 +2907,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     int running_total, trb_buff_len, td_len, td_remain_len, ret;
     u64 start_addr, addr;
     int i, j;
+    bool more_trbs_coming;
 
     ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
@@ -2910,7 +2918,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     }
 
     if (!in_interrupt())
-        dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+        xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
                 " addr = %#llx, num_tds = %d\n",
                 urb->ep->desc.bEndpointAddress,
                 urb->transfer_buffer_length,
@@ -2950,7 +2958,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
             field |= TRB_TYPE(TRB_ISOC);
             /* Assume URB_ISO_ASAP is set */
             field |= TRB_SIA;
-            if (i > 0)
+            if (i == 0) {
+                if (start_cycle == 0)
+                    field |= 0x1;
+            } else
                 field |= ep_ring->cycle_state;
             first_trb = false;
         } else {
@@ -2965,9 +2976,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
             */
             if (j < trbs_per_td - 1) {
                 field |= TRB_CHAIN;
+                more_trbs_coming = true;
             } else {
                 td->last_trb = ep_ring->enqueue;
                 field |= TRB_IOC;
+                more_trbs_coming = false;
             }
 
             /* Calculate TRB length */
@@ -2980,7 +2993,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
             length_field = TRB_LEN(trb_buff_len) |
                 remainder |
                 TRB_INTR_TARGET(0);
-            queue_trb(xhci, ep_ring, false, false,
+            queue_trb(xhci, ep_ring, false, more_trbs_coming,
                 lower_32_bits(addr),
                 upper_32_bits(addr),
                 length_field,
@@ -3003,10 +3016,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         }
     }
 
-    wmb();
-    start_trb->field[3] |= start_cycle;
-
-    xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+    giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+            start_cycle, start_trb);
     return 0;
 }
@@ -3064,7 +3075,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
     * to set the polling interval (once the API is added).
     */
     if (xhci_interval != ep_interval) {
-        if (!printk_ratelimit())
+        if (printk_ratelimit())
             dev_dbg(&urb->dev->dev, "Driver uses different interval"
                     " (%d microframe%s) than xHCI "
                     "(%d microframe%s)\n",
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45e4a3108cc3..34cf4e165877 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -226,7 +226,8 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
 static int xhci_setup_msix(struct xhci_hcd *xhci)
 {
     int i, ret = 0;
-    struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+    struct usb_hcd *hcd = xhci_to_hcd(xhci);
+    struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
     /*
      * calculate number of msi-x vectors supported.
@@ -265,6 +266,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
         goto disable_msix;
     }
 
+    hcd->msix_enabled = 1;
     return ret;
 
 disable_msix:
@@ -280,7 +282,8 @@ free_entries:
 /* Free any IRQs and disable MSI-X */
 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
-    struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+    struct usb_hcd *hcd = xhci_to_hcd(xhci);
+    struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
     xhci_free_irq(xhci);
@@ -292,6 +295,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
         pci_disable_msi(pdev);
     }
 
+    hcd->msix_enabled = 0;
     return;
 }
@@ -508,9 +512,10 @@ void xhci_stop(struct usb_hcd *hcd)
     spin_lock_irq(&xhci->lock);
     xhci_halt(xhci);
     xhci_reset(xhci);
-    xhci_cleanup_msix(xhci);
     spin_unlock_irq(&xhci->lock);
 
+    xhci_cleanup_msix(xhci);
+
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
     /* Tell the event ring poll function not to reschedule */
     xhci->zombie = 1;
@@ -544,9 +549,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
     spin_lock_irq(&xhci->lock);
     xhci_halt(xhci);
-    xhci_cleanup_msix(xhci);
     spin_unlock_irq(&xhci->lock);
 
+    xhci_cleanup_msix(xhci);
+
     xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
             xhci_readl(xhci, &xhci->op_regs->status));
 }
@@ -647,6 +653,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
     int         rc = 0;
     struct usb_hcd      *hcd = xhci_to_hcd(xhci);
     u32         command;
+    int         i;
 
     spin_lock_irq(&xhci->lock);
     clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -677,10 +684,15 @@ int xhci_suspend(struct xhci_hcd *xhci)
         spin_unlock_irq(&xhci->lock);
         return -ETIMEDOUT;
     }
-    /* step 5: remove core well power */
-    xhci_cleanup_msix(xhci);
     spin_unlock_irq(&xhci->lock);
 
+    /* step 5: remove core well power */
+    /* synchronize irq when using MSI-X */
+    if (xhci->msix_entries) {
+        for (i = 0; i < xhci->msix_count; i++)
+            synchronize_irq(xhci->msix_entries[i].vector);
+    }
+
     return rc;
 }
@@ -694,7 +706,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
     u32         command, temp = 0;
     struct usb_hcd      *hcd = xhci_to_hcd(xhci);
-    struct pci_dev      *pdev = to_pci_dev(hcd->self.controller);
     int         old_state, retval;
 
     old_state = hcd->state;
@@ -729,9 +740,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
         xhci_dbg(xhci, "Stop HCD\n");
         xhci_halt(xhci);
         xhci_reset(xhci);
-        if (hibernated)
-            xhci_cleanup_msix(xhci);
         spin_unlock_irq(&xhci->lock);
+        xhci_cleanup_msix(xhci);
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
         /* Tell the event ring poll function not to reschedule */
@@ -765,30 +775,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
             return retval;
         }
 
-        spin_unlock_irq(&xhci->lock);
-        /* Re-setup MSI-X */
-        if (hcd->irq)
-            free_irq(hcd->irq, hcd);
-        hcd->irq = -1;
-
-        retval = xhci_setup_msix(xhci);
-        if (retval)
-            /* fall back to msi*/
-            retval = xhci_setup_msi(xhci);
-
-        if (retval) {
-            /* fall back to legacy interrupt*/
-            retval = request_irq(pdev->irq, &usb_hcd_irq,
-                    IRQF_SHARED, hcd->irq_descr, hcd);
-            if (retval) {
-                xhci_err(xhci, "request interrupt %d failed\n",
-                        pdev->irq);
-                return retval;
-            }
-            hcd->irq = pdev->irq;
-        }
-
-        spin_lock_irq(&xhci->lock);
         /* step 4: set Run/Stop bit */
         command = xhci_readl(xhci, &xhci->op_regs->command);
         command |= CMD_RUN;
@@ -2445,8 +2431,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
         xhci_err(xhci, "Error while assigning device slot ID\n");
         return 0;
     }
-    /* xhci_alloc_virt_device() does not touch rings; no need to lock */
-    if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+    /* xhci_alloc_virt_device() does not touch rings; no need to lock.
+     * Use GFP_NOIO, since this function can be called from
+     * xhci_discover_or_reset_device(), which may be called as part of
+     * mass storage driver error handling.
+     */
+    if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
         /* Disable slot, if we can do it without mem alloc */
         xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
         spin_lock_irqsave(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 170c367112d2..7f236fd22015 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -436,22 +436,18 @@ struct xhci_run_regs {
 /**
  * struct doorbell_array
  *
+ * Bits  0 -  7: Endpoint target
+ * Bits  8 - 15: RsvdZ
+ * Bits 16 - 31: Stream ID
+ *
  * Section 5.6
  */
 struct xhci_doorbell_array {
     u32 doorbell[256];
 };
 
-#define DB_TARGET_MASK      0xFFFFFF00
-#define DB_STREAM_ID_MASK   0x0000FFFF
-#define DB_TARGET_HOST      0x0
-#define DB_STREAM_ID_HOST   0x0
-#define DB_MASK             (0xff << 8)
-
-/* Endpoint Target - bits 0:7 */
-#define EPI_TO_DB(p)        (((p) + 1) & 0xff)
-#define STREAM_ID_TO_DB(p)  (((p) & 0xffff) << 16)
-
+#define DB_VALUE(ep, stream)    ((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_HOST           0x00000000
 
 /**
  * struct xhci_protocol_caps
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index dd6ee49a0844..a854fe89484e 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -112,6 +112,7 @@ struct usb_hcd {
     /* Flags that get set only during HCD registration or removal. */
     unsigned        rh_registered:1;/* is root hub registered? */
     unsigned        rh_pollable:1;  /* may we poll the root hub? */
+    unsigned        msix_enabled:1; /* driver has MSI-X enabled? */
 
     /* The next flag is a stopgap, to be removed when all the HCDs
      * support the new root-hub polling mechanism. */
```
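The xhci.h hunk above replaces the read-modify-write doorbell helpers (DB_MASK, EPI_TO_DB, STREAM_ID_TO_DB) with a single DB_VALUE() macro, which is what lets xhci_ring_cmd_db() and xhci_ring_ep_doorbell() drop their extra MMIO reads. Below is a minimal userspace sketch of the register layout the macro encodes; the macro bodies are copied from the hunk, everything else (the program itself, the printed values) is only illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* Doorbell layout per xHCI spec section 5.6, as documented in the hunk:
 * bits 0-7 = endpoint target (ep_index + 1), bits 8-15 = RsvdZ,
 * bits 16-31 = stream ID.  Macros copied from the xhci.h change.
 */
#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
#define DB_VALUE_HOST		0x00000000

int main(void)
{
	/* EP index 0 (doorbell target 1), no streams */
	uint32_t db_ep0 = DB_VALUE(0, 0);
	/* EP index 3 (doorbell target 4), stream ID 5 */
	uint32_t db_ep3_s5 = DB_VALUE(3, 5);

	printf("ep0:        0x%08x\n", db_ep0);       /* 0x00000001 */
	printf("ep3/str5:   0x%08x\n", db_ep3_s5);    /* 0x00050004 */
	printf("host (cmd): 0x%08x\n", DB_VALUE_HOST);
	return 0;
}
```

Because the full register value can be composed up front, the driver no longer needs the old value of the doorbell register at all, so the read-mask-write sequence collapses into a single posted write.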
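Several hunks (queue_bulk_sg_tx, xhci_queue_bulk_tx, xhci_queue_ctrl_tx, xhci_queue_isoc_tx and giveback_first_trb) fix the same corner case: when the ring's cycle state is 0, the first TRB of a TD must initially be queued with its cycle bit set to 1 so the controller ignores it, and only flipped to 0 once all the other TRBs are in place. The toy model below is not driver code; the struct and function names are made up, and the wmb() barrier is only hinted at in a comment. It shows the invert-then-flip handoff the patches implement:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified model of the cycle-bit handoff fixed by these patches. */
struct toy_trb { uint32_t field3; };

#define CYCLE_BIT 0x1

static void queue_td(struct toy_trb *trb, int ntrbs, int cycle_state)
{
	for (int i = 0; i < ntrbs; i++) {
		uint32_t field = 0;

		if (i == 0) {
			/* First TRB: write the *opposite* of the ring's
			 * cycle state so the controller ignores the TD
			 * until the whole thing is queued.
			 */
			if (cycle_state == 0)
				field |= CYCLE_BIT;
		} else {
			field |= cycle_state;
		}
		trb[i].field3 = field;
	}

	/* wmb() would go here: all TRBs must be visible before the flip. */

	/* Flip the first TRB to the valid cycle value, then ring the bell. */
	if (cycle_state)
		trb[0].field3 |= CYCLE_BIT;
	else
		trb[0].field3 &= ~CYCLE_BIT;
}

int main(void)
{
	struct toy_trb td[3];

	queue_td(td, 3, 0);	/* ring cycle state 0, e.g. after a wrap */
	for (int i = 0; i < 3; i++)
		printf("trb[%d] cycle = %u\n", i, td[i].field3 & CYCLE_BIT);
	return 0;
}
```

The pre-fix code only handled the cycle_state == 1 half of this: OR-ing a start_cycle of 0 into the first TRB was a no-op, so a partially queued TD could already look valid to the controller.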
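The hcd-pci.c and xhci.c suspend changes split IRQ synchronization by interrupt mode: xhci_suspend() now synchronizes every MSI-X vector itself after dropping xhci->lock, while suspend_common() skips synchronize_irq(pci_dev->irq) whenever the HCD has set the new msix_enabled flag. A reduced sketch of that division of labor, assuming toy stand-in types and a stubbed synchronize_irq() rather than the kernel ones:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel structures, reduced to the fields used here. */
struct toy_msix_entry { unsigned int vector; };

struct toy_xhci {
	struct toy_msix_entry *msix_entries;	/* NULL when MSI-X is off */
	int msix_count;
};

struct toy_hcd { bool msix_enabled; };

/* Stand-in for the kernel's synchronize_irq(). */
static void synchronize_irq(unsigned int irq)
{
	printf("synchronize_irq(%u)\n", irq);
}

/* xhci_suspend() tail: with MSI-X, the xHCI driver synchronizes every
 * vector itself, outside the spinlock.
 */
static void xhci_suspend_tail(struct toy_xhci *xhci)
{
	if (xhci->msix_entries)
		for (int i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
}

/* suspend_common() tail: the PCI glue only synchronizes the single
 * MSI/INTx line, and skips it when the HCD reports MSI-X enabled.
 */
static void suspend_common_tail(struct toy_hcd *hcd, unsigned int pci_irq)
{
	if (!hcd->msix_enabled)
		synchronize_irq(pci_irq);
}

int main(void)
{
	struct toy_msix_entry vecs[2] = { { 42 }, { 43 } };
	struct toy_xhci xhci = { vecs, 2 };
	struct toy_hcd hcd = { .msix_enabled = true };

	xhci_suspend_tail(&xhci);	/* synchronizes vectors 42 and 43 */
	suspend_common_tail(&hcd, 16);	/* skipped: MSI-X already handled */
	return 0;
}
```

This is also why xhci_cleanup_msix() moved outside the spin_lock_irq()/spin_unlock_irq() pairs in xhci_stop(), xhci_shutdown(), xhci_suspend() and xhci_resume(): freeing and synchronizing IRQs can sleep, so it must not run with interrupts disabled.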