Diffstat (limited to 'drivers/usb/host')
33 files changed, 2557 insertions, 288 deletions
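Note on one recurring pattern in the hunks below: the fhci.h change converts the driver's cq_*() wrappers from the old pointer-returning kfifo API to kfifos embedded in struct endpoint, using kfifo_alloc()/kfifo_in()/kfifo_out()/kfifo_len(). For reference, a minimal sketch of that pattern outside the driver; the pkt_queue structure and helper names are hypothetical, only the kfifo calls are taken from the hunks themselves.

#include <linux/kfifo.h>
#include <linux/slab.h>

/* Hypothetical queue of packet pointers; the kfifo is a member of the
 * owning structure rather than a separately allocated object, as in the
 * fhci.h change below.
 */
struct pkt_queue {
	struct kfifo fifo;	/* stores void * entries */
};

static int pkt_queue_init(struct pkt_queue *q, unsigned int entries)
{
	/* kfifo_alloc() now fills in an existing struct kfifo and returns
	 * 0 on success instead of returning an allocated pointer.
	 */
	return kfifo_alloc(&q->fifo, entries * sizeof(void *), GFP_KERNEL);
}

static int pkt_queue_put(struct pkt_queue *q, void *p)
{
	return kfifo_in(&q->fifo, &p, sizeof(p));
}

static void *pkt_queue_get(struct pkt_queue *q)
{
	void *p = NULL;

	kfifo_out(&q->fifo, &p, sizeof(p));
	return p;
}

static unsigned int pkt_queue_len(struct pkt_queue *q)
{
	return kfifo_len(&q->fifo) / sizeof(void *);
}

static void pkt_queue_free(struct pkt_queue *q)
{
	kfifo_free(&q->fifo);
}

With this layout, error handling reduces to checking the int return of kfifo_alloc(), which is how the converted cq_new() callers in fhci-tds.c handle allocation failure.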
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 9b43b226817f..2678a1624fcc 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -90,14 +90,25 @@ config USB_EHCI_TT_NEWSCHED config USB_EHCI_BIG_ENDIAN_MMIO bool - depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX) + depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX) default y config USB_EHCI_BIG_ENDIAN_DESC bool - depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX) + depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX) default y +config XPS_USB_HCD_XILINX + bool "Use Xilinx usb host EHCI controller core" + depends on USB_EHCI_HCD && (PPC32 || MICROBLAZE) + select USB_EHCI_BIG_ENDIAN_DESC + select USB_EHCI_BIG_ENDIAN_MMIO + ---help--- + Xilinx xps USB host controller core is EHCI compilant and has + transaction translator built-in. It can be configured to either + support both high speed and full speed devices, or high speed + devices only. + config USB_EHCI_FSL bool "Support for Freescale on-chip EHCI USB controller" depends on USB_EHCI_HCD && FSL_SOC @@ -105,6 +116,13 @@ config USB_EHCI_FSL ---help--- Variation of ARC USB block used in some Freescale chips. +config USB_EHCI_MXC + bool "Support for Freescale on-chip EHCI USB controller" + depends on USB_EHCI_HCD && ARCH_MXC + select USB_EHCI_ROOT_HUB_TT + ---help--- + Variation of ARC USB block used in some Freescale chips. + config USB_EHCI_HCD_PPC_OF bool "EHCI support for PPC USB controller on OF platform bus" depends on USB_EHCI_HCD && PPC_OF diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c index ed77be76d6bb..dbfb482a94e3 100644 --- a/drivers/usb/host/ehci-au1xxx.c +++ b/drivers/usb/host/ehci-au1xxx.c @@ -297,7 +297,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops au1xxx_ehci_pmops = { +static const struct dev_pm_ops au1xxx_ehci_pmops = { .suspend = ehci_hcd_au1xxx_drv_suspend, .resume = ehci_hcd_au1xxx_drv_resume, }; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index f5f5601701c9..1ec3857f22e6 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -210,7 +210,7 @@ static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr, if (error) { ehci_halt(ehci); ehci_to_hcd(ehci)->state = HC_STATE_HALT; - ehci_err(ehci, "force halt; handhake %p %08x %08x -> %d\n", + ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n", ptr, mask, done, error); } @@ -549,7 +549,7 @@ static int ehci_init(struct usb_hcd *hcd) /* controllers may cache some of the periodic schedule ... */ hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); if (HCC_ISOC_CACHE(hcc_params)) // full frame cache - ehci->i_thresh = 8; + ehci->i_thresh = 2 + 8; else // N microframes cached ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params); @@ -605,6 +605,8 @@ static int ehci_init(struct usb_hcd *hcd) } ehci->command = temp; + /* Accept arbitrarily long scatter-gather lists */ + hcd->self.sg_tablesize = ~0; return 0; } @@ -785,9 +787,10 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) /* start 20 msec resume signaling from this port, * and make khubd collect PORT_STAT_C_SUSPEND to - * stop that signaling. + * stop that signaling. Use 5 ms extra for safety, + * like usb_port_resume() does. 
*/ - ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); + ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); mod_timer(&hcd->rh_timer, ehci->reset_done[i]); } @@ -1105,11 +1108,21 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ehci_fsl_driver #endif +#ifdef CONFIG_USB_EHCI_MXC +#include "ehci-mxc.c" +#define PLATFORM_DRIVER ehci_mxc_driver +#endif + #ifdef CONFIG_SOC_AU1200 #include "ehci-au1xxx.c" #define PLATFORM_DRIVER ehci_hcd_au1xxx_driver #endif +#ifdef CONFIG_ARCH_OMAP34XX +#include "ehci-omap.c" +#define PLATFORM_DRIVER ehci_hcd_omap_driver +#endif + #ifdef CONFIG_PPC_PS3 #include "ehci-ps3.c" #define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver @@ -1120,6 +1133,11 @@ MODULE_LICENSE ("GPL"); #define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver #endif +#ifdef CONFIG_XPS_USB_HCD_XILINX +#include "ehci-xilinx-of.c" +#define OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver +#endif + #ifdef CONFIG_PLAT_ORION #include "ehci-orion.c" #define PLATFORM_DRIVER ehci_orion_driver diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 1b6f1c0e5cee..c75d9270c752 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -120,9 +120,26 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) del_timer_sync(&ehci->watchdog); del_timer_sync(&ehci->iaa_watchdog); - port = HCS_N_PORTS (ehci->hcs_params); spin_lock_irq (&ehci->lock); + /* Once the controller is stopped, port resumes that are already + * in progress won't complete. Hence if remote wakeup is enabled + * for the root hub and any ports are in the middle of a resume or + * remote wakeup, we must fail the suspend. + */ + if (hcd->self.root_hub->do_remote_wakeup) { + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + if (ehci->reset_done[port] != 0) { + spin_unlock_irq(&ehci->lock); + ehci_dbg(ehci, "suspend failed because " + "port %d is resuming\n", + port + 1); + return -EBUSY; + } + } + } + /* stop schedules, clean any completed work */ if (HC_IS_RUNNING(hcd->state)) { ehci_quiesce (ehci); @@ -138,6 +155,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) */ ehci->bus_suspended = 0; ehci->owned_ports = 0; + port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *reg = &ehci->regs->port_status [port]; u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; @@ -236,7 +254,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd) } if (unlikely(ehci->debug)) { - if (ehci->debug && !dbgp_reset_prep()) + if (!dbgp_reset_prep()) ehci->debug = NULL; else dbgp_external_startup(); diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c new file mode 100644 index 000000000000..35c56f40bdbb --- /dev/null +++ b/drivers/usb/host/ehci-mxc.c @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix + * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/usb/otg.h> + +#include <mach/mxc_ehci.h> + +#define ULPI_VIEWPORT_OFFSET 0x170 +#define PORTSC_OFFSET 0x184 +#define USBMODE_OFFSET 0x1a8 +#define USBMODE_CM_HOST 3 + +struct ehci_mxc_priv { + struct clk *usbclk, *ahbclk; + struct usb_hcd *hcd; +}; + +/* called during probe() after chip reset completes */ +static int ehci_mxc_setup(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + int retval; + + /* EHCI registers start at offset 0x100 */ + ehci->caps = hcd->regs + 0x100; + ehci->regs = hcd->regs + 0x100 + + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); + dbg_hcs_params(ehci, "reset"); + dbg_hcc_params(ehci, "reset"); + + /* cache this readonly data; minimize chip reads */ + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + + retval = ehci_halt(ehci); + if (retval) + return retval; + + /* data structure init */ + retval = ehci_init(hcd); + if (retval) + return retval; + + hcd->has_tt = 1; + + ehci->sbrn = 0x20; + + ehci_reset(ehci); + + ehci_port_power(ehci, 0); + return 0; +} + +static const struct hc_driver ehci_mxc_hc_driver = { + .description = hcd_name, + .product_desc = "Freescale On-Chip EHCI Host Controller", + .hcd_priv_size = sizeof(struct ehci_hcd), + + /* + * generic hardware linkage + */ + .irq = ehci_irq, + .flags = HCD_USB2 | HCD_MEMORY, + + /* + * basic lifecycle operations + */ + .reset = ehci_mxc_setup, + .start = ehci_run, + .stop = ehci_stop, + .shutdown = ehci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .endpoint_disable = ehci_endpoint_disable, + + /* + * scheduling support + */ + .get_frame_number = ehci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, + .bus_suspend = ehci_bus_suspend, + .bus_resume = ehci_bus_resume, + .relinquish_port = ehci_relinquish_port, + .port_handed_over = ehci_port_handed_over, +}; + +static int ehci_mxc_drv_probe(struct platform_device *pdev) +{ + struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; + struct usb_hcd *hcd; + struct resource *res; + int irq, ret, temp; + struct ehci_mxc_priv *priv; + struct device *dev = &pdev->dev; + + dev_info(&pdev->dev, "initializing i.MX USB Controller\n"); + + if (!pdata) { + dev_err(dev, "No platform data given, bailing out.\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + + hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev)); + if (!hcd) + return -ENOMEM; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err_alloc; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Found HC with no register addr. 
Check setup!\n"); + ret = -ENODEV; + goto err_get_resource; + } + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { + dev_dbg(dev, "controller already in use\n"); + ret = -EBUSY; + goto err_request_mem; + } + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + dev_err(dev, "error mapping memory\n"); + ret = -EFAULT; + goto err_ioremap; + } + + /* enable clocks */ + priv->usbclk = clk_get(dev, "usb"); + if (IS_ERR(priv->usbclk)) { + ret = PTR_ERR(priv->usbclk); + goto err_clk; + } + clk_enable(priv->usbclk); + + if (!cpu_is_mx35()) { + priv->ahbclk = clk_get(dev, "usb_ahb"); + if (IS_ERR(priv->ahbclk)) { + ret = PTR_ERR(priv->ahbclk); + goto err_clk_ahb; + } + clk_enable(priv->ahbclk); + } + + /* set USBMODE to host mode */ + temp = readl(hcd->regs + USBMODE_OFFSET); + writel(temp | USBMODE_CM_HOST, hcd->regs + USBMODE_OFFSET); + + /* set up the PORTSCx register */ + writel(pdata->portsc, hcd->regs + PORTSC_OFFSET); + mdelay(10); + + /* setup USBCONTROL. */ + ret = mxc_set_usbcontrol(pdev->id, pdata->flags); + if (ret < 0) + goto err_init; + + /* call platform specific init function */ + if (pdata->init) { + ret = pdata->init(pdev); + if (ret) { + dev_err(dev, "platform init failed\n"); + goto err_init; + } + } + + /* most platforms need some time to settle changed IO settings */ + mdelay(10); + + /* Initialize the transceiver */ + if (pdata->otg) { + pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; + if (otg_init(pdata->otg) != 0) + dev_err(dev, "unable to init transceiver\n"); + else if (otg_set_vbus(pdata->otg, 1) != 0) + dev_err(dev, "unable to enable vbus on transceiver\n"); + } + + priv->hcd = hcd; + platform_set_drvdata(pdev, priv); + + ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); + if (ret) + goto err_add; + + return 0; + +err_add: + if (pdata && pdata->exit) + pdata->exit(pdev); +err_init: + if (priv->ahbclk) { + clk_disable(priv->ahbclk); + clk_put(priv->ahbclk); + } +err_clk_ahb: + clk_disable(priv->usbclk); + clk_put(priv->usbclk); +err_clk: + iounmap(hcd->regs); +err_ioremap: + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); +err_request_mem: +err_get_resource: + kfree(priv); +err_alloc: + usb_put_hcd(hcd); + return ret; +} + +static int __exit ehci_mxc_drv_remove(struct platform_device *pdev) +{ + struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; + struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); + struct usb_hcd *hcd = priv->hcd; + + if (pdata && pdata->exit) + pdata->exit(pdev); + + if (pdata->otg) + otg_shutdown(pdata->otg); + + usb_remove_hcd(hcd); + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + usb_put_hcd(hcd); + platform_set_drvdata(pdev, NULL); + + clk_disable(priv->usbclk); + clk_put(priv->usbclk); + if (priv->ahbclk) { + clk_disable(priv->ahbclk); + clk_put(priv->ahbclk); + } + + kfree(priv); + + return 0; +} + +static void ehci_mxc_drv_shutdown(struct platform_device *pdev) +{ + struct ehci_mxc_priv *priv = platform_get_drvdata(pdev); + struct usb_hcd *hcd = priv->hcd; + + if (hcd->driver->shutdown) + hcd->driver->shutdown(hcd); +} + +MODULE_ALIAS("platform:mxc-ehci"); + +static struct platform_driver ehci_mxc_driver = { + .probe = ehci_mxc_drv_probe, + .remove = __exit_p(ehci_mxc_drv_remove), + .shutdown = ehci_mxc_drv_shutdown, + .driver = { + .name = "mxc-ehci", + }, +}; diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c new file mode 100644 index 
000000000000..74d07f4e8b7d --- /dev/null +++ b/drivers/usb/host/ehci-omap.c @@ -0,0 +1,756 @@ +/* + * ehci-omap.c - driver for USBHOST on OMAP 34xx processor + * + * Bus Glue for OMAP34xx USBHOST 3 port EHCI controller + * Tested on OMAP3430 ES2.0 SDP + * + * Copyright (C) 2007-2008 Texas Instruments, Inc. + * Author: Vikram Pandita <vikram.pandita@ti.com> + * + * Copyright (C) 2009 Nokia Corporation + * Contact: Felipe Balbi <felipe.balbi@nokia.com> + * + * Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * TODO (last updated Feb 23rd, 2009): + * - add kernel-doc + * - enable AUTOIDLE + * - move DPLL5 programming to clock fw + * - add suspend/resume + * - move workarounds to board-files + */ + +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/gpio.h> +#include <plat/usb.h> + +/* + * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES + * Use ehci_omap_readl()/ehci_omap_writel() functions + */ + +/* TLL Register Set */ +#define OMAP_USBTLL_REVISION (0x00) +#define OMAP_USBTLL_SYSCONFIG (0x10) +#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8) +#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3) +#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2) +#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1) +#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0) + +#define OMAP_USBTLL_SYSSTATUS (0x14) +#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0) + +#define OMAP_USBTLL_IRQSTATUS (0x18) +#define OMAP_USBTLL_IRQENABLE (0x1C) + +#define OMAP_TLL_SHARED_CONF (0x30) +#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6) +#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5) +#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2) +#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1) +#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0) + +#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num) +#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11) +#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10) +#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9) +#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8) +#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0) + +#define OMAP_TLL_ULPI_FUNCTION_CTRL(num) (0x804 + 0x100 * num) +#define OMAP_TLL_ULPI_INTERFACE_CTRL(num) (0x807 + 0x100 * num) +#define OMAP_TLL_ULPI_OTG_CTRL(num) (0x80A + 0x100 * num) +#define OMAP_TLL_ULPI_INT_EN_RISE(num) (0x80D + 0x100 * num) +#define OMAP_TLL_ULPI_INT_EN_FALL(num) (0x810 + 0x100 * num) +#define OMAP_TLL_ULPI_INT_STATUS(num) (0x813 + 0x100 * num) +#define OMAP_TLL_ULPI_INT_LATCH(num) (0x814 + 0x100 * num) +#define OMAP_TLL_ULPI_DEBUG(num) (0x815 + 0x100 * num) +#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num) + +#define OMAP_TLL_CHANNEL_COUNT 3 +#define OMAP_TLL_CHANNEL_1_EN_MASK (1 << 1) +#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 2) +#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 4) + +/* 
UHH Register Set */ +#define OMAP_UHH_REVISION (0x00) +#define OMAP_UHH_SYSCONFIG (0x10) +#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12) +#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8) +#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3) +#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2) +#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1) +#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0) + +#define OMAP_UHH_SYSSTATUS (0x14) +#define OMAP_UHH_HOSTCONFIG (0x40) +#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0) +#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0) +#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11) +#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12) +#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2) +#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3) +#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4) +#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5) +#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8) +#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9) +#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10) + +#define OMAP_UHH_DEBUG_CSR (0x44) + +/* EHCI Register Set */ +#define EHCI_INSNREG05_ULPI (0xA4) +#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31 +#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24 +#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22 +#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16 +#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 +#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 + +/*-------------------------------------------------------------------------*/ + +static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val) +{ + __raw_writel(val, base + reg); +} + +static inline u32 ehci_omap_readl(void __iomem *base, u32 reg) +{ + return __raw_readl(base + reg); +} + +static inline void ehci_omap_writeb(void __iomem *base, u8 reg, u8 val) +{ + __raw_writeb(val, base + reg); +} + +static inline u8 ehci_omap_readb(void __iomem *base, u8 reg) +{ + return __raw_readb(base + reg); +} + +/*-------------------------------------------------------------------------*/ + +struct ehci_hcd_omap { + struct ehci_hcd *ehci; + struct device *dev; + + struct clk *usbhost_ick; + struct clk *usbhost2_120m_fck; + struct clk *usbhost1_48m_fck; + struct clk *usbtll_fck; + struct clk *usbtll_ick; + + /* FIXME the following two workarounds are + * board specific not silicon-specific so these + * should be moved to board-file instead. 
+ * + * Maybe someone from TI will know better which + * board is affected and needs the workarounds + * to be applied + */ + + /* gpio for resetting phy */ + int reset_gpio_port[OMAP3_HS_USB_PORTS]; + + /* phy reset workaround */ + int phy_reset; + + /* desired phy_mode: TLL, PHY */ + enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS]; + + void __iomem *uhh_base; + void __iomem *tll_base; + void __iomem *ehci_base; +}; + +/*-------------------------------------------------------------------------*/ + +static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask) +{ + unsigned reg; + int i; + + /* Program the 3 TLL channels upfront */ + for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) { + reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i)); + + /* Disable AutoIdle, BitStuffing and use SDR Mode */ + reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE + | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF + | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); + ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg); + } + + /* Program Common TLL register */ + reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF); + reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON + | OMAP_TLL_SHARED_CONF_USB_DIVRATION + | OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN); + reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN; + + ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg); + + /* Enable channels now */ + for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) { + reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i)); + + /* Enable only the reg that is needed */ + if (!(tll_channel_mask & 1<<i)) + continue; + + reg |= OMAP_TLL_CHANNEL_CONF_CHANEN; + ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg); + + ehci_omap_writeb(omap->tll_base, + OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe); + dev_dbg(omap->dev, "ULPI_SCRATCH_REG[ch=%d]= 0x%02x\n", + i+1, ehci_omap_readb(omap->tll_base, + OMAP_TLL_ULPI_SCRATCH_REGISTER(i))); + } +} + +/*-------------------------------------------------------------------------*/ + +/* omap_start_ehc + * - Start the TI USBHOST controller + */ +static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + u8 tll_ch_mask = 0; + unsigned reg = 0; + int ret = 0; + + dev_dbg(omap->dev, "starting TI EHCI USB Controller\n"); + + /* Enable Clocks for USBHOST */ + omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick"); + if (IS_ERR(omap->usbhost_ick)) { + ret = PTR_ERR(omap->usbhost_ick); + goto err_host_ick; + } + clk_enable(omap->usbhost_ick); + + omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck"); + if (IS_ERR(omap->usbhost2_120m_fck)) { + ret = PTR_ERR(omap->usbhost2_120m_fck); + goto err_host_120m_fck; + } + clk_enable(omap->usbhost2_120m_fck); + + omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck"); + if (IS_ERR(omap->usbhost1_48m_fck)) { + ret = PTR_ERR(omap->usbhost1_48m_fck); + goto err_host_48m_fck; + } + clk_enable(omap->usbhost1_48m_fck); + + if (omap->phy_reset) { + /* Refer: ISSUE1 */ + if (gpio_is_valid(omap->reset_gpio_port[0])) { + gpio_request(omap->reset_gpio_port[0], + "USB1 PHY reset"); + gpio_direction_output(omap->reset_gpio_port[0], 0); + } + + if (gpio_is_valid(omap->reset_gpio_port[1])) { + gpio_request(omap->reset_gpio_port[1], + "USB2 PHY reset"); + gpio_direction_output(omap->reset_gpio_port[1], 0); + } + + /* Hold the PHY in RESET for enough time till DIR is high */ + udelay(10); + } + + /* Configure TLL for 60Mhz clk for ULPI */ + omap->usbtll_fck = 
clk_get(omap->dev, "usbtll_fck"); + if (IS_ERR(omap->usbtll_fck)) { + ret = PTR_ERR(omap->usbtll_fck); + goto err_tll_fck; + } + clk_enable(omap->usbtll_fck); + + omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick"); + if (IS_ERR(omap->usbtll_ick)) { + ret = PTR_ERR(omap->usbtll_ick); + goto err_tll_ick; + } + clk_enable(omap->usbtll_ick); + + /* perform TLL soft reset, and wait until reset is complete */ + ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, + OMAP_USBTLL_SYSCONFIG_SOFTRESET); + + /* Wait for TLL reset to complete */ + while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS) + & OMAP_USBTLL_SYSSTATUS_RESETDONE)) { + cpu_relax(); + + if (time_after(jiffies, timeout)) { + dev_dbg(omap->dev, "operation timed out\n"); + ret = -EINVAL; + goto err_sys_status; + } + } + + dev_dbg(omap->dev, "TLL RESET DONE\n"); + + /* (1<<3) = no idle mode only for initial debugging */ + ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, + OMAP_USBTLL_SYSCONFIG_ENAWAKEUP | + OMAP_USBTLL_SYSCONFIG_SIDLEMODE | + OMAP_USBTLL_SYSCONFIG_CACTIVITY); + + + /* Put UHH in NoIdle/NoStandby mode */ + reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG); + reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP + | OMAP_UHH_SYSCONFIG_SIDLEMODE + | OMAP_UHH_SYSCONFIG_CACTIVITY + | OMAP_UHH_SYSCONFIG_MIDLEMODE); + reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE; + + ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg); + + reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG); + + /* setup ULPI bypass and burst configurations */ + reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN + | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN + | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN); + reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN; + + if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN) + reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS; + if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN) + reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS; + if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN) + reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS; + + /* Bypass the TLL module for PHY mode operation */ + if (omap_rev() <= OMAP3430_REV_ES2_1) { + dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1 \n"); + if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) || + (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) || + (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; + else + reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; + } else { + dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n"); + if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; + else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) + reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; + + if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; + else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) + reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; + + if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY) + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; + else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL) + reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; + + } + ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg); + dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg); + + + if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) || + (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) || + (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) { + + if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) + tll_ch_mask |= OMAP_TLL_CHANNEL_1_EN_MASK; + if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) + tll_ch_mask 
|= OMAP_TLL_CHANNEL_2_EN_MASK; + if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL) + tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK; + + /* Enable UTMI mode for required TLL channels */ + omap_usb_utmi_init(omap, tll_ch_mask); + } + + if (omap->phy_reset) { + /* Refer ISSUE1: + * Hold the PHY in RESET for enough time till + * PHY is settled and ready + */ + udelay(10); + + if (gpio_is_valid(omap->reset_gpio_port[0])) + gpio_set_value(omap->reset_gpio_port[0], 1); + + if (gpio_is_valid(omap->reset_gpio_port[1])) + gpio_set_value(omap->reset_gpio_port[1], 1); + } + + return 0; + +err_sys_status: + clk_disable(omap->usbtll_ick); + clk_put(omap->usbtll_ick); + +err_tll_ick: + clk_disable(omap->usbtll_fck); + clk_put(omap->usbtll_fck); + +err_tll_fck: + clk_disable(omap->usbhost1_48m_fck); + clk_put(omap->usbhost1_48m_fck); + + if (omap->phy_reset) { + if (gpio_is_valid(omap->reset_gpio_port[0])) + gpio_free(omap->reset_gpio_port[0]); + + if (gpio_is_valid(omap->reset_gpio_port[1])) + gpio_free(omap->reset_gpio_port[1]); + } + +err_host_48m_fck: + clk_disable(omap->usbhost2_120m_fck); + clk_put(omap->usbhost2_120m_fck); + +err_host_120m_fck: + clk_disable(omap->usbhost_ick); + clk_put(omap->usbhost_ick); + +err_host_ick: + return ret; +} + +static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n"); + + /* Reset OMAP modules for insmod/rmmod to work */ + ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, + OMAP_UHH_SYSCONFIG_SOFTRESET); + while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) + & (1 << 0))) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) + & (1 << 1))) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS) + & (1 << 2))) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1)); + + while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS) + & (1 << 0))) { + cpu_relax(); + + if (time_after(jiffies, timeout)) + dev_dbg(omap->dev, "operation timed out\n"); + } + + if (omap->usbtll_fck != NULL) { + clk_disable(omap->usbtll_fck); + clk_put(omap->usbtll_fck); + omap->usbtll_fck = NULL; + } + + if (omap->usbhost_ick != NULL) { + clk_disable(omap->usbhost_ick); + clk_put(omap->usbhost_ick); + omap->usbhost_ick = NULL; + } + + if (omap->usbhost1_48m_fck != NULL) { + clk_disable(omap->usbhost1_48m_fck); + clk_put(omap->usbhost1_48m_fck); + omap->usbhost1_48m_fck = NULL; + } + + if (omap->usbhost2_120m_fck != NULL) { + clk_disable(omap->usbhost2_120m_fck); + clk_put(omap->usbhost2_120m_fck); + omap->usbhost2_120m_fck = NULL; + } + + if (omap->usbtll_ick != NULL) { + clk_disable(omap->usbtll_ick); + clk_put(omap->usbtll_ick); + omap->usbtll_ick = NULL; + } + + if (omap->phy_reset) { + if (gpio_is_valid(omap->reset_gpio_port[0])) + gpio_free(omap->reset_gpio_port[0]); + + if (gpio_is_valid(omap->reset_gpio_port[1])) + gpio_free(omap->reset_gpio_port[1]); + } + + dev_dbg(omap->dev, "Clock to USB host has been disabled\n"); +} + +/*-------------------------------------------------------------------------*/ + +static const struct hc_driver ehci_omap_hc_driver; + +/* configure so an HC device and id 
are always provided */ +/* always called with process context; sleeping is OK */ + +/** + * ehci_hcd_omap_probe - initialize TI-based HCDs + * + * Allocates basic resources for this USB host controller, and + * then invokes the start() method for the HCD associated with it + * through the hotplug entry's driver_data. + */ +static int ehci_hcd_omap_probe(struct platform_device *pdev) +{ + struct ehci_hcd_omap_platform_data *pdata = pdev->dev.platform_data; + struct ehci_hcd_omap *omap; + struct resource *res; + struct usb_hcd *hcd; + + int irq = platform_get_irq(pdev, 0); + int ret = -ENODEV; + + if (!pdata) { + dev_dbg(&pdev->dev, "missing platform_data\n"); + goto err_pdata; + } + + if (usb_disabled()) + goto err_disabled; + + omap = kzalloc(sizeof(*omap), GFP_KERNEL); + if (!omap) { + ret = -ENOMEM; + goto err_disabled; + } + + hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev, + dev_name(&pdev->dev)); + if (!hcd) { + dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret); + ret = -ENOMEM; + goto err_create_hcd; + } + + platform_set_drvdata(pdev, omap); + omap->dev = &pdev->dev; + omap->phy_reset = pdata->phy_reset; + omap->reset_gpio_port[0] = pdata->reset_gpio_port[0]; + omap->reset_gpio_port[1] = pdata->reset_gpio_port[1]; + omap->reset_gpio_port[2] = pdata->reset_gpio_port[2]; + omap->port_mode[0] = pdata->port_mode[0]; + omap->port_mode[1] = pdata->port_mode[1]; + omap->port_mode[2] = pdata->port_mode[2]; + omap->ehci = hcd_to_ehci(hcd); + omap->ehci->sbrn = 0x20; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + dev_err(&pdev->dev, "EHCI ioremap failed\n"); + ret = -ENOMEM; + goto err_ioremap; + } + + /* we know this is the memory we want, no need to ioremap again */ + omap->ehci->caps = hcd->regs; + omap->ehci_base = hcd->regs; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + omap->uhh_base = ioremap(res->start, resource_size(res)); + if (!omap->uhh_base) { + dev_err(&pdev->dev, "UHH ioremap failed\n"); + ret = -ENOMEM; + goto err_uhh_ioremap; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + omap->tll_base = ioremap(res->start, resource_size(res)); + if (!omap->tll_base) { + dev_err(&pdev->dev, "TLL ioremap failed\n"); + ret = -ENOMEM; + goto err_tll_ioremap; + } + + ret = omap_start_ehc(omap, hcd); + if (ret) { + dev_dbg(&pdev->dev, "failed to start ehci\n"); + goto err_start; + } + + omap->ehci->regs = hcd->regs + + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase)); + + /* cache this readonly data; minimize chip reads */ + omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params); + + /* SET 1 micro-frame Interrupt interval */ + writel(readl(&omap->ehci->regs->command) | (1 << 16), + &omap->ehci->regs->command); + + ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); + if (ret) { + dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); + goto err_add_hcd; + } + + return 0; + +err_add_hcd: + omap_stop_ehc(omap, hcd); + +err_start: + iounmap(omap->tll_base); + +err_tll_ioremap: + iounmap(omap->uhh_base); + +err_uhh_ioremap: + iounmap(hcd->regs); + +err_ioremap: + usb_put_hcd(hcd); + +err_create_hcd: + kfree(omap); +err_disabled: +err_pdata: + return ret; +} + +/* may be called without controller electrically present */ +/* may be called with controller, bus, and devices active */ + +/** + * ehci_hcd_omap_remove - shutdown processing for EHCI HCDs + * @pdev: USB Host 
Controller being removed + * + * Reverses the effect of usb_ehci_hcd_omap_probe(), first invoking + * the HCD's stop() method. It is always called from a thread + * context, normally "rmmod", "apmd", or something similar. + */ +static int ehci_hcd_omap_remove(struct platform_device *pdev) +{ + struct ehci_hcd_omap *omap = platform_get_drvdata(pdev); + struct usb_hcd *hcd = ehci_to_hcd(omap->ehci); + + usb_remove_hcd(hcd); + omap_stop_ehc(omap, hcd); + iounmap(hcd->regs); + iounmap(omap->tll_base); + iounmap(omap->uhh_base); + usb_put_hcd(hcd); + + return 0; +} + +static void ehci_hcd_omap_shutdown(struct platform_device *pdev) +{ + struct ehci_hcd_omap *omap = platform_get_drvdata(pdev); + struct usb_hcd *hcd = ehci_to_hcd(omap->ehci); + + if (hcd->driver->shutdown) + hcd->driver->shutdown(hcd); +} + +static struct platform_driver ehci_hcd_omap_driver = { + .probe = ehci_hcd_omap_probe, + .remove = ehci_hcd_omap_remove, + .shutdown = ehci_hcd_omap_shutdown, + /*.suspend = ehci_hcd_omap_suspend, */ + /*.resume = ehci_hcd_omap_resume, */ + .driver = { + .name = "ehci-omap", + } +}; + +/*-------------------------------------------------------------------------*/ + +static const struct hc_driver ehci_omap_hc_driver = { + .description = hcd_name, + .product_desc = "OMAP-EHCI Host Controller", + .hcd_priv_size = sizeof(struct ehci_hcd), + + /* + * generic hardware linkage + */ + .irq = ehci_irq, + .flags = HCD_MEMORY | HCD_USB2, + + /* + * basic lifecycle operations + */ + .reset = ehci_init, + .start = ehci_run, + .stop = ehci_stop, + .shutdown = ehci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .endpoint_disable = ehci_endpoint_disable, + .endpoint_reset = ehci_endpoint_reset, + + /* + * scheduling support + */ + .get_frame_number = ehci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, + .bus_suspend = ehci_bus_suspend, + .bus_resume = ehci_bus_resume, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, +}; + +MODULE_ALIAS("platform:omap-ehci"); +MODULE_AUTHOR("Texas Instruments, Inc."); +MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>"); + diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 139a2cc3f641..89521775c567 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -616,9 +616,11 @@ qh_urb_transaction ( ) { struct ehci_qtd *qtd, *qtd_prev; dma_addr_t buf; - int len, maxpacket; + int len, this_sg_len, maxpacket; int is_input; u32 token; + int i; + struct scatterlist *sg; /* * URBs map to sequences of QTDs: one logical transaction @@ -659,7 +661,20 @@ qh_urb_transaction ( /* * data transfer stage: buffer setup */ - buf = urb->transfer_dma; + i = urb->num_sgs; + if (len > 0 && i > 0) { + sg = urb->sg->sg; + buf = sg_dma_address(sg); + + /* urb->transfer_buffer_length may be smaller than the + * size of the scatterlist (or vice versa) + */ + this_sg_len = min_t(int, sg_dma_len(sg), len); + } else { + sg = NULL; + buf = urb->transfer_dma; + this_sg_len = len; + } if (is_input) token |= (1 /* "in" */ << 8); @@ -675,7 +690,9 @@ qh_urb_transaction ( for (;;) { int this_qtd_len; - this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket); + this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token, + maxpacket); + this_sg_len -= this_qtd_len; len -= this_qtd_len; buf += this_qtd_len; @@ -691,8 +708,13 @@ qh_urb_transaction ( if ((maxpacket & (this_qtd_len + 
(maxpacket - 1))) == 0) token ^= QTD_TOGGLE; - if (likely (len <= 0)) - break; + if (likely(this_sg_len <= 0)) { + if (--i <= 0 || len <= 0) + break; + sg = sg_next(sg); + buf = sg_dma_address(sg); + this_sg_len = min_t(int, sg_dma_len(sg), len); + } qtd_prev = qtd; qtd = ehci_qtd_alloc (ehci, flags); @@ -827,9 +849,10 @@ qh_make ( * But interval 1 scheduling is simpler, and * includes high bandwidth. */ - dbg ("intr period %d uframes, NYET!", - urb->interval); - goto done; + urb->interval = 1; + } else if (qh->period > ehci->periodic_size) { + qh->period = ehci->periodic_size; + urb->interval = qh->period << 3; } } else { int think_time; @@ -852,6 +875,10 @@ qh_make ( usb_calc_bus_time (urb->dev->speed, is_input, 0, max_packet (maxp))); qh->period = urb->interval; + if (qh->period > ehci->periodic_size) { + qh->period = ehci->periodic_size; + urb->interval = qh->period; + } } } diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index a5535b5e3fe2..1e391e624c8a 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1385,7 +1385,7 @@ sitd_slot_ok ( * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler! */ -#define SCHEDULE_SLOP 10 /* frames */ +#define SCHEDULE_SLOP 80 /* microframes */ static int iso_stream_schedule ( @@ -1394,12 +1394,13 @@ iso_stream_schedule ( struct ehci_iso_stream *stream ) { - u32 now, start, max, period; + u32 now, next, start, period; int status; unsigned mod = ehci->periodic_size << 3; struct ehci_iso_sched *sched = urb->hcpriv; + struct pci_dev *pdev; - if (sched->span > (mod - 8 * SCHEDULE_SLOP)) { + if (sched->span > (mod - SCHEDULE_SLOP)) { ehci_dbg (ehci, "iso request %p too long\n", urb); status = -EFBIG; goto fail; @@ -1418,26 +1419,35 @@ iso_stream_schedule ( now = ehci_readl(ehci, &ehci->regs->frame_index) % mod; - /* when's the last uframe this urb could start? */ - max = now + mod; - /* Typical case: reuse current schedule, stream is still active. * Hopefully there are no gaps from the host falling behind * (irq delays etc), but if there are we'll take the next * slot in the schedule, implicitly assuming URB_ISO_ASAP. */ if (likely (!list_empty (&stream->td_list))) { + pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller); start = stream->next_uframe; - if (start < now) - start += mod; + + /* For high speed devices, allow scheduling within the + * isochronous scheduling threshold. For full speed devices, + * don't. (Work around for Intel ICH9 bug.) + */ + if (!stream->highspeed && + pdev->vendor == PCI_VENDOR_ID_INTEL) + next = now + ehci->i_thresh; + else + next = now; /* Fell behind (by up to twice the slop amount)? */ - if (start >= max - 2 * 8 * SCHEDULE_SLOP) + if (((start - next) & (mod - 1)) >= + mod - 2 * SCHEDULE_SLOP) start += period * DIV_ROUND_UP( - max - start, period) - mod; + (next - start) & (mod - 1), + period); /* Tried to schedule too far into the future? */ - if (unlikely((start + sched->span) >= max)) { + if (unlikely(((start - now) & (mod - 1)) + sched->span + >= mod - 2 * SCHEDULE_SLOP)) { status = -EFBIG; goto fail; } @@ -1451,7 +1461,7 @@ iso_stream_schedule ( * can also help high bandwidth if the dma and irq loads don't * jump until after the queue is primed. */ - start = SCHEDULE_SLOP * 8 + (now & ~0x07); + start = SCHEDULE_SLOP + (now & ~0x07); start %= mod; stream->next_uframe = start; @@ -1482,7 +1492,7 @@ iso_stream_schedule ( /* no room in the schedule */ ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n", list_empty (&stream->td_list) ? 
"" : "re", - urb, now, max); + urb, now, now + mod); status = -ENOSPC; fail: diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c new file mode 100644 index 000000000000..a5861531ad3e --- /dev/null +++ b/drivers/usb/host/ehci-xilinx-of.c @@ -0,0 +1,300 @@ +/* + * EHCI HCD (Host Controller Driver) for USB. + * + * Bus Glue for Xilinx EHCI core on the of_platform bus + * + * Copyright (c) 2009 Xilinx, Inc. + * + * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com> + * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de> + * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/signal.h> + +#include <linux/of.h> +#include <linux/of_platform.h> + +/** + * ehci_xilinx_of_setup - Initialize the device for ehci_reset() + * @hcd: Pointer to the usb_hcd device to which the host controller bound + * + * called during probe() after chip reset completes. + */ +static int ehci_xilinx_of_setup(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + int retval; + + retval = ehci_halt(ehci); + if (retval) + return retval; + + retval = ehci_init(hcd); + if (retval) + return retval; + + ehci->sbrn = 0x20; + + return ehci_reset(ehci); +} + +/** + * ehci_xilinx_port_handed_over - hand the port out if failed to enable it + * @hcd: Pointer to the usb_hcd device to which the host controller bound + * @portnum:Port number to which the device is attached. + * + * This function is used as a place to tell the user that the Xilinx USB host + * controller does support LS devices. And in an HS only configuration, it + * does not support FS devices either. It is hoped that this can help a + * confused user. + * + * There are cases when the host controller fails to enable the port due to, + * for example, insufficient power that can be supplied to the device from + * the USB bus. In those cases, the messages printed here are not helpful. 
+ */ +static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum) +{ + dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum); + if (hcd->has_tt) { + dev_warn(hcd->self.controller, + "Maybe you have connected a low speed device?\n"); + + dev_warn(hcd->self.controller, + "We do not support low speed devices\n"); + } else { + dev_warn(hcd->self.controller, + "Maybe your device is not a high speed device?\n"); + dev_warn(hcd->self.controller, + "The USB host controller does not support full speed " + "nor low speed devices\n"); + dev_warn(hcd->self.controller, + "You can reconfigure the host controller to have " + "full speed support\n"); + } + + return 0; +} + + +static const struct hc_driver ehci_xilinx_of_hc_driver = { + .description = hcd_name, + .product_desc = "OF EHCI", + .hcd_priv_size = sizeof(struct ehci_hcd), + + /* + * generic hardware linkage + */ + .irq = ehci_irq, + .flags = HCD_MEMORY | HCD_USB2, + + /* + * basic lifecycle operations + */ + .reset = ehci_xilinx_of_setup, + .start = ehci_run, + .stop = ehci_stop, + .shutdown = ehci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .endpoint_disable = ehci_endpoint_disable, + + /* + * scheduling support + */ + .get_frame_number = ehci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, +#ifdef CONFIG_PM + .bus_suspend = ehci_bus_suspend, + .bus_resume = ehci_bus_resume, +#endif + .relinquish_port = NULL, + .port_handed_over = ehci_xilinx_port_handed_over, + + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, +}; + +/** + * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller + * @op: pointer to the of_device to which the host controller bound + * @match: pointer to of_device_id structure, not used + * + * This function requests resources and sets up appropriate properties for the + * host controller. Because the Xilinx USB host controller can be configured + * as HS only or HS/FS only, it checks the configuration in the device tree + * entry, and sets an appropriate value for hcd->has_tt. + */ +static int __devinit +ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match) +{ + struct device_node *dn = op->node; + struct usb_hcd *hcd; + struct ehci_hcd *ehci; + struct resource res; + int irq; + int rv; + int *value; + + if (usb_disabled()) + return -ENODEV; + + dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n"); + + rv = of_address_to_resource(dn, 0, &res); + if (rv) + return rv; + + hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev, + "XILINX-OF USB"); + if (!hcd) + return -ENOMEM; + + hcd->rsrc_start = res.start; + hcd->rsrc_len = res.end - res.start + 1; + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { + printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); + rv = -EBUSY; + goto err_rmr; + } + + irq = irq_of_parse_and_map(dn, 0); + if (irq == NO_IRQ) { + printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); + rv = -EBUSY; + goto err_irq; + } + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + printk(KERN_ERR __FILE__ ": ioremap failed\n"); + rv = -ENOMEM; + goto err_ioremap; + } + + ehci = hcd_to_ehci(hcd); + + /* This core always has big-endian register interface and uses + * big-endian memory descriptors. 
+ */ + ehci->big_endian_mmio = 1; + ehci->big_endian_desc = 1; + + /* Check whether the FS support option is selected in the hardware. + */ + value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL); + if (value && (*value == 1)) { + ehci_dbg(ehci, "USB host controller supports FS devices\n"); + hcd->has_tt = 1; + } else { + ehci_dbg(ehci, + "USB host controller is HS only\n"); + hcd->has_tt = 0; + } + + /* Debug registers are at the first 0x100 region + */ + ehci->caps = hcd->regs + 0x100; + ehci->regs = hcd->regs + 0x100 + + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); + + /* cache this readonly data; minimize chip reads */ + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + + rv = usb_add_hcd(hcd, irq, 0); + if (rv == 0) + return 0; + + iounmap(hcd->regs); + +err_ioremap: +err_irq: + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); +err_rmr: + usb_put_hcd(hcd); + + return rv; +} + +/** + * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources + * @op: pointer to of_device structure that is to be removed + * + * Remove the hcd structure, and release resources that has been requested + * during probe. + */ +static int ehci_hcd_xilinx_of_remove(struct of_device *op) +{ + struct usb_hcd *hcd = dev_get_drvdata(&op->dev); + dev_set_drvdata(&op->dev, NULL); + + dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n"); + + usb_remove_hcd(hcd); + + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + + usb_put_hcd(hcd); + + return 0; +} + +/** + * ehci_hcd_xilinx_of_shutdown - shutdown the hcd + * @op: pointer to of_device structure that is to be removed + * + * Properly shutdown the hcd, call driver's shutdown routine. + */ +static int ehci_hcd_xilinx_of_shutdown(struct of_device *op) +{ + struct usb_hcd *hcd = dev_get_drvdata(&op->dev); + + if (hcd->driver->shutdown) + hcd->driver->shutdown(hcd); + + return 0; +} + + +static struct of_device_id ehci_hcd_xilinx_of_match[] = { + {.compatible = "xlnx,xps-usb-host-1.00.a",}, + {}, +}; +MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match); + +static struct of_platform_driver ehci_hcd_xilinx_of_driver = { + .name = "xilinx-of-ehci", + .match_table = ehci_hcd_xilinx_of_match, + .probe = ehci_hcd_xilinx_of_probe, + .remove = ehci_hcd_xilinx_of_remove, + .shutdown = ehci_hcd_xilinx_of_shutdown, + .driver = { + .name = "xilinx-of-ehci", + .owner = THIS_MODULE, + }, +}; diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c index 0951818ef93b..78e7c3cfcb72 100644 --- a/drivers/usb/host/fhci-hcd.c +++ b/drivers/usb/host/fhci-hcd.c @@ -242,9 +242,10 @@ err: static void fhci_usb_free(void *lld) { struct fhci_usb *usb = lld; - struct fhci_hcd *fhci = usb->fhci; + struct fhci_hcd *fhci; if (usb) { + fhci = usb->fhci; fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF); fhci_ep0_free(usb); kfree(usb->actual_frame); diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c index 62a226b61670..ff43747a614f 100644 --- a/drivers/usb/host/fhci-sched.c +++ b/drivers/usb/host/fhci-sched.c @@ -37,7 +37,7 @@ static void recycle_frame(struct fhci_usb *usb, struct packet *pkt) pkt->info = 0; pkt->priv_data = NULL; - cq_put(usb->ep0->empty_frame_Q, pkt); + cq_put(&usb->ep0->empty_frame_Q, pkt); } /* confirm submitted packet */ @@ -57,7 +57,7 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt) if ((td->data + td->actual_len) && trans_len) memcpy(td->data + td->actual_len, pkt->data, trans_len); - cq_put(usb->ep0->dummy_packets_Q, pkt->data); + 
cq_put(&usb->ep0->dummy_packets_Q, pkt->data); } recycle_frame(usb, pkt); @@ -213,7 +213,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) } /* update frame object fields before transmitting */ - pkt = cq_get(usb->ep0->empty_frame_Q); + pkt = cq_get(&usb->ep0->empty_frame_Q); if (!pkt) { fhci_dbg(usb->fhci, "there is no empty frame\n"); return -1; @@ -222,7 +222,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) pkt->info = 0; if (data == NULL) { - data = cq_get(usb->ep0->dummy_packets_Q); + data = cq_get(&usb->ep0->dummy_packets_Q); BUG_ON(!data); pkt->info = PKT_DUMMY_PACKET; } @@ -246,7 +246,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) list_del_init(&td->frame_lh); td->status = USB_TD_OK; if (pkt->info & PKT_DUMMY_PACKET) - cq_put(usb->ep0->dummy_packets_Q, pkt->data); + cq_put(&usb->ep0->dummy_packets_Q, pkt->data); recycle_frame(usb, pkt); usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD); fhci_err(usb->fhci, "host transaction failed\n"); @@ -627,7 +627,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd) /* - * Process normal completions(error or sucess) and clean the schedule. + * Process normal completions(error or success) and clean the schedule. * * This is the main path for handing urbs back to drivers. The only other patth * is process_del_list(),which unlinks URBs by scanning EDs,instead of scanning diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c index b40332290319..d224ab467a40 100644 --- a/drivers/usb/host/fhci-tds.c +++ b/drivers/usb/host/fhci-tds.c @@ -106,33 +106,33 @@ void fhci_ep0_free(struct fhci_usb *usb) cpm_muram_free(cpm_muram_offset(ep->td_base)); if (ep->conf_frame_Q) { - size = cq_howmany(ep->conf_frame_Q); + size = cq_howmany(&ep->conf_frame_Q); for (; size; size--) { - struct packet *pkt = cq_get(ep->conf_frame_Q); + struct packet *pkt = cq_get(&ep->conf_frame_Q); kfree(pkt); } - cq_delete(ep->conf_frame_Q); + cq_delete(&ep->conf_frame_Q); } if (ep->empty_frame_Q) { - size = cq_howmany(ep->empty_frame_Q); + size = cq_howmany(&ep->empty_frame_Q); for (; size; size--) { - struct packet *pkt = cq_get(ep->empty_frame_Q); + struct packet *pkt = cq_get(&ep->empty_frame_Q); kfree(pkt); } - cq_delete(ep->empty_frame_Q); + cq_delete(&ep->empty_frame_Q); } if (ep->dummy_packets_Q) { - size = cq_howmany(ep->dummy_packets_Q); + size = cq_howmany(&ep->dummy_packets_Q); for (; size; size--) { - u8 *buff = cq_get(ep->dummy_packets_Q); + u8 *buff = cq_get(&ep->dummy_packets_Q); kfree(buff); } - cq_delete(ep->dummy_packets_Q); + cq_delete(&ep->dummy_packets_Q); } kfree(ep); @@ -175,10 +175,9 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem, ep->td_base = cpm_muram_addr(ep_offset); /* zero all queue pointers */ - ep->conf_frame_Q = cq_new(ring_len + 2); - ep->empty_frame_Q = cq_new(ring_len + 2); - ep->dummy_packets_Q = cq_new(ring_len + 2); - if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) { + if (cq_new(&ep->conf_frame_Q, ring_len + 2) || + cq_new(&ep->empty_frame_Q, ring_len + 2) || + cq_new(&ep->dummy_packets_Q, ring_len + 2)) { err_for = "frame_queues"; goto err; } @@ -199,8 +198,8 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem, err_for = "buffer"; goto err; } - cq_put(ep->empty_frame_Q, pkt); - cq_put(ep->dummy_packets_Q, buff); + cq_put(&ep->empty_frame_Q, pkt); + cq_put(&ep->dummy_packets_Q, buff); } /* we put the endpoint parameter RAM right behind the TD ring */ @@ -319,7 +318,7 @@ 
static void fhci_td_transaction_confirm(struct fhci_usb *usb) if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W)) continue; - pkt = cq_get(ep->conf_frame_Q); + pkt = cq_get(&ep->conf_frame_Q); if (!pkt) fhci_err(usb->fhci, "no frame to confirm\n"); @@ -460,9 +459,9 @@ u32 fhci_host_transaction(struct fhci_usb *usb, out_be16(&td->length, pkt->len); /* put the frame to the confirmation queue */ - cq_put(ep->conf_frame_Q, pkt); + cq_put(&ep->conf_frame_Q, pkt); - if (cq_howmany(ep->conf_frame_Q) == 1) + if (cq_howmany(&ep->conf_frame_Q) == 1) out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO); return 0; diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h index 7116284ed21a..72dae1c5ab38 100644 --- a/drivers/usb/host/fhci.h +++ b/drivers/usb/host/fhci.h @@ -423,9 +423,9 @@ struct endpoint { struct usb_td __iomem *td_base; /* first TD in the ring */ struct usb_td __iomem *conf_td; /* next TD for confirm after transac */ struct usb_td __iomem *empty_td;/* next TD for new transaction req. */ - struct kfifo *empty_frame_Q; /* Empty frames list to use */ - struct kfifo *conf_frame_Q; /* frames passed to TDs,waiting for tx */ - struct kfifo *dummy_packets_Q;/* dummy packets for the CRC overun */ + struct kfifo empty_frame_Q; /* Empty frames list to use */ + struct kfifo conf_frame_Q; /* frames passed to TDs,waiting for tx */ + struct kfifo dummy_packets_Q;/* dummy packets for the CRC overun */ bool already_pushed_dummy_bd; }; @@ -493,9 +493,9 @@ static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci) } /* fifo of pointers */ -static inline struct kfifo *cq_new(int size) +static inline int cq_new(struct kfifo *fifo, int size) { - return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL); + return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL); } static inline void cq_delete(struct kfifo *kfifo) @@ -505,19 +505,19 @@ static inline void cq_delete(struct kfifo *kfifo) static inline unsigned int cq_howmany(struct kfifo *kfifo) { - return __kfifo_len(kfifo) / sizeof(void *); + return kfifo_len(kfifo) / sizeof(void *); } static inline int cq_put(struct kfifo *kfifo, void *p) { - return __kfifo_put(kfifo, (void *)&p, sizeof(p)); + return kfifo_in(kfifo, (void *)&p, sizeof(p)); } static inline void *cq_get(struct kfifo *kfifo) { void *p = NULL; - __kfifo_get(kfifo, (void *)&p, sizeof(p)); + kfifo_out(kfifo, (void *)&p, sizeof(p)); return p; } diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 5c774ab98252..42971657fde2 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -80,7 +80,7 @@ #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/io.h> -#include <linux/bitops.h> +#include <linux/bitmap.h> #include <asm/irq.h> #include <asm/system.h> @@ -190,10 +190,8 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep, u16 len) { int ptd_offset = -EINVAL; - int index; int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1; - int found = -1; - int last = -1; + int found; BUG_ON(len > epq->buf_size); @@ -205,20 +203,9 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq, epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map); BUG_ON(ep->num_ptds != 0); - for (index = 0; index <= epq->buf_count - num_ptds; index++) { - if (test_bit(index, &epq->buf_map)) - continue; - found = index; - for (last = index + 1; last < index + num_ptds; last++) { - if (test_bit(last, &epq->buf_map)) { - found = -1; - break; - } - } - if (found >= 0) - break; - } - 
if (found < 0) + found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0, + num_ptds, 0); + if (found >= epq->buf_count) return -EOVERFLOW; DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__, @@ -230,8 +217,7 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq, epq->buf_avail -= num_ptds; BUG_ON(epq->buf_avail > epq->buf_count); ep->ptd_index = found; - for (index = found; index < last; index++) - __set_bit(index, &epq->buf_map); + bitmap_set(&epq->buf_map, found, num_ptds); DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n", __func__, epq->name, ep->ptd_index, ep->ptd_offset, epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map); @@ -2284,10 +2270,10 @@ static int isp1362_mem_config(struct usb_hcd *hcd) dev_info(hcd->self.controller, "ISP1362 Memory usage:\n"); dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n", istl_size / 2, istl_size, 0, istl_size / 2); - dev_info(hcd->self.controller, " INTL: %4d * (%3lu+8): %4d @ $%04x\n", + dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n", ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE, intl_size, istl_size); - dev_info(hcd->self.controller, " ATL : %4d * (%3lu+8): %4d @ $%04x\n", + dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n", atl_buffers, atl_blksize - PTD_HEADER_SIZE, atl_size, istl_size + intl_size); dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total, @@ -2711,6 +2697,8 @@ static int __init isp1362_probe(struct platform_device *pdev) void __iomem *data_reg; int irq; int retval = 0; + struct resource *irq_res; + unsigned int irq_flags = 0; /* basic sanity checks first. board-specific init logic should * have initialized this the three resources and probably board @@ -2724,11 +2712,12 @@ static int __init isp1362_probe(struct platform_device *pdev) data = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = platform_get_resource(pdev, IORESOURCE_MEM, 1); - irq = platform_get_irq(pdev, 0); - if (!addr || !data || irq < 0) { + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!addr || !data || !irq_res) { retval = -ENODEV; goto err1; } + irq = irq_res->start; #ifdef CONFIG_USB_HCD_DMA if (pdev->dev.dma_mask) { @@ -2795,12 +2784,16 @@ static int __init isp1362_probe(struct platform_device *pdev) } #endif -#ifdef CONFIG_ARM - if (isp1362_hcd->board) - set_irq_type(irq, isp1362_hcd->board->int_act_high ? 
IRQT_RISING : IRQT_FALLING); -#endif + if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE) + irq_flags |= IRQF_TRIGGER_RISING; + if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE) + irq_flags |= IRQF_TRIGGER_FALLING; + if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL) + irq_flags |= IRQF_TRIGGER_HIGH; + if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL) + irq_flags |= IRQF_TRIGGER_LOW; - retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED); + retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED); if (retval != 0) goto err6; pr_info("%s, irq %d\n", hcd->product_desc, irq); diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h index 1a253ebf7e50..5151516ea1de 100644 --- a/drivers/usb/host/isp1362.h +++ b/drivers/usb/host/isp1362.h @@ -534,8 +534,8 @@ struct isp1362_hcd { /* periodic schedule: isochronous */ struct list_head isoc; - int istl_flip:1; - int irq_active:1; + unsigned int istl_flip:1; + unsigned int irq_active:1; /* Schedules for the current frame */ struct isp1362_ep_queue atl_queue; diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index 9600a58299db..27b8f7cb4471 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -1039,12 +1039,12 @@ static void do_atl_int(struct usb_hcd *usb_hcd) if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) { u32 buffstatus; - /* XXX + /* * NAKs are handled in HW by the chip. Usually if the * device is not able to send data fast enough. - * This did not trigger for a long time now. + * This happens mostly on slower hardware. */ - printk(KERN_ERR "Reloading ptd %p/%p... qh %p readed: " + printk(KERN_NOTICE "Reloading ptd %p/%p... qh %p read: " "%d of %zu done: %08x cur: %08x\n", qtd, urb, qh, PTD_XFERRED_LENGTH(dw3), qtd->length, done_map, diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 7ccffcbe7b6f..68b83ab70719 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -35,7 +35,7 @@ extern int usb_disabled(void); static void at91_start_clock(void) { - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_enable(hclk); clk_enable(iclk); clk_enable(fclk); @@ -46,7 +46,7 @@ static void at91_stop_clock(void) { clk_disable(fclk); clk_disable(iclk); - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_disable(hclk); clocked = 0; } @@ -142,7 +142,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, iclk = clk_get(&pdev->dev, "ohci_clk"); fclk = clk_get(&pdev->dev, "uhpck"); - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) hclk = clk_get(&pdev->dev, "hck0"); at91_start_hc(pdev); @@ -155,7 +155,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, /* Error handling */ at91_stop_hc(pdev); - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_put(hclk); clk_put(fclk); clk_put(iclk); @@ -192,7 +192,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd, release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); - if (cpu_is_at91sam9261()) + if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) clk_put(hclk); clk_put(fclk); clk_put(iclk); diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c index e4380082ebb1..17a6043c1fa0 100644 --- a/drivers/usb/host/ohci-au1xxx.c +++ b/drivers/usb/host/ohci-au1xxx.c @@ -294,7 +294,7 @@ static int ohci_hcd_au1xxx_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops au1xxx_ohci_pmops = { +static 
const struct dev_pm_ops au1xxx_ohci_pmops = { .suspend = ohci_hcd_au1xxx_drv_suspend, .resume = ohci_hcd_au1xxx_drv_resume, }; diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c index 100bf3d8437c..2769326da42e 100644 --- a/drivers/usb/host/ohci-pnx4008.c +++ b/drivers/usb/host/ohci-pnx4008.c @@ -98,8 +98,8 @@ #define ISP1301_I2C_INTERRUPT_RISING 0xE #define ISP1301_I2C_REG_CLEAR_ADDR 1 -struct i2c_driver isp1301_driver; -struct i2c_client *isp1301_i2c_client; +static struct i2c_driver isp1301_driver; +static struct i2c_client *isp1301_i2c_client; extern int usb_disabled(void); extern int ocpi_enable(void); @@ -120,12 +120,12 @@ static int isp1301_remove(struct i2c_client *client) return 0; } -const struct i2c_device_id isp1301_id[] = { +static const struct i2c_device_id isp1301_id[] = { { "isp1301_pnx", 0 }, { } }; -struct i2c_driver isp1301_driver = { +static struct i2c_driver isp1301_driver = { .driver = { .name = "isp1301_pnx", }, diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index f1c06202fdf2..a18debdd79b8 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -518,7 +518,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev) return 0; } -static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { +static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = { .suspend = ohci_hcd_pxa27x_drv_suspend, .resume = ohci_hcd_pxa27x_drv_resume, }; diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index e33d36256350..b7a661c02bcd 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -822,8 +822,6 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) return; list_for_each_entry_safe(td, next, list, queue) { - if (!td) - continue; if (td->address != address) continue; @@ -2025,8 +2023,6 @@ static struct r8a66597_device *get_r8a66597_device(struct r8a66597 *r8a66597, struct list_head *list = &r8a66597->child_device; list_for_each_entry(dev, list, device_list) { - if (!dev) - continue; if (dev->usb_address != addr) continue; @@ -2357,7 +2353,7 @@ static int r8a66597_resume(struct device *dev) return 0; } -static struct dev_pm_ops r8a66597_dev_pm_ops = { +static const struct dev_pm_ops r8a66597_dev_pm_ops = { .suspend = r8a66597_suspend, .resume = r8a66597_resume, .poweroff = r8a66597_suspend, diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 5cd0e48f67fb..99cd00fd3514 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -749,7 +749,20 @@ static int uhci_rh_suspend(struct usb_hcd *hcd) spin_lock_irq(&uhci->lock); if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) rc = -ESHUTDOWN; - else if (!uhci->dead) + else if (uhci->dead) + ; /* Dead controllers tell no tales */ + + /* Once the controller is stopped, port resumes that are already + * in progress won't complete. Hence if remote wakeup is enabled + * for the root hub and any ports are in the middle of a resume or + * remote wakeup, we must fail the suspend. 
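A stopped controller cannot finish a port resume that is already under way, so the root-hub suspend has to be refused while remote wakeup may still depend on that resume completing. A minimal, hypothetical sketch of the decision; the bitmap argument and the helper name are illustrative, not the driver's actual structures:

#include <errno.h>
#include <stdbool.h>

/*
 * Illustration only: refuse a root-hub suspend while any port resume
 * is still in flight and remote wakeup is armed. The caller retries
 * the suspend once the resume has finished.
 */
static int rh_suspend_allowed(bool do_remote_wakeup,
                              unsigned long resuming_ports)
{
    if (do_remote_wakeup && resuming_ports)
        return -EBUSY;
    return 0;
}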
+ */ + else if (hcd->self.root_hub->do_remote_wakeup && + uhci->resuming_ports) { + dev_dbg(uhci_dev(uhci), "suspend failed because a port " + "is resuming\n"); + rc = -EBUSY; + } else suspend_rh(uhci, UHCI_RH_SUSPENDED); spin_unlock_irq(&uhci->lock); return rc; diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 885b585360b9..8270055848ca 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci) /* Port received a wakeup request */ set_bit(port, &uhci->resuming_ports); uhci->ports_timeout = jiffies + - msecs_to_jiffies(20); + msecs_to_jiffies(25); /* Make sure we see the port again * after the resuming period is over. */ diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c index 2273c815941f..8c1c610c9513 100644 --- a/drivers/usb/host/whci/debug.c +++ b/drivers/usb/host/whci/debug.c @@ -31,17 +31,29 @@ struct whc_dbg { void qset_print(struct seq_file *s, struct whc_qset *qset) { + static const char *qh_type[] = { + "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", }; struct whc_std *std; struct urb *urb = NULL; int i; - seq_printf(s, "qset %08x\n", (u32)qset->qset_dma); + seq_printf(s, "qset %08x", (u32)qset->qset_dma); + if (&qset->list_node == qset->whc->async_list.prev) { + seq_printf(s, " (dummy)\n"); + } else { + seq_printf(s, " ep%d%s-%s maxpkt: %d\n", + qset->qh.info1 & 0x0f, + (qset->qh.info1 >> 4) & 0x1 ? "in" : "out", + qh_type[(qset->qh.info1 >> 5) & 0x7], + (qset->qh.info1 >> 16) & 0xffff); + } seq_printf(s, " -> %08x\n", (u32)qset->qh.link); seq_printf(s, " info: %08x %08x %08x\n", - qset->qh.info1, qset->qh.info2, qset->qh.info3); - seq_printf(s, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count); + qset->qh.info1, qset->qh.info2, qset->qh.info3); + seq_printf(s, " sts: %04x errs: %d curwin: %08x\n", + qset->qh.status, qset->qh.err_count, qset->qh.cur_window); seq_printf(s, " TD: sts: %08x opts: %08x\n", - qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); + qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); for (i = 0; i < WHCI_QSET_TD_MAX; i++) { seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c index 687b622a1612..e0d3401285c8 100644 --- a/drivers/usb/host/whci/hcd.c +++ b/drivers/usb/host/whci/hcd.c @@ -250,6 +250,7 @@ static int whc_probe(struct umc_dev *umc) } usb_hcd->wireless = 1; + usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */ wusbhc = usb_hcd_to_wusbhc(usb_hcd); whc = wusbhc_to_whc(wusbhc); diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index 1b9dc1571570..7d4204db0f61 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c @@ -49,16 +49,19 @@ struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) * state * @urb: an urb for a transfer to this endpoint */ -static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) +static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb) { struct usb_device *usb_dev = urb->dev; + struct wusb_dev *wusb_dev = usb_dev->wusb_dev; struct usb_wireless_ep_comp_descriptor *epcd; bool is_out; + uint8_t phy_rate; is_out = usb_pipeout(urb->pipe); - epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; + qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize); + epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; if (epcd) { qset->max_seq = 
epcd->bMaxSequence; qset->max_burst = epcd->bMaxBurst; @@ -67,12 +70,28 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) qset->max_burst = 1; } + /* + * Initial PHY rate is 53.3 Mbit/s for control endpoints or + * the maximum supported by the device for other endpoints + * (unless limited by the user). + */ + if (usb_pipecontrol(urb->pipe)) + phy_rate = UWB_PHY_RATE_53; + else { + uint16_t phy_rates; + + phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates); + phy_rate = fls(phy_rates) - 1; + if (phy_rate > whc->wusbhc.phy_rate) + phy_rate = whc->wusbhc.phy_rate; + } + qset->qh.info1 = cpu_to_le32( QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) | usb_pipe_to_qh_type(urb->pipe) | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) - | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out)) + | QH_INFO1_MAX_PKT_LEN(qset->max_packet) ); qset->qh.info2 = cpu_to_le32( QH_INFO2_BURST(qset->max_burst) @@ -86,7 +105,7 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) * strength and can presumably guess the Tx power required * from that? */ qset->qh.info3 = cpu_to_le32( - QH_INFO3_TX_RATE_53_3 + QH_INFO3_TX_RATE(phy_rate) | QH_INFO3_TX_PWR(0) /* 0 == max power */ ); @@ -148,7 +167,7 @@ struct whc_qset *get_qset(struct whc *whc, struct urb *urb, qset->ep = urb->ep; urb->ep->hcpriv = qset; - qset_fill_qh(qset, urb); + qset_fill_qh(whc, qset, urb); } return qset; } @@ -241,6 +260,36 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) qset->ntds--; } +static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std) +{ + struct scatterlist *sg; + void *bounce; + size_t remaining, offset; + + bounce = std->bounce_buf; + remaining = std->len; + + sg = std->bounce_sg; + offset = std->bounce_offset; + + while (remaining) { + size_t len; + + len = min(sg->length - offset, remaining); + memcpy(sg_virt(sg) + offset, bounce, len); + + bounce += len; + remaining -= len; + + offset += len; + if (offset >= sg->length) { + sg = sg_next(sg); + offset = 0; + } + } + +} + /** * qset_free_std - remove an sTD and free it. * @whc: the WHCI host controller @@ -249,13 +298,29 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) void qset_free_std(struct whc *whc, struct whc_std *std) { list_del(&std->list_node); - if (std->num_pointers) { - dma_unmap_single(whc->wusbhc.dev, std->dma_addr, - std->num_pointers * sizeof(struct whc_page_list_entry), - DMA_TO_DEVICE); + if (std->bounce_buf) { + bool is_out = usb_pipeout(std->urb->pipe); + dma_addr_t dma_addr; + + if (std->num_pointers) + dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr); + else + dma_addr = std->dma_addr; + + dma_unmap_single(whc->wusbhc.dev, dma_addr, + std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (!is_out) + qset_copy_bounce_to_sg(whc, std); + kfree(std->bounce_buf); + } + if (std->pl_virt) { + if (std->dma_addr) + dma_unmap_single(whc->wusbhc.dev, std->dma_addr, + std->num_pointers * sizeof(struct whc_page_list_entry), + DMA_TO_DEVICE); kfree(std->pl_virt); + std->pl_virt = NULL; } - kfree(std); } @@ -293,12 +358,17 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f { dma_addr_t dma_addr = std->dma_addr; dma_addr_t sp, ep; - size_t std_len = std->len; size_t pl_len; int p; - sp = ALIGN(dma_addr, WHCI_PAGE_SIZE); - ep = dma_addr + std_len; + /* Short buffers don't need a page list. 
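Earlier in this hunk, qset_fill_qh() picks the initial PHY rate for non-control endpoints by taking the highest bit set in the device's wPHYRates bitmap and clamping it to the host-side limit. Below is a standalone sketch of that selection, with a portable highest-set-bit helper standing in for the kernel's fls(); the names are mine, and it assumes the device advertises at least the base 53.3 Mbit/s rate (bit 0):

#include <stdint.h>
#include <stdio.h>

/* Highest set bit, numbered from 1 like the kernel's fls(); 0 if none. */
static int fls16(uint16_t v)
{
    int n = 0;

    while (v) {
        v >>= 1;
        n++;
    }
    return n;
}

/*
 * Pick the fastest rate index the device supports, but never exceed
 * the host (or user-imposed) limit. Index 0 is the 53.3 Mbit/s rate.
 */
static uint8_t pick_phy_rate(uint16_t supported_rates, uint8_t host_limit)
{
    uint8_t rate = fls16(supported_rates) - 1;

    return rate > host_limit ? host_limit : rate;
}

int main(void)
{
    /* Device supports rate indexes 0..5, host capped at 3: pick 3. */
    printf("%d\n", pick_phy_rate(0x003f, 3));
    return 0;
}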
*/ + if (std->len <= WHCI_PAGE_SIZE) { + std->num_pointers = 0; + return 0; + } + + sp = dma_addr & ~(WHCI_PAGE_SIZE-1); + ep = dma_addr + std->len; std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); @@ -309,7 +379,7 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f for (p = 0; p < std->num_pointers; p++) { std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); - dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE); + dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); } return 0; @@ -339,6 +409,218 @@ static void urb_dequeue_work(struct work_struct *work) spin_unlock_irqrestore(&whc->lock, flags); } +static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset, + struct urb *urb, gfp_t mem_flags) +{ + struct whc_std *std; + + std = kzalloc(sizeof(struct whc_std), mem_flags); + if (std == NULL) + return NULL; + + std->urb = urb; + std->qtd = NULL; + + INIT_LIST_HEAD(&std->list_node); + list_add_tail(&std->list_node, &qset->stds); + + return std; +} + +static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb, + gfp_t mem_flags) +{ + size_t remaining; + struct scatterlist *sg; + int i; + int ntds = 0; + struct whc_std *std = NULL; + struct whc_page_list_entry *entry; + dma_addr_t prev_end = 0; + size_t pl_len; + int p = 0; + + remaining = urb->transfer_buffer_length; + + for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) { + dma_addr_t dma_addr; + size_t dma_remaining; + dma_addr_t sp, ep; + int num_pointers; + + if (remaining == 0) { + break; + } + + dma_addr = sg_dma_address(sg); + dma_remaining = min_t(size_t, sg_dma_len(sg), remaining); + + while (dma_remaining) { + size_t dma_len; + + /* + * We can use the previous std (if it exists) provided that: + * - the previous one ended on a page boundary. + * - the current one begins on a page boundary. + * - the previous one isn't full. + * + * If a new std is needed but the previous one + * was not a whole number of packets then this + * sg list cannot be mapped onto multiple + * qTDs. Return an error and let the caller + * sort it out. + */ + if (!std + || (prev_end & (WHCI_PAGE_SIZE-1)) + || (dma_addr & (WHCI_PAGE_SIZE-1)) + || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) { + if (std->len % qset->max_packet != 0) + return -EINVAL; + std = qset_new_std(whc, qset, urb, mem_flags); + if (std == NULL) { + return -ENOMEM; + } + ntds++; + p = 0; + } + + dma_len = dma_remaining; + + /* + * If the remainder of this element doesn't + * fit in a single qTD, limit the qTD to a + * whole number of packets. This allows the + * remainder to go into the next qTD. 
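The clamp just below rounds a qTD down to a whole number of max-packet-size units, so the cut between consecutive qTDs never lands in the middle of a packet. A standalone illustration of the arithmetic; QTD_MAX_XFER_SIZE here is only an example value and qtd_room() is a hypothetical helper, not the driver's code:

#include <stddef.h>
#include <stdio.h>

#define QTD_MAX_XFER_SIZE 1048576    /* example limit for the sketch */

/*
 * How many bytes of 'want' may still go into a qTD that already holds
 * 'have' bytes, without exceeding the limit or splitting a packet.
 */
static size_t qtd_room(size_t have, size_t want, size_t max_packet)
{
    if (have + want <= QTD_MAX_XFER_SIZE)
        return want;
    return (QTD_MAX_XFER_SIZE / max_packet) * max_packet - have;
}

int main(void)
{
    /*
     * A 2,000,000 byte element with 3584-byte packets: the first qTD
     * takes 292 whole packets (1,046,528 bytes), the rest moves on
     * to the next qTD.
     */
    printf("%zu\n", qtd_room(0, 2000000, 3584));
    return 0;
}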
+ */ + if (std->len + dma_len > QTD_MAX_XFER_SIZE) { + dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet) + * qset->max_packet - std->len; + } + + std->len += dma_len; + std->ntds_remaining = -1; /* filled in later */ + + sp = dma_addr & ~(WHCI_PAGE_SIZE-1); + ep = dma_addr + dma_len; + num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); + std->num_pointers += num_pointers; + + pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); + + std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags); + if (std->pl_virt == NULL) { + return -ENOMEM; + } + + for (;p < std->num_pointers; p++, entry++) { + std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); + dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); + } + + prev_end = dma_addr = ep; + dma_remaining -= dma_len; + remaining -= dma_len; + } + } + + /* Now the number of stds is know, go back and fill in + std->ntds_remaining. */ + list_for_each_entry(std, &qset->stds, list_node) { + if (std->ntds_remaining == -1) { + pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); + std->ntds_remaining = ntds--; + std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, + pl_len, DMA_TO_DEVICE); + } + } + return 0; +} + +/** + * qset_add_urb_sg_linearize - add an urb with sg list, copying the data + * + * If the URB contains an sg list whose elements cannot be directly + * mapped to qTDs then the data must be transferred via bounce + * buffers. + */ +static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, + struct urb *urb, gfp_t mem_flags) +{ + bool is_out = usb_pipeout(urb->pipe); + size_t max_std_len; + size_t remaining; + int ntds = 0; + struct whc_std *std = NULL; + void *bounce = NULL; + struct scatterlist *sg; + int i; + + /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */ + max_std_len = qset->max_burst * qset->max_packet; + + remaining = urb->transfer_buffer_length; + + for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) { + size_t len; + size_t sg_remaining; + void *orig; + + if (remaining == 0) { + break; + } + + sg_remaining = min_t(size_t, remaining, sg->length); + orig = sg_virt(sg); + + while (sg_remaining) { + if (!std || std->len == max_std_len) { + std = qset_new_std(whc, qset, urb, mem_flags); + if (std == NULL) + return -ENOMEM; + std->bounce_buf = kmalloc(max_std_len, mem_flags); + if (std->bounce_buf == NULL) + return -ENOMEM; + std->bounce_sg = sg; + std->bounce_offset = orig - sg_virt(sg); + bounce = std->bounce_buf; + ntds++; + } + + len = min(sg_remaining, max_std_len - std->len); + + if (is_out) + memcpy(bounce, orig, len); + + std->len += len; + std->ntds_remaining = -1; /* filled in later */ + + bounce += len; + orig += len; + sg_remaining -= len; + remaining -= len; + } + } + + /* + * For each of the new sTDs, map the bounce buffers, create + * page lists (if necessary), and fill in std->ntds_remaining. + */ + list_for_each_entry(std, &qset->stds, list_node) { + if (std->ntds_remaining != -1) + continue; + + std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len, + is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (qset_fill_page_list(whc, std, mem_flags) < 0) + return -ENOMEM; + + std->ntds_remaining = ntds--; + } + + return 0; +} + /** * qset_add_urb - add an urb to the qset's queue. 
* @@ -353,10 +635,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, int remaining = urb->transfer_buffer_length; u64 transfer_dma = urb->transfer_dma; int ntds_remaining; - - ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); - if (ntds_remaining == 0) - ntds_remaining = 1; + int ret; wurb = kzalloc(sizeof(struct whc_urb), mem_flags); if (wurb == NULL) @@ -366,32 +645,39 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, wurb->urb = urb; INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); + if (urb->sg) { + ret = qset_add_urb_sg(whc, qset, urb, mem_flags); + if (ret == -EINVAL) { + qset_free_stds(qset, urb); + ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags); + } + if (ret < 0) + goto err_no_mem; + return 0; + } + + ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); + if (ntds_remaining == 0) + ntds_remaining = 1; + while (ntds_remaining) { struct whc_std *std; size_t std_len; - std = kmalloc(sizeof(struct whc_std), mem_flags); - if (std == NULL) - goto err_no_mem; - std_len = remaining; if (std_len > QTD_MAX_XFER_SIZE) std_len = QTD_MAX_XFER_SIZE; - std->urb = urb; + std = qset_new_std(whc, qset, urb, mem_flags); + if (std == NULL) + goto err_no_mem; + std->dma_addr = transfer_dma; std->len = std_len; std->ntds_remaining = ntds_remaining; - std->qtd = NULL; - INIT_LIST_HEAD(&std->list_node); - list_add_tail(&std->list_node, &qset->stds); - - if (std_len > WHCI_PAGE_SIZE) { - if (qset_fill_page_list(whc, std, mem_flags) < 0) - goto err_no_mem; - } else - std->num_pointers = 0; + if (qset_fill_page_list(whc, std, mem_flags) < 0) + goto err_no_mem; ntds_remaining--; remaining -= std_len; diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h index 24e94d983c5e..c80c7d93bc4a 100644 --- a/drivers/usb/host/whci/whcd.h +++ b/drivers/usb/host/whci/whcd.h @@ -84,6 +84,11 @@ struct whc { * @len: the length of data in the associated TD. * @ntds_remaining: number of TDs (starting from this one) in this transfer. * + * @bounce_buf: a bounce buffer if the std was from an urb with a sg + * list that could not be mapped to qTDs directly. + * @bounce_sg: the first scatterlist element bounce_buf is for. + * @bounce_offset: the offset into bounce_sg for the start of bounce_buf. + * * Queued URBs may require more TDs than are available in a qset so we * use a list of these "software TDs" (sTDs) to hold per-TD data. 
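The qset_add_urb() hunk above tries the zero-copy scatter-gather mapping first and falls back to the bounce-buffer path only when the list cannot be carved into whole-packet qTDs, which qset_add_urb_sg() reports as -EINVAL. A compressed, hypothetical sketch of that control flow; the three helpers are stand-ins, not WHCI functions:

#include <errno.h>

/* Hypothetical stand-ins for the two mapping strategies. */
static int map_sg_directly(void *xfer)    { (void)xfer; return -EINVAL; }
static int map_sg_linearized(void *xfer)  { (void)xfer; return 0; }
static void discard_partial_mapping(void *xfer) { (void)xfer; }

/* Prefer the cheap mapping; on -EINVAL undo it and bounce instead. */
static int map_transfer(void *xfer)
{
    int ret = map_sg_directly(xfer);

    if (ret == -EINVAL) {
        discard_partial_mapping(xfer);
        ret = map_sg_linearized(xfer);
    }
    return ret;
}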
*/ @@ -97,6 +102,10 @@ struct whc_std { int num_pointers; dma_addr_t dma_addr; struct whc_page_list_entry *pl_virt; + + void *bounce_buf; + struct scatterlist *bounce_sg; + unsigned bounce_offset; }; /** diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h index e8d0001605be..4d4cbc0730bf 100644 --- a/drivers/usb/host/whci/whci-hc.h +++ b/drivers/usb/host/whci/whci-hc.h @@ -172,14 +172,7 @@ struct whc_qhead { #define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */ #define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */ -#define QH_INFO3_TX_RATE_53_3 (0 << 24) -#define QH_INFO3_TX_RATE_80 (1 << 24) -#define QH_INFO3_TX_RATE_106_7 (2 << 24) -#define QH_INFO3_TX_RATE_160 (3 << 24) -#define QH_INFO3_TX_RATE_200 (4 << 24) -#define QH_INFO3_TX_RATE_320 (5 << 24) -#define QH_INFO3_TX_RATE_400 (6 << 24) -#define QH_INFO3_TX_RATE_480 (7 << 24) +#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */ #define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */ #define QH_STATUS_FLOW_CTRL (1 << 15) @@ -267,8 +260,9 @@ struct whc_qset { unsigned reset:1; struct urb *pause_after_urb; struct completion remove_complete; - int max_burst; - int max_seq; + uint16_t max_packet; + uint8_t max_burst; + uint8_t max_seq; }; static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target) diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index 932f99938481..5e92c72df642 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c @@ -67,22 +67,14 @@ static int handshake(struct xhci_hcd *xhci, void __iomem *ptr, } /* - * Force HC into halt state. - * - * Disable any IRQs and clear the run/stop bit. - * HC will complete any current and actively pipelined transactions, and - * should halt within 16 microframes of the run/stop bit being cleared. - * Read HC Halted bit in the status register to see when the HC is finished. - * XXX: shouldn't we set HC_STATE_HALT here somewhere? + * Disable interrupts and begin the xHCI halting process. */ -int xhci_halt(struct xhci_hcd *xhci) +void xhci_quiesce(struct xhci_hcd *xhci) { u32 halted; u32 cmd; u32 mask; - xhci_dbg(xhci, "// Halt the HC\n"); - /* Disable all interrupts from the host controller */ mask = ~(XHCI_IRQS); halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT; if (!halted) @@ -91,6 +83,21 @@ int xhci_halt(struct xhci_hcd *xhci) cmd = xhci_readl(xhci, &xhci->op_regs->command); cmd &= mask; xhci_writel(xhci, cmd, &xhci->op_regs->command); +} + +/* + * Force HC into halt state. + * + * Disable any IRQs and clear the run/stop bit. + * HC will complete any current and actively pipelined transactions, and + * should halt within 16 microframes of the run/stop bit being cleared. + * Read HC Halted bit in the status register to see when the HC is finished. + * XXX: shouldn't we set HC_STATE_HALT here somewhere? + */ +int xhci_halt(struct xhci_hcd *xhci) +{ + xhci_dbg(xhci, "// Halt the HC\n"); + xhci_quiesce(xhci); return handshake(xhci, &xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); @@ -124,28 +131,6 @@ int xhci_reset(struct xhci_hcd *xhci) return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000); } -/* - * Stop the HC from processing the endpoint queues. - */ -static void xhci_quiesce(struct xhci_hcd *xhci) -{ - /* - * Queues are per endpoint, so we need to disable an endpoint or slot. 
- * - * To disable a slot, we need to insert a disable slot command on the - * command ring and ring the doorbell. This will also free any internal - * resources associated with the slot (which might not be what we want). - * - * A Release Endpoint command sounds better - doesn't free internal HC - * memory, but removes the endpoints from the schedule and releases the - * bandwidth, disables the doorbells, and clears the endpoint enable - * flag. Usually used prior to a set interface command. - * - * TODO: Implement after command ring code is done. - */ - BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state)); - xhci_dbg(xhci, "Finished quiescing -- code not written yet\n"); -} #if 0 /* Set up MSI-X table for entry 0 (may claim other entries later) */ @@ -261,8 +246,14 @@ static void xhci_work(struct xhci_hcd *xhci) /* Flush posted writes */ xhci_readl(xhci, &xhci->ir_set->irq_pending); - /* FIXME this should be a delayed service routine that clears the EHB */ - xhci_handle_event(xhci); + if (xhci->xhc_state & XHCI_STATE_DYING) + xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " + "Shouldn't IRQs be disabled?\n"); + else + /* FIXME this should be a delayed service routine + * that clears the EHB. + */ + xhci_handle_event(xhci); /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); @@ -335,7 +326,7 @@ void xhci_event_ring_work(unsigned long arg) spin_lock_irqsave(&xhci->lock, flags); temp = xhci_readl(xhci, &xhci->op_regs->status); xhci_dbg(xhci, "op reg status = 0x%x\n", temp); - if (temp == 0xffffffff) { + if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { xhci_dbg(xhci, "HW died, polling stopped.\n"); spin_unlock_irqrestore(&xhci->lock, flags); return; @@ -490,8 +481,6 @@ void xhci_stop(struct usb_hcd *hcd) struct xhci_hcd *xhci = hcd_to_xhci(hcd); spin_lock_irq(&xhci->lock); - if (HC_IS_RUNNING(hcd->state)) - xhci_quiesce(xhci); xhci_halt(xhci); xhci_reset(xhci); spin_unlock_irq(&xhci->lock); @@ -727,16 +716,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) * atomic context to this function, which may allocate memory. 
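Each enqueue branch below gains the same guard: take the lock and, if a watchdog has already marked the controller as dying, refuse the transfer with -ESHUTDOWN instead of queueing work that will never be serviced. A minimal sketch of the pattern using a userspace mutex; the struct, flag, and function names are illustrative only:

#include <errno.h>
#include <pthread.h>

#define HC_STATE_DYING (1u << 0)    /* illustrative flag */

struct host {
    pthread_mutex_t lock;
    unsigned int state;
};

/* Refuse new transfers once the controller has been declared dead. */
static int host_enqueue(struct host *hc /* , transfer arguments */)
{
    pthread_mutex_lock(&hc->lock);
    if (hc->state & HC_STATE_DYING) {
        pthread_mutex_unlock(&hc->lock);
        return -ESHUTDOWN;
    }
    /* ... queue the transfer onto the ring here ... */
    pthread_mutex_unlock(&hc->lock);
    return 0;
}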
*/ spin_lock_irqsave(&xhci->lock, flags); + if (xhci->xhc_state & XHCI_STATE_DYING) + goto dying; ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); spin_unlock_irqrestore(&xhci->lock, flags); } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { spin_lock_irqsave(&xhci->lock, flags); + if (xhci->xhc_state & XHCI_STATE_DYING) + goto dying; ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); spin_unlock_irqrestore(&xhci->lock, flags); } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { spin_lock_irqsave(&xhci->lock, flags); + if (xhci->xhc_state & XHCI_STATE_DYING) + goto dying; ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); spin_unlock_irqrestore(&xhci->lock, flags); @@ -745,6 +740,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) } exit: return ret; +dying: + xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " + "non-responsive xHCI host.\n", + urb->ep->desc.bEndpointAddress, urb); + spin_unlock_irqrestore(&xhci->lock, flags); + return -ESHUTDOWN; } /* @@ -806,6 +807,17 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) kfree(td); return ret; } + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " + "non-responsive xHCI host.\n", + urb->ep->desc.bEndpointAddress, urb); + /* Let the stop endpoint command watchdog timer (which set this + * state) finish cleaning up the endpoint TD lists. We must + * have caught it in the middle of dropping a lock and giving + * back an URB. + */ + goto done; + } xhci_dbg(xhci, "Cancel URB %p\n", urb); xhci_dbg(xhci, "Event ring:\n"); @@ -817,12 +829,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) xhci_debug_ring(xhci, ep_ring); td = (struct xhci_td *) urb->hcpriv; - ep->cancels_pending++; list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); /* Queue a stop endpoint command, but only if this is * the first cancellation to be handled. */ - if (ep->cancels_pending == 1) { + if (!(ep->ep_state & EP_HALT_PENDING)) { + ep->ep_state |= EP_HALT_PENDING; + ep->stop_cmds_pending++; + ep->stop_cmd_timer.expires = jiffies + + XHCI_STOP_EP_CMD_TIMEOUT * HZ; + add_timer(&ep->stop_cmd_timer); xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); xhci_ring_cmd_db(xhci); } @@ -1246,13 +1262,35 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); xhci_zero_in_ctx(xhci, virt_dev); - /* Free any old rings */ + /* Install new rings and free or cache any old rings */ for (i = 1; i < 31; ++i) { - if (virt_dev->eps[i].new_ring) { - xhci_ring_free(xhci, virt_dev->eps[i].ring); - virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; - virt_dev->eps[i].new_ring = NULL; + int rings_cached; + + if (!virt_dev->eps[i].new_ring) + continue; + /* Only cache or free the old ring if it exists. + * It may not if this is the first add of an endpoint. + */ + if (virt_dev->eps[i].ring) { + rings_cached = virt_dev->num_rings_cached; + if (rings_cached < XHCI_MAX_RINGS_CACHED) { + virt_dev->num_rings_cached++; + rings_cached = virt_dev->num_rings_cached; + virt_dev->ring_cache[rings_cached] = + virt_dev->eps[i].ring; + xhci_dbg(xhci, "Cached old ring, " + "%d ring%s cached\n", + rings_cached, + (rings_cached > 1) ? 
"s" : ""); + } else { + xhci_ring_free(xhci, virt_dev->eps[i].ring); + xhci_dbg(xhci, "Ring cache full (%d rings), " + "freeing ring\n", + virt_dev->num_rings_cached); + } } + virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; + virt_dev->eps[i].new_ring = NULL; } return ret; @@ -1427,16 +1465,27 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *virt_dev; unsigned long flags; u32 state; + int i; if (udev->slot_id == 0) return; + virt_dev = xhci->devs[udev->slot_id]; + if (!virt_dev) + return; + + /* Stop any wayward timer functions (which may grab the lock) */ + for (i = 0; i < 31; ++i) { + virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; + del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); + } spin_lock_irqsave(&xhci->lock, flags); /* Don't disable the slot if the host controller is dead. */ state = xhci_readl(xhci, &xhci->op_regs->status); - if (state == 0xffffffff) { + if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { xhci_free_virt_device(xhci, udev->slot_id); spin_unlock_irqrestore(&xhci->lock, flags); return; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index b8fd270a8b0d..bffcef7a5545 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -125,6 +125,23 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) kfree(ring); } +static void xhci_initialize_ring_info(struct xhci_ring *ring) +{ + /* The ring is empty, so the enqueue pointer == dequeue pointer */ + ring->enqueue = ring->first_seg->trbs; + ring->enq_seg = ring->first_seg; + ring->dequeue = ring->enqueue; + ring->deq_seg = ring->first_seg; + /* The ring is initialized to 0. The producer must write 1 to the cycle + * bit to handover ownership of the TRB, so PCS = 1. The consumer must + * compare CCS to the cycle bit to check ownership, so CCS = 1. + */ + ring->cycle_state = 1; + /* Not necessary for new rings, but needed for re-initialized rings */ + ring->enq_updates = 0; + ring->deq_updates = 0; +} + /** * Create a new ring with zero or more segments. * @@ -173,17 +190,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, " segment %p (virtual), 0x%llx (DMA)\n", prev, (unsigned long long)prev->dma); } - /* The ring is empty, so the enqueue pointer == dequeue pointer */ - ring->enqueue = ring->first_seg->trbs; - ring->enq_seg = ring->first_seg; - ring->dequeue = ring->enqueue; - ring->deq_seg = ring->first_seg; - /* The ring is initialized to 0. The producer must write 1 to the cycle - * bit to handover ownership of the TRB, so PCS = 1. The consumer must - * compare CCS to the cycle bit to check ownership, so CCS = 1. - */ - ring->cycle_state = 1; - + xhci_initialize_ring_info(ring); return ring; fail: @@ -191,6 +198,27 @@ fail: return 0; } +/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue + * pointers to the beginning of the ring. + */ +static void xhci_reinit_cached_ring(struct xhci_hcd *xhci, + struct xhci_ring *ring) +{ + struct xhci_segment *seg = ring->first_seg; + do { + memset(seg->trbs, 0, + sizeof(union xhci_trb)*TRBS_PER_SEGMENT); + /* All endpoint rings have link TRBs */ + xhci_link_segments(xhci, seg, seg->next, 1); + seg = seg->next; + } while (seg != ring->first_seg); + xhci_initialize_ring_info(ring); + /* td list should be empty since all URBs have been cancelled, + * but just in case... 
+ */ + INIT_LIST_HEAD(&ring->td_list); +} + #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, @@ -248,6 +276,15 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); } +static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep) +{ + init_timer(&ep->stop_cmd_timer); + ep->stop_cmd_timer.data = (unsigned long) ep; + ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog; + ep->xhci = xhci; +} + /* All the xhci_tds in the ring's TD list should be freed at this point */ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) { @@ -267,6 +304,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) if (dev->eps[i].ring) xhci_ring_free(xhci, dev->eps[i].ring); + if (dev->ring_cache) { + for (i = 0; i < dev->num_rings_cached; i++) + xhci_ring_free(xhci, dev->ring_cache[i]); + kfree(dev->ring_cache); + } + if (dev->in_ctx) xhci_free_container_ctx(xhci, dev->in_ctx); if (dev->out_ctx) @@ -309,15 +352,25 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, (unsigned long long)dev->in_ctx->dma); - /* Initialize the cancellation list for each endpoint */ - for (i = 0; i < 31; i++) + /* Initialize the cancellation list and watchdog timers for each ep */ + for (i = 0; i < 31; i++) { + xhci_init_endpoint_timer(xhci, &dev->eps[i]); INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); + } /* Allocate endpoint 0 ring */ dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags); if (!dev->eps[0].ring) goto fail; + /* Allocate pointers to the ring cache */ + dev->ring_cache = kzalloc( + sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED, + flags); + if (!dev->ring_cache) + goto fail; + dev->num_rings_cached = 0; + init_completion(&dev->cmd_completion); INIT_LIST_HEAD(&dev->cmd_list); @@ -544,8 +597,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Set up the endpoint ring */ virt_dev->eps[ep_index].new_ring = xhci_ring_alloc(xhci, 1, true, mem_flags); - if (!virt_dev->eps[ep_index].new_ring) - return -ENOMEM; + if (!virt_dev->eps[ep_index].new_ring) { + /* Attempt to use the ring cache */ + if (virt_dev->num_rings_cached == 0) + return -ENOMEM; + virt_dev->eps[ep_index].new_ring = + virt_dev->ring_cache[virt_dev->num_rings_cached]; + virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; + virt_dev->num_rings_cached--; + xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring); + } ep_ring = virt_dev->eps[ep_index].new_ring; ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; @@ -768,14 +829,17 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags); - if (!command->in_ctx) + if (!command->in_ctx) { + kfree(command); return NULL; + } if (allocate_completion) { command->completion = kzalloc(sizeof(struct completion), mem_flags); if (!command->completion) { xhci_free_container_ctx(xhci, command->in_ctx); + kfree(command); return NULL; } init_completion(command->completion); @@ -848,6 +912,163 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->page_shift = 0; } +static int xhci_test_trb_in_td(struct xhci_hcd *xhci, + struct xhci_segment *input_seg, + union xhci_trb *start_trb, + union xhci_trb *end_trb, + dma_addr_t input_dma, + struct xhci_segment *result_seg, + char *test_name, int test_number) +{ + unsigned long long 
start_dma; + unsigned long long end_dma; + struct xhci_segment *seg; + + start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); + end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); + + seg = trb_in_td(input_seg, start_trb, end_trb, input_dma); + if (seg != result_seg) { + xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", + test_name, test_number); + xhci_warn(xhci, "Tested TRB math w/ seg %p and " + "input DMA 0x%llx\n", + input_seg, + (unsigned long long) input_dma); + xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " + "ending TRB %p (0x%llx DMA)\n", + start_trb, start_dma, + end_trb, end_dma); + xhci_warn(xhci, "Expected seg %p, got seg %p\n", + result_seg, seg); + return -1; + } + return 0; +} + +/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */ +static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) +{ + struct { + dma_addr_t input_dma; + struct xhci_segment *result_seg; + } simple_test_vector [] = { + /* A zeroed DMA field should fail */ + { 0, NULL }, + /* One TRB before the ring start should fail */ + { xhci->event_ring->first_seg->dma - 16, NULL }, + /* One byte before the ring start should fail */ + { xhci->event_ring->first_seg->dma - 1, NULL }, + /* Starting TRB should succeed */ + { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, + /* Ending TRB should succeed */ + { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, + xhci->event_ring->first_seg }, + /* One byte after the ring end should fail */ + { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, + /* One TRB after the ring end should fail */ + { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, + /* An address of all ones should fail */ + { (dma_addr_t) (~0), NULL }, + }; + struct { + struct xhci_segment *input_seg; + union xhci_trb *start_trb; + union xhci_trb *end_trb; + dma_addr_t input_dma; + struct xhci_segment *result_seg; + } complex_test_vector [] = { + /* Test feeding a valid DMA address from a different ring */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = xhci->event_ring->first_seg->trbs, + .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + .input_dma = xhci->cmd_ring->first_seg->dma, + .result_seg = NULL, + }, + /* Test feeding a valid end TRB from a different ring */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = xhci->event_ring->first_seg->trbs, + .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + .input_dma = xhci->cmd_ring->first_seg->dma, + .result_seg = NULL, + }, + /* Test feeding a valid start and end TRB from a different ring */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = xhci->cmd_ring->first_seg->trbs, + .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + .input_dma = xhci->cmd_ring->first_seg->dma, + .result_seg = NULL, + }, + /* TRB in this ring, but after this TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[0], + .end_trb = &xhci->event_ring->first_seg->trbs[3], + .input_dma = xhci->event_ring->first_seg->dma + 4*16, + .result_seg = NULL, + }, + /* TRB in this ring, but before this TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[3], + .end_trb = &xhci->event_ring->first_seg->trbs[6], + .input_dma = xhci->event_ring->first_seg->dma + 2*16, + .result_seg = NULL, + }, + /* TRB in this ring, but after this wrapped TD */ + { .input_seg = xhci->event_ring->first_seg, + 
.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], + .end_trb = &xhci->event_ring->first_seg->trbs[1], + .input_dma = xhci->event_ring->first_seg->dma + 2*16, + .result_seg = NULL, + }, + /* TRB in this ring, but before this wrapped TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], + .end_trb = &xhci->event_ring->first_seg->trbs[1], + .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, + .result_seg = NULL, + }, + /* TRB not in this ring, and we have a wrapped TD */ + { .input_seg = xhci->event_ring->first_seg, + .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], + .end_trb = &xhci->event_ring->first_seg->trbs[1], + .input_dma = xhci->cmd_ring->first_seg->dma + 2*16, + .result_seg = NULL, + }, + }; + + unsigned int num_tests; + int i, ret; + + num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]); + for (i = 0; i < num_tests; i++) { + ret = xhci_test_trb_in_td(xhci, + xhci->event_ring->first_seg, + xhci->event_ring->first_seg->trbs, + &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], + simple_test_vector[i].input_dma, + simple_test_vector[i].result_seg, + "Simple", i); + if (ret < 0) + return ret; + } + + num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]); + for (i = 0; i < num_tests; i++) { + ret = xhci_test_trb_in_td(xhci, + complex_test_vector[i].input_seg, + complex_test_vector[i].start_trb, + complex_test_vector[i].end_trb, + complex_test_vector[i].input_dma, + complex_test_vector[i].result_seg, + "Complex", i); + if (ret < 0) + return ret; + } + xhci_dbg(xhci, "TRB math tests passed.\n"); + return 0; +} + + int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { dma_addr_t dma; @@ -951,6 +1172,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); if (!xhci->event_ring) goto fail; + if (xhci_check_trb_in_td_math(xhci, flags) < 0) + goto fail; xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 06595ec27bb7..e097008d6fb1 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -54,6 +54,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd) struct pci_dev *pdev = to_pci_dev(hcd->self.controller); int retval; + hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1; + xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 821b7b4709de..ee7bc7ecbc59 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -306,7 +306,7 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci, /* Don't ring the doorbell for this endpoint if there are pending * cancellations because the we don't want to interrupt processing. 
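With the cancels_pending counter gone, the doorbell gate below keys off EP_HALT_PENDING alongside the existing flags. A tiny sketch of the gate; the bit values are illustrative, not the driver's definitions:

#include <stdbool.h>

#define EP_HALT_PENDING (1u << 0)    /* stop-endpoint command queued */
#define SET_DEQ_PENDING (1u << 1)    /* dequeue-pointer move queued */
#define EP_HALTED       (1u << 2)    /* endpoint halted by an error */

/* Kick the hardware only when the endpoint is actually runnable. */
static bool should_ring_doorbell(unsigned int ep_state)
{
    return !(ep_state & (EP_HALT_PENDING | SET_DEQ_PENDING | EP_HALTED));
}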
*/ - if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING) + if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING) && !(ep_state & EP_HALTED)) { field = xhci_readl(xhci, db_addr) & DB_MASK; xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); @@ -475,6 +475,35 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, ep->ep_state |= SET_DEQ_PENDING; } +static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, + struct xhci_virt_ep *ep) +{ + ep->ep_state &= ~EP_HALT_PENDING; + /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the + * timer is running on another CPU, we don't decrement stop_cmds_pending + * (since we didn't successfully stop the watchdog timer). + */ + if (del_timer(&ep->stop_cmd_timer)) + ep->stop_cmds_pending--; +} + +/* Must be called with xhci->lock held in interrupt context */ +static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, + struct xhci_td *cur_td, int status, char *adjective) +{ + struct usb_hcd *hcd = xhci_to_hcd(xhci); + + cur_td->urb->hcpriv = NULL; + usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb); + xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb); + + spin_unlock(&xhci->lock); + usb_hcd_giveback_urb(hcd, cur_td->urb, status); + kfree(cur_td); + spin_lock(&xhci->lock); + xhci_dbg(xhci, "%s URB given back\n", adjective); +} + /* * When we get a command completion for a Stop Endpoint Command, we need to * unlink any cancelled TDs from the ring. There are two ways to do that: @@ -497,9 +526,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, struct xhci_td *last_unlinked_td; struct xhci_dequeue_state deq_state; -#ifdef CONFIG_USB_HCD_STAT - ktime_t stop_time = ktime_get(); -#endif memset(&deq_state, 0, sizeof(deq_state)); slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); @@ -507,8 +533,11 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, ep = &xhci->devs[slot_id]->eps[ep_index]; ep_ring = ep->ring; - if (list_empty(&ep->cancelled_td_list)) + if (list_empty(&ep->cancelled_td_list)) { + xhci_stop_watchdog_timer_in_irq(xhci, ep); + ring_ep_doorbell(xhci, slot_id, ep_index); return; + } /* Fix up the ep ring first, so HW stops executing cancelled TDs. * We have the xHCI lock, so nothing can modify this list until we drop @@ -535,9 +564,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, * the cancelled TD list for URB completion later. */ list_del(&cur_td->td_list); - ep->cancels_pending--; } last_unlinked_td = cur_td; + xhci_stop_watchdog_timer_in_irq(xhci, ep); /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { @@ -561,27 +590,136 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, list_del(&cur_td->cancelled_td_list); /* Clean up the cancelled URB */ -#ifdef CONFIG_USB_HCD_STAT - hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length, - ktime_sub(stop_time, cur_td->start_time)); -#endif - cur_td->urb->hcpriv = NULL; - usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb); - - xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb); - spin_unlock(&xhci->lock); /* Doesn't matter what we pass for status, since the core will * just overwrite it (because the URB has been unlinked). */ - usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0); - kfree(cur_td); + xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled"); - spin_lock(&xhci->lock); + /* Stop processing the cancelled list if the watchdog timer is + * running. 
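xhci_stop_watchdog_timer_in_irq() above relies on del_timer()'s return value: it is nonzero only when a pending timer was actually deactivated, so the pending-command count is decremented only in that case and a watchdog handler that is already running keeps an accurate view. A reduced sketch of that bookkeeping; the structure and the try_cancel callback are stand-ins for the real timer API:

/*
 * try_cancel() plays the role of del_timer(): it returns 1 only if it
 * deactivated a timer that had not fired yet, 0 if the callback has
 * already run or is running on another CPU.
 */
struct ep_cancel_state {
    int stop_cmds_pending;    /* stop commands whose watchdog may fire */
    int halt_pending;         /* a stop command is still outstanding */
};

static void cancel_stop_watchdog(struct ep_cancel_state *ep,
                                 int (*try_cancel)(void))
{
    ep->halt_pending = 0;
    /*
     * Only forget a command whose watchdog was truly stopped; if the
     * timer already fired, its handler does the decrement itself.
     */
    if (try_cancel())
        ep->stop_cmds_pending--;
}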
+ */ + if (xhci->xhc_state & XHCI_STATE_DYING) + return; } while (cur_td != last_unlinked_td); /* Return to the event handler with xhci->lock re-acquired */ } +/* Watchdog timer function for when a stop endpoint command fails to complete. + * In this case, we assume the host controller is broken or dying or dead. The + * host may still be completing some other events, so we have to be careful to + * let the event ring handler and the URB dequeueing/enqueueing functions know + * through xhci->state. + * + * The timer may also fire if the host takes a very long time to respond to the + * command, and the stop endpoint command completion handler cannot delete the + * timer before the timer function is called. Another endpoint cancellation may + * sneak in before the timer function can grab the lock, and that may queue + * another stop endpoint command and add the timer back. So we cannot use a + * simple flag to say whether there is a pending stop endpoint command for a + * particular endpoint. + * + * Instead we use a combination of that flag and a counter for the number of + * pending stop endpoint commands. If the timer is the tail end of the last + * stop endpoint command, and the endpoint's command is still pending, we assume + * the host is dying. + */ +void xhci_stop_endpoint_command_watchdog(unsigned long arg) +{ + struct xhci_hcd *xhci; + struct xhci_virt_ep *ep; + struct xhci_virt_ep *temp_ep; + struct xhci_ring *ring; + struct xhci_td *cur_td; + int ret, i, j; + + ep = (struct xhci_virt_ep *) arg; + xhci = ep->xhci; + + spin_lock(&xhci->lock); + + ep->stop_cmds_pending--; + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " + "xHCI as DYING, exiting.\n"); + spin_unlock(&xhci->lock); + return; + } + if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { + xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " + "exiting.\n"); + spin_unlock(&xhci->lock); + return; + } + + xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n"); + xhci_warn(xhci, "Assuming host is dying, halting host.\n"); + /* Oops, HC is dead or dying or at least not responding to the stop + * endpoint command. + */ + xhci->xhc_state |= XHCI_STATE_DYING; + /* Disable interrupts from the host controller and start halting it */ + xhci_quiesce(xhci); + spin_unlock(&xhci->lock); + + ret = xhci_halt(xhci); + + spin_lock(&xhci->lock); + if (ret < 0) { + /* This is bad; the host is not responding to commands and it's + * not allowing itself to be halted. At least interrupts are + * disabled, so we can set HC_STATE_HALT and notify the + * USB core. But if we call usb_hc_died(), it will attempt to + * disconnect all device drivers under this host. Those + * disconnect() methods will wait for all URBs to be unlinked, + * so we must complete them. + */ + xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n"); + xhci_warn(xhci, "Completing active URBs anyway.\n"); + /* We could turn all TDs on the rings to no-ops. This won't + * help if the host has cached part of the ring, and is slow if + * we want to preserve the cycle bit. Skip it and hope the host + * doesn't touch the memory. 
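The check near the top of the watchdog lets it declare the host dead only when it is handling the tail end of the last pending stop-endpoint command and that command is still outstanding, which is why the comment argues a bare flag is not enough. A condensed sketch of that decision with illustrative names:

#include <stdbool.h>

struct ep_watchdog_state {
    int pending;           /* stop commands whose watchdog may still fire */
    bool halt_pending;     /* a stop-endpoint command is outstanding */
};

/*
 * Called when a stop-endpoint watchdog fires; returns true only when
 * this really was the command's last chance to complete.
 */
static bool watchdog_declares_host_dead(struct ep_watchdog_state *ep)
{
    ep->pending--;
    return ep->pending == 0 && ep->halt_pending;
}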
+ */ + } + for (i = 0; i < MAX_HC_SLOTS; i++) { + if (!xhci->devs[i]) + continue; + for (j = 0; j < 31; j++) { + temp_ep = &xhci->devs[i]->eps[j]; + ring = temp_ep->ring; + if (!ring) + continue; + xhci_dbg(xhci, "Killing URBs for slot ID %u, " + "ep index %u\n", i, j); + while (!list_empty(&ring->td_list)) { + cur_td = list_first_entry(&ring->td_list, + struct xhci_td, + td_list); + list_del(&cur_td->td_list); + if (!list_empty(&cur_td->cancelled_td_list)) + list_del(&cur_td->cancelled_td_list); + xhci_giveback_urb_in_irq(xhci, cur_td, + -ESHUTDOWN, "killed"); + } + while (!list_empty(&temp_ep->cancelled_td_list)) { + cur_td = list_first_entry( + &temp_ep->cancelled_td_list, + struct xhci_td, + cancelled_td_list); + list_del(&cur_td->cancelled_td_list); + xhci_giveback_urb_in_irq(xhci, cur_td, + -ESHUTDOWN, "killed"); + } + } + } + spin_unlock(&xhci->lock); + xhci_to_hcd(xhci)->state = HC_STATE_HALT; + xhci_dbg(xhci, "Calling usb_hc_died()\n"); + usb_hc_died(xhci_to_hcd(xhci)); + xhci_dbg(xhci, "xHCI host controller is dead.\n"); +} + /* * When we get a completion for a Set Transfer Ring Dequeue Pointer command, * we need to clear the set deq pending flag in the endpoint ring state, so that @@ -765,28 +903,32 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, virt_dev->in_ctx); /* Input ctx add_flags are the endpoint index plus one */ ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; - if (!ep_ring) { - /* This must have been an initial configure endpoint */ - xhci->devs[slot_id]->cmd_status = - GET_COMP_CODE(event->status); - complete(&xhci->devs[slot_id]->cmd_completion); - break; - } - ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; - xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, " - "state = %d\n", ep_index, ep_state); + /* A usb_set_interface() call directly after clearing a halted + * condition may race on this quirky hardware. + * Not worth worrying about, since this is prototype hardware. + */ if (xhci->quirks & XHCI_RESET_EP_QUIRK && - ep_state & EP_HALTED) { + ep_index != (unsigned int) -1 && + ctrl_ctx->add_flags - SLOT_FLAG == + ctrl_ctx->drop_flags) { + ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; + ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; + if (!(ep_state & EP_HALTED)) + goto bandwidth_change; + xhci_dbg(xhci, "Completed config ep cmd - " + "last ep index = %d, state = %d\n", + ep_index, ep_state); /* Clear our internal halted state and restart ring */ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; ring_ep_doorbell(xhci, slot_id, ep_index); - } else { - xhci->devs[slot_id]->cmd_status = - GET_COMP_CODE(event->status); - complete(&xhci->devs[slot_id]->cmd_completion); + break; } +bandwidth_change: + xhci_dbg(xhci, "Completed config ep cmd\n"); + xhci->devs[slot_id]->cmd_status = + GET_COMP_CODE(event->status); + complete(&xhci->devs[slot_id]->cmd_completion); break; case TRB_TYPE(TRB_EVAL_CONTEXT): virt_dev = xhci->devs[slot_id]; @@ -849,8 +991,7 @@ static void handle_port_status(struct xhci_hcd *xhci, * TRB in this TD, this function returns that TRB's segment. Otherwise it * returns 0. 
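Only trb_in_td()'s linkage changes here (it stops being static) so the new self-test above can call it; the question it answers is whether a DMA address lands on one of the TRBs between the start and end of a TD, bearing in mind that a TD may wrap past the end of its segment. A single-segment simplification of that check, using plain integers instead of dma_addr_t and 16-byte TRBs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Is 'suspect' one of the TRBs between start and end (inclusive)?
 * If end < start the TD wraps past the segment boundary, so a match
 * is "at or after start, or at or before end" instead of "between".
 */
static bool dma_in_td(uint64_t start, uint64_t end, uint64_t suspect)
{
    if (start <= end)
        return suspect >= start && suspect <= end;
    return suspect >= start || suspect <= end;
}

int main(void)
{
    uint64_t seg = 0x1000;    /* segment base address */

    /* TD spanning TRBs 3..6 of the segment: TRB 4 hits, TRB 7 misses. */
    printf("%d %d\n",
           dma_in_td(seg + 3 * 16, seg + 6 * 16, seg + 4 * 16),
           dma_in_td(seg + 3 * 16, seg + 6 * 16, seg + 7 * 16));
    return 0;
}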
 */
-static struct xhci_segment *trb_in_td(
- struct xhci_segment *start_seg,
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 union xhci_trb *start_trb,
 union xhci_trb *end_trb,
 dma_addr_t suspect_dma)
@@ -900,6 +1041,45 @@ static struct xhci_segment *trb_in_td(
 return 0;
 }
+static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_td *td, union xhci_trb *event_trb)
+{
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep->ep_state |= EP_HALTED;
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+ xhci_ring_cmd_db(xhci);
+}
+
+/* Check if an error has halted the endpoint ring. The class driver will
+ * clean up the halt for a non-default control endpoint if we indicate a stall.
+ * However, a babble and other errors also halt the endpoint ring, and the class
+ * driver won't clear the halt in that case, so we need to issue a Set Transfer
+ * Ring Dequeue Pointer command manually.
+ */
+static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ unsigned int trb_comp_code)
+{
+ /* TRB completion codes that may require a manual halt cleanup */
+ if (trb_comp_code == COMP_TX_ERR ||
+ trb_comp_code == COMP_BABBLE ||
+ trb_comp_code == COMP_SPLIT_ERR)
+ /* The 0.95 spec says a babbling control endpoint
+ * is not halted. The 0.96 spec says it is. Some HW
+ * claims to be 0.95 compliant, but it halts the control
+ * endpoint anyway. Check if a babble halted the
+ * endpoint.
+ */
+ if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
+ return 1;
+
+ return 0;
+}
+
 /*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1002,6 +1182,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
 status = -EILSEQ;
 break;
+ case COMP_SPLIT_ERR:
 case COMP_TX_ERR:
 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
 status = -EPROTO;
 break;
@@ -1015,6 +1196,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 status = -ENOSR;
 break;
 default:
+ if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+ /* Vendor defined "informational" completion code,
+ * treat as not-an-error.
+ */
+ xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+ trb_comp_code);
+ xhci_dbg(xhci, "Treating code as success.\n");
+ status = 0;
+ break;
+ }
 xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
 urb = NULL;
 goto cleanup;
@@ -1043,15 +1234,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 else
 status = 0;
 break;
- case COMP_BABBLE:
- /* The 0.96 spec says a babbling control endpoint
- * is not halted. The 0.96 spec says it is. Some HW
- * claims to be 0.95 compliant, but it halts the control
- * endpoint anyway. Check if a babble halted the
- * endpoint.
- */
- if (ep_ctx->ep_info != EP_STATE_HALTED)
+
+ default:
+ if (!xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code))
 break;
+ xhci_dbg(xhci, "TRB error code %u, "
+ "halted endpoint index = %u\n",
+ trb_comp_code, ep_index);
 /* else fall through */
 case COMP_STALL:
 /* Did we transfer part of the data (middle) phase?
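 * If the event TRB is neither the TRB at the ring's dequeue pointer
 * nor td->last_trb, part of the data stage did complete, and the
 * completed length is presumably recovered from the residue the event
 * reports, along the lines of:
 *
 *	td->urb->actual_length = td->urb->transfer_buffer_length -
 *			TRB_LEN(event->transfer_len);
 *
 * otherwise actual_length is left at 0, as below.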
*/ @@ -1063,15 +1253,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, else td->urb->actual_length = 0; - ep->stopped_td = td; - ep->stopped_trb = event_trb; - xhci_queue_reset_ep(xhci, slot_id, ep_index); - xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); - xhci_ring_cmd_db(xhci); + xhci_cleanup_halted_endpoint(xhci, + slot_id, ep_index, td, event_trb); goto td_cleanup; - default: - /* Others already handled above */ - break; } /* * Did we transfer any data, despite the errors that might have @@ -1209,16 +1393,25 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep->stopped_td = td; ep->stopped_trb = event_trb; } else { - if (trb_comp_code == COMP_STALL || - trb_comp_code == COMP_BABBLE) { + if (trb_comp_code == COMP_STALL) { /* The transfer is completed from the driver's * perspective, but we need to issue a set dequeue * command for this stalled endpoint to move the dequeue * pointer past the TD. We can't do that here because - * the halt condition must be cleared first. + * the halt condition must be cleared first. Let the + * USB class driver clear the stall later. */ ep->stopped_td = td; ep->stopped_trb = event_trb; + } else if (xhci_requires_manual_halt_cleanup(xhci, + ep_ctx, trb_comp_code)) { + /* Other types of errors halt the endpoint, but the + * class driver doesn't call usb_reset_endpoint() unless + * the error is -EPIPE. Clear the halted status in the + * xHCI hardware manually. + */ + xhci_cleanup_halted_endpoint(xhci, + slot_id, ep_index, td, event_trb); } else { /* Update ring dequeue pointer */ while (ep_ring->dequeue != td->last_trb) @@ -1249,10 +1442,9 @@ td_cleanup: } list_del(&td->td_list); /* Was this TD slated to be cancelled but completed anyway? */ - if (!list_empty(&td->cancelled_td_list)) { + if (!list_empty(&td->cancelled_td_list)) list_del(&td->cancelled_td_list); - ep->cancels_pending--; - } + /* Leave the TD around for the reset endpoint function to use * (but only if it's not a control endpoint, since we already * queued the Set TR dequeue pointer command for stalled @@ -1331,6 +1523,14 @@ void xhci_handle_event(struct xhci_hcd *xhci) default: xhci->error_bitmask |= 1 << 3; } + /* Any of the above functions may drop and re-acquire the lock, so check + * to make sure a watchdog timer didn't mark the host as non-responsive. + */ + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "xHCI host dying, returning from " + "event handler.\n"); + return; + } if (update_ptrs) { /* Update SW and HC event ring dequeue pointer */ @@ -1555,6 +1755,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); } +/* + * The TD size is the number of bytes remaining in the TD (including this TRB), + * right shifted by 10. + * It must fit in bits 21:17, so it can't be bigger than 31. 
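+ * For example (illustrative values only): with 20480 bytes left in the
+ * TD, 20480 >> 10 = 20, so the field is 20 << 17; with 70000 bytes
+ * left, 70000 >> 10 = 68, which exceeds 31, so the field saturates at
+ * 31 << 17.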
+ */ +static u32 xhci_td_remainder(unsigned int remainder) +{ + u32 max = (1 << (21 - 17 + 1)) - 1; + + if ((remainder >> 10) >= max) + return max << 17; + else + return (remainder >> 10) << 17; +} + static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index) { @@ -1612,6 +1827,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, do { u32 field = 0; u32 length_field = 0; + u32 remainder = 0; /* Don't change the cycle bit of the first TRB until later */ if (first_trb) @@ -1641,8 +1857,10 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), (unsigned int) addr + trb_buff_len); } + remainder = xhci_td_remainder(urb->transfer_buffer_length - + running_total) ; length_field = TRB_LEN(trb_buff_len) | - TD_REMAINDER(urb->transfer_buffer_length - running_total) | + remainder | TRB_INTR_TARGET(0); queue_trb(xhci, ep_ring, false, lower_32_bits(addr), @@ -1755,6 +1973,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* Queue the first TRB, even if it's zero-length */ do { + u32 remainder = 0; field = 0; /* Don't change the cycle bit of the first TRB until later */ @@ -1773,8 +1992,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, td->last_trb = ep_ring->enqueue; field |= TRB_IOC; } + remainder = xhci_td_remainder(urb->transfer_buffer_length - + running_total); length_field = TRB_LEN(trb_buff_len) | - TD_REMAINDER(urb->transfer_buffer_length - running_total) | + remainder | TRB_INTR_TARGET(0); queue_trb(xhci, ep_ring, false, lower_32_bits(addr), @@ -1862,7 +2083,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* If there's data, queue data TRBs */ field = 0; length_field = TRB_LEN(urb->transfer_buffer_length) | - TD_REMAINDER(urb->transfer_buffer_length) | + xhci_td_remainder(urb->transfer_buffer_length) | TRB_INTR_TARGET(0); if (urb->transfer_buffer_length > 0) { if (setup->bRequestType & USB_DIR_IN) diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 4b254b6fa245..877813505ef2 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -652,13 +652,17 @@ struct xhci_virt_ep { struct xhci_ring *new_ring; unsigned int ep_state; #define SET_DEQ_PENDING (1 << 0) -#define EP_HALTED (1 << 1) +#define EP_HALTED (1 << 1) /* For stall handling */ +#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */ /* ---- Related to URB cancellation ---- */ struct list_head cancelled_td_list; - unsigned int cancels_pending; /* The TRB that was last reported in a stopped endpoint ring */ union xhci_trb *stopped_trb; struct xhci_td *stopped_td; + /* Watchdog timer for stop endpoint command to cancel URBs */ + struct timer_list stop_cmd_timer; + int stop_cmds_pending; + struct xhci_hcd *xhci; }; struct xhci_virt_device { @@ -673,6 +677,10 @@ struct xhci_virt_device { struct xhci_container_ctx *out_ctx; /* Used for addressing devices and configuration changes */ struct xhci_container_ctx *in_ctx; + /* Rings saved to ensure old alt settings can be re-instated */ + struct xhci_ring **ring_cache; + int num_rings_cached; +#define XHCI_MAX_RINGS_CACHED 31 struct xhci_virt_ep eps[31]; struct completion cmd_completion; /* Status of the last command issued for this device */ @@ -824,9 +832,6 @@ struct xhci_event_cmd { /* Normal TRB fields */ /* transfer_len bitmasks - bits 0:16 */ #define TRB_LEN(p) ((p) & 0x1ffff) -/* TD size - number of bytes remaining in the TD (including 
this TRB): - * bits 17 - 21. Shift the number of bytes by 10. */ -#define TD_REMAINDER(p) ((((p) >> 10) & 0x1f) << 17) /* Interrupter Target - which MSI-X vector to target the completion event at */ #define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) #define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) @@ -1022,6 +1027,8 @@ struct xhci_scratchpad { #define ERST_ENTRIES 1 /* Poll every 60 seconds */ #define POLL_TIMEOUT 60 +/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */ +#define XHCI_STOP_EP_CMD_TIMEOUT 5 /* XXX: Make these module parameters */ @@ -1083,6 +1090,21 @@ struct xhci_hcd { struct timer_list event_ring_timer; int zombie; #endif + /* Host controller watchdog timer structures */ + unsigned int xhc_state; +/* Host controller is dying - not responding to commands. "I'm not dead yet!" + * + * xHC interrupts have been disabled and a watchdog timer will (or has already) + * halt the xHCI host, and complete all URBs with an -ESHUTDOWN code. Any code + * that sees this status (other than the timer that set it) should stop touching + * hardware immediately. Interrupt handlers should return immediately when + * they see this status (any time they drop and re-acquire xhci->lock). + * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without + * putting the TD on the canceled list, etc. + * + * There are no reports of xHCI host controllers that display this issue. + */ +#define XHCI_STATE_DYING (1 << 0) /* Statistics */ int noops_submitted; int noops_handled; @@ -1223,6 +1245,7 @@ void xhci_unregister_pci(void); #endif /* xHCI host controller glue */ +void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_reset(struct xhci_hcd *xhci); int xhci_init(struct usb_hcd *hcd); @@ -1246,6 +1269,9 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); /* xHCI ring, segment, TRB, and TD functions */ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); +struct xhci_segment *trb_in_td(struct xhci_segment *start_seg, + union xhci_trb *start_trb, union xhci_trb *end_trb, + dma_addr_t suspect_dma); void xhci_ring_cmd_db(struct xhci_hcd *xhci); void *xhci_setup_one_noop(struct xhci_hcd *xhci); void xhci_handle_event(struct xhci_hcd *xhci); @@ -1278,6 +1304,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state); +void xhci_stop_endpoint_command_watchdog(unsigned long arg); /* xHCI roothub code */ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, |