Diffstat (limited to 'drivers/usb')
99 files changed, 4850 insertions, 3443 deletions
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 69426e644d17..3dbb4a21ab44 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) if (!ci) return -ENOMEM; + spin_lock_init(&ci->lock); ci->dev = dev; ci->platdata = dev_get_platdata(dev); ci->imx28_write_fix = !!(ci->platdata->flags & diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index a9b07befd398..cf132f057137 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -821,7 +821,7 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req, } if (usb_endpoint_xfer_isoc(hwep->ep.desc) && - hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) { + hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) { dev_err(hwep->ci->dev, "request length too big for isochronous\n"); return -EMSGSIZE; } @@ -1253,8 +1253,8 @@ static int ep_enable(struct usb_ep *ep, hwep->num = usb_endpoint_num(desc); hwep->type = usb_endpoint_type(desc); - hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff; - hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc)); + hwep->ep.maxpacket = usb_endpoint_maxp(desc); + hwep->ep.mult = usb_endpoint_maxp_mult(desc); if (hwep->type == USB_ENDPOINT_XFER_CONTROL) cap |= QH_IOS; @@ -1889,8 +1889,6 @@ static int udc_start(struct ci_hdrc *ci) struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; int retval = 0; - spin_lock_init(&ci->lock); - ci->gadget.ops = &usb_gadget_ops; ci->gadget.speed = USB_SPEED_UNKNOWN; ci->gadget.max_speed = USB_SPEED_HIGH; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 5112c2f88d3a..d053075a8444 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -133,8 +133,8 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value, buf, len, 5000); dev_dbg(&acm->control->dev, - "%s - rq 0x%02x, val %#x, len %#x, result %d\n", - __func__, request, value, len, retval); + "%s - rq 0x%02x, val %#x, len %#x, result %d\n", + __func__, request, value, len, retval); usb_autopm_put_interface(acm->control); @@ -158,6 +158,17 @@ static inline int acm_set_control(struct acm *acm, int control) #define acm_send_break(acm, ms) \ acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0) +static void acm_kill_urbs(struct acm *acm) +{ + int i; + + usb_kill_urb(acm->ctrlurb); + for (i = 0; i < ACM_NW; i++) + usb_kill_urb(acm->wb[i].urb); + for (i = 0; i < acm->rx_buflimit; i++) + usb_kill_urb(acm->read_urbs[i]); +} + /* * Write buffer management. * All of these assume proper locks taken by the caller. 
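The chipidea hunks above replace open-coded wMaxPacketSize masking (the "& 0x07ff" and QH_ISO_MULT() arithmetic) with the ch9 helpers usb_endpoint_maxp() and usb_endpoint_maxp_mult(). As a rough illustration of the arithmetic those helpers are expected to perform after this series (a small user-space sketch, not the kernel implementation): bits 10:0 of wMaxPacketSize carry the packet size, bits 12:11 the number of additional transactions per microframe.

	/* sketch only: mirrors the decode usb_endpoint_maxp()/_maxp_mult() do */
	#include <stdint.h>
	#include <stdio.h>

	static unsigned int ep_maxp(uint16_t wMaxPacketSize)
	{
		return wMaxPacketSize & 0x07ff;		/* packet size in bytes */
	}

	static unsigned int ep_maxp_mult(uint16_t wMaxPacketSize)
	{
		return 1 + ((wMaxPacketSize >> 11) & 0x3);	/* 1..3 transactions */
	}

	int main(void)
	{
		uint16_t wmaxp = 0x1400;	/* 1024 bytes, 2 additional transactions */

		printf("payload per microframe: %u\n",
		       ep_maxp(wmaxp) * ep_maxp_mult(wmaxp));	/* prints 3072 */
		return 0;
	}

The same substitution shows up again further down in devices.c, endpoint.c and urb.c, where the hand-rolled shifts are dropped in favour of the helpers.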
@@ -291,13 +302,13 @@ static void acm_ctrl_irq(struct urb *urb) case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&acm->control->dev, - "%s - urb shutting down with status: %d\n", - __func__, status); + "%s - urb shutting down with status: %d\n", + __func__, status); return; default: dev_dbg(&acm->control->dev, - "%s - nonzero urb status received: %d\n", - __func__, status); + "%s - nonzero urb status received: %d\n", + __func__, status); goto exit; } @@ -306,16 +317,16 @@ static void acm_ctrl_irq(struct urb *urb) data = (unsigned char *)(dr + 1); switch (dr->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: - dev_dbg(&acm->control->dev, "%s - network connection: %d\n", - __func__, dr->wValue); + dev_dbg(&acm->control->dev, + "%s - network connection: %d\n", __func__, dr->wValue); break; case USB_CDC_NOTIFY_SERIAL_STATE: newctrl = get_unaligned_le16(data); if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { - dev_dbg(&acm->control->dev, "%s - calling hangup\n", - __func__); + dev_dbg(&acm->control->dev, + "%s - calling hangup\n", __func__); tty_port_tty_hangup(&acm->port, false); } @@ -357,8 +368,8 @@ static void acm_ctrl_irq(struct urb *urb) exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval && retval != -EPERM) - dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n", - __func__, retval); + dev_err(&acm->control->dev, + "%s - usb_submit_urb failed: %d\n", __func__, retval); } static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) @@ -372,8 +383,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) if (res) { if (res != -EPERM) { dev_err(&acm->data->dev, - "urb %d failed submission with %d\n", - index, res); + "urb %d failed submission with %d\n", + index, res); } set_bit(index, &acm->read_urbs_free); return res; @@ -416,30 +427,43 @@ static void acm_read_bulk_callback(struct urb *urb) int status = urb->status; dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", - rb->index, urb->actual_length, - status); + rb->index, urb->actual_length, status); + + set_bit(rb->index, &acm->read_urbs_free); if (!acm->dev) { - set_bit(rb->index, &acm->read_urbs_free); dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__); return; } - if (status) { - set_bit(rb->index, &acm->read_urbs_free); - if ((status != -ENOENT) || (urb->actual_length == 0)) - return; + switch (status) { + case 0: + usb_mark_last_busy(acm->dev); + acm_process_read_urb(acm, urb); + break; + case -EPIPE: + set_bit(EVENT_RX_STALL, &acm->flags); + schedule_work(&acm->work); + return; + case -ENOENT: + case -ECONNRESET: + case -ESHUTDOWN: + dev_dbg(&acm->data->dev, + "%s - urb shutting down with status: %d\n", + __func__, status); + return; + default: + dev_dbg(&acm->data->dev, + "%s - nonzero urb status received: %d\n", + __func__, status); + break; } - usb_mark_last_busy(acm->dev); - - acm_process_read_urb(acm, urb); /* * Unthrottle may run on another CPU which needs to see events * in the same order. 
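The reworked read completion path above maps -EPIPE onto an EVENT_RX_STALL flag and defers the actual recovery to acm_softint() in the next hunk, because usb_clear_halt() issues a blocking control transfer and cannot be called from URB completion context. A condensed sketch of that pattern, using illustrative names (my_dev, MY_EVENT_RX_STALL) rather than the driver's own:

	#include <linux/usb.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct usb_device *udev;
		struct urb *read_urb;
		unsigned int in_pipe;
		unsigned long flags;
	#define MY_EVENT_RX_STALL	0
		struct work_struct work;
	};

	static void my_read_callback(struct urb *urb)
	{
		struct my_dev *dev = urb->context;

		if (urb->status == -EPIPE) {		/* endpoint stalled */
			set_bit(MY_EVENT_RX_STALL, &dev->flags);
			schedule_work(&dev->work);	/* defer blocking recovery */
			return;
		}
		/* normal data handling and resubmission elided */
	}

	static void my_softint(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev, work);

		if (test_and_clear_bit(MY_EVENT_RX_STALL, &dev->flags)) {
			usb_kill_urb(dev->read_urb);		  /* quiesce first */
			usb_clear_halt(dev->udev, dev->in_pipe);  /* may sleep */
			usb_submit_urb(dev->read_urb, GFP_KERNEL); /* error handling elided */
		}
	}

The real acm_softint() additionally takes an autopm reference and kills every read URB before clearing the halt; the sketch elides that for brevity.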
Submission has an implict barrier */ smp_mb__before_atomic(); - set_bit(rb->index, &acm->read_urbs_free); /* throttle device if requested by tty */ spin_lock_irqsave(&acm->read_lock, flags); @@ -469,14 +493,30 @@ static void acm_write_bulk(struct urb *urb) spin_lock_irqsave(&acm->write_lock, flags); acm_write_done(acm, wb); spin_unlock_irqrestore(&acm->write_lock, flags); + set_bit(EVENT_TTY_WAKEUP, &acm->flags); schedule_work(&acm->work); } static void acm_softint(struct work_struct *work) { + int i; struct acm *acm = container_of(work, struct acm, work); - tty_port_tty_wakeup(&acm->port); + if (test_bit(EVENT_RX_STALL, &acm->flags)) { + if (!(usb_autopm_get_interface(acm->data))) { + for (i = 0; i < acm->rx_buflimit; i++) + usb_kill_urb(acm->read_urbs[i]); + usb_clear_halt(acm->dev, acm->in); + acm_submit_read_urbs(acm, GFP_KERNEL); + usb_autopm_put_interface(acm->data); + } + clear_bit(EVENT_RX_STALL, &acm->flags); + } + + if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) { + tty_port_tty_wakeup(&acm->port); + clear_bit(EVENT_TTY_WAKEUP, &acm->flags); + } } /* @@ -608,7 +648,6 @@ static void acm_port_shutdown(struct tty_port *port) struct acm *acm = container_of(port, struct acm, port); struct urb *urb; struct acm_wb *wb; - int i; /* * Need to grab write_lock to prevent race with resume, but no need to @@ -630,11 +669,7 @@ static void acm_port_shutdown(struct tty_port *port) usb_autopm_put_interface_async(acm->control); } - usb_kill_urb(acm->ctrlurb); - for (i = 0; i < ACM_NW; i++) - usb_kill_urb(acm->wb[i].urb); - for (i = 0; i < acm->rx_buflimit; i++) - usb_kill_urb(acm->read_urbs[i]); + acm_kill_urbs(acm); } static void acm_tty_cleanup(struct tty_struct *tty) @@ -837,8 +872,8 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state) retval = acm_send_break(acm, state ? 0xffff : 0); if (retval < 0) - dev_dbg(&acm->control->dev, "%s - send break failed\n", - __func__); + dev_dbg(&acm->control->dev, + "%s - send break failed\n", __func__); return retval; } @@ -929,8 +964,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg) DECLARE_WAITQUEUE(wait, current); struct async_icount old, new; - if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)) - return -EINVAL; do { spin_lock_irq(&acm->read_lock); old = acm->oldcount; @@ -1079,19 +1112,17 @@ static void acm_write_buffers_free(struct acm *acm) { int i; struct acm_wb *wb; - struct usb_device *usb_dev = interface_to_usbdev(acm->control); for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) - usb_free_coherent(usb_dev, acm->writesize, wb->buf, wb->dmah); + usb_free_coherent(acm->dev, acm->writesize, wb->buf, wb->dmah); } static void acm_read_buffers_free(struct acm *acm) { - struct usb_device *usb_dev = interface_to_usbdev(acm->control); int i; for (i = 0; i < acm->rx_buflimit; i++) - usb_free_coherent(usb_dev, acm->readsize, + usb_free_coherent(acm->dev, acm->readsize, acm->read_buffers[i].base, acm->read_buffers[i].dma); } @@ -1150,6 +1181,8 @@ static int acm_probe(struct usb_interface *intf, if (quirks == IGNORE_DEVICE) return -ENODEV; + memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header)); + num_rx_buf = (quirks == SINGLE_RX_URB) ? 
1 : ACM_NR; /* handle quirks deadly to normal probing*/ @@ -1334,9 +1367,16 @@ made_compressed_probe: spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); mutex_init(&acm->mutex); - acm->is_int_ep = usb_endpoint_xfer_int(epread); - if (acm->is_int_ep) + if (usb_endpoint_xfer_int(epread)) { acm->bInterval = epread->bInterval; + acm->in = usb_rcvintpipe(usb_dev, epread->bEndpointAddress); + } else { + acm->in = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); + } + if (usb_endpoint_xfer_int(epwrite)) + acm->out = usb_sndintpipe(usb_dev, epwrite->bEndpointAddress); + else + acm->out = usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress); tty_port_init(&acm->port); acm->port.ops = &acm_port_ops; init_usb_anchor(&acm->delayed); @@ -1371,20 +1411,15 @@ made_compressed_probe: urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; urb->transfer_dma = rb->dma; - if (acm->is_int_ep) { - usb_fill_int_urb(urb, acm->dev, - usb_rcvintpipe(usb_dev, epread->bEndpointAddress), - rb->base, + if (usb_endpoint_xfer_int(epread)) + usb_fill_int_urb(urb, acm->dev, acm->in, rb->base, acm->readsize, acm_read_bulk_callback, rb, acm->bInterval); - } else { - usb_fill_bulk_urb(urb, acm->dev, - usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress), - rb->base, + else + usb_fill_bulk_urb(urb, acm->dev, acm->in, rb->base, acm->readsize, acm_read_bulk_callback, rb); - } acm->read_urbs[i] = urb; __set_bit(i, &acm->read_urbs_free); @@ -1397,12 +1432,10 @@ made_compressed_probe: goto alloc_fail7; if (usb_endpoint_xfer_int(epwrite)) - usb_fill_int_urb(snd->urb, usb_dev, - usb_sndintpipe(usb_dev, epwrite->bEndpointAddress), + usb_fill_int_urb(snd->urb, usb_dev, acm->out, NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval); else - usb_fill_bulk_urb(snd->urb, usb_dev, - usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), + usb_fill_bulk_urb(snd->urb, usb_dev, acm->out, NULL, acm->writesize, acm_write_bulk, snd); snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (quirks & SEND_ZERO_PACKET) @@ -1474,8 +1507,8 @@ skip_countries: } if (quirks & CLEAR_HALT_CONDITIONS) { - usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress)); - usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress)); + usb_clear_halt(usb_dev, acm->in); + usb_clear_halt(usb_dev, acm->out); } return 0; @@ -1509,25 +1542,10 @@ alloc_fail: return rv; } -static void stop_data_traffic(struct acm *acm) -{ - int i; - - usb_kill_urb(acm->ctrlurb); - for (i = 0; i < ACM_NW; i++) - usb_kill_urb(acm->wb[i].urb); - for (i = 0; i < acm->rx_buflimit; i++) - usb_kill_urb(acm->read_urbs[i]); - - cancel_work_sync(&acm->work); -} - static void acm_disconnect(struct usb_interface *intf) { struct acm *acm = usb_get_intfdata(intf); - struct usb_device *usb_dev = interface_to_usbdev(intf); struct tty_struct *tty; - int i; /* sibling interface is already cleaning up */ if (!acm) @@ -1553,17 +1571,13 @@ static void acm_disconnect(struct usb_interface *intf) tty_kref_put(tty); } - stop_data_traffic(acm); + acm_kill_urbs(acm); + cancel_work_sync(&acm->work); tty_unregister_device(acm_tty_driver, acm->minor); - usb_free_urb(acm->ctrlurb); - for (i = 0; i < ACM_NW; i++) - usb_free_urb(acm->wb[i].urb); - for (i = 0; i < acm->rx_buflimit; i++) - usb_free_urb(acm->read_urbs[i]); acm_write_buffers_free(acm); - usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); + usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); acm_read_buffers_free(acm); if (!acm->combined_interfaces) @@ -1592,7 
+1606,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) if (cnt) return 0; - stop_data_traffic(acm); + acm_kill_urbs(acm); + cancel_work_sync(&acm->work); return 0; } @@ -1646,6 +1661,15 @@ static int acm_reset_resume(struct usb_interface *intf) #endif /* CONFIG_PM */ +static int acm_pre_reset(struct usb_interface *intf) +{ + struct acm *acm = usb_get_intfdata(intf); + + clear_bit(EVENT_RX_STALL, &acm->flags); + + return 0; +} + #define NOKIA_PCSUITE_ACM_INFO(x) \ USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ @@ -1887,6 +1911,7 @@ static struct usb_driver acm_driver = { .resume = acm_resume, .reset_resume = acm_reset_resume, #endif + .pre_reset = acm_pre_reset, .id_table = acm_ids, #ifdef CONFIG_PM .supports_autosuspend = 1, diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 1f1eabfd8462..c980f11cdf56 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -83,6 +83,7 @@ struct acm { struct usb_device *dev; /* the corresponding usb device */ struct usb_interface *control; /* control interface */ struct usb_interface *data; /* data interface */ + unsigned in, out; /* i/o pipes */ struct tty_port port; /* our tty port data */ struct urb *ctrlurb; /* urbs */ u8 *ctrl_buffer; /* buffers of urbs */ @@ -102,6 +103,9 @@ struct acm { spinlock_t write_lock; struct mutex mutex; bool disconnected; + unsigned long flags; +# define EVENT_TTY_WAKEUP 0 +# define EVENT_RX_STALL 1 struct usb_cdc_line_coding line; /* bits, stop, parity */ struct work_struct work; /* work queue entry for line discipline waking up */ unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ @@ -116,7 +120,6 @@ struct acm { unsigned int ctrl_caps; /* control capabilities from the class specific header */ unsigned int susp_count; /* number of suspended interfaces */ unsigned int combined_interfaces:1; /* control and data collapsed */ - unsigned int is_int_ep:1; /* interrupt endpoints contrary to spec used */ unsigned int throttled:1; /* actually throttled */ unsigned int throttle_req:1; /* throttle requested */ u8 bInterval; diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index ef04b50e6bbb..f2987ddb1cde 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -182,14 +182,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, dir = usb_endpoint_dir_in(desc) ? 
'I' : 'O'; - if (speed == USB_SPEED_HIGH) { - switch (usb_endpoint_maxp(desc) & (0x03 << 11)) { - case 1 << 11: - bandwidth = 2; break; - case 2 << 11: - bandwidth = 3; break; - } - } + if (speed == USB_SPEED_HIGH) + bandwidth = usb_endpoint_maxp_mult(desc); /* this isn't checking for illegal values */ switch (usb_endpoint_type(desc)) { @@ -233,7 +227,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, start += sprintf(start, format_endpt, desc->bEndpointAddress, dir, desc->bmAttributes, type, - (usb_endpoint_maxp(desc) & 0x07ff) * + usb_endpoint_maxp(desc) * bandwidth, interval, unit); return start; diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c index b73b25bd1541..a60bc830a056 100644 --- a/drivers/usb/core/endpoint.c +++ b/drivers/usb/core/endpoint.c @@ -52,8 +52,7 @@ static ssize_t wMaxPacketSize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ep_device *ep = to_ep_device(dev); - return sprintf(buf, "%04x\n", - usb_endpoint_maxp(ep->desc) & 0x07ff); + return sprintf(buf, "%04x\n", usb_endpoint_maxp(ep->desc)); } static DEVICE_ATTR_RO(wMaxPacketSize); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 76e80d8657d2..71bf1c7635de 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -103,6 +103,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); static void hub_release(struct kref *kref); static int usb_reset_and_verify_device(struct usb_device *udev); +static void hub_usb3_port_prepare_disable(struct usb_hub *hub, + struct usb_port *port_dev); static inline char *portspeed(struct usb_hub *hub, int portstatus) { @@ -901,82 +903,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1, } /* - * If USB 3.0 ports are placed into the Disabled state, they will no longer - * detect any device connects or disconnects. This is generally not what the - * USB core wants, since it expects a disabled port to produce a port status - * change event when a new device connects. - * - * Instead, set the link state to Disabled, wait for the link to settle into - * that state, clear any change bits, and then put the port into the RxDetect - * state. + * USB-3 does not have a similar link state as USB-2 that will avoid negotiating + * a connection with a plugged-in cable but will signal the host when the cable + * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices */ -static int hub_usb3_port_disable(struct usb_hub *hub, int port1) -{ - int ret; - int total_time; - u16 portchange, portstatus; - - if (!hub_is_superspeed(hub->hdev)) - return -EINVAL; - - ret = hub_port_status(hub, port1, &portstatus, &portchange); - if (ret < 0) - return ret; - - /* - * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI - * Controller [1022:7814] will have spurious result making the following - * usb 3.0 device hotplugging route to the 2.0 root hub and recognized - * as high-speed device if we set the usb 3.0 port link state to - * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we - * check the state here to avoid the bug. - */ - if ((portstatus & USB_PORT_STAT_LINK_STATE) == - USB_SS_PORT_LS_RX_DETECT) { - dev_dbg(&hub->ports[port1 - 1]->dev, - "Not disabling port; link state is RxDetect\n"); - return ret; - } - - ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); - if (ret) - return ret; - - /* Wait for the link to enter the disabled state. 
*/ - for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { - ret = hub_port_status(hub, port1, &portstatus, &portchange); - if (ret < 0) - return ret; - - if ((portstatus & USB_PORT_STAT_LINK_STATE) == - USB_SS_PORT_LS_SS_DISABLED) - break; - if (total_time >= HUB_DEBOUNCE_TIMEOUT) - break; - msleep(HUB_DEBOUNCE_STEP); - } - if (total_time >= HUB_DEBOUNCE_TIMEOUT) - dev_warn(&hub->ports[port1 - 1]->dev, - "Could not disable after %d ms\n", total_time); - - return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT); -} - static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) { struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *hdev = hub->hdev; int ret = 0; - if (port_dev->child && set_state) - usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); if (!hub->error) { - if (hub_is_superspeed(hub->hdev)) - ret = hub_usb3_port_disable(hub, port1); - else + if (hub_is_superspeed(hub->hdev)) { + hub_usb3_port_prepare_disable(hub, port_dev); + ret = hub_set_port_link_state(hub, port_dev->portnum, + USB_SS_PORT_LS_U3); + } else { ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE); + } } + if (port_dev->child && set_state) + usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); if (ret && ret != -ENODEV) dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret); return ret; @@ -4142,6 +4090,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev) } EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); +/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */ +static void hub_usb3_port_prepare_disable(struct usb_hub *hub, + struct usb_port *port_dev) +{ + struct usb_device *udev = port_dev->child; + int ret; + + if (udev && udev->port_is_suspended && udev->do_remote_wakeup) { + ret = hub_set_port_link_state(hub, port_dev->portnum, + USB_SS_PORT_LS_U0); + if (!ret) { + msleep(USB_RESUME_TIMEOUT); + ret = usb_disable_remote_wakeup(udev); + } + if (ret) + dev_warn(&udev->dev, + "Port disable: can't disable remote wake\n"); + udev->do_remote_wakeup = 0; + } +} #else /* CONFIG_PM */ @@ -4149,6 +4117,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); #define hub_resume NULL #define hub_reset_resume NULL +static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub, + struct usb_port *port_dev) { } + int usb_disable_lpm(struct usb_device *udev) { return 0; diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 0be49a1e3e66..d75cb8c0f7df 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -412,11 +412,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) } /* "high bandwidth" mode, 1-3 packets/uframe? 
*/ - if (dev->speed == USB_SPEED_HIGH) { - int mult = 1 + ((max >> 11) & 0x03); - max &= 0x07ff; - max *= mult; - } + if (dev->speed == USB_SPEED_HIGH) + max *= usb_endpoint_maxp_mult(&ep->desc); if (urb->number_of_packets <= 0) return -EINVAL; diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile index 50fdaace1e73..b9237e1e45d0 100644 --- a/drivers/usb/dwc2/Makefile +++ b/drivers/usb/dwc2/Makefile @@ -3,6 +3,7 @@ ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG obj-$(CONFIG_USB_DWC2) += dwc2.o dwc2-y := core.o core_intr.o platform.o +dwc2-y += params.o ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),) dwc2-y += hcd.o hcd_intr.o diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index 4c0fa0b17353..11d8ae9aead1 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -135,7 +135,7 @@ int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore) u32 pcgcctl; int ret = 0; - if (!hsotg->core_params->hibernation) + if (!hsotg->params.hibernation) return -ENOTSUPP; pcgcctl = dwc2_readl(hsotg->regs + PCGCTL); @@ -188,7 +188,7 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg) u32 pcgcctl; int ret = 0; - if (!hsotg->core_params->hibernation) + if (!hsotg->params.hibernation) return -ENOTSUPP; /* Backup all registers */ @@ -445,7 +445,7 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host) * the force mode. We only need to call this once during probe if * dr_mode == OTG. */ -static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg) +void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg) { u32 gusbcfg; @@ -541,7 +541,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg) addr = hsotg->regs + HAINTMSK; dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { addr = hsotg->regs + HFLBADDR; dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); @@ -551,7 +551,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); - for (i = 0; i < hsotg->core_params->host_channels; i++) { + for (i = 0; i < hsotg->params.host_channels; i++) { dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i); addr = hsotg->regs + HCCHAR(i); dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n", @@ -571,7 +571,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg) addr = hsotg->regs + HCDMA(i); dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { addr = hsotg->regs + HCDMAB(i); dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n", (unsigned long)addr, dwc2_readl(addr)); @@ -735,704 +735,13 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg) udelay(1); } -#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) - -/* Parameter access functions */ -void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - switch (val) { - case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: - if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) - valid = 0; - break; - case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: - switch (hsotg->hw_params.op_mode) { - case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: - case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: - break; - default: - valid = 0; - 
break; - } - break; - case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: - /* always valid */ - break; - default: - valid = 0; - break; - } - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for otg_cap parameter. Check HW configuration.\n", - val); - switch (hsotg->hw_params.op_mode) { - case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: - val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; - break; - case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: - case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: - val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; - break; - default: - val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; - break; - } - dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); - } - - hsotg->core_params->otg_cap = val; -} - -void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH) - valid = 0; - if (val < 0) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for dma_enable parameter. Check HW configuration.\n", - val); - val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH; - dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val); - } - - hsotg->core_params->dma_enable = val; -} - -void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val > 0 && (hsotg->core_params->dma_enable <= 0 || - !hsotg->hw_params.dma_desc_enable)) - valid = 0; - if (val < 0) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for dma_desc_enable parameter. Check HW configuration.\n", - val); - val = (hsotg->core_params->dma_enable > 0 && - hsotg->hw_params.dma_desc_enable); - dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); - } - - hsotg->core_params->dma_desc_enable = val; -} - -void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val > 0 && (hsotg->core_params->dma_enable <= 0 || - !hsotg->hw_params.dma_desc_enable)) - valid = 0; - if (val < 0) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n", - val); - val = (hsotg->core_params->dma_enable > 0 && - hsotg->hw_params.dma_desc_enable); - } - - hsotg->core_params->dma_desc_fs_enable = val; - dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val); -} - -void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, - int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "Wrong value for host_support_fs_low_power\n"); - dev_err(hsotg->dev, - "host_support_fs_low_power must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, - "Setting host_support_fs_low_power to %d\n", val); - } - - hsotg->core_params->host_support_fs_ls_low_power = val; -} - -void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) - valid = 0; - if (val < 0) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for enable_dynamic_fifo parameter. 
Check HW configuration.\n", - val); - val = hsotg->hw_params.enable_dynamic_fifo; - dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); - } - - hsotg->core_params->enable_dynamic_fifo = val; -} - -void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_rx_fifo_size. Check HW configuration.\n", - val); - val = hsotg->hw_params.host_rx_fifo_size; - dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); - } - - hsotg->core_params->host_rx_fifo_size = val; -} - -void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", - val); - val = hsotg->hw_params.host_nperio_tx_fifo_size; - dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", - val); - } - - hsotg->core_params->host_nperio_tx_fifo_size = val; -} - -void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n", - val); - val = hsotg->hw_params.host_perio_tx_fifo_size; - dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", - val); - } - - hsotg->core_params->host_perio_tx_fifo_size = val; -} - -void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 2047 || val > hsotg->hw_params.max_transfer_size) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for max_transfer_size. Check HW configuration.\n", - val); - val = hsotg->hw_params.max_transfer_size; - dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); - } - - hsotg->core_params->max_transfer_size = val; -} - -void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 15 || val > hsotg->hw_params.max_packet_count) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for max_packet_count. Check HW configuration.\n", - val); - val = hsotg->hw_params.max_packet_count; - dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); - } - - hsotg->core_params->max_packet_count = val; -} - -void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (val < 1 || val > hsotg->hw_params.host_channels) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_channels. 
Check HW configuration.\n", - val); - val = hsotg->hw_params.host_channels; - dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); - } - - hsotg->core_params->host_channels = val; -} - -void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 0; - u32 hs_phy_type, fs_phy_type; - - if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, - DWC2_PHY_TYPE_PARAM_ULPI)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for phy_type\n"); - dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); - } - - valid = 0; - } - - hs_phy_type = hsotg->hw_params.hs_phy_type; - fs_phy_type = hsotg->hw_params.fs_phy_type; - if (val == DWC2_PHY_TYPE_PARAM_UTMI && - (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || - hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) - valid = 1; - else if (val == DWC2_PHY_TYPE_PARAM_ULPI && - (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || - hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) - valid = 1; - else if (val == DWC2_PHY_TYPE_PARAM_FS && - fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) - valid = 1; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for phy_type. Check HW configuration.\n", - val); - val = DWC2_PHY_TYPE_PARAM_FS; - if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { - if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || - hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) - val = DWC2_PHY_TYPE_PARAM_UTMI; - else - val = DWC2_PHY_TYPE_PARAM_ULPI; - } - dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val); - } - - hsotg->core_params->phy_type = val; -} - -static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg) -{ - return hsotg->core_params->phy_type; -} - -void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for speed parameter\n"); - dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n"); - } - valid = 0; - } - - if (val == DWC2_SPEED_PARAM_HIGH && - dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for speed parameter. Check HW configuration.\n", - val); - val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ? - DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; - dev_dbg(hsotg->dev, "Setting speed to %d\n", val); - } - - hsotg->core_params->speed = val; -} - -void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ, - DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) { - if (val >= 0) { - dev_err(hsotg->dev, - "Wrong value for host_ls_low_power_phy_clk parameter\n"); - dev_err(hsotg->dev, - "host_ls_low_power_phy_clk must be 0 or 1\n"); - } - valid = 0; - } - - if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && - dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", - val); - val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS - ? 
DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ - : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; - dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", - val); - } - - hsotg->core_params->host_ls_low_power_phy_clk = val; -} - -void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); - dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val); - } - - hsotg->core_params->phy_ulpi_ddr = val; -} - -void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "Wrong value for phy_ulpi_ext_vbus\n"); - dev_err(hsotg->dev, - "phy_ulpi_ext_vbus must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); - } - - hsotg->core_params->phy_ulpi_ext_vbus = val; -} - -void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 0; - - switch (hsotg->hw_params.utmi_phy_data_width) { - case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: - valid = (val == 8); - break; - case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: - valid = (val == 16); - break; - case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: - valid = (val == 8 || val == 16); - break; - } - - if (!valid) { - if (val >= 0) { - dev_err(hsotg->dev, - "%d invalid for phy_utmi_width. Check HW configuration.\n", - val); - } - val = (hsotg->hw_params.utmi_phy_data_width == - GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16; - dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); - } - - hsotg->core_params->phy_utmi_width = val; -} - -void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); - dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); - } - - hsotg->core_params->ulpi_fs_ls = val; -} - -void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for ts_dline\n"); - dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); - } - - hsotg->core_params->ts_dline = val; -} - -void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); - dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); - } - - valid = 0; - } - - if (val == 1 && !(hsotg->hw_params.i2c_enable)) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for i2c_enable. Check HW configuration.\n", - val); - val = hsotg->hw_params.i2c_enable; - dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val); - } - - hsotg->core_params->i2c_enable = val; -} - -void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "Wrong value for en_multiple_tx_fifo,\n"); - dev_err(hsotg->dev, - "en_multiple_tx_fifo must be 0 or 1\n"); - } - valid = 0; - } - - if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for parameter en_multiple_tx_fifo. 
Check HW configuration.\n", - val); - val = hsotg->hw_params.en_multiple_tx_fifo; - dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val); - } - - hsotg->core_params->en_multiple_tx_fifo = val; -} - -void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val) -{ - int valid = 1; - - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter reload_ctl\n", val); - dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n"); - } - valid = 0; - } - - if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a) - valid = 0; - - if (!valid) { - if (val >= 0) - dev_err(hsotg->dev, - "%d invalid for parameter reload_ctl. Check HW configuration.\n", - val); - val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a; - dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val); - } - - hsotg->core_params->reload_ctl = val; -} - -void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val) -{ - if (val != -1) - hsotg->core_params->ahbcfg = val; - else - hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 << - GAHBCFG_HBSTLEN_SHIFT; -} - -void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter otg_ver\n", val); - dev_err(hsotg->dev, - "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val); - } - - hsotg->core_params->otg_ver = val; -} - -static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter uframe_sched\n", - val); - dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n"); - } - val = 1; - dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val); - } - - hsotg->core_params->uframe_sched = val; -} - -static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg, - int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter external_id_pin_ctl\n", - val); - dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val); - } - - hsotg->core_params->external_id_pin_ctl = val; -} - -static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg, - int val) -{ - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { - if (val >= 0) { - dev_err(hsotg->dev, - "'%d' invalid for parameter hibernation\n", - val); - dev_err(hsotg->dev, "hibernation must be 0 or 1\n"); - } - val = 0; - dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val); - } - - hsotg->core_params->hibernation = val; -} - -/* - * This function is called during module intialization to pass module parameters - * for the DWC_otg core. 
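All of the dwc2_set_param_*() helpers being removed here (they move into the new params.c added in the Makefile hunk) follow the same validate-or-fall-back shape: reject values outside what the hardware supports, warn only if the caller passed an explicit value, and substitute a safe default. A condensed, illustrative sketch of that shape (not the code that lands in params.c):

	#include <linux/device.h>

	#define MY_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))

	/* illustrative: clamp a 0/1 parameter, falling back to 'def' */
	static void my_set_bool_param(struct device *dev, int *param, int val,
				      int def, const char *name)
	{
		if (MY_OUT_OF_BOUNDS(val, 0, 1)) {
			if (val >= 0)	/* negative means "autodetect", stay quiet */
				dev_err(dev, "%d invalid for %s, must be 0 or 1\n",
					val, name);
			val = def;
			dev_dbg(dev, "Setting %s to %d\n", name, val);
		}
		*param = val;
	}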
- */ -void dwc2_set_parameters(struct dwc2_hsotg *hsotg, - const struct dwc2_core_params *params) -{ - dev_dbg(hsotg->dev, "%s()\n", __func__); - - dwc2_set_param_otg_cap(hsotg, params->otg_cap); - dwc2_set_param_dma_enable(hsotg, params->dma_enable); - dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); - dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable); - dwc2_set_param_host_support_fs_ls_low_power(hsotg, - params->host_support_fs_ls_low_power); - dwc2_set_param_enable_dynamic_fifo(hsotg, - params->enable_dynamic_fifo); - dwc2_set_param_host_rx_fifo_size(hsotg, - params->host_rx_fifo_size); - dwc2_set_param_host_nperio_tx_fifo_size(hsotg, - params->host_nperio_tx_fifo_size); - dwc2_set_param_host_perio_tx_fifo_size(hsotg, - params->host_perio_tx_fifo_size); - dwc2_set_param_max_transfer_size(hsotg, - params->max_transfer_size); - dwc2_set_param_max_packet_count(hsotg, - params->max_packet_count); - dwc2_set_param_host_channels(hsotg, params->host_channels); - dwc2_set_param_phy_type(hsotg, params->phy_type); - dwc2_set_param_speed(hsotg, params->speed); - dwc2_set_param_host_ls_low_power_phy_clk(hsotg, - params->host_ls_low_power_phy_clk); - dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr); - dwc2_set_param_phy_ulpi_ext_vbus(hsotg, - params->phy_ulpi_ext_vbus); - dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width); - dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls); - dwc2_set_param_ts_dline(hsotg, params->ts_dline); - dwc2_set_param_i2c_enable(hsotg, params->i2c_enable); - dwc2_set_param_en_multiple_tx_fifo(hsotg, - params->en_multiple_tx_fifo); - dwc2_set_param_reload_ctl(hsotg, params->reload_ctl); - dwc2_set_param_ahbcfg(hsotg, params->ahbcfg); - dwc2_set_param_otg_ver(hsotg, params->otg_ver); - dwc2_set_param_uframe_sched(hsotg, params->uframe_sched); - dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl); - dwc2_set_param_hibernation(hsotg, params->hibernation); -} - /* * Forces either host or device mode if the controller is not * currently in that mode. * * Returns true if the mode was forced. */ -static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host) +bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host) { if (host && dwc2_is_host_mode(hsotg)) return false; @@ -1442,232 +751,9 @@ static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host) return dwc2_force_mode(hsotg, host); } -/* - * Gets host hardware parameters. Forces host mode if not currently in - * host mode. Should be called immediately after a core soft reset in - * order to get the reset values. - */ -static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg) -{ - struct dwc2_hw_params *hw = &hsotg->hw_params; - u32 gnptxfsiz; - u32 hptxfsiz; - bool forced; - - if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) - return; - - forced = dwc2_force_mode_if_needed(hsotg, true); - - gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); - hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); - dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); - dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); - - if (forced) - dwc2_clear_force_mode(hsotg); - - hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; - hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; -} - -/* - * Gets device hardware parameters. Forces device mode if not - * currently in device mode. Should be called immediately after a core - * soft reset in order to get the reset values. 
- */ -static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) -{ - struct dwc2_hw_params *hw = &hsotg->hw_params; - bool forced; - u32 gnptxfsiz; - - if (hsotg->dr_mode == USB_DR_MODE_HOST) - return; - - forced = dwc2_force_mode_if_needed(hsotg, false); - - gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); - dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); - - if (forced) - dwc2_clear_force_mode(hsotg); - - hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> - FIFOSIZE_DEPTH_SHIFT; -} - -/** - * During device initialization, read various hardware configuration - * registers and interpret the contents. - */ -int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) -{ - struct dwc2_hw_params *hw = &hsotg->hw_params; - unsigned width; - u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; - u32 grxfsiz; - - /* - * Attempt to ensure this device is really a DWC_otg Controller. - * Read and verify the GSNPSID register contents. The value should be - * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3", - * as in "OTG version 2.xx" or "OTG version 3.xx". - */ - hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID); - if ((hw->snpsid & 0xfffff000) != 0x4f542000 && - (hw->snpsid & 0xfffff000) != 0x4f543000) { - dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n", - hw->snpsid); - return -ENODEV; - } - - dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n", - hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, - hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); - - hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1); - hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); - hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3); - hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); - grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); - - dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1); - dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2); - dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3); - dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); - dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); - - /* - * Host specific hardware parameters. Reading these parameters - * requires the controller to be in host mode. The mode will - * be forced, if necessary, to read these values. 
- */ - dwc2_get_host_hwparams(hsotg); - dwc2_get_dev_hwparams(hsotg); - - /* hwcfg1 */ - hw->dev_ep_dirs = hwcfg1; - - /* hwcfg2 */ - hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >> - GHWCFG2_OP_MODE_SHIFT; - hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >> - GHWCFG2_ARCHITECTURE_SHIFT; - hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO); - hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >> - GHWCFG2_NUM_HOST_CHAN_SHIFT); - hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >> - GHWCFG2_HS_PHY_TYPE_SHIFT; - hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> - GHWCFG2_FS_PHY_TYPE_SHIFT; - hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >> - GHWCFG2_NUM_DEV_EP_SHIFT; - hw->nperio_tx_q_depth = - (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >> - GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1; - hw->host_perio_tx_q_depth = - (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >> - GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1; - hw->dev_token_q_depth = - (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >> - GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT; - - /* hwcfg3 */ - width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> - GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; - hw->max_transfer_size = (1 << (width + 11)) - 1; - width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> - GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; - hw->max_packet_count = (1 << (width + 4)) - 1; - hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C); - hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >> - GHWCFG3_DFIFO_DEPTH_SHIFT; - - /* hwcfg4 */ - hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); - hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> - GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; - hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); - hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); - hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> - GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT; - - /* fifo sizes */ - hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> - GRXFSIZ_DEPTH_SHIFT; - - dev_dbg(hsotg->dev, "Detected values from hardware:\n"); - dev_dbg(hsotg->dev, " op_mode=%d\n", - hw->op_mode); - dev_dbg(hsotg->dev, " arch=%d\n", - hw->arch); - dev_dbg(hsotg->dev, " dma_desc_enable=%d\n", - hw->dma_desc_enable); - dev_dbg(hsotg->dev, " power_optimized=%d\n", - hw->power_optimized); - dev_dbg(hsotg->dev, " i2c_enable=%d\n", - hw->i2c_enable); - dev_dbg(hsotg->dev, " hs_phy_type=%d\n", - hw->hs_phy_type); - dev_dbg(hsotg->dev, " fs_phy_type=%d\n", - hw->fs_phy_type); - dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n", - hw->utmi_phy_data_width); - dev_dbg(hsotg->dev, " num_dev_ep=%d\n", - hw->num_dev_ep); - dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n", - hw->num_dev_perio_in_ep); - dev_dbg(hsotg->dev, " host_channels=%d\n", - hw->host_channels); - dev_dbg(hsotg->dev, " max_transfer_size=%d\n", - hw->max_transfer_size); - dev_dbg(hsotg->dev, " max_packet_count=%d\n", - hw->max_packet_count); - dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n", - hw->nperio_tx_q_depth); - dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n", - hw->host_perio_tx_q_depth); - dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n", - hw->dev_token_q_depth); - dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n", - hw->enable_dynamic_fifo); - dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n", - hw->en_multiple_tx_fifo); - dev_dbg(hsotg->dev, " total_fifo_size=%d\n", - hw->total_fifo_size); - dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n", - hw->host_rx_fifo_size); - dev_dbg(hsotg->dev, " 
host_nperio_tx_fifo_size=%d\n", - hw->host_nperio_tx_fifo_size); - dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n", - hw->host_perio_tx_fifo_size); - dev_dbg(hsotg->dev, "\n"); - - return 0; -} - -/* - * Sets all parameters to the given value. - * - * Assumes that the dwc2_core_params struct contains only integers. - */ -void dwc2_set_all_params(struct dwc2_core_params *params, int value) -{ - int *p = (int *)params; - size_t size = sizeof(*params) / sizeof(*p); - int i; - - for (i = 0; i < size; i++) - p[i] = value; -} - - u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg) { - return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103; + return hsotg->params.otg_ver == 1 ? 0x0200 : 0x0103; } bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg) diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 2a21a0414b1d..9548d3e03453 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -172,6 +172,11 @@ struct dwc2_hsotg_req; * @periodic: Set if this is a periodic ep, such as Interrupt * @isochronous: Set if this is a isochronous ep * @send_zlp: Set if we need to send a zero-length packet. + * @desc_list_dma: The DMA address of descriptor chain currently in use. + * @desc_list: Pointer to descriptor DMA chain head currently in use. + * @desc_count: Count of entries within the DMA descriptor chain of EP. + * @isoc_chain_num: Number of ISOC chain currently in use - either 0 or 1. + * @next_desc: index of next free descriptor in the ISOC chain under SW control. * @total_data: The total number of data bytes done. * @fifo_size: The size of the FIFO (for periodic IN endpoints) * @fifo_load: The amount of data loaded into the FIFO (periodic IN) @@ -219,6 +224,13 @@ struct dwc2_hsotg_ep { #define TARGET_FRAME_INITIAL 0xFFFFFFFF bool frame_overrun; + dma_addr_t desc_list_dma; + struct dwc2_dma_desc *desc_list; + u8 desc_count; + + unsigned char isoc_chain_num; + unsigned int next_desc; + char name[10]; }; @@ -286,7 +298,7 @@ enum dwc2_ep0_state { * @otg_ver: OTG version supported * 0 - 1.3 (default) * 1 - 2.0 - * @dma_enable: Specifies whether to use slave or DMA mode for accessing + * @host_dma: Specifies whether to use slave or DMA mode for accessing * the data FIFOs. The driver will automatically detect the * value for this parameter if none is specified. * 0 - Slave (always available) @@ -314,7 +326,8 @@ enum dwc2_ep0_state { * @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters * 1 - Allow dynamic FIFO sizing (default, if available) * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs - * are enabled + * are enabled for non-periodic IN endpoints in device + * mode. * @host_rx_fifo_size: Number of 4-byte words in the Rx FIFO in host mode when * dynamic FIFO sizing is enabled * 16 to 32768 @@ -417,6 +430,20 @@ enum dwc2_ep0_state { * needed. * 0 - No (default) * 1 - Yes + * @g_dma: Enables gadget dma usage (default: autodetect). + * @g_dma_desc: Enables gadget descriptor DMA (default: autodetect). + * @g_rx_fifo_size: The periodic rx fifo size for the device, in + * DWORDS from 16-32768 (default: 2048 if + * possible, otherwise autodetect). + * @g_np_tx_fifo_size: The non-periodic tx fifo size for the device in + * DWORDS from 16-32768 (default: 1024 if + * possible, otherwise autodetect). + * @g_tx_fifo_size: An array of TX fifo sizes in dedicated fifo + * mode. Each value corresponds to one EP + * starting from EP1 (max 15 values). 
Sizes are + * in DWORDS with possible values from from + * 16-32768 (default: 256, 256, 256, 256, 768, + * 768, 768, 768, 0, 0, 0, 0, 0, 0, 0). * * The following parameters may be specified when starting the module. These * parameters define how the DWC_otg controller should be configured. A @@ -430,11 +457,18 @@ struct dwc2_core_params { * dwc2_set_all_params! */ int otg_cap; +#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0 +#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1 +#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 + int otg_ver; - int dma_enable; int dma_desc_enable; int dma_desc_fs_enable; int speed; +#define DWC2_SPEED_PARAM_HIGH 0 +#define DWC2_SPEED_PARAM_FULL 1 +#define DWC2_SPEED_PARAM_LOW 2 + int enable_dynamic_fifo; int en_multiple_tx_fifo; int host_rx_fifo_size; @@ -444,19 +478,44 @@ struct dwc2_core_params { int max_packet_count; int host_channels; int phy_type; +#define DWC2_PHY_TYPE_PARAM_FS 0 +#define DWC2_PHY_TYPE_PARAM_UTMI 1 +#define DWC2_PHY_TYPE_PARAM_ULPI 2 + int phy_utmi_width; int phy_ulpi_ddr; int phy_ulpi_ext_vbus; +#define DWC2_PHY_ULPI_INTERNAL_VBUS 0 +#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1 + int i2c_enable; int ulpi_fs_ls; int host_support_fs_ls_low_power; int host_ls_low_power_phy_clk; +#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 +#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 + int ts_dline; int reload_ctl; int ahbcfg; int uframe_sched; int external_id_pin_ctl; int hibernation; + + /* + * The following parameters are *only* set via device + * properties and cannot be set directly in this structure. + */ + + /* Host parameters */ + bool host_dma; + + /* Gadget parameters */ + bool g_dma; + bool g_dma_desc; + u16 g_rx_fifo_size; + u16 g_np_tx_fifo_size; + u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; }; /** @@ -516,10 +575,9 @@ struct dwc2_hw_params { unsigned op_mode:3; unsigned arch:2; unsigned dma_desc_enable:1; - unsigned dma_desc_fs_enable:1; unsigned enable_dynamic_fifo:1; unsigned en_multiple_tx_fifo:1; - unsigned host_rx_fifo_size:16; + unsigned rx_fifo_size:16; unsigned host_nperio_tx_fifo_size:16; unsigned dev_nperio_tx_fifo_size:16; unsigned host_perio_tx_fifo_size:16; @@ -839,11 +897,13 @@ struct dwc2_hregs_backup { * @ctrl_req: Request for EP0 control packets. 
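The new g_* members of struct dwc2_core_params are documented above as being set only via device properties. A hedged sketch of how such optional properties are typically picked up at probe time; the property names ("g-rx-fifo-size", "g-np-tx-fifo-size", "g-tx-fifo-size") follow the dwc2 devicetree binding as I recall it and should be checked against the binding document, and the struct is the one declared in drivers/usb/dwc2/core.h above:

	#include <linux/property.h>

	/* sketch: missing properties leave the built-in defaults untouched */
	static void my_read_gadget_params(struct device *dev,
					  struct dwc2_core_params *p)
	{
		u32 val;

		if (!device_property_read_u32(dev, "g-rx-fifo-size", &val))
			p->g_rx_fifo_size = val;
		if (!device_property_read_u32(dev, "g-np-tx-fifo-size", &val))
			p->g_np_tx_fifo_size = val;
		/* "g-tx-fifo-size" (one value per IN endpoint) would be read
		 * with device_property_read_u32_array() in the same way */
	}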
* @ep0_state: EP0 control transfers state * @test_mode: USB test mode requested by the host + * @setup_desc_dma: EP0 setup stage desc chain DMA address + * @setup_desc: EP0 setup stage desc chain pointer + * @ctrl_in_desc_dma: EP0 IN data phase desc chain DMA address + * @ctrl_in_desc: EP0 IN data phase desc chain pointer + * @ctrl_out_desc_dma: EP0 OUT data phase desc chain DMA address + * @ctrl_out_desc: EP0 OUT data phase desc chain pointer * @eps: The endpoints being supplied to the gadget framework - * @g_using_dma: Indicate if dma usage is enabled - * @g_rx_fifo_sz: Contains rx fifo size value - * @g_np_g_tx_fifo_sz: Contains Non-Periodic tx fifo size value - * @g_tx_fifo_sz: Contains tx fifo size value per endpoints */ struct dwc2_hsotg { struct device *dev; @@ -851,7 +911,7 @@ struct dwc2_hsotg { /** Params detected from hardware */ struct dwc2_hw_params hw_params; /** Params to actually use */ - struct dwc2_core_params *core_params; + struct dwc2_core_params params; enum usb_otg_state op_state; enum usb_dr_mode dr_mode; unsigned int hcd_enabled:1; @@ -891,6 +951,8 @@ struct dwc2_hsotg { #define DWC2_CORE_REV_2_94a 0x4f54294a #define DWC2_CORE_REV_3_00a 0x4f54300a #define DWC2_CORE_REV_3_10a 0x4f54310a +#define DWC2_FS_IOT_REV_1_00a 0x5531100a +#define DWC2_HS_IOT_REV_1_00a 0x5532100a #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) union dwc2_hcd_internal_flags { @@ -986,15 +1048,18 @@ struct dwc2_hsotg { enum dwc2_ep0_state ep0_state; u8 test_mode; + dma_addr_t setup_desc_dma[2]; + struct dwc2_dma_desc *setup_desc[2]; + dma_addr_t ctrl_in_desc_dma; + struct dwc2_dma_desc *ctrl_in_desc; + dma_addr_t ctrl_out_desc_dma; + struct dwc2_dma_desc *ctrl_out_desc; + struct usb_gadget gadget; unsigned int enabled:1; unsigned int connected:1; struct dwc2_hsotg_ep *eps_in[MAX_EPS_CHANNELS]; struct dwc2_hsotg_ep *eps_out[MAX_EPS_CHANNELS]; - u32 g_using_dma; - u32 g_rx_fifo_sz; - u32 g_np_g_tx_fifo_sz; - u32 g_tx_fifo_sz[MAX_EPS_CHANNELS]; #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ }; @@ -1016,6 +1081,22 @@ enum dwc2_halt_status { DWC2_HC_XFER_URB_DEQUEUE, }; +/* Core version information */ +static inline bool dwc2_is_iot(struct dwc2_hsotg *hsotg) +{ + return (hsotg->hw_params.snpsid & 0xfff00000) == 0x55300000; +} + +static inline bool dwc2_is_fs_iot(struct dwc2_hsotg *hsotg) +{ + return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55310000; +} + +static inline bool dwc2_is_hs_iot(struct dwc2_hsotg *hsotg) +{ + return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55320000; +} + /* * The following functions support initialization of the core driver component * and the DWC_otg controller @@ -1025,6 +1106,8 @@ extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg); extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg); extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore); +bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host); +void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg); void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg); extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg); @@ -1044,217 +1127,16 @@ extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd); /* This function should be called on every hardware interrupt. */ extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev); -/* OTG Core Parameters */ - -/* - * Specifies the OTG capabilities. The driver will automatically - * detect the value for this parameter if none is specified. 
- * 0 - HNP and SRP capable (default) - * 1 - SRP Only capable - * 2 - No HNP/SRP capable - */ -extern void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val); -#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0 -#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1 -#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 - -/* - * Specifies whether to use slave or DMA mode for accessing the data - * FIFOs. The driver will automatically detect the value for this - * parameter if none is specified. - * 0 - Slave - * 1 - DMA (default, if available) - */ -extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val); - -/* - * When DMA mode is enabled specifies whether to use - * address DMA or DMA Descritor mode for accessing the data - * FIFOs in device mode. The driver will automatically detect - * the value for this parameter if none is specified. - * 0 - address DMA - * 1 - DMA Descriptor(default, if available) - */ -extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val); - -/* - * When DMA mode is enabled specifies whether to use - * address DMA or DMA Descritor mode with full speed devices - * for accessing the data FIFOs in host mode. - * 0 - address DMA - * 1 - FS DMA Descriptor(default, if available) - */ -extern void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, - int val); - -/* - * Specifies the maximum speed of operation in host and device mode. - * The actual speed depends on the speed of the attached device and - * the value of phy_type. The actual speed depends on the speed of the - * attached device. - * 0 - High Speed (default) - * 1 - Full Speed - */ -extern void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val); -#define DWC2_SPEED_PARAM_HIGH 0 -#define DWC2_SPEED_PARAM_FULL 1 - -/* - * Specifies whether low power mode is supported when attached - * to a Full Speed or Low Speed device in host mode. - * - * 0 - Don't support low power mode (default) - * 1 - Support low power mode - */ -extern void dwc2_set_param_host_support_fs_ls_low_power( - struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies the PHY clock rate in low power mode when connected to a - * Low Speed device in host mode. This parameter is applicable only if - * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS - * then defaults to 6 MHZ otherwise 48 MHZ. - * - * 0 - 48 MHz - * 1 - 6 MHz - */ -extern void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, - int val); -#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 -#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 - -/* - * 0 - Use cC FIFO size parameters - * 1 - Allow dynamic FIFO sizing (default) - */ -extern void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, - int val); - -/* - * Number of 4-byte words in the Rx FIFO in host mode when dynamic - * FIFO sizing is enabled. - * 16 to 32768 (default 1024) - */ -extern void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val); - -/* - * Number of 4-byte words in the non-periodic Tx FIFO in host mode - * when Dynamic FIFO sizing is enabled in the core. - * 16 to 32768 (default 256) - */ -extern void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, - int val); - -/* - * Number of 4-byte words in the host periodic Tx FIFO when dynamic - * FIFO sizing is enabled. - * 16 to 32768 (default 256) - */ -extern void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, - int val); - -/* - * The maximum transfer size supported in bytes. 
- * 2047 to 65,535 (default 65,535) - */ -extern void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val); - -/* - * The maximum number of packets in a transfer. - * 15 to 511 (default 511) - */ -extern void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val); - -/* - * The number of host channel registers to use. - * 1 to 16 (default 11) - * Note: The FPGA configuration supports a maximum of 11 host channels. - */ -extern void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies the type of PHY interface to use. By default, the driver - * will automatically detect the phy_type. - * - * 0 - Full Speed PHY - * 1 - UTMI+ (default) - * 2 - ULPI - */ -extern void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val); -#define DWC2_PHY_TYPE_PARAM_FS 0 -#define DWC2_PHY_TYPE_PARAM_UTMI 1 -#define DWC2_PHY_TYPE_PARAM_ULPI 2 - -/* - * Specifies the UTMI+ Data Width. This parameter is - * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI - * PHY_TYPE, this parameter indicates the data width between - * the MAC and the ULPI Wrapper.) Also, this parameter is - * applicable only if the OTG_HSPHY_WIDTH cC parameter was set - * to "8 and 16 bits", meaning that the core has been - * configured to work at either data path width. - * - * 8 or 16 bits (default 16) - */ -extern void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies whether the ULPI operates at double or single - * data rate. This parameter is only applicable if PHY_TYPE is - * ULPI. - * - * 0 - single data rate ULPI interface with 8 bit wide data - * bus (default) - * 1 - double data rate ULPI interface with 4 bit wide data - * bus - */ -extern void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies whether to use the internal or external supply to - * drive the vbus with a ULPI phy. - */ -extern void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val); -#define DWC2_PHY_ULPI_INTERNAL_VBUS 0 -#define DWC2_PHY_ULPI_EXTERNAL_VBUS 1 - -/* - * Specifies whether to use the I2Cinterface for full speed PHY. This - * parameter is only applicable if PHY_TYPE is FS. 
- * 0 - No (default) - * 1 - Yes - */ -extern void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val); - -/* - * Specifies whether dedicated transmit FIFOs are - * enabled for non periodic IN endpoints in device mode - * 0 - No - * 1 - Yes - */ -extern void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, - int val); - -extern void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val); - -extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg, - const struct dwc2_core_params *params); - -extern void dwc2_set_all_params(struct dwc2_core_params *params, int value); - -extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg); +/* The device ID match table */ +extern const struct of_device_id dwc2_of_match_table[]; extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg); extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg); +/* Parameters */ +int dwc2_get_hwparams(struct dwc2_hsotg *hsotg); +int dwc2_init_params(struct dwc2_hsotg *hsotg); + /* * The following functions check the controller's OTG operation mode * capability (GHWCFG2.OTG_MODE). diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index d85c5c9f96c1..5b228ba6045f 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c @@ -159,9 +159,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg) " ++OTG Interrupt: Session Request Success Status Change++\n"); gotgctl = dwc2_readl(hsotg->regs + GOTGCTL); if (gotgctl & GOTGCTL_SESREQSCS) { - if (hsotg->core_params->phy_type == + if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS - && hsotg->core_params->i2c_enable > 0) { + && hsotg->params.i2c_enable > 0) { hsotg->srp_success = 1; } else { /* Clear Session Request */ @@ -370,7 +370,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) /* Change to L0 state */ hsotg->lx_state = DWC2_L0; } else { - if (hsotg->core_params->hibernation) + if (hsotg->params.hibernation) return; if (hsotg->lx_state != DWC2_L1) { diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c index 55d91f24f94a..0a130916a91c 100644 --- a/drivers/usb/dwc2/debugfs.c +++ b/drivers/usb/dwc2/debugfs.c @@ -213,7 +213,7 @@ static int fifo_show(struct seq_file *seq, void *v) val = dwc2_readl(regs + GNPTXFSIZ); seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n", val >> FIFOSIZE_DEPTH_SHIFT, - val & FIFOSIZE_DEPTH_MASK); + val & FIFOSIZE_STARTADDR_MASK); seq_puts(seq, "\nPeriodic TXFIFOs:\n"); diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 24fbebc9b409..b95930f20d90 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -93,7 +93,18 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg); */ static inline bool using_dma(struct dwc2_hsotg *hsotg) { - return hsotg->g_using_dma; + return hsotg->params.g_dma; +} + +/* + * using_desc_dma - return the descriptor DMA status of the driver. + * @hsotg: The driver state. + * + * Return true if we're using descriptor DMA. 
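The fifo_show() change above prints the FIFO start address with FIFOSIZE_STARTADDR_MASK rather than the depth mask. A stand-alone decode of a GNPTXFSIZ-style value shows why the two masks pick out different halves of the register; the 16/16-bit split used here is an assumption about the layout, the real masks live in hw.h.

#include <stdint.h>
#include <stdio.h>

/* Assumed register layout: FIFO depth in bits 31:16, start address in
 * bits 15:0. */
#define FIFOSIZE_DEPTH_SHIFT	16
#define FIFOSIZE_STARTADDR_MASK	0x0000ffffu

int main(void)
{
	/* e.g. a 1024-word non-periodic TX FIFO starting at word 0x300 */
	uint32_t gnptxfsiz = (1024u << FIFOSIZE_DEPTH_SHIFT) | 0x300u;

	printf("NPTXFIFO: Size %u, Start 0x%08x\n",
	       gnptxfsiz >> FIFOSIZE_DEPTH_SHIFT,
	       gnptxfsiz & FIFOSIZE_STARTADDR_MASK);
	return 0;
}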
+ */ +static inline bool using_desc_dma(struct dwc2_hsotg *hsotg) +{ + return hsotg->params.g_dma_desc; } /** @@ -190,16 +201,17 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) unsigned int addr; int timeout; u32 val; + u32 *txfsz = hsotg->params.g_tx_fifo_size; /* Reset fifo map if not correctly cleared during previous session */ WARN_ON(hsotg->fifo_map); hsotg->fifo_map = 0; /* set RX/NPTX FIFO sizes */ - dwc2_writel(hsotg->g_rx_fifo_sz, hsotg->regs + GRXFSIZ); - dwc2_writel((hsotg->g_rx_fifo_sz << FIFOSIZE_STARTADDR_SHIFT) | - (hsotg->g_np_g_tx_fifo_sz << FIFOSIZE_DEPTH_SHIFT), - hsotg->regs + GNPTXFSIZ); + dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ); + dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) | + (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT), + hsotg->regs + GNPTXFSIZ); /* * arange all the rest of the TX FIFOs, as some versions of this @@ -209,7 +221,7 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) */ /* start at the end of the GNPTXFSIZ, rounded up */ - addr = hsotg->g_rx_fifo_sz + hsotg->g_np_g_tx_fifo_sz; + addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size; /* * Configure fifos sizes from provided configuration and assign @@ -217,15 +229,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg) * given endpoint. */ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) { - if (!hsotg->g_tx_fifo_sz[ep]) + if (!txfsz[ep]) continue; val = addr; - val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT; - WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem, + val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT; + WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem, "insufficient fifo memory"); - addr += hsotg->g_tx_fifo_sz[ep]; + addr += txfsz[ep]; dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep)); + val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep)); } /* @@ -303,12 +316,55 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_req *hs_req) { struct usb_request *req = &hs_req->req; + usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); +} - /* ignore this if we're not moving any data */ - if (hs_req->req.length == 0) - return; +/* + * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains + * for Control endpoint + * @hsotg: The device state. + * + * This function will allocate 4 descriptor chains for EP 0: 2 for + * Setup stage, per one for IN and OUT data/status transactions. 
+ */ +static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg) +{ + hsotg->setup_desc[0] = + dmam_alloc_coherent(hsotg->dev, + sizeof(struct dwc2_dma_desc), + &hsotg->setup_desc_dma[0], + GFP_KERNEL); + if (!hsotg->setup_desc[0]) + goto fail; + + hsotg->setup_desc[1] = + dmam_alloc_coherent(hsotg->dev, + sizeof(struct dwc2_dma_desc), + &hsotg->setup_desc_dma[1], + GFP_KERNEL); + if (!hsotg->setup_desc[1]) + goto fail; + + hsotg->ctrl_in_desc = + dmam_alloc_coherent(hsotg->dev, + sizeof(struct dwc2_dma_desc), + &hsotg->ctrl_in_desc_dma, + GFP_KERNEL); + if (!hsotg->ctrl_in_desc) + goto fail; + + hsotg->ctrl_out_desc = + dmam_alloc_coherent(hsotg->dev, + sizeof(struct dwc2_dma_desc), + &hsotg->ctrl_out_desc_dma, + GFP_KERNEL); + if (!hsotg->ctrl_out_desc) + goto fail; - usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); + return 0; + +fail: + return -ENOMEM; } /** @@ -541,6 +597,273 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg) } /** + * dwc2_gadget_get_chain_limit - get the maximum data payload value of the + * DMA descriptor chain prepared for specific endpoint + * @hs_ep: The endpoint + * + * Return the maximum data that can be queued in one go on a given endpoint + * depending on its descriptor chain capacity so that transfers that + * are too long can be split. + */ +static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) +{ + int is_isoc = hs_ep->isochronous; + unsigned int maxsize; + + if (is_isoc) + maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : + DEV_DMA_ISOC_RX_NBYTES_LIMIT; + else + maxsize = DEV_DMA_NBYTES_LIMIT; + + /* Above size of one descriptor was chosen, multiple it */ + maxsize *= MAX_DMA_DESC_NUM_GENERIC; + + return maxsize; +} + +/* + * dwc2_gadget_get_desc_params - get DMA descriptor parameters. + * @hs_ep: The endpoint + * @mask: RX/TX bytes mask to be defined + * + * Returns maximum data payload for one descriptor after analyzing endpoint + * characteristics. + * DMA descriptor transfer bytes limit depends on EP type: + * Control out - MPS, + * Isochronous - descriptor rx/tx bytes bitfield limit, + * Control In/Bulk/Interrupt - multiple of mps. This will allow to not + * have concatenations from various descriptors within one packet. + * + * Selects corresponding mask for RX/TX bytes as well. + */ +static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) +{ + u32 mps = hs_ep->ep.maxpacket; + int dir_in = hs_ep->dir_in; + u32 desc_size = 0; + + if (!hs_ep->index && !dir_in) { + desc_size = mps; + *mask = DEV_DMA_NBYTES_MASK; + } else if (hs_ep->isochronous) { + if (dir_in) { + desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT; + *mask = DEV_DMA_ISOC_TX_NBYTES_MASK; + } else { + desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT; + *mask = DEV_DMA_ISOC_RX_NBYTES_MASK; + } + } else { + desc_size = DEV_DMA_NBYTES_LIMIT; + *mask = DEV_DMA_NBYTES_MASK; + + /* Round down desc_size to be mps multiple */ + desc_size -= desc_size % mps; + } + + return desc_size; +} + +/* + * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain. + * @hs_ep: The endpoint + * @dma_buff: DMA address to use + * @len: Length of the transfer + * + * This function will iterate over descriptor chain and fill its entries + * with corresponding information based on transfer data. 
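dwc2_gadget_get_desc_params() and dwc2_gadget_get_chain_limit() above bound how much one DDMA descriptor, and one whole chain, may carry, and dwc2_gadget_config_nonisoc_xfer_ddma() below turns that into a descriptor count. The arithmetic can be checked in isolation; the byte limit and chain length below are assumed stand-ins for DEV_DMA_NBYTES_LIMIT and MAX_DMA_DESC_NUM_GENERIC.

#include <stdio.h>

#define NBYTES_LIMIT	65535u	/* assumed per-descriptor byte limit */
#define DESC_PER_CHAIN	64u	/* assumed number of descriptors per chain */

/* Bulk/interrupt rule from dwc2_gadget_get_desc_params(): round the
 * per-descriptor limit down to a whole number of max-packet units so a
 * packet never straddles two descriptors. */
static unsigned int per_desc_payload(unsigned int mps)
{
	return NBYTES_LIMIT - (NBYTES_LIMIT % mps);
}

/* Descriptor-count rule from dwc2_gadget_config_nonisoc_xfer_ddma():
 * ceil(len / maxsize), with a zero-length transfer still occupying one
 * descriptor (the ZLP case). */
static unsigned int desc_count(unsigned int len, unsigned int maxsize)
{
	unsigned int n = len / maxsize + (len % maxsize ? 1 : 0);

	return len ? n : 1;
}

int main(void)
{
	unsigned int mps = 512;				/* HS bulk endpoint */
	unsigned int maxsize = per_desc_payload(mps);	/* 65024 */

	printf("per-descriptor payload: %u bytes\n", maxsize);
	printf("chain capacity: %u bytes\n", maxsize * DESC_PER_CHAIN);
	printf("descriptors for a 200000-byte request: %u\n",
	       desc_count(200000, maxsize));		/* 4 */
	return 0;
}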
+ */ +static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep, + dma_addr_t dma_buff, + unsigned int len) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + int dir_in = hs_ep->dir_in; + struct dwc2_dma_desc *desc = hs_ep->desc_list; + u32 mps = hs_ep->ep.maxpacket; + u32 maxsize = 0; + u32 offset = 0; + u32 mask = 0; + int i; + + maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); + + hs_ep->desc_count = (len / maxsize) + + ((len % maxsize) ? 1 : 0); + if (len == 0) + hs_ep->desc_count = 1; + + for (i = 0; i < hs_ep->desc_count; ++i) { + desc->status = 0; + desc->status |= (DEV_DMA_BUFF_STS_HBUSY + << DEV_DMA_BUFF_STS_SHIFT); + + if (len > maxsize) { + if (!hs_ep->index && !dir_in) + desc->status |= (DEV_DMA_L | DEV_DMA_IOC); + + desc->status |= (maxsize << + DEV_DMA_NBYTES_SHIFT & mask); + desc->buf = dma_buff + offset; + + len -= maxsize; + offset += maxsize; + } else { + desc->status |= (DEV_DMA_L | DEV_DMA_IOC); + + if (dir_in) + desc->status |= (len % mps) ? DEV_DMA_SHORT : + ((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0); + if (len > maxsize) + dev_err(hsotg->dev, "wrong len %d\n", len); + + desc->status |= + len << DEV_DMA_NBYTES_SHIFT & mask; + desc->buf = dma_buff + offset; + } + + desc->status &= ~DEV_DMA_BUFF_STS_MASK; + desc->status |= (DEV_DMA_BUFF_STS_HREADY + << DEV_DMA_BUFF_STS_SHIFT); + desc++; + } +} + +/* + * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain. + * @hs_ep: The isochronous endpoint. + * @dma_buff: usb requests dma buffer. + * @len: usb request transfer length. + * + * Finds out index of first free entry either in the bottom or up half of + * descriptor chain depend on which is under SW control and not processed + * by HW. Then fills that descriptor with the data of the arrived usb request, + * frame info, sets Last and IOC bits increments next_desc. If filled + * descriptor is not the first one, removes L bit from the previous descriptor + * status. + */ +static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep, + dma_addr_t dma_buff, unsigned int len) +{ + struct dwc2_dma_desc *desc; + struct dwc2_hsotg *hsotg = hs_ep->parent; + u32 index; + u32 maxsize = 0; + u32 mask = 0; + + maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); + if (len > maxsize) { + dev_err(hsotg->dev, "wrong len %d\n", len); + return -EINVAL; + } + + /* + * If SW has already filled half of chain, then return and wait for + * the other chain to be processed by HW. + */ + if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2) + return -EBUSY; + + /* Increment frame number by interval for IN */ + if (hs_ep->dir_in) + dwc2_gadget_incr_frame_num(hs_ep); + + index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num + + hs_ep->next_desc; + + /* Sanity check of calculated index */ + if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) || + (!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) { + dev_err(hsotg->dev, "wrong index %d for iso chain\n", index); + return -EINVAL; + } + + desc = &hs_ep->desc_list[index]; + + /* Clear L bit of previous desc if more than one entries in the chain */ + if (hs_ep->next_desc) + hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L; + + dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n", + __func__, hs_ep->index, hs_ep->dir_in ? 
"in" : "out", index); + + desc->status = 0; + desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT); + + desc->buf = dma_buff; + desc->status |= (DEV_DMA_L | DEV_DMA_IOC | + ((len << DEV_DMA_NBYTES_SHIFT) & mask)); + + if (hs_ep->dir_in) { + desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) & + DEV_DMA_ISOC_PID_MASK) | + ((len % hs_ep->ep.maxpacket) ? + DEV_DMA_SHORT : 0) | + ((hs_ep->target_frame << + DEV_DMA_ISOC_FRNUM_SHIFT) & + DEV_DMA_ISOC_FRNUM_MASK); + } + + desc->status &= ~DEV_DMA_BUFF_STS_MASK; + desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT); + + /* Update index of last configured entry in the chain */ + hs_ep->next_desc++; + + return 0; +} + +/* + * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA + * @hs_ep: The isochronous endpoint. + * + * Prepare first descriptor chain for isochronous endpoints. Afterwards + * write DMA address to HW and enable the endpoint. + * + * Switch between descriptor chains via isoc_chain_num to give SW opportunity + * to prepare second descriptor chain while first one is being processed by HW. + */ +static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + struct dwc2_hsotg_req *hs_req, *treq; + int index = hs_ep->index; + int ret; + u32 dma_reg; + u32 depctl; + u32 ctrl; + + if (list_empty(&hs_ep->queue)) { + dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__); + return; + } + + list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) { + ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma, + hs_req->req.length); + if (ret) { + dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__); + break; + } + } + + depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); + dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index); + + /* write descriptor chain address to control register */ + dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg); + + ctrl = dwc2_readl(hsotg->regs + depctl); + ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK; + dwc2_writel(ctrl, hsotg->regs + depctl); + + /* Switch ISOC descriptor chain number being processed by SW*/ + hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1; + hs_ep->next_desc = 0; +} + +/** * dwc2_hsotg_start_req - start a USB request from an endpoint's queue * @hsotg: The controller state. * @hs_ep: The endpoint to process a request for @@ -565,6 +888,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, unsigned length; unsigned packets; unsigned maxreq; + unsigned int dma_reg; if (index != 0) { if (hs_ep->req && !continuing) { @@ -579,6 +903,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, } } + dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index); epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); epsize_reg = dir_in ? 
DIEPTSIZ(index) : DOEPTSIZ(index); @@ -598,7 +923,11 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n", ureq->length, ureq->actual); - maxreq = get_ep_limit(hs_ep); + if (!using_desc_dma(hsotg)) + maxreq = get_ep_limit(hs_ep); + else + maxreq = dwc2_gadget_get_chain_limit(hs_ep); + if (length > maxreq) { int round = maxreq % hs_ep->ep.maxpacket; @@ -650,22 +979,51 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, /* store the request as the current one we're doing */ hs_ep->req = hs_req; - /* write size / packets */ - dwc2_writel(epsize, hsotg->regs + epsize_reg); + if (using_desc_dma(hsotg)) { + u32 offset = 0; + u32 mps = hs_ep->ep.maxpacket; - if (using_dma(hsotg) && !continuing) { - unsigned int dma_reg; + /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */ + if (!dir_in) { + if (!index) + length = mps; + else if (length % mps) + length += (mps - (length % mps)); + } /* - * write DMA address to control register, buffer already - * synced by dwc2_hsotg_ep_queue(). + * If more data to send, adjust DMA for EP0 out data stage. + * ureq->dma stays unchanged, hence increment it by already + * passed passed data count before starting new transaction. */ + if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT && + continuing) + offset = ureq->actual; - dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index); - dwc2_writel(ureq->dma, hsotg->regs + dma_reg); + /* Fill DDMA chain entries */ + dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset, + length); - dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n", - __func__, &ureq->dma, dma_reg); + /* write descriptor chain address to control register */ + dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg); + + dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n", + __func__, (u32)hs_ep->desc_list_dma, dma_reg); + } else { + /* write size / packets */ + dwc2_writel(epsize, hsotg->regs + epsize_reg); + + if (using_dma(hsotg) && !continuing && (length != 0)) { + /* + * write DMA address to control register, buffer + * already synced by dwc2_hsotg_ep_queue(). + */ + + dwc2_writel(ureq->dma, hsotg->regs + dma_reg); + + dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n", + __func__, &ureq->dma, dma_reg); + } } if (hs_ep->isochronous && hs_ep->interval == 1) { @@ -738,13 +1096,8 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg, struct dwc2_hsotg_ep *hs_ep, struct usb_request *req) { - struct dwc2_hsotg_req *hs_req = our_req(req); int ret; - /* if the length is zero, ignore the DMA data */ - if (hs_req->req.length == 0) - return 0; - ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in); if (ret) goto dma_error; @@ -835,6 +1188,41 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep) return false; } +/* + * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers + * @hsotg: The driver state + * @hs_ep: the ep descriptor chain is for + * + * Called to update EP0 structure's pointers depend on stage of + * control transfer. 
+ */ +static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg, + struct dwc2_hsotg_ep *hs_ep) +{ + switch (hsotg->ep0_state) { + case DWC2_EP0_SETUP: + case DWC2_EP0_STATUS_OUT: + hs_ep->desc_list = hsotg->setup_desc[0]; + hs_ep->desc_list_dma = hsotg->setup_desc_dma[0]; + break; + case DWC2_EP0_DATA_IN: + case DWC2_EP0_STATUS_IN: + hs_ep->desc_list = hsotg->ctrl_in_desc; + hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma; + break; + case DWC2_EP0_DATA_OUT: + hs_ep->desc_list = hsotg->ctrl_out_desc; + hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma; + break; + default: + dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n", + hsotg->ep0_state); + return -EINVAL; + } + + return 0; +} + static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { @@ -870,10 +1258,32 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, if (ret) return ret; } + /* If using descriptor DMA configure EP0 descriptor chain pointers */ + if (using_desc_dma(hs) && !hs_ep->index) { + ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep); + if (ret) + return ret; + } first = list_empty(&hs_ep->queue); list_add_tail(&hs_req->queue, &hs_ep->queue); + /* + * Handle DDMA isochronous transfers separately - just add new entry + * to the half of descriptor chain that is not processed by HW. + * Transfer will be started once SW gets either one of NAK or + * OutTknEpDis interrupts. + */ + if (using_desc_dma(hs) && hs_ep->isochronous && + hs_ep->target_frame != TARGET_FRAME_INITIAL) { + ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma, + hs_req->req.length); + if (ret) + dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__); + + return 0; + } + if (first) { if (!hs_ep->isochronous) { dwc2_hsotg_start_req(hs, hs_ep, hs_req, false); @@ -1099,10 +1509,8 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now); */ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep) { - if (list_empty(&hs_ep->queue)) - return NULL; - - return list_first_entry(&hs_ep->queue, struct dwc2_hsotg_req, queue); + return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req, + queue); } /** @@ -1440,14 +1848,21 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg, if (hs_ep->dir_in) dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n", - index); + index); else dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n", - index); + index); + if (using_desc_dma(hsotg)) { + /* Not specific buffer needed for ep0 ZLP */ + dma_addr_t dma = hs_ep->desc_list_dma; - dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | - DXEPTSIZ_XFERSIZE(0), hsotg->regs + - epsiz_reg); + dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); + dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0); + } else { + dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | + DXEPTSIZ_XFERSIZE(0), hsotg->regs + + epsiz_reg); + } ctrl = dwc2_readl(hsotg->regs + epctl_reg); ctrl |= DXEPCTL_CNAK; /* clear NAK set by core */ @@ -1510,6 +1925,10 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg, spin_lock(&hsotg->lock); } + /* In DDMA don't need to proceed to starting of next ISOC request */ + if (using_desc_dma(hsotg) && hs_ep->isochronous) + return; + /* * Look to see if there is anything else to do. 
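The queueing path above relies on the isochronous descriptor chain being split into two halves, with isoc_chain_num selecting the half currently owned by software and next_desc walking within it. The index computed in dwc2_gadget_fill_isoc_desc() follows directly from that split; the chain length below is an assumed stand-in for MAX_DMA_DESC_NUM_GENERIC.

#include <assert.h>

#define DESC_PER_CHAIN	64u	/* assumed MAX_DMA_DESC_NUM_GENERIC */

/* Index rule from dwc2_gadget_fill_isoc_desc(): entries of the half owned
 * by software start at 0 or DESC_PER_CHAIN / 2, depending on
 * isoc_chain_num. */
static unsigned int isoc_desc_index(unsigned int isoc_chain_num,
				    unsigned int next_desc)
{
	return (DESC_PER_CHAIN / 2) * isoc_chain_num + next_desc;
}

int main(void)
{
	assert(isoc_desc_index(0, 0) == 0);	/* first entry, lower half */
	assert(isoc_desc_index(0, 31) == 31);	/* last entry, lower half */
	assert(isoc_desc_index(1, 0) == 32);	/* first entry, upper half */
	assert(isoc_desc_index(1, 31) == 63);	/* last entry, upper half */
	return 0;
}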
Note, the completion * of the previous request may have caused a new request to be started @@ -1521,6 +1940,115 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg, } } +/* + * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA + * @hs_ep: The endpoint the request was on. + * + * Get first request from the ep queue, determine descriptor on which complete + * happened. SW based on isoc_chain_num discovers which half of the descriptor + * chain is currently in use by HW, adjusts dma_address and calculates index + * of completed descriptor based on the value of DEPDMA register. Update actual + * length of request, giveback to gadget. + */ +static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + struct dwc2_hsotg_req *hs_req; + struct usb_request *ureq; + int index; + dma_addr_t dma_addr; + u32 dma_reg; + u32 depdma; + u32 desc_sts; + u32 mask; + + hs_req = get_ep_head(hs_ep); + if (!hs_req) { + dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__); + return; + } + ureq = &hs_req->req; + + dma_addr = hs_ep->desc_list_dma; + + /* + * If lower half of descriptor chain is currently use by SW, + * that means higher half is being processed by HW, so shift + * DMA address to higher half of descriptor chain. + */ + if (!hs_ep->isoc_chain_num) + dma_addr += sizeof(struct dwc2_dma_desc) * + (MAX_DMA_DESC_NUM_GENERIC / 2); + + dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index); + depdma = dwc2_readl(hsotg->regs + dma_reg); + + index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1; + desc_sts = hs_ep->desc_list[index].status; + + mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK : + DEV_DMA_ISOC_RX_NBYTES_MASK; + ureq->actual = ureq->length - + ((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT); + + /* Adjust actual length for ISOC Out if length is not align of 4 */ + if (!hs_ep->dir_in && ureq->length & 0x3) + ureq->actual += 4 - (ureq->length & 0x3); + + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); +} + +/* + * dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any. + * @hs_ep: The isochronous endpoint to be re-enabled. + * + * If ep has been disabled due to last descriptor servicing (IN endpoint) or + * BNA (OUT endpoint) check the status of other half of descriptor chain that + * was under SW control till HW was busy and restart the endpoint if needed. + */ +static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + u32 depctl; + u32 dma_reg; + u32 ctrl; + u32 dma_addr = hs_ep->desc_list_dma; + unsigned char index = hs_ep->index; + + dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index); + depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index); + + ctrl = dwc2_readl(hsotg->regs + depctl); + + /* + * EP was disabled if HW has processed last descriptor or BNA was set. + * So restart ep if SW has prepared new descriptor chain in ep_queue + * routine while HW was busy. 
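dwc2_gadget_complete_isoc_request_ddma() above recovers the index of the just-completed descriptor from the DEPDMA register, after shifting the chain base to whichever half the hardware was processing. A worked, stand-alone version of that arithmetic follows; the descriptor size and chain length are assumed values for sizeof(struct dwc2_dma_desc) and MAX_DMA_DESC_NUM_GENERIC.

#include <assert.h>
#include <stdint.h>

#define DESC_SIZE	8u	/* assumed sizeof(struct dwc2_dma_desc) */
#define DESC_PER_CHAIN	64u	/* assumed MAX_DMA_DESC_NUM_GENERIC */

/* Mirror of the computation above: DEPDMA points just past the descriptor
 * the controller completed, so subtract the (possibly shifted) chain base,
 * divide by the descriptor size and step back by one. */
static unsigned int completed_index(uint32_t depdma, uint32_t chain_base,
				    int sw_owns_lower_half)
{
	if (sw_owns_lower_half)		/* HW is working on the upper half */
		chain_base += DESC_SIZE * (DESC_PER_CHAIN / 2);

	return (depdma - chain_base) / DESC_SIZE - 1;
}

int main(void)
{
	uint32_t base = 0x10000000;

	/* HW finished the third descriptor of the upper half. */
	assert(completed_index(base + DESC_SIZE * 35, base, 1) == 2);
	/* HW finished the first descriptor of the lower half. */
	assert(completed_index(base + DESC_SIZE * 1, base, 0) == 0);
	return 0;
}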
+ */ + if (!(ctrl & DXEPCTL_EPENA)) { + if (!hs_ep->next_desc) { + dev_dbg(hsotg->dev, "%s: No more ISOC requests\n", + __func__); + return; + } + + dma_addr += sizeof(struct dwc2_dma_desc) * + (MAX_DMA_DESC_NUM_GENERIC / 2) * + hs_ep->isoc_chain_num; + dwc2_writel(dma_addr, hsotg->regs + dma_reg); + + ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK; + dwc2_writel(ctrl, hsotg->regs + depctl); + + /* Switch ISOC descriptor chain number being processed by SW*/ + hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1; + hs_ep->next_desc = 0; + + dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n", + __func__); + } +} + /** * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint * @hsotg: The device state. @@ -1618,6 +2146,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, dwc2_writel(ctrl, hsotg->regs + epctl_reg); } +/* + * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc + * @hs_ep - The endpoint on which transfer went + * + * Iterate over endpoints descriptor chain and get info on bytes remained + * in DMA descriptors after transfer has completed. Used for non isoc EPs. + */ +static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep) +{ + struct dwc2_hsotg *hsotg = hs_ep->parent; + unsigned int bytes_rem = 0; + struct dwc2_dma_desc *desc = hs_ep->desc_list; + int i; + u32 status; + + if (!desc) + return -EINVAL; + + for (i = 0; i < hs_ep->desc_count; ++i) { + status = desc->status; + bytes_rem += status & DEV_DMA_NBYTES_MASK; + + if (status & DEV_DMA_STS_MASK) + dev_err(hsotg->dev, "descriptor %d closed with %x\n", + i, status & DEV_DMA_STS_MASK); + } + + return bytes_rem; +} + /** * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO * @hsotg: The device instance @@ -1648,6 +2206,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum) return; } + if (using_desc_dma(hsotg)) + size_left = dwc2_gadget_get_xfersize_ddma(hs_ep); + if (using_dma(hsotg)) { unsigned size_done; @@ -1682,7 +2243,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum) */ } - if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_DATA_OUT) { + /* DDMA IN status phase will start from StsPhseRcvd interrupt */ + if (!using_desc_dma(hsotg) && epnum == 0 && + hsotg->ep0_state == DWC2_EP0_DATA_OUT) { /* Move to STATUS IN */ dwc2_hsotg_ep0_zlp(hsotg, true); return; @@ -1812,17 +2375,17 @@ static u32 dwc2_hsotg_ep0_mps(unsigned int mps) * @hsotg: The driver state. * @ep: The index number of the endpoint * @mps: The maximum packet size in bytes + * @mc: The multicount value * * Configure the maximum packet size for the given endpoint, updating * the hardware control registers to reflect this. 
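dwc2_hsotg_set_ep_maxpacket() now takes the packet size and multiplier separately, the way dwc2_hsotg_ep_enable() below obtains them from usb_endpoint_maxp() and usb_endpoint_maxp_mult(). The split follows the USB 2.0 wMaxPacketSize layout, decoded here in a stand-alone check:

#include <assert.h>
#include <stdint.h>

/* USB 2.0 wMaxPacketSize layout: bits 10:0 carry the packet size, bits
 * 12:11 the number of additional transactions per microframe for
 * high-bandwidth periodic endpoints. */
static unsigned int maxp(uint16_t wMaxPacketSize)
{
	return wMaxPacketSize & 0x7ff;
}

static unsigned int maxp_mult(uint16_t wMaxPacketSize)
{
	return ((wMaxPacketSize >> 11) & 0x3) + 1;
}

int main(void)
{
	/* 1024-byte high-bandwidth isoc endpoint, 3 transactions/uframe */
	assert(maxp(0x1400) == 1024);
	assert(maxp_mult(0x1400) == 3);

	/* plain 512-byte bulk endpoint */
	assert(maxp(0x0200) == 512);
	assert(maxp_mult(0x0200) == 1);
	return 0;
}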
*/ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg, - unsigned int ep, unsigned int mps, unsigned int dir_in) + unsigned int ep, unsigned int mps, + unsigned int mc, unsigned int dir_in) { struct dwc2_hsotg_ep *hs_ep; void __iomem *regs = hsotg->regs; - u32 mpsval; - u32 mcval; u32 reg; hs_ep = index_to_ep(hsotg, ep, dir_in); @@ -1830,32 +2393,32 @@ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg, return; if (ep == 0) { + u32 mps_bytes = mps; + /* EP0 is a special case */ - mpsval = dwc2_hsotg_ep0_mps(mps); - if (mpsval > 3) + mps = dwc2_hsotg_ep0_mps(mps_bytes); + if (mps > 3) goto bad_mps; - hs_ep->ep.maxpacket = mps; + hs_ep->ep.maxpacket = mps_bytes; hs_ep->mc = 1; } else { - mpsval = mps & DXEPCTL_MPS_MASK; - if (mpsval > 1024) + if (mps > 1024) goto bad_mps; - mcval = ((mps >> 11) & 0x3) + 1; - hs_ep->mc = mcval; - if (mcval > 3) + hs_ep->mc = mc; + if (mc > 3) goto bad_mps; - hs_ep->ep.maxpacket = mpsval; + hs_ep->ep.maxpacket = mps; } if (dir_in) { reg = dwc2_readl(regs + DIEPCTL(ep)); reg &= ~DXEPCTL_MPS_MASK; - reg |= mpsval; + reg |= mps; dwc2_writel(reg, regs + DIEPCTL(ep)); } else { reg = dwc2_readl(regs + DOEPCTL(ep)); reg &= ~DXEPCTL_MPS_MASK; - reg |= mpsval; + reg |= mps; dwc2_writel(reg, regs + DOEPCTL(ep)); } @@ -1954,6 +2517,13 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, /* Finish ZLP handling for IN EP0 transactions */ if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) { dev_dbg(hsotg->dev, "zlp packet sent\n"); + + /* + * While send zlp for DWC2_EP0_STATUS_IN EP direction was + * changed to IN. Change back to complete OUT transfer request + */ + hs_ep->dir_in = 0; + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); if (hsotg->test_mode) { int ret; @@ -1979,8 +2549,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, * past the end of the buffer (DMA transfers are always 32bit * aligned). */ - - size_left = DXEPTSIZ_XFERSIZE_GET(epsize); + if (using_desc_dma(hsotg)) { + size_left = dwc2_gadget_get_xfersize_ddma(hs_ep); + if (size_left < 0) + dev_err(hsotg->dev, "error parsing DDMA results %d\n", + size_left); + } else { + size_left = DXEPTSIZ_XFERSIZE_GET(epsize); + } size_done = hs_ep->size_loaded - size_left; size_done += hs_ep->last_load; @@ -2128,12 +2704,28 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep) struct dwc2_hsotg *hsotg = ep->parent; int dir_in = ep->dir_in; u32 doepmsk; + u32 tmp; if (dir_in || !ep->isochronous) return; + /* + * Store frame in which irq was asserted here, as + * it can change while completing request below. 
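For EP0 the byte count is first translated into the 2-bit DXEPCTL0 MPS field, which is why values above 3 hit the bad_mps path above. The encoding below (64/32/16/8 bytes mapping to 0..3) is an assumption about dwc2_hsotg_ep0_mps(), shown only to make the range check concrete:

#include <assert.h>

/* Assumed EP0 MPS encoding behind dwc2_hsotg_ep0_mps(). */
static unsigned int ep0_mps_field(unsigned int mps_bytes)
{
	switch (mps_bytes) {
	case 64: return 0;
	case 32: return 1;
	case 16: return 2;
	case 8:  return 3;
	default: return 4;	/* invalid, rejected by the mps > 3 check */
	}
}

int main(void)
{
	assert(ep0_mps_field(64) == 0);
	assert(ep0_mps_field(8) == 3);
	assert(ep0_mps_field(512) > 3);	/* not a legal EP0 size */
	return 0;
}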
+ */ + tmp = dwc2_hsotg_read_frameno(hsotg); + dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA); + if (using_desc_dma(hsotg)) { + if (ep->target_frame == TARGET_FRAME_INITIAL) { + /* Start first ISO Out */ + ep->target_frame = tmp; + dwc2_gadget_start_isoc_ddma(ep); + } + return; + } + if (ep->interval > 1 && ep->target_frame == TARGET_FRAME_INITIAL) { u32 dsts; @@ -2182,6 +2774,12 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) if (hs_ep->target_frame == TARGET_FRAME_INITIAL) { hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg); + + if (using_desc_dma(hsotg)) { + dwc2_gadget_start_isoc_ddma(hs_ep); + return; + } + if (hs_ep->interval > 1) { u32 ctrl = dwc2_readl(hsotg->regs + DIEPCTL(hs_ep->index)); @@ -2237,8 +2835,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD))) ints &= ~DXEPINT_XFERCOMPL; - if (ints & DXEPINT_STSPHSERCVD) - dev_dbg(hsotg->dev, "%s: StsPhseRcvd asserted\n", __func__); + /* + * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP + * stage and xfercomplete was generated without SETUP phase done + * interrupt. SW should parse received setup packet only after host's + * exit from setup phase of control transfer. + */ + if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in && + hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP)) + ints &= ~DXEPINT_XFERCOMPL; if (ints & DXEPINT_XFERCOMPL) { dev_dbg(hsotg->dev, @@ -2246,11 +2851,17 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, __func__, dwc2_readl(hsotg->regs + epctl_reg), dwc2_readl(hsotg->regs + epsiz_reg)); - /* - * we get OutDone from the FIFO, so we only need to look - * at completing IN requests here - */ - if (dir_in) { + /* In DDMA handle isochronous requests separately */ + if (using_desc_dma(hsotg) && hs_ep->isochronous) { + dwc2_gadget_complete_isoc_request_ddma(hs_ep); + /* Try to start next isoc request */ + dwc2_gadget_start_next_isoc_ddma(hs_ep); + } else if (dir_in) { + /* + * We get OutDone from the FIFO, so we only + * need to look at completing IN requests here + * if operating slave mode + */ if (hs_ep->isochronous && hs_ep->interval > 1) dwc2_gadget_incr_frame_num(hs_ep); @@ -2302,9 +2913,30 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, } } + if (ints & DXEPINT_STSPHSERCVD) { + dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); + + /* Move to STATUS IN for DDMA */ + if (using_desc_dma(hsotg)) + dwc2_hsotg_ep0_zlp(hsotg, true); + } + if (ints & DXEPINT_BACK2BACKSETUP) dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__); + if (ints & DXEPINT_BNAINTR) { + dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__); + + /* + * Try to start next isoc request, if any. + * Sometimes the endpoint remains enabled after BNA interrupt + * assertion, which is not expected, hence we can enter here + * couple of times. 
+ */ + if (hs_ep->isochronous) + dwc2_gadget_start_next_isoc_ddma(hs_ep); + } + if (dir_in && !hs_ep->isochronous) { /* not sure if this is important, but we'll clear it anyway */ if (ints & DXEPINT_INTKNTXFEMP) { @@ -2372,6 +3004,8 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg) case DSTS_ENUMSPD_LS: hsotg->gadget.speed = USB_SPEED_LOW; + ep0_mps = 8; + ep_mps = 8; /* * note, we don't actually support LS in this driver at the * moment, and the documentation seems to imply that it isn't @@ -2390,13 +3024,15 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg) if (ep0_mps) { int i; /* Initialize ep0 for both in and out directions */ - dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 1); - dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0); + dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1); + dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0); for (i = 1; i < hsotg->num_of_eps; i++) { if (hsotg->eps_in[i]) - dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 1); + dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, + 0, 1); if (hsotg->eps_out[i]) - dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 0); + dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, + 0, 0); } } @@ -2516,6 +3152,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, u32 intmsk; u32 val; u32 usbcfg; + u32 dcfg = 0; /* Kill any ep0 requests as controller will be reinitialized */ kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); @@ -2534,10 +3171,17 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | GUSBCFG_HNPCAP); - /* set the PLL on, remove the HNP/SRP and set the PHY */ - val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; - usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) | - (val << GUSBCFG_USBTRDTIM_SHIFT); + if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && + (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || + hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) { + /* FS/LS Dedicated Transceiver Interface */ + usbcfg |= GUSBCFG_PHYSEL; + } else { + /* set the PLL on, remove the HNP/SRP and set the PHY */ + val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 
9 : 5; + usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) | + (val << GUSBCFG_USBTRDTIM_SHIFT); + } dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); dwc2_hsotg_init_fifo(hsotg); @@ -2545,7 +3189,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, if (!is_usb_reset) __orr32(hsotg->regs + DCTL, DCTL_SFTDISCON); - dwc2_writel(DCFG_EPMISCNT(1) | DCFG_DEVSPD_HS, hsotg->regs + DCFG); + dcfg |= DCFG_EPMISCNT(1); + + switch (hsotg->params.speed) { + case DWC2_SPEED_PARAM_LOW: + dcfg |= DCFG_DEVSPD_LS; + break; + case DWC2_SPEED_PARAM_FULL: + if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) + dcfg |= DCFG_DEVSPD_FS48; + else + dcfg |= DCFG_DEVSPD_FS; + break; + default: + dcfg |= DCFG_DEVSPD_HS; + } + + dwc2_writel(dcfg, hsotg->regs + DCFG); /* Clear any pending OTG interrupts */ dwc2_writel(0xffffffff, hsotg->regs + GOTGINT); @@ -2556,23 +3216,31 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF | GINTSTS_USBRST | GINTSTS_RESETDET | GINTSTS_ENUMDONE | GINTSTS_OTGINT | - GINTSTS_USBSUSP | GINTSTS_WKUPINT | - GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT; + GINTSTS_USBSUSP | GINTSTS_WKUPINT; - if (hsotg->core_params->external_id_pin_ctl <= 0) + if (!using_desc_dma(hsotg)) + intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT; + + if (hsotg->params.external_id_pin_ctl <= 0) intmsk |= GINTSTS_CONIDSTSCHNG; dwc2_writel(intmsk, hsotg->regs + GINTMSK); - if (using_dma(hsotg)) + if (using_dma(hsotg)) { dwc2_writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN | (GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT), hsotg->regs + GAHBCFG); - else + + /* Set DDMA mode support in the core if needed */ + if (using_desc_dma(hsotg)) + __orr32(hsotg->regs + DCFG, DCFG_DESCDMA_EN); + + } else { dwc2_writel(((hsotg->dedicated_fifos) ? (GAHBCFG_NP_TXF_EMP_LVL | GAHBCFG_P_TXF_EMP_LVL) : 0) | GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG); + } /* * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts @@ -2588,13 +3256,18 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, /* * don't need XferCompl, we get that from RXFIFO in slave mode. In - * DMA mode we may need this. + * DMA mode we may need this and StsPhseRcvd. */ - dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK) : 0) | + dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK | + DOEPMSK_STSPHSERCVDMSK) : 0) | DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK | - DOEPMSK_SETUPMSK | DOEPMSK_STSPHSERCVDMSK, + DOEPMSK_SETUPMSK, hsotg->regs + DOEPMSK); + /* Enable BNA interrupt for DDMA */ + if (using_desc_dma(hsotg)) + __orr32(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK); + dwc2_writel(0, hsotg->regs + DAINTMSK); dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", @@ -2935,6 +3608,95 @@ irq_retry: return IRQ_HANDLED; } +static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg, + u32 bit, u32 timeout) +{ + u32 i; + + for (i = 0; i < timeout; i++) { + if (dwc2_readl(hs_otg->regs + reg) & bit) + return 0; + udelay(1); + } + + return -ETIMEDOUT; +} + +static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, + struct dwc2_hsotg_ep *hs_ep) +{ + u32 epctrl_reg; + u32 epint_reg; + + epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) : + DOEPCTL(hs_ep->index); + epint_reg = hs_ep->dir_in ? 
DIEPINT(hs_ep->index) : + DOEPINT(hs_ep->index); + + dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__, + hs_ep->name); + + if (hs_ep->dir_in) { + if (hsotg->dedicated_fifos || hs_ep->periodic) { + __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK); + /* Wait for Nak effect */ + if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, + DXEPINT_INEPNAKEFF, 100)) + dev_warn(hsotg->dev, + "%s: timeout DIEPINT.NAKEFF\n", + __func__); + } else { + __orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK); + /* Wait for Nak effect */ + if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, + GINTSTS_GINNAKEFF, 100)) + dev_warn(hsotg->dev, + "%s: timeout GINTSTS.GINNAKEFF\n", + __func__); + } + } else { + if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF)) + __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK); + + /* Wait for global nak to take effect */ + if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, + GINTSTS_GOUTNAKEFF, 100)) + dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n", + __func__); + } + + /* Disable ep */ + __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK); + + /* Wait for ep to be disabled */ + if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100)) + dev_warn(hsotg->dev, + "%s: timeout DOEPCTL.EPDisable\n", __func__); + + /* Clear EPDISBLD interrupt */ + __orr32(hsotg->regs + epint_reg, DXEPINT_EPDISBLD); + + if (hs_ep->dir_in) { + unsigned short fifo_index; + + if (hsotg->dedicated_fifos || hs_ep->periodic) + fifo_index = hs_ep->fifo_index; + else + fifo_index = 0; + + /* Flush TX FIFO */ + dwc2_flush_tx_fifo(hsotg, fifo_index); + + /* Clear Global In NP NAK in Shared FIFO for non periodic ep */ + if (!hsotg->dedicated_fifos && !hs_ep->periodic) + __orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK); + + } else { + /* Remove global NAKs */ + __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK); + } +} + /** * dwc2_hsotg_ep_enable - enable the given endpoint * @ep: The USB endpint to configure @@ -2952,6 +3714,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, u32 epctrl_reg; u32 epctrl; u32 mps; + u32 mc; u32 mask; unsigned int dir_in; unsigned int i, val, size; @@ -2975,6 +3738,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, } mps = usb_endpoint_maxp(desc); + mc = usb_endpoint_maxp_mult(desc); /* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */ @@ -2984,6 +3748,18 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n", __func__, epctrl, epctrl_reg); + /* Allocate DMA descriptor chain for non-ctrl endpoints */ + if (using_desc_dma(hsotg)) { + hs_ep->desc_list = dma_alloc_coherent(hsotg->dev, + MAX_DMA_DESC_NUM_GENERIC * + sizeof(struct dwc2_dma_desc), + &hs_ep->desc_list_dma, GFP_KERNEL); + if (!hs_ep->desc_list) { + ret = -ENOMEM; + goto error2; + } + } + spin_lock_irqsave(&hsotg->lock, flags); epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK); @@ -2996,7 +3772,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, epctrl |= DXEPCTL_USBACTEP; /* update the endpoint state */ - dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in); + dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in); /* default, set to non-periodic */ hs_ep->isochronous = 0; @@ -3011,6 +3787,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, hs_ep->isochronous = 1; hs_ep->interval = 1 << (desc->bInterval - 1); hs_ep->target_frame = TARGET_FRAME_INITIAL; + hs_ep->isoc_chain_num = 0; + hs_ep->next_desc = 0; if (dir_in) { hs_ep->periodic = 1; mask = dwc2_readl(hsotg->regs + DIEPMSK); @@ -3067,7 +3845,7 @@ 
static int dwc2_hsotg_ep_enable(struct usb_ep *ep, dev_err(hsotg->dev, "%s: No suitable fifo found\n", __func__); ret = -ENOMEM; - goto error; + goto error1; } hsotg->fifo_map |= 1 << fifo_index; epctrl |= DXEPCTL_TXFNUM(fifo_index); @@ -3089,8 +3867,17 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, /* enable the endpoint interrupt */ dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1); -error: +error1: spin_unlock_irqrestore(&hsotg->lock, flags); + +error2: + if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { + dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * + sizeof(struct dwc2_dma_desc), + hs_ep->desc_list, hs_ep->desc_list_dma); + hs_ep->desc_list = NULL; + } + return ret; } @@ -3115,11 +3902,23 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep) return -EINVAL; } + /* Remove DMA memory allocated for non-control Endpoints */ + if (using_desc_dma(hsotg)) { + dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * + sizeof(struct dwc2_dma_desc), + hs_ep->desc_list, hs_ep->desc_list_dma); + hs_ep->desc_list = NULL; + } + epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); spin_lock_irqsave(&hsotg->lock, flags); ctrl = dwc2_readl(hsotg->regs + epctrl_reg); + + if (ctrl & DXEPCTL_EPENA) + dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep); + ctrl &= ~DXEPCTL_EPENA; ctrl &= ~DXEPCTL_USBACTEP; ctrl |= DXEPCTL_SNAK; @@ -3158,77 +3957,6 @@ static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test) return false; } -static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg, - u32 bit, u32 timeout) -{ - u32 i; - - for (i = 0; i < timeout; i++) { - if (dwc2_readl(hs_otg->regs + reg) & bit) - return 0; - udelay(1); - } - - return -ETIMEDOUT; -} - -static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, - struct dwc2_hsotg_ep *hs_ep) -{ - u32 epctrl_reg; - u32 epint_reg; - - epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) : - DOEPCTL(hs_ep->index); - epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) : - DOEPINT(hs_ep->index); - - dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__, - hs_ep->name); - if (hs_ep->dir_in) { - __orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK); - /* Wait for Nak effect */ - if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, - DXEPINT_INEPNAKEFF, 100)) - dev_warn(hsotg->dev, - "%s: timeout DIEPINT.NAKEFF\n", __func__); - } else { - if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF)) - __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK); - - /* Wait for global nak to take effect */ - if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, - GINTSTS_GOUTNAKEFF, 100)) - dev_warn(hsotg->dev, - "%s: timeout GINTSTS.GOUTNAKEFF\n", __func__); - } - - /* Disable ep */ - __orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK); - - /* Wait for ep to be disabled */ - if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100)) - dev_warn(hsotg->dev, - "%s: timeout DOEPCTL.EPDisable\n", __func__); - - if (hs_ep->dir_in) { - if (hsotg->dedicated_fifos) { - dwc2_writel(GRSTCTL_TXFNUM(hs_ep->fifo_index) | - GRSTCTL_TXFFLSH, hsotg->regs + GRSTCTL); - /* Wait for fifo flush */ - if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, - GRSTCTL_TXFFLSH, 100)) - dev_warn(hsotg->dev, - "%s: timeout flushing fifos\n", - __func__); - } - /* TODO: Flush shared tx fifo */ - } else { - /* Remove global NAKs */ - __bic32(hsotg->regs + DCTL, DCTL_SGOUTNAK); - } -} - /** * dwc2_hsotg_ep_dequeue - dequeue given endpoint * @ep: The endpoint to dequeue. 
@@ -3665,14 +4393,21 @@ static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg, hs_ep->parent = hsotg; hs_ep->ep.name = hs_ep->name; - usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT); + + if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW) + usb_ep_set_maxpacket_limit(&hs_ep->ep, 8); + else + usb_ep_set_maxpacket_limit(&hs_ep->ep, + epnum ? 1024 : EP0_MPS_LIMIT); hs_ep->ep.ops = &dwc2_hsotg_ep_ops; if (epnum == 0) { hs_ep->ep.caps.type_control = true; } else { - hs_ep->ep.caps.type_iso = true; - hs_ep->ep.caps.type_bulk = true; + if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) { + hs_ep->ep.caps.type_iso = true; + hs_ep->ep.caps.type_bulk = true; + } hs_ep->ep.caps.type_int = true; } @@ -3802,51 +4537,6 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg) #endif } -#ifdef CONFIG_OF -static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) -{ - struct device_node *np = hsotg->dev->of_node; - u32 len = 0; - u32 i = 0; - - /* Enable dma if requested in device tree */ - hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma"); - - /* - * Register TX periodic fifo size per endpoint. - * EP0 is excluded since it has no fifo configuration. - */ - if (!of_find_property(np, "g-tx-fifo-size", &len)) - goto rx_fifo; - - len /= sizeof(u32); - - /* Read tx fifo sizes other than ep0 */ - if (of_property_read_u32_array(np, "g-tx-fifo-size", - &hsotg->g_tx_fifo_sz[1], len)) - goto rx_fifo; - - /* Add ep0 */ - len++; - - /* Make remaining TX fifos unavailable */ - if (len < MAX_EPS_CHANNELS) { - for (i = len; i < MAX_EPS_CHANNELS; i++) - hsotg->g_tx_fifo_sz[i] = 0; - } - -rx_fifo: - /* Register RX fifo size */ - of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz); - - /* Register NPTX fifo size */ - of_property_read_u32(np, "g-np-tx-fifo-size", - &hsotg->g_np_g_tx_fifo_sz); -} -#else -static inline void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) { } -#endif - /** * dwc2_gadget_init - init function for gadget * @dwc2: The data structure for the DWC2 driver. @@ -3857,33 +4547,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) struct device *dev = hsotg->dev; int epnum; int ret; - int i; - u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE; - - /* Initialize to legacy fifo configuration values */ - hsotg->g_rx_fifo_sz = 2048; - hsotg->g_np_g_tx_fifo_sz = 1024; - memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo)); - /* Device tree specific probe */ - dwc2_hsotg_of_probe(hsotg); - - /* Check against largest possible value. 
*/ - if (hsotg->g_np_g_tx_fifo_sz > - hsotg->hw_params.dev_nperio_tx_fifo_size) { - dev_warn(dev, "Specified GNPTXFDEP=%d > %d\n", - hsotg->g_np_g_tx_fifo_sz, - hsotg->hw_params.dev_nperio_tx_fifo_size); - hsotg->g_np_g_tx_fifo_sz = - hsotg->hw_params.dev_nperio_tx_fifo_size; - } /* Dump fifo information */ dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n", - hsotg->g_np_g_tx_fifo_sz); - dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz); - for (i = 0; i < MAX_EPS_CHANNELS; i++) - dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i, - hsotg->g_tx_fifo_sz[i]); + hsotg->params.g_np_tx_fifo_size); + dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size); hsotg->gadget.max_speed = USB_SPEED_HIGH; hsotg->gadget.ops = &dwc2_hsotg_gadget_ops; @@ -3909,6 +4577,12 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) if (!hsotg->ep0_buff) return -ENOMEM; + if (using_desc_dma(hsotg)) { + ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg); + if (ret < 0) + return ret; + } + ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED, dev_name(hsotg->dev), hsotg); if (ret < 0) { diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index df5a06578005..911c3b36ac06 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -79,9 +79,9 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg) /* Enable the interrupts in the GINTMSK */ intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT; - if (hsotg->core_params->dma_enable <= 0) + if (hsotg->params.host_dma <= 0) intmsk |= GINTSTS_RXFLVL; - if (hsotg->core_params->external_id_pin_ctl <= 0) + if (hsotg->params.external_id_pin_ctl <= 0) intmsk |= GINTSTS_CONIDSTSCHNG; intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP | @@ -100,8 +100,8 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg) if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && - hsotg->core_params->ulpi_fs_ls > 0) || - hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { + hsotg->params.ulpi_fs_ls > 0) || + hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) { /* Full speed PHY */ val = HCFG_FSLSPCLKSEL_48_MHZ; } else { @@ -152,7 +152,7 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) if (dwc2_is_host_mode(hsotg)) dwc2_init_fs_ls_pclk_sel(hsotg); - if (hsotg->core_params->i2c_enable > 0) { + if (hsotg->params.i2c_enable > 0) { dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); /* Program GUSBCFG.OtgUtmiFsSel to I2C */ @@ -189,20 +189,20 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) * so only program the first time. Do a soft reset immediately after * setting phyif. 
*/ - switch (hsotg->core_params->phy_type) { + switch (hsotg->params.phy_type) { case DWC2_PHY_TYPE_PARAM_ULPI: /* ULPI interface */ dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); usbcfg |= GUSBCFG_ULPI_UTMI_SEL; usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); - if (hsotg->core_params->phy_ulpi_ddr > 0) + if (hsotg->params.phy_ulpi_ddr > 0) usbcfg |= GUSBCFG_DDRSEL; break; case DWC2_PHY_TYPE_PARAM_UTMI: /* UTMI+ interface */ dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n"); usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16); - if (hsotg->core_params->phy_utmi_width == 16) + if (hsotg->params.phy_utmi_width == 16) usbcfg |= GUSBCFG_PHYIF16; break; default: @@ -230,9 +230,10 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) u32 usbcfg; int retval = 0; - if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL && - hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) { - /* If FS mode with FS PHY */ + if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL || + hsotg->params.speed == DWC2_SPEED_PARAM_LOW) && + hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) { + /* If FS/LS mode with FS/LS PHY */ retval = dwc2_fs_phy_init(hsotg, select_phy); if (retval) return retval; @@ -245,7 +246,7 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy) if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && - hsotg->core_params->ulpi_fs_ls > 0) { + hsotg->params.ulpi_fs_ls > 0) { dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg |= GUSBCFG_ULPI_FS_LS; @@ -272,9 +273,9 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) case GHWCFG2_INT_DMA_ARCH: dev_dbg(hsotg->dev, "Internal DMA Mode\n"); - if (hsotg->core_params->ahbcfg != -1) { + if (hsotg->params.ahbcfg != -1) { ahbcfg &= GAHBCFG_CTRL_MASK; - ahbcfg |= hsotg->core_params->ahbcfg & + ahbcfg |= hsotg->params.ahbcfg & ~GAHBCFG_CTRL_MASK; } break; @@ -285,21 +286,21 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg) break; } - dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n", - hsotg->core_params->dma_enable, - hsotg->core_params->dma_desc_enable); + dev_dbg(hsotg->dev, "host_dma:%d dma_desc_enable:%d\n", + hsotg->params.host_dma, + hsotg->params.dma_desc_enable); - if (hsotg->core_params->dma_enable > 0) { - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->params.host_dma > 0) { + if (hsotg->params.dma_desc_enable > 0) dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); else dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); } else { dev_dbg(hsotg->dev, "Using Slave mode\n"); - hsotg->core_params->dma_desc_enable = 0; + hsotg->params.dma_desc_enable = 0; } - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) ahbcfg |= GAHBCFG_DMA_EN; dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); @@ -316,10 +317,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) switch (hsotg->hw_params.op_mode) { case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: - if (hsotg->core_params->otg_cap == + if (hsotg->params.otg_cap == DWC2_CAP_PARAM_HNP_SRP_CAPABLE) usbcfg |= GUSBCFG_HNPCAP; - if (hsotg->core_params->otg_cap != + if (hsotg->params.otg_cap != DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) usbcfg |= GUSBCFG_SRPCAP; break; @@ -327,7 +328,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg) case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: - if (hsotg->core_params->otg_cap != + if (hsotg->params.otg_cap != 
DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) usbcfg |= GUSBCFG_SRPCAP; break; @@ -390,7 +391,7 @@ static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg) */ static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) { - struct dwc2_core_params *params = hsotg->core_params; + struct dwc2_core_params *params = &hsotg->params; struct dwc2_hw_params *hw = &hsotg->hw_params; u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size; @@ -449,7 +450,7 @@ static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg) static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) { - struct dwc2_core_params *params = hsotg->core_params; + struct dwc2_core_params *params = &hsotg->params; u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz; if (!params->enable_dynamic_fifo) @@ -490,7 +491,7 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", dwc2_readl(hsotg->regs + HPTXFSIZ)); - if (hsotg->core_params->en_multiple_tx_fifo > 0 && + if (hsotg->params.en_multiple_tx_fifo > 0 && hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { /* * Global DFIFOCFG calculation for Host mode - @@ -598,7 +599,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) { #ifdef VERBOSE_DEBUG - int num_channels = hsotg->core_params->host_channels; + int num_channels = hsotg->params.host_channels; struct dwc2_qh *qh; u32 hcchar; u32 hcsplt; @@ -648,6 +649,35 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg, #endif /* VERBOSE_DEBUG */ } +static int _dwc2_hcd_start(struct usb_hcd *hcd); + +static void dwc2_host_start(struct dwc2_hsotg *hsotg) +{ + struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); + + hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg); + _dwc2_hcd_start(hcd); +} + +static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg) +{ + struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); + + hcd->self.is_b_host = 0; +} + +static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, + int *hub_addr, int *hub_port) +{ + struct urb *urb = context; + + if (urb->dev->tt) + *hub_addr = urb->dev->tt->hub->devnum; + else + *hub_addr = 0; + *hub_port = urb->dev->ttport; +} + /* * ========================================================================= * Low Level Host Channel Access Functions @@ -741,7 +771,7 @@ static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg, * For Descriptor DMA mode core halts the channel on AHB error. * Interrupt is not required. 
*/ - if (hsotg->core_params->dma_desc_enable <= 0) { + if (hsotg->params.dma_desc_enable <= 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "desc DMA disabled\n"); hcintmsk |= HCINTMSK_AHBERR; @@ -774,7 +804,7 @@ static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg, { u32 intmsk; - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "DMA enabled\n"); dwc2_hc_enable_dma_ints(hsotg, chan); @@ -994,7 +1024,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, /* No need to set the bit in DDMA for disabling the channel */ /* TODO check it everywhere channel is disabled */ - if (hsotg->core_params->dma_desc_enable <= 0) { + if (hsotg->params.dma_desc_enable <= 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "desc DMA disabled\n"); hcchar |= HCCHAR_CHENA; @@ -1004,7 +1034,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, } hcchar |= HCCHAR_CHDIS; - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "DMA not enabled\n"); hcchar |= HCCHAR_CHENA; @@ -1143,7 +1173,7 @@ static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg, fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT; bytes_in_fifo = sizeof(u32) * - (hsotg->core_params->host_perio_tx_fifo_size - + (hsotg->params.host_perio_tx_fifo_size - fifo_space); /* @@ -1339,8 +1369,8 @@ static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan) { - u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size; - u16 max_hc_pkt_count = hsotg->core_params->max_packet_count; + u32 max_hc_xfer_size = hsotg->params.max_transfer_size; + u16 max_hc_pkt_count = hsotg->params.max_packet_count; u32 hcchar; u32 hctsiz = 0; u16 num_packets; @@ -1350,7 +1380,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, dev_vdbg(hsotg->dev, "%s()\n", __func__); if (chan->do_ping) { - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "ping, no DMA\n"); dwc2_hc_do_ping(hsotg, chan); @@ -1478,7 +1508,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, TSIZ_SC_MC_PID_SHIFT); } - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { dwc2_writel((u32)chan->xfer_dma, hsotg->regs + HCDMA(chan->hc_num)); if (dbg_hc(chan)) @@ -1521,7 +1551,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, chan->xfer_started = 1; chan->requests++; - if (hsotg->core_params->dma_enable <= 0 && + if (hsotg->params.host_dma <= 0 && !chan->ep_is_in && chan->xfer_len > 0) /* Load OUT packet into the appropriate Tx FIFO */ dwc2_hc_write_packet(hsotg, chan); @@ -1799,12 +1829,12 @@ void dwc2_hcd_start(struct dwc2_hsotg *hsotg) /* Must be called with interrupt disabled and spinlock held */ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg) { - int num_channels = hsotg->core_params->host_channels; + int num_channels = hsotg->params.host_channels; struct dwc2_host_chan *channel; u32 hcchar; int i; - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { /* Flush out any channel requests in slave mode */ for (i = 0; i < num_channels; i++) { channel = hsotg->hc_ptr_array[i]; @@ -1840,9 +1870,9 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg) channel->qh = NULL; } /* All channels have been freed, mark them available 
*/ - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { hsotg->available_host_channels = - hsotg->core_params->host_channels; + hsotg->params.host_channels; } else { hsotg->non_periodic_channels = 0; hsotg->periodic_channels = 0; @@ -2077,7 +2107,7 @@ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg, * Free the QTD and clean up the associated QH. Leave the QH in the * schedule if it has any remaining QTDs. */ - if (hsotg->core_params->dma_desc_enable <= 0) { + if (hsotg->params.dma_desc_enable <= 0) { u8 in_process = urb_qtd->in_process; dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh); @@ -2185,13 +2215,13 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) /* Set ULPI External VBUS bit if needed */ usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; - if (hsotg->core_params->phy_ulpi_ext_vbus == + if (hsotg->params.phy_ulpi_ext_vbus == DWC2_PHY_ULPI_EXTERNAL_VBUS) usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; /* Set external TS Dline pulsing bit if needed */ usbcfg &= ~GUSBCFG_TERMSELDLPULSE; - if (hsotg->core_params->ts_dline > 0) + if (hsotg->params.ts_dline > 0) usbcfg |= GUSBCFG_TERMSELDLPULSE; dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); @@ -2230,10 +2260,10 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) /* Program the GOTGCTL register */ otgctl = dwc2_readl(hsotg->regs + GOTGCTL); otgctl &= ~GOTGCTL_OTGVER; - if (hsotg->core_params->otg_ver > 0) + if (hsotg->params.otg_ver > 0) otgctl |= GOTGCTL_OTGVER; dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver); + dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->params.otg_ver); /* Clear the SRP success bit for FS-I2c */ hsotg->srp_success = 0; @@ -2277,7 +2307,8 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) /* Initialize Host Configuration Register */ dwc2_init_fs_ls_pclk_sel(hsotg); - if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) { + if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || + hsotg->params.speed == DWC2_SPEED_PARAM_LOW) { hcfg = dwc2_readl(hsotg->regs + HCFG); hcfg |= HCFG_FSLSSUPP; dwc2_writel(hcfg, hsotg->regs + HCFG); @@ -2288,13 +2319,13 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) * runtime. This bit needs to be programmed during initial configuration * and its value must not be changed during runtime. 
*/ - if (hsotg->core_params->reload_ctl > 0) { + if (hsotg->params.reload_ctl > 0) { hfir = dwc2_readl(hsotg->regs + HFIR); hfir |= HFIR_RLDCTRL; dwc2_writel(hfir, hsotg->regs + HFIR); } - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { u32 op_mode = hsotg->hw_params.op_mode; if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || @@ -2306,7 +2337,7 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) "Hardware does not support descriptor DMA mode -\n"); dev_err(hsotg->dev, "falling back to buffer DMA mode.\n"); - hsotg->core_params->dma_desc_enable = 0; + hsotg->params.dma_desc_enable = 0; } else { hcfg = dwc2_readl(hsotg->regs + HCFG); hcfg |= HCFG_DESCDMA; @@ -2332,12 +2363,12 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) otgctl &= ~GOTGCTL_HSTSETHNPEN; dwc2_writel(otgctl, hsotg->regs + GOTGCTL); - if (hsotg->core_params->dma_desc_enable <= 0) { + if (hsotg->params.dma_desc_enable <= 0) { int num_channels, i; u32 hcchar; /* Flush out any leftover queued requests */ - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; for (i = 0; i < num_channels; i++) { hcchar = dwc2_readl(hsotg->regs + HCCHAR(i)); hcchar &= ~HCCHAR_CHENA; @@ -2399,9 +2430,9 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg) hsotg->flags.d32 = 0; hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active; - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { hsotg->available_host_channels = - hsotg->core_params->host_channels; + hsotg->params.host_channels; } else { hsotg->non_periodic_channels = 0; hsotg->periodic_channels = 0; @@ -2415,7 +2446,7 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg) hc_list_entry) list_del_init(&chan->hc_list_entry); - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; for (i = 0; i < num_channels; i++) { chan = hsotg->hc_ptr_array[i]; list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list); @@ -2457,7 +2488,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, chan->do_ping = 0; chan->ep_is_in = 0; chan->data_pid_start = DWC2_HC_PID_SETUP; - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) chan->xfer_dma = urb->setup_dma; else chan->xfer_buf = urb->setup_packet; @@ -2484,7 +2515,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, chan->do_ping = 0; chan->data_pid_start = DWC2_HC_PID_DATA1; chan->xfer_len = 0; - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) chan->xfer_dma = hsotg->status_buf_dma; else chan->xfer_buf = hsotg->status_buf; @@ -2502,13 +2533,13 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, case USB_ENDPOINT_XFER_ISOC: chan->ep_type = USB_ENDPOINT_XFER_ISOC; - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->params.dma_desc_enable > 0) break; frame_desc = &urb->iso_descs[qtd->isoc_frame_index]; frame_desc->status = 0; - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { chan->xfer_dma = urb->dma; chan->xfer_dma += frame_desc->offset + qtd->isoc_split_offset; @@ -2690,7 +2721,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) !dwc2_hcd_is_pipe_in(&urb->pipe_info)) urb->actual_length = urb->length; - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) chan->xfer_dma = urb->dma + urb->actual_length; else chan->xfer_buf = (u8 *)urb->buf + urb->actual_length; @@ -2715,7 +2746,7 @@ static int 
dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) */ chan->multi_count = dwc2_hb_mult(qh->maxp); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { chan->desc_list_addr = qh->desc_list_dma; chan->desc_list_sz = qh->desc_list_sz; } @@ -2752,7 +2783,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( while (qh_ptr != &hsotg->periodic_sched_ready) { if (list_empty(&hsotg->free_hc_list)) break; - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { if (hsotg->available_host_channels <= 1) break; hsotg->available_host_channels--; @@ -2776,17 +2807,17 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( * schedule. Some free host channels may not be used if they are * reserved for periodic transfers. */ - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; qh_ptr = hsotg->non_periodic_sched_inactive.next; while (qh_ptr != &hsotg->non_periodic_sched_inactive) { - if (hsotg->core_params->uframe_sched <= 0 && + if (hsotg->params.uframe_sched <= 0 && hsotg->non_periodic_channels >= num_channels - hsotg->periodic_channels) break; if (list_empty(&hsotg->free_hc_list)) break; qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { if (hsotg->available_host_channels < 1) break; hsotg->available_host_channels--; @@ -2808,7 +2839,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions( else ret_val = DWC2_TRANSACTION_ALL; - if (hsotg->core_params->uframe_sched <= 0) + if (hsotg->params.uframe_sched <= 0) hsotg->non_periodic_channels++; } @@ -2847,8 +2878,8 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg, list_move_tail(&chan->split_order_list_entry, &hsotg->split_order); - if (hsotg->core_params->dma_enable > 0) { - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.host_dma > 0) { + if (hsotg->params.dma_desc_enable > 0) { if (!chan->xfer_started || chan->ep_type == USB_ENDPOINT_XFER_ISOC) { dwc2_hcd_start_xfer_ddma(hsotg, chan->qh); @@ -2957,7 +2988,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) * The flag prevents any halts to get into the request queue in * the middle of multiple high-bandwidth packets getting queued. */ - if (hsotg->core_params->dma_enable <= 0 && + if (hsotg->params.host_dma <= 0 && qh->channel->multi_count > 1) hsotg->queuing_high_bandwidth = 1; @@ -2976,7 +3007,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) * controller automatically handles multiple packets for * high-bandwidth transfers. 
*/ - if (hsotg->core_params->dma_enable > 0 || status == 0 || + if (hsotg->params.host_dma > 0 || status == 0 || qh->channel->requests == qh->channel->multi_count) { qh_ptr = qh_ptr->next; /* @@ -2993,7 +3024,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg) exit: if (no_queue_space || no_fifo_space || - (hsotg->core_params->dma_enable <= 0 && + (hsotg->params.host_dma <= 0 && !list_empty(&hsotg->periodic_sched_assigned))) { /* * May need to queue more transactions as the request @@ -3073,7 +3104,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg) tx_status = dwc2_readl(hsotg->regs + GNPTXSTS); qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT; - if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) { + if (hsotg->params.host_dma <= 0 && qspcavail == 0) { no_queue_space = 1; break; } @@ -3106,7 +3137,7 @@ next: hsotg->non_periodic_qh_ptr->next; } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr); - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { tx_status = dwc2_readl(hsotg->regs + GNPTXSTS); qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT; @@ -3307,7 +3338,7 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex) * If hibernation is supported, Phy clock will be suspended * after registers are backuped. */ - if (!hsotg->core_params->hibernation) { + if (!hsotg->params.hibernation) { /* Suspend the Phy Clock */ pcgctl = dwc2_readl(hsotg->regs + PCGCTL); pcgctl |= PCGCTL_STOPPCLK; @@ -3342,7 +3373,7 @@ static void dwc2_port_resume(struct dwc2_hsotg *hsotg) * If hibernation is supported, Phy clock is already resumed * after registers restore. */ - if (!hsotg->core_params->hibernation) { + if (!hsotg->params.hibernation) { pcgctl = dwc2_readl(hsotg->regs + PCGCTL); pcgctl &= ~PCGCTL_STOPPCLK; dwc2_writel(pcgctl, hsotg->regs + PCGCTL); @@ -3569,7 +3600,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, port_status |= USB_PORT_STAT_TEST; /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ - if (hsotg->core_params->dma_desc_fs_enable) { + if (hsotg->params.dma_desc_fs_enable) { /* * Enable descriptor DMA only if a full speed * device is connected. 
@@ -3583,7 +3614,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, u32 hcfg; dev_info(hsotg->dev, "Enabling descriptor DMA mode\n"); - hsotg->core_params->dma_desc_enable = 1; + hsotg->params.dma_desc_enable = 1; hcfg = dwc2_readl(hsotg->regs + HCFG); hcfg |= HCFG_DESCDMA; dwc2_writel(hcfg, hsotg->regs + HCFG); @@ -3824,7 +3855,7 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg) u32 p_tx_status; int i; - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; dev_dbg(hsotg->dev, "\n"); dev_dbg(hsotg->dev, "************************************************************\n"); @@ -4020,35 +4051,6 @@ static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd) return p->hsotg; } -static int _dwc2_hcd_start(struct usb_hcd *hcd); - -void dwc2_host_start(struct dwc2_hsotg *hsotg) -{ - struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); - - hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg); - _dwc2_hcd_start(hcd); -} - -void dwc2_host_disconnect(struct dwc2_hsotg *hsotg) -{ - struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg); - - hcd->self.is_b_host = 0; -} - -void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, - int *hub_port) -{ - struct urb *urb = context; - - if (urb->dev->tt) - *hub_addr = urb->dev->tt->hub->devnum; - else - *hub_addr = 0; - *hub_port = urb->dev->ttport; -} - /** * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context * @@ -4365,7 +4367,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (!HCD_HW_ACCESSIBLE(hcd)) goto unlock; - if (!hsotg->core_params->hibernation) + if (!hsotg->params.hibernation) goto skip_power_saving; /* @@ -4417,7 +4419,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd) if (hsotg->lx_state != DWC2_L2) goto unlock; - if (!hsotg->core_params->hibernation) { + if (!hsotg->params.hibernation) { hsotg->lx_state = DWC2_L0; goto unlock; } @@ -4510,9 +4512,6 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; - default: - pipetype = "UNKNOWN"; - break; } dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype, @@ -4609,8 +4608,6 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, case PIPE_INTERRUPT: ep_type = USB_ENDPOINT_XFER_INT; break; - default: - dev_warn(hsotg->dev, "Wrong ep type\n"); } dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets, @@ -4919,7 +4916,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg) } } - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { if (hsotg->status_buf) { dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, hsotg->status_buf, @@ -4999,16 +4996,16 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) hsotg->last_frame_num = HFNUM_MAX_FRNUM; /* Check if the bus driver or platform code has setup a dma_mask */ - if (hsotg->core_params->dma_enable > 0 && + if (hsotg->params.host_dma > 0 && hsotg->dev->dma_mask == NULL) { dev_warn(hsotg->dev, "dma_mask not set, disabling DMA\n"); - hsotg->core_params->dma_enable = 0; - hsotg->core_params->dma_desc_enable = 0; + hsotg->params.host_dma = 0; + hsotg->params.dma_desc_enable = 0; } /* Set device flags indicating whether the HCD supports DMA */ - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) dev_warn(hsotg->dev, "can't set DMA mask\n"); if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) @@ -5019,7 +5016,7 @@ int dwc2_hcd_init(struct 
dwc2_hsotg *hsotg, int irq) if (!hcd) goto error1; - if (hsotg->core_params->dma_enable <= 0) + if (hsotg->params.host_dma <= 0) hcd->self.uses_dma = 0; hcd->has_tt = 1; @@ -5067,7 +5064,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * in the controller. Initialize the channel descriptor array. */ INIT_LIST_HEAD(&hsotg->free_hc_list); - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array)); for (i = 0; i < num_channels; i++) { @@ -5091,7 +5088,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * done after usb_add_hcd since that function allocates the DMA buffer * pool. */ - if (hsotg->core_params->dma_enable > 0) + if (hsotg->params.host_dma > 0) hsotg->status_buf = dma_alloc_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, &hsotg->status_buf_dma, GFP_KERNEL); @@ -5107,10 +5104,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * DMA mode. * Alignment must be set to 512 bytes. */ - if (hsotg->core_params->dma_desc_enable || - hsotg->core_params->dma_desc_fs_enable) { + if (hsotg->params.dma_desc_enable || + hsotg->params.dma_desc_fs_enable) { hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc", - sizeof(struct dwc2_hcd_dma_desc) * + sizeof(struct dwc2_dma_desc) * MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA, NULL); if (!hsotg->desc_gen_cache) { @@ -5121,12 +5118,12 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * Disable descriptor dma mode since it will not be * usable. */ - hsotg->core_params->dma_desc_enable = 0; - hsotg->core_params->dma_desc_fs_enable = 0; + hsotg->params.dma_desc_enable = 0; + hsotg->params.dma_desc_fs_enable = 0; } hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc", - sizeof(struct dwc2_hcd_dma_desc) * + sizeof(struct dwc2_dma_desc) * MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL); if (!hsotg->desc_hsisoc_cache) { dev_err(hsotg->dev, @@ -5138,8 +5135,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) * Disable descriptor dma mode since it will not be * usable. 
*/ - hsotg->core_params->dma_desc_enable = 0; - hsotg->core_params->dma_desc_fs_enable = 0; + hsotg->params.dma_desc_enable = 0; + hsotg->params.dma_desc_fs_enable = 0; } } @@ -5184,7 +5181,6 @@ error3: error2: usb_put_hcd(hcd); error1: - kfree(hsotg->core_params); #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS kfree(hsotg->last_frame_num_array); @@ -5250,7 +5246,7 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) hr = &hsotg->hr_backup; hr->hcfg = dwc2_readl(hsotg->regs + HCFG); hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK); - for (i = 0; i < hsotg->core_params->host_channels; ++i) + for (i = 0; i < hsotg->params.host_channels; ++i) hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i)); hr->hprt0 = dwc2_read_hprt0(hsotg); @@ -5286,7 +5282,7 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) dwc2_writel(hr->hcfg, hsotg->regs + HCFG); dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK); - for (i = 0; i < hsotg->core_params->host_channels; ++i) + for (i = 0; i < hsotg->params.host_channels; ++i) dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i)); dwc2_writel(hr->hprt0, hsotg->regs + HPRT0); diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 7758bfb644ff..1ed5fa2beff4 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -348,7 +348,7 @@ struct dwc2_qh { struct list_head qtd_list; struct dwc2_host_chan *channel; struct list_head qh_list_entry; - struct dwc2_hcd_dma_desc *desc_list; + struct dwc2_dma_desc *desc_list; dma_addr_t desc_list_dma; u32 desc_list_sz; u32 *n_bytes; @@ -793,11 +793,6 @@ extern void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg); #define URB_SEND_ZERO_PACKET 0x2 /* Host driver callbacks */ - -extern void dwc2_host_start(struct dwc2_hsotg *hsotg); -extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg); -extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, - int *hub_addr, int *hub_port); extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context, gfp_t mem_flags, int *ttport); diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c index 0e1d42b5dec5..cf0367768cb3 100644 --- a/drivers/usb/dwc2/hcd_ddma.c +++ b/drivers/usb/dwc2/hcd_ddma.c @@ -95,7 +95,7 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, else desc_cache = hsotg->desc_gen_cache; - qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) * + qh->desc_list_sz = sizeof(struct dwc2_dma_desc) * dwc2_max_desc_num(qh); qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA); @@ -297,7 +297,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan = qh->channel; if (dwc2_qh_is_non_per(qh)) { - if (hsotg->core_params->uframe_sched > 0) + if (hsotg->params.uframe_sched > 0) hsotg->available_host_channels++; else hsotg->non_periodic_channels--; @@ -322,7 +322,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg, qh->ntd = 0; if (qh->desc_list) - memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) * + memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) * dwc2_max_desc_num(qh)); } @@ -404,7 +404,7 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC || qh->ep_type == USB_ENDPOINT_XFER_INT) && - (hsotg->core_params->uframe_sched > 0 || + (hsotg->params.uframe_sched > 0 || !hsotg->periodic_channels) && hsotg->frame_list) { dwc2_per_sched_disable(hsotg); dwc2_frame_list_free(hsotg); @@ -542,7 +542,7 @@ static void dwc2_fill_host_isoc_dma_desc(struct 
dwc2_hsotg *hsotg, struct dwc2_qh *qh, u32 max_xfer_size, u16 idx) { - struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx]; + struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx]; struct dwc2_hcd_iso_packet_desc *frame_desc; memset(dma_desc, 0, sizeof(*dma_desc)); @@ -571,8 +571,8 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + - (idx * sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + (idx * sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); } @@ -645,8 +645,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, qh->desc_list[idx].status |= HOST_DMA_IOC; dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + (idx * - sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); } #else @@ -679,8 +679,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg, qh->desc_list[idx].status |= HOST_DMA_IOC; dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + - (idx * sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + (idx * sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); #endif } @@ -690,11 +690,11 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, struct dwc2_qh *qh, int n_desc) { - struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc]; + struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc]; int len = chan->xfer_len; - if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1)) - len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1); + if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1)) + len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1); if (chan->ep_is_in) { int num_packets; @@ -721,8 +721,8 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg, dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + - (n_desc * sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + (n_desc * sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); /* @@ -778,8 +778,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + ((n_desc - 1) * - sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); } dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc); @@ -808,8 +808,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, n_desc - 1, &qh->desc_list[n_desc - 1]); dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma + (n_desc - 1) * - sizeof(struct dwc2_hcd_dma_desc), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); if (n_desc > 1) { qh->desc_list[0].status |= HOST_DMA_A; @@ -817,7 +817,7 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg, &qh->desc_list[0]); dma_sync_single_for_device(hsotg->dev, qh->desc_list_dma, - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc), DMA_TO_DEVICE); } chan->ntd = n_desc; @@ -893,7 +893,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, struct dwc2_qh *qh, u16 idx) { - struct dwc2_hcd_dma_desc *dma_desc; + struct dwc2_dma_desc *dma_desc; struct dwc2_hcd_iso_packet_desc *frame_desc; u16 remain = 0; int rc = 0; @@ -902,8 +902,8 @@ static int 
dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg, return -EINVAL; dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx * - sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_FROM_DEVICE); dma_desc = &qh->desc_list[idx]; @@ -1066,7 +1066,7 @@ stop_scan: static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, struct dwc2_qtd *qtd, - struct dwc2_hcd_dma_desc *dma_desc, + struct dwc2_dma_desc *dma_desc, enum dwc2_halt_status halt_status, u32 n_bytes, int *xfer_done) { @@ -1154,7 +1154,7 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg, { struct dwc2_qh *qh = chan->qh; struct dwc2_hcd_urb *urb = qtd->urb; - struct dwc2_hcd_dma_desc *dma_desc; + struct dwc2_dma_desc *dma_desc; u32 n_bytes; int failed; @@ -1165,8 +1165,8 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg, dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (desc_num * - sizeof(struct dwc2_hcd_dma_desc)), - sizeof(struct dwc2_hcd_dma_desc), + sizeof(struct dwc2_dma_desc)), + sizeof(struct dwc2_dma_desc), DMA_FROM_DEVICE); dma_desc = &qh->desc_list[desc_num]; diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index 906f223542ee..b8f4b6aaf1d0 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -256,7 +256,7 @@ static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg) static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0, u32 *hprt0_modify) { - struct dwc2_core_params *params = hsotg->core_params; + struct dwc2_core_params *params = &hsotg->params; int do_reset = 0; u32 usbcfg; u32 prtspd; @@ -395,10 +395,10 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg) dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify); } else { hsotg->flags.b.port_enable_change = 1; - if (hsotg->core_params->dma_desc_fs_enable) { + if (hsotg->params.dma_desc_fs_enable) { u32 hcfg; - hsotg->core_params->dma_desc_enable = 0; + hsotg->params.dma_desc_enable = 0; hsotg->new_connection = false; hcfg = dwc2_readl(hsotg->regs + HCFG); hcfg &= ~HCFG_DESCDMA; @@ -604,7 +604,7 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state( /* Skip whole frame */ if (chan->qh->do_split && chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && - hsotg->core_params->dma_enable > 0) { + hsotg->params.host_dma > 0) { qtd->complete_split = 0; qtd->isoc_split_offset = 0; } @@ -743,7 +743,7 @@ cleanup: dwc2_hc_cleanup(hsotg, chan); list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list); - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { hsotg->available_host_channels++; } else { switch (chan->ep_type) { @@ -789,7 +789,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg, if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "%s()\n", __func__); - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "DMA enabled\n"); dwc2_release_channel(hsotg, chan, qtd, halt_status); @@ -915,6 +915,8 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg, { struct dwc2_hcd_iso_packet_desc *frame_desc; u32 len; + u32 hctsiz; + u32 pid; if (!qtd->urb) return 0; @@ -932,7 +934,10 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg, qtd->isoc_split_offset += len; - if (frame_desc->actual_length >= frame_desc->length) { + hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum)); + pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> 
TSIZ_SC_MC_PID_SHIFT; + + if (frame_desc->actual_length >= frame_desc->length || pid == 0) { frame_desc->status = 0; qtd->isoc_frame_index++; qtd->complete_split = 0; @@ -974,7 +979,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg, pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status); if (pipe_type == USB_ENDPOINT_XFER_ISOC) /* Do not disable the interrupt, just clear it */ @@ -985,7 +990,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg, /* Handle xfer complete on CSPLIT */ if (chan->qh->do_split) { if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && - hsotg->core_params->dma_enable > 0) { + hsotg->params.host_dma > 0) { if (qtd->complete_split && dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum, qtd)) @@ -1097,7 +1102,7 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg, dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n", chnum); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, DWC2_HC_XFER_STALL); goto handle_stall_done; @@ -1207,7 +1212,7 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg, switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) { case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_BULK: - if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) { + if (hsotg->params.host_dma > 0 && chan->ep_is_in) { /* * NAK interrupts are enabled on bulk/control IN * transfers in DMA mode for the sole purpose of @@ -1353,7 +1358,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg, */ if (chan->do_split && chan->complete_split) { if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC && - hsotg->core_params->dma_enable > 0) { + hsotg->params.host_dma > 0) { qtd->complete_split = 0; qtd->isoc_split_offset = 0; qtd->isoc_frame_index++; @@ -1374,7 +1379,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh = chan->qh; bool past_end; - if (hsotg->core_params->uframe_sched <= 0) { + if (hsotg->params.uframe_sched <= 0) { int frnum = dwc2_hcd_get_frame_number(hsotg); /* Don't have num_hs_transfers; simple logic */ @@ -1467,7 +1472,7 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg, dwc2_hc_handle_tt_clear(hsotg, chan, qtd); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, DWC2_HC_XFER_BABBLE_ERR); goto disable_int; @@ -1572,7 +1577,7 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg, dev_err(hsotg->dev, " Interval: %d\n", urb->interval); /* Core halts the channel for Descriptor DMA mode */ - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, DWC2_HC_XFER_AHB_ERR); goto handle_ahberr_done; @@ -1604,7 +1609,7 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg, dwc2_hc_handle_tt_clear(hsotg, chan, qtd); - if (hsotg->core_params->dma_desc_enable > 0) { + if (hsotg->params.dma_desc_enable > 0) { dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, DWC2_HC_XFER_XACT_ERR); goto handle_xacterr_done; @@ -1798,8 +1803,8 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE || (chan->halt_status == DWC2_HC_XFER_AHB_ERR && - hsotg->core_params->dma_desc_enable <= 0)) { - if 
(hsotg->core_params->dma_desc_enable > 0) + hsotg->params.dma_desc_enable <= 0)) { + if (hsotg->params.dma_desc_enable > 0) dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, chan->halt_status); else @@ -1830,7 +1835,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, } else if (chan->hcint & HCINTMSK_STALL) { dwc2_hc_stall_intr(hsotg, chan, chnum, qtd); } else if ((chan->hcint & HCINTMSK_XACTERR) && - hsotg->core_params->dma_desc_enable <= 0) { + hsotg->params.dma_desc_enable <= 0) { if (out_nak_enh) { if (chan->hcint & (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) { @@ -1850,10 +1855,10 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, */ dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); } else if ((chan->hcint & HCINTMSK_XCS_XACT) && - hsotg->core_params->dma_desc_enable > 0) { + hsotg->params.dma_desc_enable > 0) { dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); } else if ((chan->hcint & HCINTMSK_AHBERR) && - hsotg->core_params->dma_desc_enable > 0) { + hsotg->params.dma_desc_enable > 0) { dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd); } else if (chan->hcint & HCINTMSK_BBLERR) { dwc2_hc_babble_intr(hsotg, chan, chnum, qtd); @@ -1946,7 +1951,7 @@ static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg, dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n", chnum); - if (hsotg->core_params->dma_enable > 0) { + if (hsotg->params.host_dma > 0) { dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd); } else { if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd)) @@ -2023,7 +2028,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum) * interrupt unmasked */ WARN_ON(hcint != HCINTMSK_CHHLTD); - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->params.dma_desc_enable > 0) dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, chan->halt_status); else @@ -2051,7 +2056,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum) qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd, qtd_list_entry); - if (hsotg->core_params->dma_enable <= 0) { + if (hsotg->params.host_dma <= 0) { if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD) hcint &= ~HCINTMSK_CHHLTD; } @@ -2156,7 +2161,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg) } } - for (i = 0; i < hsotg->core_params->host_channels; i++) { + for (i = 0; i < hsotg->params.host_channels; i++) { if (haint & (1 << i)) dwc2_hc_n_intr(hsotg, i); } diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 13754353251f..5713f03a4e56 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -75,7 +75,7 @@ static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg) int status; int num_channels; - num_channels = hsotg->core_params->host_channels; + num_channels = hsotg->params.host_channels; if (hsotg->periodic_channels + hsotg->non_periodic_channels < num_channels && hsotg->periodic_channels < num_channels - 1) { @@ -355,6 +355,37 @@ static void pmap_unschedule(unsigned long *map, int bits_per_period, } } +/** + * dwc2_get_ls_map() - Get the map used for the given qh + * + * @hsotg: The HCD state structure for the DWC OTG controller. + * @qh: QH for the periodic transfer. + * + * We'll always get the periodic map out of our TT. Note that even if we're + * running the host straight in low speed / full speed mode it appears as if + * a TT is allocated for us, so we'll use it. If that ever changes we can + * add logic here to get a map out of "hsotg" if !qh->do_split. + * + * Returns: the map or NULL if a map couldn't be found. 
+ */ +static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) +{ + unsigned long *map; + + /* Don't expect to be missing a TT and be doing low speed scheduling */ + if (WARN_ON(!qh->dwc_tt)) + return NULL; + + /* Get the map and adjust if this is a multi_tt hub */ + map = qh->dwc_tt->periodic_bitmaps; + if (qh->dwc_tt->usb_tt->multi) + map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; + + return map; +} + +#ifdef DWC2_PRINT_SCHEDULE /* * cat_printf() - A printf() + strcat() helper * @@ -454,35 +485,6 @@ static void pmap_print(unsigned long *map, int bits_per_period, } } -/** - * dwc2_get_ls_map() - Get the map used for the given qh - * - * @hsotg: The HCD state structure for the DWC OTG controller. - * @qh: QH for the periodic transfer. - * - * We'll always get the periodic map out of our TT. Note that even if we're - * running the host straight in low speed / full speed mode it appears as if - * a TT is allocated for us, so we'll use it. If that ever changes we can - * add logic here to get a map out of "hsotg" if !qh->do_split. - * - * Returns: the map or NULL if a map couldn't be found. - */ -static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, - struct dwc2_qh *qh) -{ - unsigned long *map; - - /* Don't expect to be missing a TT and be doing low speed scheduling */ - if (WARN_ON(!qh->dwc_tt)) - return NULL; - - /* Get the map and adjust if this is a multi_tt hub */ - map = qh->dwc_tt->periodic_bitmaps; - if (qh->dwc_tt->usb_tt->multi) - map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; - - return map; -} struct dwc2_qh_print_data { struct dwc2_hsotg *hsotg; @@ -519,9 +521,6 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg, * If we don't have tracing turned on, don't run unless the special * define is turned on. */ -#ifndef DWC2_PRINT_SCHEDULE - return; -#endif if (qh->schedule_low_speed) { unsigned long *map = dwc2_get_ls_map(hsotg, qh); @@ -559,8 +558,12 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg, DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us", dwc2_qh_print, &print_data); } - + return; } +#else +static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg, + struct dwc2_qh *qh) {}; +#endif /** * dwc2_ls_pmap_schedule() - Schedule a low speed QH @@ -1104,7 +1107,7 @@ static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) next_active_frame = earliest_frame; /* Get the "no microframe schduler" out of the way... 
*/ - if (hsotg->core_params->uframe_sched <= 0) { + if (hsotg->params.uframe_sched <= 0) { if (qh->do_split) /* Splits are active at microframe 0 minus 1 */ next_active_frame |= 0x7; @@ -1197,7 +1200,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { int status; - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { status = dwc2_uframe_schedule(hsotg, qh); } else { status = dwc2_periodic_channel_available(hsotg); @@ -1218,7 +1221,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) return status; } - if (hsotg->core_params->uframe_sched <= 0) + if (hsotg->params.uframe_sched <= 0) /* Reserve periodic channel */ hsotg->periodic_channels++; @@ -1254,7 +1257,7 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) /* Update claimed usecs per (micro)frame */ hsotg->periodic_usecs -= qh->host_us; - if (hsotg->core_params->uframe_sched > 0) { + if (hsotg->params.uframe_sched > 0) { dwc2_uframe_unschedule(hsotg, qh); } else { /* Release periodic channel reservation */ @@ -1328,7 +1331,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg, int status = 0; max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp); - max_channel_xfer_size = hsotg->core_params->max_transfer_size; + max_channel_xfer_size = hsotg->params.max_transfer_size; if (max_xfer_size > max_channel_xfer_size) { dev_err(hsotg->dev, @@ -1391,7 +1394,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) qh->unreserve_pending = 0; - if (hsotg->core_params->dma_desc_enable > 0) + if (hsotg->params.dma_desc_enable > 0) /* Don't rely on SOF and start in ready schedule */ list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready); else @@ -1599,7 +1602,7 @@ struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, dwc2_qh_init(hsotg, qh, urb, mem_flags); - if (hsotg->core_params->dma_desc_enable > 0 && + if (hsotg->params.dma_desc_enable > 0 && dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) { dwc2_hcd_qh_free(hsotg, qh); return NULL; @@ -1711,7 +1714,7 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) dwc2_deschedule_periodic(hsotg, qh); hsotg->periodic_qh_count--; if (!hsotg->periodic_qh_count && - hsotg->core_params->dma_desc_enable <= 0) { + hsotg->params.dma_desc_enable <= 0) { intr_mask = dwc2_readl(hsotg->regs + GINTMSK); intr_mask &= ~GINTSTS_SOF; dwc2_writel(intr_mask, hsotg->regs + GINTMSK); diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h index 91058441e62a..5be056b39e5c 100644 --- a/drivers/usb/dwc2/hw.h +++ b/drivers/usb/dwc2/hw.h @@ -412,6 +412,7 @@ /* Device mode registers */ #define DCFG HSOTG_REG(0x800) +#define DCFG_DESCDMA_EN (1 << 23) #define DCFG_EPMISCNT_MASK (0x1f << 18) #define DCFG_EPMISCNT_SHIFT 18 #define DCFG_EPMISCNT_LIMIT 0x1f @@ -473,6 +474,7 @@ #define DIEPMSK_XFERCOMPLMSK (1 << 0) #define DOEPMSK HSOTG_REG(0x814) +#define DOEPMSK_BNAMSK (1 << 9) #define DOEPMSK_BACK2BACKSETUP (1 << 6) #define DOEPMSK_STSPHSERCVDMSK (1 << 5) #define DOEPMSK_OUTTKNEPDISMSK (1 << 4) @@ -790,7 +792,8 @@ #define HCFIFO(_ch) HSOTG_REG(0x1000 + 0x1000 * (_ch)) /** - * struct dwc2_hcd_dma_desc - Host-mode DMA descriptor structure + * struct dwc2_dma_desc - DMA descriptor structure, + * used for both host and gadget modes * * @status: DMA descriptor status quadlet * @buf: DMA descriptor data buffer pointer @@ -798,10 +801,12 @@ * DMA Descriptor structure contains two quadlets: * Status quadlet and Data buffer pointer. 
*/ -struct dwc2_hcd_dma_desc { +struct dwc2_dma_desc { u32 status; u32 buf; -}; +} __packed; + +/* Host Mode DMA descriptor status quadlet */ #define HOST_DMA_A (1 << 31) #define HOST_DMA_STS_MASK (0x3 << 28) @@ -817,8 +822,43 @@ struct dwc2_hcd_dma_desc { #define HOST_DMA_ISOC_NBYTES_SHIFT 0 #define HOST_DMA_NBYTES_MASK (0x1ffff << 0) #define HOST_DMA_NBYTES_SHIFT 0 +#define HOST_DMA_NBYTES_LIMIT 131071 + +/* Device Mode DMA descriptor status quadlet */ + +#define DEV_DMA_BUFF_STS_MASK (0x3 << 30) +#define DEV_DMA_BUFF_STS_SHIFT 30 +#define DEV_DMA_BUFF_STS_HREADY 0 +#define DEV_DMA_BUFF_STS_DMABUSY 1 +#define DEV_DMA_BUFF_STS_DMADONE 2 +#define DEV_DMA_BUFF_STS_HBUSY 3 +#define DEV_DMA_STS_MASK (0x3 << 28) +#define DEV_DMA_STS_SHIFT 28 +#define DEV_DMA_STS_SUCC 0 +#define DEV_DMA_STS_BUFF_FLUSH 1 +#define DEV_DMA_STS_BUFF_ERR 3 +#define DEV_DMA_L (1 << 27) +#define DEV_DMA_SHORT (1 << 26) +#define DEV_DMA_IOC (1 << 25) +#define DEV_DMA_SR (1 << 24) +#define DEV_DMA_MTRF (1 << 23) +#define DEV_DMA_ISOC_PID_MASK (0x3 << 23) +#define DEV_DMA_ISOC_PID_SHIFT 23 +#define DEV_DMA_ISOC_PID_DATA0 0 +#define DEV_DMA_ISOC_PID_DATA2 1 +#define DEV_DMA_ISOC_PID_DATA1 2 +#define DEV_DMA_ISOC_PID_MDATA 3 +#define DEV_DMA_ISOC_FRNUM_MASK (0x7ff << 12) +#define DEV_DMA_ISOC_FRNUM_SHIFT 12 +#define DEV_DMA_ISOC_TX_NBYTES_MASK (0xfff << 0) +#define DEV_DMA_ISOC_TX_NBYTES_LIMIT 0xfff +#define DEV_DMA_ISOC_RX_NBYTES_MASK (0x7ff << 0) +#define DEV_DMA_ISOC_RX_NBYTES_LIMIT 0x7ff +#define DEV_DMA_ISOC_NBYTES_SHIFT 0 +#define DEV_DMA_NBYTES_MASK (0xffff << 0) +#define DEV_DMA_NBYTES_SHIFT 0 +#define DEV_DMA_NBYTES_LIMIT 0xffff -#define MAX_DMA_DESC_SIZE 131071 #define MAX_DMA_DESC_NUM_GENERIC 64 #define MAX_DMA_DESC_NUM_HS_ISOC 256 diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c new file mode 100644 index 000000000000..a786256535b6 --- /dev/null +++ b/drivers/usb/dwc2/params.c @@ -0,0 +1,1435 @@ +/* + * Copyright (C) 2004-2016 Synopsys, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> + +#include "core.h" + +static const struct dwc2_core_params params_hi6220 = { + .otg_cap = 2, /* No HNP/SRP capable */ + .otg_ver = 0, /* 1.3 */ + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = 0, /* High Speed */ + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = 1, + .host_rx_fifo_size = 512, + .host_nperio_tx_fifo_size = 512, + .host_perio_tx_fifo_size = 512, + .max_transfer_size = 65535, + .max_packet_count = 511, + .host_channels = 16, + .phy_type = 1, /* UTMI */ + .phy_utmi_width = 8, + .phy_ulpi_ddr = 0, /* Single */ + .phy_ulpi_ext_vbus = 0, + .i2c_enable = 0, + .ulpi_fs_ls = 0, + .host_support_fs_ls_low_power = 0, + .host_ls_low_power_phy_clk = 0, /* 48 MHz */ + .ts_dline = 0, + .reload_ctl = 0, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = 0, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_bcm2835 = { + .otg_cap = 0, /* HNP/SRP capable */ + .otg_ver = 0, /* 1.3 */ + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = 0, /* High Speed */ + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = 1, + .host_rx_fifo_size = 774, /* 774 DWORDs */ + .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */ + .host_perio_tx_fifo_size = 512, /* 512 DWORDs */ + .max_transfer_size = 65535, + .max_packet_count = 511, + .host_channels = 8, + .phy_type = 1, /* UTMI */ + .phy_utmi_width = 8, /* 8 bits */ + .phy_ulpi_ddr = 0, /* Single */ + .phy_ulpi_ext_vbus = 0, + .i2c_enable = 0, + .ulpi_fs_ls = 0, + .host_support_fs_ls_low_power = 0, + .host_ls_low_power_phy_clk = 0, /* 48 MHz */ + .ts_dline = 0, + .reload_ctl = 0, + .ahbcfg = 0x10, + .uframe_sched = 0, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_rk3066 = { + .otg_cap = 2, /* non-HNP/non-SRP */ + .otg_ver = -1, + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = -1, + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = 525, /* 525 DWORDs */ + .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ + .host_perio_tx_fifo_size = 256, /* 256 DWORDs */ + .max_transfer_size = -1, + .max_packet_count = -1, + .host_channels = -1, + .phy_type = -1, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + .phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = -1, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = -1, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_ltq = { + .otg_cap = 2, /* non-HNP/non-SRP */ + .otg_ver = -1, + .dma_desc_enable = -1, + .dma_desc_fs_enable = -1, + .speed = -1, + .enable_dynamic_fifo = -1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = 288, /* 288 DWORDs */ + .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ + 
.host_perio_tx_fifo_size = 96, /* 96 DWORDs */ + .max_transfer_size = 65535, + .max_packet_count = 511, + .host_channels = -1, + .phy_type = -1, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + .phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = -1, + .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = -1, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_amlogic = { + .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE, + .otg_ver = -1, + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + .speed = DWC2_SPEED_PARAM_HIGH, + .enable_dynamic_fifo = 1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = 512, + .host_nperio_tx_fifo_size = 500, + .host_perio_tx_fifo_size = 500, + .max_transfer_size = -1, + .max_packet_count = -1, + .host_channels = 16, + .phy_type = DWC2_PHY_TYPE_PARAM_UTMI, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + .phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = 1, + .ahbcfg = GAHBCFG_HBSTLEN_INCR8 << + GAHBCFG_HBSTLEN_SHIFT, + .uframe_sched = 0, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +static const struct dwc2_core_params params_default = { + .otg_cap = -1, + .otg_ver = -1, + + /* + * Disable descriptor dma mode by default as the HW can support + * it, but does not support it for SPLIT transactions. + * Disable it for FS devices as well. + */ + .dma_desc_enable = 0, + .dma_desc_fs_enable = 0, + + .speed = -1, + .enable_dynamic_fifo = -1, + .en_multiple_tx_fifo = -1, + .host_rx_fifo_size = -1, + .host_nperio_tx_fifo_size = -1, + .host_perio_tx_fifo_size = -1, + .max_transfer_size = -1, + .max_packet_count = -1, + .host_channels = -1, + .phy_type = -1, + .phy_utmi_width = -1, + .phy_ulpi_ddr = -1, + .phy_ulpi_ext_vbus = -1, + .i2c_enable = -1, + .ulpi_fs_ls = -1, + .host_support_fs_ls_low_power = -1, + .host_ls_low_power_phy_clk = -1, + .ts_dline = -1, + .reload_ctl = -1, + .ahbcfg = -1, + .uframe_sched = -1, + .external_id_pin_ctl = -1, + .hibernation = -1, +}; + +const struct of_device_id dwc2_of_match_table[] = { + { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 }, + { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 }, + { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 }, + { .compatible = "lantiq,arx100-usb", .data = &params_ltq }, + { .compatible = "lantiq,xrx200-usb", .data = &params_ltq }, + { .compatible = "snps,dwc2", .data = NULL }, + { .compatible = "samsung,s3c6400-hsotg", .data = NULL}, + { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic }, + { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic }, + { .compatible = "amcc,dwc-otg", .data = NULL }, + {}, +}; +MODULE_DEVICE_TABLE(of, dwc2_of_match_table); + +static void dwc2_get_device_property(struct dwc2_hsotg *hsotg, + char *property, u8 size, u64 *value) +{ + u8 val8; + u16 val16; + u32 val32; + + switch (size) { + case 0: + *value = device_property_read_bool(hsotg->dev, property); + break; + case 1: + if (device_property_read_u8(hsotg->dev, property, &val8)) + return; + + *value = val8; + break; + case 2: + if (device_property_read_u16(hsotg->dev, property, &val16)) + return; + + *value = val16; + break; + case 4: + if (device_property_read_u32(hsotg->dev, property, &val32)) + return; + + *value = val32; + break; + case 8: + if
(device_property_read_u64(hsotg->dev, property, value)) + return; + + break; + default: + /* + * The size is checked by the only function that calls + * this so this should never happen. + */ + WARN_ON(1); + return; + } +} + +static void dwc2_set_core_param(void *param, u8 size, u64 value) +{ + switch (size) { + case 0: + *((bool *)param) = !!value; + break; + case 1: + *((u8 *)param) = (u8)value; + break; + case 2: + *((u16 *)param) = (u16)value; + break; + case 4: + *((u32 *)param) = (u32)value; + break; + case 8: + *((u64 *)param) = (u64)value; + break; + default: + /* + * The size is checked by the only function that calls + * this so this should never happen. + */ + WARN_ON(1); + return; + } +} + +/** + * dwc2_set_param() - Set a core parameter + * + * @hsotg: Programming view of the DWC_otg controller + * @param: Pointer to the parameter to set + * @lookup: True if the property should be looked up + * @property: The device property to read + * @legacy: The param value to set if @property is not available. This + * will typically be the legacy value set in the static + * params structure. + * @def: The default value + * @min: The minimum value + * @max: The maximum value + * @size: The size of the core parameter in bytes, or 0 for bool. + * + * This function looks up @property and sets the @param to that value. + * If the property doesn't exist it uses the passed-in @value. It will + * verify that the value falls between @min and @max. If it doesn't, + * it will output an error and set the parameter to either @def or, + * failing that, to @min. + * + * The @size is used to write to @param and to query the device + * properties so that this same function can be used with different + * types of parameters. + */ +static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param, + bool lookup, char *property, u64 legacy, + u64 def, u64 min, u64 max, u8 size) +{ + u64 sizemax; + u64 value; + + if (WARN_ON(!hsotg || !param || !property)) + return; + + if (WARN((size > 8) || ((size & (size - 1)) != 0), + "Invalid size %d for %s\n", size, property)) + return; + + dev_vdbg(hsotg->dev, "%s: Setting %s: legacy=%llu, def=%llu, min=%llu, max=%llu, size=%d\n", + __func__, property, legacy, def, min, max, size); + + sizemax = (1ULL << (size * 8)) - 1; + value = legacy; + + /* Override legacy settings. */ + if (lookup) + dwc2_get_device_property(hsotg, property, size, &value); + + /* + * While the value is not valid, try setting it to the default + * value, and failing that, set it to the minimum. + */ + while ((value < min) || (value > max)) { + /* Print an error unless the value is set to auto. */ + if (value != sizemax) + dev_err(hsotg->dev, "Invalid value %llu for param %s\n", + value, property); + + /* + * If we are already the default, just set it to the + * minimum. + */ + if (value == def) { + dev_vdbg(hsotg->dev, "%s: setting value to min=%llu\n", + __func__, min); + value = min; + break; + } + + /* Try the default value */ + dev_vdbg(hsotg->dev, "%s: setting value to default=%llu\n", + __func__, def); + value = def; + } + + dev_dbg(hsotg->dev, "Setting %s to %llu\n", property, value); + dwc2_set_core_param(param, size, value); +} + +/** + * dwc2_set_param_u16() - Set a u16 parameter + * + * See dwc2_set_param(). 
+ */ +static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param, + bool lookup, char *property, u16 legacy, + u16 def, u16 min, u16 max) +{ + dwc2_set_param(hsotg, param, lookup, property, + legacy, def, min, max, 2); +} + +/** + * dwc2_set_param_bool() - Set a bool parameter + * + * See dwc2_set_param(). + * + * Note: there is no 'legacy' argument here because there is no legacy + * source of bool params. + */ +static void dwc2_set_param_bool(struct dwc2_hsotg *hsotg, bool *param, + bool lookup, char *property, + bool def, bool min, bool max) +{ + dwc2_set_param(hsotg, param, lookup, property, + def, def, min, max, 0); +} + +#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) + +/* Parameter access functions */ +static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + switch (val) { + case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: + if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) + valid = 0; + break; + case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: + switch (hsotg->hw_params.op_mode) { + case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: + case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: + break; + default: + valid = 0; + break; + } + break; + case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: + /* always valid */ + break; + default: + valid = 0; + break; + } + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for otg_cap parameter. Check HW configuration.\n", + val); + switch (hsotg->hw_params.op_mode) { + case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: + val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; + break; + case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: + case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: + val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; + break; + default: + val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; + break; + } + dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); + } + + hsotg->params.otg_cap = val; +} + +static void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val > 0 && (hsotg->params.host_dma <= 0 || + !hsotg->hw_params.dma_desc_enable)) + valid = 0; + if (val < 0) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for dma_desc_enable parameter. Check HW configuration.\n", + val); + val = (hsotg->params.host_dma > 0 && + hsotg->hw_params.dma_desc_enable); + dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); + } + + hsotg->params.dma_desc_enable = val; +} + +static void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val > 0 && (hsotg->params.host_dma <= 0 || + !hsotg->hw_params.dma_desc_enable)) + valid = 0; + if (val < 0) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for dma_desc_fs_enable parameter. 
Check HW configuration.\n", + val); + val = (hsotg->params.host_dma > 0 && + hsotg->hw_params.dma_desc_enable); + } + + hsotg->params.dma_desc_fs_enable = val; + dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val); +} + +static void +dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, + int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "Wrong value for host_support_fs_low_power\n"); + dev_err(hsotg->dev, + "host_support_fs_low_power must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, + "Setting host_support_fs_low_power to %d\n", val); + } + + hsotg->params.host_support_fs_ls_low_power = val; +} + +static void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) + valid = 0; + if (val < 0) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n", + val); + val = hsotg->hw_params.enable_dynamic_fifo; + dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); + } + + hsotg->params.enable_dynamic_fifo = val; +} + +static void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val < 16 || val > hsotg->hw_params.rx_fifo_size) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_rx_fifo_size. Check HW configuration.\n", + val); + val = hsotg->hw_params.rx_fifo_size; + dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); + } + + hsotg->params.host_rx_fifo_size = val; +} + +static void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", + val); + val = hsotg->hw_params.host_nperio_tx_fifo_size; + dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", + val); + } + + hsotg->params.host_nperio_tx_fifo_size = val; +} + +static void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n", + val); + val = hsotg->hw_params.host_perio_tx_fifo_size; + dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", + val); + } + + hsotg->params.host_perio_tx_fifo_size = val; +} + +static void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val < 2047 || val > hsotg->hw_params.max_transfer_size) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for max_transfer_size. Check HW configuration.\n", + val); + val = hsotg->hw_params.max_transfer_size; + dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); + } + + hsotg->params.max_transfer_size = val; +} + +static void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val < 15 || val > hsotg->hw_params.max_packet_count) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for max_packet_count. 
Check HW configuration.\n", + val); + val = hsotg->hw_params.max_packet_count; + dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); + } + + hsotg->params.max_packet_count = val; +} + +static void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (val < 1 || val > hsotg->hw_params.host_channels) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_channels. Check HW configuration.\n", + val); + val = hsotg->hw_params.host_channels; + dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); + } + + hsotg->params.host_channels = val; +} + +static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 0; + u32 hs_phy_type, fs_phy_type; + + if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, + DWC2_PHY_TYPE_PARAM_ULPI)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for phy_type\n"); + dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); + } + + valid = 0; + } + + hs_phy_type = hsotg->hw_params.hs_phy_type; + fs_phy_type = hsotg->hw_params.fs_phy_type; + if (val == DWC2_PHY_TYPE_PARAM_UTMI && + (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) + valid = 1; + else if (val == DWC2_PHY_TYPE_PARAM_ULPI && + (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) + valid = 1; + else if (val == DWC2_PHY_TYPE_PARAM_FS && + fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) + valid = 1; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for phy_type. Check HW configuration.\n", + val); + val = DWC2_PHY_TYPE_PARAM_FS; + if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { + if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) + val = DWC2_PHY_TYPE_PARAM_UTMI; + else + val = DWC2_PHY_TYPE_PARAM_ULPI; + } + dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val); + } + + hsotg->params.phy_type = val; +} + +static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg) +{ + return hsotg->params.phy_type; +} + +static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, 0, 2)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for speed parameter\n"); + dev_err(hsotg->dev, "max_speed parameter must be 0, 1, or 2\n"); + } + valid = 0; + } + + if (dwc2_is_hs_iot(hsotg) && + val == DWC2_SPEED_PARAM_LOW) + valid = 0; + + if (val == DWC2_SPEED_PARAM_HIGH && + dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for speed parameter. Check HW configuration.\n", + val); + val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ? 
+ DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; + dev_dbg(hsotg->dev, "Setting speed to %d\n", val); + } + + hsotg->params.speed = val; +} + +static void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ, + DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) { + if (val >= 0) { + dev_err(hsotg->dev, + "Wrong value for host_ls_low_power_phy_clk parameter\n"); + dev_err(hsotg->dev, + "host_ls_low_power_phy_clk must be 0 or 1\n"); + } + valid = 0; + } + + if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && + dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", + val); + val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS + ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ + : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; + dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", + val); + } + + hsotg->params.host_ls_low_power_phy_clk = val; +} + +static void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); + dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val); + } + + hsotg->params.phy_ulpi_ddr = val; +} + +static void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "Wrong value for phy_ulpi_ext_vbus\n"); + dev_err(hsotg->dev, + "phy_ulpi_ext_vbus must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); + } + + hsotg->params.phy_ulpi_ext_vbus = val; +} + +static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 0; + + switch (hsotg->hw_params.utmi_phy_data_width) { + case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: + valid = (val == 8); + break; + case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: + valid = (val == 16); + break; + case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: + valid = (val == 8 || val == 16); + break; + } + + if (!valid) { + if (val >= 0) { + dev_err(hsotg->dev, + "%d invalid for phy_utmi_width. Check HW configuration.\n", + val); + } + val = (hsotg->hw_params.utmi_phy_data_width == + GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 
8 : 16; + dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); + } + + hsotg->params.phy_utmi_width = val; +} + +static void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); + dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); + } + + hsotg->params.ulpi_fs_ls = val; +} + +static void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for ts_dline\n"); + dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); + } + + hsotg->params.ts_dline = val; +} + +static void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); + dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); + } + + valid = 0; + } + + if (val == 1 && !(hsotg->hw_params.i2c_enable)) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for i2c_enable. Check HW configuration.\n", + val); + val = hsotg->hw_params.i2c_enable; + dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val); + } + + hsotg->params.i2c_enable = val; +} + +static void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, + int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "Wrong value for en_multiple_tx_fifo,\n"); + dev_err(hsotg->dev, + "en_multiple_tx_fifo must be 0 or 1\n"); + } + valid = 0; + } + + if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n", + val); + val = hsotg->hw_params.en_multiple_tx_fifo; + dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val); + } + + hsotg->params.en_multiple_tx_fifo = val; +} + +static void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val) +{ + int valid = 1; + + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter reload_ctl\n", val); + dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n"); + } + valid = 0; + } + + if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a) + valid = 0; + + if (!valid) { + if (val >= 0) + dev_err(hsotg->dev, + "%d invalid for parameter reload_ctl. 
Check HW configuration.\n", + val); + val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a; + dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val); + } + + hsotg->params.reload_ctl = val; +} + +static void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val) +{ + if (val != -1) + hsotg->params.ahbcfg = val; + else + hsotg->params.ahbcfg = GAHBCFG_HBSTLEN_INCR4 << + GAHBCFG_HBSTLEN_SHIFT; +} + +static void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter otg_ver\n", val); + dev_err(hsotg->dev, + "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val); + } + + hsotg->params.otg_ver = val; +} + +static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter uframe_sched\n", + val); + dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n"); + } + val = 1; + dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val); + } + + hsotg->params.uframe_sched = val; +} + +static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg, + int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter external_id_pin_ctl\n", + val); + dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val); + } + + hsotg->params.external_id_pin_ctl = val; +} + +static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg, + int val) +{ + if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { + if (val >= 0) { + dev_err(hsotg->dev, + "'%d' invalid for parameter hibernation\n", + val); + dev_err(hsotg->dev, "hibernation must be 0 or 1\n"); + } + val = 0; + dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val); + } + + hsotg->params.hibernation = val; +} + +static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg) +{ + int i; + int num; + char *property = "g-tx-fifo-size"; + struct dwc2_core_params *p = &hsotg->params; + + memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size)); + + /* Read tx fifo sizes */ + num = device_property_read_u32_array(hsotg->dev, property, NULL, 0); + + if (num > 0) { + device_property_read_u32_array(hsotg->dev, property, + &p->g_tx_fifo_size[1], + num); + } else { + u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE; + + memcpy(&p->g_tx_fifo_size[1], + p_tx_fifo, + sizeof(p_tx_fifo)); + + num = ARRAY_SIZE(p_tx_fifo); + } + + for (i = 0; i < num; i++) { + if ((i + 1) >= ARRAY_SIZE(p->g_tx_fifo_size)) + break; + + dev_dbg(hsotg->dev, "Setting %s[%d] to %d\n", + property, i + 1, p->g_tx_fifo_size[i + 1]); + } +} + +static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + struct dwc2_core_params *p = &hsotg->params; + bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH); + + /* Buffer DMA */ + dwc2_set_param_bool(hsotg, &p->g_dma, + false, "gadget-dma", + true, false, + dma_capable); + + /* DMA Descriptor */ + dwc2_set_param_bool(hsotg, &p->g_dma_desc, false, + "gadget-dma-desc", + p->g_dma, false, + !!hw->dma_desc_enable); +} + +/** + * dwc2_set_parameters() - Set all core parameters. 
+ * + * @hsotg: Programming view of the DWC_otg controller + * @params: The parameters to set + */ +static void dwc2_set_parameters(struct dwc2_hsotg *hsotg, + const struct dwc2_core_params *params) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + struct dwc2_core_params *p = &hsotg->params; + bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH); + + dwc2_set_param_otg_cap(hsotg, params->otg_cap); + if ((hsotg->dr_mode == USB_DR_MODE_HOST) || + (hsotg->dr_mode == USB_DR_MODE_OTG)) { + dev_dbg(hsotg->dev, "Setting HOST parameters\n"); + + dwc2_set_param_bool(hsotg, &p->host_dma, + false, "host-dma", + true, false, + dma_capable); + } + dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); + dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable); + + dwc2_set_param_host_support_fs_ls_low_power(hsotg, + params->host_support_fs_ls_low_power); + dwc2_set_param_enable_dynamic_fifo(hsotg, + params->enable_dynamic_fifo); + dwc2_set_param_host_rx_fifo_size(hsotg, + params->host_rx_fifo_size); + dwc2_set_param_host_nperio_tx_fifo_size(hsotg, + params->host_nperio_tx_fifo_size); + dwc2_set_param_host_perio_tx_fifo_size(hsotg, + params->host_perio_tx_fifo_size); + dwc2_set_param_max_transfer_size(hsotg, + params->max_transfer_size); + dwc2_set_param_max_packet_count(hsotg, + params->max_packet_count); + dwc2_set_param_host_channels(hsotg, params->host_channels); + dwc2_set_param_phy_type(hsotg, params->phy_type); + dwc2_set_param_speed(hsotg, params->speed); + dwc2_set_param_host_ls_low_power_phy_clk(hsotg, + params->host_ls_low_power_phy_clk); + dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr); + dwc2_set_param_phy_ulpi_ext_vbus(hsotg, + params->phy_ulpi_ext_vbus); + dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width); + dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls); + dwc2_set_param_ts_dline(hsotg, params->ts_dline); + dwc2_set_param_i2c_enable(hsotg, params->i2c_enable); + dwc2_set_param_en_multiple_tx_fifo(hsotg, + params->en_multiple_tx_fifo); + dwc2_set_param_reload_ctl(hsotg, params->reload_ctl); + dwc2_set_param_ahbcfg(hsotg, params->ahbcfg); + dwc2_set_param_otg_ver(hsotg, params->otg_ver); + dwc2_set_param_uframe_sched(hsotg, params->uframe_sched); + dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl); + dwc2_set_param_hibernation(hsotg, params->hibernation); + + /* + * Set devicetree-only parameters. These parameters do not + * take any values from @params. + */ + if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) || + (hsotg->dr_mode == USB_DR_MODE_OTG)) { + dev_dbg(hsotg->dev, "Setting peripheral device properties\n"); + + dwc2_set_gadget_dma(hsotg); + + /* + * The values for g_rx_fifo_size (2048) and + * g_np_tx_fifo_size (1024) come from the legacy s3c + * gadget driver. These defaults have been hard-coded + * for some time so many platforms depend on these + * values. Leave them as defaults for now and only + * auto-detect if the hardware does not support the + * default. + */ + dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size, + true, "g-rx-fifo-size", 2048, + hw->rx_fifo_size, + 16, hw->rx_fifo_size); + + dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size, + true, "g-np-tx-fifo-size", 1024, + hw->dev_nperio_tx_fifo_size, + 16, hw->dev_nperio_tx_fifo_size); + + dwc2_set_param_tx_fifo_sizes(hsotg); + } +} + +/* + * Gets host hardware parameters. Forces host mode if not currently in + * host mode. Should be called immediately after a core soft reset in + * order to get the reset values. 
+ */ +static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + u32 gnptxfsiz; + u32 hptxfsiz; + bool forced; + + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) + return; + + forced = dwc2_force_mode_if_needed(hsotg, true); + + gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); + hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); + dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); + dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); + + if (forced) + dwc2_clear_force_mode(hsotg); + + hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; + hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; +} + +/* + * Gets device hardware parameters. Forces device mode if not + * currently in device mode. Should be called immediately after a core + * soft reset in order to get the reset values. + */ +static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + bool forced; + u32 gnptxfsiz; + + if (hsotg->dr_mode == USB_DR_MODE_HOST) + return; + + forced = dwc2_force_mode_if_needed(hsotg, false); + + gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); + dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); + + if (forced) + dwc2_clear_force_mode(hsotg); + + hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >> + FIFOSIZE_DEPTH_SHIFT; +} + +/** + * During device initialization, read various hardware configuration + * registers and interpret the contents. + */ +int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) +{ + struct dwc2_hw_params *hw = &hsotg->hw_params; + unsigned int width; + u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; + u32 grxfsiz; + + /* + * Attempt to ensure this device is really a DWC_otg Controller. + * Read and verify the GSNPSID register contents. The value should be + * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3", + * as in "OTG version 2.xx" or "OTG version 3.xx". + */ + hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID); + if ((hw->snpsid & 0xfffff000) != 0x4f542000 && + (hw->snpsid & 0xfffff000) != 0x4f543000 && + (hw->snpsid & 0xffff0000) != 0x55310000 && + (hw->snpsid & 0xffff0000) != 0x55320000) { + dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n", + hw->snpsid); + return -ENODEV; + } + + dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n", + hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, + hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); + + hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1); + hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); + hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3); + hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); + grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); + + dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1); + dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2); + dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3); + dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); + dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); + + /* + * Host specific hardware parameters. Reading these parameters + * requires the controller to be in host mode. The mode will + * be forced, if necessary, to read these values. 
+ */ + dwc2_get_host_hwparams(hsotg); + dwc2_get_dev_hwparams(hsotg); + + /* hwcfg1 */ + hw->dev_ep_dirs = hwcfg1; + + /* hwcfg2 */ + hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >> + GHWCFG2_OP_MODE_SHIFT; + hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >> + GHWCFG2_ARCHITECTURE_SHIFT; + hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO); + hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >> + GHWCFG2_NUM_HOST_CHAN_SHIFT); + hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >> + GHWCFG2_HS_PHY_TYPE_SHIFT; + hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> + GHWCFG2_FS_PHY_TYPE_SHIFT; + hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >> + GHWCFG2_NUM_DEV_EP_SHIFT; + hw->nperio_tx_q_depth = + (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >> + GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1; + hw->host_perio_tx_q_depth = + (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >> + GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1; + hw->dev_token_q_depth = + (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >> + GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT; + + /* hwcfg3 */ + width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >> + GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; + hw->max_transfer_size = (1 << (width + 11)) - 1; + width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >> + GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; + hw->max_packet_count = (1 << (width + 4)) - 1; + hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C); + hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >> + GHWCFG3_DFIFO_DEPTH_SHIFT; + + /* hwcfg4 */ + hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); + hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> + GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; + hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); + hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); + hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> + GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT; + + /* fifo sizes */ + hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> + GRXFSIZ_DEPTH_SHIFT; + + dev_dbg(hsotg->dev, "Detected values from hardware:\n"); + dev_dbg(hsotg->dev, " op_mode=%d\n", + hw->op_mode); + dev_dbg(hsotg->dev, " arch=%d\n", + hw->arch); + dev_dbg(hsotg->dev, " dma_desc_enable=%d\n", + hw->dma_desc_enable); + dev_dbg(hsotg->dev, " power_optimized=%d\n", + hw->power_optimized); + dev_dbg(hsotg->dev, " i2c_enable=%d\n", + hw->i2c_enable); + dev_dbg(hsotg->dev, " hs_phy_type=%d\n", + hw->hs_phy_type); + dev_dbg(hsotg->dev, " fs_phy_type=%d\n", + hw->fs_phy_type); + dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n", + hw->utmi_phy_data_width); + dev_dbg(hsotg->dev, " num_dev_ep=%d\n", + hw->num_dev_ep); + dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n", + hw->num_dev_perio_in_ep); + dev_dbg(hsotg->dev, " host_channels=%d\n", + hw->host_channels); + dev_dbg(hsotg->dev, " max_transfer_size=%d\n", + hw->max_transfer_size); + dev_dbg(hsotg->dev, " max_packet_count=%d\n", + hw->max_packet_count); + dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n", + hw->nperio_tx_q_depth); + dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n", + hw->host_perio_tx_q_depth); + dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n", + hw->dev_token_q_depth); + dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n", + hw->enable_dynamic_fifo); + dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n", + hw->en_multiple_tx_fifo); + dev_dbg(hsotg->dev, " total_fifo_size=%d\n", + hw->total_fifo_size); + dev_dbg(hsotg->dev, " rx_fifo_size=%d\n", + hw->rx_fifo_size); + dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n", + 
hw->host_nperio_tx_fifo_size); + dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n", + hw->host_perio_tx_fifo_size); + dev_dbg(hsotg->dev, "\n"); + + return 0; +} + +int dwc2_init_params(struct dwc2_hsotg *hsotg) +{ + const struct of_device_id *match; + struct dwc2_core_params params; + + match = of_match_device(dwc2_of_match_table, hsotg->dev); + if (match && match->data) + params = *((struct dwc2_core_params *)match->data); + else + params = params_default; + + if (dwc2_is_fs_iot(hsotg)) { + params.speed = DWC2_SPEED_PARAM_FULL; + params.phy_type = DWC2_PHY_TYPE_PARAM_FS; + } + + dwc2_set_parameters(hsotg, ¶ms); + + return 0; +} diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c index ae419615a176..a23329e3d7cd 100644 --- a/drivers/usb/dwc2/pci.c +++ b/drivers/usb/dwc2/pci.c @@ -62,6 +62,20 @@ struct dwc2_pci_glue { struct platform_device *phy; }; +static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2) +{ + if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS && + pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) { + struct property_entry properties[] = { + { }, + }; + + return platform_device_add_properties(dwc2, properties); + } + + return 0; +} + static void dwc2_pci_remove(struct pci_dev *pci) { struct dwc2_pci_glue *glue = pci_get_drvdata(pci); @@ -122,6 +136,10 @@ static int dwc2_pci_probe(struct pci_dev *pci, return PTR_ERR(phy); } + ret = dwc2_pci_quirks(pci, dwc2); + if (ret) + goto err; + ret = platform_device_add(dwc2); if (ret) { dev_err(dev, "failed to register dwc2 device\n"); diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 8e1728b39a49..4fc8c603afb8 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -55,165 +55,6 @@ static const char dwc2_driver_name[] = "dwc2"; -static const struct dwc2_core_params params_hi6220 = { - .otg_cap = 2, /* No HNP/SRP capable */ - .otg_ver = 0, /* 1.3 */ - .dma_enable = 1, - .dma_desc_enable = 0, - .dma_desc_fs_enable = 0, - .speed = 0, /* High Speed */ - .enable_dynamic_fifo = 1, - .en_multiple_tx_fifo = 1, - .host_rx_fifo_size = 512, - .host_nperio_tx_fifo_size = 512, - .host_perio_tx_fifo_size = 512, - .max_transfer_size = 65535, - .max_packet_count = 511, - .host_channels = 16, - .phy_type = 1, /* UTMI */ - .phy_utmi_width = 8, - .phy_ulpi_ddr = 0, /* Single */ - .phy_ulpi_ext_vbus = 0, - .i2c_enable = 0, - .ulpi_fs_ls = 0, - .host_support_fs_ls_low_power = 0, - .host_ls_low_power_phy_clk = 0, /* 48 MHz */ - .ts_dline = 0, - .reload_ctl = 0, - .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << - GAHBCFG_HBSTLEN_SHIFT, - .uframe_sched = 0, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - -static const struct dwc2_core_params params_bcm2835 = { - .otg_cap = 0, /* HNP/SRP capable */ - .otg_ver = 0, /* 1.3 */ - .dma_enable = 1, - .dma_desc_enable = 0, - .dma_desc_fs_enable = 0, - .speed = 0, /* High Speed */ - .enable_dynamic_fifo = 1, - .en_multiple_tx_fifo = 1, - .host_rx_fifo_size = 774, /* 774 DWORDs */ - .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */ - .host_perio_tx_fifo_size = 512, /* 512 DWORDs */ - .max_transfer_size = 65535, - .max_packet_count = 511, - .host_channels = 8, - .phy_type = 1, /* UTMI */ - .phy_utmi_width = 8, /* 8 bits */ - .phy_ulpi_ddr = 0, /* Single */ - .phy_ulpi_ext_vbus = 0, - .i2c_enable = 0, - .ulpi_fs_ls = 0, - .host_support_fs_ls_low_power = 0, - .host_ls_low_power_phy_clk = 0, /* 48 MHz */ - .ts_dline = 0, - .reload_ctl = 0, - .ahbcfg = 0x10, - .uframe_sched = 0, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - -static const struct 
dwc2_core_params params_rk3066 = { - .otg_cap = 2, /* non-HNP/non-SRP */ - .otg_ver = -1, - .dma_enable = -1, - .dma_desc_enable = 0, - .dma_desc_fs_enable = 0, - .speed = -1, - .enable_dynamic_fifo = 1, - .en_multiple_tx_fifo = -1, - .host_rx_fifo_size = 525, /* 525 DWORDs */ - .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ - .host_perio_tx_fifo_size = 256, /* 256 DWORDs */ - .max_transfer_size = -1, - .max_packet_count = -1, - .host_channels = -1, - .phy_type = -1, - .phy_utmi_width = -1, - .phy_ulpi_ddr = -1, - .phy_ulpi_ext_vbus = -1, - .i2c_enable = -1, - .ulpi_fs_ls = -1, - .host_support_fs_ls_low_power = -1, - .host_ls_low_power_phy_clk = -1, - .ts_dline = -1, - .reload_ctl = -1, - .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << - GAHBCFG_HBSTLEN_SHIFT, - .uframe_sched = -1, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - -static const struct dwc2_core_params params_ltq = { - .otg_cap = 2, /* non-HNP/non-SRP */ - .otg_ver = -1, - .dma_enable = -1, - .dma_desc_enable = -1, - .dma_desc_fs_enable = -1, - .speed = -1, - .enable_dynamic_fifo = -1, - .en_multiple_tx_fifo = -1, - .host_rx_fifo_size = 288, /* 288 DWORDs */ - .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ - .host_perio_tx_fifo_size = 96, /* 96 DWORDs */ - .max_transfer_size = 65535, - .max_packet_count = 511, - .host_channels = -1, - .phy_type = -1, - .phy_utmi_width = -1, - .phy_ulpi_ddr = -1, - .phy_ulpi_ext_vbus = -1, - .i2c_enable = -1, - .ulpi_fs_ls = -1, - .host_support_fs_ls_low_power = -1, - .host_ls_low_power_phy_clk = -1, - .ts_dline = -1, - .reload_ctl = -1, - .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << - GAHBCFG_HBSTLEN_SHIFT, - .uframe_sched = -1, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - -static const struct dwc2_core_params params_amlogic = { - .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE, - .otg_ver = -1, - .dma_enable = 1, - .dma_desc_enable = 0, - .dma_desc_fs_enable = 0, - .speed = DWC2_SPEED_PARAM_HIGH, - .enable_dynamic_fifo = 1, - .en_multiple_tx_fifo = -1, - .host_rx_fifo_size = 512, - .host_nperio_tx_fifo_size = 500, - .host_perio_tx_fifo_size = 500, - .max_transfer_size = -1, - .max_packet_count = -1, - .host_channels = 16, - .phy_type = DWC2_PHY_TYPE_PARAM_UTMI, - .phy_utmi_width = -1, - .phy_ulpi_ddr = -1, - .phy_ulpi_ext_vbus = -1, - .i2c_enable = -1, - .ulpi_fs_ls = -1, - .host_support_fs_ls_low_power = -1, - .host_ls_low_power_phy_clk = -1, - .ts_dline = -1, - .reload_ctl = 1, - .ahbcfg = GAHBCFG_HBSTLEN_INCR8 << - GAHBCFG_HBSTLEN_SHIFT, - .uframe_sched = 0, - .external_id_pin_ctl = -1, - .hibernation = -1, -}; - /* * Check the dr_mode against the module configuration and hardware * capabilities. 
@@ -510,20 +351,6 @@ static void dwc2_driver_shutdown(struct platform_device *dev) disable_irq(hsotg->irq); } -static const struct of_device_id dwc2_of_match_table[] = { - { .compatible = "brcm,bcm2835-usb", .data = ¶ms_bcm2835 }, - { .compatible = "hisilicon,hi6220-usb", .data = ¶ms_hi6220 }, - { .compatible = "rockchip,rk3066-usb", .data = ¶ms_rk3066 }, - { .compatible = "lantiq,arx100-usb", .data = ¶ms_ltq }, - { .compatible = "lantiq,xrx200-usb", .data = ¶ms_ltq }, - { .compatible = "snps,dwc2", .data = NULL }, - { .compatible = "samsung,s3c6400-hsotg", .data = NULL}, - { .compatible = "amlogic,meson8b-usb", .data = ¶ms_amlogic }, - { .compatible = "amlogic,meson-gxbb-usb", .data = ¶ms_amlogic }, - {}, -}; -MODULE_DEVICE_TABLE(of, dwc2_of_match_table); - /** * dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg * driver @@ -538,30 +365,10 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table); */ static int dwc2_driver_probe(struct platform_device *dev) { - const struct of_device_id *match; - const struct dwc2_core_params *params; - struct dwc2_core_params defparams; struct dwc2_hsotg *hsotg; struct resource *res; int retval; - match = of_match_device(dwc2_of_match_table, &dev->dev); - if (match && match->data) { - params = match->data; - } else { - /* Default all params to autodetect */ - dwc2_set_all_params(&defparams, -1); - params = &defparams; - - /* - * Disable descriptor dma mode by default as the HW can support - * it, but does not support it for SPLIT transactions. - * Disable it for FS devices as well. - */ - defparams.dma_desc_enable = 0; - defparams.dma_desc_fs_enable = 0; - } - hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL); if (!hsotg) return -ENOMEM; @@ -591,13 +398,6 @@ static int dwc2_driver_probe(struct platform_device *dev) spin_lock_init(&hsotg->lock); - hsotg->core_params = devm_kzalloc(&dev->dev, - sizeof(*hsotg->core_params), GFP_KERNEL); - if (!hsotg->core_params) - return -ENOMEM; - - dwc2_set_all_params(hsotg->core_params, -1); - hsotg->irq = platform_get_irq(dev, 0); if (hsotg->irq < 0) { dev_err(&dev->dev, "missing IRQ resource\n"); @@ -631,11 +431,12 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) goto error; - /* Validate parameter values */ - dwc2_set_parameters(hsotg, params); - dwc2_force_dr_mode(hsotg); + retval = dwc2_init_params(hsotg); + if (retval) + goto error; + if (hsotg->dr_mode != USB_DR_MODE_HOST) { retval = dwc2_gadget_init(hsotg, hsotg->irq); if (retval) diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig index b97cde76914d..c5aa235863e8 100644 --- a/drivers/usb/dwc3/Kconfig +++ b/drivers/usb/dwc3/Kconfig @@ -62,7 +62,7 @@ config USB_DWC3_OMAP config USB_DWC3_EXYNOS tristate "Samsung Exynos Platform" - depends on ARCH_EXYNOS && OF || COMPILE_TEST + depends on (ARCH_EXYNOS || COMPILE_TEST) && OF default USB_DWC3 help Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside, @@ -70,7 +70,7 @@ config USB_DWC3_EXYNOS config USB_DWC3_PCI tristate "PCIe-based Platforms" - depends on PCI + depends on PCI && ACPI default USB_DWC3 help If you're using the DesignWare Core IP with a PCIe, please say @@ -98,7 +98,7 @@ config USB_DWC3_OF_SIMPLE config USB_DWC3_ST tristate "STMicroelectronics Platforms" - depends on ARCH_STI && OF + depends on (ARCH_STI || COMPILE_TEST) && OF default USB_DWC3 help STMicroelectronics SoCs with one DesignWare Core USB3 IP diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile index 22420e17d68b..ffca34029b21 100644 --- 
a/drivers/usb/dwc3/Makefile +++ b/drivers/usb/dwc3/Makefile @@ -3,7 +3,11 @@ CFLAGS_trace.o := -I$(src) obj-$(CONFIG_USB_DWC3) += dwc3.o -dwc3-y := core.o debug.o trace.o +dwc3-y := core.o + +ifneq ($(CONFIG_FTRACE),) + dwc3-y += trace.o +endif ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),) dwc3-y += host.o diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 7287a763cd0c..369bab16a824 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -169,33 +169,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) return -ETIMEDOUT; } -/** - * dwc3_soft_reset - Issue soft reset - * @dwc: Pointer to our controller context structure - */ -static int dwc3_soft_reset(struct dwc3 *dwc) -{ - unsigned long timeout; - u32 reg; - - timeout = jiffies + msecs_to_jiffies(500); - dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST); - do { - reg = dwc3_readl(dwc->regs, DWC3_DCTL); - if (!(reg & DWC3_DCTL_CSFTRST)) - break; - - if (time_after(jiffies, timeout)) { - dev_err(dwc->dev, "Reset Timed Out\n"); - return -ETIMEDOUT; - } - - cpu_relax(); - } while (true); - - return 0; -} - /* * dwc3_frame_length_adjustment - Adjusts frame length if required * @dwc3: Pointer to our controller context structure @@ -229,7 +202,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc) static void dwc3_free_one_event_buffer(struct dwc3 *dwc, struct dwc3_event_buffer *evt) { - dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma); + dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma); } /** @@ -251,7 +224,11 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc, evt->dwc = dwc; evt->length = length; - evt->buf = dma_alloc_coherent(dwc->dev, length, + evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL); + if (!evt->cache) + return ERR_PTR(-ENOMEM); + + evt->buf = dma_alloc_coherent(dwc->sysdev, length, &evt->dma, GFP_KERNEL); if (!evt->buf) return ERR_PTR(-ENOMEM); @@ -305,13 +282,7 @@ static int dwc3_event_buffers_setup(struct dwc3 *dwc) struct dwc3_event_buffer *evt; evt = dwc->ev_buf; - dwc3_trace(trace_dwc3_core, - "Event buf %p dma %08llx length %d\n", - evt->buf, (unsigned long long) evt->dma, - evt->length); - evt->lpos = 0; - dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), lower_32_bits(evt->dma)); dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), @@ -370,11 +341,11 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc) if (!WARN_ON(dwc->scratchbuf)) return 0; - scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf, + scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf, dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dwc->dev, scratch_addr)) { - dev_err(dwc->dev, "failed to map scratch buffer\n"); + if (dma_mapping_error(dwc->sysdev, scratch_addr)) { + dev_err(dwc->sysdev, "failed to map scratch buffer\n"); ret = -EFAULT; goto err0; } @@ -398,7 +369,7 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc) return 0; err1: - dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch * + dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL); err0: @@ -417,7 +388,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc) if (!WARN_ON(dwc->scratchbuf)) return; - dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch * + dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL); kfree(dwc->scratchbuf); } @@ -428,9 +399,6 @@ static void 
dwc3_core_num_eps(struct dwc3 *dwc) dwc->num_in_eps = DWC3_NUM_IN_EPS(parms); dwc->num_out_eps = DWC3_NUM_EPS(parms) - dwc->num_in_eps; - - dwc3_trace(trace_dwc3_core, "found %d IN and %d OUT endpoints", - dwc->num_in_eps, dwc->num_out_eps); } static void dwc3_cache_hwparams(struct dwc3 *dwc) @@ -524,13 +492,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc) } /* FALLTHROUGH */ case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: - /* Making sure the interface and PHY are operational */ - ret = dwc3_soft_reset(dwc); - if (ret) - return ret; - - udelay(1); - ret = dwc3_ulpi_init(dwc); if (ret) return ret; @@ -594,19 +555,12 @@ static void dwc3_core_exit(struct dwc3 *dwc) phy_power_off(dwc->usb3_generic_phy); } -/** - * dwc3_core_init - Low-level initialization of DWC3 Core - * @dwc: Pointer to our controller context structure - * - * Returns 0 on success otherwise negative errno. - */ -static int dwc3_core_init(struct dwc3 *dwc) +static bool dwc3_core_is_valid(struct dwc3 *dwc) { - u32 hwparams4 = dwc->hwparams.hwparams4; - u32 reg; - int ret; + u32 reg; reg = dwc3_readl(dwc->regs, DWC3_GSNPSID); + /* This should read as U3 followed by revision number */ if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) { /* Detected DWC_usb3 IP */ @@ -616,36 +570,16 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER); dwc->revision |= DWC3_REVISION_IS_DWC31; } else { - dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); - ret = -ENODEV; - goto err0; + return false; } - /* - * Write Linux Version Code to our GUID register so it's easy to figure - * out which kernel version a bug was found. - */ - dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); - - /* Handle USB2.0-only core configuration */ - if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == - DWC3_GHWPARAMS3_SSPHY_IFC_DIS) { - if (dwc->maximum_speed == USB_SPEED_SUPER) - dwc->maximum_speed = USB_SPEED_HIGH; - } - - /* issue device SoftReset too */ - ret = dwc3_soft_reset(dwc); - if (ret) - goto err0; - - ret = dwc3_core_soft_reset(dwc); - if (ret) - goto err0; + return true; +} - ret = dwc3_phy_setup(dwc); - if (ret) - goto err0; +static void dwc3_core_setup_global_control(struct dwc3 *dwc) +{ + u32 hwparams4 = dwc->hwparams.hwparams4; + u32 reg; reg = dwc3_readl(dwc->regs, DWC3_GCTL); reg &= ~DWC3_GCTL_SCALEDOWN_MASK; @@ -683,13 +617,13 @@ static int dwc3_core_init(struct dwc3 *dwc) reg |= DWC3_GCTL_GBLHIBERNATIONEN; break; default: - dwc3_trace(trace_dwc3_core, "No power optimization available\n"); + /* nothing */ + break; } /* check if current dwc3 is on simulation board */ if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) { - dwc3_trace(trace_dwc3_core, - "running on FPGA platform\n"); + dev_info(dwc->dev, "Running with FPGA optimizations\n"); dwc->is_fpga = true; } @@ -714,7 +648,47 @@ static int dwc3_core_init(struct dwc3 *dwc) reg |= DWC3_GCTL_U2RSTECN; dwc3_writel(dwc->regs, DWC3_GCTL, reg); +} + +/** + * dwc3_core_init - Low-level initialization of DWC3 Core + * @dwc: Pointer to our controller context structure + * + * Returns 0 on success otherwise negative errno. + */ +static int dwc3_core_init(struct dwc3 *dwc) +{ + u32 reg; + int ret; + + if (!dwc3_core_is_valid(dwc)) { + dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n"); + ret = -ENODEV; + goto err0; + } + + /* + * Write Linux Version Code to our GUID register so it's easy to figure + * out which kernel version a bug was found. 
+ */ + dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); + + /* Handle USB2.0-only core configuration */ + if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == + DWC3_GHWPARAMS3_SSPHY_IFC_DIS) { + if (dwc->maximum_speed == USB_SPEED_SUPER) + dwc->maximum_speed = USB_SPEED_HIGH; + } + + ret = dwc3_core_soft_reset(dwc); + if (ret) + goto err0; + ret = dwc3_phy_setup(dwc); + if (ret) + goto err0; + + dwc3_core_setup_global_control(dwc); dwc3_core_num_eps(dwc); ret = dwc3_setup_scratch_buffers(dwc); @@ -766,18 +740,27 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); } + /* + * Enable hardware control of sending remote wakeup in HS when + * the device is in the L1 state. + */ + if (dwc->revision >= DWC3_REVISION_290A) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL1); + reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW; + dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); + } + return 0; err4: - phy_power_off(dwc->usb2_generic_phy); + phy_power_off(dwc->usb3_generic_phy); err3: - phy_power_off(dwc->usb3_generic_phy); + phy_power_off(dwc->usb2_generic_phy); err2: usb_phy_set_suspend(dwc->usb2_phy, 1); usb_phy_set_suspend(dwc->usb3_phy, 1); - dwc3_core_exit(dwc); err1: usb_phy_shutdown(dwc->usb2_phy); @@ -920,57 +903,13 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc) } } -#define DWC3_ALIGN_MASK (16 - 1) - -static int dwc3_probe(struct platform_device *pdev) +static void dwc3_get_properties(struct dwc3 *dwc) { - struct device *dev = &pdev->dev; - struct resource *res; - struct dwc3 *dwc; + struct device *dev = dwc->dev; u8 lpm_nyet_threshold; u8 tx_de_emphasis; u8 hird_threshold; - int ret; - - void __iomem *regs; - void *mem; - - mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL); - if (!mem) - return -ENOMEM; - - dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1); - dwc->mem = mem; - dwc->dev = dev; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "missing memory resource\n"); - return -ENODEV; - } - - dwc->xhci_resources[0].start = res->start; - dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + - DWC3_XHCI_REGS_END; - dwc->xhci_resources[0].flags = res->flags; - dwc->xhci_resources[0].name = res->name; - - res->start += DWC3_GLOBALS_REGS_START; - - /* - * Request memory region but exclude xHCI regs, - * since it will be requested by the xhci-plat driver. 
- */ - regs = devm_ioremap_resource(dev, res); - if (IS_ERR(regs)) { - ret = PTR_ERR(regs); - goto err0; - } - - dwc->regs = regs; - dwc->regs_size = resource_size(res); - /* default to highest possible threshold */ lpm_nyet_threshold = 0xff; @@ -987,6 +926,13 @@ static int dwc3_probe(struct platform_device *pdev) dwc->dr_mode = usb_get_dr_mode(dev); dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node); + dwc->sysdev_is_parent = device_property_read_bool(dev, + "linux,sysdev_is_parent"); + if (dwc->sysdev_is_parent) + dwc->sysdev = dwc->dev->parent; + else + dwc->sysdev = dwc->dev; + dwc->has_lpm_erratum = device_property_read_bool(dev, "snps,has-lpm-erratum"); device_property_read_u8(dev, "snps,lpm-nyet-threshold", @@ -1042,6 +988,112 @@ static int dwc3_probe(struct platform_device *pdev) dwc->hird_threshold = hird_threshold | (dwc->is_utmi_l1_suspend << 4); + dwc->imod_interval = 0; +} + +/* check whether the core supports IMOD */ +bool dwc3_has_imod(struct dwc3 *dwc) +{ + return ((dwc3_is_usb3(dwc) && + dwc->revision >= DWC3_REVISION_300A) || + (dwc3_is_usb31(dwc) && + dwc->revision >= DWC3_USB31_REVISION_120A)); +} + +static void dwc3_check_params(struct dwc3 *dwc) +{ + struct device *dev = dwc->dev; + + /* Check for proper value of imod_interval */ + if (dwc->imod_interval && !dwc3_has_imod(dwc)) { + dev_warn(dwc->dev, "Interrupt moderation not supported\n"); + dwc->imod_interval = 0; + } + + /* + * Workaround for STAR 9000961433 which affects only version + * 3.00a of the DWC_usb3 core. This prevents the controller + * interrupt from being masked while handling events. IMOD + * allows us to work around this issue. Enable it for the + * affected version. + */ + if (!dwc->imod_interval && + (dwc->revision == DWC3_REVISION_300A)) + dwc->imod_interval = 1; + + /* Check the maximum_speed parameter */ + switch (dwc->maximum_speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + break; + default: + dev_err(dev, "invalid maximum_speed parameter %d\n", + dwc->maximum_speed); + /* fall through */ + case USB_SPEED_UNKNOWN: + /* default to superspeed */ + dwc->maximum_speed = USB_SPEED_SUPER; + + /* + * default to superspeed plus if we are capable. + */ + if (dwc3_is_usb31(dwc) && + (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == + DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) + dwc->maximum_speed = USB_SPEED_SUPER_PLUS; + + break; + } +} + +static int dwc3_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct dwc3 *dwc; + + int ret; + + void __iomem *regs; + + dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL); + if (!dwc) + return -ENOMEM; + + dwc->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "missing memory resource\n"); + return -ENODEV; + } + + dwc->xhci_resources[0].start = res->start; + dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + + DWC3_XHCI_REGS_END; + dwc->xhci_resources[0].flags = res->flags; + dwc->xhci_resources[0].name = res->name; + + res->start += DWC3_GLOBALS_REGS_START; + + /* + * Request memory region but exclude xHCI regs, + * since it will be requested by the xhci-plat driver. 
+ */ + regs = devm_ioremap_resource(dev, res); + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + goto err0; + } + + dwc->regs = regs; + dwc->regs_size = resource_size(res); + + dwc3_get_properties(dwc); + platform_set_drvdata(pdev, dwc); dwc3_cache_hwparams(dwc); @@ -1051,12 +1103,6 @@ static int dwc3_probe(struct platform_device *pdev) spin_lock_init(&dwc->lock); - if (!dev->dma_mask) { - dev->dma_mask = dev->parent->dma_mask; - dev->dma_parms = dev->parent->dma_parms; - dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask); - } - pm_runtime_set_active(dev); pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY); @@ -1088,32 +1134,7 @@ static int dwc3_probe(struct platform_device *pdev) goto err4; } - /* Check the maximum_speed parameter */ - switch (dwc->maximum_speed) { - case USB_SPEED_LOW: - case USB_SPEED_FULL: - case USB_SPEED_HIGH: - case USB_SPEED_SUPER: - case USB_SPEED_SUPER_PLUS: - break; - default: - dev_err(dev, "invalid maximum_speed parameter %d\n", - dwc->maximum_speed); - /* fall through */ - case USB_SPEED_UNKNOWN: - /* default to superspeed */ - dwc->maximum_speed = USB_SPEED_SUPER; - - /* - * default to superspeed plus if we are capable. - */ - if (dwc3_is_usb31(dwc) && - (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == - DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) - dwc->maximum_speed = USB_SPEED_SUPER_PLUS; - - break; - } + dwc3_check_params(dwc); ret = dwc3_core_init_mode(dwc); if (ret) diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 6b60e42626a2..de5a8570be04 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -26,6 +26,7 @@ #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/debugfs.h> +#include <linux/wait.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> @@ -37,6 +38,7 @@ #define DWC3_MSG_MAX 500 /* Global constants */ +#define DWC3_PULL_UP_TIMEOUT 500 /* ms */ #define DWC3_ZLP_BUF_SIZE 1024 /* size of a superspeed bulk */ #define DWC3_EP0_BOUNCE_SIZE 512 #define DWC3_ENDPOINTS_NUM 32 @@ -65,6 +67,7 @@ #define DWC3_DEVICE_EVENT_OVERFLOW 11 #define DWC3_GEVNTCOUNT_MASK 0xfffc +#define DWC3_GEVNTCOUNT_EHB (1 << 31) #define DWC3_GSNPSID_MASK 0xffff0000 #define DWC3_GSNPSREV_MASK 0xffff @@ -147,6 +150,8 @@ #define DWC3_DEPCMDPAR0 0x08 #define DWC3_DEPCMD 0x0c +#define DWC3_DEV_IMOD(n) (0xca00 + (n * 0x4)) + /* OTG Registers */ #define DWC3_OCFG 0xcc00 #define DWC3_OCTL 0xcc04 @@ -198,6 +203,9 @@ #define DWC3_GCTL_GBLHIBERNATIONEN (1 << 1) #define DWC3_GCTL_DSBLCLKGTNG (1 << 0) +/* Global User Control 1 Register */ +#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW (1 << 24) + /* Global USB2 PHY Configuration Register */ #define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31) #define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS (1 << 30) @@ -450,6 +458,8 @@ #define DWC3_DEPCMD_SETTRANSFRESOURCE (0x02 << 0) #define DWC3_DEPCMD_SETEPCONFIG (0x01 << 0) +#define DWC3_DEPCMD_CMD(x) ((x) & 0xf) + /* The EP number goes 0..31 so ep0 is always out and ep1 is always in */ #define DWC3_DALEPENA_EP(n) (1 << n) @@ -458,6 +468,11 @@ #define DWC3_DEPCMD_TYPE_BULK 2 #define DWC3_DEPCMD_TYPE_INTR 3 +#define DWC3_DEV_IMOD_COUNT_SHIFT 16 +#define DWC3_DEV_IMOD_COUNT_MASK (0xffff << 16) +#define DWC3_DEV_IMOD_INTERVAL_SHIFT 0 +#define DWC3_DEV_IMOD_INTERVAL_MASK (0xffff << 0) + /* Structures */ struct dwc3_trb; @@ -465,6 +480,7 @@ struct dwc3_trb; /** * struct dwc3_event_buffer - Software event buffer representation * @buf: _THE_ buffer + * @cache: The buffer cache used in the threaded interrupt * @length: 
size of this buffer * @lpos: event offset * @count: cache of last read event count register @@ -474,6 +490,7 @@ struct dwc3_trb; */ struct dwc3_event_buffer { void *buf; + void *cache; unsigned length; unsigned int lpos; unsigned int count; @@ -499,6 +516,7 @@ struct dwc3_event_buffer { * @endpoint: usb endpoint * @pending_list: list of pending requests for this endpoint * @started_list: list of started requests on this endpoint + * @wait_end_transfer: wait_queue_head_t for waiting on End Transfer complete * @lock: spinlock for endpoint request queue traversal * @regs: pointer to first endpoint register * @trb_pool: array of transaction buffers @@ -524,12 +542,13 @@ struct dwc3_ep { struct list_head pending_list; struct list_head started_list; + wait_queue_head_t wait_end_transfer; + spinlock_t lock; void __iomem *regs; struct dwc3_trb *trb_pool; dma_addr_t trb_pool_dma; - const struct usb_ss_ep_comp_descriptor *comp_desc; struct dwc3 *dwc; u32 saved_state; @@ -540,6 +559,8 @@ struct dwc3_ep { #define DWC3_EP_BUSY (1 << 4) #define DWC3_EP_PENDING_REQUEST (1 << 5) #define DWC3_EP_MISSED_ISOC (1 << 6) +#define DWC3_EP_END_TRANSFER_PENDING (1 << 7) +#define DWC3_EP_TRANSFER_STARTED (1 << 8) /* This last one is specific to EP0 */ #define DWC3_EP0_DIR_IN (1 << 31) @@ -703,7 +724,7 @@ struct dwc3_hwparams { * @dep: struct dwc3_ep owning this request * @sg: pointer to first incomplete sg * @num_pending_sgs: counter to pending sgs - * @first_trb_index: index to first trb used by this request + * @remaining: amount of data remaining * @epnum: endpoint number to which this request refers * @trb: pointer to struct dwc3_trb * @trb_dma: DMA address of @trb @@ -718,7 +739,7 @@ struct dwc3_request { struct scatterlist *sg; unsigned num_pending_sgs; - u8 first_trb_index; + unsigned remaining; u8 epnum; struct dwc3_trb *trb; dma_addr_t trb_dma; @@ -748,6 +769,7 @@ struct dwc3_scratchpad_array { * @ep0_usb_req: dummy req used while handling STD USB requests * @ep0_bounce_addr: dma address of ep0_bounce * @scratch_addr: dma address of scratchbuf + * @ep0_in_setup: one control transfer has completed and we have entered the setup phase * @lock: for synchronizing * @dev: pointer to our struct device * @xhci: pointer to our xHCI child @@ -784,7 +806,6 @@ struct dwc3_scratchpad_array { * @ep0state: state of endpoint zero * @link_state: link state * @speed: device speed (super, high, full, low) - * @mem: points to start of memory which is used for this struct. * @hwparams: copy of hwparams registers * @root: debugfs root folder pointer * @regset: debugfs pointer to regdump file @@ -798,6 +819,7 @@ struct dwc3_scratchpad_array { * @ep0_bounced: true when we used bounce buffer * @ep0_expect_in: true when we expect a DATA IN transfer * @has_hibernation: true when dwc3 was configured with Hibernation + * @sysdev_is_parent: true when dwc3 device has a parent driver * @has_lpm_erratum: true when core was configured with LPM Erratum. Note that * there's no way for software to detect this at runtime. * @is_utmi_l1_suspend: the core asserts output signal @@ -833,6 +855,8 @@ struct dwc3_scratchpad_array { * 1 - -3.5dB de-emphasis * 2 - No de-emphasis * 3 - Reserved + * @imod_interval: set the interrupt moderation interval in 250ns + * increments or 0 to disable. 
*/ struct dwc3 { struct usb_ctrlrequest *ctrl_req; @@ -846,11 +870,13 @@ struct dwc3 { dma_addr_t ep0_bounce_addr; dma_addr_t scratch_addr; struct dwc3_request ep0_usb_req; + struct completion ep0_in_setup; /* device lock */ spinlock_t lock; struct device *dev; + struct device *sysdev; struct platform_device *xhci; struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM]; @@ -909,6 +935,7 @@ struct dwc3 { #define DWC3_REVISION_260A 0x5533260a #define DWC3_REVISION_270A 0x5533270a #define DWC3_REVISION_280A 0x5533280a +#define DWC3_REVISION_290A 0x5533290a #define DWC3_REVISION_300A 0x5533300a #define DWC3_REVISION_310A 0x5533310a @@ -918,6 +945,7 @@ struct dwc3 { */ #define DWC3_REVISION_IS_DWC31 0x80000000 #define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31) +#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31) enum dwc3_ep0_next ep0_next_event; enum dwc3_ep0_state ep0state; @@ -934,8 +962,6 @@ struct dwc3 { u8 num_out_eps; u8 num_in_eps; - void *mem; - struct dwc3_hwparams hwparams; struct dentry *root; struct debugfs_regset32 *regset; @@ -952,6 +978,7 @@ struct dwc3 { unsigned ep0_bounced:1; unsigned ep0_expect_in:1; unsigned has_hibernation:1; + unsigned sysdev_is_parent:1; unsigned has_lpm_erratum:1; unsigned is_utmi_l1_suspend:1; unsigned is_fpga:1; @@ -978,6 +1005,8 @@ struct dwc3 { unsigned tx_de_emphasis_quirk:1; unsigned tx_de_emphasis:2; + + u16 imod_interval; }; /* -------------------------------------------------------------------------- */ @@ -1039,12 +1068,16 @@ struct dwc3_event_depevt { /* Control-only Status */ #define DEPEVT_STATUS_CONTROL_DATA 1 #define DEPEVT_STATUS_CONTROL_STATUS 2 +#define DEPEVT_STATUS_CONTROL_PHASE(n) ((n) & 3) /* In response to Start Transfer */ #define DEPEVT_TRANSFER_NO_RESOURCE 1 #define DEPEVT_TRANSFER_BUS_EXPIRY 2 u32 parameters:16; + +/* For Command Complete Events */ +#define DEPEVT_PARAMETER_CMD(n) (((n) & (0xf << 8)) >> 8) } __packed; /** @@ -1133,12 +1166,20 @@ struct dwc3_gadget_ep_cmd_params { void dwc3_set_mode(struct dwc3 *dwc, u32 mode); u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type); +/* check whether we are on the DWC_usb3 core */ +static inline bool dwc3_is_usb3(struct dwc3 *dwc) +{ + return !(dwc->revision & DWC3_REVISION_IS_DWC31); +} + /* check whether we are on the DWC_usb31 core */ static inline bool dwc3_is_usb31(struct dwc3 *dwc) { return !!(dwc->revision & DWC3_REVISION_IS_DWC31); } +bool dwc3_has_imod(struct dwc3 *dwc); + #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) int dwc3_host_init(struct dwc3 *dwc); void dwc3_host_exit(struct dwc3 *dwc); diff --git a/drivers/usb/dwc3/debug.c b/drivers/usb/dwc3/debug.c deleted file mode 100644 index 0be6885bc370..000000000000 --- a/drivers/usb/dwc3/debug.c +++ /dev/null @@ -1,32 +0,0 @@ -/** - * debug.c - DesignWare USB3 DRD Controller Debug/Trace Support - * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com - * - * Author: Felipe Balbi <balbi@ti.com> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 of - * the License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include "debug.h" - -void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - vaf.fmt = fmt; - vaf.va = &args; - - trace(&vaf); - - va_end(args); -} diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index 33ab2a203c1b..eeed4ffd8131 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h @@ -124,6 +124,22 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state) } } +static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state) +{ + switch (state) { + case EP0_UNCONNECTED: + return "Unconnected"; + case EP0_SETUP_PHASE: + return "Setup Phase"; + case EP0_DATA_PHASE: + return "Data Phase"; + case EP0_STATUS_PHASE: + return "Status Phase"; + default: + return "UNKNOWN"; + } +} + /** * dwc3_gadget_event_string - returns event name * @event: the event code @@ -184,10 +200,11 @@ dwc3_gadget_event_string(const struct dwc3_event_devt *event) * @event: then event code */ static inline const char * -dwc3_ep_event_string(const struct dwc3_event_depevt *event) +dwc3_ep_event_string(const struct dwc3_event_depevt *event, u32 ep0state) { u8 epnum = event->endpoint_number; static char str[256]; + size_t len; int status; int ret; @@ -199,6 +216,10 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event) switch (event->endpoint_event) { case DWC3_DEPEVT_XFERCOMPLETE: strcat(str, "Transfer Complete"); + len = strlen(str); + + if (epnum <= 1) + sprintf(str + len, " [%s]", dwc3_ep0_state_string(ep0state)); break; case DWC3_DEPEVT_XFERINPROGRESS: strcat(str, "Transfer In-Progress"); @@ -207,6 +228,19 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event) strcat(str, "Transfer Not Ready"); status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE; strcat(str, status ? " (Active)" : " (Not Active)"); + + /* Control Endpoints */ + if (epnum <= 1) { + int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status); + + switch (phase) { + case DEPEVT_STATUS_CONTROL_DATA: + strcat(str, " [Data Phase]"); + break; + case DEPEVT_STATUS_CONTROL_STATUS: + strcat(str, " [Status Phase]"); + } + } break; case DWC3_DEPEVT_RXTXFIFOEVT: strcat(str, "FIFO"); @@ -270,14 +304,14 @@ static inline const char *dwc3_gadget_event_type_string(u8 event) } } -static inline const char *dwc3_decode_event(u32 event) +static inline const char *dwc3_decode_event(u32 event, u32 ep0state) { const union dwc3_event evt = (union dwc3_event) event; if (evt.type.is_devspec) return dwc3_gadget_event_string(&evt.devt); else - return dwc3_ep_event_string(&evt.depevt); + return dwc3_ep_event_string(&evt.depevt, ep0state); } static inline const char *dwc3_ep_cmd_status_string(int status) @@ -310,7 +344,6 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status) } } -void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...); #ifdef CONFIG_DEBUG_FS extern void dwc3_debugfs_init(struct dwc3 *); diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 2f1fb7e7aa54..e27899bb5706 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -20,7 +20,6 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/platform_device.h> -#include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/usb/otg.h> #include <linux/usb/usb_phy_generic.h> @@ -117,15 +116,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev) if (!exynos) return -ENOMEM; - /* - * Right now device-tree probed devices don't get dma_mask set. 
- * Since shared usb code relies on it, set it here for now. - * Once we move to full device tree support this will vanish off. - */ - ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); - if (ret) - return ret; - platform_set_drvdata(pdev, exynos); exynos->dev = dev; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 6df0f5dad9a4..2b73339f286b 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -39,6 +39,27 @@ #define PCI_DEVICE_ID_INTEL_APL 0x5aaa #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 +#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" +#define PCI_INTEL_BXT_FUNC_PMU_PWR 4 +#define PCI_INTEL_BXT_STATE_D0 0 +#define PCI_INTEL_BXT_STATE_D3 3 + +/** + * struct dwc3_pci - Driver private structure + * @dwc3: child dwc3 platform_device + * @pci: our link to PCI bus + * @uuid: _DSM UUID + * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM + */ +struct dwc3_pci { + struct platform_device *dwc3; + struct pci_dev *pci; + + u8 uuid[16]; + + unsigned int has_dsm_for_pm:1; +}; + static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; @@ -48,8 +69,21 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = { { }, }; -static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3) +static int dwc3_pci_quirks(struct dwc3_pci *dwc) { + struct platform_device *dwc3 = dwc->dwc3; + struct pci_dev *pdev = dwc->pci; + int ret; + + struct property_entry sysdev_property[] = { + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), + { }, + }; + + ret = platform_device_add_properties(dwc3, sysdev_property); + if (ret) + return ret; + if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == PCI_DEVICE_ID_AMD_NL_USB) { struct property_entry properties[] = { @@ -89,6 +123,12 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3) if (ret < 0) return ret; + if (pdev->device == PCI_DEVICE_ID_INTEL_BXT || + pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) { + acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid); + dwc->has_dsm_for_pm = true; + } + if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) { struct gpio_desc *gpio; @@ -139,8 +179,8 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3) static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) { + struct dwc3_pci *dwc; struct resource res[2]; - struct platform_device *dwc3; int ret; struct device *dev = &pci->dev; @@ -152,11 +192,13 @@ static int dwc3_pci_probe(struct pci_dev *pci, pci_set_master(pci); - dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO); - if (!dwc3) { - dev_err(dev, "couldn't allocate dwc3 device\n"); + dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL); + if (!dwc) + return -ENOMEM; + + dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO); + if (!dwc->dwc3) return -ENOMEM; - } memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res)); @@ -169,20 +211,21 @@ static int dwc3_pci_probe(struct pci_dev *pci, res[1].name = "dwc_usb3"; res[1].flags = IORESOURCE_IRQ; - ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res)); + ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res)); if (ret) { dev_err(dev, "couldn't add resources to dwc3 device\n"); return ret; } - dwc3->dev.parent = dev; - ACPI_COMPANION_SET(&dwc3->dev, ACPI_COMPANION(dev)); + dwc->pci = pci; + dwc->dwc3->dev.parent = dev; + ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev)); - ret = 
dwc3_pci_quirks(pci, dwc3); + ret = dwc3_pci_quirks(dwc); if (ret) goto err; - ret = platform_device_add(dwc3); + ret = platform_device_add(dwc->dwc3); if (ret) { dev_err(dev, "failed to register dwc3 device\n"); goto err; @@ -190,21 +233,23 @@ static int dwc3_pci_probe(struct pci_dev *pci, device_init_wakeup(dev, true); device_set_run_wake(dev, true); - pci_set_drvdata(pci, dwc3); + pci_set_drvdata(pci, dwc); pm_runtime_put(dev); return 0; err: - platform_device_put(dwc3); + platform_device_put(dwc->dwc3); return ret; } static void dwc3_pci_remove(struct pci_dev *pci) { + struct dwc3_pci *dwc = pci_get_drvdata(pci); + device_init_wakeup(&pci->dev, false); pm_runtime_get(&pci->dev); acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev)); - platform_device_unregister(pci_get_drvdata(pci)); + platform_device_unregister(dwc->dwc3); } static const struct pci_device_id dwc3_pci_id_table[] = { @@ -234,40 +279,75 @@ static const struct pci_device_id dwc3_pci_id_table[] = { }; MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); +#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP) +static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param) +{ + union acpi_object *obj; + union acpi_object tmp; + union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp); + + if (!dwc->has_dsm_for_pm) + return 0; + + tmp.type = ACPI_TYPE_INTEGER; + tmp.integer.value = param; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid, + 1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4); + if (!obj) { + dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n"); + return -EIO; + } + + ACPI_FREE(obj); + + return 0; +} +#endif /* CONFIG_PM || CONFIG_PM_SLEEP */ + #ifdef CONFIG_PM static int dwc3_pci_runtime_suspend(struct device *dev) { + struct dwc3_pci *dwc = dev_get_drvdata(dev); + if (device_run_wake(dev)) - return 0; + return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3); return -EBUSY; } static int dwc3_pci_runtime_resume(struct device *dev) { - struct platform_device *dwc3 = dev_get_drvdata(dev); + struct dwc3_pci *dwc = dev_get_drvdata(dev); + struct platform_device *dwc3 = dwc->dwc3; + int ret; + + ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0); + if (ret) + return ret; return pm_runtime_get(&dwc3->dev); } #endif /* CONFIG_PM */ #ifdef CONFIG_PM_SLEEP -static int dwc3_pci_pm_dummy(struct device *dev) +static int dwc3_pci_suspend(struct device *dev) { - /* - * There's nothing to do here. No, seriously. Everything is either taken - * care either by PCI subsystem or dwc3/core.c, so we have nothing - * missing here. 
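The "linux,sysdev_is_parent" boolean that dwc3_pci_quirks() now attaches to the child platform device ties in with the new sysdev/sysdev_is_parent members of struct dwc3: later hunks route coherent DMA allocations and request mapping through dwc->sysdev instead of dwc->dev. The consumer side lives in core.c (presumably in dwc3_get_properties(), which probe now calls) and is not part of the hunks shown here; a plausible sketch of that glue, assuming the generic device-property API, would be:

#include <linux/property.h>
#include "core.h"	/* struct dwc3 */

/* Sketch only: translate the boolean property into the struct device
 * used for DMA. For the PCI glue above, the parent is the PCI function,
 * which is the device that actually owns the DMA mask. */
static void dwc3_pick_sysdev(struct dwc3 *dwc)
{
	dwc->sysdev_is_parent = device_property_read_bool(dwc->dev,
					"linux,sysdev_is_parent");

	dwc->sysdev = dwc->sysdev_is_parent ? dwc->dev->parent : dwc->dev;
}

This is also why dwc3_probe() no longer copies dma_mask from the parent by hand, and why the exynos and st glue drivers drop their own DMA-mask setup in this patch.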
- * - * So you'd think we didn't need this at all, but PCI subsystem will - * bail out if we don't have a valid callback :-s - */ - return 0; + struct dwc3_pci *dwc = dev_get_drvdata(dev); + + return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3); +} + +static int dwc3_pci_resume(struct device *dev) +{ + struct dwc3_pci *dwc = dev_get_drvdata(dev); + + return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0); } #endif /* CONFIG_PM_SLEEP */ static struct dev_pm_ops dwc3_pci_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy) + SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume) SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume, NULL) }; diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 89a2f712fdfe..dfbf464eb88c 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -31,6 +31,7 @@ #include <linux/slab.h> #include <linux/regmap.h> #include <linux/reset.h> +#include <linux/pinctrl/consumer.h> #include <linux/usb/of.h> #include "core.h" @@ -218,7 +219,6 @@ static int st_dwc3_probe(struct platform_device *pdev) if (IS_ERR(regmap)) return PTR_ERR(regmap); - dma_set_coherent_mask(dev, dev->coherent_dma_mask); dwc3_data->dev = dev; dwc3_data->regmap = regmap; diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index fe79d771dee4..4878d187c7d4 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -39,22 +39,6 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep); static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req); -static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state) -{ - switch (state) { - case EP0_UNCONNECTED: - return "Unconnected"; - case EP0_SETUP_PHASE: - return "Setup Phase"; - case EP0_DATA_PHASE: - return "Data Phase"; - case EP0_STATUS_PHASE: - return "Status Phase"; - default: - return "UNKNOWN"; - } -} - static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, u32 len, u32 type, bool chain) { @@ -65,10 +49,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, int ret; dep = dwc->eps[epnum]; - if (dep->flags & DWC3_EP_BUSY) { - dwc3_trace(trace_dwc3_ep0, "%s still busy", dep->name); + if (dep->flags & DWC3_EP_BUSY) return 0; - } trb = &dwc->ep0_trb[dep->trb_enqueue]; @@ -99,11 +81,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, trace_dwc3_prepare_trb(dep, trb); ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, ¶ms); - if (ret < 0) { - dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed", - dep->name); + if (ret < 0) return ret; - } dep->flags |= DWC3_EP_BUSY; dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); @@ -163,9 +142,6 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep, if (dwc->ep0state == EP0_STATUS_PHASE) __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]); - else - dwc3_trace(trace_dwc3_ep0, - "too early for delayed status"); return 0; } @@ -229,9 +205,8 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, spin_lock_irqsave(&dwc->lock, flags); if (!dep->endpoint.desc) { - dwc3_trace(trace_dwc3_ep0, - "trying to queue request %p to disabled %s", - request, dep->name); + dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", + dep->name); ret = -ESHUTDOWN; goto out; } @@ -242,11 +217,6 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, goto out; } - dwc3_trace(trace_dwc3_ep0, - "queueing request %p to 
%s length %d state '%s'", - request, dep->name, request->length, - dwc3_ep0_state_string(dwc->ep0state)); - ret = __dwc3_gadget_ep0_queue(dep, req); out: @@ -308,6 +278,8 @@ void dwc3_ep0_out_start(struct dwc3 *dwc) { int ret; + complete(&dwc->ep0_in_setup); + ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8, DWC3_TRBCTL_CONTROL_SETUP, false); WARN_ON(ret < 0); @@ -395,126 +367,198 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req); } -static int dwc3_ep0_handle_feature(struct dwc3 *dwc, +static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state, + int set) +{ + u32 reg; + + if (state != USB_STATE_CONFIGURED) + return -EINVAL; + if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && + (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) + return -EINVAL; + + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + if (set) + reg |= DWC3_DCTL_INITU1ENA; + else + reg &= ~DWC3_DCTL_INITU1ENA; + dwc3_writel(dwc->regs, DWC3_DCTL, reg); + + return 0; +} + +static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state, + int set) +{ + u32 reg; + + + if (state != USB_STATE_CONFIGURED) + return -EINVAL; + if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && + (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) + return -EINVAL; + + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + if (set) + reg |= DWC3_DCTL_INITU2ENA; + else + reg &= ~DWC3_DCTL_INITU2ENA; + dwc3_writel(dwc->regs, DWC3_DCTL, reg); + + return 0; +} + +static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state, + u32 wIndex, int set) +{ + if ((wIndex & 0xff) != 0) + return -EINVAL; + if (!set) + return -EINVAL; + + switch (wIndex >> 8) { + case TEST_J: + case TEST_K: + case TEST_SE0_NAK: + case TEST_PACKET: + case TEST_FORCE_EN: + dwc->test_mode_nr = wIndex >> 8; + dwc->test_mode = true; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int dwc3_ep0_handle_device(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl, int set) { - struct dwc3_ep *dep; - u32 recip; + enum usb_device_state state; u32 wValue; u32 wIndex; - u32 reg; - int ret; - enum usb_device_state state; + int ret = 0; wValue = le16_to_cpu(ctrl->wValue); wIndex = le16_to_cpu(ctrl->wIndex); - recip = ctrl->bRequestType & USB_RECIP_MASK; state = dwc->gadget.state; - switch (recip) { - case USB_RECIP_DEVICE: + switch (wValue) { + case USB_DEVICE_REMOTE_WAKEUP: + break; + /* + * 9.4.1 says only only for SS, in AddressState only for + * default control pipe + */ + case USB_DEVICE_U1_ENABLE: + ret = dwc3_ep0_handle_u1(dwc, state, set); + break; + case USB_DEVICE_U2_ENABLE: + ret = dwc3_ep0_handle_u2(dwc, state, set); + break; + case USB_DEVICE_LTM_ENABLE: + ret = -EINVAL; + break; + case USB_DEVICE_TEST_MODE: + ret = dwc3_ep0_handle_test(dwc, state, wIndex, set); + break; + default: + ret = -EINVAL; + } - switch (wValue) { - case USB_DEVICE_REMOTE_WAKEUP: - break; + return ret; +} + +static int dwc3_ep0_handle_intf(struct dwc3 *dwc, + struct usb_ctrlrequest *ctrl, int set) +{ + enum usb_device_state state; + u32 wValue; + u32 wIndex; + int ret = 0; + + wValue = le16_to_cpu(ctrl->wValue); + wIndex = le16_to_cpu(ctrl->wIndex); + state = dwc->gadget.state; + + switch (wValue) { + case USB_INTRF_FUNC_SUSPEND: /* - * 9.4.1 says only only for SS, in AddressState only for - * default control pipe + * REVISIT: Ideally we would enable some low power mode here, + * however it's unclear what we should be doing here. 
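For reference, the requests that land in these new helpers are plain chapter-9 SET_FEATURE transfers; the sketch below (built from ch9.h constants, not code from this patch) shows what dwc3_ep0_handle_u1() and dwc3_ep0_handle_test() actually decode, including the test selector travelling in the high byte of wIndex:

#include <linux/usb/ch9.h>

/* SET_FEATURE(U1_ENABLE): recipient is the device, the feature
 * selector rides in wValue, wIndex is unused. */
static const struct usb_ctrlrequest set_u1_enable = {
	.bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
	.bRequest     = USB_REQ_SET_FEATURE,
	.wValue       = cpu_to_le16(USB_DEVICE_U1_ENABLE),
};

/* SET_FEATURE(TEST_MODE) with the Test_Packet selector: the selector
 * sits in the high byte of wIndex, which is why the handler insists on
 * (wIndex & 0xff) == 0 and switches on wIndex >> 8. */
static const struct usb_ctrlrequest set_test_packet = {
	.bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
	.bRequest     = USB_REQ_SET_FEATURE,
	.wValue       = cpu_to_le16(USB_DEVICE_TEST_MODE),
	.wIndex       = cpu_to_le16(TEST_PACKET << 8),
};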
+ * + * For now, we're not doing anything, just making sure we return + * 0 so USB Command Verifier tests pass without any errors. */ - case USB_DEVICE_U1_ENABLE: - if (state != USB_STATE_CONFIGURED) - return -EINVAL; - if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && - (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) - return -EINVAL; + break; + default: + ret = -EINVAL; + } - reg = dwc3_readl(dwc->regs, DWC3_DCTL); - if (set) - reg |= DWC3_DCTL_INITU1ENA; - else - reg &= ~DWC3_DCTL_INITU1ENA; - dwc3_writel(dwc->regs, DWC3_DCTL, reg); - break; + return ret; +} - case USB_DEVICE_U2_ENABLE: - if (state != USB_STATE_CONFIGURED) - return -EINVAL; - if ((dwc->speed != DWC3_DSTS_SUPERSPEED) && - (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS)) - return -EINVAL; +static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc, + struct usb_ctrlrequest *ctrl, int set) +{ + struct dwc3_ep *dep; + enum usb_device_state state; + u32 wValue; + u32 wIndex; + int ret; - reg = dwc3_readl(dwc->regs, DWC3_DCTL); - if (set) - reg |= DWC3_DCTL_INITU2ENA; - else - reg &= ~DWC3_DCTL_INITU2ENA; - dwc3_writel(dwc->regs, DWC3_DCTL, reg); - break; + wValue = le16_to_cpu(ctrl->wValue); + wIndex = le16_to_cpu(ctrl->wIndex); + state = dwc->gadget.state; - case USB_DEVICE_LTM_ENABLE: + switch (wValue) { + case USB_ENDPOINT_HALT: + dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex); + if (!dep) return -EINVAL; - case USB_DEVICE_TEST_MODE: - if ((wIndex & 0xff) != 0) - return -EINVAL; - if (!set) - return -EINVAL; - - switch (wIndex >> 8) { - case TEST_J: - case TEST_K: - case TEST_SE0_NAK: - case TEST_PACKET: - case TEST_FORCE_EN: - dwc->test_mode_nr = wIndex >> 8; - dwc->test_mode = true; - break; - default: - return -EINVAL; - } + if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) break; - default: + + ret = __dwc3_gadget_ep_set_halt(dep, set, true); + if (ret) return -EINVAL; - } break; + default: + return -EINVAL; + } + return 0; +} + +static int dwc3_ep0_handle_feature(struct dwc3 *dwc, + struct usb_ctrlrequest *ctrl, int set) +{ + u32 recip; + int ret; + enum usb_device_state state; + + recip = ctrl->bRequestType & USB_RECIP_MASK; + state = dwc->gadget.state; + + switch (recip) { + case USB_RECIP_DEVICE: + ret = dwc3_ep0_handle_device(dwc, ctrl, set); + break; case USB_RECIP_INTERFACE: - switch (wValue) { - case USB_INTRF_FUNC_SUSPEND: - if (wIndex & USB_INTRF_FUNC_SUSPEND_LP) - /* XXX enable Low power suspend */ - ; - if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) - /* XXX enable remote wakeup */ - ; - break; - default: - return -EINVAL; - } + ret = dwc3_ep0_handle_intf(dwc, ctrl, set); break; - case USB_RECIP_ENDPOINT: - switch (wValue) { - case USB_ENDPOINT_HALT: - dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex); - if (!dep) - return -EINVAL; - if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) - break; - ret = __dwc3_gadget_ep_set_halt(dep, set, true); - if (ret) - return -EINVAL; - break; - default: - return -EINVAL; - } + ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set); break; - default: - return -EINVAL; + ret = -EINVAL; } - return 0; + return ret; } static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) @@ -525,13 +569,12 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) addr = le16_to_cpu(ctrl->wValue); if (addr > 127) { - dwc3_trace(trace_dwc3_ep0, "invalid device address %d", addr); + dev_err(dwc->dev, "invalid device address %d\n", addr); return -EINVAL; } if (state == USB_STATE_CONFIGURED) { - dwc3_trace(trace_dwc3_ep0, - "trying to set address when configured"); + dev_err(dwc->dev, "can't 
SetAddress() from Configured State\n"); return -EINVAL; } @@ -716,35 +759,27 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) switch (ctrl->bRequest) { case USB_REQ_GET_STATUS: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_GET_STATUS"); ret = dwc3_ep0_handle_status(dwc, ctrl); break; case USB_REQ_CLEAR_FEATURE: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_CLEAR_FEATURE"); ret = dwc3_ep0_handle_feature(dwc, ctrl, 0); break; case USB_REQ_SET_FEATURE: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_FEATURE"); ret = dwc3_ep0_handle_feature(dwc, ctrl, 1); break; case USB_REQ_SET_ADDRESS: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ADDRESS"); ret = dwc3_ep0_set_address(dwc, ctrl); break; case USB_REQ_SET_CONFIGURATION: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_CONFIGURATION"); ret = dwc3_ep0_set_config(dwc, ctrl); break; case USB_REQ_SET_SEL: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_SEL"); ret = dwc3_ep0_set_sel(dwc, ctrl); break; case USB_REQ_SET_ISOCH_DELAY: - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); break; default: - dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); ret = dwc3_ep0_delegate_req(dwc, ctrl); break; } @@ -820,9 +855,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (status == DWC3_TRBSTS_SETUP_PENDING) { dwc->setup_packet_pending = true; - - dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); - if (r) dwc3_gadget_giveback(ep0, r, -ECONNRESET); @@ -912,7 +944,7 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc, ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr); if (ret < 0) { - dwc3_trace(trace_dwc3_ep0, "Invalid Test #%d", + dev_err(dwc->dev, "invalid test #%d\n", dwc->test_mode_nr); dwc3_ep0_stall_and_restart(dwc); return; @@ -920,10 +952,8 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc, } status = DWC3_TRB_SIZE_TRBSTS(trb->size); - if (status == DWC3_TRBSTS_SETUP_PENDING) { + if (status == DWC3_TRBSTS_SETUP_PENDING) dwc->setup_packet_pending = true; - dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); - } dwc->ep0state = EP0_SETUP_PHASE; dwc3_ep0_out_start(dwc); @@ -940,17 +970,14 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc, switch (dwc->ep0state) { case EP0_SETUP_PHASE: - dwc3_trace(trace_dwc3_ep0, "Setup Phase"); dwc3_ep0_inspect_setup(dwc, event); break; case EP0_DATA_PHASE: - dwc3_trace(trace_dwc3_ep0, "Data Phase"); dwc3_ep0_complete_data(dwc, event); break; case EP0_STATUS_PHASE: - dwc3_trace(trace_dwc3_ep0, "Status Phase"); dwc3_ep0_complete_status(dwc, event); break; default: @@ -974,12 +1001,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, u32 transfer_size = 0; u32 maxpacket; - ret = usb_gadget_map_request(&dwc->gadget, &req->request, - dep->number); - if (ret) { - dwc3_trace(trace_dwc3_ep0, "failed to map request"); + ret = usb_gadget_map_request_by_dev(dwc->sysdev, + &req->request, dep->number); + if (ret) return; - } maxpacket = dep->endpoint.maxpacket; @@ -1002,12 +1027,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, dwc->ep0_bounce_addr, transfer_size, DWC3_TRBCTL_CONTROL_DATA, false); } else { - ret = usb_gadget_map_request(&dwc->gadget, &req->request, - dep->number); - if (ret) { - dwc3_trace(trace_dwc3_ep0, "failed to map request"); + ret = usb_gadget_map_request_by_dev(dwc->sysdev, + &req->request, dep->number); + if (ret) return; - } ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma, req->request.length, DWC3_TRBCTL_CONTROL_DATA, @@ -1065,8 +1088,6 @@ 
static void dwc3_ep0_xfernotready(struct dwc3 *dwc, { switch (event->status) { case DEPEVT_STATUS_CONTROL_DATA: - dwc3_trace(trace_dwc3_ep0, "Control Data"); - /* * We already have a DATA transfer in the controller's cache, * if we receive a XferNotReady(DATA) we will ignore it, unless @@ -1079,8 +1100,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc, if (dwc->ep0_expect_in != event->endpoint_number) { struct dwc3_ep *dep = dwc->eps[dwc->ep0_expect_in]; - dwc3_trace(trace_dwc3_ep0, - "Wrong direction for Data phase"); + dev_err(dwc->dev, "unexpected direction for Data Phase\n"); dwc3_ep0_end_control_data(dwc, dep); dwc3_ep0_stall_and_restart(dwc); return; @@ -1092,13 +1112,10 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc, if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) return; - dwc3_trace(trace_dwc3_ep0, "Control Status"); - dwc->ep0state = EP0_STATUS_PHASE; if (dwc->delayed_status) { WARN_ON_ONCE(event->endpoint_number != 1); - dwc3_trace(trace_dwc3_ep0, "Delayed Status"); return; } @@ -1109,10 +1126,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc, void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event) { - dwc3_trace(trace_dwc3_ep0, "%s: state '%s'", - dwc3_ep_event_string(event), - dwc3_ep0_state_string(dwc->ep0state)); - switch (event->endpoint_event) { case DWC3_DEPEVT_XFERCOMPLETE: dwc3_ep0_xfer_complete(dwc, event); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 1dfa56a5f1c5..678559525618 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -139,9 +139,6 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) udelay(5); } - dwc3_trace(trace_dwc3_gadget, - "link state change request timed out"); - return -ETIMEDOUT; } @@ -178,6 +175,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, req->started = false; list_del(&req->list); req->trb = NULL; + req->remaining = 0; if (req->request.status == -EINPROGRESS) req->request.status = status; @@ -185,8 +183,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, if (dwc->ep0_bounced && dep->number == 0) dwc->ep0_bounced = false; else - usb_gadget_unmap_request(&dwc->gadget, &req->request, - req->direction); + usb_gadget_unmap_request_by_dev(dwc->sysdev, + &req->request, req->direction); trace_dwc3_gadget_giveback(req); @@ -216,7 +214,7 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) ret = -EINVAL; break; } - } while (timeout--); + } while (--timeout); if (!timeout) { ret = -ETIMEDOUT; @@ -233,6 +231,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc); int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) { + const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; u32 timeout = 500; u32 reg; @@ -258,7 +257,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, } } - if (cmd == DWC3_DEPCMD_STARTTRANSFER) { + if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) { int needs_wakeup; needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 || @@ -276,7 +275,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1); dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2); - dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT); + /* + * Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're + * not relying on XferNotReady, we can make use of a special "No + * 
Response Update Transfer" command where we should clear both CmdAct + * and CmdIOC bits. + * + * With this, we don't need to wait for command completion and can + * straight away issue further commands to the endpoint. + * + * NOTICE: We're making an assumption that control endpoints will never + * make use of Update Transfer command. This is a safe assumption + * because we can never have more than one request at a time with + * Control Endpoints. If anybody changes that assumption, this chunk + * needs to be updated accordingly. + */ + if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER && + !usb_endpoint_xfer_isoc(desc)) + cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT); + else + cmd |= DWC3_DEPCMD_CMDACT; + + dwc3_writel(dep->regs, DWC3_DEPCMD, cmd); do { reg = dwc3_readl(dep->regs, DWC3_DEPCMD); if (!(reg & DWC3_DEPCMD_CMDACT)) { @@ -318,6 +338,20 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status); + if (ret == 0) { + switch (DWC3_DEPCMD_CMD(cmd)) { + case DWC3_DEPCMD_STARTTRANSFER: + dep->flags |= DWC3_EP_TRANSFER_STARTED; + break; + case DWC3_DEPCMD_ENDTRANSFER: + dep->flags &= ~DWC3_EP_TRANSFER_STARTED; + break; + default: + /* nothing */ + break; + } + } + if (unlikely(susphy)) { reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); reg |= DWC3_GUSB2PHYCFG_SUSPHY; @@ -365,7 +399,7 @@ static int dwc3_alloc_trb_pool(struct dwc3_ep *dep) if (dep->trb_pool) return 0; - dep->trb_pool = dma_alloc_coherent(dwc->dev, + dep->trb_pool = dma_alloc_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, &dep->trb_pool_dma, GFP_KERNEL); if (!dep->trb_pool) { @@ -381,7 +415,7 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep) { struct dwc3 *dwc = dep->dwc; - dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, + dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM, dep->trb_pool, dep->trb_pool_dma); dep->trb_pool = NULL; @@ -454,16 +488,19 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) } static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, - const struct usb_endpoint_descriptor *desc, - const struct usb_ss_ep_comp_descriptor *comp_desc, bool modify, bool restore) { + const struct usb_ss_ep_comp_descriptor *comp_desc; + const struct usb_endpoint_descriptor *desc; struct dwc3_gadget_ep_cmd_params params; if (dev_WARN_ONCE(dwc->dev, modify && restore, "Can't modify and restore\n")) return -EINVAL; + comp_desc = dep->endpoint.comp_desc; + desc = dep->endpoint.desc; + memset(¶ms, 0x00, sizeof(params)); params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc)) @@ -542,24 +579,21 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep) * Caller should take care of locking */ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, - const struct usb_endpoint_descriptor *desc, - const struct usb_ss_ep_comp_descriptor *comp_desc, bool modify, bool restore) { + const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; + u32 reg; int ret; - dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name); - if (!(dep->flags & DWC3_EP_ENABLED)) { ret = dwc3_gadget_start_config(dwc, dep); if (ret) return ret; } - ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify, - restore); + ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore); if (ret) return ret; @@ -567,17 +601,18 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, struct dwc3_trb *trb_st_hw; struct 
dwc3_trb *trb_link; - dep->endpoint.desc = desc; - dep->comp_desc = comp_desc; dep->type = usb_endpoint_type(desc); dep->flags |= DWC3_EP_ENABLED; + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); reg |= DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); + init_waitqueue_head(&dep->wait_end_transfer); + if (usb_endpoint_xfer_control(desc)) - return 0; + goto out; /* Initialize the TRB ring */ dep->trb_dequeue = 0; @@ -595,6 +630,39 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, trb_link->ctrl |= DWC3_TRB_CTRL_HWO; } + /* + * Issue StartTransfer here with no-op TRB so we can always rely on No + * Response Update Transfer command. + */ + if (usb_endpoint_xfer_bulk(desc)) { + struct dwc3_gadget_ep_cmd_params params; + struct dwc3_trb *trb; + dma_addr_t trb_dma; + u32 cmd; + + memset(¶ms, 0, sizeof(params)); + trb = &dep->trb_pool[0]; + trb_dma = dwc3_trb_dma_offset(dep, trb); + + params.param0 = upper_32_bits(trb_dma); + params.param1 = lower_32_bits(trb_dma); + + cmd = DWC3_DEPCMD_STARTTRANSFER; + + ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); + if (ret < 0) + return ret; + + dep->flags |= DWC3_EP_BUSY; + + dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep); + WARN_ON_ONCE(!dep->resource_index); + } + + +out: + trace_dwc3_gadget_ep_enable(dep); + return 0; } @@ -632,7 +700,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) struct dwc3 *dwc = dep->dwc; u32 reg; - dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name); + trace_dwc3_gadget_ep_disable(dep); dwc3_remove_requests(dwc, dep); @@ -645,10 +713,14 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); dep->stream_capable = false; - dep->endpoint.desc = NULL; - dep->comp_desc = NULL; dep->type = 0; - dep->flags = 0; + dep->flags &= DWC3_EP_END_TRANSFER_PENDING; + + /* Clear out the ep descriptors for non-ep0 */ + if (dep->number > 1) { + dep->endpoint.comp_desc = NULL; + dep->endpoint.desc = NULL; + } return 0; } @@ -695,7 +767,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep, return 0; spin_lock_irqsave(&dwc->lock, flags); - ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false); + ret = __dwc3_gadget_ep_enable(dep, false, false); spin_unlock_irqrestore(&dwc->lock, flags); return ret; @@ -771,10 +843,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, unsigned length, unsigned chain, unsigned node) { struct dwc3_trb *trb; - - dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s", - dep->name, req, (unsigned long long) dma, - length, chain ? 
" chain" : ""); + struct dwc3 *dwc = dep->dwc; + struct usb_gadget *gadget = &dwc->gadget; + enum usb_device_speed speed = gadget->speed; trb = &dep->trb_pool[dep->trb_enqueue]; @@ -782,7 +853,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, dwc3_gadget_move_started_request(req); req->trb = trb; req->trb_dma = dwc3_trb_dma_offset(dep, trb); - req->first_trb_index = dep->trb_enqueue; dep->queued_requests++; } @@ -798,10 +868,16 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, break; case USB_ENDPOINT_XFER_ISOC: - if (!node) + if (!node) { trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; - else + + if (speed == USB_SPEED_HIGH) { + struct usb_ep *ep = &dep->endpoint; + trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1); + } + } else { trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; + } /* always enable Interrupt on Missed ISOC */ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; @@ -816,15 +892,21 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, * This is only possible with faulty memory because we * checked it already :) */ - BUG(); + dev_WARN(dwc->dev, "Unknown endpoint type %d\n", + usb_endpoint_type(dep->endpoint.desc)); } /* always enable Continue on Short Packet */ - trb->ctrl |= DWC3_TRB_CTRL_CSP; + if (usb_endpoint_dir_out(dep->endpoint.desc)) { + trb->ctrl |= DWC3_TRB_CTRL_CSP; + + if (req->request.short_not_ok) + trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; + } if ((!req->request.no_interrupt && !chain) || (dwc3_calc_trbs_left(dep) == 0)) - trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; + trb->ctrl |= DWC3_TRB_CTRL_IOC; if (chain) trb->ctrl |= DWC3_TRB_CTRL_CHN; @@ -859,6 +941,7 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) { struct dwc3_trb *tmp; + struct dwc3 *dwc = dep->dwc; u8 trbs_left; /* @@ -870,7 +953,8 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) */ if (dep->trb_enqueue == dep->trb_dequeue) { tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); - if (tmp->ctrl & DWC3_TRB_CTRL_HWO) + if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO, + "%s No TRBS left\n", dep->name)) return 0; return DWC3_TRB_NUM - 1; @@ -941,6 +1025,24 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep) if (!dwc3_calc_trbs_left(dep)) return; + /* + * We can get in a situation where there's a request in the started list + * but there weren't enough TRBs to fully kick it in the first time + * around, so it has been waiting for more TRBs to be freed up. + * + * In that case, we should check if we have a request with pending_sgs + * in the started list and prepare TRBs for that request first, + * otherwise we will prepare TRBs completely out of order and that will + * break things. + */ + list_for_each_entry(req, &dep->started_list, list) { + if (req->num_pending_sgs > 0) + dwc3_prepare_one_trb_sg(dep, req); + + if (!dwc3_calc_trbs_left(dep)) + return; + } + list_for_each_entry_safe(req, n, &dep->pending_list, list) { if (req->num_pending_sgs > 0) dwc3_prepare_one_trb_sg(dep, req); @@ -956,7 +1058,6 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param) { struct dwc3_gadget_ep_cmd_params params; struct dwc3_request *req; - struct dwc3 *dwc = dep->dwc; int starting; int ret; u32 cmd; @@ -989,9 +1090,10 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param) * here and stop, unmap, free and del each of the linked * requests instead of what we do now. 
*/ - usb_gadget_unmap_request(&dwc->gadget, &req->request, - req->direction); - list_del(&req->list); + if (req->trb) + memset(req->trb, 0, sizeof(struct dwc3_trb)); + dep->queued_requests--; + dwc3_gadget_giveback(dep, req, ret); return ret; } @@ -1005,14 +1107,21 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param) return 0; } +static int __dwc3_gadget_get_frame(struct dwc3 *dwc) +{ + u32 reg; + + reg = dwc3_readl(dwc->regs, DWC3_DSTS); + return DWC3_DSTS_SOFFN(reg); +} + static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, struct dwc3_ep *dep, u32 cur_uf) { u32 uf; if (list_empty(&dep->pending_list)) { - dwc3_trace(trace_dwc3_gadget, - "ISOC ep %s run out for requests", + dev_info(dwc->dev, "%s: ran out of requests\n", dep->name); dep->flags |= DWC3_EP_PENDING_REQUEST; return; @@ -1041,16 +1150,15 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) int ret; if (!dep->endpoint.desc) { - dwc3_trace(trace_dwc3_gadget, - "trying to queue request %p to disabled %s", - &req->request, dep->endpoint.name); + dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", + dep->name); return -ESHUTDOWN; } if (WARN(req->dep != dep, "request %p belongs to '%s'\n", &req->request, req->dep->name)) { - dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'", - &req->request, req->dep->name); + dev_err(dwc->dev, "%s: request %p belongs to '%s'\n", + dep->name, &req->request, req->dep->name); return -EINVAL; } @@ -1063,8 +1171,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) trace_dwc3_ep_queue(req); - ret = usb_gadget_map_request(&dwc->gadget, &req->request, - dep->direction); + ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request, + dep->direction); if (ret) return ret; @@ -1082,10 +1190,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) * errors which will force us issue EndTransfer command. 
*/ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { - if ((dep->flags & DWC3_EP_PENDING_REQUEST) && - list_empty(&dep->started_list)) { - dwc3_stop_active_transfer(dwc, dep->number, true); - dep->flags = DWC3_EP_ENABLED; + if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { + if (dep->flags & DWC3_EP_TRANSFER_STARTED) { + dwc3_stop_active_transfer(dwc, dep->number, true); + dep->flags = DWC3_EP_ENABLED; + } else { + u32 cur_uf; + + cur_uf = __dwc3_gadget_get_frame(dwc); + __dwc3_gadget_start_isoc(dwc, dep, cur_uf); + dep->flags &= ~DWC3_EP_PENDING_REQUEST; + } } return 0; } @@ -1094,10 +1209,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) return 0; ret = __dwc3_gadget_kick_transfer(dep, 0); - if (ret && ret != -EBUSY) - dwc3_trace(trace_dwc3_gadget, - "%s: failed to kick transfers", - dep->name); if (ret == -EBUSY) ret = 0; @@ -1116,7 +1227,6 @@ static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep) struct usb_request *request; struct usb_ep *ep = &dep->endpoint; - dwc3_trace(trace_dwc3_gadget, "queueing ZLP"); request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC); if (!request) return -ENOMEM; @@ -1235,9 +1345,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) if (!protocol && ((dep->direction && transfer_in_flight) || (!dep->direction && started))) { - dwc3_trace(trace_dwc3_gadget, - "%s: pending request, cannot halt", - dep->name); return -EAGAIN; } @@ -1331,10 +1438,8 @@ static const struct usb_ep_ops dwc3_gadget_ep_ops = { static int dwc3_gadget_get_frame(struct usb_gadget *g) { struct dwc3 *dwc = gadget_to_dwc(g); - u32 reg; - reg = dwc3_readl(dwc->regs, DWC3_DSTS); - return DWC3_DSTS_SOFFN(reg); + return __dwc3_gadget_get_frame(dwc); } static int __dwc3_gadget_wakeup(struct dwc3 *dwc) @@ -1357,10 +1462,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) speed = reg & DWC3_DSTS_CONNECTSPD; if ((speed == DWC3_DSTS_SUPERSPEED) || - (speed == DWC3_DSTS_SUPERSPEED_PLUS)) { - dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed"); + (speed == DWC3_DSTS_SUPERSPEED_PLUS)) return 0; - } link_state = DWC3_DSTS_USBLNKST(reg); @@ -1369,9 +1472,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc) case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ break; default: - dwc3_trace(trace_dwc3_gadget, - "can't wakeup from '%s'", - dwc3_gadget_link_string(link_state)); return -EINVAL; } @@ -1476,11 +1576,6 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) if (!timeout) return -ETIMEDOUT; - dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s", - dwc->gadget_driver - ? dwc->gadget_driver->function : "no-function", - is_on ? "connect" : "disconnect"); - return 0; } @@ -1492,6 +1587,21 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) is_on = !!is_on; + /* + * Per databook, when we want to stop the gadget, if a control transfer + * is still in process, complete it and get the core into setup phase. 
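That wait is built on the new ep0_in_setup completion: dwc3_ep0_out_start() (earlier in this patch) signals it every time a new SETUP transfer is armed, and the pull-up path below re-initializes it and sleeps for at most DWC3_PULL_UP_TIMEOUT. Stripped of the dwc3 specifics, this is the usual completion handshake; a minimal sketch, with names invented for illustration:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct ep0_sync {
	struct completion in_setup;	/* init_completion() at probe time */
};

/* Producer: called when the control endpoint is armed for SETUP. */
static void ep0_entered_setup(struct ep0_sync *s)
{
	complete(&s->in_setup);
}

/* Consumer: block the soft-disconnect until the Setup phase is
 * reached, or give up after 'ms' milliseconds. */
static int ep0_wait_for_setup(struct ep0_sync *s, unsigned int ms)
{
	reinit_completion(&s->in_setup);

	if (!wait_for_completion_timeout(&s->in_setup, msecs_to_jiffies(ms)))
		return -ETIMEDOUT;

	return 0;
}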
+ */ + if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) { + reinit_completion(&dwc->ep0_in_setup); + + ret = wait_for_completion_timeout(&dwc->ep0_in_setup, + msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT)); + if (ret == 0) { + dev_err(dwc->dev, "timed out waiting for SETUP phase\n"); + return -ETIMEDOUT; + } + } + spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_gadget_run_stop(dwc, is_on, false); spin_unlock_irqrestore(&dwc->lock, flags); @@ -1509,11 +1619,13 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc) DWC3_DEVTEN_CMDCMPLTEN | DWC3_DEVTEN_ERRTICERREN | DWC3_DEVTEN_WKUPEVTEN | - DWC3_DEVTEN_ULSTCNGEN | DWC3_DEVTEN_CONNECTDONEEN | DWC3_DEVTEN_USBRSTEN | DWC3_DEVTEN_DISCONNEVTEN); + if (dwc->revision < DWC3_REVISION_250A) + reg |= DWC3_DEVTEN_ULSTCNGEN; + dwc3_writel(dwc->regs, DWC3_DEVTEN, reg); } @@ -1573,6 +1685,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) int ret = 0; u32 reg; + /* + * Use IMOD if enabled via dwc->imod_interval. Otherwise, if + * the core supports IMOD, disable it. + */ + if (dwc->imod_interval) { + dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); + } else if (dwc3_has_imod(dwc)) { + dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0); + } + reg = dwc3_readl(dwc->regs, DWC3_DCFG); reg &= ~(DWC3_DCFG_SPEED_MASK); @@ -1633,16 +1756,14 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); dep = dwc->eps[0]; - ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, - false); + ret = __dwc3_gadget_ep_enable(dep, false, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); goto err0; } dep = dwc->eps[1]; - ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false, - false); + ret = __dwc3_gadget_ep_enable(dep, false, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); goto err1; @@ -1708,9 +1829,6 @@ err0: static void __dwc3_gadget_stop(struct dwc3 *dwc) { - if (pm_runtime_suspended(dwc->dev)) - return; - dwc3_gadget_disable_irq(dwc); __dwc3_gadget_ep_disable(dwc->eps[0]); __dwc3_gadget_ep_disable(dwc->eps[1]); @@ -1720,9 +1838,30 @@ static int dwc3_gadget_stop(struct usb_gadget *g) { struct dwc3 *dwc = gadget_to_dwc(g); unsigned long flags; + int epnum; spin_lock_irqsave(&dwc->lock, flags); + + if (pm_runtime_suspended(dwc->dev)) + goto out; + __dwc3_gadget_stop(dwc); + + for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { + struct dwc3_ep *dep = dwc->eps[epnum]; + + if (!dep) + continue; + + if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) + continue; + + wait_event_lock_irq(dep->wait_end_transfer, + !(dep->flags & DWC3_EP_END_TRANSFER_PENDING), + dwc->lock); + } + +out: dwc->gadget_driver = NULL; spin_unlock_irqrestore(&dwc->lock, flags); @@ -1765,9 +1904,13 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc, (epnum & 1) ? 
"in" : "out"); dep->endpoint.name = dep->name; - spin_lock_init(&dep->lock); - dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name); + if (!(dep->number > 1)) { + dep->endpoint.desc = &dwc3_gadget_ep0_desc; + dep->endpoint.comp_desc = NULL; + } + + spin_lock_init(&dep->lock); if (epnum == 0 || epnum == 1) { usb_ep_set_maxpacket_limit(&dep->endpoint, 512); @@ -1815,15 +1958,13 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc) ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0); if (ret < 0) { - dwc3_trace(trace_dwc3_gadget, - "failed to allocate OUT endpoints"); + dev_err(dwc->dev, "failed to initialize OUT endpoints\n"); return ret; } ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1); if (ret < 0) { - dwc3_trace(trace_dwc3_gadget, - "failed to allocate IN endpoints"); + dev_err(dwc->dev, "failed to initialize IN endpoints\n"); return ret; } @@ -1892,15 +2033,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, return 1; count = trb->size & DWC3_TRB_SIZE_MASK; - req->request.actual += count; + req->remaining += count; if (dep->direction) { if (count) { trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size); if (trb_status == DWC3_TRBSTS_MISSED_ISOC) { - dwc3_trace(trace_dwc3_gadget, - "%s: incomplete IN transfer", - dep->name); /* * If missed isoc occurred and there is * no request queued then issue END @@ -1946,11 +2084,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req, *n; struct dwc3_trb *trb; bool ioc = false; - int ret; + int ret = 0; list_for_each_entry_safe(req, n, &dep->started_list, list) { unsigned length; - unsigned actual; int chain; length = req->request.length; @@ -1964,6 +2101,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, for_each_sg(sg, s, pending, i) { trb = &dep->trb_pool[dep->trb_dequeue]; + if (trb->ctrl & DWC3_TRB_CTRL_HWO) + break; + req->sg = sg_next(s); req->num_pending_sgs--; @@ -1978,17 +2118,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, event, status, chain); } - /* - * We assume here we will always receive the entire data block - * which we should receive. Meaning, if we program RX to - * receive 4K but we receive only 2K, we assume that's all we - * should receive and we simply bounce the request back to the - * gadget driver for further processing. 
- */ - actual = length - req->request.actual; - req->request.actual = actual; + req->request.actual = length - req->remaining; - if (ret && chain && (actual < length) && req->num_pending_sgs) + if ((req->request.actual < length) && req->num_pending_sgs) return __dwc3_gadget_kick_transfer(dep, 0); dwc3_gadget_giveback(dep, req, status); @@ -2096,10 +2228,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, { struct dwc3_ep *dep; u8 epnum = event->endpoint_number; + u8 cmd; dep = dwc->eps[epnum]; - if (!(dep->flags & DWC3_EP_ENABLED)) + if (!(dep->flags & DWC3_EP_ENABLED) && + !(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) return; if (epnum == 0 || epnum == 1) { @@ -2112,9 +2246,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, dep->resource_index = 0; if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { - dwc3_trace(trace_dwc3_gadget, - "%s is an Isochronous endpoint", - dep->name); + dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n"); return; } @@ -2127,22 +2259,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { dwc3_gadget_start_isoc(dwc, dep, event); } else { - int active; int ret; - active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE; - - dwc3_trace(trace_dwc3_gadget, "%s: reason %s", - dep->name, active ? "Transfer Active" - : "Transfer Not Active"); - ret = __dwc3_gadget_kick_transfer(dep, 0); if (!ret || ret == -EBUSY) return; - - dwc3_trace(trace_dwc3_gadget, - "%s: failed to kick transfers", - dep->name); } break; @@ -2152,26 +2273,16 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, dep->name); return; } + break; + case DWC3_DEPEVT_EPCMDCMPLT: + cmd = DEPEVT_PARAMETER_CMD(event->parameters); - switch (event->status) { - case DEPEVT_STREAMEVT_FOUND: - dwc3_trace(trace_dwc3_gadget, - "Stream %d found and started", - event->parameters); - - break; - case DEPEVT_STREAMEVT_NOTFOUND: - /* FALLTHROUGH */ - default: - dwc3_trace(trace_dwc3_gadget, - "unable to find suitable stream"); + if (cmd == DWC3_DEPCMD_ENDTRANSFER) { + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; + wake_up(&dep->wait_end_transfer); } break; case DWC3_DEPEVT_RXTXFIFOEVT: - dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name); - break; - case DWC3_DEPEVT_EPCMDCMPLT: - dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete"); break; } } @@ -2224,7 +2335,8 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) dep = dwc->eps[epnum]; - if (!dep->resource_index) + if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || + !dep->resource_index) return; /* @@ -2268,25 +2380,9 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) dep->resource_index = 0; dep->flags &= ~DWC3_EP_BUSY; - if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) + if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) { + dep->flags |= DWC3_EP_END_TRANSFER_PENDING; udelay(100); -} - -static void dwc3_stop_active_transfers(struct dwc3 *dwc) -{ - u32 epnum; - - for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) { - struct dwc3_ep *dep; - - dep = dwc->eps[epnum]; - if (!dep) - continue; - - if (!(dep->flags & DWC3_EP_ENABLED)) - continue; - - dwc3_remove_requests(dwc, dep); } } @@ -2375,8 +2471,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) reg &= ~DWC3_DCTL_TSTCTRL_MASK; dwc3_writel(dwc->regs, DWC3_DCTL, reg); dwc->test_mode = false; - - dwc3_stop_active_transfers(dwc); dwc3_clear_stall_all_ep(dwc); /* Reset device address to zero */ @@ -2385,32 +2479,6 @@ static void 
dwc3_gadget_reset_interrupt(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_DCFG, reg); } -static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed) -{ - u32 reg; - u32 usb30_clock = DWC3_GCTL_CLK_BUS; - - /* - * We change the clock only at SS but I dunno why I would want to do - * this. Maybe it becomes part of the power saving plan. - */ - - if ((speed != DWC3_DSTS_SUPERSPEED) && - (speed != DWC3_DSTS_SUPERSPEED_PLUS)) - return; - - /* - * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed - * each time on Connect Done. - */ - if (!usb30_clock) - return; - - reg = dwc3_readl(dwc->regs, DWC3_GCTL); - reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock); - dwc3_writel(dwc->regs, DWC3_GCTL, reg); -} - static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) { struct dwc3_ep *dep; @@ -2422,7 +2490,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) speed = reg & DWC3_DSTS_CONNECTSPD; dwc->speed = speed; - dwc3_update_ram_clk_sel(dwc, speed); + /* + * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed + * each time on Connect Done. + * + * Currently we always use the reset value. If any platform + * wants to set this to a different value, we need to add a + * setting and update GCTL.RAMCLKSEL here. + */ switch (speed) { case DWC3_DSTS_SUPERSPEED_PLUS: @@ -2504,16 +2579,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) } dep = dwc->eps[0]; - ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, - false); + ret = __dwc3_gadget_ep_enable(dep, true, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); return; } dep = dwc->eps[1]; - ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true, - false); + ret = __dwc3_gadget_ep_enable(dep, true, false); if (ret) { dev_err(dwc->dev, "failed to enable %s\n", dep->name); return; @@ -2570,8 +2643,6 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) { if ((dwc->link_state == DWC3_LINK_STATE_U3) && (next == DWC3_LINK_STATE_RESUME)) { - dwc3_trace(trace_dwc3_gadget, - "ignoring transition U3 -> Resume"); return; } } @@ -2705,11 +2776,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, break; case DWC3_DEVICE_EVENT_EOPF: /* It changed to be suspend event for version 2.30a and above */ - if (dwc->revision < DWC3_REVISION_230A) { - dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame"); - } else { - dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event"); - + if (dwc->revision >= DWC3_REVISION_230A) { /* * Ignore suspend event until the gadget enters into * USB_STATE_CONFIGURED state. 
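End Transfer handling in the hunks above is now a three-way handshake: dwc3_stop_active_transfer() marks the endpoint DWC3_EP_END_TRANSFER_PENDING after issuing the command (on the revisions that need it), the DWC3_DEPEVT_EPCMDCMPLT handler clears the flag and wakes dep->wait_end_transfer, and dwc3_gadget_stop() sleeps on that waitqueue with wait_event_lock_irq() so dwc->lock is released while sleeping and re-taken before it proceeds. A stripped-down sketch of the same pattern (names invented; assumes the waitqueue was initialized and all three paths run under one spinlock):

#include <linux/wait.h>
#include <linux/spinlock.h>

#define EP_END_XFER_PENDING	(1 << 0)

struct xfer_ep {
	wait_queue_head_t wait_end_transfer;	/* init_waitqueue_head() */
	unsigned int flags;
};

/* Command issued; completion arrives later as an endpoint event. */
static void end_transfer_issued(struct xfer_ep *ep)
{
	ep->flags |= EP_END_XFER_PENDING;
}

/* Endpoint Command Complete event: release any waiter. */
static void end_transfer_completed(struct xfer_ep *ep)
{
	ep->flags &= ~EP_END_XFER_PENDING;
	wake_up(&ep->wait_end_transfer);
}

/* Teardown: wait with the lock held; the macro drops and re-acquires
 * it around the sleep. */
static void wait_for_end_transfer(struct xfer_ep *ep, spinlock_t *lock)
{
	wait_event_lock_irq(ep->wait_end_transfer,
			    !(ep->flags & EP_END_XFER_PENDING),
			    *lock);
}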
@@ -2720,16 +2787,9 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, } break; case DWC3_DEVICE_EVENT_SOF: - dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame"); - break; case DWC3_DEVICE_EVENT_ERRATIC_ERROR: - dwc3_trace(trace_dwc3_gadget, "Erratic Error"); - break; case DWC3_DEVICE_EVENT_CMD_CMPL: - dwc3_trace(trace_dwc3_gadget, "Command Complete"); - break; case DWC3_DEVICE_EVENT_OVERFLOW: - dwc3_trace(trace_dwc3_gadget, "Overflow"); break; default: dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type); @@ -2739,7 +2799,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, static void dwc3_process_event_entry(struct dwc3 *dwc, const union dwc3_event *event) { - trace_dwc3_event(event->raw); + trace_dwc3_event(event->raw, dwc); /* Endpoint IRQ, handle it and return early */ if (event->type.is_devspec == 0) { @@ -2772,7 +2832,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) while (left > 0) { union dwc3_event event; - event.raw = *(u32 *) (evt->buf + evt->lpos); + event.raw = *(u32 *) (evt->cache + evt->lpos); dwc3_process_event_entry(dwc, &event); @@ -2785,10 +2845,8 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) * boundary so I worry about that once we try to handle * that. */ - evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE; + evt->lpos = (evt->lpos + 4) % evt->length; left -= 4; - - dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4); } evt->count = 0; @@ -2800,6 +2858,11 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) reg &= ~DWC3_GEVNTSIZ_INTMASK; dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); + if (dwc->imod_interval) { + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); + dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); + } + return ret; } @@ -2820,6 +2883,7 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) { struct dwc3 *dwc = evt->dwc; + u32 amount; u32 count; u32 reg; @@ -2843,6 +2907,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) reg |= DWC3_GEVNTSIZ_INTMASK; dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg); + amount = min(count, evt->length - evt->lpos); + memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount); + + if (amount < count) + memcpy(evt->cache, evt->buf, count - amount); + + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); + return IRQ_WAKE_THREAD; } @@ -2853,6 +2925,39 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt) return dwc3_check_event_buf(evt); } +static int dwc3_gadget_get_irq(struct dwc3 *dwc) +{ + struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); + int irq; + + irq = platform_get_irq_byname(dwc3_pdev, "peripheral"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq(dwc3_pdev, 0); + if (irq > 0) + goto out; + + if (irq != -EPROBE_DEFER) + dev_err(dwc->dev, "missing peripheral IRQ\n"); + + if (!irq) + irq = -EINVAL; + +out: + return irq; +} + /** * dwc3_gadget_init - Initializes gadget related registers * @dwc: pointer to our controller context structure @@ -2861,35 +2966,18 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt) */ int dwc3_gadget_init(struct dwc3 *dwc) { - int ret, irq; - struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); + int ret; + int irq; - irq = platform_get_irq_byname(dwc3_pdev, 
"peripheral"); - if (irq == -EPROBE_DEFER) - return irq; - - if (irq <= 0) { - irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); - if (irq == -EPROBE_DEFER) - return irq; - - if (irq <= 0) { - irq = platform_get_irq(dwc3_pdev, 0); - if (irq <= 0) { - if (irq != -EPROBE_DEFER) { - dev_err(dwc->dev, - "missing peripheral IRQ\n"); - } - if (!irq) - irq = -EINVAL; - return irq; - } - } + irq = dwc3_gadget_get_irq(dwc); + if (irq < 0) { + ret = irq; + goto err0; } dwc->irq_gadget = irq; - dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req), + dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), &dwc->ctrl_req_addr, GFP_KERNEL); if (!dwc->ctrl_req) { dev_err(dwc->dev, "failed to allocate ctrl request\n"); @@ -2897,8 +2985,9 @@ int dwc3_gadget_init(struct dwc3 *dwc) goto err0; } - dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, - &dwc->ep0_trb_addr, GFP_KERNEL); + dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev, + sizeof(*dwc->ep0_trb) * 2, + &dwc->ep0_trb_addr, GFP_KERNEL); if (!dwc->ep0_trb) { dev_err(dwc->dev, "failed to allocate ep0 trb\n"); ret = -ENOMEM; @@ -2911,7 +3000,7 @@ int dwc3_gadget_init(struct dwc3 *dwc) goto err2; } - dwc->ep0_bounce = dma_alloc_coherent(dwc->dev, + dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr, GFP_KERNEL); if (!dwc->ep0_bounce) { @@ -2926,6 +3015,8 @@ int dwc3_gadget_init(struct dwc3 *dwc) goto err4; } + init_completion(&dwc->ep0_in_setup); + dwc->gadget.ops = &dwc3_gadget_ops; dwc->gadget.speed = USB_SPEED_UNKNOWN; dwc->gadget.sg_supported = true; @@ -2949,8 +3040,7 @@ int dwc3_gadget_init(struct dwc3 *dwc) * composite.c that we are USB 2.0 + LPM ECN. */ if (dwc->revision < DWC3_REVISION_220A) - dwc3_trace(trace_dwc3_gadget, - "Changing max_speed on rev %08x", + dev_info(dwc->dev, "changing max_speed on rev %08x\n", dwc->revision); dwc->gadget.max_speed = dwc->maximum_speed; @@ -2983,18 +3073,18 @@ err5: err4: dwc3_gadget_free_endpoints(dwc); - dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, + dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, dwc->ep0_bounce, dwc->ep0_bounce_addr); err3: kfree(dwc->setup_buf); err2: - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, + dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); err1: - dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), + dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), dwc->ctrl_req, dwc->ctrl_req_addr); err0: @@ -3009,16 +3099,16 @@ void dwc3_gadget_exit(struct dwc3 *dwc) dwc3_gadget_free_endpoints(dwc); - dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, + dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, dwc->ep0_bounce, dwc->ep0_bounce_addr); kfree(dwc->setup_buf); kfree(dwc->zlp_buf); - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2, + dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2, dwc->ep0_trb, dwc->ep0_trb_addr); - dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req), + dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req), dwc->ctrl_req, dwc->ctrl_req_addr); } diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index e4a1d974a5ae..3129bcf74d7d 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -62,10 +62,7 @@ struct dwc3; static inline struct dwc3_request *next_request(struct list_head *list) { - if (list_empty(list)) - return NULL; - - return list_first_entry(list, struct dwc3_request, list); + return list_first_entry_or_null(list, struct dwc3_request, list); } static inline 
void dwc3_gadget_move_started_request(struct dwc3_request *req) diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index f6533c68fed1..487f0ff6ae25 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -19,6 +19,39 @@ #include "core.h" +static int dwc3_host_get_irq(struct dwc3 *dwc) +{ + struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); + int irq; + + irq = platform_get_irq_byname(dwc3_pdev, "host"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); + if (irq > 0) + goto out; + + if (irq == -EPROBE_DEFER) + goto out; + + irq = platform_get_irq(dwc3_pdev, 0); + if (irq > 0) + goto out; + + if (irq != -EPROBE_DEFER) + dev_err(dwc->dev, "missing host IRQ\n"); + + if (!irq) + irq = -EINVAL; + +out: + return irq; +} + int dwc3_host_init(struct dwc3 *dwc) { struct property_entry props[2]; @@ -27,39 +60,18 @@ int dwc3_host_init(struct dwc3 *dwc) struct resource *res; struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); - irq = platform_get_irq_byname(dwc3_pdev, "host"); - if (irq == -EPROBE_DEFER) + irq = dwc3_host_get_irq(dwc); + if (irq < 0) return irq; - if (irq <= 0) { - irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3"); - if (irq == -EPROBE_DEFER) - return irq; - - if (irq <= 0) { - irq = platform_get_irq(dwc3_pdev, 0); - if (irq <= 0) { - if (irq != -EPROBE_DEFER) { - dev_err(dwc->dev, - "missing host IRQ\n"); - } - if (!irq) - irq = -EINVAL; - return irq; - } else { - res = platform_get_resource(dwc3_pdev, - IORESOURCE_IRQ, 0); - } - } else { - res = platform_get_resource_byname(dwc3_pdev, - IORESOURCE_IRQ, - "dwc_usb3"); - } - - } else { + res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host"); + if (!res) res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, - "host"); - } + "dwc_usb3"); + if (!res) + res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0); + if (!res) + return -ENOMEM; dwc->xhci_resources[1].start = irq; dwc->xhci_resources[1].end = irq; @@ -72,11 +84,7 @@ int dwc3_host_init(struct dwc3 *dwc) return -ENOMEM; } - dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask); - xhci->dev.parent = dwc->dev; - xhci->dev.dma_mask = dwc->dev->dma_mask; - xhci->dev.dma_parms = dwc->dev->dma_parms; dwc->xhci = xhci; @@ -99,9 +107,9 @@ int dwc3_host_init(struct dwc3 *dwc) } phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy", - dev_name(&xhci->dev)); + dev_name(dwc->dev)); phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy", - dev_name(&xhci->dev)); + dev_name(dwc->dev)); ret = platform_device_add(xhci); if (ret) { @@ -112,9 +120,9 @@ int dwc3_host_init(struct dwc3 *dwc) return 0; err2: phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy", - dev_name(&xhci->dev)); + dev_name(dwc->dev)); phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy", - dev_name(&xhci->dev)); + dev_name(dwc->dev)); err1: platform_device_put(xhci); return ret; @@ -123,8 +131,8 @@ err1: void dwc3_host_exit(struct dwc3 *dwc) { phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy", - dev_name(&dwc->xhci->dev)); + dev_name(dwc->dev)); phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy", - dev_name(&dwc->xhci->dev)); + dev_name(dwc->dev)); platform_device_unregister(dwc->xhci); } diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h index a06f9a8fecc7..c69b06696824 100644 --- a/drivers/usb/dwc3/io.h +++ b/drivers/usb/dwc3/io.h @@ -40,8 +40,7 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset) * documentation, so we revert it back to the 
proper addresses, the * same way they are described on SNPS documentation */ - dwc3_trace(trace_dwc3_readl, "addr %p value %08x", - base - DWC3_GLOBALS_REGS_START + offset, value); + trace_dwc3_readl(base - DWC3_GLOBALS_REGS_START, offset, value); return value; } @@ -60,8 +59,7 @@ static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value) * documentation, so we revert it back to the proper addresses, the * same way they are described on SNPS documentation */ - dwc3_trace(trace_dwc3_writel, "addr %p value %08x", - base - DWC3_GLOBALS_REGS_START + offset, value); + trace_dwc3_writel(base - DWC3_GLOBALS_REGS_START, offset, value); } #endif /* __DRIVERS_USB_DWC3_IO_H */ diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h index d24cefd191b5..2b124f94d858 100644 --- a/drivers/usb/dwc3/trace.h +++ b/drivers/usb/dwc3/trace.h @@ -37,47 +37,66 @@ DECLARE_EVENT_CLASS(dwc3_log_msg, TP_printk("%s", __get_str(msg)) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_readl, +DEFINE_EVENT(dwc3_log_msg, dwc3_gadget, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_writel, +DEFINE_EVENT(dwc3_log_msg, dwc3_core, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_gadget, +DEFINE_EVENT(dwc3_log_msg, dwc3_ep0, TP_PROTO(struct va_format *vaf), TP_ARGS(vaf) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_core, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) +DECLARE_EVENT_CLASS(dwc3_log_io, + TP_PROTO(void *base, u32 offset, u32 value), + TP_ARGS(base, offset, value), + TP_STRUCT__entry( + __field(void *, base) + __field(u32, offset) + __field(u32, value) + ), + TP_fast_assign( + __entry->base = base; + __entry->offset = offset; + __entry->value = value; + ), + TP_printk("addr %p value %08x", __entry->base + __entry->offset, + __entry->value) ); -DEFINE_EVENT(dwc3_log_msg, dwc3_ep0, - TP_PROTO(struct va_format *vaf), - TP_ARGS(vaf) +DEFINE_EVENT(dwc3_log_io, dwc3_readl, + TP_PROTO(void *base, u32 offset, u32 value), + TP_ARGS(base, offset, value) +); + +DEFINE_EVENT(dwc3_log_io, dwc3_writel, + TP_PROTO(void *base, u32 offset, u32 value), + TP_ARGS(base, offset, value) ); DECLARE_EVENT_CLASS(dwc3_log_event, - TP_PROTO(u32 event), - TP_ARGS(event), + TP_PROTO(u32 event, struct dwc3 *dwc), + TP_ARGS(event, dwc), TP_STRUCT__entry( __field(u32, event) + __field(u32, ep0state) ), TP_fast_assign( __entry->event = event; + __entry->ep0state = dwc->ep0state; ), TP_printk("event (%08x): %s", __entry->event, - dwc3_decode_event(__entry->event)) + dwc3_decode_event(__entry->event, __entry->ep0state)) ); DEFINE_EVENT(dwc3_log_event, dwc3_event, - TP_PROTO(u32 event), - TP_ARGS(event) + TP_PROTO(u32 event, struct dwc3 *dwc), + TP_ARGS(event, dwc) ); DECLARE_EVENT_CLASS(dwc3_log_ctrl, @@ -237,6 +256,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb, __field(u32, bph) __field(u32, size) __field(u32, ctrl) + __field(u32, type) ), TP_fast_assign( snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name); @@ -247,11 +267,31 @@ DECLARE_EVENT_CLASS(dwc3_log_trb, __entry->bph = trb->bph; __entry->size = trb->size; __entry->ctrl = trb->ctrl; + __entry->type = usb_endpoint_type(dep->endpoint.desc); ), - TP_printk("%s: %d/%d trb %p buf %08x%08x size %d ctrl %08x (%c%c%c%c:%c%c:%s)", + TP_printk("%s: %d/%d trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)", __get_str(name), __entry->queued, __entry->allocated, __entry->trb, __entry->bph, __entry->bpl, - __entry->size, __entry->ctrl, + ({char *s; + int pcm = ((__entry->size >> 24) & 3) + 1; + switch (__entry->type) { + case 
USB_ENDPOINT_XFER_INT: + case USB_ENDPOINT_XFER_ISOC: + switch (pcm) { + case 1: + s = "1x "; + break; + case 2: + s = "2x "; + break; + case 3: + s = "3x "; + break; + } + default: + s = ""; + } s; }), + DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl, __entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h', __entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l', __entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c', @@ -301,6 +341,57 @@ DEFINE_EVENT(dwc3_log_trb, dwc3_complete_trb, TP_ARGS(dep, trb) ); +DECLARE_EVENT_CLASS(dwc3_log_ep, + TP_PROTO(struct dwc3_ep *dep), + TP_ARGS(dep), + TP_STRUCT__entry( + __dynamic_array(char, name, DWC3_MSG_MAX) + __field(unsigned, maxpacket) + __field(unsigned, maxpacket_limit) + __field(unsigned, max_streams) + __field(unsigned, maxburst) + __field(unsigned, flags) + __field(unsigned, direction) + __field(u8, trb_enqueue) + __field(u8, trb_dequeue) + ), + TP_fast_assign( + snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name); + __entry->maxpacket = dep->endpoint.maxpacket; + __entry->maxpacket_limit = dep->endpoint.maxpacket_limit; + __entry->max_streams = dep->endpoint.max_streams; + __entry->maxburst = dep->endpoint.maxburst; + __entry->flags = dep->flags; + __entry->direction = dep->direction; + __entry->trb_enqueue = dep->trb_enqueue; + __entry->trb_dequeue = dep->trb_dequeue; + ), + TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c%c:%c:%c", + __get_str(name), __entry->maxpacket, + __entry->maxpacket_limit, __entry->max_streams, + __entry->maxburst, __entry->trb_enqueue, + __entry->trb_dequeue, + __entry->flags & DWC3_EP_ENABLED ? 'E' : 'e', + __entry->flags & DWC3_EP_STALL ? 'S' : 's', + __entry->flags & DWC3_EP_WEDGE ? 'W' : 'w', + __entry->flags & DWC3_EP_BUSY ? 'B' : 'b', + __entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p', + __entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm', + __entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e', + __entry->direction ? 
'<' : '>' + ) +); + +DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_enable, + TP_PROTO(struct dwc3_ep *dep), + TP_ARGS(dep) +); + +DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_disable, + TP_PROTO(struct dwc3_ep *dep), + TP_ARGS(dep) +); + #endif /* __DWC3_TRACE_H */ /* this part has to be here */ diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 32176f779861..41ab61f9b6e0 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -201,7 +201,12 @@ ep_found: _ep->desc = chosen_desc; _ep->comp_desc = NULL; _ep->maxburst = 0; - _ep->mult = 0; + _ep->mult = 1; + + if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) || + usb_endpoint_xfer_int(_ep->desc))) + _ep->mult = usb_endpoint_maxp_mult(_ep->desc); + if (!want_comp_desc) return 0; @@ -218,7 +223,7 @@ ep_found: switch (usb_endpoint_type(_ep->desc)) { case USB_ENDPOINT_XFER_ISOC: /* mult: bits 1:0 of bmAttributes */ - _ep->mult = comp_desc->bmAttributes & 0x3; + _ep->mult = (comp_desc->bmAttributes & 0x3) + 1; case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_INT: _ep->maxburst = comp_desc->bMaxBurst + 1; @@ -2382,18 +2387,8 @@ EXPORT_SYMBOL_GPL(usb_composite_setup_continue); static char *composite_default_mfr(struct usb_gadget *gadget) { - char *mfr; - int len; - - len = snprintf(NULL, 0, "%s %s with %s", init_utsname()->sysname, - init_utsname()->release, gadget->name); - len++; - mfr = kmalloc(len, GFP_KERNEL); - if (!mfr) - return NULL; - snprintf(mfr, len, "%s %s with %s", init_utsname()->sysname, - init_utsname()->release, gadget->name); - return mfr; + return kasprintf(GFP_KERNEL, "%s %s with %s", init_utsname()->sysname, + init_utsname()->release, gadget->name); } void usb_composite_overwrite_options(struct usb_composite_dev *cdev, diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index e40d47d47d82..0780d8311ec6 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -266,7 +266,7 @@ static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req) { struct ffs_data *ffs = req->context; - complete_all(&ffs->ep0req_completion); + complete(&ffs->ep0req_completion); } static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) @@ -3225,11 +3225,11 @@ static bool ffs_func_req_match(struct usb_function *f, switch (creq->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: - return ffs_func_revmap_intf(func, - le16_to_cpu(creq->wIndex) >= 0); + return (ffs_func_revmap_intf(func, + le16_to_cpu(creq->wIndex)) >= 0); case USB_RECIP_ENDPOINT: - return ffs_func_revmap_ep(func, - le16_to_cpu(creq->wIndex) >= 0); + return (ffs_func_revmap_ep(func, + le16_to_cpu(creq->wIndex)) >= 0); default: return (bool) (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP); diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index e2966f87c860..7abd70b2a588 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -98,6 +98,60 @@ static struct hid_descriptor hidg_desc = { /*.desc[0].wDescriptorLenght = DYNAMIC */ }; +/* Super-Speed Support */ + +static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + /*.wMaxPacketSize = DYNAMIC */ + .bInterval = 4, /* FIXME: Add this field in the + * HID gadget configuration? 
+ * (struct hidg_func_descriptor) + */ +}; + +static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = { + .bLength = sizeof(hidg_ss_in_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ + /* .wBytesPerInterval = DYNAMIC */ +}; + +static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_INT, + /*.wMaxPacketSize = DYNAMIC */ + .bInterval = 4, /* FIXME: Add this field in the + * HID gadget configuration? + * (struct hidg_func_descriptor) + */ +}; + +static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = { + .bLength = sizeof(hidg_ss_out_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ + /* .wBytesPerInterval = DYNAMIC */ +}; + +static struct usb_descriptor_header *hidg_ss_descriptors[] = { + (struct usb_descriptor_header *)&hidg_interface_desc, + (struct usb_descriptor_header *)&hidg_desc, + (struct usb_descriptor_header *)&hidg_ss_in_ep_desc, + (struct usb_descriptor_header *)&hidg_ss_in_comp_desc, + (struct usb_descriptor_header *)&hidg_ss_out_ep_desc, + (struct usb_descriptor_header *)&hidg_ss_out_comp_desc, + NULL, +}; + /* High-Speed Support */ static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = { @@ -624,8 +678,14 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) /* set descriptor dynamic values */ hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass; hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol; + hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + hidg_ss_in_comp_desc.wBytesPerInterval = + cpu_to_le16(hidg->report_length); hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + hidg_ss_out_comp_desc.wBytesPerInterval = + cpu_to_le16(hidg->report_length); hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); /* @@ -641,8 +701,13 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg_hs_out_ep_desc.bEndpointAddress = hidg_fs_out_ep_desc.bEndpointAddress; + hidg_ss_in_ep_desc.bEndpointAddress = + hidg_fs_in_ep_desc.bEndpointAddress; + hidg_ss_out_ep_desc.bEndpointAddress = + hidg_fs_out_ep_desc.bEndpointAddress; + status = usb_assign_descriptors(f, hidg_fs_descriptors, - hidg_hs_descriptors, NULL, NULL); + hidg_hs_descriptors, hidg_ss_descriptors, NULL); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 639603722709..e8008fa35e1e 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -998,7 +998,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm) /* Merge the skbs */ swap(skb2, ncm->skb_tx_data); if (ncm->skb_tx_data) { - dev_kfree_skb_any(ncm->skb_tx_data); + dev_consume_skb_any(ncm->skb_tx_data); ncm->skb_tx_data = NULL; } @@ -1009,7 +1009,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm) /* Copy NTB across. 
*/ ntb_iter = (void *) skb_put(skb2, ncm->skb_tx_ndp->len); memcpy(ntb_iter, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len); - dev_kfree_skb_any(ncm->skb_tx_ndp); + dev_consume_skb_any(ncm->skb_tx_ndp); ncm->skb_tx_ndp = NULL; /* Insert zero'd datagram. */ @@ -1078,6 +1078,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port, if (!ncm->skb_tx_data) goto err; + ncm->skb_tx_data->dev = ncm->netdev; ntb_data = (void *) skb_put(ncm->skb_tx_data, ncb_len); memset(ntb_data, 0, ncb_len); /* dwSignature */ @@ -1096,6 +1097,8 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port, GFP_ATOMIC); if (!ncm->skb_tx_ndp) goto err; + + ncm->skb_tx_ndp->dev = ncm->netdev; ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp, opts->ndp_size); memset(ntb_ndp, 0, ncb_len); @@ -1133,7 +1136,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port, memset(ntb_data, 0, dgram_pad); ntb_data = (void *) skb_put(ncm->skb_tx_data, skb->len); memcpy(ntb_data, skb->data, skb->len); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); skb = NULL; } else if (ncm->skb_tx_data && ncm->timer_force_tx) { @@ -1329,7 +1332,7 @@ static int ncm_unwrap_ntb(struct gether *port, } while (ndp_len > 2 * (opts->dgram_item_len * 2)); } while (ndp_index); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); VDBG(port->func.config->cdev, "Parsed NTB with %d frames\n", dgram_counter); diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index cd214ec8a601..969cfe741380 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -1067,13 +1067,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); if (!agdev->out_ep) { dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - goto err; + return ret; } agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc); if (!agdev->in_ep) { dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - goto err; + return ret; } uac2->p_prm.uac2 = uac2; @@ -1091,7 +1091,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL, NULL); if (ret) - goto err; + return ret; prm = &agdev->uac2.c_prm; prm->max_psize = hs_epout_desc.wMaxPacketSize; @@ -1106,19 +1106,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); if (!prm->rbuf) { prm->max_psize = 0; - goto err_free_descs; + goto err; } ret = alsa_uac2_init(agdev); if (ret) - goto err_free_descs; + goto err; return 0; -err_free_descs: - usb_free_all_descriptors(fn); err: kfree(agdev->uac2.p_prm.rbuf); kfree(agdev->uac2.c_prm.rbuf); +err_free_descs: + usb_free_all_descriptors(fn); return -EINVAL; } diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 766c328c15c0..a3b5e468b116 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -80,8 +80,7 @@ static const struct file_operations rndis_proc_fops; #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ /* supported OIDs */ -static const u32 oid_supported_list[] = -{ +static const u32 oid_supported_list[] = { /* the general stuff */ RNDIS_OID_GEN_SUPPORTED_LIST, RNDIS_OID_GEN_HARDWARE_STATUS, diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index ef92eb66d8ad..21e0430ffb98 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -22,8 +22,7 @@ #define 
RNDIS_MAXIMUM_FRAME_SIZE 1518 #define RNDIS_MAX_TOTAL_SIZE 1558 -typedef struct rndis_init_msg_type -{ +typedef struct rndis_init_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -32,8 +31,7 @@ typedef struct rndis_init_msg_type __le32 MaxTransferSize; } rndis_init_msg_type; -typedef struct rndis_init_cmplt_type -{ +typedef struct rndis_init_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -49,15 +47,13 @@ typedef struct rndis_init_cmplt_type __le32 AFListSize; } rndis_init_cmplt_type; -typedef struct rndis_halt_msg_type -{ +typedef struct rndis_halt_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; } rndis_halt_msg_type; -typedef struct rndis_query_msg_type -{ +typedef struct rndis_query_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -67,8 +63,7 @@ typedef struct rndis_query_msg_type __le32 DeviceVcHandle; } rndis_query_msg_type; -typedef struct rndis_query_cmplt_type -{ +typedef struct rndis_query_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -77,8 +72,7 @@ typedef struct rndis_query_cmplt_type __le32 InformationBufferOffset; } rndis_query_cmplt_type; -typedef struct rndis_set_msg_type -{ +typedef struct rndis_set_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; @@ -88,31 +82,27 @@ typedef struct rndis_set_msg_type __le32 DeviceVcHandle; } rndis_set_msg_type; -typedef struct rndis_set_cmplt_type -{ +typedef struct rndis_set_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; __le32 Status; } rndis_set_cmplt_type; -typedef struct rndis_reset_msg_type -{ +typedef struct rndis_reset_msg_type { __le32 MessageType; __le32 MessageLength; __le32 Reserved; } rndis_reset_msg_type; -typedef struct rndis_reset_cmplt_type -{ +typedef struct rndis_reset_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 Status; __le32 AddressingReset; } rndis_reset_cmplt_type; -typedef struct rndis_indicate_status_msg_type -{ +typedef struct rndis_indicate_status_msg_type { __le32 MessageType; __le32 MessageLength; __le32 Status; @@ -120,23 +110,20 @@ typedef struct rndis_indicate_status_msg_type __le32 StatusBufferOffset; } rndis_indicate_status_msg_type; -typedef struct rndis_keepalive_msg_type -{ +typedef struct rndis_keepalive_msg_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; } rndis_keepalive_msg_type; -typedef struct rndis_keepalive_cmplt_type -{ +typedef struct rndis_keepalive_cmplt_type { __le32 MessageType; __le32 MessageLength; __le32 RequestID; __le32 Status; } rndis_keepalive_cmplt_type; -struct rndis_packet_msg_type -{ +struct rndis_packet_msg_type { __le32 MessageType; __le32 MessageLength; __le32 DataOffset; @@ -150,8 +137,7 @@ struct rndis_packet_msg_type __le32 Reserved; } __attribute__ ((packed)); -struct rndis_config_parameter -{ +struct rndis_config_parameter { __le32 ParameterNameOffset; __le32 ParameterNameLength; __le32 ParameterType; @@ -160,23 +146,20 @@ struct rndis_config_parameter }; /* implementation specific */ -enum rndis_state -{ +enum rndis_state { RNDIS_UNINITIALIZED, RNDIS_INITIALIZED, RNDIS_DATA_INITIALIZED, }; -typedef struct rndis_resp_t -{ +typedef struct rndis_resp_t { struct list_head list; u8 *buf; u32 length; int send; } rndis_resp_t; -typedef struct rndis_params -{ +typedef struct rndis_params { int confignr; u8 used; u16 saved_filter; diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index fe1811650dbc..119a2e5848e8 100644 --- 
a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -224,7 +224,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) if (dev->port_usb->is_fixed) size = max_t(size_t, size, dev->port_usb->fixed_out_len); - skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags); + skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags); if (skb == NULL) { DBG(dev, "no rx skb\n"); goto enomem; @@ -455,16 +455,17 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req) /* FALLTHROUGH */ case -ECONNRESET: /* unlink */ case -ESHUTDOWN: /* disconnect etc */ + dev_kfree_skb_any(skb); break; case 0: dev->net->stats.tx_bytes += skb->len; + dev_consume_skb_any(skb); } dev->net->stats.tx_packets++; spin_lock(&dev->req_lock); list_add(&req->list, &dev->tx_reqs); spin_unlock(&dev->req_lock); - dev_kfree_skb_any(skb); atomic_dec(&dev->tx_qlen); if (netif_carrier_ok(dev->net)) @@ -588,14 +589,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, req->length = length; - /* throttle high/super speed IRQ rate back slightly */ - if (gadget_is_dualspeed(dev->gadget)) - req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH || - dev->gadget->speed == USB_SPEED_SUPER)) && - !list_empty(&dev->tx_reqs)) - ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) - : 0; - retval = usb_ep_queue(in, req, GFP_ATOMIC); switch (retval) { default: diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 62ec842874aa..000677c991b0 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -1256,7 +1256,8 @@ static void gserial_console_exit(void) struct gscons_info *info = &gscons_info; unregister_console(&gserial_cons); - kthread_stop(info->console_thread); + if (info->console_thread != NULL) + kthread_stop(info->console_thread); gs_buf_free(&info->con_buf); } diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h index 7d3bb6272e06..11d70dead32b 100644 --- a/drivers/usb/gadget/function/uvc.h +++ b/drivers/usb/gadget/function/uvc.h @@ -26,14 +26,12 @@ #define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5) #define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5) -struct uvc_request_data -{ +struct uvc_request_data { __s32 length; __u8 data[60]; }; -struct uvc_event -{ +struct uvc_event { union { enum usb_device_speed speed; struct usb_ctrlrequest req; @@ -104,8 +102,7 @@ extern unsigned int uvc_gadget_trace_param; * Structures */ -struct uvc_video -{ +struct uvc_video { struct usb_ep *ep; /* Frame parameters */ @@ -134,15 +131,13 @@ struct uvc_video unsigned int fid; }; -enum uvc_state -{ +enum uvc_state { UVC_STATE_DISCONNECTED, UVC_STATE_CONNECTED, UVC_STATE_STREAMING, }; -struct uvc_device -{ +struct uvc_device { struct video_device vdev; struct v4l2_device v4l2_dev; enum uvc_state state; @@ -175,8 +170,7 @@ static inline struct uvc_device *to_uvc(struct usb_function *f) return container_of(f, struct uvc_device, func); } -struct uvc_file_handle -{ +struct uvc_file_handle { struct v4l2_fh vfh; struct uvc_video *device; }; diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c index f4ccbd56f4d2..3e22b45687d3 100644 --- a/drivers/usb/gadget/function/uvc_v4l2.c +++ b/drivers/usb/gadget/function/uvc_v4l2.c @@ -53,8 +53,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data) * V4L2 ioctls */ -struct uvc_format -{ +struct uvc_format { u8 bpp; u32 fcc; }; diff --git 
a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index 3d0d5d94a62f..0f01c04d7cbd 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -243,7 +243,7 @@ uvc_video_alloc_requests(struct uvc_video *video) req_size = video->ep->maxpacket * max_t(unsigned int, video->ep->maxburst, 1) - * (video->ep->mult + 1); + * (video->ep->mult); for (i = 0; i < UVC_NUM_REQUESTS; ++i) { video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL); diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 1ef7a9a9d7f5..f3212db9bc37 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -529,7 +529,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc); - maxpacket = usb_endpoint_maxp(desc) & 0x7ff; + maxpacket = usb_endpoint_maxp(desc); if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index) || ep->index == 0 @@ -573,7 +573,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) * Bits 11:12 specify number of _additional_ * transactions per microframe. */ - nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1; + nr_trans = usb_endpoint_maxp_mult(desc); if (nr_trans > 3) return -EINVAL; diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c index 4d5e9188beae..6e920f1dce02 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c +++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c @@ -182,7 +182,7 @@ int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep) usb_endpoint_xfer_int(desc)) { param2 |= si; - mbs = (usb_endpoint_maxp(desc) & 0x1800) >> 11; + mbs = usb_endpoint_maxp_mult(desc); param2 |= mbs << MB_SHIFT; } break; diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index ccaa74ab6c0e..ff1ef24d1777 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -446,7 +446,7 @@ static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds) bd_xfr->start_bdi = bd_list->eqp_bdi; bd = bdi_to_bd(ep, bd_list->eqp_bdi); req_len = req->usb_req.length; - maxp = usb_endpoint_maxp(ep->desc) & 0x7ff; + maxp = usb_endpoint_maxp(ep->desc); tfs = roundup(req->usb_req.length, maxp); tfs = tfs/maxp; dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n", diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 77d07904f932..02b14e91ae6c 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -503,7 +503,7 @@ static int dummy_enable(struct usb_ep *_ep, * maximum packet size. * For SS devices the wMaxPacketSize is limited by 1024. */ - max = usb_endpoint_maxp(desc) & 0x7ff; + max = usb_endpoint_maxp(desc); /* drivers must not request bad settings, since lower levels * (hardware or its drivers) may not check. 
some endpoints @@ -1483,8 +1483,7 @@ static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep) int tmp; /* high bandwidth mode */ - tmp = usb_endpoint_maxp(ep->desc); - tmp = (tmp >> 11) & 0x03; + tmp = usb_endpoint_maxp_mult(ep->desc); tmp *= 8 /* applies to entire frame */; limit += limit * tmp; } diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index aab5221d6c2e..71094e479a96 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -585,8 +585,7 @@ static int fsl_ep_enable(struct usb_ep *_ep, break; case USB_ENDPOINT_XFER_ISOC: /* Calculate transactions needed for high bandwidth iso */ - mult = (unsigned char)(1 + ((max >> 11) & 0x03)); - max = max & 0x7ff; /* bit 0~10 */ + mult = usb_endpoint_maxp_mult(desc); /* 3 transactions at most */ if (mult > 3) goto en_done; diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c index 948845c90e47..42ff308578df 100644 --- a/drivers/usb/gadget/udc/fusb300_udc.c +++ b/drivers/usb/gadget/udc/fusb300_udc.c @@ -218,7 +218,7 @@ static int config_ep(struct fusb300_ep *ep, (info.type == USB_ENDPOINT_XFER_ISOC)) { info.interval = desc->bInterval; if (info.type == USB_ENDPOINT_XFER_ISOC) - info.bw_num = ((desc->wMaxPacketSize & 0x1800) >> 11); + info.bw_num = usb_endpoint_maxp_mult(desc); } ep_fifo_setting(fusb300, info); diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 39b7136d31d9..b16f8af34050 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -1539,7 +1539,7 @@ static int gr_ep_enable(struct usb_ep *_ep, * additional transactions. */ max = 0x7ff & usb_endpoint_maxp(desc); - nt = 0x3 & (usb_endpoint_maxp(desc) >> 11); + nt = usb_endpoint_maxp_mult(desc) - 1; buffer_size = GR_BUFFER_SIZE(epctrl); if (nt && (mode == 0 || mode == 2)) { dev_err(dev->dev, diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c index b9e19a591322..8d726bd767fd 100644 --- a/drivers/usb/gadget/udc/mv_u3d_core.c +++ b/drivers/usb/gadget/udc/mv_u3d_core.c @@ -462,6 +462,12 @@ static int mv_u3d_req_to_trb(struct mv_u3d_req *req) req->trb_head->trb_hw, trb_num * sizeof(*trb_hw), DMA_BIDIRECTIONAL); + if (dma_mapping_error(u3d->gadget.dev.parent, + req->trb_head->trb_dma)) { + kfree(req->trb_head->trb_hw); + kfree(req->trb_head); + return -EFAULT; + } req->chain = 1; } @@ -487,30 +493,32 @@ mv_u3d_start_queue(struct mv_u3d_ep *ep) ret = usb_gadget_map_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep)); if (ret) - return ret; + goto break_processing; req->req.status = -EINPROGRESS; req->req.actual = 0; req->trb_count = 0; - /* build trbs and push them to device queue */ - if (!mv_u3d_req_to_trb(req)) { - ret = mv_u3d_queue_trb(ep, req); - if (ret) { - ep->processing = 0; - return ret; - } - } else { - ep->processing = 0; + /* build trbs */ + ret = mv_u3d_req_to_trb(req); + if (ret) { dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__); - return -ENOMEM; + goto break_processing; } + /* and push them to device queue */ + ret = mv_u3d_queue_trb(ep, req); + if (ret) + goto break_processing; + /* irq handler advances the queue */ - if (req) - list_add_tail(&req->queue, &ep->queue); + list_add_tail(&req->queue, &ep->queue); return 0; + +break_processing: + ep->processing = 0; + return ret; } static int mv_u3d_ep_enable(struct usb_ep *_ep, diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index 
ce73b3552269..d82a91bddbd9 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -494,8 +494,7 @@ static int mv_ep_enable(struct usb_ep *_ep, break; case USB_ENDPOINT_XFER_ISOC: /* Calculate transactions needed for high bandwidth iso */ - mult = (unsigned char)(1 + ((max >> 11) & 0x03)); - max = max & 0x7ff; /* bit 0~10 */ + mult = usb_endpoint_maxp_mult(desc); /* 3 transactions at most */ if (mult > 3) goto en_done; diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 7c6113432093..078c91d546e0 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -202,10 +202,10 @@ net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; - max = usb_endpoint_maxp(desc) & 0x1fff; + max = usb_endpoint_maxp(desc); spin_lock_irqsave(&dev->lock, flags); - _ep->maxpacket = max & 0x7fff; + _ep->maxpacket = max; ep->desc = desc; /* net2272_ep_reset() has already been called */ diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 61c938c36d88..85504419ab31 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -224,14 +224,14 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) } /* sanity check ep-e/ep-f since their fifos are small */ - max = usb_endpoint_maxp(desc) & 0x1fff; + max = usb_endpoint_maxp(desc); if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) { ret = -ERANGE; goto print_err; } spin_lock_irqsave(&dev->lock, flags); - _ep->maxpacket = max & 0x7ff; + _ep->maxpacket = max; ep->desc = desc; /* ep_reset() has already been called */ @@ -1839,7 +1839,7 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr, ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK, (t & USB_DIR_IN) ? "in" : "out", type_string(d->bmAttributes), - usb_endpoint_maxp(d) & 0x1fff, + usb_endpoint_maxp(d), ep->dma ? "dma" : "pio", ep->fifo_size ); } else /* ep0 should only have one transfer queued */ diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index eb3571ee59e3..4643a01262b4 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -1047,10 +1047,10 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep, if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; - max = usb_endpoint_maxp(desc) & 0x1fff; + max = usb_endpoint_maxp(desc); local_irq_save(flags); - _ep->maxpacket = max & 0x7ff; + _ep->maxpacket = max; ep->ep.desc = desc; ep->halted = 0; ep->bEndpointAddress = desc->bEndpointAddress; diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index eca3710d8fc4..8f3f055c05fa 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -550,11 +550,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) /*-------------------------------------------------------------------------*/ -// high bandwidth multiplier, as encoded in highspeed endpoint descriptors -#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) -// ... and packet size, for any kind of endpoint descriptor -#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) - /* * reverse of qh_urb_transaction: free a list of TDs. * used for cleanup after errors, before HC sees an URB's TDs. 
@@ -651,7 +646,7 @@ qh_urb_transaction ( token |= (1 /* "in" */ << 8); /* else it's already initted to "out" pid (0 << 8) */ - maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); + maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input); /* * buffer gets wrapped in one or more qtds; @@ -770,9 +765,11 @@ qh_make ( gfp_t flags ) { struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); + struct usb_host_endpoint *ep; u32 info1 = 0, info2 = 0; int is_input, type; int maxp = 0; + int mult; struct usb_tt *tt = urb->dev->tt; struct ehci_qh_hw *hw; @@ -787,13 +784,15 @@ qh_make ( is_input = usb_pipein (urb->pipe); type = usb_pipetype (urb->pipe); - maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input); + ep = usb_pipe_endpoint (urb->dev, urb->pipe); + maxp = usb_endpoint_maxp (&ep->desc); + mult = usb_endpoint_maxp_mult (&ep->desc); /* 1024 byte maxpacket is a hardware ceiling. High bandwidth * acts like up to 3KB, but is built from smaller packets. */ - if (max_packet(maxp) > 1024) { - ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp)); + if (maxp > 1024) { + ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp); goto done; } @@ -809,8 +808,7 @@ qh_make ( unsigned tmp; qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, - is_input, 0, - hb_mult(maxp) * max_packet(maxp))); + is_input, 0, mult * maxp)); qh->ps.phase = NO_FRAME; if (urb->dev->speed == USB_SPEED_HIGH) { @@ -854,7 +852,7 @@ qh_make ( think_time = tt ? tt->think_time : 0; qh->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time (urb->dev->speed, - is_input, 0, max_packet (maxp))); + is_input, 0, maxp)); if (urb->interval > ehci->periodic_size) urb->interval = ehci->periodic_size; qh->ps.period = urb->interval; @@ -925,11 +923,11 @@ qh_make ( * to help them do so. So now people expect to use * such nonconformant devices with Linux too; sigh. 
*/ - info1 |= max_packet(maxp) << 16; + info1 |= maxp << 16; info2 |= (EHCI_TUNE_MULT_HS << 30); } else { /* PIPE_INTERRUPT */ - info1 |= max_packet (maxp) << 16; - info2 |= hb_mult (maxp) << 30; + info1 |= maxp << 16; + info2 |= mult << 30; } break; default: @@ -1221,7 +1219,7 @@ static int submit_single_step_set_feature( token |= (1 /* "in" */ << 8); /*This is IN stage*/ - maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0)); + maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0); qtd_fill(ehci, qtd, buf, len, token, maxpacket); diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 1dfe54f14737..980a6b3b2da2 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1064,11 +1064,10 @@ iso_stream_init( /* knows about ITD vs SITD */ if (dev->speed == USB_SPEED_HIGH) { - unsigned multi = hb_mult(maxp); + unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc); stream->highspeed = 1; - maxp = max_packet(maxp); buf1 |= maxp; maxp *= multi; diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c index bd6cf3c9ec60..b3de8bc6cbeb 100644 --- a/drivers/usb/host/ohci-da8xx.c +++ b/drivers/usb/host/ohci-da8xx.c @@ -27,7 +27,7 @@ #include "ohci.h" #define DRIVER_DESC "DA8XX" -#define DRV_NAME "ohci" +#define DRV_NAME "ohci-da8xx" static struct hc_driver __read_mostly ohci_da8xx_hc_driver; diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c index c9e315c6808a..ed8a762b8670 100644 --- a/drivers/usb/host/ohci-mem.c +++ b/drivers/usb/host/ohci-mem.c @@ -88,10 +88,9 @@ td_alloc (struct ohci_hcd *hc, gfp_t mem_flags) dma_addr_t dma; struct td *td; - td = dma_pool_alloc (hc->td_cache, mem_flags, &dma); + td = dma_pool_zalloc (hc->td_cache, mem_flags, &dma); if (td) { /* in case hc fetches it, make it look dead */ - memset (td, 0, sizeof *td); td->hwNextTD = cpu_to_hc32 (hc, dma); td->td_dma = dma; /* hashed in td_fill */ @@ -122,9 +121,8 @@ ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags) dma_addr_t dma; struct ed *ed; - ed = dma_pool_alloc (hc->ed_cache, mem_flags, &dma); + ed = dma_pool_zalloc (hc->ed_cache, mem_flags, &dma); if (ed) { - memset (ed, 0, sizeof (*ed)); INIT_LIST_HEAD (&ed->td_list); ed->dma = dma; } diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index d793f548dfe2..a9a1e4c40480 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -995,6 +995,14 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev) } val = readl(base + ext_cap_offset); + /* Auto handoff never worked for these devices. Force it and continue */ + if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) || + (pdev->vendor == PCI_VENDOR_ID_RENESAS + && pdev->device == 0x0014)) { + val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED; + writel(val, base + ext_cap_offset); + } + /* If the BIOS owns the HC, signal that the OS wants it, and wait */ if (val & XHCI_HC_BIOS_OWNED) { writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 6afe32381209..321de2e0161b 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1032,7 +1032,6 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, goto fail; dev->num_rings_cached = 0; - init_completion(&dev->cmd_completion); dev->udev = udev; /* Point to output device context in dcbaa. 
*/ @@ -1370,7 +1369,7 @@ static u32 xhci_get_endpoint_max_burst(struct usb_device *udev, if (udev->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(&ep->desc) || usb_endpoint_xfer_int(&ep->desc))) - return (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; + return usb_endpoint_maxp_mult(&ep->desc) - 1; return 0; } @@ -1415,10 +1414,10 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev, else if (udev->speed >= USB_SPEED_SUPER) return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval); - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); - max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; + max_packet = usb_endpoint_maxp(&ep->desc); + max_burst = usb_endpoint_maxp_mult(&ep->desc); /* A 0 in max burst means 1 transfer per ESIT */ - return max_packet * (max_burst + 1); + return max_packet * max_burst; } /* Set up an endpoint with one ring segment. Do not allocate stream rings. @@ -1461,7 +1460,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, max_esit_payload = xhci_get_max_esit_payload(udev, ep); interval = xhci_get_endpoint_interval(udev, ep); mult = xhci_get_endpoint_mult(udev, ep); - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); + max_packet = usb_endpoint_maxp(&ep->desc); max_burst = xhci_get_endpoint_max_burst(udev, ep); avg_trb_len = max_esit_payload; @@ -2384,7 +2383,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * "physically contiguous and 64-byte (cache line) aligned". */ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, - GFP_KERNEL); + flags); if (!xhci->dcbaa) goto fail; memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); @@ -2480,7 +2479,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->erst.entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, - GFP_KERNEL); + flags); if (!xhci->erst.entries) goto fail; xhci_dbg_trace(xhci, trace_xhci_dbg_init, @@ -2536,7 +2535,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * something other than the default (~1ms minimum between interrupts). * See section 5.5.1.2. 
*/ - init_completion(&xhci->addr_dev); for (i = 0; i < MAX_HC_SLOTS; ++i) xhci->devs[i] = NULL; for (i = 0; i < USB_MAXCHILDREN; ++i) { diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 73f763c4f5f5..6e7ddf6cafae 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -337,7 +337,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n", __func__, usb_endpoint_type(&ep->desc), udev->speed, - GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)), + usb_endpoint_maxp(&ep->desc), usb_endpoint_dir_in(&ep->desc), ep); if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) { @@ -403,7 +403,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg(xhci, "%s() type:%d, speed:%d, mpks:%d, dir:%d, ep:%p\n", __func__, usb_endpoint_type(&ep->desc), udev->speed, - GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)), + usb_endpoint_maxp(&ep->desc), usb_endpoint_dir_in(&ep->desc), ep); if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index ed56bf9ed885..ddfab301e366 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -100,6 +100,12 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = { .plat_start = xhci_rcar_start, }; +static const struct xhci_plat_priv xhci_plat_renesas_rcar_r8a7796 = { + .firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3, + .init_quirk = xhci_rcar_init_quirk, + .plat_start = xhci_rcar_start, +}; + static const struct of_device_id usb_xhci_of_match[] = { { .compatible = "generic-xhci", @@ -124,6 +130,9 @@ static const struct of_device_id usb_xhci_of_match[] = { .compatible = "renesas,xhci-r8a7795", .data = &xhci_plat_renesas_rcar_gen3, }, { + .compatible = "renesas,xhci-r8a7796", + .data = &xhci_plat_renesas_rcar_r8a7796, + }, { .compatible = "renesas,rcar-gen2-xhci", .data = &xhci_plat_renesas_rcar_gen2, }, { diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index 0e4535e632ec..d28df386e780 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -19,6 +19,8 @@ #include "xhci-rcar.h" /* +* - The V3 firmware is for r8a7796 (with good performance). +* - The V2 firmware can be used on both r8a7795 (es1.x) and r8a7796. * - The V2 firmware is possible to use on R-Car Gen2. However, the V2 causes * performance degradation. So, this driver continues to use the V1 if R-Car * Gen2. 
@@ -26,6 +28,7 @@ */ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V1); MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V2); +MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3); /*** Register Offset ***/ #define RCAR_USB3_INT_ENA 0x224 /* Interrupt Enable */ @@ -92,6 +95,7 @@ static int xhci_rcar_is_gen3(struct device *dev) struct device_node *node = dev->of_node; return of_device_is_compatible(node, "renesas,xhci-r8a7795") || + of_device_is_compatible(node, "renesas,xhci-r8a7796") || of_device_is_compatible(node, "renesas,rcar-gen3-xhci"); } diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h index 2941a25cfe98..d2ffe20401cf 100644 --- a/drivers/usb/host/xhci-rcar.h +++ b/drivers/usb/host/xhci-rcar.h @@ -13,6 +13,7 @@ #define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem" #define XHCI_RCAR_FIRMWARE_NAME_V2 "r8a779x_usb3_v2.dlmem" +#define XHCI_RCAR_FIRMWARE_NAME_V3 "r8a779x_usb3_v3.dlmem" #if IS_ENABLED(CONFIG_USB_XHCI_RCAR) void xhci_rcar_start(struct usb_hcd *hcd); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 797137e26549..bdf6b13d9b67 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -89,6 +89,11 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, return seg->dma + (segment_offset * sizeof(*trb)); } +static bool trb_is_noop(union xhci_trb *trb) +{ + return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); +} + static bool trb_is_link(union xhci_trb *trb) { return TRB_TYPE_LINK_LE32(trb->link.control); @@ -110,6 +115,20 @@ static bool link_trb_toggles_cycle(union xhci_trb *trb) return le32_to_cpu(trb->link.control) & LINK_TOGGLE; } +static bool last_td_in_urb(struct xhci_td *td) +{ + struct urb_priv *urb_priv = td->urb->hcpriv; + + return urb_priv->td_cnt == urb_priv->length; +} + +static void inc_td_cnt(struct urb *urb) +{ + struct urb_priv *urb_priv = urb->hcpriv; + + urb_priv->td_cnt++; +} + /* Updates trb to point to the next TRB in the ring, and updates seg if the next * TRB is in a new segment. This does not skip over link TRBs, and it does not * effect the ring dequeue or enqueue pointers. @@ -303,7 +322,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) "maybe the host is dead\n"); del_timer(&xhci->cmd_timer); xhci->xhc_state |= XHCI_STATE_DYING; - xhci_quiesce(xhci); xhci_halt(xhci); return -ESHUTDOWN; } @@ -473,9 +491,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, if (new_deq == cur_td->last_trb) td_last_trb_found = true; - if (cycle_found && - TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) && - new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE)) + if (cycle_found && trb_is_link(new_deq) && + link_trb_toggles_cycle(new_deq)) state->new_cycle_state ^= 0x1; next_trb(xhci, ep_ring, &new_seg, &new_deq); @@ -511,54 +528,32 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, * of this TD.) This is used to remove partially enqueued isoc TDs from a ring. */ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, - struct xhci_td *cur_td, bool flip_cycle) + struct xhci_td *td, bool flip_cycle) { - struct xhci_segment *cur_seg; - union xhci_trb *cur_trb; - - for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb; - true; - next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) { - /* Unchain any chained Link TRBs, but - * leave the pointers intact. - */ - cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN); - /* Flip the cycle bit (link TRBs can't be the first - * or last TRB). 
- */ - if (flip_cycle) - cur_trb->generic.field[3] ^= - cpu_to_le32(TRB_CYCLE); - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Cancel (unchain) link TRB"); - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Address = %p (0x%llx dma); " - "in seg %p (0x%llx dma)", - cur_trb, - (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb), - cur_seg, - (unsigned long long)cur_seg->dma); + struct xhci_segment *seg = td->start_seg; + union xhci_trb *trb = td->first_trb; + + while (1) { + if (trb_is_link(trb)) { + /* unchain chained link TRBs */ + trb->link.control &= cpu_to_le32(~TRB_CHAIN); } else { - cur_trb->generic.field[0] = 0; - cur_trb->generic.field[1] = 0; - cur_trb->generic.field[2] = 0; + trb->generic.field[0] = 0; + trb->generic.field[1] = 0; + trb->generic.field[2] = 0; /* Preserve only the cycle bit of this TRB */ - cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); - /* Flip the cycle bit except on the first or last TRB */ - if (flip_cycle && cur_trb != cur_td->first_trb && - cur_trb != cur_td->last_trb) - cur_trb->generic.field[3] ^= - cpu_to_le32(TRB_CYCLE); - cur_trb->generic.field[3] |= cpu_to_le32( + trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); + trb->generic.field[3] |= cpu_to_le32( TRB_TYPE(TRB_TR_NOOP)); - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "TRB to noop at offset 0x%llx", - (unsigned long long) - xhci_trb_virt_to_dma(cur_seg, cur_trb)); } - if (cur_trb == cur_td->last_trb) + /* flip cycle if asked to */ + if (flip_cycle && trb != td->first_trb && trb != td->last_trb) + trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); + + if (trb == td->last_trb) break; + + next_trb(xhci, ep_ring, &seg, &trb); } } @@ -574,39 +569,33 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, ep->stop_cmds_pending--; } -/* Must be called with xhci->lock held in interrupt context */ +/* + * Must be called with xhci->lock held in interrupt context, + * releases and re-acquires xhci->lock + */ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci, - struct xhci_td *cur_td, int status) + struct xhci_td *cur_td, int status) { - struct usb_hcd *hcd; - struct urb *urb; - struct urb_priv *urb_priv; + struct urb *urb = cur_td->urb; + struct urb_priv *urb_priv = urb->hcpriv; + struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); - urb = cur_td->urb; - urb_priv = urb->hcpriv; - urb_priv->td_cnt++; - hcd = bus_to_hcd(urb->dev->bus); - - /* Only giveback urb when this is the last td in urb */ - if (urb_priv->td_cnt == urb_priv->length) { - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { - xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; - if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { - if (xhci->quirks & XHCI_AMD_PLL_FIX) - usb_amd_quirk_pll_enable(); - } + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; + if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { + if (xhci->quirks & XHCI_AMD_PLL_FIX) + usb_amd_quirk_pll_enable(); } - usb_hcd_unlink_urb_from_ep(hcd, urb); - - spin_unlock(&xhci->lock); - usb_hcd_giveback_urb(hcd, urb, status); - xhci_urb_free_priv(urb_priv); - spin_lock(&xhci->lock); } + xhci_urb_free_priv(urb_priv); + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock(&xhci->lock); + usb_hcd_giveback_urb(hcd, urb, status); + spin_lock(&xhci->lock); } -void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring, - struct xhci_td *td) +static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, + struct xhci_ring *ring, struct xhci_td *td) { struct device *dev = 
xhci_to_hcd(xhci)->self.controller; struct xhci_segment *seg = td->bounce_seg; @@ -752,7 +741,9 @@ remove_finished_td: ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); if (ep_ring && cur_td->bounce_seg) xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td); - xhci_giveback_urb_in_irq(xhci, cur_td, 0); + inc_td_cnt(cur_td->urb); + if (last_td_in_urb(cur_td)) + xhci_giveback_urb_in_irq(xhci, cur_td, 0); /* Stop processing the cancelled list if the watchdog timer is * running. @@ -777,7 +768,10 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring) if (cur_td->bounce_seg) xhci_unmap_td_bounce_buffer(xhci, ring, cur_td); - xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); + + inc_td_cnt(cur_td->urb); + if (last_td_in_urb(cur_td)) + xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); } } @@ -814,7 +808,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, cur_td = list_first_entry(&ep->cancelled_td_list, struct xhci_td, cancelled_td_list); list_del_init(&cur_td->cancelled_td_list); - xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); + + inc_td_cnt(cur_td->urb); + if (last_td_in_urb(cur_td)) + xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN); } } @@ -1003,8 +1000,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, break; case COMP_CTX_STATE: xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n"); - ep_state = le32_to_cpu(ep_ctx->ep_info); - ep_state &= EP_STATE_MASK; + ep_state = GET_EP_CTX_STATE(ep_ctx); slot_state = le32_to_cpu(slot_ctx->dev_state); slot_state = GET_SLOT_STATE(slot_state); xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, @@ -1096,12 +1092,12 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, } static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, - u32 cmd_comp_code) + struct xhci_command *command, u32 cmd_comp_code) { if (cmd_comp_code == COMP_SUCCESS) - xhci->slot_id = slot_id; + command->slot_id = slot_id; else - xhci->slot_id = 0; + command->slot_id = 0; } static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) @@ -1183,7 +1179,7 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, struct xhci_event_cmd *event) { if (!(xhci->quirks & XHCI_NEC_HOST)) { - xhci->error_bitmask |= 1 << 6; + xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n"); return; } xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, @@ -1325,14 +1321,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd_trb = xhci->cmd_ring->dequeue; cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, cmd_trb); - /* Is the command ring deq ptr out of sync with the deq seg ptr? */ - if (cmd_dequeue_dma == 0) { - xhci->error_bitmask |= 1 << 4; - return; - } - /* Does the DMA address match our internal dequeue pointer address? */ - if (cmd_dma != (u64) cmd_dequeue_dma) { - xhci->error_bitmask |= 1 << 5; + /* + * Check whether the completion event is for our internal kept + * command. 
+ */ + if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) { + xhci_warn(xhci, + "ERROR mismatched command completion event\n"); return; } @@ -1371,7 +1366,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); switch (cmd_type) { case TRB_ENABLE_SLOT: - xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code); + xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code); break; case TRB_DISABLE_SLOT: xhci_handle_cmd_disable_slot(xhci, slot_id); @@ -1418,7 +1413,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, break; default: /* Skip over unknown commands on the event ring */ - xhci->error_bitmask |= 1 << 6; + xhci_info(xhci, "INFO unknown command type %d\n", cmd_type); break; } @@ -1519,10 +1514,10 @@ static void handle_port_status(struct xhci_hcd *xhci, bool bogus_port_status = false; /* Port status change events always have a successful completion code */ - if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) { - xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); - xhci->error_bitmask |= 1 << 8; - } + if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) + xhci_warn(xhci, + "WARN: xHC returned failed port status event\n"); + port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id); @@ -1759,7 +1754,7 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id, - struct xhci_td *td, union xhci_trb *event_trb) + struct xhci_td *td, union xhci_trb *ep_trb) { struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; struct xhci_command *command; @@ -1798,8 +1793,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, * endpoint anyway. Check if a babble halted the * endpoint. */ - if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == - cpu_to_le32(EP_STATE_HALTED)) + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) return 1; return 0; @@ -1824,7 +1818,7 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) * Return 1 if the urb can be given back. */ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *event_trb, struct xhci_transfer_event *event, + union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status, bool skip) { struct xhci_virt_device *xdev; @@ -1833,7 +1827,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, int ep_index; struct urb *urb = NULL; struct xhci_ep_ctx *ep_ctx; - int ret = 0; struct urb_priv *urb_priv; u32 trb_comp_code; @@ -1866,7 +1859,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, * The class driver clears the device side halt later. */ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, - ep_ring->stream_id, td, event_trb); + ep_ring->stream_id, td, ep_trb); } else { /* Update ring dequeue pointer */ while (ep_ring->dequeue != td->last_trb) @@ -1889,41 +1882,54 @@ td_cleanup: * unsigned). Play it safe and say we didn't transfer anything. */ if (urb->actual_length > urb->transfer_buffer_length) { - xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. 
len = %u\n", - urb->transfer_buffer_length, - urb->actual_length); + xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n", + urb->transfer_buffer_length, urb->actual_length); urb->actual_length = 0; - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; + *status = 0; } list_del_init(&td->td_list); /* Was this TD slated to be cancelled but completed anyway? */ if (!list_empty(&td->cancelled_td_list)) list_del_init(&td->cancelled_td_list); - urb_priv->td_cnt++; + inc_td_cnt(urb); /* Giveback the urb when all the tds are completed */ - if (urb_priv->td_cnt == urb_priv->length) { - ret = 1; - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { - xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--; - if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { - if (xhci->quirks & XHCI_AMD_PLL_FIX) - usb_amd_quirk_pll_enable(); - } - } + if (last_td_in_urb(td)) { + if ((urb->actual_length != urb->transfer_buffer_length && + (urb->transfer_flags & URB_SHORT_NOT_OK)) || + (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc))) + xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n", + urb, urb->actual_length, + urb->transfer_buffer_length, *status); + + /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) + *status = 0; + xhci_giveback_urb_in_irq(xhci, td, *status); } + return 0; +} - return ret; +/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */ +static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, + union xhci_trb *stop_trb) +{ + u32 sum; + union xhci_trb *trb = ring->dequeue; + struct xhci_segment *seg = ring->deq_seg; + + for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) { + if (!trb_is_noop(trb) && !trb_is_link(trb)) + sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); + } + return sum; } /* * Process control tds, update urb status and actual_length. */ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *event_trb, struct xhci_transfer_event *event, + union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) { struct xhci_virt_device *xdev; @@ -1932,6 +1938,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, int ep_index; struct xhci_ep_ctx *ep_ctx; u32 trb_comp_code; + u32 remaining, requested; + bool on_data_stage; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); xdev = xhci->devs[slot_id]; @@ -1939,195 +1947,161 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + requested = td->urb->transfer_buffer_length; + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + /* not setup (dequeue), or status stage means we are at data stage */ + on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb); switch (trb_comp_code) { case COMP_SUCCESS: - if (event_trb == ep_ring->dequeue) { - xhci_warn(xhci, "WARN: Success on ctrl setup TRB " - "without IOC set??\n"); - *status = -ESHUTDOWN; - } else if (event_trb != td->last_trb) { - xhci_warn(xhci, "WARN: Success on ctrl data TRB " - "without IOC set??\n"); + if (ep_trb != td->last_trb) { + xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", + on_data_stage ? 
"data" : "setup"); *status = -ESHUTDOWN; - } else { - *status = 0; + break; } + *status = 0; break; case COMP_SHORT_TX: - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; + *status = 0; break; case COMP_STOP_SHORT: - if (event_trb == ep_ring->dequeue || event_trb == td->last_trb) - xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); + if (on_data_stage) + td->urb->actual_length = remaining; else - td->urb->actual_length = - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - - return finish_td(xhci, td, event_trb, event, ep, status, false); + xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); + goto finish_td; case COMP_STOP: - /* Did we stop at data stage? */ - if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - /* fall through */ + if (on_data_stage) + td->urb->actual_length = requested - remaining; + goto finish_td; case COMP_STOP_INVAL: - return finish_td(xhci, td, event_trb, event, ep, status, false); + goto finish_td; default: if (!xhci_requires_manual_halt_cleanup(xhci, - ep_ctx, trb_comp_code)) + ep_ctx, trb_comp_code)) break; - xhci_dbg(xhci, "TRB error code %u, " - "halted endpoint index = %u\n", - trb_comp_code, ep_index); + xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", + trb_comp_code, ep_index); /* else fall through */ case COMP_STALL: /* Did we transfer part of the data (middle) phase? */ - if (event_trb != ep_ring->dequeue && - event_trb != td->last_trb) - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + if (on_data_stage) + td->urb->actual_length = requested - remaining; else if (!td->urb_length_set) td->urb->actual_length = 0; - - return finish_td(xhci, td, event_trb, event, ep, status, false); + goto finish_td; } + + /* stopped at setup stage, no data transferred */ + if (ep_trb == ep_ring->dequeue) + goto finish_td; + /* - * Did we transfer any data, despite the errors that might have - * happened? I.e. did we get past the setup stage? + * if on data stage then update the actual_length of the URB and flag it + * as set, so it won't be overwritten in the event for the last TRB. */ - if (event_trb != ep_ring->dequeue) { - /* The event was for the status stage */ - if (event_trb == td->last_trb) { - if (td->urb_length_set) { - /* Don't overwrite a previously set error code - */ - if ((*status == -EINPROGRESS || *status == 0) && - (td->urb->transfer_flags - & URB_SHORT_NOT_OK)) - /* Did we already see a short data - * stage? */ - *status = -EREMOTEIO; - } else { - td->urb->actual_length = - td->urb->transfer_buffer_length; - } - } else { - /* - * Maybe the event was for the data stage? If so, update - * already the actual_length of the URB and flag it as - * set, so that it is not overwritten in the event for - * the last TRB. 
- */ - td->urb_length_set = true; - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - xhci_dbg(xhci, "Waiting for status " - "stage event\n"); - return 0; - } + if (on_data_stage) { + td->urb_length_set = true; + td->urb->actual_length = requested - remaining; + xhci_dbg(xhci, "Waiting for status stage event\n"); + return 0; } - return finish_td(xhci, td, event_trb, event, ep, status, false); + /* at status stage */ + if (!td->urb_length_set) + td->urb->actual_length = requested; + +finish_td: + return finish_td(xhci, td, ep_trb, event, ep, status, false); } /* * Process isochronous tds, update urb packet status and actual_length. */ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *event_trb, struct xhci_transfer_event *event, + union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) { struct xhci_ring *ep_ring; struct urb_priv *urb_priv; int idx; - int len = 0; - union xhci_trb *cur_trb; - struct xhci_segment *cur_seg; struct usb_iso_packet_descriptor *frame; u32 trb_comp_code; - bool skip_td = false; + bool sum_trbs_for_length = false; + u32 remaining, requested, ep_trb_len; + int short_framestatus; ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); urb_priv = td->urb->hcpriv; idx = urb_priv->td_cnt; frame = &td->urb->iso_frame_desc[idx]; + requested = frame->length; + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? + -EREMOTEIO : 0; /* handle completion code */ switch (trb_comp_code) { case COMP_SUCCESS: - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { - frame->status = 0; + if (remaining) { + frame->status = short_framestatus; + if (xhci->quirks & XHCI_TRUST_TX_LENGTH) + sum_trbs_for_length = true; break; } - if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) - trb_comp_code = COMP_SHORT_TX; - /* fallthrough */ - case COMP_STOP_SHORT: + frame->status = 0; + break; case COMP_SHORT_TX: - frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 
- -EREMOTEIO : 0; + frame->status = short_framestatus; + sum_trbs_for_length = true; break; case COMP_BW_OVER: frame->status = -ECOMM; - skip_td = true; break; case COMP_BUFF_OVER: case COMP_BABBLE: frame->status = -EOVERFLOW; - skip_td = true; break; case COMP_DEV_ERR: case COMP_STALL: frame->status = -EPROTO; - skip_td = true; break; case COMP_TX_ERR: frame->status = -EPROTO; - if (event_trb != td->last_trb) + if (ep_trb != td->last_trb) return 0; - skip_td = true; break; case COMP_STOP: + sum_trbs_for_length = true; + break; + case COMP_STOP_SHORT: + /* field normally containing residue now contains tranferred */ + frame->status = short_framestatus; + requested = remaining; + break; case COMP_STOP_INVAL: + requested = 0; + remaining = 0; break; default: + sum_trbs_for_length = true; frame->status = -1; break; } - if (trb_comp_code == COMP_SUCCESS || skip_td) { - frame->actual_length = frame->length; - td->urb->actual_length += frame->length; - } else if (trb_comp_code == COMP_STOP_SHORT) { - frame->actual_length = - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - td->urb->actual_length += frame->actual_length; - } else { - for (cur_trb = ep_ring->dequeue, - cur_seg = ep_ring->deq_seg; cur_trb != event_trb; - next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && - !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) - len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); - } - len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + if (sum_trbs_for_length) + frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) + + ep_trb_len - remaining; + else + frame->actual_length = requested; - if (trb_comp_code != COMP_STOP_INVAL) { - frame->actual_length = len; - td->urb->actual_length += len; - } - } + td->urb->actual_length += frame->actual_length; - return finish_td(xhci, td, event_trb, event, ep, status, false); + return finish_td(xhci, td, ep_trb, event, ep, status, false); } static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, @@ -2162,119 +2136,62 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, * Process bulk and interrupt tds, update urb status and actual_length. */ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, - union xhci_trb *event_trb, struct xhci_transfer_event *event, + union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) { struct xhci_ring *ep_ring; - union xhci_trb *cur_trb; - struct xhci_segment *cur_seg; u32 trb_comp_code; + u32 remaining, requested, ep_trb_len; ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + requested = td->urb->transfer_buffer_length; switch (trb_comp_code) { case COMP_SUCCESS: - /* Double check that the HW transferred everything. 
*/ - if (event_trb != td->last_trb || - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { - xhci_warn(xhci, "WARN Successful completion " - "on short TX\n"); - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; - if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) - trb_comp_code = COMP_SHORT_TX; - } else { - *status = 0; + /* handle success with untransferred data as short packet */ + if (ep_trb != td->last_trb || remaining) { + xhci_warn(xhci, "WARN Successful completion on short TX\n"); + xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", + td->urb->ep->desc.bEndpointAddress, + requested, remaining); } + *status = 0; break; - case COMP_STOP_SHORT: case COMP_SHORT_TX: - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; + xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", + td->urb->ep->desc.bEndpointAddress, + requested, remaining); + *status = 0; + break; + case COMP_STOP_SHORT: + td->urb->actual_length = remaining; + goto finish_td; + case COMP_STOP_INVAL: + /* stopped on ep trb with invalid length, exclude it */ + ep_trb_len = 0; + remaining = 0; break; default: - /* Others already handled above */ + /* do nothing */ break; } - if (trb_comp_code == COMP_SHORT_TX) - xhci_dbg(xhci, "ep %#x - asked for %d bytes, " - "%d bytes untransferred\n", - td->urb->ep->desc.bEndpointAddress, - td->urb->transfer_buffer_length, - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); - /* Stopped - short packet completion */ - if (trb_comp_code == COMP_STOP_SHORT) { - td->urb->actual_length = - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - if (td->urb->transfer_buffer_length < - td->urb->actual_length) { - xhci_warn(xhci, "HC gave bad length of %d bytes txed\n", - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); - td->urb->actual_length = 0; - /* status will be set by usb core for canceled urbs */ - } - /* Fast path - was this the last TRB in the TD for this URB? */ - } else if (event_trb == td->last_trb) { - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { - td->urb->actual_length = - td->urb->transfer_buffer_length - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - if (td->urb->transfer_buffer_length < - td->urb->actual_length) { - xhci_warn(xhci, "HC gave bad length " - "of %d bytes left\n", - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); - td->urb->actual_length = 0; - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; - } - /* Don't overwrite a previously set error code */ - if (*status == -EINPROGRESS) { - if (td->urb->transfer_flags & URB_SHORT_NOT_OK) - *status = -EREMOTEIO; - else - *status = 0; - } - } else { - td->urb->actual_length = - td->urb->transfer_buffer_length; - /* Ignore a short packet completion if the - * untransferred length was zero. - */ - if (*status == -EREMOTEIO) - *status = 0; - } - } else { - /* Slow path - walk the list, starting from the dequeue - * pointer, to get the actual length transferred. 
- */ + if (ep_trb == td->last_trb) + td->urb->actual_length = requested - remaining; + else + td->urb->actual_length = + sum_trb_lengths(xhci, ep_ring, ep_trb) + + ep_trb_len - remaining; +finish_td: + if (remaining > requested) { + xhci_warn(xhci, "bad transfer trb length %d in event trb\n", + remaining); td->urb->actual_length = 0; - for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; - cur_trb != event_trb; - next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) && - !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) - td->urb->actual_length += - TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); - } - /* If the ring didn't stop on a Link or No-op TRB, add - * in the actual bytes transferred from the Normal TRB - */ - if (trb_comp_code != COMP_STOP_INVAL) - td->urb->actual_length += - TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); } - - return finish_td(xhci, td, event_trb, event, ep, status, false); + return finish_td(xhci, td, ep_trb, event, ep, status, false); } /* @@ -2293,16 +2210,13 @@ static int handle_tx_event(struct xhci_hcd *xhci, unsigned int slot_id; int ep_index; struct xhci_td *td = NULL; - dma_addr_t event_dma; - struct xhci_segment *event_seg; - union xhci_trb *event_trb; - struct urb *urb = NULL; + dma_addr_t ep_trb_dma; + struct xhci_segment *ep_seg; + union xhci_trb *ep_trb; int status = -EINPROGRESS; - struct urb_priv *urb_priv; struct xhci_ep_ctx *ep_ctx; struct list_head *tmp; u32 trb_comp_code; - int ret = 0; int td_num = 0; bool handling_skipped_tds = false; @@ -2328,9 +2242,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep = &xdev->eps[ep_index]; ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); - if (!ep_ring || - (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == - EP_STATE_DISABLED) { + if (!ep_ring || GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) { xhci_err(xhci, "ERROR Transfer event for disabled endpoint " "or incorrect stream ring\n"); xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", @@ -2352,7 +2264,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, td_num++; } - event_dma = le64_to_cpu(event->buffer); + ep_trb_dma = le64_to_cpu(event->buffer); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); /* Look for common error cases */ switch (trb_comp_code) { @@ -2480,7 +2392,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_dbg(xhci, "td_list is empty while skip " "flag set. Clear skip flag.\n"); } - ret = 0; goto cleanup; } @@ -2489,7 +2400,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep->skip = false; xhci_dbg(xhci, "All tds on the ep_ring skipped. " "Clear skip flag.\n"); - ret = 0; goto cleanup; } @@ -2498,8 +2408,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, td_num--; /* Is this a TRB in the currently executing TD? */ - event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, - td->last_trb, event_dma, false); + ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, + td->last_trb, ep_trb_dma, false); /* * Skip the Force Stopped Event. The event_trb(event_dma) of FSE @@ -2509,13 +2419,12 @@ static int handle_tx_event(struct xhci_hcd *xhci, * last TRB of the previous TD. The command completion handle * will take care the rest. 
*/ - if (!event_seg && (trb_comp_code == COMP_STOP || + if (!ep_seg && (trb_comp_code == COMP_STOP || trb_comp_code == COMP_STOP_INVAL)) { - ret = 0; goto cleanup; } - if (!event_seg) { + if (!ep_seg) { if (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { /* Some host controllers give a spurious @@ -2525,7 +2434,6 @@ static int handle_tx_event(struct xhci_hcd *xhci, if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && ep_ring->last_td_was_short) { ep_ring->last_td_was_short = false; - ret = 0; goto cleanup; } /* HC is busted, give up! */ @@ -2536,11 +2444,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, trb_comp_code); trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, td->last_trb, - event_dma, true); + ep_trb_dma, true); return -ESHUTDOWN; } - ret = skip_isoc_td(xhci, td, event, ep, &status); + skip_isoc_td(xhci, td, event, ep, &status); goto cleanup; } if (trb_comp_code == COMP_SHORT_TX) @@ -2553,36 +2461,28 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep->skip = false; } - event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / - sizeof(*event_trb)]; + ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / + sizeof(*ep_trb)]; /* * No-op TRB should not trigger interrupts. - * If event_trb is a no-op TRB, it means the + * If ep_trb is a no-op TRB, it means the * corresponding TD has been cancelled. Just ignore * the TD. */ - if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) { - xhci_dbg(xhci, - "event_trb is a no-op TRB. Skip it\n"); + if (trb_is_noop(ep_trb)) { + xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n"); goto cleanup; } - /* Now update the urb's actual_length and give back to - * the core - */ + /* update the urb's actual_length and give back to the core */ if (usb_endpoint_xfer_control(&td->urb->ep->desc)) - ret = process_ctrl_td(xhci, td, event_trb, event, ep, - &status); + process_ctrl_td(xhci, td, ep_trb, event, ep, &status); else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) - ret = process_isoc_td(xhci, td, event_trb, event, ep, - &status); + process_isoc_td(xhci, td, ep_trb, event, ep, &status); else - ret = process_bulk_intr_td(xhci, td, event_trb, event, - ep, &status); - + process_bulk_intr_td(xhci, td, ep_trb, event, ep, + &status); cleanup: - - handling_skipped_tds = ep->skip && trb_comp_code != COMP_MISSED_INT && trb_comp_code != COMP_PING_ERR; @@ -2594,33 +2494,6 @@ cleanup: if (!handling_skipped_tds) inc_deq(xhci, xhci->event_ring); - if (ret) { - urb = td->urb; - urb_priv = urb->hcpriv; - - xhci_urb_free_priv(urb_priv); - - usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); - if ((urb->actual_length != urb->transfer_buffer_length && - (urb->transfer_flags & - URB_SHORT_NOT_OK)) || - (status != 0 && - !usb_endpoint_xfer_isoc(&urb->ep->desc))) - xhci_dbg(xhci, "Giveback URB %p, len = %d, " - "expected = %d, status = %d\n", - urb, urb->actual_length, - urb->transfer_buffer_length, - status); - spin_unlock(&xhci->lock); - /* EHCI, UHCI, and OHCI always unconditionally set the - * urb->status of an isochronous endpoint to 0. - */ - if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) - status = 0; - usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status); - spin_lock(&xhci->lock); - } - /* * If ep->skip is set, it means there are missed tds on the * endpoint ring need to take care of. @@ -2644,18 +2517,17 @@ static int xhci_handle_event(struct xhci_hcd *xhci) int update_ptrs = 1; int ret; + /* Event ring hasn't been allocated yet. 
*/ if (!xhci->event_ring || !xhci->event_ring->dequeue) { - xhci->error_bitmask |= 1 << 1; - return 0; + xhci_err(xhci, "ERROR event ring not ready\n"); + return -ENOMEM; } event = xhci->event_ring->dequeue; /* Does the HC or OS own the TRB? */ if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != - xhci->event_ring->cycle_state) { - xhci->error_bitmask |= 1 << 2; + xhci->event_ring->cycle_state) return 0; - } /* * Barrier between reading the TRB_CYCLE (valid) flag above and any @@ -2663,7 +2535,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci) */ rmb(); /* FIXME: Handle more event types. */ - switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) { + switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) { case TRB_TYPE(TRB_COMPLETION): handle_cmd_completion(xhci, &event->event_cmd); break; @@ -2673,9 +2545,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci) break; case TRB_TYPE(TRB_TRANSFER): ret = handle_tx_event(xhci, &event->trans_event); - if (ret < 0) - xhci->error_bitmask |= 1 << 9; - else + if (ret >= 0) update_ptrs = 0; break; case TRB_TYPE(TRB_DEV_NOTE): @@ -2686,7 +2556,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci) TRB_TYPE(48)) handle_vendor_event(xhci, event); else - xhci->error_bitmask |= 1 << 3; + xhci_warn(xhci, "ERROR unknown event type %d\n", + TRB_FIELD_TO_TYPE( + le32_to_cpu(event->event_cmd.flags))); } /* Any of the above functions may drop and re-acquire the lock, so check * to make sure a watchdog timer didn't mark the host as non-responsive. @@ -2931,8 +2803,7 @@ static int prepare_transfer(struct xhci_hcd *xhci, return -EINVAL; } - ret = prepare_ring(xhci, ep_ring, - le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, + ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), num_trbs, mem_flags); if (ret) return ret; @@ -3120,7 +2991,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, if (xhci->quirks & XHCI_MTK_HOST) trb_buff_len = 0; - maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); + maxp = usb_endpoint_maxp(&urb->ep->desc); total_packet_count = DIV_ROUND_UP(td_total_len, maxp); /* Queueing functions don't count the current TRB into transferred */ @@ -3136,7 +3007,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, unsigned int max_pkt; u32 new_buff_len; - max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); + max_pkt = usb_endpoint_maxp(&urb->ep->desc); unalign = (enqd_len + *trb_buff_len) % max_pkt; /* we got lucky, last normal TRB data on segment is packet aligned */ @@ -3650,7 +3521,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, addr = start_addr + urb->iso_frame_desc[i].offset; td_len = urb->iso_frame_desc[i].length; td_remain_len = td_len; - max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc)); + max_pkt = usb_endpoint_maxp(&urb->ep->desc); total_pkt_count = DIV_ROUND_UP(td_len, max_pkt); /* A zero-length transfer still involves at least one packet. */ @@ -3828,7 +3699,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, /* Check the ring to guarantee there is enough room for the whole urb. * Do not insert any td of the urb to the ring if the check failed. 
*/ - ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK, + ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), num_trbs, mem_flags); if (ret) return ret; @@ -3841,8 +3712,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, /* Calculate the start frame and put it in urb->start_frame. */ if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { - if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == - EP_STATE_RUNNING) { + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) { urb->start_frame = xep->next_frame_id; goto skip_start_over; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1a4ca02729c2..1cd56417cbec 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -113,12 +113,12 @@ int xhci_halt(struct xhci_hcd *xhci) ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); - if (!ret) { - xhci->xhc_state |= XHCI_STATE_HALTED; - xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; - } else - xhci_warn(xhci, "Host not halted after %u microseconds.\n", - XHCI_MAX_HALT_USEC); + if (ret) { + xhci_warn(xhci, "Host halt failed, %d\n", ret); + return ret; + } + xhci->xhc_state |= XHCI_STATE_HALTED; + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; return ret; } @@ -167,6 +167,12 @@ int xhci_reset(struct xhci_hcd *xhci) int ret, i; state = readl(&xhci->op_regs->status); + + if (state == ~(u32)0) { + xhci_warn(xhci, "Host not accessible, reset failed.\n"); + return -ENODEV; + } + if ((state & STS_HALT) == 0) { xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); return 0; @@ -690,7 +696,6 @@ void xhci_stop(struct usb_hcd *hcd) xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; xhci_halt(xhci); xhci_reset(xhci); - spin_unlock_irq(&xhci->lock); } @@ -1645,8 +1650,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, /* If the HC already knows the endpoint is disabled, * or the HCD has noted it is disabled, ignore this request */ - if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == - cpu_to_le32(EP_STATE_DISABLED)) || + if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || le32_to_cpu(ctrl_ctx->drop_flags) & xhci_get_endpoint_flag(&ep->desc)) { /* Do not warn when called after a usb_device_reset */ @@ -3209,7 +3213,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, for (i = 0; i < num_eps; i++) { ep_index = xhci_get_endpoint_index(&eps[i]->desc); - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc)); + max_packet = usb_endpoint_maxp(&eps[i]->desc); vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, num_stream_ctxs, num_streams, @@ -3683,27 +3687,26 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) int ret, slot_id; struct xhci_command *command; - command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); + command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); if (!command) return 0; /* xhci->slot_id and xhci->addr_dev are not thread-safe */ mutex_lock(&xhci->mutex); spin_lock_irqsave(&xhci->lock, flags); - command->completion = &xhci->addr_dev; ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); mutex_unlock(&xhci->mutex); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); - kfree(command); + xhci_free_command(xhci, command); return 0; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); wait_for_completion(command->completion); - slot_id = xhci->slot_id; + slot_id = 
command->slot_id; mutex_unlock(&xhci->mutex); if (!slot_id || command->status != COMP_SUCCESS) { @@ -3711,7 +3714,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", HCS_MAX_SLOTS( readl(&xhci->cap_regs->hcs_params1))); - kfree(command); + xhci_free_command(xhci, command); return 0; } @@ -3747,7 +3750,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) #endif - kfree(command); + xhci_free_command(xhci, command); /* Is this a LS or FS device under a HS hub? */ /* Hub or peripherial? */ return 1; @@ -3755,6 +3758,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) disable_slot: /* Disable slot, if we can do it without mem alloc */ spin_lock_irqsave(&xhci->lock, flags); + kfree(command->completion); command->completion = NULL; command->status = 0; if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, @@ -3816,14 +3820,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, } } - command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); + command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); if (!command) { ret = -ENOMEM; goto out; } command->in_ctx = virt_dev->in_ctx; - command->completion = &xhci->addr_dev; slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); @@ -3941,7 +3944,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); out: mutex_unlock(&xhci->mutex); - kfree(command); + if (command) { + kfree(command->completion); + kfree(command); + } return ret; } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f945380035d0..8ccc11a974b8 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -709,6 +709,8 @@ struct xhci_ep_ctx { #define EP_STATE_HALTED 2 #define EP_STATE_STOPPED 3 #define EP_STATE_ERROR 4 +#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK) + /* Mult - Max number of burtst within an interval, in EP companion desc. */ #define EP_MULT(p) (((p) & 0x3) << 8) #define CTX_TO_EP_MULT(p) (((p) >> 8) & 0x3) @@ -747,11 +749,6 @@ struct xhci_ep_ctx { #define MAX_PACKET_MASK (0xffff << 16) #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) -/* Get max packet size from ep desc. Bit 10..0 specify the max packet size. - * USB2.0 spec 9.6.6. - */ -#define GET_MAX_PACKET(p) ((p) & 0x7ff) - /* tx_info bitmasks */ #define EP_AVG_TRB_LENGTH(p) ((p) & 0xffff) #define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) & 0xffff) << 16) @@ -789,6 +786,7 @@ struct xhci_command { /* Input context for changing device state */ struct xhci_container_ctx *in_ctx; u32 status; + int slot_id; /* If completion is null, no one is waiting on this command * and the structure can be freed after the command completes. */ @@ -997,7 +995,6 @@ struct xhci_virt_device { int num_rings_cached; #define XHCI_MAX_RINGS_CACHED 31 struct xhci_virt_ep eps[31]; - struct completion cmd_completion; u8 fake_port; u8 real_port; struct xhci_interval_bw_table *bw_table; @@ -1583,8 +1580,6 @@ struct xhci_hcd { /* slot enabling and address device helpers */ /* these are not thread safe so use mutex */ struct mutex mutex; - struct completion addr_dev; - int slot_id; /* For USB 3.0 LPM enable/disable. 
*/ struct xhci_command *lpm_command; /* Internal mirror of the HW's dcbaa */ @@ -1618,8 +1613,6 @@ struct xhci_hcd { #define XHCI_STATE_DYING (1 << 0) #define XHCI_STATE_HALTED (1 << 1) #define XHCI_STATE_REMOVING (1 << 2) - /* Statistics */ - int error_bitmask; unsigned int quirks; #define XHCI_LINK_TRB_QUIRK (1 << 0) #define XHCI_RESET_EP_QUIRK (1 << 1) diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 5c8210dc6fd9..3525626bf086 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -1915,7 +1915,7 @@ static struct urb *iso_alloc_urb( if (bytes < 0 || !desc) return NULL; maxp = 0x7ff & usb_endpoint_maxp(desc); - maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11)); + maxp *= usb_endpoint_maxp_mult(desc); packets = DIV_ROUND_UP(bytes, maxp); urb = usb_alloc_urb(packets, GFP_KERNEL); @@ -2001,8 +2001,8 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param, "iso period %d %sframes, wMaxPacket %d, transactions: %d\n", 1 << (desc->bInterval - 1), (udev->speed == USB_SPEED_HIGH) ? "micro" : "", - usb_endpoint_maxp(desc) & 0x7ff, - 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11))); + usb_endpoint_maxp(desc), + usb_endpoint_maxp_mult(desc)); dev_info(&dev->intf->dev, "total %lu msec (%lu packets)\n", diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index 520e55a760c9..603b7f84d31e 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -696,7 +696,7 @@ static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu) return IRQ_HANDLED; } -irqreturn_t mtu3_irq(int irq, void *data) +static irqreturn_t mtu3_irq(int irq, void *data) { struct mtu3 *mtu = (struct mtu3 *)data; unsigned long flags; diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c index a6dd292c63ec..7d9ba8a52368 100644 --- a/drivers/usb/mtu3/mtu3_qmu.c +++ b/drivers/usb/mtu3/mtu3_qmu.c @@ -168,7 +168,7 @@ static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring) } /* check if a ring is emtpy */ -int gpd_ring_empty(struct mtu3_gpd_ring *ring) +static int gpd_ring_empty(struct mtu3_gpd_ring *ring) { struct qmu_gpd *enq = ring->enqueue; struct qmu_gpd *next; diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 210b7e43a6fd..e89708d839e5 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -6,6 +6,9 @@ * Based on the DaVinci "glue layer" code. * Copyright (C) 2005-2006 by Texas Instruments * + * DT support + * Copyright (c) 2016 Petr Kulhavy <petr@barix.com> + * * This file is part of the Inventra Controller Driver for Linux. * * The Inventra Controller Driver for Linux is free software; you @@ -340,6 +343,13 @@ static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode) struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent); enum phy_mode phy_mode; + /* + * The PHY has some issues when it is forced in device or host mode. + * Unless the user request another mode, configure the PHY in OTG mode. + */ + if (!musb->is_initialized) + return phy_set_mode(glue->phy, PHY_MODE_USB_OTG); + switch (musb_mode) { case MUSB_HOST: /* Force VBUS valid, ID = 0 */ phy_mode = PHY_MODE_USB_HOST; @@ -366,6 +376,12 @@ static int da8xx_musb_init(struct musb *musb) musb->mregs += DA8XX_MENTOR_CORE_OFFSET; + ret = clk_prepare_enable(glue->clk); + if (ret) { + dev_err(glue->dev, "failed to enable clock\n"); + return ret; + } + /* Returns zero if e.g. 
not clocked */ rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG); if (!rev) @@ -377,12 +393,6 @@ static int da8xx_musb_init(struct musb *musb) goto fail; } - ret = clk_prepare_enable(glue->clk); - if (ret) { - dev_err(glue->dev, "failed to enable clock\n"); - goto fail; - } - setup_timer(&otg_workaround, otg_timer, (unsigned long)musb); /* Reset the controller */ @@ -392,7 +402,7 @@ static int da8xx_musb_init(struct musb *musb) ret = phy_init(glue->phy); if (ret) { dev_err(glue->dev, "Failed to init phy.\n"); - goto err_phy_init; + goto fail; } ret = phy_power_on(glue->phy); @@ -412,9 +422,8 @@ static int da8xx_musb_init(struct musb *musb) err_phy_power_on: phy_exit(glue->phy); -err_phy_init: - clk_disable_unprepare(glue->clk); fail: + clk_disable_unprepare(glue->clk); return ret; } @@ -433,6 +442,21 @@ static int da8xx_musb_exit(struct musb *musb) return 0; } +static inline u8 get_vbus_power(struct device *dev) +{ + struct regulator *vbus_supply; + int current_uA; + + vbus_supply = regulator_get_optional(dev, "vbus"); + if (IS_ERR(vbus_supply)) + return 255; + current_uA = regulator_get_current_limit(vbus_supply); + regulator_put(vbus_supply); + if (current_uA <= 0 || current_uA > 510000) + return 255; + return current_uA / 1000 / 2; +} + static const struct musb_platform_ops da8xx_ops = { .quirks = MUSB_DMA_CPPI | MUSB_INDEXED_EP, .init = da8xx_musb_init, @@ -458,6 +482,12 @@ static const struct platform_device_info da8xx_dev_info = { .dma_mask = DMA_BIT_MASK(32), }; +static const struct musb_hdrc_config da8xx_config = { + .ram_bits = 10, + .num_eps = 5, + .multipoint = 1, +}; + static int da8xx_probe(struct platform_device *pdev) { struct resource musb_resources[2]; @@ -465,6 +495,7 @@ static int da8xx_probe(struct platform_device *pdev) struct da8xx_glue *glue; struct platform_device_info pinfo; struct clk *clk; + struct device_node *np = pdev->dev.of_node; int ret; glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); @@ -479,13 +510,24 @@ static int da8xx_probe(struct platform_device *pdev) glue->phy = devm_phy_get(&pdev->dev, "usb-phy"); if (IS_ERR(glue->phy)) { - dev_err(&pdev->dev, "failed to get phy\n"); + if (PTR_ERR(glue->phy) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get phy\n"); return PTR_ERR(glue->phy); } glue->dev = &pdev->dev; glue->clk = clk; + if (IS_ENABLED(CONFIG_OF) && np) { + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->config = &da8xx_config; + pdata->mode = musb_get_mode(&pdev->dev); + pdata->power = get_vbus_power(&pdev->dev); + } + pdata->platform_ops = &da8xx_ops; glue->usb_phy = usb_phy_generic_register(); @@ -536,11 +578,22 @@ static int da8xx_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_OF +static const struct of_device_id da8xx_id_table[] = { + { + .compatible = "ti,da830-musb", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, da8xx_id_table); +#endif + static struct platform_driver da8xx_driver = { .probe = da8xx_probe, .remove = da8xx_remove, .driver = { .name = "musb-da8xx", + .of_match_table = of_match_ptr(da8xx_id_table), }, }; diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 27dadc0d9114..9e226468a13e 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -100,6 +100,7 @@ #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/usb.h> +#include <linux/usb/of.h> #include "musb_core.h" #include "musb_trace.h" @@ -130,6 +131,24 @@ static inline struct musb *dev_to_musb(struct device *dev) return 
dev_get_drvdata(dev); } +enum musb_mode musb_get_mode(struct device *dev) +{ + enum usb_dr_mode mode; + + mode = usb_get_dr_mode(dev); + switch (mode) { + case USB_DR_MODE_HOST: + return MUSB_HOST; + case USB_DR_MODE_PERIPHERAL: + return MUSB_PERIPHERAL; + case USB_DR_MODE_OTG: + case USB_DR_MODE_UNKNOWN: + default: + return MUSB_OTG; + } +} +EXPORT_SYMBOL_GPL(musb_get_mode); + /*-------------------------------------------------------------------------*/ #ifndef CONFIG_BLACKFIN @@ -569,10 +588,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, if (devctl & MUSB_DEVCTL_HM) { switch (musb->xceiv->otg->state) { case OTG_STATE_A_SUSPEND: - /* remote wakeup? later, GetPortStatus - * will stop RESUME signaling - */ - + /* remote wakeup? */ musb->port1_status |= (USB_PORT_STAT_C_SUSPEND << 16) | MUSB_PORT_STAT_RESUME; @@ -986,7 +1002,7 @@ b_host: } #endif - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); return handled; } @@ -1855,14 +1871,23 @@ static void musb_pm_runtime_check_session(struct musb *musb) MUSB_DEVCTL_HR; switch (devctl & ~s) { case MUSB_QUIRK_B_INVALID_VBUS_91: - if (!musb->session && !musb->quirk_invalid_vbus) { - musb->quirk_invalid_vbus = true; + if (musb->quirk_retries--) { musb_dbg(musb, - "First invalid vbus, assume no session"); + "Poll devctl on invalid vbus, assume no session"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + return; } - break; case MUSB_QUIRK_A_DISCONNECT_19: + if (musb->quirk_retries--) { + musb_dbg(musb, + "Poll devctl on possible host mode disconnect"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + + return; + } if (!musb->session) break; musb_dbg(musb, "Allow PM on possible host mode disconnect"); @@ -1886,9 +1911,9 @@ static void musb_pm_runtime_check_session(struct musb *musb) if (error < 0) dev_err(musb->controller, "Could not enable: %i\n", error); + musb->quirk_retries = 3; } else { musb_dbg(musb, "Allow PM with no session: %02x", devctl); - musb->quirk_invalid_vbus = false; pm_runtime_mark_last_busy(musb->controller); pm_runtime_put_autosuspend(musb->controller); } @@ -1899,7 +1924,7 @@ static void musb_pm_runtime_check_session(struct musb *musb) /* Only used to provide driver mode change events */ static void musb_irq_work(struct work_struct *data) { - struct musb *musb = container_of(data, struct musb, irq_work); + struct musb *musb = container_of(data, struct musb, irq_work.work); musb_pm_runtime_check_session(musb); @@ -1969,6 +1994,7 @@ static struct musb *allocate_instance(struct device *dev, INIT_LIST_HEAD(&musb->control); INIT_LIST_HEAD(&musb->in_bulk); INIT_LIST_HEAD(&musb->out_bulk); + INIT_LIST_HEAD(&musb->pending_list); musb->vbuserr_retry = VBUSERR_RETRY_COUNT; musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; @@ -2018,6 +2044,84 @@ static void musb_free(struct musb *musb) musb_host_free(musb); } +struct musb_pending_work { + int (*callback)(struct musb *musb, void *data); + void *data; + struct list_head node; +}; + +/* + * Called from musb_runtime_resume(), musb_resume(), and + * musb_queue_resume_work(). Callers must take musb->lock. 
+ */ +static int musb_run_resume_work(struct musb *musb) +{ + struct musb_pending_work *w, *_w; + unsigned long flags; + int error = 0; + + spin_lock_irqsave(&musb->list_lock, flags); + list_for_each_entry_safe(w, _w, &musb->pending_list, node) { + if (w->callback) { + error = w->callback(musb, w->data); + if (error < 0) { + dev_err(musb->controller, + "resume callback %p failed: %i\n", + w->callback, error); + } + } + list_del(&w->node); + devm_kfree(musb->controller, w); + } + spin_unlock_irqrestore(&musb->list_lock, flags); + + return error; +} + +/* + * Called to run work if device is active or else queue the work to happen + * on resume. Caller must take musb->lock and must hold an RPM reference. + * + * Note that we cowardly refuse queuing work after musb PM runtime + * resume is done calling musb_run_resume_work() and return -EINPROGRESS + * instead. + */ +int musb_queue_resume_work(struct musb *musb, + int (*callback)(struct musb *musb, void *data), + void *data) +{ + struct musb_pending_work *w; + unsigned long flags; + int error; + + if (WARN_ON(!callback)) + return -EINVAL; + + if (pm_runtime_active(musb->controller)) + return callback(musb, data); + + w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); + if (!w) + return -ENOMEM; + + w->callback = callback; + w->data = data; + spin_lock_irqsave(&musb->list_lock, flags); + if (musb->is_runtime_suspended) { + list_add_tail(&w->node, &musb->pending_list); + error = 0; + } else { + dev_err(musb->controller, "could not add resume work %p\n", + callback); + devm_kfree(musb->controller, w); + error = -EINPROGRESS; + } + spin_unlock_irqrestore(&musb->list_lock, flags); + + return error; +} +EXPORT_SYMBOL_GPL(musb_queue_resume_work); + static void musb_deassert_reset(struct work_struct *work) { struct musb *musb; @@ -2065,6 +2169,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) } spin_lock_init(&musb->lock); + spin_lock_init(&musb->list_lock); musb->board_set_power = plat->set_power; musb->min_power = plat->min_power; musb->ops = plat->platform_ops; @@ -2114,11 +2219,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb->io.ep_offset = musb_flat_ep_offset; musb->io.ep_select = musb_flat_ep_select; } - /* And override them with platform specific ops if specified. 
*/ - if (musb->ops->ep_offset) - musb->io.ep_offset = musb->ops->ep_offset; - if (musb->ops->ep_select) - musb->io.ep_select = musb->ops->ep_select; /* At least tusb6010 has its own offsets */ if (musb->ops->ep_offset) @@ -2213,7 +2313,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_generic_disable(musb); /* Init IRQ workqueue before request_irq */ - INIT_WORK(&musb->irq_work, musb_irq_work); + INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work); INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume); @@ -2296,6 +2396,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if (status) goto fail5; + musb->is_initialized = 1; pm_runtime_mark_last_busy(musb->controller); pm_runtime_put_autosuspend(musb->controller); @@ -2309,7 +2410,7 @@ fail4: musb_host_cleanup(musb); fail3: - cancel_work_sync(&musb->irq_work); + cancel_delayed_work_sync(&musb->irq_work); cancel_delayed_work_sync(&musb->finish_resume_work); cancel_delayed_work_sync(&musb->deassert_reset_work); if (musb->dma_controller) @@ -2329,8 +2430,9 @@ fail2: musb_platform_exit(musb); fail1: - dev_err(musb->controller, - "musb_init_controller failed with status %d\n", status); + if (status != -EPROBE_DEFER) + dev_err(musb->controller, + "%s failed with status %d\n", __func__, status); musb_free(musb); @@ -2376,7 +2478,7 @@ static int musb_remove(struct platform_device *pdev) */ musb_exit_debugfs(musb); - cancel_work_sync(&musb->irq_work); + cancel_delayed_work_sync(&musb->irq_work); cancel_delayed_work_sync(&musb->finish_resume_work); cancel_delayed_work_sync(&musb->deassert_reset_work); pm_runtime_get_sync(musb->controller); @@ -2562,6 +2664,7 @@ static int musb_suspend(struct device *dev) musb_platform_disable(musb); musb_generic_disable(musb); + WARN_ON(!list_empty(&musb->pending_list)); spin_lock_irqsave(&musb->lock, flags); @@ -2583,9 +2686,11 @@ static int musb_suspend(struct device *dev) static int musb_resume(struct device *dev) { - struct musb *musb = dev_to_musb(dev); - u8 devctl; - u8 mask; + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + int error; + u8 devctl; + u8 mask; /* * For static cmos like DaVinci, register values were preserved @@ -2619,6 +2724,13 @@ static int musb_resume(struct device *dev) musb_start(musb); + spin_lock_irqsave(&musb->lock, flags); + error = musb_run_resume_work(musb); + if (error) + dev_err(musb->controller, "resume work failed with %i\n", + error); + spin_unlock_irqrestore(&musb->lock, flags); + return 0; } @@ -2627,14 +2739,16 @@ static int musb_runtime_suspend(struct device *dev) struct musb *musb = dev_to_musb(dev); musb_save_context(musb); + musb->is_runtime_suspended = 1; return 0; } static int musb_runtime_resume(struct device *dev) { - struct musb *musb = dev_to_musb(dev); - static int first = 1; + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + int error; /* * When pm_runtime_get_sync called for the first time in driver @@ -2645,9 +2759,10 @@ static int musb_runtime_resume(struct device *dev) * Also context restore without save does not make * any sense */ - if (!first) - musb_restore_context(musb); - first = 0; + if (!musb->is_initialized) + return 0; + + musb_restore_context(musb); if (musb->need_finish_resume) { musb->need_finish_resume = 0; @@ -2655,6 +2770,14 @@ static int musb_runtime_resume(struct device *dev) msecs_to_jiffies(USB_RESUME_TIMEOUT)); } + spin_lock_irqsave(&musb->lock, flags); + error = 
musb_run_resume_work(musb); + if (error) + dev_err(musb->controller, "resume work failed with %i\n", + error); + musb->is_runtime_suspended = 0; + spin_unlock_irqrestore(&musb->lock, flags); + return 0; } diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 2cb88a498f8a..a611e2f67bdc 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -303,13 +303,14 @@ struct musb_context_registers { struct musb { /* device lock */ spinlock_t lock; + spinlock_t list_lock; /* resume work list lock */ struct musb_io io; const struct musb_platform_ops *ops; struct musb_context_registers context; irqreturn_t (*isr)(int, void *); - struct work_struct irq_work; + struct delayed_work irq_work; struct delayed_work deassert_reset_work; struct delayed_work finish_resume_work; struct delayed_work gadget_work; @@ -337,6 +338,7 @@ struct musb { struct list_head control; /* of musb_qh */ struct list_head in_bulk; /* of musb_qh */ struct list_head out_bulk; /* of musb_qh */ + struct list_head pending_list; /* pending work list */ struct timer_list otg_timer; struct notifier_block nb; @@ -379,12 +381,15 @@ struct musb { int port_mode; /* MUSB_PORT_MODE_* */ bool session; - bool quirk_invalid_vbus; + unsigned long quirk_retries; bool is_host; int a_wait_bcon; /* VBUS timeout in msecs */ unsigned long idle_timeout; /* Next timeout in jiffies */ + unsigned is_initialized:1; + unsigned is_runtime_suspended:1; + /* active means connected and not suspended */ unsigned is_active:1; @@ -540,6 +545,10 @@ extern irqreturn_t musb_interrupt(struct musb *); extern void musb_hnp_stop(struct musb *musb); +int musb_queue_resume_work(struct musb *musb, + int (*callback)(struct musb *musb, void *data), + void *data); + static inline void musb_platform_set_vbus(struct musb *musb, int is_on) { if (musb->ops->set_vbus) @@ -617,4 +626,10 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb) musb->ops->post_root_reset_end(musb); } +/* + * gets the "dr_mode" property from DT and converts it into musb_mode + * if the property is not found or not recognized returns MUSB_OTG + */ +extern enum musb_mode musb_get_mode(struct device *dev); + #endif /* __MUSB_CORE_H__ */ diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 0f17d2140db6..feae1561b9ab 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -185,24 +185,19 @@ static void dsps_musb_disable(struct musb *musb) musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); musb_writel(reg_base, wrp->epintr_clear, wrp->txep_bitmap | wrp->rxep_bitmap); + del_timer_sync(&glue->timer); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); } -static void otg_timer(unsigned long _musb) +/* Caller must take musb->lock */ +static int dsps_check_status(struct musb *musb, void *unused) { - struct musb *musb = (void *)_musb; void __iomem *mregs = musb->mregs; struct device *dev = musb->controller; struct dsps_glue *glue = dev_get_drvdata(dev->parent); const struct dsps_musb_wrapper *wrp = glue->wrp; u8 devctl; - unsigned long flags; int skip_session = 0; - int err; - - err = pm_runtime_get_sync(dev); - if (err < 0) - dev_err(dev, "Poll could not pm_runtime_get: %i\n", err); /* * We poll because DSPS IP's won't expose several OTG-critical @@ -212,7 +207,6 @@ static void otg_timer(unsigned long _musb) dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, usb_otg_state_string(musb->xceiv->otg->state)); - spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->otg->state) { case 
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 0f17d2140db6..feae1561b9ab 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -185,24 +185,19 @@ static void dsps_musb_disable(struct musb *musb)
 	musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
 	musb_writel(reg_base, wrp->epintr_clear,
 		    wrp->txep_bitmap | wrp->rxep_bitmap);
+	del_timer_sync(&glue->timer);
 	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 }
 
-static void otg_timer(unsigned long _musb)
+/* Caller must take musb->lock */
+static int dsps_check_status(struct musb *musb, void *unused)
 {
-	struct musb *musb = (void *)_musb;
 	void __iomem *mregs = musb->mregs;
 	struct device *dev = musb->controller;
 	struct dsps_glue *glue = dev_get_drvdata(dev->parent);
 	const struct dsps_musb_wrapper *wrp = glue->wrp;
 	u8 devctl;
-	unsigned long flags;
 	int skip_session = 0;
-	int err;
-
-	err = pm_runtime_get_sync(dev);
-	if (err < 0)
-		dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
 
 	/*
 	 * We poll because DSPS IP's won't expose several OTG-critical
@@ -212,7 +207,6 @@ static void otg_timer(unsigned long _musb)
 	dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
 		usb_otg_state_string(musb->xceiv->otg->state));
 
-	spin_lock_irqsave(&musb->lock, flags);
 	switch (musb->xceiv->otg->state) {
 	case OTG_STATE_A_WAIT_VRISE:
 		mod_timer(&glue->timer, jiffies +
@@ -245,8 +239,30 @@ static void otg_timer(unsigned long _musb)
 	default:
 		break;
 	}
-	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return 0;
+}
+
+static void otg_timer(unsigned long _musb)
+{
+	struct musb *musb = (void *)_musb;
+	struct device *dev = musb->controller;
+	unsigned long flags;
+	int err;
+
+	err = pm_runtime_get(dev);
+	if ((err != -EINPROGRESS) && err < 0) {
+		dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
+		pm_runtime_put_noidle(dev);
+
+		return;
+	}
+
+	spin_lock_irqsave(&musb->lock, flags);
+	err = musb_queue_resume_work(musb, dsps_check_status, NULL);
+	if (err < 0)
+		dev_err(dev, "%s resume work: %i\n", __func__, err);
+	spin_unlock_irqrestore(&musb->lock, flags);
 
 	pm_runtime_mark_last_busy(dev);
 	pm_runtime_put_autosuspend(dev);
 }
@@ -767,28 +783,13 @@ static int dsps_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, glue);
 	pm_runtime_enable(&pdev->dev);
-	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
-
-	ret = pm_runtime_get_sync(&pdev->dev);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
-		goto err2;
-	}
-
 	ret = dsps_create_musb_pdev(glue, pdev);
 	if (ret)
-		goto err3;
-
-	pm_runtime_mark_last_busy(&pdev->dev);
-	pm_runtime_put_autosuspend(&pdev->dev);
+		goto err;
 
 	return 0;
 
-err3:
-	pm_runtime_put_sync(&pdev->dev);
-err2:
-	pm_runtime_dont_use_autosuspend(&pdev->dev);
+err:
 	pm_runtime_disable(&pdev->dev);
 
 	return ret;
 }
@@ -799,9 +800,6 @@ static int dsps_remove(struct platform_device *pdev)
 
 	platform_device_unregister(glue->musb);
 
-	/* disable usbss clocks */
-	pm_runtime_dont_use_autosuspend(&pdev->dev);
-	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
 	return 0;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 4042ea017985..1acc4864f9f6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -974,8 +974,8 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		goto fail;
 
 	/* REVISIT this rules out high bandwidth periodic transfers */
-	tmp = usb_endpoint_maxp(desc);
-	if (tmp & ~0x07ff) {
+	tmp = usb_endpoint_maxp_mult(desc) - 1;
+	if (tmp) {
 		int ok;
 
 		if (usb_endpoint_dir_in(desc))
@@ -987,12 +987,12 @@ static int musb_gadget_enable(struct usb_ep *ep,
 			musb_dbg(musb, "no support for high bandwidth ISO");
 			goto fail;
 		}
-		musb_ep->hb_mult = (tmp >> 11) & 3;
+		musb_ep->hb_mult = tmp;
 	} else {
 		musb_ep->hb_mult = 0;
 	}
 
-	musb_ep->packet_sz = tmp & 0x7ff;
+	musb_ep->packet_sz = usb_endpoint_maxp(desc);
 	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
 
 	/* enable the interrupts for the endpoint, set the endpoint
@@ -1114,7 +1114,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
 			musb_ep->dma ? "dma, " : "",
 			musb_ep->packet_sz);
 
-	schedule_work(&musb->irq_work);
+	schedule_delayed_work(&musb->irq_work, 0);
 
 fail:
 	spin_unlock_irqrestore(&musb->lock, flags);
@@ -1158,7 +1158,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
 	musb_ep->desc = NULL;
 	musb_ep->end_point.desc = NULL;
 
-	schedule_work(&musb->irq_work);
+	schedule_delayed_work(&musb->irq_work, 0);
 
 	spin_unlock_irqrestore(&(musb->lock), flags);
 
@@ -1222,13 +1222,22 @@ void musb_ep_restart(struct musb *musb, struct musb_request *req)
 		rxstate(musb, req);
 }
 
+static int musb_ep_restart_resume_work(struct musb *musb, void *data)
+{
+	struct musb_request *req = data;
+
+	musb_ep_restart(musb, req);
+
+	return 0;
+}
+
 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 			gfp_t gfp_flags)
 {
 	struct musb_ep		*musb_ep;
 	struct musb_request	*request;
 	struct musb		*musb;
-	int status = 0;
+	int status;
 	unsigned long lockflags;
 
 	if (!ep || !req)
@@ -1245,6 +1254,17 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	if (request->ep != musb_ep)
 		return -EINVAL;
 
+	status = pm_runtime_get(musb->controller);
+	if ((status != -EINPROGRESS) && status < 0) {
+		dev_err(musb->controller,
+			"pm runtime get failed in %s\n",
+			__func__);
+		pm_runtime_put_noidle(musb->controller);
+
+		return status;
+	}
+	status = 0;
+
 	trace_musb_req_enq(request);
 
 	/* request is mine now... */
@@ -1255,7 +1275,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 
 	map_dma_buffer(request, musb, musb_ep);
 
-	pm_runtime_get_sync(musb->controller);
 	spin_lock_irqsave(&musb->lock, lockflags);
 
 	/* don't queue if the ep is down */
@@ -1271,8 +1290,14 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	list_add_tail(&request->list, &musb_ep->req_list);
 
 	/* it this is the head of the queue, start i/o ... */
-	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
-		musb_ep_restart(musb, request);
+	if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
+		status = musb_queue_resume_work(musb,
+						musb_ep_restart_resume_work,
+						request);
+		if (status < 0)
+			dev_err(musb->controller, "%s resume work: %i\n",
+				__func__, status);
+	}
 
 unlock:
 	spin_unlock_irqrestore(&musb->lock, lockflags);
@@ -1969,7 +1994,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
 	 */
 
 	/* Force check of devctl register for PM runtime */
-	schedule_work(&musb->irq_work);
+	schedule_delayed_work(&musb->irq_work, 0);
 
 	pm_runtime_mark_last_busy(musb->controller);
 	pm_runtime_put_autosuspend(musb->controller);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 53bc4ceefe89..f6cdbad00dac 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2237,7 +2237,7 @@ static int musb_urb_enqueue(
 	 * Some musb cores don't support high bandwidth ISO transfers; and
 	 * we don't (yet!) support high bandwidth interrupt transfers.
 	 */
-	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+	qh->hb_mult = usb_endpoint_maxp_mult(epd);
 	if (qh->hb_mult > 1) {
 		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
 
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 61b5f1c3c5bc..0b4595439d51 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -132,7 +132,6 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
 
 		musb_dbg(musb, "Root port resuming, power %02x", power);
 
-		/* later, GetPortStatus will stop RESUME signaling */
 		musb->port1_status |= MUSB_PORT_STAT_RESUME;
 		schedule_delayed_work(&musb->finish_resume_work,
 				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index cc1225485509..8b73214a9ea3 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -277,12 +277,12 @@ static int omap2430_musb_init(struct musb *musb)
 		if (status == -ENXIO)
 			return status;
 
-		pr_err("HS USB OTG: no transceiver configured\n");
+		dev_dbg(dev, "HS USB OTG: no transceiver configured\n");
 		return -EPROBE_DEFER;
 	}
 
 	if (IS_ERR(musb->phy)) {
-		pr_err("HS USB OTG: no PHY configured\n");
+		dev_err(dev, "HS USB OTG: no PHY configured\n");
 		return PTR_ERR(musb->phy);
 	}
 	musb->isr = omap2430_musb_interrupt;
@@ -301,7 +301,7 @@ static int omap2430_musb_init(struct musb *musb)
 
 	musb_writel(musb->mregs, OTG_INTERFSEL, l);
 
-	pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+	dev_dbg(dev, "HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
 			"sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
 			musb_readl(musb->mregs, OTG_REVISION),
 			musb_readl(musb->mregs, OTG_SYSCONFIG),
@@ -513,17 +513,18 @@ static int omap2430_probe(struct platform_device *pdev)
 	}
 
 	pm_runtime_enable(glue->dev);
-	pm_runtime_use_autosuspend(glue->dev);
-	pm_runtime_set_autosuspend_delay(glue->dev, 100);
 
 	ret = platform_device_add(musb);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register musb device\n");
-		goto err2;
+		goto err3;
 	}
 
 	return 0;
 
+err3:
+	pm_runtime_disable(glue->dev);
+
 err2:
 	platform_device_put(musb);
 
@@ -535,10 +536,7 @@ static int omap2430_remove(struct platform_device *pdev)
 {
 	struct omap2430_glue *glue = platform_get_drvdata(pdev);
 
-	pm_runtime_get_sync(glue->dev);
 	platform_device_unregister(glue->musb);
-	pm_runtime_put_sync(glue->dev);
-	pm_runtime_dont_use_autosuspend(glue->dev);
 	pm_runtime_disable(glue->dev);
 
 	return 0;
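The hb_mult/packet_sz rework in the musb_gadget and musb_host hunks above relies on the ch9 helpers this series standardizes on: usb_endpoint_maxp() returns only bits 10:0 of wMaxPacketSize (the packet size in bytes), and usb_endpoint_maxp_mult() returns the number of transactions per microframe (1 to 3) that high-speed, high-bandwidth periodic endpoints encode in bits 12:11. A hedged illustration of what the two helpers compute, not the kernel's literal implementation:

#include <linux/kernel.h>
#include <linux/usb/ch9.h>

static void demo_decode_maxp(const struct usb_endpoint_descriptor *epd)
{
	u16 raw  = le16_to_cpu(epd->wMaxPacketSize);
	int size = raw & 0x7ff;			/* == usb_endpoint_maxp(epd)      */
	int mult = ((raw >> 11) & 0x3) + 1;	/* == usb_endpoint_maxp_mult(epd) */

	pr_info("maxp %d bytes, %d transaction(s)/uframe, %d bytes/uframe max\n",
		size, mult, size * mult);
}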
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 1408245be18e..d0be0eadd0d9 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -186,16 +186,6 @@ static irqreturn_t sunxi_musb_interrupt(int irq, void *__hci)
 	if (musb->int_usb)
 		writeb(musb->int_usb, musb->mregs + SUNXI_MUSB_INTRUSB);
 
-	/*
-	 * sunxi musb often signals babble on low / full speed device
-	 * disconnect, without ever raising MUSB_INTR_DISCONNECT, since
-	 * normally babble never happens treat it as disconnect.
-	 */
-	if ((musb->int_usb & MUSB_INTR_BABBLE) && is_host_active(musb)) {
-		musb->int_usb &= ~MUSB_INTR_BABBLE;
-		musb->int_usb |= MUSB_INTR_DISCONNECT;
-	}
-
 	if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
 		/* ep0 FADDR must be 0 when (re)entering peripheral mode */
 		musb_ep_select(musb->mregs, 0);
@@ -390,6 +380,20 @@ static int sunxi_musb_set_mode(struct musb *musb, u8 mode)
 	return 0;
 }
 
+static int sunxi_musb_recover(struct musb *musb)
+{
+	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	/*
+	 * Schedule a phy_set_mode with the current glue->phy_mode value,
+	 * this will force end the current session.
+	 */
+	set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags);
+	schedule_work(&glue->work);
+
+	return 0;
+}
+
 /*
  * sunxi musb register layout
  * 0x00 - 0x17	fifo regs, 1 long per fifo
@@ -618,6 +622,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
 	.dma_init	= sunxi_musb_dma_controller_create,
 	.dma_exit	= sunxi_musb_dma_controller_destroy,
 	.set_mode	= sunxi_musb_set_mode,
+	.recover	= sunxi_musb_recover,
 	.set_vbus	= sunxi_musb_set_vbus,
 	.pre_root_reset_end	= sunxi_musb_pre_root_reset_end,
 	.post_root_reset_end	= sunxi_musb_post_root_reset_end,
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index df7c9f46be54..e85cc8e4e7a9 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -724,7 +724,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
 			dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
 				usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
 			idle_timeout = jiffies + (1 * HZ);
-			schedule_work(&musb->irq_work);
+			schedule_delayed_work(&musb->irq_work, 0);
 
 		} else /* A-dev state machine */ {
 			dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
@@ -814,7 +814,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
 			break;
 		}
 	}
-	schedule_work(&musb->irq_work);
+	schedule_delayed_work(&musb->irq_work, 0);
 
 	return idle_timeout;
 }
@@ -864,7 +864,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
 		musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
 		if (reg & ~TUSB_PRCM_WNORCS) {
 			musb->is_active = 1;
-			schedule_work(&musb->irq_work);
+			schedule_delayed_work(&musb->irq_work, 0);
 		}
 		dev_dbg(musb->controller, "wake %sactive %02x\n",
 				musb->is_active ? "" : "in", reg);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index b9c409a18faa..61cef7511a50 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -84,6 +84,7 @@ config SAMSUNG_USBPHY
 config TWL6030_USB
 	tristate "TWL6030 USB Transceiver Driver"
 	depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS
+	depends on OF
 	help
 	  Enable this to support the USB OTG transceiver on TWL6030
 	  family chips. This TWL6030 transceiver has the VBUS and ID GND
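Instead of rewriting babble interrupts into fake disconnects, sunxi now provides the optional .recover platform op, so the core's babble handling can ask the glue layer to end the session. musb_core.h wraps the other optional ops in static inline helpers (musb_platform_set_vbus() and friends, above); a wrapper for .recover would presumably follow the same shape -- this is a sketch, not the literal header contents:

static inline int musb_platform_recover(struct musb *musb)
{
	/* optional op: glue layers that cannot recover simply leave it NULL */
	if (!musb->ops->recover)
		return 0;

	return musb->ops->recover(musb);
}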
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 42a1afe36a90..5f5f19813fde 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -134,10 +134,12 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
 		return NULL;
 
 	dev = bus_find_device(&platform_bus_type, NULL, node, match);
+	of_node_put(node);
 	if (!dev)
 		return NULL;
 
 	ctrl_usb = dev_get_drvdata(dev);
+	put_device(dev);
 	if (!ctrl_usb)
 		return NULL;
 
 	return &ctrl_usb->phy_ctrl;
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 8311ba2968cd..89d6e7a5fdb7 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -59,6 +59,15 @@ EXPORT_SYMBOL_GPL(usb_phy_generic_unregister);
 
 static int nop_set_suspend(struct usb_phy *x, int suspend)
 {
+	struct usb_phy_generic *nop = dev_get_drvdata(x->dev);
+
+	if (!IS_ERR(nop->clk)) {
+		if (suspend)
+			clk_disable_unprepare(nop->clk);
+		else
+			clk_prepare_enable(nop->clk);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 8d111ec653e4..042c5a8fd423 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -94,7 +94,7 @@ struct isp1301 {
 
 #if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3)
 
-#if defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE))
+#if IS_REACHABLE(CONFIG_TPS65010)
 
 #include <linux/i2c/tps65010.h>
 
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index a72e8d670adc..628b600b02b1 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -108,7 +108,6 @@ struct twl6030_usb {
 	enum musb_vbus_id_status linkstat;
 	u8			asleep;
 	bool			vbus_enable;
-	const char		*regulator;
 };
 
 #define	comparator_to_twl(x) container_of((x), struct twl6030_usb, comparator)
@@ -166,7 +165,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
 	/* Program MISC2 register and set bit VUSB_IN_VBAT */
 	twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x10, TWL6030_MISC2);
 
-	twl->usb3v3 = regulator_get(twl->dev, twl->regulator);
+	twl->usb3v3 = regulator_get(twl->dev, "usb");
 	if (IS_ERR(twl->usb3v3))
 		return -ENODEV;
 
@@ -341,7 +340,11 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 	int			status, err;
 	struct device_node	*np = pdev->dev.of_node;
 	struct device		*dev = &pdev->dev;
-	struct twl4030_usb_data	*pdata = dev_get_platdata(dev);
+
+	if (!np) {
+		dev_err(dev, "no DT info\n");
+		return -EINVAL;
+	}
 
 	twl = devm_kzalloc(dev, sizeof(*twl), GFP_KERNEL);
 	if (!twl)
@@ -361,18 +364,6 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 		return -EPROBE_DEFER;
 	}
 
-	if (np) {
-		twl->regulator = "usb";
-	} else if (pdata) {
-		if (pdata->features & TWL6032_SUBCLASS)
-			twl->regulator = "ldousb";
-		else
-			twl->regulator = "vusb";
-	} else {
-		dev_err(&pdev->dev, "twl6030 initialized without pdata\n");
-		return -EINVAL;
-	}
-
 	/* init spinlock for workqueue */
 	spin_lock_init(&twl->lock);
 
@@ -436,13 +427,11 @@ static int twl6030_usb_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id twl6030_usb_id_table[] = {
 	{ .compatible = "ti,twl6030-usb" },
 	{}
 };
 MODULE_DEVICE_TABLE(of, twl6030_usb_id_table);
-#endif
 
 static struct platform_driver twl6030_usb_driver = {
 	.probe		= twl6030_usb_probe,
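The phy-am335x-control hunk above pairs each lookup with its release: an of_parse_phandle()-style lookup returns a device_node with an elevated refcount, and bus_find_device() returns a struct device with get_device() already applied, so both references must be dropped once the caller has extracted what it needs. A sketch of the pattern; the struct, function and property names are illustrative, only the put calls mirror the hunk:

/* Names here are illustrative; only the get/put pairing mirrors the hunk. */
struct demo_ctrl {
	struct phy_control phy_ctrl;
};

static int demo_match(struct device *dev, void *data)
{
	struct device_node *node = data;

	return dev->of_node == node;
}

static struct phy_control *demo_get_phy_control(struct device *dev)
{
	struct device_node *node;
	struct demo_ctrl *ctrl;

	node = of_parse_phandle(dev->of_node, "ctrl-module", 0);	/* +1 node ref */
	if (!node)
		return NULL;

	dev = bus_find_device(&platform_bus_type, NULL, node, demo_match); /* +1 dev ref */
	of_node_put(node);				/* done with the node */
	if (!dev)
		return NULL;

	ctrl = dev_get_drvdata(dev);
	put_device(dev);				/* drop the lookup reference */
	if (!ctrl)
		return NULL;

	return &ctrl->phy_ctrl;
}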
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 857e78337324..d1af831f43eb 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -100,10 +100,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
 
 static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
 {
-	if (list_empty(&pipe->list))
-		return NULL;
-
-	return list_first_entry(&pipe->list, struct usbhs_pkt, node);
+	return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
 }
 
 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f61477bed3a8..243ac5ebe46a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
 	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+	{ USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0ff7f38d7800..6e9fc8bcc285 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
 	{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+	{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 21011c0a4c64..48ee04c94a75 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -596,6 +596,12 @@
 #define STK541_PID		0x2109 /* Zigbee Controller */
 
 /*
+ * Texas Instruments
+ */
+#define TI_VID			0x0451
+#define TI_CC3200_LAUNCHPAD_PID	0xC32A	/* SimpleLink Wi-Fi CC3200 LaunchPad */
+
+/*
  * Blackfin gnICE JTAG
  * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
  */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index ffd086733421..1a59f335b063 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
 
 	/* COMMAND STAGE */
 	/* let's send the command via the control pipe */
+	/*
+	 * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack.
+	 * Stack may be vmallocated. So no DMA for us. Make a copy.
+	 */
+	memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
 	result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
 				      US_CBI_ADSC,
 				      USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
-				      us->ifnum, srb->cmnd, srb->cmd_len);
+				      us->ifnum, us->iobuf, srb->cmd_len);
 
 	/* check the return code for the command */
 	usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index c5375e74ffc5..d5edd765c7c7 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -259,7 +259,7 @@ static int vep_enable(struct usb_ep *_ep,
 
 	spin_lock_irqsave(&udc->lock, flags);
 
-	maxp = usb_endpoint_maxp(desc) & 0x7ff;
+	maxp = usb_endpoint_maxp(desc);
 	_ep->maxpacket = maxp;
 	ep->desc = desc;
 	ep->type = usb_endpoint_type(desc);
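The usb-storage change above exists because srb->cmnd can point at an on-stack scsi_cmnd (for example after scsi_eh_prep_cmnd), and with CONFIG_VMAP_STACK the stack may be vmalloc'ed, which cannot be DMA-mapped; the command bytes are therefore bounced through the DMA-safe us->iobuf that the driver already allocates at setup time. The same idea in a generic, self-contained form (function and buffer names are illustrative):

#include <linux/slab.h>
#include <linux/usb.h>

static int demo_send_cmd_dma_safe(struct usb_device *udev, unsigned int pipe,
				  const u8 *cmd, int len)
{
	u8 *buf;
	int ret;

	/* kmalloc memory is DMA-able; stack or vmalloc memory may not be */
	buf = kmemdup(cmd, len, GFP_NOIO);
	if (!buf)
		return -ENOMEM;

	ret = usb_bulk_msg(udev, pipe, buf, len, NULL, 5000);

	kfree(buf);
	return ret;
}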