author     Ikjoon Jang <ikjn@chromium.org>                    2021-01-13 11:05:11 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2021-01-26 15:54:04 +0100
commit     1d69f9d901ef14d81c3b004e3282b8cc7b456280 (patch)
tree       4b2e86af69021e903bb87ccc269a00dc56d4b837 /drivers/usb/host/xhci-mtk-sch.c
parent     usb: gadget: aspeed: add missing of_node_put (diff)
usb: xhci-mtk: fix unreleased bandwidth data
xhci-mtk needs the XHCI_MTK_HOST quirk functions in add_endpoint() and
drop_endpoint() to handle its own software bandwidth management. It stores
bandwidth data in an internal table every time add_endpoint() is called,
and drops it in drop_endpoint(). But when bandwidth allocation fails at one
endpoint, all earlier allocations from the same interface could still remain
in the table.

This patch moves the bandwidth management code into the check_bandwidth()
and reset_bandwidth() paths. To do so, it also adds those functions to
xhci_driver_overrides and lets mtk-xhci release all failed endpoints in the
reset_bandwidth() path.

Fixes: 08e469de87a2 ("usb: xhci-mtk: supports bandwidth scheduling with multi-TT")
Signed-off-by: Ikjoon Jang <ikjn@chromium.org>
Link: https://lore.kernel.org/r/20210113180444.v6.1.Id0d31b5f3ddf5e734d2ab11161ac5821921b1e1e@changeid
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
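For context, a minimal sketch of how the new hooks would typically be wired
into the driver overrides. This wiring lives in xhci-mtk.c rather than in
xhci-mtk-sch.c, so it is outside the diff shown below, and the field names
assume the companion patch in this series that adds check_bandwidth and
reset_bandwidth hooks to struct xhci_driver_overrides:

/*
 * Sketch only, not part of this patch: xhci-mtk-sch.c exports
 * xhci_mtk_check_bandwidth()/xhci_mtk_reset_bandwidth(), and xhci-mtk.c
 * would hook them up roughly like this (assuming the companion xhci patch
 * that introduces these fields in struct xhci_driver_overrides).
 */
static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
	.reset           = xhci_mtk_setup,
	.check_bandwidth = xhci_mtk_check_bandwidth,
	.reset_bandwidth = xhci_mtk_reset_bandwidth,
};

With the allocation deferred to check_bandwidth(), a failed request only
leaves entries on the new pending list, which xhci_mtk_reset_bandwidth()
can then drop wholesale instead of leaving stale bandwidth data behind.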
Diffstat (limited to 'drivers/usb/host/xhci-mtk-sch.c')
-rw-r--r--   drivers/usb/host/xhci-mtk-sch.c   123
1 file changed, 86 insertions(+), 37 deletions(-)
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 45c54d56ecbd..a313e75ff1c6 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -200,6 +200,7 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
sch_ep->sch_tt = tt;
sch_ep->ep = ep;
+ INIT_LIST_HEAD(&sch_ep->tt_endpoint);
return sch_ep;
}
@@ -583,6 +584,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
mtk->sch_array = sch_array;
+ INIT_LIST_HEAD(&mtk->bw_ep_list_new);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
@@ -601,19 +604,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_virt_device *virt_dev;
- struct mu3h_sch_bw_info *sch_bw;
struct mu3h_sch_ep_info *sch_ep;
- struct mu3h_sch_bw_info *sch_array;
unsigned int ep_index;
- int bw_index;
- int ret = 0;
xhci = hcd_to_xhci(hcd);
virt_dev = xhci->devs[udev->slot_id];
ep_index = xhci_get_endpoint_index(&ep->desc);
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
- sch_array = mtk->sch_array;
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
@@ -632,39 +630,34 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
return 0;
}
- bw_index = get_bw_index(xhci, udev, ep);
- sch_bw = &sch_array[bw_index];
-
sch_ep = create_sch_ep(udev, ep, ep_ctx);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(udev, ep_ctx, sch_ep);
- ret = check_sch_bw(udev, sch_bw, sch_ep);
- if (ret) {
- xhci_err(xhci, "Not enough bandwidth!\n");
- if (is_fs_or_ls(udev->speed))
- drop_tt(udev);
-
- kfree(sch_ep);
- return -ENOSPC;
- }
+ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_list_new);
- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_add_ep_quirk);
- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
- | EP_BREPEAT(sch_ep->repeat));
+static void xhci_mtk_drop_ep(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
+ struct mu3h_sch_ep_info *sch_ep)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
+ int bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ struct mu3h_sch_bw_info *sch_bw = &mtk->sch_array[bw_index];
- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
- sch_ep->offset, sch_ep->repeat);
+ update_bus_bw(sch_bw, sch_ep, 0);
+ list_del(&sch_ep->endpoint);
- return 0;
+ if (sch_ep->sch_tt) {
+ list_del(&sch_ep->tt_endpoint);
+ drop_tt(udev);
+ }
+ kfree(sch_ep);
}
-EXPORT_SYMBOL_GPL(xhci_mtk_add_ep_quirk);
void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
@@ -675,7 +668,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_virt_device *virt_dev;
struct mu3h_sch_bw_info *sch_array;
struct mu3h_sch_bw_info *sch_bw;
- struct mu3h_sch_ep_info *sch_ep;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
int bw_index;
xhci = hcd_to_xhci(hcd);
@@ -694,17 +687,73 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
+ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
if (sch_ep->ep == ep) {
- update_bus_bw(sch_bw, sch_ep, 0);
- list_del(&sch_ep->endpoint);
- if (is_fs_or_ls(udev->speed)) {
- list_del(&sch_ep->tt_endpoint);
- drop_tt(udev);
- }
- kfree(sch_ep);
- break;
+ xhci_mtk_drop_ep(mtk, udev, sch_ep);
}
}
}
EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
+
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index, ret;
+
+ dev_dbg(&udev->dev, "%s\n", __func__);
+
+ list_for_each_entry(sch_ep, &mtk->bw_ep_list_new, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ ret = check_sch_bw(udev, sch_bw, sch_ep);
+ if (ret) {
+ xhci_err(xhci, "Not enough bandwidth!\n");
+ return -ENOSPC;
+ }
+ }
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_list_new, endpoint) {
+ struct xhci_ep_ctx *ep_ctx;
+ struct usb_host_endpoint *ep = sch_ep->ep;
+ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ bw_index = get_bw_index(xhci, udev, ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+ | EP_BCSCOUNT(sch_ep->cs_count)
+ | EP_BBM(sch_ep->burst_mode));
+ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+ | EP_BREPEAT(sch_ep->repeat));
+
+ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+ sch_ep->offset, sch_ep->repeat);
+ }
+
+ return xhci_check_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
+
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+
+ dev_dbg(&udev->dev, "%s\n", __func__);
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_list_new, endpoint) {
+ xhci_mtk_drop_ep(mtk, udev, sch_ep);
+ }
+
+ xhci_reset_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);