}
EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter);
+/**
+ * xhci_rh_bw_cleanup - release all roothub bandwidth bookkeeping
+ * @xhci: xHCI host controller being torn down
+ *
+ * For each root port, empties the per-interval endpoint lists and
+ * unlinks/frees every TT bandwidth entry.  Safe to call even when
+ * xhci->rh_bw was never allocated (returns immediately).
+ *
+ * NOTE(review): the pre-refactor code in xhci_mem_cleanup() emptied the
+ * interval endpoint lists *before* xhci_free_virt_devices_depth_first(),
+ * which suggests those list nodes are embedded in the virt-device
+ * structures.  Confirm the caller invokes this before the virt devices
+ * are freed; otherwise list_del_init() below walks freed memory.
+ */
+static void xhci_rh_bw_cleanup(struct xhci_hcd *xhci)
+{
+	struct xhci_root_port_bw_info *rh_bw;
+	struct xhci_tt_bw_info *tt_info, *tt_next;
+	struct list_head *eps, *ep, *ep_next;
+
+	/* Guard here rather than at every call site. */
+	if (!xhci->rh_bw)
+		return;
+
+	for (int i = 0; i < xhci->max_ports; i++) {
+		rh_bw = &xhci->rh_bw[i];
+
+		/*
+		 * Detach endpoints queued on each interval table first,
+		 * matching the order the pre-refactor code used.  Entries
+		 * are only unlinked, not freed — they are owned elsewhere.
+		 */
+		for (int j = 0; j < XHCI_MAX_INTERVAL; j++) {
+			eps = &rh_bw->bw_table.interval_bw[j].endpoints;
+
+			list_for_each_safe(ep, ep_next, eps)
+				list_del_init(ep);
+		}
+
+		/* Unlink and free every TT bandwidth entry for this port. */
+		list_for_each_entry_safe(tt_info, tt_next, &rh_bw->tts, tt_list) {
+			list_del(&tt_info->tt_list);
+			kfree(tt_info);
+		}
+	}
+}
+
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- int i, j;
+ int i;
cancel_delayed_work_sync(&xhci->cmd_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
xhci_cleanup_command_queue(xhci);
- for (i = 0; i < xhci->max_ports && xhci->rh_bw; i++) {
- struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
- for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
- struct list_head *ep = &bwt->interval_bw[j].endpoints;
- while (!list_empty(ep))
- list_del_init(ep->next);
- }
- }
-
for (i = xhci->max_slots; i > 0; i--)
xhci_free_virt_devices_depth_first(xhci, i);
scratchpad_free(xhci);
- if (!xhci->rh_bw)
- goto no_bw;
-
- for (i = 0; i < xhci->max_ports; i++) {
- struct xhci_tt_bw_info *tt, *n;
- list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
- list_del(&tt->tt_list);
- kfree(tt);
- }
- }
+ if (xhci->rh_bw)
+ xhci_rh_bw_cleanup(xhci);
-no_bw:
xhci->cmd_ring_reserved_trbs = 0;
xhci->usb2_rhub.num_ports = 0;
xhci->usb3_rhub.num_ports = 0;