#define EHCI_TUNE_MULT_TT 1
#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
-#define EHCI_IAA_JIFFIES (HZ/100) /* arbitrary; ~10 msec */
+#define EHCI_IAA_MSECS 10 /* arbitrary */
#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
#define EHCI_SHRINK_JIFFIES (HZ/200) /* async qh unlink delay */
/*-------------------------------------------------------------------------*/
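+/* end_unlink_async() lives in ehci-q.c, included later in this file;
+ * declare it here for the IAA watchdog below.
+ */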
+static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs);
static void ehci_work(struct ehci_hcd *ehci, struct pt_regs *regs);
#include "ehci-hub.c"
/*-------------------------------------------------------------------------*/
-static void ehci_watchdog (unsigned long param)
+static void ehci_iaa_watchdog (unsigned long param)
{
struct ehci_hcd *ehci = (struct ehci_hcd *) param;
unsigned long flags;
+ u32 status;
spin_lock_irqsave (&ehci->lock, flags);
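+ /* armed by iaa_watchdog_start() only while an IAA cycle is pending */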
+ WARN_ON(!ehci->reclaim);
- /* lost IAA irqs wedge things badly; seen with a vt8235 */
+ /* lost IAA irqs wedge things badly; seen first with a vt8235 */
if (ehci->reclaim) {
- u32 status = readl (&ehci->regs->status);
-
+ status = readl (&ehci->regs->status);
if (status & STS_IAA) {
ehci_vdbg (ehci, "lost IAA\n");
COUNT (ehci->stats.lost_iaa);
writel (STS_IAA, &ehci->regs->status);
- ehci->reclaim_ready = 1;
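+ /* finish the unlink that the lost irq should have completed */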
+ end_unlink_async (ehci, NULL);
}
}
- /* stop async processing after it's idled a bit */
+ spin_unlock_irqrestore (&ehci->lock, flags);
+}
+
+static void ehci_watchdog (unsigned long param)
+{
+ struct ehci_hcd *ehci = (struct ehci_hcd *) param;
+ unsigned long flags;
+
+ spin_lock_irqsave (&ehci->lock, flags);
+
+ /* stop async processing after it's idled a bit */
if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
- start_unlink_async (ehci, ehci->async);
+ start_unlink_async (ehci, ehci->async);
/* ehci could run by timer, without IRQs ... */
ehci_work (ehci, NULL);

spin_unlock_irqrestore (&ehci->lock, flags);
}
static void ehci_work (struct ehci_hcd *ehci, struct pt_regs *regs)
{
timer_action_done (ehci, TIMER_IO_WATCHDOG);
- if (ehci->reclaim_ready)
- end_unlink_async (ehci, regs);
/* another CPU may drop ehci->lock during a schedule scan while
* it reports urb completions. this flag guards against bogus
/* no more interrupts ... */
del_timer_sync (&ehci->watchdog);
+ del_timer_sync (&ehci->iaa_watchdog);
spin_lock_irq(&ehci->lock);
if (HC_IS_RUNNING (hcd->state))
ehci->watchdog.function = ehci_watchdog;
ehci->watchdog.data = (unsigned long) ehci;
+ init_timer(&ehci->iaa_watchdog);
+ ehci->iaa_watchdog.function = ehci_iaa_watchdog;
+ ehci->iaa_watchdog.data = (unsigned long) ehci;
+
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
ehci->reclaim = NULL;
- ehci->reclaim_ready = 0;
ehci->next_uframe = -1;
/*
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
COUNT (ehci->stats.reclaim);
- ehci->reclaim_ready = 1;
+ end_unlink_async (ehci, regs);
bh = 1;
}
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- /* if we need to use IAA and it's busy, defer */
- if (qh->qh_state == QH_STATE_LINKED
- && ehci->reclaim
- && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) {
+ // BUG_ON(qh->qh_state != QH_STATE_LINKED);
+
+ /* failfast */
+ if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state) && ehci->reclaim)
+ end_unlink_async (ehci, NULL);
+
+ /* defer till later if busy */
+ if (ehci->reclaim) {
struct ehci_qh *last;
for (last = ehci->reclaim;
last->reclaim;
last = last->reclaim)
continue;
qh->qh_state = QH_STATE_UNLINK_WAIT;
last->reclaim = qh;
- /* bypass IAA if the hc can't care */
- } else if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state) && ehci->reclaim)
- end_unlink_async (ehci, NULL);
-
- /* something else might have unlinked the qh by now */
- if (qh->qh_state == QH_STATE_LINKED)
+ /* start IAA cycle */
+ } else
start_unlink_async (ehci, qh);
}
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
- unlink_async (ehci, qh);
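+ /* start an unlink only if one isn't already in progress */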
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ unlink_async (ehci, qh);
+ break;
+ case QH_STATE_UNLINK:
+ case QH_STATE_UNLINK_WAIT:
+ /* already started */
+ break;
+ case QH_STATE_IDLE:
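+ /* qh is off the schedule already; a dequeue here is unexpected */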
+ WARN_ON(1);
+ break;
+ }
break;
case PIPE_INTERRUPT:
unlink_async (ehci, qh);
/* FALL THROUGH */
case QH_STATE_UNLINK: /* wait for hw to finish? */
+ case QH_STATE_UNLINK_WAIT:
idle_timeout:
spin_unlock_irqrestore (&ehci->lock, flags);
schedule_timeout_uninterruptible(1);
/* async schedule support */
struct ehci_qh *async;
struct ehci_qh *reclaim;
- unsigned reclaim_ready : 1;
unsigned scanning : 1;
/* periodic schedule support */
struct dma_pool *itd_pool; /* itd per iso urb */
struct dma_pool *sitd_pool; /* sitd per split iso urb */
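+ /* watchdog for IAA irqs lost during async unlink */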
+ struct timer_list iaa_watchdog;
struct timer_list watchdog;
unsigned long actions;
unsigned stamp;
}
+static inline void
+iaa_watchdog_start (struct ehci_hcd *ehci)
+{
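+ /* only one IAA cycle, and so one watchdog, pends at a time */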
+ WARN_ON(timer_pending(&ehci->iaa_watchdog));
+ mod_timer (&ehci->iaa_watchdog,
+ jiffies + msecs_to_jiffies(EHCI_IAA_MSECS));
+}
+
+static inline void iaa_watchdog_done (struct ehci_hcd *ehci)
+{
+ del_timer (&ehci->iaa_watchdog);
+}
+
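+/* NOTE: a sketch of the intended IAA handshake, assuming the ehci-q.c
+ * hunks (not shown in this excerpt) follow the same pattern:
+ * start_unlink_async() sets CMD_IAAD and calls iaa_watchdog_start();
+ * end_unlink_async(), run from the IAA irq or from ehci_iaa_watchdog()
+ * when that irq is lost, calls iaa_watchdog_done() before retiring
+ * the qh.
+ */
+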
enum ehci_timer_action {
TIMER_IO_WATCHDOG,
- TIMER_IAA_WATCHDOG,
TIMER_ASYNC_SHRINK,
TIMER_ASYNC_OFF,
};
unsigned long t;
switch (action) {
- case TIMER_IAA_WATCHDOG:
- t = EHCI_IAA_JIFFIES;
- break;
case TIMER_IO_WATCHDOG:
t = EHCI_IO_JIFFIES;
break;
// async queue SHRINK often precedes IAA. while it's ready
// to go OFF neither can matter, and afterwards the IO
// watchdog stops unless there's still periodic traffic.
- if (action != TIMER_IAA_WATCHDOG
- && t > ehci->watchdog.expires
+ if (time_before_eq(t, ehci->watchdog.expires)
&& timer_pending (&ehci->watchdog))
return;
mod_timer (&ehci->watchdog, t);