diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 5608e3268a62287bb89eeae3f6250984a08357db..26900b3b7a4ee1bcef2fe064a1a8148900128fae 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
-/*
- * clear (write) a pio buffer, to clear a parity error.   This routine
- * should only be called when in freeze mode, and the buffer should be
- * canceled afterwards.
- */
-static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
-{
-       u32 __iomem *pbuf;
-       u32 dwcnt; /* dword count to write */
-       if (pnum < dd->ipath_piobcnt2k) {
-               pbuf = (u32 __iomem *) (dd->ipath_pio2kbase + pnum *
-                       dd->ipath_palign);
-               dwcnt = dd->ipath_piosize2k >> 2;
-       }
-       else {
-               pbuf = (u32 __iomem *) (dd->ipath_pio4kbase +
-                       (pnum - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
-               dwcnt = dd->ipath_piosize4k >> 2;
-       }
-       dev_info(&dd->pcidev->dev,
-               "Rewrite PIO buffer %u, to recover from parity error\n",
-               pnum);
-
-       /* no flush required, since already in freeze */
-       writel(dwcnt + 1, pbuf);
-       while (--dwcnt)
-               writel(0, pbuf++);
-}
 
 /*
  * Called when we might have an error that is specific to a particular
  * PIO buffer, and may need to cancel that buffer, so it can be re-used.
- * If rewrite is true, and bits are set in the sendbufferror registers,
- * we'll write to the buffer, for error recovery on parity errors.
  */
-static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
+void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
 {
        u32 piobcnt;
        unsigned long sbuf[4];
@@ -87,12 +57,14 @@ static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
                dd, dd->ipath_kregs->kr_sendbuffererror);
        sbuf[1] = ipath_read_kreg64(
                dd, dd->ipath_kregs->kr_sendbuffererror + 1);
-       if (piobcnt > 128) {
+       if (piobcnt > 128)
                sbuf[2] = ipath_read_kreg64(
                        dd, dd->ipath_kregs->kr_sendbuffererror + 2);
+       if (piobcnt > 192)
                sbuf[3] = ipath_read_kreg64(
                        dd, dd->ipath_kregs->kr_sendbuffererror + 3);
-       }
+       else
+               sbuf[3] = 0;
 
        if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
                int i;
@@ -107,11 +79,8 @@ static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
                }
 
                for (i = 0; i < piobcnt; i++)
-                       if (test_bit(i, sbuf)) {
-                               if (rewrite)
-                                       ipath_clrpiobuf(dd, i);
+                       if (test_bit(i, sbuf))
                                ipath_disarm_piobufs(dd, i, 1);
-                       }
                /* ignore armlaunch errs for a bit */
                dd->ipath_lastcancel = jiffies+3;
        }
@@ -162,7 +131,7 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
 {
        u64 ignore_this_time = 0;
 
-       ipath_disarm_senderrbufs(dd, 0);
+       ipath_disarm_senderrbufs(dd);
        if ((errs & E_SUM_LINK_PKTERRS) &&
            !(dd->ipath_flags & IPATH_LINKACTIVE)) {
                /*
@@ -299,6 +268,18 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
        lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
        ltstate = ipath_ib_linktrstate(dd, ibcs); /* link training state */
 
+       /*
+        * Since going into a recovery state causes the link state to go
+        * down and since recovery is transitory, it is better if we "miss"
+        * ever seeing the link training state go into recovery (i.e.,
+        * ignore this transition for link state special handling purposes)
+        * without even updating ipath_lastibcstat.
+        */
+       if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
+           (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
+           (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
+               goto done;
+
        /*
         * if linkstate transitions into INIT from any of the various down
         * states, or if it transitions from any of the up (INIT or better)
@@ -316,7 +297,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
                }
        } else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
                (dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
-               ltstate <= INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE &&
+               ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
                ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
                int handled;
                handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
@@ -353,7 +334,8 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
                 */
                if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
                    lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-                       if (++dd->ipath_ibpollcnt == 40) {
+                       if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
+                            (++dd->ipath_ibpollcnt == 40)) {
                                dd->ipath_flags |= IPATH_NOCABLE;
                                *dd->ipath_statusp |=
                                        IPATH_STATUS_IB_NOCABLE;
@@ -418,6 +400,8 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
                        dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
                                | IPATH_LINKDOWN | IPATH_LINKARMED |
                                IPATH_NOCABLE);
+                       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+                               ipath_restart_sdma(dd);
                        signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
                        /* LED active not handled in chip _f_updown */
                        dd->ipath_f_setextled(dd, lstate, ltstate);
@@ -460,10 +444,12 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 
 skip_ibchange:
        dd->ipath_lastibcstat = ibcs;
+done:
+       return;
 }
 
 static void handle_supp_msgs(struct ipath_devdata *dd,
-                            unsigned supp_msgs, char *msg, int msgsz)
+                            unsigned supp_msgs, char *msg, u32 msgsz)
 {
        /*
         * Print the message unless it's ibc status change only, which
@@ -471,12 +457,19 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
         */
        if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
                int iserr;
-               iserr = ipath_decode_err(msg, msgsz,
+               ipath_err_t mask;
+               iserr = ipath_decode_err(dd, msg, msgsz,
                                         dd->ipath_lasterror &
                                         ~INFINIPATH_E_IBSTATUSCHANGED);
-               if (dd->ipath_lasterror &
-                       ~(INFINIPATH_E_RRCVEGRFULL |
-                       INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
+
+               mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+                       INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
+
+               /* if we're in debug, then don't mask SDMADISABLED msgs */
+               if (ipath_debug & __IPATH_DBG)
+                       mask &= ~INFINIPATH_E_SDMADISABLED;
+
+               if (dd->ipath_lasterror & ~mask)
                        ipath_dev_err(dd, "Suppressed %u messages for "
                                      "fast-repeating errors (%s) (%llx)\n",
                                      supp_msgs, msg,
@@ -503,7 +496,7 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
 
 static unsigned handle_frequent_errors(struct ipath_devdata *dd,
                                       ipath_err_t errs, char *msg,
-                                      int msgsz, int *noprint)
+                                      u32 msgsz, int *noprint)
 {
        unsigned long nc;
        static unsigned long nextmsg_time;
@@ -533,19 +526,125 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
        return supp_msgs;
 }
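
Only the declarations and the return of handle_frequent_errors() survive the hunk above; as context, here is a minimal sketch (hypothetical helper name, illustrative thresholds, not the driver's exact logic) of the jiffies-based throttling that the supp_msgs return value represents:

	/*
	 * Illustrative only: once messages repeat quickly, stop printing each
	 * one and report how many were skipped when the window expires.
	 * Relies on jiffies, time_after() and HZ from <linux/jiffies.h>.
	 */
	static unsigned throttle_errmsgs(void)
	{
		static unsigned long nextmsg_time;	/* end of current window */
		static unsigned nmsgs, supp_msgs;
		unsigned long nc = jiffies;
		unsigned ret = 0;

		if (nmsgs && time_after(nc, nextmsg_time)) {
			/* window over: hand back the count of skipped messages */
			ret = supp_msgs;
			supp_msgs = 0;
			nmsgs = 0;
		}

		if (++nmsgs == 10)
			/* getting chatty: open a roughly 5 second window */
			nextmsg_time = nc + 5 * HZ;
		else if (nmsgs > 10)
			supp_msgs++;

		return ret;	/* nonzero => caller prints one summary line */
	}
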
 
+static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
+{
+       unsigned long flags;
+       int expected;
+
+       if (ipath_debug & __IPATH_DBG) {
+               char msg[128];
+               ipath_decode_err(dd, msg, sizeof msg, errs &
+                       INFINIPATH_E_SDMAERRS);
+               ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
+       }
+       if (ipath_debug & __IPATH_VERBDBG) {
+               unsigned long tl, hd, status, lengen;
+               tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
+               hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
+               status = ipath_read_kreg64(dd,
+                       dd->ipath_kregs->kr_senddmastatus);
+               lengen = ipath_read_kreg64(dd,
+                       dd->ipath_kregs->kr_senddmalengen);
+               ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
+                       "lengen 0x%lx\n", tl, hd, status, lengen);
+       }
+
+       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+       __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+       expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
+       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+       if (!expected)
+               ipath_cancel_sends(dd, 1);
+}
+
+static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
+{
+       unsigned long flags;
+       int expected;
+
+       if ((istat & INFINIPATH_I_SDMAINT) &&
+           !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+               ipath_sdma_intr(dd);
+
+       if (istat & INFINIPATH_I_SDMADISABLED) {
+               expected = test_bit(IPATH_SDMA_ABORTING,
+                       &dd->ipath_sdma_status);
+               ipath_dbg("%s SDmaDisabled intr\n",
+                       expected ? "expected" : "unexpected");
+               spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
+               __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
+               spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
+               if (!expected)
+                       ipath_cancel_sends(dd, 1);
+               if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
+                       tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
+       }
+}
+
+static int handle_hdrq_full(struct ipath_devdata *dd)
+{
+       int chkerrpkts = 0;
+       u32 hd, tl;
+       u32 i;
+
+       ipath_stats.sps_hdrqfull++;
+       for (i = 0; i < dd->ipath_cfgports; i++) {
+               struct ipath_portdata *pd = dd->ipath_pd[i];
+
+               if (i == 0) {
+                       /*
+                        * For kernel receive queues, we just want to know
+                        * if there are packets in the queue that we can
+                        * process.
+                        */
+                       if (pd->port_head != ipath_get_hdrqtail(pd))
+                               chkerrpkts |= 1 << i;
+                       continue;
+               }
+
+               /* Skip if user context is not open */
+               if (!pd || !pd->port_cnt)
+                       continue;
+
+               /* Don't report the same point multiple times. */
+               if (dd->ipath_flags & IPATH_NODMA_RTAIL)
+                       tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
+               else
+                       tl = ipath_get_rcvhdrtail(pd);
+               if (tl == pd->port_lastrcvhdrqtail)
+                       continue;
+
+               hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
+               if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
+                       pd->port_lastrcvhdrqtail = tl;
+                       pd->port_hdrqfull++;
+                       /* flush hdrqfull so that poll() sees it */
+                       wmb();
+                       wake_up_interruptible(&pd->port_wait);
+               }
+       }
+
+       return chkerrpkts;
+}
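
The queue-full test above (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) is a standard circular-buffer wrap check. A standalone illustration with made-up names, assuming a ring of qlast + 1 slots where the producer's next write slot must never land on the consumer's read slot:

	#include <stdbool.h>

	/*
	 * Not driver code: the ring is full when the producer's next slot
	 * (tail) is immediately behind the consumer's slot (head), so one
	 * more write would make the ring look empty; the second clause
	 * handles the wrap from the last slot back to 0.
	 */
	static bool hdrq_is_full(unsigned hd, unsigned tl, unsigned qlast)
	{
		return hd == tl + 1 || (hd == 0 && tl == qlast);
	}
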
+
 static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
        char msg[128];
        u64 ignore_this_time = 0;
-       int i, iserr = 0;
+       u64 iserr = 0;
        int chkerrpkts = 0, noprint = 0;
        unsigned supp_msgs;
        int log_idx;
 
-       supp_msgs = handle_frequent_errors(dd, errs, msg, sizeof msg, &noprint);
+       /*
+        * don't report errors that are masked, either at init
+        * (not set in ipath_errormask), or temporarily (set in
+        * ipath_maskederrs)
+        */
+       errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
 
-       /* don't report errors that are masked */
-       errs &= ~dd->ipath_maskederrs;
+       supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
+               &noprint);
 
        /* do these first, they are most important */
        if (errs & INFINIPATH_E_HARDWARE) {
@@ -560,6 +659,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                }
        }
 
+       if (errs & INFINIPATH_E_SDMAERRS)
+               handle_sdma_errors(dd, errs);
+
        if (!noprint && (errs & ~dd->ipath_e_bitsextant))
                ipath_dev_err(dd, "error interrupt with unknown errors "
                              "%llx set\n", (unsigned long long)
@@ -590,18 +692,19 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                 * ones on this particular interrupt, which also isn't great
                 */
                dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
+
                dd->ipath_errormask &= ~dd->ipath_maskederrs;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-                       dd->ipath_errormask);
-               s_iserr = ipath_decode_err(msg, sizeof msg,
-                       dd->ipath_maskederrs);
+                                dd->ipath_errormask);
+               s_iserr = ipath_decode_err(dd, msg, sizeof msg,
+                                          dd->ipath_maskederrs);
 
                if (dd->ipath_maskederrs &
-                       ~(INFINIPATH_E_RRCVEGRFULL |
-                       INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
+                   ~(INFINIPATH_E_RRCVEGRFULL |
+                     INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
                        ipath_dev_err(dd, "Temporarily disabling "
                            "error(s) %llx reporting; too frequent (%s)\n",
-                               (unsigned long long)dd->ipath_maskederrs,
+                               (unsigned long long) dd->ipath_maskederrs,
                                msg);
                else {
                        /*
@@ -643,26 +746,43 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                          INFINIPATH_E_IBSTATUSCHANGED);
        }
 
-       /* likely due to cancel, so suppress */
+       if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
+               dd->ipath_spectriggerhit++;
+               ipath_dbg("%lu special trigger hits\n",
+                       dd->ipath_spectriggerhit);
+       }
+
+       /* likely due to cancel; so suppress message unless verbose */
        if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
                dd->ipath_lastcancel > jiffies) {
-               ipath_dbg("Suppressed armlaunch/spktlen after error send cancel\n");
+               /* armlaunch takes precedence; it often causes both. */
+               ipath_cdbg(VERBOSE,
+                       "Suppressed %s error (%llx) after sendbuf cancel\n",
+                       (errs &  INFINIPATH_E_SPIOARMLAUNCH) ?
+                       "armlaunch" : "sendpktlen", (unsigned long long)errs);
                errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
        }
 
        if (!errs)
                return 0;
 
-       if (!noprint)
+       if (!noprint) {
+               ipath_err_t mask;
                /*
-                * the ones we mask off are handled specially below or above
+                * The ones we mask off are handled specially below
+                * or above.  Also mask SDMADISABLED by default as it
+                * is too chatty.
                 */
-               ipath_decode_err(msg, sizeof msg,
-                                errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
-                                         INFINIPATH_E_RRCVEGRFULL |
-                                         INFINIPATH_E_RRCVHDRFULL |
-                                         INFINIPATH_E_HARDWARE));
-       else
+               mask = INFINIPATH_E_IBSTATUSCHANGED |
+                       INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
+                       INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
+
+               /* if we're in debug, then don't mask SDMADISABLED msgs */
+               if (ipath_debug & __IPATH_DBG)
+                       mask &= ~INFINIPATH_E_SDMADISABLED;
+
+               ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
+       } else
                /* so we don't need if (!noprint) at strlcat's below */
                *msg = 0;
 
@@ -687,40 +807,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
         * fast_stats, no more than every 5 seconds, user ports get printed
         * on close
         */
-       if (errs & INFINIPATH_E_RRCVHDRFULL) {
-               u32 hd, tl;
-               ipath_stats.sps_hdrqfull++;
-               for (i = 0; i < dd->ipath_cfgports; i++) {
-                       struct ipath_portdata *pd = dd->ipath_pd[i];
-                       if (i == 0) {
-                               hd = pd->port_head;
-                               tl = (u32) le64_to_cpu(
-                                       *dd->ipath_hdrqtailptr);
-                       } else if (pd && pd->port_cnt &&
-                                  pd->port_rcvhdrtail_kvaddr) {
-                               /*
-                                * don't report same point multiple times,
-                                * except kernel
-                                */
-                               tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
-                               if (tl == pd->port_lastrcvhdrqtail)
-                                       continue;
-                               hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
-                                                      i);
-                       } else
-                               continue;
-                       if (hd == (tl + 1) ||
-                           (!hd && tl == dd->ipath_hdrqlast)) {
-                               if (i == 0)
-                                       chkerrpkts = 1;
-                               pd->port_lastrcvhdrqtail = tl;
-                               pd->port_hdrqfull++;
-                               /* flush hdrqfull so that poll() sees it */
-                               wmb();
-                               wake_up_interruptible(&pd->port_wait);
-                       }
-               }
-       }
+       if (errs & INFINIPATH_E_RRCVHDRFULL)
+               chkerrpkts |= handle_hdrq_full(dd);
        if (errs & INFINIPATH_E_RRCVEGRFULL) {
                struct ipath_portdata *pd = dd->ipath_pd[0];
 
@@ -731,9 +819,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                 * vs user)
                 */
                ipath_stats.sps_etidfull++;
-               if (pd->port_head !=
-                   (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
-                       chkerrpkts = 1;
+               if (pd->port_head != ipath_get_hdrqtail(pd))
+                       chkerrpkts |= 1;
        }
 
        /*
@@ -772,9 +859,6 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
        if (!noprint && *msg) {
                if (iserr)
                        ipath_dev_err(dd, "%s error\n", msg);
-               else
-                       dev_info(&dd->pcidev->dev, "%s packet problems\n",
-                               msg);
        }
        if (dd->ipath_state_wanted & dd->ipath_flags) {
                ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
@@ -786,15 +870,14 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
        return chkerrpkts;
 }
 
-
 /*
  * try to cleanup as much as possible for anything that might have gone
  * wrong while in freeze mode, such as pio buffers being written by user
  * processes (causing armlaunch), send errors due to going into freeze mode,
  * etc., and try to avoid causing extra interrupts while doing so.
  * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it for anything changing while in freeze mode
- * (we don't want to wait for the next pio buffer state change).
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
  * Make sure that we don't lose any important interrupts by using the chip
  * feature that says that writing 0 to a bit in *clear that is set in
  * *status will cause an interrupt to be generated again (if allowed by
@@ -802,48 +885,21 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
  */
 void ipath_clear_freeze(struct ipath_devdata *dd)
 {
-       int i, im;
-       u64 val;
-       unsigned long flags;
-
        /* disable error interrupts, to avoid confusion */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
 
        /* also disable interrupts; errormask is sometimes overwritten */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
 
-       /*
-        * clear all sends, because they have may been
-        * completed by usercode while in freeze mode, and
-        * therefore would not be sent, and eventually
-        * might cause the process to run out of bufs
-        */
-       ipath_cancel_sends(dd, 0);
+       ipath_cancel_sends(dd, 1);
+
+       /* clear the freeze, and be sure chip saw it */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
                         dd->ipath_control);
-
-       /* ensure pio avail updates continue */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        dd->ipath_sendctrl);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
-       /*
-        * We just enabled pioavailupdate, so dma copy is almost certainly
-        * not yet right, so read the registers directly.  Similar to init
-        */
-       for (i = 0; i < dd->ipath_pioavregs; i++) {
-               /* deal with 6110 chip bug */
-               im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
-                       i ^ 1 : i;
-               val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
-               dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
-               dd->ipath_pioavailshadow[i] = val;
-       }
+       /* force in-memory update now we are out of freeze */
+       ipath_force_pio_avail_update(dd);
 
        /*
         * force new interrupt if any hwerr, error or interrupt bits are
@@ -958,7 +1014,7 @@ set:
  * process was waiting for a packet to arrive, and didn't want
  * to poll
  */
-static void handle_urcv(struct ipath_devdata *dd, u32 istat)
+static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 {
        u64 portr;
        int i;
@@ -974,12 +1030,13 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
         * and ipath_poll_next()...
         */
        rmb();
-       portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
-                dd->ipath_i_rcvavail_mask)
-               | ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
-                  dd->ipath_i_rcvurg_mask);
+       portr = ((istat >> dd->ipath_i_rcvavail_shift) &
+                dd->ipath_i_rcvavail_mask) |
+               ((istat >> dd->ipath_i_rcvurg_shift) &
+                dd->ipath_i_rcvurg_mask);
        for (i = 1; i < dd->ipath_cfgports; i++) {
                struct ipath_portdata *pd = dd->ipath_pd[i];
+
                if (portr & (1 << i) && pd && pd->port_cnt) {
                        if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
                                               &pd->port_flag)) {
@@ -996,7 +1053,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
        }
        if (rcvdint) {
                /* only want to take one interrupt, so turn off the rcv
-                * interrupt for all the ports that we did the wakeup on
+                * interrupt for all the ports that we set the rcv_waiting
                 * (but never for kernel port)
                 */
                ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
@@ -1007,12 +1064,11 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 irqreturn_t ipath_intr(int irq, void *data)
 {
        struct ipath_devdata *dd = data;
-       u32 istat, chk0rcv = 0;
+       u64 istat, chk0rcv = 0;
        ipath_err_t estat = 0;
        irqreturn_t ret;
        static unsigned unexpected = 0;
-       static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
-                (1U<<INFINIPATH_I_RCVURG_SHIFT);
+       u64 kportrbits;
 
        ipath_stats.sps_ints++;
 
@@ -1061,17 +1117,17 @@ irqreturn_t ipath_intr(int irq, void *data)
 
        if (unlikely(istat & ~dd->ipath_i_bitsextant))
                ipath_dev_err(dd,
-                             "interrupt with unknown interrupts %x set\n",
-                             istat & (u32) ~ dd->ipath_i_bitsextant);
-       else
-               ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
+                             "interrupt with unknown interrupts %Lx set\n",
+                             istat & ~dd->ipath_i_bitsextant);
+       else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
+               ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
 
-       if (unlikely(istat & INFINIPATH_I_ERROR)) {
+       if (istat & INFINIPATH_I_ERROR) {
                ipath_stats.sps_errints++;
                estat = ipath_read_kreg64(dd,
                                          dd->ipath_kregs->kr_errorstatus);
                if (!estat)
-                       dev_info(&dd->pcidev->dev, "error interrupt (%x), "
+                       dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
                                 "but no error bits set!\n", istat);
                else if (estat == -1LL)
                        /*
@@ -1081,9 +1137,7 @@ irqreturn_t ipath_intr(int irq, void *data)
                        ipath_dev_err(dd, "Read of error status failed "
                                      "(all bits set); ignoring\n");
                else
-                       if (handle_errors(dd, estat))
-                               /* force calling ipath_kreceive() */
-                               chk0rcv = 1;
+                       chk0rcv |= handle_errors(dd, estat);
        }
 
        if (istat & INFINIPATH_I_GPIO) {
@@ -1101,8 +1155,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 
                gpiostatus = ipath_read_kreg32(
                        dd, dd->ipath_kregs->kr_gpio_status);
-               /* First the error-counter case.
-                */
+               /* First the error-counter case. */
                if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
                    (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
                        /* want to clear the bits we see asserted. */
@@ -1164,7 +1217,6 @@ irqreturn_t ipath_intr(int irq, void *data)
                                        (u64) to_clear);
                }
        }
-       chk0rcv |= istat & port0rbits;
 
        /*
         * Clear the interrupt bits we found set, unless they are receive
@@ -1177,22 +1229,25 @@ irqreturn_t ipath_intr(int irq, void *data)
        ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
        /*
-        * handle port0 receive  before checking for pio buffers available,
-        * since receives can overflow; piobuf waiters can afford a few
-        * extra cycles, since they were waiting anyway, and user's waiting
-        * for receive are at the bottom.
+        * Handle kernel receive queues before checking for pio buffers
+        * available since receives can overflow; piobuf waiters can afford
+        * a few extra cycles, since they were waiting anyway, and user's
+        * waiting for receive are at the bottom.
         */
-       if (chk0rcv) {
+       kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
+               (1ULL << dd->ipath_i_rcvurg_shift);
+       if (chk0rcv || (istat & kportrbits)) {
+               istat &= ~kportrbits;
                ipath_kreceive(dd->ipath_pd[0]);
-               istat &= ~port0rbits;
        }
 
-       if (istat & ((dd->ipath_i_rcvavail_mask <<
-                     INFINIPATH_I_RCVAVAIL_SHIFT)
-                    | (dd->ipath_i_rcvurg_mask <<
-                       INFINIPATH_I_RCVURG_SHIFT)))
+       if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
+                    (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
                handle_urcv(dd, istat);
 
+       if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
+               handle_sdma_intr(dd, istat);
+
        if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
                unsigned long flags;
 
@@ -1203,6 +1258,7 @@ irqreturn_t ipath_intr(int irq, void *data)
                ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
                spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
+               /* always process; sdma verbs use PIO for acks and VL15 */
                handle_layer_pioavail(dd);
        }