IB/mthca: Use uninitialized_var() for f0

diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 8fe6fee7a97ae21f036c4866e2236256e794a0e1..0e9ef24f6638d428de406330da42ed5c18d3f82c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -37,6 +37,7 @@
 
 #include <linux/string.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #include <asm/io.h>
 
@@ -295,7 +296,7 @@ static int to_mthca_st(int transport)
        }
 }
 
-static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
+static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
                        int attr_mask)
 {
        if (attr_mask & IB_QP_PKEY_INDEX)
@@ -327,7 +328,7 @@ static void init_port(struct mthca_dev *dev, int port)
                mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
 }
 
-static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
                                  int attr_mask)
 {
        u8 dest_rd_atomic;
@@ -510,7 +511,7 @@ out:
        return err;
 }
 
-static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
                          struct mthca_qp_path *path, u8 port)
 {
        path->g_mylmc     = ah->src_path_bits & 0x7f;
@@ -538,12 +539,12 @@ static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
        return 0;
 }
 
-int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-                   struct ib_udata *udata)
+static int __mthca_modify_qp(struct ib_qp *ibqp,
+                            const struct ib_qp_attr *attr, int attr_mask,
+                            enum ib_qp_state cur_state, enum ib_qp_state new_state)
 {
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
-       enum ib_qp_state cur_state, new_state;
        struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
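
A note on the const-ification in the hunks above: __mthca_modify_qp() takes a
const struct ib_qp_attr * so that it can be called with the file-static const
dummy_init_attr defined further down in this diff, and that in turn requires
store_attrs(), get_hw_access_flags() and mthca_path_set() to accept const
pointers as well.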
@@ -551,60 +552,6 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
        u8 status;
        int err = -EINVAL;
 
-       mutex_lock(&qp->mutex);
-
-       if (attr_mask & IB_QP_CUR_STATE) {
-               cur_state = attr->cur_qp_state;
-       } else {
-               spin_lock_irq(&qp->sq.lock);
-               spin_lock(&qp->rq.lock);
-               cur_state = qp->state;
-               spin_unlock(&qp->rq.lock);
-               spin_unlock_irq(&qp->sq.lock);
-       }
-
-       new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-       if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
-               mthca_dbg(dev, "Bad QP transition (transport %d) "
-                         "%d->%d with attr 0x%08x\n",
-                         qp->transport, cur_state, new_state,
-                         attr_mask);
-               goto out;
-       }
-
-       if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-               err = 0;
-               goto out;
-       }
-
-       if ((attr_mask & IB_QP_PKEY_INDEX) &&
-            attr->pkey_index >= dev->limits.pkey_table_len) {
-               mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
-                         attr->pkey_index, dev->limits.pkey_table_len-1);
-               goto out;
-       }
-
-       if ((attr_mask & IB_QP_PORT) &&
-           (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
-               mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
-               goto out;
-       }
-
-       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-           attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
-               mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
-                         attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
-               goto out;
-       }
-
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-           attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
-               mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
-                         attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
-               goto out;
-       }
-
        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
@@ -701,6 +648,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }
 
+       if (ibqp->qp_type == IB_QPT_RC &&
+           cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+               u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;
+
+               if (mthca_is_memfree(dev))
+                       qp_context->rlkey_arbel_sched_queue |= sched_queue;
+               else
+                       qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);
+
+               qp_param->opt_param_mask |=
+                       cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
+       }
+
        if (attr_mask & IB_QP_TIMEOUT) {
                qp_context->pri_path.ackto = attr->timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
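
The new block above programs the hardware scheduling queue for RC QPs on the
INIT->RTR transition: userspace QPs (ibqp->uobject set) land on schedule
queue 0x2 and kernel QPs on 0x1, with MTHCA_QP_OPTPAR_SCHED_QUEUE set so the
firmware actually applies the field; the memfree (Arbel) and Tavor branches
differ only in where the field lives in the QP context. Keeping kernel and
user RC QPs on separate schedule queues presumably prevents one class of QPs
from starving the other, though the diff itself does not state the rationale.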
@@ -878,6 +838,98 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 
 out_mailbox:
        mthca_free_mailbox(dev, mailbox);
+out:
+       return err;
+}
+
+static const struct ib_qp_attr dummy_init_attr = { .port_num = 1 };
+static const int dummy_init_attr_mask[] = {
+       [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
+                       IB_QP_PORT                      |
+                       IB_QP_QKEY),
+       [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
+                       IB_QP_PORT                      |
+                       IB_QP_ACCESS_FLAGS),
+       [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
+                       IB_QP_PORT                      |
+                       IB_QP_ACCESS_FLAGS),
+       [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
+                       IB_QP_QKEY),
+       [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
+                       IB_QP_QKEY),
+};
+
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+                   struct ib_udata *udata)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_qp *qp = to_mqp(ibqp);
+       enum ib_qp_state cur_state, new_state;
+       int err = -EINVAL;
+
+       mutex_lock(&qp->mutex);
+       if (attr_mask & IB_QP_CUR_STATE) {
+               cur_state = attr->cur_qp_state;
+       } else {
+               spin_lock_irq(&qp->sq.lock);
+               spin_lock(&qp->rq.lock);
+               cur_state = qp->state;
+               spin_unlock(&qp->rq.lock);
+               spin_unlock_irq(&qp->sq.lock);
+       }
+
+       new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+       if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+               mthca_dbg(dev, "Bad QP transition (transport %d) "
+                         "%d->%d with attr 0x%08x\n",
+                         qp->transport, cur_state, new_state,
+                         attr_mask);
+               goto out;
+       }
+
+       if ((attr_mask & IB_QP_PKEY_INDEX) &&
+            attr->pkey_index >= dev->limits.pkey_table_len) {
+               mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
+                         attr->pkey_index, dev->limits.pkey_table_len-1);
+               goto out;
+       }
+
+       if ((attr_mask & IB_QP_PORT) &&
+           (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+               mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
+               goto out;
+       }
+
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+           attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+               mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
+                         attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
+               goto out;
+       }
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+           attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
+               mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
+                         attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
+               goto out;
+       }
+
+       if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+               err = 0;
+               goto out;
+       }
+
+       if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
+               err = __mthca_modify_qp(ibqp, &dummy_init_attr,
+                                       dummy_init_attr_mask[ibqp->qp_type],
+                                       IB_QPS_RESET, IB_QPS_INIT);
+               if (err)
+                       goto out;
+               cur_state = IB_QPS_INIT;
+       }
+
+       err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
 out:
        mutex_unlock(&qp->mutex);
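
The consumer-visible effect of the restructuring above is that the
spec-mandated RESET->ERR transition now works: mthca hardware cannot make
that move directly, so mthca_modify_qp() first drives the QP to INIT using
dummy_init_attr and the per-QP-type dummy_init_attr_mask, then on to ERR via
__mthca_modify_qp(). A minimal consumer-side sketch using only the standard
verbs API (assume qp is a freshly created, still-RESET struct ib_qp *):

	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR,
	};

	/*
	 * Any-state -> ERR is always a legal transition per the IB
	 * spec; with the change above, mthca satisfies it from RESET
	 * by internally doing RESET -> INIT -> ERR.
	 */
	if (ib_modify_qp(qp, &attr, IB_QP_STATE))
		printk(KERN_ERR "failed to move QP to error state\n");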
@@ -1539,7 +1591,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        int i;
        int size;
        int size0 = 0;
-       u32 f0;
+       /*
+        * f0 is only used if nreq != 0, and f0 will be initialized
+        * the first time through the main loop, since size0 == 0 the
+        * first time through.  So nreq cannot become non-zero without
+        * initializing f0, and f0 is in fact never used uninitialized.
+        */
+       u32 uninitialized_var(f0);
        int ind;
        u8 op0 = 0;
 
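
uninitialized_var() exists to silence a gcc false-positive "may be used
uninitialized" warning in cases like this, where the comment proves the
variable is written before any read. A sketch of the gcc definition from
include/linux/compiler-gcc.h of that era (verify against your tree):

	/*
	 * Self-assignment convinces gcc that x has been initialized
	 * while compiling to no extra code.  Only safe when the
	 * variable really is written before its first read, as the
	 * comment in the hunk above argues.
	 */
	#define uninitialized_var(x)	x = x

so "u32 uninitialized_var(f0);" expands to "u32 f0 = f0;". The same
annotation is applied to mthca_arbel_post_send() in a later hunk.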
@@ -1849,6 +1907,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                                      dev->kar + MTHCA_RECEIVE_DOORBELL,
                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
 
+                       qp->rq.next_ind = ind;
                        qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
                        size0 = 0;
                }
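
The one-liner above concerns Tavor's receive doorbell limit: at most
MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs can be posted per doorbell, so long
bursts ring the doorbell from inside the posting loop. Without updating
rq.next_ind at that point, the next chunk's doorbell would be built from a
stale first index and re-post WQEs the hardware had already seen; recording
ind keeps next_ind pointing at the first not-yet-doorbelled WQE. (Rationale
inferred from the surrounding code, not stated in the diff.)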
@@ -1893,7 +1952,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        int i;
        int size;
        int size0 = 0;
-       u32 f0;
+       /*
+        * f0 is only used if nreq != 0, and f0 will be initialized
+        * the first time through the main loop, since size0 == 0 the
+        * first time through.  So nreq cannot become non-zero without
+        * initializing f0, and f0 is in fact never used uninitialized.
+        */
+       u32 uninitialized_var(f0);
        int ind;
        u8 op0 = 0;
 
@@ -2231,10 +2296,10 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
        struct mthca_next_seg *next;
 
        /*
-        * For SRQs, all WQEs generate a CQE, so we're always at the
-        * end of the doorbell chain.
+        * For SRQs, all receive WQEs generate a CQE, so we're always
+        * at the end of the doorbell chain.
         */
-       if (qp->ibqp.srq) {
+       if (qp->ibqp.srq && !is_send) {
                *new_wqe = 0;
                return;
        }
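
On this last hunk: with an SRQ, every receive WQE consumed by the hardware
produces a CQE, so an error CQE for a receive is necessarily the last entry
in the doorbell chain and there is nothing left to fix up. Send WQEs are
different: send completions can be suppressed (unsignaled work requests), so
an error CQE on the send queue may still have doorbelled-but-unreported WQEs
chained after it. Checking !is_send restricts the early return to the
receive case, which appears to be the bug this hunk fixes.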