IB/mlx4: Fix warning in rounding up queue sizes
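
For context on the set_rq_size() change below: the RQ depth and gather count are rounded up to the next power of two, and clamping with max(1U, ...) both guarantees at least one entry (the hardware requires a non-empty RQ with at least one gather entry) and keeps the two operands of the kernel's type-checking max() macro the same unsigned type, which is presumably the warning the subject line refers to. A minimal userspace sketch of the same pattern, using a hypothetical stand-in for the kernel's roundup_pow_of_two(), might look like this:

    #include <stdio.h>

    /* stand-in for the kernel's roundup_pow_of_two(); assumes n >= 1 */
    static unsigned int roundup_pow_of_two(unsigned int n)
    {
            unsigned int r = 1;

            while (r < n)
                    r <<= 1;
            return r;
    }

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            unsigned int max_recv_wr = 0;   /* caller asked for no RQ entries */

            /*
             * Clamp to at least one entry before rounding, as the patch does
             * with max(1U, cap->max_recv_wr); the 1U literal keeps both
             * operands unsigned so a type-checking max() stays quiet.
             */
            unsigned int rq_max = roundup_pow_of_two(max(1U, max_recv_wr));

            printf("rq.max = %u\n", rq_max);        /* prints 1 */
            return 0;
    }

This is only an illustration of the rounding idiom, not the driver code itself; in the patch the result also feeds ilog2() to compute rq.wqe_shift, so handing roundup_pow_of_two() a zero is never allowed.
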
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a824bc5f79fdf5ee60ea4b580cef06819819ab28..4c15fa3426b80572f6320598cd7a286e6aabb482 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -189,18 +189,28 @@ static int send_wqe_overhead(enum ib_qp_type type)
 }
 
 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-                      struct mlx4_ib_qp *qp)
+                      int is_user, int has_srq, struct mlx4_ib_qp *qp)
 {
        /* Sanity check RQ size before proceeding */
        if (cap->max_recv_wr  > dev->dev->caps.max_wqes  ||
            cap->max_recv_sge > dev->dev->caps.max_rq_sg)
                return -EINVAL;
 
-       qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
+       if (has_srq) {
+               /* QPs attached to an SRQ should have no RQ */
+               if (cap->max_recv_wr)
+                       return -EINVAL;
 
-       qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
-                                                   sizeof (struct mlx4_wqe_data_seg)));
-       qp->rq.max_gs    = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
+               qp->rq.max = qp->rq.max_gs = 0;
+       } else {
+               /* HW requires >= 1 RQ entry with >= 1 gather entry */
+               if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
+                       return -EINVAL;
+
+               qp->rq.max       = roundup_pow_of_two(max(1U, cap->max_recv_wr));
+               qp->rq.max_gs    = roundup_pow_of_two(max(1U, cap->max_recv_sge));
+               qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
+       }
 
        cap->max_recv_wr  = qp->rq.max;
        cap->max_recv_sge = qp->rq.max_gs;
@@ -270,9 +280,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
-       struct mlx4_wqe_ctrl_seg *ctrl;
        int err;
-       int i;
 
        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
@@ -287,7 +295,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        qp->sq.head         = 0;
        qp->sq.tail         = 0;
 
-       err = set_rq_size(dev, &init_attr->cap, qp);
+       err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
        if (err)
                goto err;
 
@@ -319,20 +327,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                if (err)
                        goto err_mtt;
 
-               err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
-                                         ucmd.db_addr, &qp->db);
-               if (err)
-                       goto err_mtt;
+               if (!init_attr->srq) {
+                       err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+                                                 ucmd.db_addr, &qp->db);
+                       if (err)
+                               goto err_mtt;
+               }
        } else {
                err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
                if (err)
                        goto err;
 
-               err = mlx4_ib_db_alloc(dev, &qp->db, 0);
-               if (err)
-                       goto err;
+               if (!init_attr->srq) {
+                       err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+                       if (err)
+                               goto err;
 
-               *qp->db.db = 0;
+                       *qp->db.db = 0;
+               }
 
                if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
                        err = -ENOMEM;
@@ -348,11 +360,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                if (err)
                        goto err_mtt;
 
-               for (i = 0; i < qp->sq.max; ++i) {
-                       ctrl = get_send_wqe(qp, i);
-                       ctrl->owner_opcode = cpu_to_be32(1 << 31);
-               }
-
                qp->sq.wrid  = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL);
                qp->rq.wrid  = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL);
 
@@ -386,7 +393,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        return 0;
 
 err_wrid:
-       if (pd->uobject)
+       if (pd->uobject && !init_attr->srq)
                mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
        else {
                kfree(qp->sq.wrid);
@@ -403,7 +410,7 @@ err_buf:
                mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 
 err_db:
-       if (!pd->uobject)
+       if (!pd->uobject && !init_attr->srq)
                mlx4_ib_db_free(dev, &qp->db);
 
 err:
@@ -481,14 +488,16 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
        mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
        if (is_user) {
-               mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
-                                     &qp->db);
+               if (!qp->ibqp.srq)
+                       mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
+                                             &qp->db);
                ib_umem_release(qp->umem);
        } else {
                kfree(qp->sq.wrid);
                kfree(qp->rq.wrid);
                mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
-               mlx4_ib_db_free(dev, &qp->db);
+               if (!qp->ibqp.srq)
+                       mlx4_ib_db_free(dev, &qp->db);
        }
 }
 
@@ -763,11 +772,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
        }
 
-       if (attr_mask & IB_QP_RNR_RETRY) {
-               context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
-               optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
-       }
-
        if (attr_mask & IB_QP_AV) {
                if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
                                  attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) {
@@ -803,6 +807,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 
        context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
        context->params1    = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
+
+       if (attr_mask & IB_QP_RNR_RETRY) {
+               context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
+               optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
+       }
+
        if (attr_mask & IB_QP_RETRY_CNT) {
                context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
@@ -852,7 +862,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        if (ibqp->srq)
                context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
 
-       if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+       if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
        if (cur_state == IB_QPS_INIT &&
@@ -872,6 +882,21 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        else
                sqd_event = 0;
 
+       /*
+        * Before passing a kernel QP to the HW, make sure that the
+        * ownership bits of the send queue are set so that the
+        * hardware doesn't start processing stale work requests.
+        */
+       if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+               struct mlx4_wqe_ctrl_seg *ctrl;
+               int i;
+
+               for (i = 0; i < qp->sq.max; ++i) {
+                       ctrl = get_send_wqe(qp, i);
+                       ctrl->owner_opcode = cpu_to_be32(1 << 31);
+               }
+       }
+
        err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
                             to_mlx4_state(new_state), context, optpar,
                             sqd_event, &qp->mqp);
@@ -919,7 +944,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                qp->rq.tail = 0;
                qp->sq.head = 0;
                qp->sq.tail = 0;
-               *qp->db.db  = 0;
+               if (!ibqp->srq)
+                       *qp->db.db  = 0;
        }
 
 out: