git.err.no Git - linux-2.6/commitdiff
IB/mlx4: Implement query QP
author Jack Morgenstein <jackm@dev.mellanox.co.il>
Thu, 21 Jun 2007 09:27:47 +0000 (12:27 +0300)
committer Roland Dreier <rolandd@cisco.com>
Thu, 12 Jul 2007 22:41:00 +0000 (15:41 -0700)
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/net/mlx4/qp.c
include/linux/mlx4/qp.h

index 2fc8ccebaac13a9d72fd919dee0a2f7fc6c0de6d..6b9870a50bea828296f43a6d059a5a30c0004a79 100644 (file)
@@ -523,6 +523,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
+               (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
@@ -550,6 +551,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        ibdev->ib_dev.post_srq_recv     = mlx4_ib_post_srq_recv;
        ibdev->ib_dev.create_qp         = mlx4_ib_create_qp;
        ibdev->ib_dev.modify_qp         = mlx4_ib_modify_qp;
+       ibdev->ib_dev.query_qp          = mlx4_ib_query_qp;
        ibdev->ib_dev.destroy_qp        = mlx4_ib_destroy_qp;
        ibdev->ib_dev.post_send         = mlx4_ib_post_send;
        ibdev->ib_dev.post_recv         = mlx4_ib_post_recv;
index 40b83914b7b2793b9ae5ec6dfd9259031c5f2adc..d6dc57c5ccca65357f8d5d977d31b4e3aeca3ed7 100644 (file)
@@ -267,6 +267,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 int mlx4_ib_destroy_qp(struct ib_qp *qp);
 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
+int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+                    struct ib_qp_init_attr *qp_init_attr);
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr);
 int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
index 28a08bdd1800badc5ff207d274055ccd5ae760bd..40042184ad589b0b8b9d124b6622637f11251f02 100644 (file)
@@ -1455,3 +1455,140 @@ out:
 
        return err;
 }
+
+static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
+{
+       switch (mlx4_state) {
+       case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
+       case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
+       case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
+       case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
+       case MLX4_QP_STATE_SQ_DRAINING:
+       case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
+       case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
+       case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
+       default:                     return -1;
+       }
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
+{
+       switch (mlx4_mig_state) {
+       case MLX4_QP_PM_ARMED:          return IB_MIG_ARMED;
+       case MLX4_QP_PM_REARM:          return IB_MIG_REARM;
+       case MLX4_QP_PM_MIGRATED:       return IB_MIG_MIGRATED;
+       default: return -1;
+       }
+}
+
+static int to_ib_qp_access_flags(int mlx4_flags)
+{
+       int ib_flags = 0;
+
+       if (mlx4_flags & MLX4_QP_BIT_RRE)
+               ib_flags |= IB_ACCESS_REMOTE_READ;
+       if (mlx4_flags & MLX4_QP_BIT_RWE)
+               ib_flags |= IB_ACCESS_REMOTE_WRITE;
+       if (mlx4_flags & MLX4_QP_BIT_RAE)
+               ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+       return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
+                               struct mlx4_qp_path *path)
+{
+       memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
+       ib_ah_attr->port_num      = path->sched_queue & 0x40 ? 2 : 1;
+
+       if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+               return;
+
+       ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
+       ib_ah_attr->sl            = (path->sched_queue >> 2) & 0xf;
+       ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
+       ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
+       ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+       if (ib_ah_attr->ah_flags) {
+               ib_ah_attr->grh.sgid_index = path->mgid_index;
+               ib_ah_attr->grh.hop_limit  = path->hop_limit;
+               ib_ah_attr->grh.traffic_class =
+                       (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
+               ib_ah_attr->grh.flow_label =
+                       be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
+               memcpy(ib_ah_attr->grh.dgid.raw,
+                       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+       }
+}
+
+int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+                    struct ib_qp_init_attr *qp_init_attr)
+{
+       struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
+       struct mlx4_ib_qp *qp = to_mqp(ibqp);
+       struct mlx4_qp_context context;
+       int mlx4_state;
+       int err;
+
+       if (qp->state == IB_QPS_RESET) {
+               qp_attr->qp_state = IB_QPS_RESET;
+               goto done;
+       }
+
+       err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
+       if (err)
+               return -EINVAL;
+
+       mlx4_state = be32_to_cpu(context.flags) >> 28;
+
+       qp_attr->qp_state            = to_ib_qp_state(mlx4_state);
+       qp_attr->path_mtu            = context.mtu_msgmax >> 5;
+       qp_attr->path_mig_state      =
+               to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
+       qp_attr->qkey                = be32_to_cpu(context.qkey);
+       qp_attr->rq_psn              = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
+       qp_attr->sq_psn              = be32_to_cpu(context.next_send_psn) & 0xffffff;
+       qp_attr->dest_qp_num         = be32_to_cpu(context.remote_qpn) & 0xffffff;
+       qp_attr->qp_access_flags     =
+               to_ib_qp_access_flags(be32_to_cpu(context.params2));
+
+       if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+               to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
+               to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
+               qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
+               qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
+       }
+
+       qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
+       qp_attr->port_num   = context.pri_path.sched_queue & 0x40 ? 2 : 1;
+
+       /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+       qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
+
+       qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);
+
+       qp_attr->max_dest_rd_atomic =
+               1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
+       qp_attr->min_rnr_timer      =
+               (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
+       qp_attr->timeout            = context.pri_path.ackto >> 3;
+       qp_attr->retry_cnt          = (be32_to_cpu(context.params1) >> 16) & 0x7;
+       qp_attr->rnr_retry          = (be32_to_cpu(context.params1) >> 13) & 0x7;
+       qp_attr->alt_timeout        = context.alt_path.ackto >> 3;
+
+done:
+       qp_attr->cur_qp_state        = qp_attr->qp_state;
+       if (!ibqp->uobject) {
+               qp_attr->cap.max_send_wr     = qp->sq.wqe_cnt;
+               qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
+               qp_attr->cap.max_send_sge    = qp->sq.max_gs;
+               qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+               qp_attr->cap.max_inline_data = (1 << qp->sq.wqe_shift) -
+                       send_wqe_overhead(qp->ibqp.qp_type) -
+                       sizeof (struct mlx4_wqe_inline_seg);
+               qp_init_attr->cap            = qp_attr->cap;
+       }
+
+       return 0;
+}
+
index 492cfaaaa75cbf3b687214df37462f268118ce7e..19b48c71cf7f2e315eb31ba8a47af6b824c2d181 100644 (file)
@@ -277,3 +277,24 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
        mlx4_CONF_SPECIAL_QP(dev, 0);
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
 }
+
+int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                 struct mlx4_qp_context *context)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
+                          MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+       if (!err)
+               memcpy(context, mailbox->buf + 8, sizeof *context);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_query);
+
index 10c57d2791449150c1c721e5b28e11694576292f..3968b943259ae26a5c85fcf2019ab33fad79c958 100644 (file)
@@ -282,6 +282,9 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
                   int sqd_event, struct mlx4_qp *qp);
 
+int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                 struct mlx4_qp_context *context);
+
 static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
 {
        return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));