/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
 */
#include <rdma/ib_smi.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"

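/*
 * The query methods below fetch management datagrams (MADs) from the
 * HCA's own firmware through the MAD_IFC command and then parse the
 * requested attribute out of the response MAD's data field.
 */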
static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mthca_dev *mdev = to_mdev(ibdev);

        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver              = mdev->fw_ver;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version       = 1;
        in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version      = 1;
        in_mad->method             = IB_MGMT_METHOD_GET;
        in_mad->attr_id            = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->device_cap_flags    = mdev->device_cap_flags;
        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
        memcpy(&props->node_guid,      out_mad->data + 12, 8);

        props->max_mr_size         = ~0ull;
        props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr           = 0xffff;
        props->max_sge             = mdev->limits.max_sg;
        props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe             = 0xffff;
        props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = 1 << mdev->qp_table.rdb_shift;
        props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
        props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
                                        IB_ATOMIC_HCA : IB_ATOMIC_NONE;

        err = 0;
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

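/*
 * The byte offsets into out_mad->data below follow the IBA PortInfo
 * attribute layout: for example, the 16-bit LID sits at offset 16 and
 * the subnet timeout occupies the low 5 bits of byte 51.
 */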
static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod      = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc               = out_mad->data[34] & 0x7;
        props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl             = out_mad->data[36] & 0xf;
        props->state             = out_mad->data[32] & 0xf;
        props->phys_state        = out_mad->data[33] >> 4;
        props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
        props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
        props->max_msg_sz        = 0x80000000;
        props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
        props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width      = out_mad->data[31] & 0xf;
        props->active_speed      = out_mad->data[35] >> 4;
        props->max_mtu           = out_mad->data[41] & 0xf;
        props->active_mtu        = out_mad->data[36] >> 4;
        props->subnet_timeout    = out_mad->data[51] & 0x1f;

        err = 0;
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

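/*
 * cap_mask_mutex serializes updates to the port capability mask so
 * that concurrent modify_port calls cannot lose each other's changes
 * between the query below and the SET_IB command.
 */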
static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;
        u8 status;

        if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = mthca_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid     = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

out:
        up(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod      = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

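/*
 * A GID is the 8-byte subnet prefix (from the PortInfo attribute)
 * followed by an 8-byte port GUID (from the GuidInfo attribute), so
 * assembling one takes two MAD queries.
 */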
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod      = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw, out_mad->data + 8, 8);

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod      = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        /* Each GUID in the GuidInfo block is 8 bytes wide. */
        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

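/*
 * A userspace context owns one UAR page and a table of userspace
 * doorbell records.  The table sizes userspace needs to know are
 * passed back in struct mthca_alloc_ucontext_resp.
 */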
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct mthca_alloc_ucontext_resp uresp;
        struct mthca_ucontext *context;
        int err;

        memset(&uresp, 0, sizeof uresp);

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        else
                uresp.uarc_size = 0;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(err);
        }

        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
        kfree(to_mucontext(context));

        return 0;
}

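/*
 * Map the context's UAR page into userspace so that doorbells can be
 * rung directly, without a system call.  The UAR is MMIO space, hence
 * the non-cached protection, and the mapping must cover exactly one
 * page.
 */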
static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
{
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                               to_mucontext(context)->uar.pfn,
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct mthca_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                        mthca_pd_free(to_mdev(ibdev), pd);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
        kfree(pd);

        return 0;
}

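/*
 * Note the GFP_ATOMIC allocation below: presumably because address
 * handles may be created from contexts that are not allowed to sleep
 * (e.g. while processing connection events).
 */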
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
{
        int err;
        struct mthca_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
        if (err) {
                kfree(ah);
                return ERR_PTR(err);
        }

        return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);

        return 0;
}

static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
                                       struct ib_srq_init_attr *init_attr,
                                       struct ib_udata *udata)
{
        struct mthca_create_srq ucmd;
        struct mthca_ucontext *context = NULL;
        struct mthca_srq *srq;
        int err;

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        if (pd->uobject) {
                context = to_mucontext(pd->uobject->context);

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_free;
                }

                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);
                if (err)
                        goto err_free;

                srq->mr.ibmr.lkey = ucmd.lkey;
                srq->db_index     = ucmd.db_index;
        }

        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
                              &init_attr->attr, srq);

        if (err && pd->uobject)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);

        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
                mthca_free_srq(to_mdev(pd->device), srq);
                err = -EFAULT;
                goto err_free;
        }

        return &srq->ibsrq;

err_free:
        kfree(srq);

        return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
        struct mthca_ucontext *context;

        if (srq->uobject) {
                context = to_mucontext(srq->uobject->context);

                mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
                                    context->db_tab, to_msrq(srq)->db_index);
        }

        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
        kfree(srq);

        return 0;
}

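/*
 * For userspace QPs, the doorbell pages supplied by the caller are
 * mapped here.  Special QPs (SMI/GSI) may only be created in the
 * kernel, and raw QP types are not supported at all.
 */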
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct mthca_create_qp ucmd;
        struct mthca_qp *qp;
        int err;

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                struct mthca_ucontext *context;

                qp = kmalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                if (pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                kfree(qp);
                                return ERR_PTR(-EFAULT);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.sq_db_index, ucmd.sq_db_page);
                        if (err) {
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.rq_db_index, ucmd.rq_db_page);
                        if (err) {
                                mthca_unmap_user_db(to_mdev(pd->device),
                                                    &context->uar,
                                                    context->db_tab,
                                                    ucmd.sq_db_index);
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        qp->mr.ibmr.lkey = ucmd.lkey;
                        qp->sq.db_index  = ucmd.sq_db_index;
                        qp->rq.db_index  = ucmd.rq_db_index;
                }

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     &init_attr->cap, qp);

                if (err && pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.sq_db_index);
                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.rq_db_index);
                }

                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                /* Don't allow userspace to create special QPs */
                if (pd->uobject)
                        return ERR_PTR(-EINVAL);

                qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type, &init_attr->cap,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp));
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_inline_data = 0;
        init_attr->cap.max_send_wr     = qp->sq.max;
        init_attr->cap.max_recv_wr     = qp->rq.max;
        init_attr->cap.max_send_sge    = qp->sq.max_gs;
        init_attr->cap.max_recv_sge    = qp->rq.max_gs;

        return &qp->ibqp;
}

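/*
 * Tear-down mirrors mthca_create_qp: any userspace doorbells are
 * unmapped first, then the QP itself is freed.
 */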
static int mthca_destroy_qp(struct ib_qp *qp)
{
        if (qp->uobject) {
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->sq.db_index);
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->rq.db_index);
        }
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);

        return 0;
}

static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
{
        struct mthca_create_cq ucmd;
        struct mthca_cq *cq;
        int nent;
        int err;

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return ERR_PTR(-EFAULT);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.set_db_index, ucmd.set_db_page);
                if (err)
                        return ERR_PTR(err);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.arm_db_index, ucmd.arm_db_page);
                if (err)
                        goto err_unmap_set;
        }

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq) {
                err = -ENOMEM;
                goto err_unmap_arm;
        }

        if (context) {
                cq->mr.ibmr.lkey    = ucmd.lkey;
                cq->set_ci_db_index = ucmd.set_db_index;
                cq->arm_db_index    = ucmd.arm_db_index;
        }

        /* Round the requested size up to the next power of two. */
        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent,
                            context ? to_mucontext(context) : NULL,
                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
                mthca_free_cq(to_mdev(ibdev), cq);
                err = -EFAULT;
                goto err_free;
        }

        return &cq->ibcq;

err_free:
        kfree(cq);

err_unmap_arm:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.set_db_index);

        return ERR_PTR(err);
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
        if (cq->uobject) {
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
        }
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}

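/*
 * Translate IB verbs access flags into the hardware MPT flag format.
 * Local read access is always granted, so for example
 *
 *      convert_access(IB_ACCESS_REMOTE_READ)
 *              == MTHCA_MPT_FLAG_REMOTE_READ | MTHCA_MPT_FLAG_LOCAL_READ
 */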
static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);
        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        return &mr->ibmr;
}

static struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int                 num_phys_buf,
                                       int                 acc,
                                       u64                *iova_start)
{
        struct mthca_mr *mr;
        u64 *page_list;
        u64 total_size;
        u64 mask;
        int shift;
        int npages;
        int err;
        int i, j, n;

        /* First check that we have enough alignment */
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
                return ERR_PTR(-EINVAL);

        if (num_phys_buf > 1 &&
            ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
                return ERR_PTR(-EINVAL);

        mask = 0;
        total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
                        return ERR_PTR(-EINVAL);
                if (i != 0 && i != num_phys_buf - 1 &&
                    (buffer_list[i].size & ~PAGE_MASK))
                        return ERR_PTR(-EINVAL);

                total_size += buffer_list[i].size;
                if (i > 0)
                        mask |= buffer_list[i].addr;
        }

        /* Find largest page shift we can use to cover buffers */
        for (shift = PAGE_SHIFT; shift < 31; ++shift)
                if (num_phys_buf > 1) {
                        if ((1ULL << shift) & mask)
                                break;
                } else {
                        if (1ULL << shift >=
                            buffer_list[0].size +
                            (buffer_list[0].addr & ((1ULL << shift) - 1)))
                                break;
                }

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
        buffer_list[0].addr &= ~0ull << shift;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

        if (!npages)
                return &mr->ibmr;

        page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
        if (!page_list) {
                kfree(mr);
                return ERR_PTR(-ENOMEM);
        }

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

        mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
                  "in PD %x; shift %d, npages %d.\n",
                  (unsigned long long) buffer_list[0].addr,
                  (unsigned long long) *iova_start,
                  to_mpd(pd)->pd_num,
                  shift, npages);

        err = mthca_mr_alloc_phys(to_mdev(pd->device),
                                  to_mpd(pd)->pd_num,
                                  page_list, shift, npages,
                                  *iova_start, total_size,
                                  convert_access(acc), mr);
        if (err) {
                kfree(page_list);
                kfree(mr);
                return ERR_PTR(err);
        }

        kfree(page_list);
        return &mr->ibmr;
}

static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
                                       int acc, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        struct mthca_mr *mr;
        u64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;

        shift = ffs(region->page_size) - 1;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        n = 0;
        list_for_each_entry(chunk, &region->chunk_list, list)
                n += chunk->nents;

        mr->mtt = mthca_alloc_mtt(dev, n);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err;
        }

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_mtt;
        }

        i = n = 0;

        list_for_each_entry(chunk, &region->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                        region->page_size * k;
                                /*
                                 * Be friendly to WRITE_MTT command
                                 * and leave two empty slots for the
                                 * index and reserved fields of the
                                 * mailbox.
                                 */
                                if (i == PAGE_SIZE / sizeof (u64) - 2) {
                                        err = mthca_write_mtt(dev, mr->mtt,
                                                              n, pages, i);
                                        if (err)
                                                goto mtt_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);

mtt_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_mtt;

        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base,
                             region->length, convert_access(acc), mr);
        if (err)
                goto err_mtt;

        return &mr->ibmr;

err_mtt:
        mthca_free_mtt(dev, mr->mtt);

err:
        kfree(mr);

        return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
        struct mthca_mr *mmr = to_mmr(mr);
        mthca_free_mr(to_mdev(mr->device), mmr);
        kfree(mmr);
        return 0;
}

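/*
 * Fast memory regions (FMRs) allow cheap remapping of a registered
 * region.  Note that unmapping is batched: mthca_unmap_fmr below
 * requires every FMR on the list to belong to the same device, and
 * flushes them all with a single SYNC_TPT firmware command.
 */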
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                              convert_access(mr_access_flags), fmr);
        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}

static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        u8 status;
        struct mthca_dev *mdev = NULL;

        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev, &status);
        if (err)
                return err;
        if (status)
                return -EINVAL;
        return 0;
}

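/*
 * sysfs attributes exposing the hardware revision, firmware version,
 * HCA type and board ID via the IB class device.
 */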
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
                       (int) (dev->fw_ver >> 16) & 0xffff,
                       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}

static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
        &class_device_attr_hw_rev,
        &class_device_attr_fw_ver,
        &class_device_attr_hca_type,
        &class_device_attr_board_id
};

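/*
 * Fill in the ib_device method table and register with the IB
 * midlayer.  Several entry points depend on whether the HCA is
 * mem-free (Arbel/Sinai) or has locally attached memory (Tavor).
 */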
int mthca_register_device(struct mthca_dev *dev)
{
        int ret;
        int i;

        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner                = THIS_MODULE;

        dev->ib_dev.uverbs_abi_ver       = MTHCA_UVERBS_ABI_VERSION;
        dev->ib_dev.node_type            = IB_NODE_CA;
        dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
        dev->ib_dev.dma_device           = &dev->pdev->dev;
        dev->ib_dev.class_dev.dev        = &dev->pdev->dev;
        dev->ib_dev.query_device         = mthca_query_device;
        dev->ib_dev.query_port           = mthca_query_port;
        dev->ib_dev.modify_port          = mthca_modify_port;
        dev->ib_dev.query_pkey           = mthca_query_pkey;
        dev->ib_dev.query_gid            = mthca_query_gid;
        dev->ib_dev.alloc_ucontext       = mthca_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext     = mthca_dealloc_ucontext;
        dev->ib_dev.mmap                 = mthca_mmap_uar;
        dev->ib_dev.alloc_pd             = mthca_alloc_pd;
        dev->ib_dev.dealloc_pd           = mthca_dealloc_pd;
        dev->ib_dev.create_ah            = mthca_ah_create;
        dev->ib_dev.destroy_ah           = mthca_ah_destroy;

        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.create_srq   = mthca_create_srq;
                dev->ib_dev.modify_srq   = mthca_modify_srq;
                dev->ib_dev.destroy_srq  = mthca_destroy_srq;

                if (mthca_is_memfree(dev))
                        dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
                else
                        dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
        }

        dev->ib_dev.create_qp            = mthca_create_qp;
        dev->ib_dev.modify_qp            = mthca_modify_qp;
        dev->ib_dev.destroy_qp           = mthca_destroy_qp;
        dev->ib_dev.create_cq            = mthca_create_cq;
        dev->ib_dev.destroy_cq           = mthca_destroy_cq;
        dev->ib_dev.poll_cq              = mthca_poll_cq;
        dev->ib_dev.get_dma_mr           = mthca_get_dma_mr;
        dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
        dev->ib_dev.reg_user_mr          = mthca_reg_user_mr;
        dev->ib_dev.dereg_mr             = mthca_dereg_mr;

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                dev->ib_dev.alloc_fmr            = mthca_alloc_fmr;
                dev->ib_dev.unmap_fmr            = mthca_unmap_fmr;
                dev->ib_dev.dealloc_fmr          = mthca_dealloc_fmr;
                if (mthca_is_memfree(dev))
                        dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
                else
                        dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
        }

        dev->ib_dev.attach_mcast         = mthca_multicast_attach;
        dev->ib_dev.detach_mcast         = mthca_multicast_detach;
        dev->ib_dev.process_mad          = mthca_process_mad;

        if (mthca_is_memfree(dev)) {
                dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
                dev->ib_dev.post_send     = mthca_arbel_post_send;
                dev->ib_dev.post_recv     = mthca_arbel_post_receive;
        } else {
                dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
                dev->ib_dev.post_send     = mthca_tavor_post_send;
                dev->ib_dev.post_recv     = mthca_tavor_post_receive;
        }

        init_MUTEX(&dev->cap_mask_mutex);

        ret = ib_register_device(&dev->ib_dev);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
                ret = class_device_create_file(&dev->ib_dev.class_dev,
                                               mthca_class_attributes[i]);
                if (ret) {
                        ib_unregister_device(&dev->ib_dev);
                        return ret;
                }
        }

        return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
        ib_unregister_device(&dev->ib_dev);
}