/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
 */

#include <ib_smi.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"

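/*
 * The query_device/query_port/query_pkey/query_gid methods below all
 * follow the same pattern: build a LID-routed SMP (subnet management
 * packet) in in_mad, execute it against the local SMA with the
 * MAD_IFC firmware command, and then pull the requested fields out of
 * out_mad->data at their IBA-defined byte offsets.
 */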
static int mthca_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	struct mthca_dev *mdev = to_mdev(ibdev);
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);
	props->fw_ver = mdev->fw_ver;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->device_cap_flags = mdev->device_cap_flags;
	props->vendor_id        = be32_to_cpup((u32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id   = be16_to_cpup((u16 *) (out_mad->data + 30));
	props->hw_ver           = be16_to_cpup((u16 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
	memcpy(&props->node_guid,      out_mad->data + 12, 8);

	err = 0;
 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod      = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->lid            = be16_to_cpup((u16 *) (out_mad->data + 16));
	props->lmc            = out_mad->data[34] & 0x7;
	props->sm_lid         = be16_to_cpup((u16 *) (out_mad->data + 18));
	props->sm_sl          = out_mad->data[36] & 0xf;
	props->state          = out_mad->data[32] & 0xf;
	props->phys_state     = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20));
	props->gid_tbl_len    = to_mdev(ibdev)->limits.gid_table_len;
	props->pkey_tbl_len   = to_mdev(ibdev)->limits.pkey_table_len;
	props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48));
	props->active_width   = out_mad->data[31] & 0xf;
	props->active_speed   = out_mad->data[35] >> 4;

	err = 0;
 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

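/*
 * Modifying a port only touches the port capability mask and the
 * Q_Key violation counter.  cap_mask_mutex serializes the
 * read-modify-write of the capability mask done via mthca_query_port()
 * followed by mthca_SET_IB().
 */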
static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;
	u8 status;

	if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = mthca_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid     = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);
	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
	if (err)
		goto out;
	if (status)
		err = -EINVAL;

 out:
	up(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod      = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	/* P_Key table responses come back in blocks of 32 entries. */
	*pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* PortInfo provides the 64-bit GID prefix... */
	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod      = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}
	memcpy(gid->raw, out_mad->data + 8, 8);

	/* ...and GUIDInfo provides the port GUIDs, eight 8-byte GUIDs per block. */
	memset(in_mad, 0, sizeof *in_mad);
	in_mad->base_version  = 1;
	in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	in_mad->class_version = 1;
	in_mad->method        = IB_MGMT_METHOD_GET;
	in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod      = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}
	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev)
{
	struct mthca_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mthca_pd_alloc(to_mdev(ibdev), pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
	kfree(pd);
	return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	struct mthca_ah *ah;
	int err;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
	if (err) {
		kfree(ah);
		return ERR_PTR(err);
	}

	return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	kfree(ah);
	return 0;
}

static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr)
{
	struct mthca_qp *qp;
	int err;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		qp = kmalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->sq.max    = init_attr->cap.max_send_wr;
		qp->rq.max    = init_attr->cap.max_recv_wr;
		qp->sq.max_gs = init_attr->cap.max_send_sge;
		qp->rq.max_gs = init_attr->cap.max_recv_sge;

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     qp);
		qp->ibqp.qp_num = qp->qpn;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* Special QPs: struct mthca_sqp embeds struct mthca_qp. */
		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->sq.max    = init_attr->cap.max_send_wr;
		qp->rq.max    = init_attr->cap.max_recv_wr;
		qp->sq.max_gs = init_attr->cap.max_send_sge;
		qp->rq.max_gs = init_attr->cap.max_recv_sge;

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type,
				      qp->ibqp.qp_num, init_attr->port_num,
				      to_msqp(qp));
		break;
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-ENOSYS);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_inline_data = 0;

	return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(qp);
	return 0;
}

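/*
 * mthca works with power-of-two CQ sizes, so the requested number of
 * entries is rounded up to the next power of two before
 * mthca_init_cq() is called.
 */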
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries)
{
	struct mthca_cq *cq;
	int nent;
	int err;

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent, cq);
	if (err) {
		kfree(cq);
		return ERR_PTR(err);
	}

	return &cq->ibcq;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	kfree(cq);
	return 0;
}

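/*
 * convert_access() translates generic IB_ACCESS_* flags into the
 * MTHCA_MPT_FLAG_* bits the HCA expects in a memory protection table
 * (MPT) entry.  Local read access is always granted.
 */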
static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);
	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	return &mr->ibmr;
}

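/*
 * Registering a list of physical buffers: every buffer must share the
 * page alignment of the requested iova, and the code below picks the
 * largest page shift that still covers all buffers so the MTT needs as
 * few entries as possible.
 */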
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
				       struct ib_phys_buf *buffer_list,
				       int num_phys_buf,
				       int acc,
				       u64 *iova_start)
{
	struct mthca_mr *mr;
	u64 *page_list;
	u64 total_size, mask;
	int shift, npages;
	int err, i, j, n;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
		return ERR_PTR(-EINVAL);

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
		return ERR_PTR(-EINVAL);

	mask = 0;
	total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (buffer_list[i].addr & ~PAGE_MASK)
			return ERR_PTR(-EINVAL);
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return ERR_PTR(-EINVAL);

		total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
	}

	/* Find largest page shift we can use to cover buffers */
	for (shift = PAGE_SHIFT; shift < 31; ++shift)
		if (num_phys_buf > 1) {
			if ((1ULL << shift) & mask)
				break;
		} else {
			if (1ULL << shift >=
			    buffer_list[0].size +
			    (buffer_list[0].addr & ((1ULL << shift) - 1)))
				break;
		}

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
	buffer_list[0].addr &= ~0ull << shift;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

	page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list) {
		kfree(mr);
		return ERR_PTR(-ENOMEM);
	}

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
		     ++j)
			page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

	mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
		  "in PD %x; shift %d, npages %d.\n",
		  (unsigned long long) buffer_list[0].addr,
		  (unsigned long long) *iova_start,
		  to_mpd(pd)->pd_num,
		  shift, npages);

	err = mthca_mr_alloc_phys(to_mdev(pd->device),
				  to_mpd(pd)->pd_num,
				  page_list, shift, npages,
				  *iova_start, total_size,
				  convert_access(acc), mr);
	if (err) {
		kfree(page_list);
		kfree(mr);
		return ERR_PTR(err);
	}

	kfree(page_list);
	return &mr->ibmr;
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
	mthca_free_mr(to_mdev(mr->device), to_mmr(mr));
	kfree(mr);
	return 0;
}

static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
		       (int) (dev->fw_ver >> 16) & 0xffff,
		       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	switch (dev->hca_type) {
	case TAVOR:        return sprintf(buf, "MT23108\n");
	case ARBEL_COMPAT: return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case ARBEL_NATIVE: return sprintf(buf, "MT25208\n");
	default:           return sprintf(buf, "unknown\n");
	}
}

static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_fw_ver,
	&class_device_attr_hca_type
};

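/*
 * mthca_register_device() fills in the ib_device verbs table and
 * registers the device with the IB midlayer.  Mem-free HCAs (Arbel in
 * native mode) use a different doorbell scheme, so they get their own
 * CQ arming and post_send/post_recv handlers; Tavor and Arbel in
 * compatibility mode share the Tavor versions.
 */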
int mthca_register_device(struct mthca_dev *dev)
{
	int ret;
	int i;

	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.node_type     = IB_NODE_CA;
	dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
	dev->ib_dev.dma_device    = &dev->pdev->dev;
	dev->ib_dev.class_dev.dev = &dev->pdev->dev;
	dev->ib_dev.query_device  = mthca_query_device;
	dev->ib_dev.query_port    = mthca_query_port;
	dev->ib_dev.modify_port   = mthca_modify_port;
	dev->ib_dev.query_pkey    = mthca_query_pkey;
	dev->ib_dev.query_gid     = mthca_query_gid;
	dev->ib_dev.alloc_pd      = mthca_alloc_pd;
	dev->ib_dev.dealloc_pd    = mthca_dealloc_pd;
	dev->ib_dev.create_ah     = mthca_ah_create;
	dev->ib_dev.destroy_ah    = mthca_ah_destroy;
	dev->ib_dev.create_qp     = mthca_create_qp;
	dev->ib_dev.modify_qp     = mthca_modify_qp;
	dev->ib_dev.destroy_qp    = mthca_destroy_qp;
	dev->ib_dev.create_cq     = mthca_create_cq;
	dev->ib_dev.destroy_cq    = mthca_destroy_cq;
	dev->ib_dev.poll_cq       = mthca_poll_cq;
	dev->ib_dev.get_dma_mr    = mthca_get_dma_mr;
	dev->ib_dev.reg_phys_mr   = mthca_reg_phys_mr;
	dev->ib_dev.dereg_mr      = mthca_dereg_mr;
	dev->ib_dev.attach_mcast  = mthca_multicast_attach;
	dev->ib_dev.detach_mcast  = mthca_multicast_detach;
	dev->ib_dev.process_mad   = mthca_process_mad;

	if (dev->hca_type == ARBEL_NATIVE) {
		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
		dev->ib_dev.post_send     = mthca_arbel_post_send;
		dev->ib_dev.post_recv     = mthca_arbel_post_receive;
	} else {
		dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
		dev->ib_dev.post_send     = mthca_tavor_post_send;
		dev->ib_dev.post_recv     = mthca_tavor_post_receive;
	}

	init_MUTEX(&dev->cap_mask_mutex);

	ret = ib_register_device(&dev->ib_dev);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
		ret = class_device_create_file(&dev->ib_dev.class_dev,
					       mthca_class_attributes[i]);
		if (ret) {
			ib_unregister_device(&dev->ib_dev);
			return ret;
		}
	}

	return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
	ib_unregister_device(&dev->ib_dev);
}