/* drivers/infiniband/core/user_mad.c — [IB] ib_umad: various cleanups */
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 
4  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $
35  */
36
37 #include <linux/module.h>
38 #include <linux/init.h>
39 #include <linux/device.h>
40 #include <linux/err.h>
41 #include <linux/fs.h>
42 #include <linux/cdev.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/poll.h>
46 #include <linux/rwsem.h>
47 #include <linux/kref.h>
48
49 #include <asm/uaccess.h>
50 #include <asm/semaphore.h>
51
52 #include <rdma/ib_mad.h>
53 #include <rdma/ib_user_mad.h>
54
55 MODULE_AUTHOR("Roland Dreier");
56 MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
57 MODULE_LICENSE("Dual BSD/GPL");
58
/* Compile-time limits and the character-device major/minor layout. */
enum {
	IB_UMAD_MAX_PORTS  = 64,	/* max ports; dev_map holds 2x this (umad + issm) */
	IB_UMAD_MAX_AGENTS = 32,	/* max MAD agents registered per open file */

	IB_UMAD_MAJOR      = 231,	/* fixed char device major number */
	IB_UMAD_MINOR_BASE = 0		/* umad minors 0..63, issm minors 64..127 */
};
66
/*
 * Per-port state: one regular MAD device ("umadN") plus one SM-capability
 * device ("issmN"), each a cdev/class_device pair sharing this structure.
 */
struct ib_umad_port {
	int                    devnum;		/* bit index into dev_map (lower half) */
	struct cdev            dev;		/* "umadN" character device */
	struct class_device    class_dev;

	int                    sm_devnum;	/* bit index into dev_map (upper half) */
	struct cdev            sm_dev;		/* "issmN" character device */
	struct class_device    sm_class_dev;
	struct semaphore       sm_sem;		/* held for the lifetime of an issm open */

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;	/* back-pointer to owning device state */
	u8                     port_num;
};
81
/* Per-IB-device state; port[] is a trailing variable-length array. */
struct ib_umad_device {
	int                  start_port, end_port;	/* inclusive port-number range */
	struct kref          ref;	/* one ref per registered class_device + initial ref */
	struct ib_umad_port  port[0];	/* indexed by port_num - start_port */
};
87
/* Per-open-file state for a umad device. */
struct ib_umad_file {
	struct ib_umad_port *port;
	spinlock_t           recv_lock;	/* protects recv_list */
	struct list_head     recv_list;	/* received MADs waiting for read() */
	wait_queue_head_t    recv_wait;	/* readers sleep here until recv_list non-empty */
	struct rw_semaphore  agent_mutex;	/* protects agent[] and mr[] slots */
	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
	struct ib_mr        *mr[IB_UMAD_MAX_AGENTS];
};
97
/* A MAD in transit between userspace and the MAD layer. */
struct ib_umad_packet {
	struct ib_mad_send_buf *msg;	/* send buffer; used on the send path only */
	struct list_head   list;	/* linkage on file->recv_list */
	int                length;	/* payload length of mad.data in bytes */
	struct ib_user_mad mad;		/* userspace-visible header + MAD data */
};
104
/* First dev_t of the 2 * IB_UMAD_MAX_PORTS minors owned by this driver. */
static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static spinlock_t map_lock;		/* protects dev_map; initialized in ib_umad_init() */
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);	/* allocated minors */

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
111
112 static int queue_packet(struct ib_umad_file *file,
113                         struct ib_mad_agent *agent,
114                         struct ib_umad_packet *packet)
115 {
116         int ret = 1;
117
118         down_read(&file->agent_mutex);
119         for (packet->mad.hdr.id = 0;
120              packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
121              packet->mad.hdr.id++)
122                 if (agent == file->agent[packet->mad.hdr.id]) {
123                         spin_lock_irq(&file->recv_lock);
124                         list_add_tail(&packet->list, &file->recv_list);
125                         spin_unlock_irq(&file->recv_lock);
126                         wake_up_interruptible(&file->recv_wait);
127                         ret = 0;
128                         break;
129                 }
130
131         up_read(&file->agent_mutex);
132
133         return ret;
134 }
135
136 static void send_handler(struct ib_mad_agent *agent,
137                          struct ib_mad_send_wc *send_wc)
138 {
139         struct ib_umad_file *file = agent->context;
140         struct ib_umad_packet *timeout;
141         struct ib_umad_packet *packet = send_wc->send_buf->context[0];
142
143         ib_destroy_ah(packet->msg->ah);
144         ib_free_send_mad(packet->msg);
145
146         if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
147                 timeout = kzalloc(sizeof *timeout + IB_MGMT_MAD_HDR, GFP_KERNEL);
148                 if (!timeout)
149                         goto out;
150
151                 timeout->length         = IB_MGMT_MAD_HDR;
152                 timeout->mad.hdr.id     = packet->mad.hdr.id;
153                 timeout->mad.hdr.status = ETIMEDOUT;
154                 memcpy(timeout->mad.data, packet->mad.data,
155                        sizeof (struct ib_mad_hdr));
156
157                 if (!queue_packet(file, agent, timeout))
158                                 return;
159         }
160 out:
161         kfree(packet);
162 }
163
164 static void recv_handler(struct ib_mad_agent *agent,
165                          struct ib_mad_recv_wc *mad_recv_wc)
166 {
167         struct ib_umad_file *file = agent->context;
168         struct ib_umad_packet *packet;
169         int length;
170
171         if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
172                 goto out;
173
174         length = mad_recv_wc->mad_len;
175         packet = kzalloc(sizeof *packet + length, GFP_KERNEL);
176         if (!packet)
177                 goto out;
178
179         packet->length = length;
180
181         ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);
182
183         packet->mad.hdr.status    = 0;
184         packet->mad.hdr.length    = length + sizeof (struct ib_user_mad);
185         packet->mad.hdr.qpn       = cpu_to_be32(mad_recv_wc->wc->src_qp);
186         packet->mad.hdr.lid       = cpu_to_be16(mad_recv_wc->wc->slid);
187         packet->mad.hdr.sl        = mad_recv_wc->wc->sl;
188         packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
189         packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
190         if (packet->mad.hdr.grh_present) {
191                 /* XXX parse GRH */
192                 packet->mad.hdr.gid_index       = 0;
193                 packet->mad.hdr.hop_limit       = 0;
194                 packet->mad.hdr.traffic_class   = 0;
195                 memset(packet->mad.hdr.gid, 0, 16);
196                 packet->mad.hdr.flow_label      = 0;
197         }
198
199         if (queue_packet(file, agent, packet))
200                 kfree(packet);
201
202 out:
203         ib_free_recv_mad(mad_recv_wc);
204 }
205
/*
 * read() on a umad device: block (unless O_NONBLOCK) until a received
 * packet is queued, then copy it to userspace.  Returns the number of
 * bytes copied (struct ib_user_mad header plus MAD payload), -ENOSPC
 * with a partial copy if the buffer is too small for this packet, or a
 * negative error.
 */
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	/* caller must have room for at least a header plus one plain MAD */
	if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
		return -EINVAL;

	spin_lock_irq(&file->recv_lock);

	while (list_empty(&file->recv_list)) {
		/* drop the lock before sleeping or returning */
		spin_unlock_irq(&file->recv_lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		spin_lock_irq(&file->recv_lock);
	}

	/* take the oldest packet off the list */
	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	spin_unlock_irq(&file->recv_lock);

	if (count < packet->length + sizeof (struct ib_user_mad)) {
		/* Return length needed (and first RMPP segment) if too small */
		if (copy_to_user(buf, &packet->mad,
				 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
			ret = -EFAULT;
		else
			ret = -ENOSPC;
	} else if (copy_to_user(buf, &packet->mad,
				packet->length + sizeof (struct ib_user_mad)))
		ret = -EFAULT;
	else
		ret = packet->length + sizeof (struct ib_user_mad);
	if (ret < 0) {
		/* Requeue packet */
		spin_lock_irq(&file->recv_lock);
		list_add(&packet->list, &file->recv_list);
		spin_unlock_irq(&file->recv_lock);
	} else
		kfree(packet);
	return ret;
}
257
258 static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
259                              size_t count, loff_t *pos)
260 {
261         struct ib_umad_file *file = filp->private_data;
262         struct ib_umad_packet *packet;
263         struct ib_mad_agent *agent;
264         struct ib_ah_attr ah_attr;
265         struct ib_ah *ah;
266         struct ib_rmpp_mad *rmpp_mad;
267         u8 method;
268         __be64 *tid;
269         int ret, length, hdr_len, copy_offset;
270         int rmpp_active = 0;
271
272         if (count < sizeof (struct ib_user_mad))
273                 return -EINVAL;
274
275         length = count - sizeof (struct ib_user_mad);
276         packet = kmalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
277         if (!packet)
278                 return -ENOMEM;
279
280         if (copy_from_user(&packet->mad, buf,
281                             sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)) {
282                 ret = -EFAULT;
283                 goto err;
284         }
285
286         if (packet->mad.hdr.id < 0 ||
287             packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
288                 ret = -EINVAL;
289                 goto err;
290         }
291
292         down_read(&file->agent_mutex);
293
294         agent = file->agent[packet->mad.hdr.id];
295         if (!agent) {
296                 ret = -EINVAL;
297                 goto err_up;
298         }
299
300         memset(&ah_attr, 0, sizeof ah_attr);
301         ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
302         ah_attr.sl            = packet->mad.hdr.sl;
303         ah_attr.src_path_bits = packet->mad.hdr.path_bits;
304         ah_attr.port_num      = file->port->port_num;
305         if (packet->mad.hdr.grh_present) {
306                 ah_attr.ah_flags = IB_AH_GRH;
307                 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
308                 ah_attr.grh.flow_label     = be32_to_cpu(packet->mad.hdr.flow_label);
309                 ah_attr.grh.hop_limit      = packet->mad.hdr.hop_limit;
310                 ah_attr.grh.traffic_class  = packet->mad.hdr.traffic_class;
311         }
312
313         ah = ib_create_ah(agent->qp->pd, &ah_attr);
314         if (IS_ERR(ah)) {
315                 ret = PTR_ERR(ah);
316                 goto err_up;
317         }
318
319         rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
320         if (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) {
321                 /* RMPP active */
322                 if (!agent->rmpp_version) {
323                         ret = -EINVAL;
324                         goto err_ah;
325                 }
326
327                 /* Validate that the management class can support RMPP */
328                 if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
329                         hdr_len = IB_MGMT_SA_HDR;
330                 } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
331                             (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
332                                 hdr_len = IB_MGMT_VENDOR_HDR;
333                 } else {
334                         ret = -EINVAL;
335                         goto err_ah;
336                 }
337                 rmpp_active = 1;
338                 copy_offset = IB_MGMT_RMPP_HDR;
339         } else {
340                 hdr_len = IB_MGMT_MAD_HDR;
341                 copy_offset = IB_MGMT_MAD_HDR;
342         }
343
344         packet->msg = ib_create_send_mad(agent,
345                                          be32_to_cpu(packet->mad.hdr.qpn),
346                                          0, rmpp_active,
347                                          hdr_len, length - hdr_len,
348                                          GFP_KERNEL);
349         if (IS_ERR(packet->msg)) {
350                 ret = PTR_ERR(packet->msg);
351                 goto err_ah;
352         }
353
354         packet->msg->ah         = ah;
355         packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
356         packet->msg->retries    = packet->mad.hdr.retries;
357         packet->msg->context[0] = packet;
358
359         /* Copy MAD headers (RMPP header in place) */
360         memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);
361         /* Now, copy rest of message from user into send buffer */
362         if (copy_from_user(packet->msg->mad + copy_offset,
363                            buf + sizeof (struct ib_user_mad) + copy_offset,
364                            length - copy_offset)) {
365                 ret = -EFAULT;
366                 goto err_msg;
367         }
368
369         /*
370          * If userspace is generating a request that will generate a
371          * response, we need to make sure the high-order part of the
372          * transaction ID matches the agent being used to send the
373          * MAD.
374          */
375         method = ((struct ib_mad_hdr *) packet->msg->mad)->method;
376
377         if (!(method & IB_MGMT_METHOD_RESP)       &&
378             method != IB_MGMT_METHOD_TRAP_REPRESS &&
379             method != IB_MGMT_METHOD_SEND) {
380                 tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
381                 *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
382                                    (be64_to_cpup(tid) & 0xffffffff));
383         }
384
385         ret = ib_post_send_mad(packet->msg, NULL);
386         if (ret)
387                 goto err_msg;
388
389         up_read(&file->agent_mutex);
390
391         return count;
392
393 err_msg:
394         ib_free_send_mad(packet->msg);
395
396 err_ah:
397         ib_destroy_ah(ah);
398
399 err_up:
400         up_read(&file->agent_mutex);
401
402 err:
403         kfree(packet);
404         return ret;
405 }
406
407 static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
408 {
409         struct ib_umad_file *file = filp->private_data;
410
411         /* we will always be able to post a MAD send */
412         unsigned int mask = POLLOUT | POLLWRNORM;
413
414         poll_wait(filp, &file->recv_wait, wait);
415
416         if (!list_empty(&file->recv_list))
417                 mask |= POLLIN | POLLRDNORM;
418
419         return mask;
420 }
421
422 static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
423 {
424         struct ib_user_mad_reg_req ureq;
425         struct ib_mad_reg_req req;
426         struct ib_mad_agent *agent;
427         int agent_id;
428         int ret;
429
430         down_write(&file->agent_mutex);
431
432         if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
433                 ret = -EFAULT;
434                 goto out;
435         }
436
437         if (ureq.qpn != 0 && ureq.qpn != 1) {
438                 ret = -EINVAL;
439                 goto out;
440         }
441
442         for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
443                 if (!file->agent[agent_id])
444                         goto found;
445
446         ret = -ENOMEM;
447         goto out;
448
449 found:
450         if (ureq.mgmt_class) {
451                 req.mgmt_class         = ureq.mgmt_class;
452                 req.mgmt_class_version = ureq.mgmt_class_version;
453                 memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
454                 memcpy(req.oui,         ureq.oui,         sizeof req.oui);
455         }
456
457         agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
458                                       ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
459                                       ureq.mgmt_class ? &req : NULL,
460                                       ureq.rmpp_version,
461                                       send_handler, recv_handler, file);
462         if (IS_ERR(agent)) {
463                 ret = PTR_ERR(agent);
464                 goto out;
465         }
466
467         file->agent[agent_id] = agent;
468
469         file->mr[agent_id] = ib_get_dma_mr(agent->qp->pd, IB_ACCESS_LOCAL_WRITE);
470         if (IS_ERR(file->mr[agent_id])) {
471                 ret = -ENOMEM;
472                 goto err;
473         }
474
475         if (put_user(agent_id,
476                      (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
477                 ret = -EFAULT;
478                 goto err_mr;
479         }
480
481         ret = 0;
482         goto out;
483
484 err_mr:
485         ib_dereg_mr(file->mr[agent_id]);
486
487 err:
488         file->agent[agent_id] = NULL;
489         ib_unregister_mad_agent(agent);
490
491 out:
492         up_write(&file->agent_mutex);
493         return ret;
494 }
495
496 static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
497 {
498         u32 id;
499         int ret = 0;
500
501         down_write(&file->agent_mutex);
502
503         if (get_user(id, (u32 __user *) arg)) {
504                 ret = -EFAULT;
505                 goto out;
506         }
507
508         if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !file->agent[id]) {
509                 ret = -EINVAL;
510                 goto out;
511         }
512
513         ib_dereg_mr(file->mr[id]);
514         ib_unregister_mad_agent(file->agent[id]);
515         file->agent[id] = NULL;
516
517 out:
518         up_write(&file->agent_mutex);
519         return ret;
520 }
521
522 static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
523                           unsigned long arg)
524 {
525         switch (cmd) {
526         case IB_USER_MAD_REGISTER_AGENT:
527                 return ib_umad_reg_agent(filp->private_data, arg);
528         case IB_USER_MAD_UNREGISTER_AGENT:
529                 return ib_umad_unreg_agent(filp->private_data, arg);
530         default:
531                 return -ENOIOCTLCMD;
532         }
533 }
534
535 static int ib_umad_open(struct inode *inode, struct file *filp)
536 {
537         struct ib_umad_port *port =
538                 container_of(inode->i_cdev, struct ib_umad_port, dev);
539         struct ib_umad_file *file;
540
541         file = kzalloc(sizeof *file, GFP_KERNEL);
542         if (!file)
543                 return -ENOMEM;
544
545         spin_lock_init(&file->recv_lock);
546         init_rwsem(&file->agent_mutex);
547         INIT_LIST_HEAD(&file->recv_list);
548         init_waitqueue_head(&file->recv_wait);
549
550         file->port = port;
551         filp->private_data = file;
552
553         return 0;
554 }
555
556 static int ib_umad_close(struct inode *inode, struct file *filp)
557 {
558         struct ib_umad_file *file = filp->private_data;
559         struct ib_umad_packet *packet, *tmp;
560         int i;
561
562         for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
563                 if (file->agent[i]) {
564                         ib_dereg_mr(file->mr[i]);
565                         ib_unregister_mad_agent(file->agent[i]);
566                 }
567
568         list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
569                 kfree(packet);
570
571         kfree(file);
572
573         return 0;
574 }
575
/* File operations for the regular "umadN" MAD access devices. */
static struct file_operations umad_fops = {
	.owner          = THIS_MODULE,
	.read           = ib_umad_read,
	.write          = ib_umad_write,
	.poll           = ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
	.compat_ioctl   = ib_umad_ioctl,	/* same handler serves 32-bit compat ioctls */
	.open           = ib_umad_open,
	.release        = ib_umad_close
};
586
587 static int ib_umad_sm_open(struct inode *inode, struct file *filp)
588 {
589         struct ib_umad_port *port =
590                 container_of(inode->i_cdev, struct ib_umad_port, sm_dev);
591         struct ib_port_modify props = {
592                 .set_port_cap_mask = IB_PORT_SM
593         };
594         int ret;
595
596         if (filp->f_flags & O_NONBLOCK) {
597                 if (down_trylock(&port->sm_sem))
598                         return -EAGAIN;
599         } else {
600                 if (down_interruptible(&port->sm_sem))
601                         return -ERESTARTSYS;
602         }
603
604         ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
605         if (ret) {
606                 up(&port->sm_sem);
607                 return ret;
608         }
609
610         filp->private_data = port;
611
612         return 0;
613 }
614
615 static int ib_umad_sm_close(struct inode *inode, struct file *filp)
616 {
617         struct ib_umad_port *port = filp->private_data;
618         struct ib_port_modify props = {
619                 .clr_port_cap_mask = IB_PORT_SM
620         };
621         int ret;
622
623         ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
624         up(&port->sm_sem);
625
626         return ret;
627 }
628
/* File operations for the "issmN" SM-capability devices (open/close only). */
static struct file_operations umad_sm_fops = {
	.owner   = THIS_MODULE,
	.open    = ib_umad_sm_open,
	.release = ib_umad_sm_close
};
634
/* IB core client: add/remove are invoked for each IB device in the system. */
static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};
640
/* sysfs "ibdev" attribute: name of the IB device behind this class device. */
static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
648
/* sysfs "port" attribute: IB port number behind this class device. */
static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	return sprintf(buf, "%d\n", port->port_num);
}
static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
656
/* kref release: last reference dropped — free the per-device state. */
static void ib_umad_release_dev(struct kref *ref)
{
	struct ib_umad_device *dev =
		container_of(ref, struct ib_umad_device, ref);

	kfree(dev);
}
664
/*
 * class_device release callback shared by both the umad and issm class
 * devices of a port: delete the matching cdev, return its minor number
 * to dev_map, and drop the device reference taken at registration.
 */
static void ib_umad_release_port(struct class_device *class_dev)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (class_dev == &port->class_dev) {
		cdev_del(&port->dev);
		clear_bit(port->devnum, dev_map);
	} else {
		cdev_del(&port->sm_dev);
		clear_bit(port->sm_devnum, dev_map);
	}

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
}
679
/* sysfs class "infiniband_mad"; release tears down the per-port cdev state. */
static struct class umad_class = {
	.name    = "infiniband_mad",
	.release = ib_umad_release_port
};
684
/* sysfs class attribute: the userspace MAD ABI version this driver speaks. */
static ssize_t show_abi_version(struct class *class, char *buf)
{
	return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
690
691 static int ib_umad_init_port(struct ib_device *device, int port_num,
692                              struct ib_umad_port *port)
693 {
694         spin_lock(&map_lock);
695         port->devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
696         if (port->devnum >= IB_UMAD_MAX_PORTS) {
697                 spin_unlock(&map_lock);
698                 return -1;
699         }
700         port->sm_devnum = find_next_zero_bit(dev_map, IB_UMAD_MAX_PORTS * 2, IB_UMAD_MAX_PORTS);
701         if (port->sm_devnum >= IB_UMAD_MAX_PORTS * 2) {
702                 spin_unlock(&map_lock);
703                 return -1;
704         }
705         set_bit(port->devnum, dev_map);
706         set_bit(port->sm_devnum, dev_map);
707         spin_unlock(&map_lock);
708
709         port->ib_dev   = device;
710         port->port_num = port_num;
711         init_MUTEX(&port->sm_sem);
712
713         cdev_init(&port->dev, &umad_fops);
714         port->dev.owner = THIS_MODULE;
715         kobject_set_name(&port->dev.kobj, "umad%d", port->devnum);
716         if (cdev_add(&port->dev, base_dev + port->devnum, 1))
717                 return -1;
718
719         port->class_dev.class = &umad_class;
720         port->class_dev.dev   = device->dma_device;
721         port->class_dev.devt  = port->dev.dev;
722
723         snprintf(port->class_dev.class_id, BUS_ID_SIZE, "umad%d", port->devnum);
724
725         if (class_device_register(&port->class_dev))
726                 goto err_cdev;
727
728         class_set_devdata(&port->class_dev, port);
729         kref_get(&port->umad_dev->ref);
730
731         if (class_device_create_file(&port->class_dev, &class_device_attr_ibdev))
732                 goto err_class;
733         if (class_device_create_file(&port->class_dev, &class_device_attr_port))
734                 goto err_class;
735
736         cdev_init(&port->sm_dev, &umad_sm_fops);
737         port->sm_dev.owner = THIS_MODULE;
738         kobject_set_name(&port->dev.kobj, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS);
739         if (cdev_add(&port->sm_dev, base_dev + port->sm_devnum, 1))
740                 return -1;
741
742         port->sm_class_dev.class = &umad_class;
743         port->sm_class_dev.dev   = device->dma_device;
744         port->sm_class_dev.devt  = port->sm_dev.dev;
745
746         snprintf(port->sm_class_dev.class_id, BUS_ID_SIZE, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS);
747
748         if (class_device_register(&port->sm_class_dev))
749                 goto err_sm_cdev;
750
751         class_set_devdata(&port->sm_class_dev, port);
752         kref_get(&port->umad_dev->ref);
753
754         if (class_device_create_file(&port->sm_class_dev, &class_device_attr_ibdev))
755                 goto err_sm_class;
756         if (class_device_create_file(&port->sm_class_dev, &class_device_attr_port))
757                 goto err_sm_class;
758
759         return 0;
760
761 err_sm_class:
762         class_device_unregister(&port->sm_class_dev);
763
764 err_sm_cdev:
765         cdev_del(&port->sm_dev);
766
767 err_class:
768         class_device_unregister(&port->class_dev);
769
770 err_cdev:
771         cdev_del(&port->dev);
772         clear_bit(port->devnum, dev_map);
773
774         return -1;
775 }
776
/*
 * IB client "add" callback: allocate per-device state and create the
 * umad/issm devices for every port.  Switches expose a single port 0;
 * CAs expose ports 1..phys_port_cnt.  On allocation or port-init
 * failure the device is simply skipped (no error is propagated).
 */
static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (device->node_type == IB_NODE_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	/* initial reference; dropped below on error or in remove_one() */
	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	/* unwind only the ports that were fully initialized before failure */
	while (--i >= s) {
		class_device_unregister(&umad_dev->port[i - s].class_dev);
		class_device_unregister(&umad_dev->port[i - s].sm_class_dev);
	}

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}
819
820 static void ib_umad_remove_one(struct ib_device *device)
821 {
822         struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
823         int i;
824
825         if (!umad_dev)
826                 return;
827
828         for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) {
829                 class_device_unregister(&umad_dev->port[i].class_dev);
830                 class_device_unregister(&umad_dev->port[i].sm_class_dev);
831         }
832
833         kref_put(&umad_dev->ref, ib_umad_release_dev);
834 }
835
836 static int __init ib_umad_init(void)
837 {
838         int ret;
839
840         spin_lock_init(&map_lock);
841
842         ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
843                                      "infiniband_mad");
844         if (ret) {
845                 printk(KERN_ERR "user_mad: couldn't register device number\n");
846                 goto out;
847         }
848
849         ret = class_register(&umad_class);
850         if (ret) {
851                 printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
852                 goto out_chrdev;
853         }
854
855         ret = class_create_file(&umad_class, &class_attr_abi_version);
856         if (ret) {
857                 printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
858                 goto out_class;
859         }
860
861         ret = ib_register_client(&umad_client);
862         if (ret) {
863                 printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
864                 goto out_class;
865         }
866
867         return 0;
868
869 out_class:
870         class_unregister(&umad_class);
871
872 out_chrdev:
873         unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
874
875 out:
876         return ret;
877 }
878
/* Module exit: undo ib_umad_init() in reverse order. */
static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_unregister(&umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);