/*
 * linux-2.6: drivers/infiniband/hw/ipath/ipath_mad.c
 * [PATCH] IB/ipath: update copyrights and other strings to reflect new
 * company name
 */
1 /*
2  * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <rdma/ib_smi.h>
35
36 #include "ipath_kernel.h"
37 #include "ipath_verbs.h"
38 #include "ips_common.h"
39
/* MAD common status codes, pre-swapped to network byte order. */
#define IB_SMP_UNSUP_VERSION    __constant_htons(0x0004)
#define IB_SMP_UNSUP_METHOD     __constant_htons(0x0008)
#define IB_SMP_UNSUP_METH_ATTR  __constant_htons(0x000C)
#define IB_SMP_INVALID_FIELD    __constant_htons(0x001C)
44
45 static int reply(struct ib_smp *smp)
46 {
47         /*
48          * The verbs framework will handle the directed/LID route
49          * packet changes.
50          */
51         smp->method = IB_MGMT_METHOD_GET_RESP;
52         if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
53                 smp->status |= IB_SMP_DIRECTION;
54         return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
55 }
56
/*
 * Subn(Get NodeDescription): copy the device's node description string
 * into the SMP payload.
 */
static int recv_subn_get_nodedescription(struct ib_smp *smp,
                                         struct ib_device *ibdev)
{
        /* NodeDescription takes no attribute modifier. */
        if (smp->attr_mod)
                smp->status |= IB_SMP_INVALID_FIELD;

        /*
         * NodeDescription is a fixed-size field and need not be
         * NUL-terminated; strncpy zero-pads any unused tail bytes.
         */
        strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));

        return reply(smp);
}
67
/*
 * On-the-wire layout of the NodeInfo attribute.  All multi-byte fields
 * are big-endian; the struct is packed so it can be overlaid directly
 * on smp->data.
 */
struct nodeinfo {
        u8 base_version;
        u8 class_version;
        u8 node_type;
        u8 num_ports;
        __be64 sys_guid;
        __be64 node_guid;
        __be64 port_guid;
        __be16 partition_cap;
        __be16 device_id;
        __be32 revision;
        u8 local_port_num;
        u8 vendor_id[3];        /* 24-bit IEEE OUI, big-endian */
} __attribute__ ((packed));
82
/*
 * Subn(Get NodeInfo): fill in the NodeInfo attribute from device
 * identity data (GUIDs, device/vendor IDs, chip revision).
 */
static int recv_subn_get_nodeinfo(struct ib_smp *smp,
                                  struct ib_device *ibdev, u8 port)
{
        struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
        struct ipath_devdata *dd = to_idev(ibdev)->dd;
        u32 vendor, boardid, majrev, minrev;

        /* NodeInfo takes no attribute modifier. */
        if (smp->attr_mod)
                smp->status |= IB_SMP_INVALID_FIELD;

        nip->base_version = 1;
        nip->class_version = 1;
        nip->node_type = 1;     /* channel adapter */
        /*
         * XXX The num_ports value will need a layer function to get
         * the value if we ever have more than one IB port on a chip.
         * We will also need to get the GUID for the port.
         */
        nip->num_ports = ibdev->phys_port_cnt;
        /* This is already in network order */
        nip->sys_guid = to_idev(ibdev)->sys_image_guid;
        nip->node_guid = ipath_layer_get_guid(dd);
        /* Single port: the port GUID mirrors the system image GUID. */
        nip->port_guid = nip->sys_guid;
        nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd));
        nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd));
        ipath_layer_query_device(dd, &vendor, &boardid, &majrev, &minrev);
        /* Chip revision: major in the high 16 bits, minor in the low. */
        nip->revision = cpu_to_be32((majrev << 16) | minrev);
        nip->local_port_num = port;
        /* 24-bit OUI; the high byte is zero for this vendor. */
        nip->vendor_id[0] = 0;
        nip->vendor_id[1] = vendor >> 8;
        nip->vendor_id[2] = vendor;

        return reply(smp);
}
117
118 static int recv_subn_get_guidinfo(struct ib_smp *smp,
119                                   struct ib_device *ibdev)
120 {
121         u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
122         __be64 *p = (__be64 *) smp->data;
123
124         /* 32 blocks of 8 64-bit GUIDs per block */
125
126         memset(smp->data, 0, sizeof(smp->data));
127
128         /*
129          * We only support one GUID for now.  If this changes, the
130          * portinfo.guid_cap field needs to be updated too.
131          */
132         if (startgx == 0)
133                 /* The first is a copy of the read-only HW GUID. */
134                 *p = ipath_layer_get_guid(to_idev(ibdev)->dd);
135         else
136                 smp->status |= IB_SMP_INVALID_FIELD;
137
138         return reply(smp);
139 }
140
/*
 * Subn(Get PortInfo): fill in the PortInfo attribute (see ch. 14.2.5.6)
 * from a mix of software state in the ipath_ibdev and live values read
 * from the hardware layer.  Also invoked at the end of a Set to return
 * the resulting state.
 */
static int recv_subn_get_portinfo(struct ib_smp *smp,
                                  struct ib_device *ibdev, u8 port)
{
        struct ipath_ibdev *dev;
        struct ib_port_info *pip = (struct ib_port_info *)smp->data;
        u16 lid;
        u8 ibcstat;
        u8 mtu;
        int ret;

        if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
                smp->status |= IB_SMP_INVALID_FIELD;
                ret = reply(smp);
                goto bail;
        }

        dev = to_idev(ibdev);

        /* Clear all fields.  Only set the non-zero fields. */
        memset(smp->data, 0, sizeof(smp->data));

        /* Only return the mkey if the protection field allows it. */
        if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
            (dev->mkeyprot_resv_lmc >> 6) == 0)
                pip->mkey = dev->mkey;
        pip->gid_prefix = dev->gid_prefix;
        lid = ipath_layer_get_lid(dev->dd);
        /* A LID of zero means "not yet assigned"; report permissive. */
        pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
        pip->sm_lid = cpu_to_be16(dev->sm_lid);
        pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
        /* pip->diag_code; */
        pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
        pip->local_port_num = port;
        pip->link_width_enabled = dev->link_width_enabled;
        pip->link_width_supported = 3;  /* 1x or 4x */
        pip->link_width_active = 2;     /* 4x */
        pip->linkspeed_portstate = 0x10;        /* 2.5Gbps */
        ibcstat = ipath_layer_get_lastibcstat(dev->dd);
        /* Logical port state is taken from bits 5:4 of the IBC status. */
        pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
        pip->portphysstate_linkdown =
                (ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
                (ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2);
        pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc;
        pip->linkspeedactive_enabled = 0x11;    /* 2.5Gbps, 2.5Gbps */
        /* Map the hardware MTU in bytes onto the IB MTU enum. */
        switch (ipath_layer_get_ibmtu(dev->dd)) {
        case 4096:
                mtu = IB_MTU_4096;
                break;
        case 2048:
                mtu = IB_MTU_2048;
                break;
        case 1024:
                mtu = IB_MTU_1024;
                break;
        case 512:
                mtu = IB_MTU_512;
                break;
        case 256:
                mtu = IB_MTU_256;
                break;
        default:                /* oops, something is wrong */
                mtu = IB_MTU_2048;
                break;
        }
        pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
        pip->vlcap_inittype = 0x10;     /* VLCap = VL0, InitType = 0 */
        pip->vl_high_limit = dev->vl_high_limit;
        /* pip->vl_arb_high_cap; // only one VL */
        /* pip->vl_arb_low_cap; // only one VL */
        /* InitTypeReply = 0 */
        pip->inittypereply_mtucap = IB_MTU_4096;
        /* HCAs ignore VLStallCount and HOQLife */
        /* pip->vlstallcnt_hoqlife; */
        pip->operationalvl_pei_peo_fpi_fpo = 0x10;      /* OVLs = 1 */
        pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
        /* P_KeyViolations are counted by hardware. */
        pip->pkey_violations =
                cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) -
                             dev->z_pkey_violations) & 0xFFFF);
        pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
        /* Only the hardware GUID is supported for now */
        pip->guid_cap = 1;
        pip->clientrereg_resv_subnetto = dev->subnet_timeout;
        /* 32.768 usec. response time (guessing) */
        pip->resv_resptimevalue = 3;
        pip->localphyerrors_overrunerrors =
                (ipath_layer_get_phyerrthreshold(dev->dd) << 4) |
                ipath_layer_get_overrunthreshold(dev->dd);
        /* pip->max_credit_hint; */
        /* pip->link_roundtrip_latency[3]; */

        ret = reply(smp);

bail:
        return ret;
}
237
238 static int recv_subn_get_pkeytable(struct ib_smp *smp,
239                                    struct ib_device *ibdev)
240 {
241         u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
242         u16 *p = (u16 *) smp->data;
243         __be16 *q = (__be16 *) smp->data;
244
245         /* 64 blocks of 32 16-bit P_Key entries */
246
247         memset(smp->data, 0, sizeof(smp->data));
248         if (startpx == 0) {
249                 struct ipath_ibdev *dev = to_idev(ibdev);
250                 unsigned i, n = ipath_layer_get_npkeys(dev->dd);
251
252                 ipath_layer_get_pkeys(dev->dd, p);
253
254                 for (i = 0; i < n; i++)
255                         q[i] = cpu_to_be16(p[i]);
256         } else
257                 smp->status |= IB_SMP_INVALID_FIELD;
258
259         return reply(smp);
260 }
261
/*
 * Subn(Set GuidInfo): the GUID table is effectively read-only (only
 * entry 0, the hardware GUID, exists), so a Set simply returns the
 * current contents; any nonzero block is flagged invalid by the Get.
 */
static int recv_subn_set_guidinfo(struct ib_smp *smp,
                                  struct ib_device *ibdev)
{
        /* The only GUID we support is the first read-only entry. */
        return recv_subn_get_guidinfo(smp, ibdev);
}
268
/**
 * recv_subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 *
 * On any invalid field the MAD status is set and the current (possibly
 * partially updated — see the XXX notes below) PortInfo is returned,
 * which is the required response for a failed Set.
 */
static int recv_subn_set_portinfo(struct ib_smp *smp,
                                  struct ib_device *ibdev, u8 port)
{
        struct ib_port_info *pip = (struct ib_port_info *)smp->data;
        struct ib_event event;
        struct ipath_ibdev *dev;
        u32 flags;
        char clientrereg = 0;
        u16 lid, smlid;
        u8 lwe;
        u8 lse;
        u8 state;
        u16 lstate;
        u32 mtu;
        int ret;

        if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
                goto err;

        dev = to_idev(ibdev);
        event.device = ibdev;
        event.element.port_num = port;

        dev->mkey = pip->mkey;
        dev->gid_prefix = pip->gid_prefix;
        dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

        lid = be16_to_cpu(pip->lid);
        if (lid != ipath_layer_get_lid(dev->dd)) {
                /* Must be a valid unicast LID address. */
                if (lid == 0 || lid >= IPS_MULTICAST_LID_BASE)
                        goto err;
                ipath_set_sps_lid(dev->dd, lid, pip->mkeyprot_resv_lmc & 7);
                event.event = IB_EVENT_LID_CHANGE;
                ib_dispatch_event(&event);
        }

        smlid = be16_to_cpu(pip->sm_lid);
        if (smlid != dev->sm_lid) {
                /* Must be a valid unicast LID address. */
                if (smlid == 0 || smlid >= IPS_MULTICAST_LID_BASE)
                        goto err;
                dev->sm_lid = smlid;
                event.event = IB_EVENT_SM_CHANGE;
                ib_dispatch_event(&event);
        }

        /* Only 4x supported but allow 1x or 4x to be set (see 14.2.6.6). */
        lwe = pip->link_width_enabled;
        if ((lwe >= 4 && lwe <= 8) || (lwe >= 0xC && lwe <= 0xFE))
                goto err;
        if (lwe == 0xFF)
                dev->link_width_enabled = 3;    /* 1x or 4x */
        else if (lwe)
                dev->link_width_enabled = lwe;

        /* Only 2.5 Gbs supported. */
        lse = pip->linkspeedactive_enabled & 0xF;
        if (lse >= 2 && lse <= 0xE)
                goto err;

        /* Set link down default state. */
        switch (pip->portphysstate_linkdown & 0xF) {
        case 0: /* NOP */
                break;
        case 1: /* SLEEP */
                if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1))
                        goto err;
                break;
        case 2: /* POLL */
                if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0))
                        goto err;
                break;
        default:
                goto err;
        }

        dev->mkeyprot_resv_lmc = pip->mkeyprot_resv_lmc;
        dev->vl_high_limit = pip->vl_high_limit;

        /* Translate the IB MTU enum into bytes for the hardware layer. */
        switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
        case IB_MTU_256:
                mtu = 256;
                break;
        case IB_MTU_512:
                mtu = 512;
                break;
        case IB_MTU_1024:
                mtu = 1024;
                break;
        case IB_MTU_2048:
                mtu = 2048;
                break;
        case IB_MTU_4096:
                mtu = 4096;
                break;
        default:
                /* XXX We have already partially updated our state! */
                goto err;
        }
        ipath_layer_set_mtu(dev->dd, mtu);

        dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;

        /* We only support VL0 */
        if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
                goto err;

        /* Writing zero resets the violation counter. */
        if (pip->mkey_violations == 0)
                dev->mkey_violations = 0;

        /*
         * Hardware counter can't be reset so snapshot and subtract
         * later.
         */
        if (pip->pkey_violations == 0)
                dev->z_pkey_violations =
                        ipath_layer_get_cr_errpkey(dev->dd);

        if (pip->qkey_violations == 0)
                dev->qkey_violations = 0;

        if (ipath_layer_set_phyerrthreshold(
                    dev->dd,
                    (pip->localphyerrors_overrunerrors >> 4) & 0xF))
                goto err;

        if (ipath_layer_set_overrunthreshold(
                    dev->dd,
                    (pip->localphyerrors_overrunerrors & 0xF)))
                goto err;

        dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

        /* Bit 7 requests a client-reregistration event. */
        if (pip->clientrereg_resv_subnetto & 0x80) {
                clientrereg = 1;
                event.event = IB_EVENT_CLIENT_REREGISTER;
                ib_dispatch_event(&event);
        }

        /*
         * Do the port state change now that the other link parameters
         * have been set.
         * Changing the port physical state only makes sense if the link
         * is down or is being set to down.
         */
        state = pip->linkspeed_portstate & 0xF;
        flags = ipath_layer_get_flags(dev->dd);
        lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
        if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
                goto err;

        /*
         * Only state changes of DOWN, ARM, and ACTIVE are valid
         * and must be in the correct state to take effect (see 7.2.6).
         */
        switch (state) {
        case IB_PORT_NOP:
                if (lstate == 0)
                        break;
                /* FALLTHROUGH */
        case IB_PORT_DOWN:
                /*
                 * NOTE(review): the unbraced nested if/else below parses
                 * correctly (each "else" binds to the nearest unmatched
                 * "if"), but braces around the lstate == 0 arm would be
                 * safer against future edits.
                 */
                if (lstate == 0)
                        if (ipath_layer_get_linkdowndefaultstate(dev->dd))
                                lstate = IPATH_IB_LINKDOWN_SLEEP;
                        else
                                lstate = IPATH_IB_LINKDOWN;
                else if (lstate == 1)
                        lstate = IPATH_IB_LINKDOWN_SLEEP;
                else if (lstate == 2)
                        lstate = IPATH_IB_LINKDOWN;
                else if (lstate == 3)
                        lstate = IPATH_IB_LINKDOWN_DISABLE;
                else
                        goto err;
                ipath_layer_set_linkstate(dev->dd, lstate);
                if (flags & IPATH_LINKACTIVE) {
                        event.event = IB_EVENT_PORT_ERR;
                        ib_dispatch_event(&event);
                }
                break;
        case IB_PORT_ARMED:
                if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE)))
                        break;
                ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM);
                if (flags & IPATH_LINKACTIVE) {
                        event.event = IB_EVENT_PORT_ERR;
                        ib_dispatch_event(&event);
                }
                break;
        case IB_PORT_ACTIVE:
                if (!(flags & IPATH_LINKARMED))
                        break;
                ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
                event.event = IB_EVENT_PORT_ACTIVE;
                ib_dispatch_event(&event);
                break;
        default:
                /* XXX We have already partially updated our state! */
                goto err;
        }

        /* A Set reply carries the resulting PortInfo. */
        ret = recv_subn_get_portinfo(smp, ibdev, port);

        if (clientrereg)
                pip->clientrereg_resv_subnetto |= 0x80;

        goto done;

err:
        smp->status |= IB_SMP_INVALID_FIELD;
        ret = recv_subn_get_portinfo(smp, ibdev, port);

done:
        return ret;
}
493
/*
 * Subn(Set PKeyTable): byteswap the incoming table from wire to host
 * order in place (p and q alias the same smp->data buffer), push it to
 * the hardware layer, then return the resulting table via the Get path.
 */
static int recv_subn_set_pkeytable(struct ib_smp *smp,
                                   struct ib_device *ibdev)
{
        u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
        __be16 *p = (__be16 *) smp->data;
        u16 *q = (u16 *) smp->data;
        struct ipath_ibdev *dev = to_idev(ibdev);
        unsigned i, n = ipath_layer_get_npkeys(dev->dd);

        for (i = 0; i < n; i++)
                q[i] = be16_to_cpu(p[i]);

        /*
         * Only block 0 is implemented; short-circuit means set_pkeys is
         * attempted only when startpx == 0.
         */
        if (startpx != 0 ||
            ipath_layer_set_pkeys(dev->dd, q) != 0)
                smp->status |= IB_SMP_INVALID_FIELD;

        return recv_subn_get_pkeytable(smp, ibdev);
}
512
/* Performance management attribute IDs, pre-swapped to network order. */
#define IB_PMA_CLASS_PORT_INFO          __constant_htons(0x0001)
#define IB_PMA_PORT_SAMPLES_CONTROL     __constant_htons(0x0010)
#define IB_PMA_PORT_SAMPLES_RESULT      __constant_htons(0x0011)
#define IB_PMA_PORT_COUNTERS            __constant_htons(0x0012)
#define IB_PMA_PORT_COUNTERS_EXT        __constant_htons(0x001D)
#define IB_PMA_PORT_SAMPLES_RESULT_EXT  __constant_htons(0x001E)
519
/*
 * Common header of a performance management MAD followed by the
 * attribute payload.  Packed; multi-byte fields are big-endian.
 */
struct ib_perf {
        u8 base_version;
        u8 mgmt_class;
        u8 class_version;
        u8 method;
        __be16 status;
        __be16 unused;
        __be64 tid;
        __be16 attr_id;
        __be16 resv;
        __be32 attr_mod;
        u8 reserved[40];
        u8 data[192];
} __attribute__ ((packed));
534
/* On-the-wire layout of the PMA ClassPortInfo attribute. */
struct ib_pma_classportinfo {
        u8 base_version;
        u8 class_version;
        __be16 cap_mask;
        u8 reserved[3];
        u8 resp_time_value;     /* only lower 5 bits */
        union ib_gid redirect_gid;
        __be32 redirect_tc_sl_fl;       /* 8, 4, 20 bits respectively */
        __be16 redirect_lid;
        __be16 redirect_pkey;
        __be32 redirect_qp;     /* only lower 24 bits */
        __be32 redirect_qkey;
        union ib_gid trap_gid;
        __be32 trap_tc_sl_fl;   /* 8, 4, 20 bits respectively */
        __be16 trap_lid;
        __be16 trap_pkey;
        __be32 trap_hl_qp;      /* 8, 24 bits respectively */
        __be32 trap_qkey;
} __attribute__ ((packed));
554
/* On-the-wire layout of the PMA PortSamplesControl attribute. */
struct ib_pma_portsamplescontrol {
        u8 opcode;
        u8 port_select;
        u8 tick;
        u8 counter_width;       /* only lower 3 bits */
        __be32 counter_mask0_9; /* 2, 10 * 3, bits */
        __be16 counter_mask10_14;       /* 1, 5 * 3, bits */
        u8 sample_mechanisms;
        u8 sample_status;       /* only lower 2 bits */
        __be64 option_mask;
        __be64 vendor_mask;
        __be32 sample_start;
        __be32 sample_interval;
        __be16 tag;
        __be16 counter_select[15];
} __attribute__ ((packed));
571
/* On-the-wire layout of the PMA PortSamplesResult attribute (32-bit). */
struct ib_pma_portsamplesresult {
        __be16 tag;
        __be16 sample_status;   /* only lower 2 bits */
        __be32 counter[15];
} __attribute__ ((packed));
577
/* On-the-wire layout of PortSamplesResultExtended (64-bit counters). */
struct ib_pma_portsamplesresult_ext {
        __be16 tag;
        __be16 sample_status;   /* only lower 2 bits */
        __be32 extended_width;  /* only upper 2 bits */
        __be64 counter[15];
} __attribute__ ((packed));
584
/* On-the-wire layout of the PMA PortCounters attribute. */
struct ib_pma_portcounters {
        u8 reserved;
        u8 port_select;
        __be16 counter_select;
        __be16 symbol_error_counter;
        u8 link_error_recovery_counter;
        u8 link_downed_counter;
        __be16 port_rcv_errors;
        __be16 port_rcv_remphys_errors;
        __be16 port_rcv_switch_relay_errors;
        __be16 port_xmit_discards;
        u8 port_xmit_constraint_errors;
        u8 port_rcv_constraint_errors;
        u8 reserved1;
        u8 lli_ebor_errors;     /* 4, 4, bits */
        __be16 reserved2;
        __be16 vl15_dropped;
        __be32 port_xmit_data;
        __be32 port_rcv_data;
        __be32 port_xmit_packets;
        __be32 port_rcv_packets;
} __attribute__ ((packed));
607
/* PortCounters.counter_select bits, pre-swapped to network order. */
#define IB_PMA_SEL_SYMBOL_ERROR                 __constant_htons(0x0001)
#define IB_PMA_SEL_LINK_ERROR_RECOVERY          __constant_htons(0x0002)
#define IB_PMA_SEL_LINK_DOWNED                  __constant_htons(0x0004)
#define IB_PMA_SEL_PORT_RCV_ERRORS              __constant_htons(0x0008)
#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS      __constant_htons(0x0010)
#define IB_PMA_SEL_PORT_XMIT_DISCARDS           __constant_htons(0x0040)
#define IB_PMA_SEL_PORT_XMIT_DATA               __constant_htons(0x1000)
#define IB_PMA_SEL_PORT_RCV_DATA                __constant_htons(0x2000)
#define IB_PMA_SEL_PORT_XMIT_PACKETS            __constant_htons(0x4000)
#define IB_PMA_SEL_PORT_RCV_PACKETS             __constant_htons(0x8000)
618
/* On-the-wire layout of the PMA PortCountersExtended attribute. */
struct ib_pma_portcounters_ext {
        u8 reserved;
        u8 port_select;
        __be16 counter_select;
        __be32 reserved1;
        __be64 port_xmit_data;
        __be64 port_rcv_data;
        __be64 port_xmit_packets;
        __be64 port_rcv_packets;
        __be64 port_unicast_xmit_packets;
        __be64 port_unicast_rcv_packets;
        __be64 port_multicast_xmit_packets;
        __be64 port_multicast_rcv_packets;
} __attribute__ ((packed));
633
/* PortCountersExtended.counter_select bits, network byte order. */
#define IB_PMA_SELX_PORT_XMIT_DATA              __constant_htons(0x0001)
#define IB_PMA_SELX_PORT_RCV_DATA               __constant_htons(0x0002)
#define IB_PMA_SELX_PORT_XMIT_PACKETS           __constant_htons(0x0004)
#define IB_PMA_SELX_PORT_RCV_PACKETS            __constant_htons(0x0008)
#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS       __constant_htons(0x0010)
#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS        __constant_htons(0x0020)
#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS     __constant_htons(0x0040)
#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS      __constant_htons(0x0080)
642
643 static int recv_pma_get_classportinfo(struct ib_perf *pmp)
644 {
645         struct ib_pma_classportinfo *p =
646                 (struct ib_pma_classportinfo *)pmp->data;
647
648         memset(pmp->data, 0, sizeof(pmp->data));
649
650         if (pmp->attr_mod != 0)
651                 pmp->status |= IB_SMP_INVALID_FIELD;
652
653         /* Indicate AllPortSelect is valid (only one port anyway) */
654         p->cap_mask = __constant_cpu_to_be16(1 << 8);
655         p->base_version = 1;
656         p->class_version = 1;
657         /*
658          * Expected response time is 4.096 usec. * 2^18 == 1.073741824
659          * sec.
660          */
661         p->resp_time_value = 18;
662
663         return reply((struct ib_smp *) pmp);
664 }
665
/*
 * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
 * We support 5 counters which only count the mandatory quantities.
 *
 * Both macro parameters are parenthesized in the expansion so that
 * expression arguments (e.g. "n - 1" or "a | b") expand with the
 * intended precedence; all current call sites pass plain literals, so
 * the generated values are unchanged.
 */
#define COUNTER_MASK(q, n) ((q) << ((9 - (n)) * 3))
#define COUNTER_MASK0_9 \
	__constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
			       COUNTER_MASK(1, 1) | \
			       COUNTER_MASK(1, 2) | \
			       COUNTER_MASK(1, 3) | \
			       COUNTER_MASK(1, 4))
678
/*
 * PMA(Get PortSamplesControl): report the current sampling setup (tick,
 * counter width/masks, start/interval, tag and the five counter_select
 * codes).  The device sampling state is read under pending_lock so a
 * concurrent Set or sample completion is never seen half-updated.
 */
static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
                                           struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplescontrol *p =
                (struct ib_pma_portsamplescontrol *)pmp->data;
        struct ipath_ibdev *dev = to_idev(ibdev);
        unsigned long flags;
        u8 port_select = p->port_select;

        memset(pmp->data, 0, sizeof(pmp->data));

        /* Echo the requested port; only this port or 0xFF (all) is valid. */
        p->port_select = port_select;
        if (pmp->attr_mod != 0 ||
            (port_select != port && port_select != 0xFF))
                pmp->status |= IB_SMP_INVALID_FIELD;
        /*
         * Ticks are 10x the link transfer period which for 2.5Gbs is 4
         * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
         * intervals are counted in ticks.  Since we use Linux timers, that
         * count in jiffies, we can't sample for less than 1000 ticks if HZ
         * == 1000 (4000 ticks if HZ is 250).
         */
        /* XXX This is WRONG. */
        p->tick = 250;          /* 1 usec. */
        p->counter_width = 4;   /* 32 bit counters */
        p->counter_mask0_9 = COUNTER_MASK0_9;
        spin_lock_irqsave(&dev->pending_lock, flags);
        p->sample_status = dev->pma_sample_status;
        p->sample_start = cpu_to_be32(dev->pma_sample_start);
        p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
        p->tag = cpu_to_be16(dev->pma_tag);
        p->counter_select[0] = dev->pma_counter_select[0];
        p->counter_select[1] = dev->pma_counter_select[1];
        p->counter_select[2] = dev->pma_counter_select[2];
        p->counter_select[3] = dev->pma_counter_select[3];
        p->counter_select[4] = dev->pma_counter_select[4];
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        return reply((struct ib_smp *) pmp);
}
719
/*
 * PMA(Set PortSamplesControl): start a new sampling run.  The new
 * parameters are accepted only while no run is in progress
 * (sample_status == DONE); a counter_select entry of 0 keeps the
 * previous selection.  The reply carries the resulting control state.
 */
static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
                                           struct ib_device *ibdev, u8 port)
{
        struct ib_pma_portsamplescontrol *p =
                (struct ib_pma_portsamplescontrol *)pmp->data;
        struct ipath_ibdev *dev = to_idev(ibdev);
        unsigned long flags;
        u32 start;
        int ret;

        /* Only this port or AllPortSelect (0xFF) may be addressed. */
        if (pmp->attr_mod != 0 ||
            (p->port_select != port && p->port_select != 0xFF)) {
                pmp->status |= IB_SMP_INVALID_FIELD;
                ret = reply((struct ib_smp *) pmp);
                goto bail;
        }

        start = be32_to_cpu(p->sample_start);
        if (start != 0) {
                /* Update sampling state atomically w.r.t. readers. */
                spin_lock_irqsave(&dev->pending_lock, flags);
                if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_DONE) {
                        dev->pma_sample_status =
                                IB_PMA_SAMPLE_STATUS_STARTED;
                        dev->pma_sample_start = start;
                        dev->pma_sample_interval =
                                be32_to_cpu(p->sample_interval);
                        dev->pma_tag = be16_to_cpu(p->tag);
                        if (p->counter_select[0])
                                dev->pma_counter_select[0] =
                                        p->counter_select[0];
                        if (p->counter_select[1])
                                dev->pma_counter_select[1] =
                                        p->counter_select[1];
                        if (p->counter_select[2])
                                dev->pma_counter_select[2] =
                                        p->counter_select[2];
                        if (p->counter_select[3])
                                dev->pma_counter_select[3] =
                                        p->counter_select[3];
                        if (p->counter_select[4])
                                dev->pma_counter_select[4] =
                                        p->counter_select[4];
                }
                spin_unlock_irqrestore(&dev->pending_lock, flags);
        }
        ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
        return ret;
}
770
771 static u64 get_counter(struct ipath_ibdev *dev, __be16 sel)
772 {
773         u64 ret;
774
775         switch (sel) {
776         case IB_PMA_PORT_XMIT_DATA:
777                 ret = dev->ipath_sword;
778                 break;
779         case IB_PMA_PORT_RCV_DATA:
780                 ret = dev->ipath_rword;
781                 break;
782         case IB_PMA_PORT_XMIT_PKTS:
783                 ret = dev->ipath_spkts;
784                 break;
785         case IB_PMA_PORT_RCV_PKTS:
786                 ret = dev->ipath_rpkts;
787                 break;
788         case IB_PMA_PORT_XMIT_WAIT:
789                 ret = dev->ipath_xmit_wait;
790                 break;
791         default:
792                 ret = 0;
793         }
794
795         return ret;
796 }
797
798 static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
799                                           struct ib_device *ibdev)
800 {
801         struct ib_pma_portsamplesresult *p =
802                 (struct ib_pma_portsamplesresult *)pmp->data;
803         struct ipath_ibdev *dev = to_idev(ibdev);
804         int i;
805
806         memset(pmp->data, 0, sizeof(pmp->data));
807         p->tag = cpu_to_be16(dev->pma_tag);
808         p->sample_status = cpu_to_be16(dev->pma_sample_status);
809         for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
810                 p->counter[i] = cpu_to_be32(
811                         get_counter(dev, dev->pma_counter_select[i]));
812
813         return reply((struct ib_smp *) pmp);
814 }
815
816 static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
817                                               struct ib_device *ibdev)
818 {
819         struct ib_pma_portsamplesresult_ext *p =
820                 (struct ib_pma_portsamplesresult_ext *)pmp->data;
821         struct ipath_ibdev *dev = to_idev(ibdev);
822         int i;
823
824         memset(pmp->data, 0, sizeof(pmp->data));
825         p->tag = cpu_to_be16(dev->pma_tag);
826         p->sample_status = cpu_to_be16(dev->pma_sample_status);
827         /* 64 bits */
828         p->extended_width = __constant_cpu_to_be32(0x80000000);
829         for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
830                 p->counter[i] = cpu_to_be64(
831                         get_counter(dev, dev->pma_counter_select[i]));
832
833         return reply((struct ib_smp *) pmp);
834 }
835
/*
 * Handle a PerfMgt Get(PortCounters) request.
 *
 * The hardware counters cannot be cleared, so the values captured at the
 * last Set(PortCounters) (the dev->z_* baselines) are subtracted here to
 * emulate a reset.  Each result is then saturated to the width of its
 * wire field (8, 16 or 32 bits) before being stored big-endian in the
 * response payload.
 */
static int recv_pma_get_portcounters(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_layer_counters cntrs;
	u8 port_select = p->port_select;	/* save before the payload is zeroed */

	ipath_layer_get_counters(dev->dd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		dev->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= dev->z_link_downed_counter;
	/* dev->rcv_errors is maintained by the driver and folded in here. */
	cntrs.port_rcv_errors += dev->rcv_errors;
	cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
	cntrs.port_xmit_data -= dev->z_port_xmit_data;
	cntrs.port_rcv_data -= dev->z_port_rcv_data;
	cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
	cntrs.port_rcv_packets -= dev->z_port_rcv_packets;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	/* Only attribute modifier 0 and this port (or all ports) are valid. */
	if (pmp->attr_mod != 0 ||
	    (port_select != port && port_select != 0xFF))
		pmp->status |= IB_SMP_INVALID_FIELD;

	/* Saturate each counter to its wire-field width. */
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}
918
919 static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
920                                          struct ib_device *ibdev, u8 port)
921 {
922         struct ib_pma_portcounters_ext *p =
923                 (struct ib_pma_portcounters_ext *)pmp->data;
924         struct ipath_ibdev *dev = to_idev(ibdev);
925         u64 swords, rwords, spkts, rpkts, xwait;
926         u8 port_select = p->port_select;
927
928         ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
929                                       &rpkts, &xwait);
930
931         /* Adjust counters for any resets done. */
932         swords -= dev->z_port_xmit_data;
933         rwords -= dev->z_port_rcv_data;
934         spkts -= dev->z_port_xmit_packets;
935         rpkts -= dev->z_port_rcv_packets;
936
937         memset(pmp->data, 0, sizeof(pmp->data));
938
939         p->port_select = port_select;
940         if (pmp->attr_mod != 0 ||
941             (port_select != port && port_select != 0xFF))
942                 pmp->status |= IB_SMP_INVALID_FIELD;
943
944         p->port_xmit_data = cpu_to_be64(swords);
945         p->port_rcv_data = cpu_to_be64(rwords);
946         p->port_xmit_packets = cpu_to_be64(spkts);
947         p->port_rcv_packets = cpu_to_be64(rpkts);
948         p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
949         p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
950         p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
951         p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);
952
953         return reply((struct ib_smp *) pmp);
954 }
955
/*
 * Handle a PerfMgt Set(PortCounters) request.
 *
 * The hardware cannot clear its counters, so a "clear" is emulated:
 * for every counter whose CounterSelect bit is set, the current value
 * is saved in the corresponding dev->z_* baseline, which Get requests
 * later subtract.  The response is the same as a Get issued after the
 * clear.
 */
static int recv_pma_set_portcounters(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_layer_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	ipath_layer_get_counters(dev->dd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		dev->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		dev->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		dev->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		/* Include the driver-maintained count, as Get does. */
		dev->z_port_rcv_errors =
			cntrs.port_rcv_errors + dev->rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		dev->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		dev->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		dev->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		dev->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = cntrs.port_rcv_packets;

	/* Reply with the post-clear counter values. */
	return recv_pma_get_portcounters(pmp, ibdev, port);
}
1005
/*
 * Handle a PerfMgt Set(PortCountersExtended) request.
 *
 * As with Set(PortCounters), a "clear" is emulated by snapshotting the
 * selected 64-bit counters into dev->z_* baselines; the purely
 * software-maintained unicast/multicast counts are simply zeroed.
 *
 * NOTE(review): the payload is cast to the non-extended
 * ib_pma_portcounters struct to read counter_select; this presumably
 * overlays the same offset in the extended layout — verify against the
 * struct definitions in the header.
 */
static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct ipath_ibdev *dev = to_idev(ibdev);
	u64 swords, rwords, spkts, rpkts, xwait;

	ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
				      &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		dev->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		dev->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		dev->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		dev->z_port_rcv_packets = rpkts;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		dev->n_unicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		dev->n_unicast_rcv = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		dev->n_multicast_xmit = 0;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		dev->n_multicast_rcv = 0;

	/* Reply with the post-clear counter values. */
	return recv_pma_get_portcounters_ext(pmp, ibdev, port);
}
1043
/*
 * process_subn - dispatch a subnet-management (SMP) MAD
 * @ibdev: the device the MAD arrived on
 * @mad_flags: MAD flags from the ib_mad layer (e.g. IB_MAD_IGNORE_MKEY)
 * @port_num: the port the MAD arrived on
 * @in_mad: the incoming MAD
 * @out_mad: the outgoing MAD reply (initialized from @in_mad)
 *
 * Enforces M_Key protection per the PortInfo M_Key_protect_bits, then
 * dispatches Get/Set requests by attribute ID.  Returns an
 * IB_MAD_RESULT_* code for the ib_mad layer.
 */
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port_num, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct ipath_ibdev *dev = to_idev(ibdev);
	int ret;

	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	/* Is the mkey in the process of expiring? */
	if (dev->mkey_lease_timeout && jiffies >= dev->mkey_lease_timeout) {
		/* Clear timeout and mkey protection field. */
		dev->mkey_lease_timeout = 0;
		/* Top two bits of mkeyprot_resv_lmc are the protect bits. */
		dev->mkeyprot_resv_lmc &= 0x3F;
	}

	/*
	 * M_Key checking depends on
	 * Portinfo:M_Key_protect_bits
	 */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
	    dev->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     (smp->method == IB_MGMT_METHOD_GET &&
	      (dev->mkeyprot_resv_lmc >> 7) != 0))) {
		/* M_Key violation: count it (saturating at 0xFFFF). */
		if (dev->mkey_violations != 0xFFFF)
			++dev->mkey_violations;
		/*
		 * If a lease timer is already running, or no lease period
		 * is configured, silently consume the MAD without reply.
		 */
		if (dev->mkey_lease_timeout ||
		    dev->mkey_lease_period == 0) {
			ret = IB_MAD_RESULT_SUCCESS |
				IB_MAD_RESULT_CONSUMED;
			goto bail;
		}
		/* Start the M_Key lease timer. */
		dev->mkey_lease_timeout = jiffies +
			dev->mkey_lease_period * HZ;
		/* Future: Generate a trap notice. */
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		goto bail;
	} else if (dev->mkey_lease_timeout)
		/* A MAD with the correct M_Key cancels a pending lease. */
		dev->mkey_lease_timeout = 0;

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = recv_subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_get_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_get_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_get_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			/* SM disabled: consume silently, no reply. */
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			/* Local SM active: let it see the MAD. */
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = recv_subn_set_guidinfo(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = recv_subn_set_portinfo(smp, ibdev, port_num);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = recv_subn_set_pkeytable(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			/* Same SM handling as the Get case above. */
			if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (dev->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_FAILURE;
		goto bail;
	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}
1170
/*
 * process_perf - dispatch a performance-management (PMA) MAD
 * @ibdev: the device the MAD arrived on
 * @port_num: the port the MAD arrived on
 * @in_mad: the incoming MAD
 * @out_mad: the outgoing MAD reply (initialized from @in_mad)
 *
 * Dispatches Get/Set requests by attribute ID to the recv_pma_*
 * handlers above.  Returns an IB_MAD_RESULT_* code for the ib_mad
 * layer.
 */
static int process_perf(struct ib_device *ibdev, u8 port_num,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_perf *pmp = (struct ib_perf *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->class_version != 1) {
		pmp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = recv_pma_get_classportinfo(pmp);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = recv_pma_get_portsamplesresult(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = recv_pma_get_portsamplesresult_ext(pmp,
								 ibdev);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_get_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_get_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
							      port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = recv_pma_set_portcounters(pmp, ibdev,
							port_num);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = recv_pma_set_portcounters_ext(pmp, ibdev,
							    port_num);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_FAILURE;
		goto bail;
	default:
		pmp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}
1252
1253 /**
1254  * ipath_process_mad - process an incoming MAD packet
1255  * @ibdev: the infiniband device this packet came in on
1256  * @mad_flags: MAD flags
1257  * @port_num: the port number this packet came in on
1258  * @in_wc: the work completion entry for this packet
1259  * @in_grh: the global route header for this packet
1260  * @in_mad: the incoming MAD
1261  * @out_mad: any outgoing MAD reply
1262  *
1263  * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
1264  * interested in processing.
1265  *
1266  * Note that the verbs framework has already done the MAD sanity checks,
1267  * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
1268  * MADs.
1269  *
1270  * This is called by the ib_mad module.
1271  */
1272 int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
1273                       struct ib_wc *in_wc, struct ib_grh *in_grh,
1274                       struct ib_mad *in_mad, struct ib_mad *out_mad)
1275 {
1276         struct ipath_ibdev *dev = to_idev(ibdev);
1277         int ret;
1278
1279         /*
1280          * Snapshot current HW counters to "clear" them.
1281          * This should be done when the driver is loaded except that for
1282          * some reason we get a zillion errors when brining up the link.
1283          */
1284         if (dev->rcv_errors == 0) {
1285                 struct ipath_layer_counters cntrs;
1286
1287                 ipath_layer_get_counters(to_idev(ibdev)->dd, &cntrs);
1288                 dev->rcv_errors++;
1289                 dev->z_symbol_error_counter = cntrs.symbol_error_counter;
1290                 dev->z_link_error_recovery_counter =
1291                         cntrs.link_error_recovery_counter;
1292                 dev->z_link_downed_counter = cntrs.link_downed_counter;
1293                 dev->z_port_rcv_errors = cntrs.port_rcv_errors + 1;
1294                 dev->z_port_rcv_remphys_errors =
1295                         cntrs.port_rcv_remphys_errors;
1296                 dev->z_port_xmit_discards = cntrs.port_xmit_discards;
1297                 dev->z_port_xmit_data = cntrs.port_xmit_data;
1298                 dev->z_port_rcv_data = cntrs.port_rcv_data;
1299                 dev->z_port_xmit_packets = cntrs.port_xmit_packets;
1300                 dev->z_port_rcv_packets = cntrs.port_rcv_packets;
1301         }
1302         switch (in_mad->mad_hdr.mgmt_class) {
1303         case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
1304         case IB_MGMT_CLASS_SUBN_LID_ROUTED:
1305                 ret = process_subn(ibdev, mad_flags, port_num,
1306                                    in_mad, out_mad);
1307                 goto bail;
1308         case IB_MGMT_CLASS_PERF_MGMT:
1309                 ret = process_perf(ibdev, port_num, in_mad, out_mad);
1310                 goto bail;
1311         default:
1312                 ret = IB_MAD_RESULT_SUCCESS;
1313         }
1314
1315 bail:
1316         return ret;
1317 }