/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and the verbs layer.  A single set of static
 * callback pointers, protected by ipath_layer_mutex, means that only
 * one layered driver can be registered at a time.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ipath_layer.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

/*
 * Serializes updates to the layer callback pointers below;
 * acquired before ipath_devs_lock.
 */
static DEFINE_MUTEX(ipath_layer_mutex);

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_intr)
                ret = layer_intr(dd->ipath_layer.l_arg, arg);

        return ret;
}

int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
        int ret;

        mutex_lock(&ipath_layer_mutex);

        ret = __ipath_layer_intr(dd, arg);

        mutex_unlock(&ipath_layer_mutex);

        return ret;
}

int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
                      struct sk_buff *skb)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv)
                ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

        return ret;
}

int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
        int ret = -ENODEV;

        if (dd->ipath_layer.l_arg && layer_rcv_lid)
                ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

        return ret;
}

void ipath_layer_lid_changed(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_intr)
                layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

        mutex_unlock(&ipath_layer_mutex);
}

void ipath_layer_add(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (layer_add_one)
                dd->ipath_layer.l_arg =
                        layer_add_one(dd->ipath_unit, dd);

        mutex_unlock(&ipath_layer_mutex);
}

void ipath_layer_remove(struct ipath_devdata *dd)
{
        mutex_lock(&ipath_layer_mutex);

        if (dd->ipath_layer.l_arg && layer_remove_one) {
                layer_remove_one(dd->ipath_layer.l_arg);
                dd->ipath_layer.l_arg = NULL;
        }

        mutex_unlock(&ipath_layer_mutex);
}

int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
                         void (*l_remove)(void *),
                         int (*l_intr)(void *, u32),
                         int (*l_rcv)(void *, void *, struct sk_buff *),
                         u16 l_rcv_opcode,
                         int (*l_rcv_lid)(void *, void *))
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);

        layer_add_one = l_add;
        layer_remove_one = l_remove;
        layer_intr = l_intr;
        layer_rcv = l_rcv;
        layer_rcv_lid = l_rcv_lid;
        ipath_layer_rcv_opcode = l_rcv_opcode;

        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (!(dd->ipath_flags & IPATH_INITTED))
                        continue;

                if (dd->ipath_layer.l_arg)
                        continue;

                if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
                        *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

                /*
                 * Drop the spinlock while calling into the layered
                 * driver's add routine, which may sleep.
                 */
                spin_unlock_irqrestore(&ipath_devs_lock, flags);
                dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
                spin_lock_irqsave(&ipath_devs_lock, flags);
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);
        mutex_unlock(&ipath_layer_mutex);

        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);

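/*
 * Example (sketch, not part of this file): a hypothetical layered
 * driver registers its callbacks once at module init and unregisters
 * via ipath_layer_unregister() (below) at module exit.  The names
 * my_* and MY_RCV_OPCODE are illustrative only.
 *
 *      static void *my_add_one(int unit, struct ipath_devdata *dd)
 *      {
 *              struct my_dev *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *
 *              if (priv)
 *                      priv->dd = dd;
 *              return priv;    // passed back as the first argument
 *                              // of every other callback
 *      }
 *
 *      static int __init my_init(void)
 *      {
 *              return ipath_layer_register(my_add_one, my_remove_one,
 *                                          my_intr, my_rcv,
 *                                          MY_RCV_OPCODE, my_rcv_lid);
 *      }
 */
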
void ipath_layer_unregister(void)
{
        struct ipath_devdata *dd, *tmp;
        unsigned long flags;

        mutex_lock(&ipath_layer_mutex);
        spin_lock_irqsave(&ipath_devs_lock, flags);

        list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
                if (dd->ipath_layer.l_arg && layer_remove_one) {
                        /* Drop the spinlock across the callback, as in
                         * ipath_layer_register() above. */
                        spin_unlock_irqrestore(&ipath_devs_lock, flags);
                        layer_remove_one(dd->ipath_layer.l_arg);
                        spin_lock_irqsave(&ipath_devs_lock, flags);
                        dd->ipath_layer.l_arg = NULL;
                }
        }

        spin_unlock_irqrestore(&ipath_devs_lock, flags);

        layer_add_one = NULL;
        layer_remove_one = NULL;
        layer_intr = NULL;
        layer_rcv = NULL;
        layer_rcv_lid = NULL;

        mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);

int ipath_layer_open(struct ipath_devdata *dd, u32 *pktmax)
{
        int ret;
        u32 intval = 0;

        mutex_lock(&ipath_layer_mutex);

        if (!dd->ipath_layer.l_arg) {
                ret = -EINVAL;
                goto bail;
        }

        ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);

        if (ret < 0)
                goto bail;

        *pktmax = dd->ipath_ibmaxlen;

        if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
                intval |= IPATH_LAYER_INT_IF_UP;
        if (dd->ipath_lid)
                intval |= IPATH_LAYER_INT_LID;
        if (dd->ipath_mlid)
                intval |= IPATH_LAYER_INT_BCAST;
        /*
         * Do this on open in case the low-level driver is already up
         * and only the layered driver was reloaded.
         */
        if (intval && layer_intr)
                layer_intr(dd->ipath_layer.l_arg, intval);

        ret = 0;
bail:
        mutex_unlock(&ipath_layer_mutex);

        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);

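/*
 * Example (sketch, not part of this file): a layered driver would
 * typically call ipath_layer_open() from its own open path and use the
 * returned maximum packet size to size its buffers:
 *
 *      u32 pktmax;
 *      int ret = ipath_layer_open(dd, &pktmax);
 *
 *      if (ret)
 *              return ret;     // -EINVAL if no layered driver is
 *                              // attached to this device
 *      // pktmax is now dd->ipath_ibmaxlen, the largest packet the
 *      // hardware will send
 */
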
u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
        return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);

/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * The MAC is formed from the device's EUI-64 GUID: the top 3 (OUI)
 * octets, then the low 3 octets, skipping the 2 in between (which
 * should both be zero or 0xff).  The returned MAC is in network order;
 * mac must point to at least 6 bytes of buffer.  We assume that by the
 * time the LID is set, the GUID is as valid as it is ever going to be,
 * rather than adding yet another status bit.
 */
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *mac)
{
        u8 *guid;

        guid = (u8 *) &dd->ipath_guid;

        mac[0] = guid[0];
        mac[1] = guid[1];
        mac[2] = guid[2];
        mac[3] = guid[5];
        mac[4] = guid[6];
        mac[5] = guid[7];
        if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
                ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xff: "
                          "%x %x\n", guid[3], guid[4]);
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);

u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
        return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);

int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
        int ret = 0;
        u32 __iomem *piobuf;
        u32 plen, *uhdr;
        size_t count;
        __be16 vlsllnh;

        if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
                ipath_dbg("send while not open\n");
                ret = -EINVAL;
        } else if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
                   dd->ipath_lid == 0) {
                /*
                 * The LID check covers the case where the SMA hasn't
                 * yet configured the port.
                 */
                ret = -ENETDOWN;
                ipath_cdbg(VERBOSE, "send while not ready, "
                           "mylid=%u, flags=0x%x\n",
                           dd->ipath_lid, dd->ipath_flags);
        }

        vlsllnh = *((__be16 *) hdr);
        if (vlsllnh != htons(IPATH_LRH_BTH)) {
                ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
                          "not sending\n", be16_to_cpu(vlsllnh),
                          IPATH_LRH_BTH);
                ret = -EINVAL;
        }
        if (ret)
                goto done;

        /* Get a PIO buffer to use. */
        piobuf = ipath_getpiobuf(dd, NULL);
        if (piobuf == NULL) {
                ret = -EBUSY;
                goto done;
        }

        plen = (sizeof(*hdr) >> 2); /* header length in 32-bit words */
        ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

        writeq(plen + 1, piobuf); /* len (+1 for pad) to pbc, no flags */
        ipath_flush_wc();
        piobuf += 2; /* point past the 64-bit pbc */
        uhdr = (u32 *) hdr;
        count = plen - 1; /* amount we can copy before the trigger word */
        __iowrite32_copy(piobuf, uhdr, count);
        ipath_flush_wc();
        /* Writing the last (trigger) word starts the send. */
        __raw_writel(uhdr[count], piobuf + count);
        ipath_flush_wc(); /* ensure it's sent, now */

        ipath_stats.sps_ether_spkts++; /* ether packet sent */

done:
        return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);

int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
        set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
        return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
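
/*
 * Example (sketch, not part of this file): sending a header and
 * recovering from a PIO-buffer shortage.  On -EBUSY a layered driver
 * can ask for the PIO-buffer-available interrupt and retry once its
 * interrupt callback indicates that buffers have freed up:
 *
 *      ret = ipath_layer_send_hdr(dd, hdr);
 *      if (ret == -EBUSY) {
 *              // queue hdr for later, then enable the interrupt
 *              ipath_layer_set_piointbufavail_int(dd);
 *      }
 */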