/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

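/*
 * Address handle (AH) management: an ipoib_ah wraps an ib_ah together
 * with a kref and the tx_head value of the last send that used it, so
 * a handle is only destroyed once the hardware can no longer be
 * referencing it (see __ipoib_reap_ah() below).
 */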
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

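/*
 * UD receive buffers use one of two layouts depending on the link MTU:
 * a single linear buffer of IPOIB_UD_BUF_SIZE(mtu), or, when
 * ipoib_ud_need_sg() reports the MTU is too large for that (e.g. a 4K
 * IB MTU), a linear head of IPOIB_UD_HEAD_SIZE plus one full page as a
 * second scatter/gather entry.
 */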
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
				    DMA_FROM_DEVICE);
		ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
				  DMA_FROM_DEVICE);
	} else
		ib_dma_unmap_single(priv->ca, mapping[0],
				    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
				    DMA_FROM_DEVICE);
}

static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
				   struct sk_buff *skb,
				   unsigned int length)
{
	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
		unsigned int size;
		/*
		 * Only two buffers are needed for max_payload = 4K;
		 * the first buffer's size is IPOIB_UD_HEAD_SIZE.
		 */
		skb->tail += IPOIB_UD_HEAD_SIZE;
		skb->len  += length;

		size = length - IPOIB_UD_HEAD_SIZE;

		frag->size     = size;
		skb->data_len += size;
		skb->truesize += size;
	} else
		skb_put(skb, length);
}

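/*
 * Receive work requests are identified by the ring index with the
 * IPOIB_OP_RECV bit ORed into the wr_id, so the completion handlers
 * can tell receive completions apart from send and connected-mode
 * completions when they share a CQ (see ipoib_poll() and
 * ipoib_drain_cq()).
 */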
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

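/*
 * Allocate (and, for large MTUs, page-extend) a receive skb and DMA-map
 * it.  This is also called from the receive completion path, so the
 * page allocation below must use GFP_ATOMIC.
 */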
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	if (ipoib_ud_need_sg(priv->max_ib_mtu))
		buf_size = IPOIB_UD_HEAD_SIZE;
	else
		buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + 4);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
	 * header.  So we need 4 more bytes to get to 48 and align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 4);

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
		struct page *page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
		mapping[1] =
			ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
			goto partial_error;
	}

	priv->rx_ring[id].skb = skb;
	return skb;

partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

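/*
 * Handle one UD receive completion: sanity-check the wr_id, recycle the
 * buffer on error, drop our own multicast packets that the HCA looped
 * back, then hand the skb to the stack and repost a fresh receive
 * buffer in its place.
 */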
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);
	ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;

	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netif_receive_skb(skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

static int ipoib_dma_map_tx(struct ib_device *ca,
			    struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca, frag->page,
						   frag->page_offset, frag->size,
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
		ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

static void ipoib_dma_unmap_tx(struct ib_device *ca,
			       struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		ib_dma_unmap_page(ca, mapping[i + off], frag->size,
				  DMA_TO_DEVICE);
	}
}

static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

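/*
 * NAPI poll routine for the receive CQ.  If we complete under budget we
 * re-arm the CQ; IB_CQ_REPORT_MISSED_EVENTS closes the race where a
 * completion arrived between the final poll and the re-arm by telling
 * us to reschedule instead of going idle.
 */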
int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		netif_rx_complete(dev, napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    netif_rx_reschedule(dev, napi))
			goto poll_more;
	}

	return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	netif_rx_schedule(dev, &priv->napi);
}

static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->tx_lock, flags);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

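/*
 * Send completions are not handled by NAPI: the send CQ's event handler
 * drains it directly, and drain_tx_cq() re-arms a 1-jiffy timer while
 * the queue is stopped so TX reclaim keeps making progress even if no
 * further CQ events fire.
 */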
void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	drain_tx_cq((struct net_device *)dev_ptr);
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	int i, off;
	struct sk_buff *skb = tx_req->skb;
	skb_frag_t *frags = skb_shinfo(skb)->frags;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	u64 *mapping = tx_req->mapping;

	if (skb_headlen(skb)) {
		priv->tx_sge[0].addr   = mapping[0];
		priv->tx_sge[0].length = skb_headlen(skb);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < nr_frags; ++i) {
		priv->tx_sge[i + off].addr   = mapping[i + off];
		priv->tx_sge[i + off].length = frags[i].size;
	}
	priv->tx_wr.num_sge	     = nr_frags + off;
	priv->tx_wr.wr_id	     = wr_id;
	priv->tx_wr.wr.ud.remote_qpn = qpn;
	priv->tx_wr.wr.ud.ah	     = address;

	if (head) {
		priv->tx_wr.wr.ud.mss	 = skb_shinfo(skb)->gso_size;
		priv->tx_wr.wr.ud.header = head;
		priv->tx_wr.wr.ud.hlen	 = hlen;
		priv->tx_wr.opcode	 = IB_WR_LSO;
	} else
		priv->tx_wr.opcode	 = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

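/*
 * For GSO skbs, ipoib_send() pulls the headers out of the linear data
 * and passes them separately via an IB_WR_LSO work request so the HCA
 * can replicate them for each segment; everything else goes out as a
 * plain IB_WR_SEND.
 */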
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen;
	void *phead;

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen  = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
			       address->ah, qpn, tx_req, phead, hlen))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		dev->trans_start = jiffies;

		address->last_send = priv->tx_head;
		++priv->tx_head;
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */
}

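/*
 * Destroy dead address handles whose last use has completed, i.e. whose
 * last_send is no later than tx_tail.  The comparison is done on signed
 * differences so it stays correct when the ring counters wrap around.
 */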
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(dev, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	init_timer(&priv->poll_timer);
	priv->poll_timer.function = ipoib_ib_tx_timer_func;
	priv->poll_timer.data = (unsigned long)dev;

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;
}

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);

	return 0;
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, n;
	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */
}

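/*
 * Device stop sequence: move the QP to the error state so outstanding
 * work requests flush back, drain both CQs (waiting up to five seconds
 * before assuming the hardware is wedged and reclaiming buffers by
 * hand), reset the QP, and finally reap any remaining address handles.
 */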
int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize when
	 * all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(dev, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			ipoib_transport_dev_cleanup(dev);
			return -ENODEV;
		}
	}

	return 0;
}

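/*
 * Flush an interface (and, recursively, its child VLAN interfaces)
 * after an IB event.  A plain flush just bounces the link; a P_Key
 * event additionally restarts the QP when the P_Key index changed.
 */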
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, pkey_event);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (pkey_event) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(dev, 0);
			ipoib_ib_dev_stop(dev, 0);
			if (ipoib_pkey_dev_delay_open(dev))
				return;
		}

		/* restart QP only if P_Key index is changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	ipoib_dbg(priv, "flushing\n");

	ipoib_ib_dev_down(dev, 0);

	if (pkey_event) {
		ipoib_ib_dev_stop(dev, 0);
		ipoib_ib_dev_open(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here; don't bring it back up if it's not configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_ib_dev_up(dev);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_task);

	ipoib_dbg(priv, "Flushing %s\n", priv->dev->name);
	__ipoib_ib_dev_flush(priv, 0);
}

void ipoib_pkey_event(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_event_task);

	ipoib_dbg(priv, "Flushing %s and restarting its QP\n", priv->dev->name);
	__ipoib_ib_dev_flush(priv, 1);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev, 1);
	ipoib_mcast_dev_flush(dev);

	ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
	struct net_device *dev = priv->dev;

	ipoib_pkey_dev_check_presence(dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(dev);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/*
	 * Look for the interface pkey value in the IB Port P_Key table
	 * and set the interface pkey assignment flag.
	 */
	ipoib_pkey_dev_check_presence(dev);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}