/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

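/*
 * Module parameters: the queue sizes below must be of the form 2^x - 1
 * with x in [6..14]; they are validated in check_module_parm(). A
 * hypothetical load example, assuming the module is built as ehea.ko:
 *
 *	modprobe ehea rq1_entries=16383 use_mcs=1
 */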
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 0;
static int num_tx_qps = EHEA_NUM_TX_QP;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(use_mcs, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues, Default = 0");

static int port_name_cnt = 0;

static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct ibmebus_dev *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};

static struct ibmebus_driver ehea_driver = {
	.name = "ehea",
	.id_table = ehea_device_table,
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};

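/* Dump a memory region to the kernel log, 16 bytes per line. */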
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	kfree(cb2);
out:
	return stats;
}

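/*
 * Replenish RQ1: RQ1 skbs stay in the array across receives (the
 * low-latency copy path), so only NULL slots are refilled here,
 * walking backwards from the last completed WQE index.
 */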
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int i;

	if (!nr_of_wqes)
		return;

	for (i = 0; i < nr_of_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				ehea_error("%s: no mem for skb/%d wqes filled",
					   dev->name, i);
				break;
			}
		}
		index--;
		index &= max_index_mask;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i);
}

static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	int ret = 0;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			ret = -ENOMEM;
			goto out;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
out:
	return ret;
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;

	if (!fill_wqes)
		return ret;

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
		if (!skb) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   pr->port->netdev->name, i);
			q_skba->os_skbs = fill_wqes - i;
			ret = -ENOMEM;
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);

		skb_arr[index] = skb;

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = (u64)skb->data;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
	}

	q_skba->index = index;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, i);
	else
		ehea_update_rq3a(pr->qp, i);

	return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}

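/*
 * Check the receive CQE status bits: returns 0 if the packet can be
 * delivered (no error, or only a TCP checksum error reported for a CQE
 * without a parsed TCP header), nonzero if it must be dropped.
 */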
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);
	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

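/*
 * Account a bad CQE in the port-resource statistics and drop the
 * associated skb; a fatal error schedules a port reset. Returns nonzero
 * when a reset was scheduled.
 */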
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (netif_msg_rx_err(pr->port)) {
		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(cqe, sizeof(*cqe), "CQE");
	}

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		ehea_error("Critical receive error. Resetting port.");
		queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
		return 1;
	}

	return 0;
}

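/*
 * Receive path: process up to a quota of RQ completions for one port
 * resource, hand the skbs to the stack and refill the receive queues.
 */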
static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
					struct ehea_port_res *pr,
					int *budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, my_quota, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;
	my_quota = min(*budget, dev->quota);

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((my_quota > 0) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		my_quota--;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {	/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(port->netdev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
					       cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {	/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(port->netdev, skb, cqe);
				processed_rq2++;
			} else {	/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(port->netdev, skb, cqe);
				processed_rq3++;
			}

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				vlan_hwaccel_receive_skb(skb, port->vgrp,
							 cqe->vlan_tag);
			else
				netif_receive_skb(skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	pr->rx_packets += processed;
	*budget -= processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	cqe = ehea_poll_rq1(qp, &wqe_index);
	return cqe;
}

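/*
 * Reap send completions: free the skbs attached to completed type-2
 * WQEs, credit the freed send WQEs back and wake the TX queue once the
 * refill threshold is reached again.
 */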
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			queue_work(pr->port->adapter->ehea_wq,
				   &pr->port->reset_task);
			break;
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			/* Free skb attached to a type-2 send WQE */
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	return cqe;
}

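/*
 * NAPI poll handler: processes receive and send completions. After
 * EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive non-empty polls the
 * completion interrupts are re-armed so the port can leave polling mode.
 */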
#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16

static int ehea_poll(struct net_device *dev, int *budget)
{
	struct ehea_port_res *pr = dev->priv;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;

	cqe = ehea_poll_rq1(pr->qp, &wqe_index);
	cqe_skb = ehea_poll_cq(pr->send_cq);

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);

	if ((!cqe && !cqe_skb) || force_irq) {
		pr->poll_counter = 0;
		netif_rx_complete(dev);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return 0;

		if (!netif_rx_reschedule(dev, dev->quota))
			return 0;
	}

	cqe = ehea_proc_rwqes(dev, pr, budget);
	cqe_skb = ehea_proc_cqes(pr, 300);

	if (cqe || cqe_skb)
		pr->poll_counter++;

	return 1;
}

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	netif_rx_schedule(pr->d_netdev);

	return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;
		ehea_error_data(port->adapter, qp->fw_handle);
		eqe = ehea_poll_eq(port->qp_eq);
	}

	queue_work(port->adapter->ehea_wq, &port->reset_task);

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);	/* May be called via */
	if (!cb0) {				/* ehea_neq_tasklet() */
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8*)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	kfree(cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	netif_carrier_on(port->netdev);
	kfree(cb4);
out:
	return ret;
}

static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
		} else {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter*)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}

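/* Initially fill all three receive queues of a port resource. */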
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
				     - init_attr->act_nr_rwqes_rq2
				     - init_attr->act_nr_rwqes_rq3 - 1);

	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	/* Register the QP affinity event interrupt first */
	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(NULL, pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(NULL, pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	kfree(cb0);
out:
	return ret;
}

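/* Generate the send and receive shared memory regions of a port resource. */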
int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	ehea_error("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr))
	    || (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void*) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		ehea_error("create_eq failed (eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);
	init_attr = NULL;	/* avoid double free in the error path */

	pr->d_netdev = alloc_netdev(0, "", ether_setup);
	if (!pr->d_netdev) {
		ret = -ENOMEM;
		goto out_free;
	}
	pr->d_netdev->priv = pr;
	pr->d_netdev->weight = 64;
	pr->d_netdev->poll = ehea_poll;
	set_bit(__LINK_STATE_START, &pr->d_netdev->state);
	strcpy(pr->d_netdev->name, port->netdev->name);

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	free_netdev(pr->d_netdev);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

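/*
 * Build the data descriptors of a type-2 send WQE for a TSO frame:
 * eth/ip/tcp headers go into the immediate data area, the remaining
 * linear payload into the first scatter-gather entry.
 */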
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;
	int headersize;
	u64 tmp_addr;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;

			tmp_addr = (u64)(skb->data + headersize);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u64 tmp_addr;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;
	u64 tmp_addr;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;

			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sgentry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	}
}

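/*
 * (De)register the port's MAC address for broadcast frames with the
 * hypervisor, once for untagged and once for all-VLAN traffic.
 */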
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (tagged)");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (vlan)");
		ret = -EIO;
	}
out_herr:
	return ret;
}

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	if (ret)
		goto out_free;

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret)
		goto out_free;

	ret = 0;
out_free:
	kfree(cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	kfree(cb7);
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				ehea_error("failed disabling IFF_ALLMULTI");
		}
}

static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}

static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct dev_mc_list *k_mcl_entry;
	int ret, i;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		return;
	}
	ehea_allmulti(dev, 0);

	if (dev->mc_count) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (dev->mc_count > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%lx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			goto out;
		}

		for (i = 0, k_mcl_entry = dev->mc_list;
		     i < dev->mc_count;
		     i++, k_mcl_entry = k_mcl_entry->next) {
			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
		}
	}
out:
	return;
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

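/*
 * Fill a type-2 send WQE (immediate header data plus scatter-gather
 * descriptors) and request hardware IP/TCP/UDP checksumming where the
 * packet type allows it.
 */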
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}
	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb->len - skb->data_len);
		imm_data += skb->len - skb->data_len;

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}

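/*
 * Distribute TCP flows across the available send QPs by hashing the
 * port pair and destination address; all other traffic goes to QP 0.
 */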
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
	struct tcphdr *tcp;
	u32 tmp;

	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr*)(skb_network_header(skb) +
				       (ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	} else
		return 0;
}

static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;
	}

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies;
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}

static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	if (grp)
		memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
	else
		memset(cb1->vlan_filter, 0xFF, sizeof(cb1->vlan_filter));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	kfree(cb1);
out:
	return;
}

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)1 << (vid & 0x3F));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)1 << (vid & 0x3F));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	kfree(cb1);
	return;
}

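/*
 * Drive a QP through the INITIALIZED -> ENABLED -> RDY2SND state
 * sequence via alternating query/modify hcalls.
 */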
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0* cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	kfree(cb0);
	return ret;
}

static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	int i;

	/* Keep the MR as long as at least one port is registered */
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	int i;

	/* The MR is only registered once, for the first port */
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);
	u64 mac_addr = 0;

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		ehea_error("out_clean_pr");
		goto out_clean_pr;
	}
	mac_addr = (*(u64*)dev->dev_addr) >> 16;

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("out_dereg_bc");
		goto out_dereg_bc;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("out_free_irqs");
			goto out_free_irqs;
		}
	}

	ret = 0;
	port->state = EHEA_PORT_UP;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_dereg_bc:
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	return ret;
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	down(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret)
		netif_start_queue(dev);

	up(&port->port_lock);

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_free_interrupts(dev);

	/* Wait until NAPI polling on all dummy netdevs has finished */
	for (i = 0; i < port->num_def_qps; i++)
		while (test_bit(__LINK_STATE_RX_SCHED,
				&port->port_res[i].d_netdev->state))
			msleep(1);

	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	ret = ehea_clean_all_portres(port);
	port->state = EHEA_PORT_DOWN;
	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	flush_workqueue(port->adapter->ehea_wq);
	down(&port->port_lock);
	netif_stop_queue(dev);
	ret = ehea_down(dev);
	up(&port->port_lock);
	return ret;
}

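/* Worker: bring the port down and up again after a fatal error. */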
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	down(&port->port_lock);
	netif_stop_queue(dev);
	netif_poll_disable(dev);

	ret = ehea_down(dev);
	if (ret)
		ehea_error("ehea_down failed. not all resources are freed");

	ret = ehea_up(dev);
	if (ret) {
		ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
		goto out;
	}

	if (netif_msg_timer(port))
		ehea_info("Device %s reset successfully", dev->name);

	netif_poll_enable(dev);
	netif_wake_queue(dev);
out:
	up(&port->port_lock);
	return;
}

static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev))
		queue_work(port->adapter->ehea_wq, &port->reset_task);
}

int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	kfree(cb);
out:
	return ret;
}

int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
	} else {
		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4,
					      H_PORT_CB4_JUMBO, cb4);
		if (hret == H_SUCCESS) {
			if (cb4->jumbo_frame)
				*jumbo = 1;
			else {
				cb4->jumbo_frame = 1;
				hret = ehea_h_modify_ehea_port(
						port->adapter->handle,
						port->logical_port_id,
						H_PORT_CB4,
						H_PORT_CB4_JUMBO, cb4);
				if (hret == H_SUCCESS)
					*jumbo = 1;
			}
		} else
			ret = -EINVAL;

		kfree(cb4);
	}

	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "0x%X", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.node);
}

static int ehea_driver_sysfs_add(struct device *dev,
				 struct device_driver *driver)
{
	int ret;

	ret = sysfs_create_link(&driver->kobj, &dev->kobj,
				kobject_name(&dev->kobj));
	if (ret == 0) {
		ret = sysfs_create_link(&dev->kobj, &driver->kobj,
					"driver");
		if (ret)
			sysfs_remove_link(&driver->kobj,
					  kobject_name(&dev->kobj));
	}
	return ret;
}

static void ehea_driver_sysfs_remove(struct device *dev,
				     struct device_driver *driver)
{
	struct device_driver *drv = driver;

	if (drv) {
		sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
		sysfs_remove_link(&dev->kobj, "driver");
	}
}

static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ebus_dev->ofdev.dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		ehea_error("failed to register device. ret=%d", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		ehea_error("failed to register attributes, ret=%d", ret);
		goto out_unreg_of_dev;
	}

	ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver);
	if (ret) {
		ehea_error("failed to register sysfs driver link");
		goto out_rem_dev_file;
	}

	return &port->ofdev.dev;

out_rem_dev_file:
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver);
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev(sizeof(struct ehea_port));

	if (!dev) {
		ehea_error("no mem for net_device");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	sema_init(&port->port_lock, 1);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	SET_MODULE_OWNER(dev);

	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->open = ehea_open;
	dev->poll = ehea_poll;
	dev->weight = 64;
	dev->stop = ehea_stop;
	dev->hard_start_xmit = ehea_start_xmit;
	dev->get_stats = ehea_get_stats;
	dev->set_multicast_list = ehea_set_multicast_list;
	dev->set_mac_address = ehea_set_mac_addr;
	dev->change_mtu = ehea_change_mtu;
	dev->vlan_rx_register = ehea_vlan_rx_register;
	dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->tx_timeout = &ehea_tx_watchdog;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);

	ehea_set_ethtool_ops(dev);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_unreg_port;
	}

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		ehea_error("failed determining jumbo frame status for %s",
			   port->netdev->name);

	ehea_info("%s: Jumbo frames are %sabled", dev->name,
		  jumbo == 1 ? "en" : "dis");

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	ehea_error("setting up logical port with id=%d failed, ret=%d",
		   logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
}

static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ebus_dev->ofdev.node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = (u32*)get_property(eth_dn, "ibm,hea-port-no",
						    NULL);
		if (!dn_log_port_id) {
			ehea_error("bad device node: eth_dn name=%s",
				   eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			ehea_error("creating MR failed");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			ehea_info("%s -> logical port id #%d",
				  adapter->port[i]->netdev->name,
				  *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}

	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	u32 *dn_log_port_id;

	lhea_dn = adapter->ebus_dev->ofdev.node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = (u32*)get_property(eth_dn, "ibm,hea-port-no",
						    NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev->driver_data;
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%X", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("adding port with logical port id=%d failed. port "
			  "already configured as %s.", logical_port_id,
			  port->netdev->name);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		ehea_info("no logical port with id %d found", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		ehea_error("creating MR failed");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		ehea_info("added %s (logical port id=%d)", port->netdev->name,
			  logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev->driver_data;
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%X", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("removed %s (logical port id=%d)",
			  port->netdev->name, logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		ehea_error("removing port with logical port id=%d failed. port "
			   "not configured.", logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct ibmebus_dev *dev)
{
	int ret = device_create_file(&dev->ofdev.dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->ofdev.dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct ibmebus_dev *dev)
{
	device_remove_file(&dev->ofdev.dev, &dev_attr_probe_port);
	device_remove_file(&dev->ofdev.dev, &dev_attr_remove_port);
}

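/*
 * Logical ports can also be probed/removed at runtime through the sysfs
 * files created above, e.g. (hypothetical device path, port id in hex):
 *
 *	echo 0x1 > /sys/bus/ibmebus/devices/<lhea-dev>/probe_port
 */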
static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	u64 *adapter_handle;
	int ret;

	if (!dev || !dev->ofdev.node) {
		ehea_error("Invalid ibmebus device probed");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n");
		goto out;
	}

	adapter->ebus_dev = dev;

	adapter_handle = (u64*)get_property(dev->ofdev.node, "ibm,hea-handle",
					    NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->ofdev.dev, "failed getting handle for adapter"
			" '%s'\n", dev->ofdev.node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev->ofdev.dev.driver_data = adapter;


	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->ofdev.dev, "NEQ creation failed");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed");
		goto out_kill_eq;
	}

	adapter->ehea_wq = create_workqueue("ehea_wq");
	if (!adapter->ehea_wq) {
		ret = -EIO;
		goto out_free_irq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_kill_wq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->ofdev.dev, "setup_ports failed");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_kill_wq:
	destroy_workqueue(adapter->ehea_wq);

out_free_irq:
	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	kfree(adapter);
out:
	return ret;
}

static int __devexit ehea_remove(struct ibmebus_dev *dev)
{
	struct ehea_adapter *adapter = dev->ofdev.dev.driver_data;
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	destroy_workqueue(adapter->ehea_wq);

	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	kfree(adapter);
	return 0;
}

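/* Validate the queue-size module parameters (must be within [2^6-1, 2^14-1]). */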
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}

int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);

	ret = check_module_parm();
	if (ret)
		goto out;
	ret = ibmebus_register_driver(&ehea_driver);
	if (ret)
		ehea_error("failed registering eHEA device driver on ebus");
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	ibmebus_unregister_driver(&ehea_driver);
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);