/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <asm/uaccess.h>
#include "cxgb3_ioctl.h"
#include "cxgb3_offload.h"
#include "cxgb3_ctl_defs.h"
#include "firmware_exports.h"
enum {
    MAX_TXQ_ENTRIES = 16384,
    MAX_CTRL_TXQ_ENTRIES = 1024,
    MAX_RSPQ_ENTRIES = 16384,
    MAX_RX_BUFFERS = 16384,
    MAX_RX_JUMBO_BUFFERS = 16384,
    MIN_TXQ_ENTRIES = 4,
    MIN_CTRL_TXQ_ENTRIES = 4,
    MIN_RSPQ_ENTRIES = 32,
    MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
    { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
    CH_DEVICE(0x20, 1, 0),  /* PE9000 */
    CH_DEVICE(0x21, 1, 1),  /* T302E */
    CH_DEVICE(0x22, 1, 2),  /* T310E */
    CH_DEVICE(0x23, 1, 3),  /* T320X */
    CH_DEVICE(0x24, 1, 1),  /* T302X */
    CH_DEVICE(0x25, 1, 3),  /* T320E */
    CH_DEVICE(0x26, 1, 2),  /* T310X */
    CH_DEVICE(0x30, 1, 2),  /* T3B10 */
    CH_DEVICE(0x31, 1, 3),  /* T3B20 */
    CH_DEVICE(0x32, 1, 1),  /* T3B02 */
    {0,}                    /* table terminator */
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
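/*
 * For example, a hypothetical invocation such as "modprobe cxgb3 msi=0"
 * would force the driver to fall back to legacy pin interrupts.
 */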
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down. Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch. If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete. Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 * link_report - show link status and link speed/duplex
 * @dev: the net device whose port settings are to be reported
 *
 * Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
    if (!netif_carrier_ok(dev))
        printk(KERN_INFO "%s: link down\n", dev->name);
    else {
        const char *s = "10Mbps";
        const struct port_info *p = netdev_priv(dev);

        switch (p->link_config.speed) {
        case SPEED_10000:
            s = "10Gbps";
            break;
        case SPEED_1000:
            s = "1Gbps";
            break;
        case SPEED_100:
            s = "100Mbps";
            break;
        }

        printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
               p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
    }
}
/**
 * t3_os_link_changed - handle link status changes
 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
 * @link_stat: the new status of the link
 * @speed: the new speed setting
 * @duplex: the new duplex setting
 * @pause: the new flow-control setting
 *
 * This is the OS-dependent handler for link status changes. The OS
 * neutral handler takes care of most of the processing for these events,
 * then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
    struct net_device *dev = adapter->port[port_id];

    /* Skip changes from disabled ports. */
    if (!netif_running(dev))
        return;

    if (link_stat != netif_carrier_ok(dev)) {
        if (link_stat)
            netif_carrier_on(dev);
        else
            netif_carrier_off(dev);
        link_report(dev);
    }
}
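/*
 * Propagate a net device's receive mode (promiscuity, multicast filters)
 * to the port's MAC.
 */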
static void cxgb_set_rxmode(struct net_device *dev)
{
    struct t3_rx_mode rm;
    struct port_info *pi = netdev_priv(dev);

    init_rx_mode(&rm, dev, dev->mc_list);
    t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 * link_start - enable a port
 * @dev: the device to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
    struct t3_rx_mode rm;
    struct port_info *pi = netdev_priv(dev);
    struct cmac *mac = &pi->mac;

    init_rx_mode(&rm, dev, dev->mc_list);
    t3_mac_reset(mac);
    t3_mac_set_mtu(mac, dev->mtu);
    t3_mac_set_address(mac, 0, dev->dev_addr);
    t3_mac_set_rx_mode(mac, &rm);
    t3_link_start(&pi->phy, mac, &pi->link_config);
    t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
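/* Disable whichever of MSI-X or MSI the adapter is currently using. */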
static inline void cxgb_disable_msi(struct adapter *adapter)
{
    if (adapter->flags & USING_MSIX) {
        pci_disable_msix(adapter->pdev);
        adapter->flags &= ~USING_MSIX;
    } else if (adapter->flags & USING_MSI) {
        pci_disable_msi(adapter->pdev);
        adapter->flags &= ~USING_MSI;
    }
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
    t3_slow_intr_handler(cookie);
    return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
    int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

    snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
    adap->msix_info[0].desc[n] = 0;

    for_each_port(adap, j) {
        struct net_device *d = adap->port[j];
        const struct port_info *pi = netdev_priv(d);

        for (i = 0; i < pi->nqsets; i++, msi_idx++) {
            snprintf(adap->msix_info[msi_idx].desc, n,
                     "%s (queue %d)", d->name, i);
            adap->msix_info[msi_idx].desc[n] = 0;
        }
    }
}
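/*
 * Request the data-path MSI-X interrupts, one per SGE queue set; on
 * failure, release any vectors acquired so far.
 */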
static int request_msix_data_irqs(struct adapter *adap)
{
    int i, j, err, qidx = 0;

    for_each_port(adap, i) {
        int nqsets = adap2pinfo(adap, i)->nqsets;

        for (j = 0; j < nqsets; ++j) {
            err = request_irq(adap->msix_info[qidx + 1].vec,
                              t3_intr_handler(adap,
                                              adap->sge.qs[qidx].rspq.polling),
                              0,
                              adap->msix_info[qidx + 1].desc,
                              &adap->sge.qs[qidx]);
            if (err) {
                while (--qidx >= 0)
                    free_irq(adap->msix_info[qidx + 1].vec,
                             &adap->sge.qs[qidx]);
                return err;
            }
            qidx++;
        }
    }
    return 0;
}
/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS to distribute packets to multiple receive queues. We
 * configure the RSS CPU lookup table to distribute to the number of HW
 * receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each port.
 * We always configure the RSS mapping for two ports since the mapping
 * table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
    int i;
    unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
    unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
    u8 cpus[SGE_QSETS + 1];
    u16 rspq_map[RSS_TABLE_SIZE];

    for (i = 0; i < SGE_QSETS; ++i)
        cpus[i] = i;
    cpus[SGE_QSETS] = 0xff;    /* terminator */

    for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
        rspq_map[i] = i % nq0;
        rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
    }

    t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                  F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                  V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices. We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues. Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
    int i, j, dummy_idx = 0;
    struct net_device *nd;

    for_each_port(adap, i) {
        struct net_device *dev = adap->port[i];
        const struct port_info *pi = netdev_priv(dev);

        for (j = 0; j < pi->nqsets - 1; j++) {
            if (!adap->dummy_netdev[dummy_idx]) {
                nd = alloc_netdev(0, "", ether_setup);
                if (!nd)
                    goto free_all;

                nd->priv = adap;
                nd->weight = 64;
                set_bit(__LINK_STATE_START, &nd->state);
                adap->dummy_netdev[dummy_idx] = nd;
            }
            strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
            dummy_idx++;
        }
    }
    return 0;

free_all:
    while (--dummy_idx >= 0) {
        free_netdev(adap->dummy_netdev[dummy_idx]);
        adap->dummy_netdev[dummy_idx] = NULL;
    }
    return -ENOMEM;
}
/*
 * Wait until all NAPI handlers are descheduled. This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
    int i;
    struct net_device *dev;

    for_each_port(adap, i) {
        dev = adap->port[i];
        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
            msleep(1);
    }

    for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
        dev = adap->dummy_netdev[i];
        if (dev)
            while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                msleep(1);
    }
}
/**
 * setup_sge_qsets - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
    int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
    unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;

    if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
        irq_idx = -1;

    for_each_port(adap, i) {
        struct net_device *dev = adap->port[i];
        const struct port_info *pi = netdev_priv(dev);

        for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
            err = t3_sge_alloc_qset(adap, qset_idx, 1,
                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                             irq_idx,
                &adap->params.sge.qset[qset_idx], ntxq,
                j == 0 ? dev :
                         adap->dummy_netdev[dummy_dev_idx++]);
            if (err) {
                t3_free_sge_resources(adap);
                return err;
            }
        }
    }

    return 0;
}
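/*
 * Generic sysfs show/store helpers: format or update an adapter field
 * while synchronizing with ioctls that may shut down the device.
 */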
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
                         char *buf,
                         ssize_t (*format)(struct adapter *, char *))
{
    ssize_t len;
    struct adapter *adap = to_net_dev(d)->priv;

    /* Synchronize with ioctls that may shut down the device */
    rtnl_lock();
    len = (*format)(adap, buf);
    rtnl_unlock();
    return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct adapter *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
    char *endp;
    unsigned int val;
    ssize_t ret;
    struct adapter *adap = to_net_dev(d)->priv;

    if (!capable(CAP_NET_ADMIN))
        return -EPERM;

    val = simple_strtoul(buf, &endp, 0);
    if (endp == buf || val < min_val || val > max_val)
        return -EINVAL;

    rtnl_lock();
    ret = (*set)(adap, val);
    if (!ret)
        ret = len;
    rtnl_unlock();
    return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct adapter *adap, char *buf) \
{ \
    return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
    return attr_show(d, attr, buf, format_##name); \
}
static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
{
    if (adap->flags & FULL_INIT_DONE)
        return -EBUSY;    /* cannot change once the adapter is up */
    if (val && adap->params.rev == 0)
        return -EINVAL;
    if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
        return -EINVAL;
    adap->params.mc5.nfilters = val;
    return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
    return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct adapter *adap, unsigned int val)
{
    if (adap->flags & FULL_INIT_DONE)
        return -EBUSY;
    if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
        return -EINVAL;
    adap->params.mc5.nservers = val;
    return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
    return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
    &dev_attr_cam_size.attr,
    &dev_attr_nfilters.attr,
    &dev_attr_nservers.attr,
    NULL    /* sysfs attribute arrays are NULL-terminated */
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
                            char *buf, int sched)
{
    ssize_t len;
    unsigned int v, addr, bpt, cpt;
    struct adapter *adap = to_net_dev(d)->priv;

    addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
    rtnl_lock();
    t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
    v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
    if (sched & 1)
        v >>= 16;
    bpt = (v >> 8) & 0xff;
    cpt = v & 0xff;
    if (!cpt)
        len = sprintf(buf, "disabled\n");
    else {
        v = (adap->params.vpd.cclk * 1000) / cpt;
        len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
    }
    rtnl_unlock();
    return len;
}
static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t len, int sched)
{
    char *endp;
    ssize_t ret;
    unsigned int val;
    struct adapter *adap = to_net_dev(d)->priv;

    if (!capable(CAP_NET_ADMIN))
        return -EPERM;

    val = simple_strtoul(buf, &endp, 0);
    if (endp == buf || val > 10000000)
        return -EINVAL;

    rtnl_lock();
    ret = t3_config_sched(adap, val, sched);
    if (!ret)
        ret = len;
    rtnl_unlock();
    return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
    return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
    return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
    &dev_attr_sched0.attr,
    &dev_attr_sched1.attr,
    &dev_attr_sched2.attr,
    &dev_attr_sched3.attr,
    &dev_attr_sched4.attr,
    &dev_attr_sched5.attr,
    &dev_attr_sched6.attr,
    &dev_attr_sched7.attr,
    NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
    int ret;

    local_bh_disable();
    ret = t3_offload_tx(tdev, skb);
    local_bh_enable();
    return ret;
}
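/*
 * Build and send a CPL_SMT_WRITE_REQ that programs a port's source MAC
 * address into the adapter's SMT.
 */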
static int write_smt_entry(struct adapter *adapter, int idx)
{
    struct cpl_smt_write_req *req;
    struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

    if (!skb)
        return -ENOMEM;

    req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
    req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
    OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
    req->mtu_idx = NMTUS - 1;    /* should be 0 but there's a T3 bug */
    req->iff = idx;
    memset(req->src_mac1, 0, sizeof(req->src_mac1));
    memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
    skb->priority = 1;
    offload_tx(&adapter->tdev, skb);
    return 0;
}
static int init_smt(struct adapter *adapter)
{
    int i;

    for_each_port(adapter, i)
        write_smt_entry(adapter, i);
    return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
    unsigned int mtus = adapter->port[0]->mtu;

    if (adapter->port[1])
        mtus |= adapter->port[1]->mtu << 16;
    t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
    struct sk_buff *skb;
    struct mngt_pktsched_wr *req;

    skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
    req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
    req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
    req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
    req->sched = sched;
    req->idx = qidx;
    req->min = lo;
    req->max = hi;
    req->binding = port;
    t3_mgmt_tx(adap, skb);
}
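/*
 * Bind each port's queue sets to the port's Tx channel through
 * packet-scheduler management commands.
 */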
static void bind_qsets(struct adapter *adap)
{
    int i, j;

    for_each_port(adap, i) {
        const struct port_info *pi = adap2pinfo(adap, i);

        for (j = 0; j < pi->nqsets; ++j)
            send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                              -1, i);
    }
}
/**
 * cxgb_up - enable the adapter
 * @adapter: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
    int err = 0;

    if (!(adap->flags & FULL_INIT_DONE)) {
        err = t3_check_fw_version(adap);
        if (err)
            goto out;

        err = init_dummy_netdevs(adap);
        if (err)
            goto out;

        err = t3_init_hw(adap, 0);
        if (err)
            goto out;

        err = setup_sge_qsets(adap);
        if (err)
            goto out;

        setup_rss(adap);
        adap->flags |= FULL_INIT_DONE;
    }

    t3_intr_clear(adap);

    if (adap->flags & USING_MSIX) {
        name_msix_vecs(adap);
        err = request_irq(adap->msix_info[0].vec,
                          t3_async_intr_handler, 0,
                          adap->msix_info[0].desc, adap);
        if (err)
            goto irq_err;

        if (request_msix_data_irqs(adap)) {
            free_irq(adap->msix_info[0].vec, adap);
            goto irq_err;
        }
    } else if ((err = request_irq(adap->pdev->irq,
                                  t3_intr_handler(adap,
                                                  adap->sge.qs[0].rspq.polling),
                                  (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
                                  adap->name, adap)))
        goto irq_err;

    t3_sge_start(adap);
    t3_intr_enable(adap);

    if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
        bind_qsets(adap);
    adap->flags |= QUEUES_BOUND;

out:
    return err;
irq_err:
    CH_ERR(adap, "request_irq failed, err %d\n", err);
    goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
    t3_sge_stop(adapter);
    spin_lock_irq(&adapter->work_lock);    /* sync with PHY intr task */
    t3_intr_disable(adapter);
    spin_unlock_irq(&adapter->work_lock);

    if (adapter->flags & USING_MSIX) {
        int i, n = 0;

        free_irq(adapter->msix_info[0].vec, adapter);
        for_each_port(adapter, i)
            n += adap2pinfo(adapter, i)->nqsets;

        for (i = 0; i < n; ++i)
            free_irq(adapter->msix_info[i + 1].vec,
                     &adapter->sge.qs[i]);
    } else
        free_irq(adapter->pdev->irq, adapter);

    flush_workqueue(cxgb3_wq);    /* wait for external IRQ handler */
    quiesce_rx(adapter);
}
static void schedule_chk_task(struct adapter *adap)
{
    unsigned int timeo;

    timeo = adap->params.linkpoll_period ?
        (HZ * adap->params.linkpoll_period) / 10 :
        adap->params.stats_update_period * HZ;
    if (timeo)
        queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
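/*
 * Bring up the offload side of the adapter: enable offload mode, activate
 * the offload module, load the MTU table, and notify registered clients.
 */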
static int offload_open(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;
    struct t3cdev *tdev = T3CDEV(dev);
    int adap_up = adapter->open_device_map & PORT_MASK;
    int err = 0;

    if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
        return 0;

    if (!adap_up && (err = cxgb_up(adapter)) < 0)
        return err;

    t3_tp_set_offload_mode(adapter, 1);
    tdev->lldev = adapter->port[0];
    err = cxgb3_offload_activate(adapter);
    if (err)
        goto out;

    init_port_mtus(adapter);
    t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                 adapter->params.b_wnd,
                 adapter->params.rev == 0 ?
                 adapter->port[0]->mtu : 0xffff);
    init_smt(adapter);

    /* Never mind if the next step fails */
    sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

    /* Call back all registered clients */
    cxgb3_add_clients(tdev);

out:
    /* restore them in case the offload module has changed them */
    if (err) {
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
        cxgb3_set_dummy_ops(tdev);
    }
    return err;
}
static int offload_close(struct t3cdev *tdev)
{
    struct adapter *adapter = tdev2adap(tdev);

    if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
        return 0;

    /* Call back all registered clients */
    cxgb3_remove_clients(tdev);

    sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

    tdev->lldev = NULL;
    cxgb3_set_dummy_ops(tdev);
    t3_tp_set_offload_mode(adapter, 0);
    clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

    if (!adapter->open_device_map)
        cxgb_down(adapter);

    cxgb3_offload_deactivate(adapter);
    return 0;
}
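/*
 * net_device open handler: brings the adapter up on first open, marks the
 * port active, optionally opens offload, and starts the Tx queue.
 */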
static int cxgb_open(struct net_device *dev)
{
    int err;
    struct adapter *adapter = dev->priv;
    struct port_info *pi = netdev_priv(dev);
    int other_ports = adapter->open_device_map & PORT_MASK;

    if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
        return err;

    set_bit(pi->port_id, &adapter->open_device_map);
    if (!ofld_disable && !other_ports && is_offload(adapter)) {
        err = offload_open(dev);
        if (err)
            printk(KERN_WARNING
                   "Could not initialize offload capabilities\n");
    }

    link_start(dev);
    t3_port_intr_enable(adapter, pi->port_id);
    netif_start_queue(dev);
    if (!other_ports)
        schedule_chk_task(adapter);

    return 0;
}
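/*
 * net_device stop handler: quiesces the port, powers down the PHY, and
 * tears the adapter down once the last port is closed.
 */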
static int cxgb_close(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;
    struct port_info *p = netdev_priv(dev);

    t3_port_intr_disable(adapter, p->port_id);
    netif_stop_queue(dev);
    p->phy.ops->power_down(&p->phy, 1);
    netif_carrier_off(dev);
    t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

    spin_lock(&adapter->work_lock);    /* sync with update task */
    clear_bit(p->port_id, &adapter->open_device_map);
    spin_unlock(&adapter->work_lock);

    if (!(adapter->open_device_map & PORT_MASK))
        cancel_rearming_delayed_workqueue(cxgb3_wq,
                                          &adapter->adap_check_task);

    if (!adapter->open_device_map)
        cxgb_down(adapter);

    return 0;
}
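/*
 * Fill a net_device_stats structure from the accumulated MAC statistics;
 * the accumulation itself is serialized by stats_lock.
 */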
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;
    struct port_info *p = netdev_priv(dev);
    struct net_device_stats *ns = &p->netstats;
    const struct mac_stats *pstats;

    spin_lock(&adapter->stats_lock);
    pstats = t3_mac_update_stats(&p->mac);
    spin_unlock(&adapter->stats_lock);

    ns->tx_bytes = pstats->tx_octets;
    ns->tx_packets = pstats->tx_frames;
    ns->rx_bytes = pstats->rx_octets;
    ns->rx_packets = pstats->rx_frames;
    ns->multicast = pstats->rx_mcast_frames;

    ns->tx_errors = pstats->tx_underrun;
    ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
        pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
        pstats->rx_fifo_ovfl;

    /* detailed rx_errors */
    ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
    ns->rx_over_errors = 0;
    ns->rx_crc_errors = pstats->rx_fcs_errs;
    ns->rx_frame_errors = pstats->rx_symbol_errs;
    ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
    ns->rx_missed_errors = pstats->rx_cong_drops;

    /* detailed tx_errors */
    ns->tx_aborted_errors = 0;
    ns->tx_carrier_errors = 0;
    ns->tx_fifo_errors = pstats->tx_underrun;
    ns->tx_heartbeat_errors = 0;
    ns->tx_window_errors = 0;
    return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;

    return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
    struct adapter *adapter = dev->priv;

    adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
    "TxOctetsOK         ",
    "TxFramesOK         ",
    "TxMulticastFramesOK",
    "TxBroadcastFramesOK",
    "TxPauseFrames      ",
    "TxUnderrun         ",
    "TxExtUnderrun      ",

    "TxFrames64         ",
    "TxFrames65To127    ",
    "TxFrames128To255   ",
    "TxFrames256To511   ",
    "TxFrames512To1023  ",
    "TxFrames1024To1518 ",
    "TxFrames1519ToMax  ",

    "RxOctetsOK         ",
    "RxFramesOK         ",
    "RxMulticastFramesOK",
    "RxBroadcastFramesOK",
    "RxPauseFrames      ",
    "RxFCSErrors        ",
    "RxSymbolErrors     ",
    "RxShortErrors      ",
    "RxJabberErrors     ",
    "RxLengthErrors     ",
    "RxFIFOoverflow     ",

    "RxFrames64         ",
    "RxFrames65To127    ",
    "RxFrames128To255   ",
    "RxFrames256To511   ",
    "RxFrames512To1023  ",
    "RxFrames1024To1518 ",
    "RxFrames1519ToMax  ",

    "PhyFIFOErrors      ",
    "TSO                ",
    "VLANextractions    ",
    "VLANinsertions     ",
    "TxCsumOffload      ",
    "RxCsumGood         ",
    "RxDrops            "
};
static int get_stats_count(struct net_device *dev)
{
    return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
    return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
    return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    u32 fw_vers = 0;
    struct adapter *adapter = dev->priv;

    t3_get_fw_version(adapter, &fw_vers);

    strcpy(info->driver, DRV_NAME);
    strcpy(info->version, DRV_VERSION);
    strcpy(info->bus_info, pci_name(adapter->pdev));
    if (!fw_vers)
        strcpy(info->fw_version, "N/A");
    else {
        snprintf(info->fw_version, sizeof(info->fw_version),
                 "%s %u.%u.%u",
                 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                 G_FW_VERSION_MAJOR(fw_vers),
                 G_FW_VERSION_MINOR(fw_vers),
                 G_FW_VERSION_MICRO(fw_vers));
    }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
    if (stringset == ETH_SS_STATS)
        memcpy(data, stats_strings, sizeof(stats_strings));
}
static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
    int i;
    unsigned long tot = 0;

    for (i = 0; i < p->nqsets; ++i)
        tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
    return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
    struct adapter *adapter = dev->priv;
    struct port_info *pi = netdev_priv(dev);
    const struct mac_stats *s;

    spin_lock(&adapter->stats_lock);
    s = t3_mac_update_stats(&pi->mac);
    spin_unlock(&adapter->stats_lock);

    *data++ = s->tx_octets;
    *data++ = s->tx_frames;
    *data++ = s->tx_mcast_frames;
    *data++ = s->tx_bcast_frames;
    *data++ = s->tx_pause;
    *data++ = s->tx_underrun;
    *data++ = s->tx_fifo_urun;

    *data++ = s->tx_frames_64;
    *data++ = s->tx_frames_65_127;
    *data++ = s->tx_frames_128_255;
    *data++ = s->tx_frames_256_511;
    *data++ = s->tx_frames_512_1023;
    *data++ = s->tx_frames_1024_1518;
    *data++ = s->tx_frames_1519_max;

    *data++ = s->rx_octets;
    *data++ = s->rx_frames;
    *data++ = s->rx_mcast_frames;
    *data++ = s->rx_bcast_frames;
    *data++ = s->rx_pause;
    *data++ = s->rx_fcs_errs;
    *data++ = s->rx_symbol_errs;
    *data++ = s->rx_short;
    *data++ = s->rx_jabber;
    *data++ = s->rx_too_long;
    *data++ = s->rx_fifo_ovfl;

    *data++ = s->rx_frames_64;
    *data++ = s->rx_frames_65_127;
    *data++ = s->rx_frames_128_255;
    *data++ = s->rx_frames_256_511;
    *data++ = s->rx_frames_512_1023;
    *data++ = s->rx_frames_1024_1518;
    *data++ = s->rx_frames_1519_max;

    *data++ = pi->phy.fifo_errors;

    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
    *data++ = s->rx_cong_drops;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
    u32 *p = buf + start;

    for (; start <= end; start += sizeof(u32))
        *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
    struct adapter *ap = dev->priv;

    /*
     * Version scheme:
     * bits 0..9: chip version
     * bits 10..15: chip revision
     * bit 31: set for PCIe cards
     */
    regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

    /*
     * We skip the MAC statistics registers because they are clear-on-read.
     * Also reading multi-register stats would need to synchronize with the
     * periodic mac stats accumulation. Hard to justify the complexity.
     */
    memset(buf, 0, T3_REGMAP_SIZE);
    reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
    reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
    reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
    reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
    reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
    reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                   XGM_REG(A_XGM_SERDES_STAT3, 1));
    reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                   XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
    struct port_info *p = netdev_priv(dev);

    if (!netif_running(dev))
        return -EAGAIN;
    if (p->link_config.autoneg != AUTONEG_ENABLE)
        return -EINVAL;
    p->phy.ops->autoneg_restart(&p->phy);
    return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
    int i;
    struct adapter *adapter = dev->priv;

    if (data == 0)
        data = 2;

    for (i = 0; i < data * 2; i++) {
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         (i & 1) ? F_GPIO0_OUT_VAL : 0);
        if (msleep_interruptible(500))
            break;
    }
    t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                     F_GPIO0_OUT_VAL);
    return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct port_info *p = netdev_priv(dev);

    cmd->supported = p->link_config.supported;
    cmd->advertising = p->link_config.advertising;

    if (netif_carrier_ok(dev)) {
        cmd->speed = p->link_config.speed;
        cmd->duplex = p->link_config.duplex;
    } else {
        cmd->speed = -1;
        cmd->duplex = -1;
    }

    cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
    cmd->phy_address = p->phy.addr;
    cmd->transceiver = XCVR_EXTERNAL;
    cmd->autoneg = p->link_config.autoneg;
    cmd->maxtxpkt = 0;
    cmd->maxrxpkt = 0;
    return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
    int cap = 0;

    switch (speed) {
    case SPEED_10:
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_10baseT_Full;
        else
            cap = SUPPORTED_10baseT_Half;
        break;
    case SPEED_100:
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_100baseT_Full;
        else
            cap = SUPPORTED_100baseT_Half;
        break;
    case SPEED_1000:
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_1000baseT_Full;
        else
            cap = SUPPORTED_1000baseT_Half;
        break;
    case SPEED_10000:
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_10000baseT_Full;
        break;
    }
    return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                         ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct port_info *p = netdev_priv(dev);
    struct link_config *lc = &p->link_config;

    if (!(lc->supported & SUPPORTED_Autoneg))
        return -EOPNOTSUPP;    /* can't change speed/duplex */

    if (cmd->autoneg == AUTONEG_DISABLE) {
        int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

        if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
            return -EINVAL;
        lc->requested_speed = cmd->speed;
        lc->requested_duplex = cmd->duplex;
        lc->advertising = 0;
    } else {
        cmd->advertising &= ADVERTISED_MASK;
        cmd->advertising &= lc->supported;
        if (!cmd->advertising)
            return -EINVAL;
        lc->requested_speed = SPEED_INVALID;
        lc->requested_duplex = DUPLEX_INVALID;
        lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
    }
    lc->autoneg = cmd->autoneg;
    if (netif_running(dev))
        t3_link_start(&p->phy, &p->mac, lc);
    return 0;
}
static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
    struct port_info *p = netdev_priv(dev);

    epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
    epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
    epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
    struct port_info *p = netdev_priv(dev);
    struct link_config *lc = &p->link_config;

    if (epause->autoneg == AUTONEG_DISABLE)
        lc->requested_fc = 0;
    else if (lc->supported & SUPPORTED_Autoneg)
        lc->requested_fc = PAUSE_AUTONEG;
    else
        return -EINVAL;

    if (epause->rx_pause)
        lc->requested_fc |= PAUSE_RX;
    if (epause->tx_pause)
        lc->requested_fc |= PAUSE_TX;
    if (lc->autoneg == AUTONEG_ENABLE) {
        if (netif_running(dev))
            t3_link_start(&p->phy, &p->mac, lc);
    } else {
        lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
        if (netif_running(dev))
            t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
    }
    return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
    struct port_info *p = netdev_priv(dev);

    return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
    struct port_info *p = netdev_priv(dev);

    p->rx_csum_offload = data;
    return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
    struct adapter *adapter = dev->priv;

    e->rx_max_pending = MAX_RX_BUFFERS;
    e->rx_mini_max_pending = 0;
    e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
    e->tx_max_pending = MAX_TXQ_ENTRIES;

    e->rx_pending = adapter->params.sge.qset[0].fl_size;
    e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
    e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
    e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
    int i;
    struct adapter *adapter = dev->priv;

    if (e->rx_pending > MAX_RX_BUFFERS ||
        e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
        e->tx_pending > MAX_TXQ_ENTRIES ||
        e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
        e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
        e->rx_pending < MIN_FL_ENTRIES ||
        e->rx_jumbo_pending < MIN_FL_ENTRIES ||
        e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
        return -EINVAL;

    if (adapter->flags & FULL_INIT_DONE)
        return -EBUSY;

    for (i = 0; i < SGE_QSETS; ++i) {
        struct qset_params *q = &adapter->params.sge.qset[i];

        q->rspq_size = e->rx_mini_pending;
        q->fl_size = e->rx_pending;
        q->jumbo_size = e->rx_jumbo_pending;
        q->txq_size[0] = e->tx_pending;
        q->txq_size[1] = e->tx_pending;
        q->txq_size[2] = e->tx_pending;
    }
    return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
    struct adapter *adapter = dev->priv;
    struct qset_params *qsp = &adapter->params.sge.qset[0];
    struct sge_qset *qs = &adapter->sge.qs[0];

    if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
        return -EINVAL;

    qsp->coalesce_usecs = c->rx_coalesce_usecs;
    t3_update_qset_coalesce(qs, qsp);
    return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
    struct adapter *adapter = dev->priv;
    struct qset_params *q = adapter->params.sge.qset;

    c->rx_coalesce_usecs = q->coalesce_usecs;
    return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
    int i, err = 0;
    struct adapter *adapter = dev->priv;

    u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;

    e->magic = EEPROM_MAGIC;
    for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
        err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

    if (!err)
        memcpy(data, buf + e->offset, e->len);
    kfree(buf);
    return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
    u8 *buf;
    int err = 0;
    u32 aligned_offset, aligned_len, *p;
    struct adapter *adapter = dev->priv;

    if (eeprom->magic != EEPROM_MAGIC)
        return -EINVAL;

    aligned_offset = eeprom->offset & ~3;
    aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

    if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
        buf = kmalloc(aligned_len, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;
        err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
        if (!err && aligned_len > 4)
            err = t3_seeprom_read(adapter,
                                  aligned_offset + aligned_len - 4,
                                  (u32 *)&buf[aligned_len - 4]);
        if (err)
            goto out;
        memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
    } else
        buf = data;

    err = t3_seeprom_wp(adapter, 0);
    if (err)
        goto out;

    for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
        err = t3_seeprom_write(adapter, aligned_offset, *p);
        aligned_offset += 4;
    }

    if (!err)
        err = t3_seeprom_wp(adapter, 1);
out:
    if (buf != data)
        kfree(buf);
    return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
    wol->supported = 0;
    wol->wolopts = 0;
    memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
    .get_settings = get_settings,
    .set_settings = set_settings,
    .get_drvinfo = get_drvinfo,
    .get_msglevel = get_msglevel,
    .set_msglevel = set_msglevel,
    .get_ringparam = get_sge_param,
    .set_ringparam = set_sge_param,
    .get_coalesce = get_coalesce,
    .set_coalesce = set_coalesce,
    .get_eeprom_len = get_eeprom_len,
    .get_eeprom = get_eeprom,
    .set_eeprom = set_eeprom,
    .get_pauseparam = get_pauseparam,
    .set_pauseparam = set_pauseparam,
    .get_rx_csum = get_rx_csum,
    .set_rx_csum = set_rx_csum,
    .get_tx_csum = ethtool_op_get_tx_csum,
    .set_tx_csum = ethtool_op_set_tx_csum,
    .get_sg = ethtool_op_get_sg,
    .set_sg = ethtool_op_set_sg,
    .get_link = ethtool_op_get_link,
    .get_strings = get_strings,
    .phys_id = cxgb3_phys_id,
    .nway_reset = restart_autoneg,
    .get_stats_count = get_stats_count,
    .get_ethtool_stats = get_stats,
    .get_regs_len = get_regs_len,
    .get_regs = get_regs,
    .get_wol = get_wol,
    .get_tso = ethtool_op_get_tso,
    .set_tso = ethtool_op_set_tso,
    .get_perm_addr = ethtool_op_get_perm_addr
};
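/*
 * A negative value is used by the extension ioctls to mean "leave this
 * parameter unchanged", so it is always treated as in range.
 */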
static int in_range(int val, int lo, int hi)
{
    return val < 0 || (val <= hi && val >= lo);
}
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
    int ret;
    u32 cmd;
    struct adapter *adapter = dev->priv;

    if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
        return -EFAULT;

    switch (cmd) {
    case CHELSIO_SETREG: {
        struct ch_reg edata;

        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (copy_from_user(&edata, useraddr, sizeof(edata)))
            return -EFAULT;
        if ((edata.addr & 3) != 0 ||
            edata.addr >= adapter->mmio_len)
            return -EINVAL;
        writel(edata.val, adapter->regs + edata.addr);
        break;
    }
    case CHELSIO_GETREG: {
        struct ch_reg edata;

        if (copy_from_user(&edata, useraddr, sizeof(edata)))
            return -EFAULT;
        if ((edata.addr & 3) != 0 ||
            edata.addr >= adapter->mmio_len)
            return -EINVAL;
        edata.val = readl(adapter->regs + edata.addr);
        if (copy_to_user(useraddr, &edata, sizeof(edata)))
            return -EFAULT;
        break;
    }
    case CHELSIO_SET_QSET_PARAMS: {
        int i;
        struct qset_params *q;
        struct ch_qset_params t;

        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (copy_from_user(&t, useraddr, sizeof(t)))
            return -EFAULT;
        if (t.qset_idx >= SGE_QSETS)
            return -EINVAL;
        if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
            !in_range(t.cong_thres, 0, 255) ||
            !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                      MAX_TXQ_ENTRIES) ||
            !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                      MAX_TXQ_ENTRIES) ||
            !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                      MAX_CTRL_TXQ_ENTRIES) ||
            !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                      MAX_RX_BUFFERS) ||
            !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                      MAX_RX_JUMBO_BUFFERS) ||
            !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
                      MAX_RSPQ_ENTRIES))
            return -EINVAL;
        if ((adapter->flags & FULL_INIT_DONE) &&
            (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
             t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
             t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
             t.polling >= 0 || t.cong_thres >= 0))
            return -EBUSY;

        q = &adapter->params.sge.qset[t.qset_idx];

        if (t.rspq_size >= 0)
            q->rspq_size = t.rspq_size;
        if (t.fl_size[0] >= 0)
            q->fl_size = t.fl_size[0];
        if (t.fl_size[1] >= 0)
            q->jumbo_size = t.fl_size[1];
        if (t.txq_size[0] >= 0)
            q->txq_size[0] = t.txq_size[0];
        if (t.txq_size[1] >= 0)
            q->txq_size[1] = t.txq_size[1];
        if (t.txq_size[2] >= 0)
            q->txq_size[2] = t.txq_size[2];
        if (t.cong_thres >= 0)
            q->cong_thres = t.cong_thres;
        if (t.intr_lat >= 0) {
            struct sge_qset *qs =
                &adapter->sge.qs[t.qset_idx];

            q->coalesce_usecs = t.intr_lat;
            t3_update_qset_coalesce(qs, q);
        }
        if (t.polling >= 0) {
            if (adapter->flags & USING_MSIX)
                q->polling = t.polling;
            else {
                /* No polling with INTx for T3A */
                if (adapter->params.rev == 0 &&
                    !(adapter->flags & USING_MSI))
                    t.polling = 0;

                for (i = 0; i < SGE_QSETS; i++) {
                    q = &adapter->params.sge.qset[i];
                    q->polling = t.polling;
                }
            }
        }
        break;
    }
    case CHELSIO_GET_QSET_PARAMS: {
        struct qset_params *q;
        struct ch_qset_params t;

        if (copy_from_user(&t, useraddr, sizeof(t)))
            return -EFAULT;
        if (t.qset_idx >= SGE_QSETS)
            return -EINVAL;

        q = &adapter->params.sge.qset[t.qset_idx];
        t.rspq_size = q->rspq_size;
        t.txq_size[0] = q->txq_size[0];
        t.txq_size[1] = q->txq_size[1];
        t.txq_size[2] = q->txq_size[2];
        t.fl_size[0] = q->fl_size;
        t.fl_size[1] = q->jumbo_size;
        t.polling = q->polling;
        t.intr_lat = q->coalesce_usecs;
        t.cong_thres = q->cong_thres;

        if (copy_to_user(useraddr, &t, sizeof(t)))
            return -EFAULT;
        break;
    }
    case CHELSIO_SET_QSET_NUM: {
        struct ch_reg edata;
        struct port_info *pi = netdev_priv(dev);
        unsigned int i, first_qset = 0, other_qsets = 0;

        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (adapter->flags & FULL_INIT_DONE)
            return -EBUSY;
        if (copy_from_user(&edata, useraddr, sizeof(edata)))
            return -EFAULT;
        if (edata.val < 1 ||
            (edata.val > 1 && !(adapter->flags & USING_MSIX)))
            return -EINVAL;

        for_each_port(adapter, i)
            if (adapter->port[i] && adapter->port[i] != dev)
                other_qsets += adap2pinfo(adapter, i)->nqsets;

        if (edata.val + other_qsets > SGE_QSETS)
            return -EINVAL;

        pi->nqsets = edata.val;

        for_each_port(adapter, i)
            if (adapter->port[i]) {
                pi = adap2pinfo(adapter, i);
                pi->first_qset = first_qset;
                first_qset += pi->nqsets;
            }
        break;
    }
    case CHELSIO_GET_QSET_NUM: {
        struct ch_reg edata;
        struct port_info *pi = netdev_priv(dev);

        edata.cmd = CHELSIO_GET_QSET_NUM;
        edata.val = pi->nqsets;
        if (copy_to_user(useraddr, &edata, sizeof(edata)))
            return -EFAULT;
        break;
    }
    case CHELSIO_LOAD_FW: {
        u8 *fw_data;
        struct ch_mem_range t;

        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (copy_from_user(&t, useraddr, sizeof(t)))
            return -EFAULT;

        fw_data = kmalloc(t.len, GFP_KERNEL);
        if (!fw_data)
            return -ENOMEM;

        if (copy_from_user
            (fw_data, useraddr + sizeof(t), t.len)) {
            kfree(fw_data);
            return -EFAULT;
        }

        ret = t3_load_fw(adapter, fw_data, t.len);
        kfree(fw_data);
        if (ret)
            return ret;
        break;
    }
    case CHELSIO_SETMTUTAB: {
        struct ch_mtus m;
        int i;

        if (!is_offload(adapter))
            return -EOPNOTSUPP;
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (offload_running(adapter))
            return -EBUSY;
        if (copy_from_user(&m, useraddr, sizeof(m)))
            return -EFAULT;
        if (m.nmtus != NMTUS)
            return -EINVAL;
        if (m.mtus[0] < 81)    /* accommodate SACK */
            return -EINVAL;

        /* MTUs must be in ascending order */
        for (i = 1; i < NMTUS; ++i)
            if (m.mtus[i] < m.mtus[i - 1])
                return -EINVAL;

        memcpy(adapter->params.mtus, m.mtus,
               sizeof(adapter->params.mtus));
        break;
    }
    case CHELSIO_GET_PM: {
        struct tp_params *p = &adapter->params.tp;
        struct ch_pm m = { .cmd = CHELSIO_GET_PM };

        if (!is_offload(adapter))
            return -EOPNOTSUPP;
        m.tx_pg_sz = p->tx_pg_size;
        m.tx_num_pg = p->tx_num_pgs;
        m.rx_pg_sz = p->rx_pg_size;
        m.rx_num_pg = p->rx_num_pgs;
        m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
        if (copy_to_user(useraddr, &m, sizeof(m)))
            return -EFAULT;
        break;
    }
    case CHELSIO_SET_PM: {
        struct ch_pm m;
        struct tp_params *p = &adapter->params.tp;

        if (!is_offload(adapter))
            return -EOPNOTSUPP;
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (adapter->flags & FULL_INIT_DONE)
            return -EBUSY;
        if (copy_from_user(&m, useraddr, sizeof(m)))
            return -EFAULT;
        if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
            !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
            return -EINVAL;    /* not power of 2 */
        if (!(m.rx_pg_sz & 0x14000))
            return -EINVAL;    /* not 16KB or 64KB */
        if (!(m.tx_pg_sz & 0x1554000))
            return -EINVAL;
        if (m.tx_num_pg == -1)
            m.tx_num_pg = p->tx_num_pgs;
        if (m.rx_num_pg == -1)
            m.rx_num_pg = p->rx_num_pgs;
        if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
            return -EINVAL;
        if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
            m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
            return -EINVAL;
        p->rx_pg_size = m.rx_pg_sz;
        p->tx_pg_size = m.tx_pg_sz;
        p->rx_num_pgs = m.rx_num_pg;
        p->tx_num_pgs = m.tx_num_pg;
        break;
    }
    case CHELSIO_GET_MEM: {
        struct ch_mem_range t;
        struct mc7 *mem;
        u64 buf[32];

        if (!is_offload(adapter))
            return -EOPNOTSUPP;
        if (!(adapter->flags & FULL_INIT_DONE))
            return -EIO;    /* need the memory controllers */
        if (copy_from_user(&t, useraddr, sizeof(t)))
            return -EFAULT;
        if ((t.addr & 7) || (t.len & 7))
            return -EINVAL;
        if (t.mem_id == MEM_CM)
            mem = &adapter->cm;
        else if (t.mem_id == MEM_PMRX)
            mem = &adapter->pmrx;
        else if (t.mem_id == MEM_PMTX)
            mem = &adapter->pmtx;
        else
            return -EINVAL;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         */
        t.version = 3 | (adapter->params.rev << 10);
        if (copy_to_user(useraddr, &t, sizeof(t)))
            return -EFAULT;

        /*
         * Read 256 bytes at a time as len can be large and we don't
         * want to use huge intermediate buffers.
         */
        useraddr += sizeof(t);    /* advance to start of buffer */
        while (t.len) {
            unsigned int chunk =
                min_t(unsigned int, t.len, sizeof(buf));

            ret =
                t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
                               buf);
            if (ret)
                return ret;
            if (copy_to_user(useraddr, buf, chunk))
                return -EFAULT;
            useraddr += chunk;
            t.addr += chunk;
            t.len -= chunk;
        }
        break;
    }
    case CHELSIO_SET_TRACE_FILTER: {
        struct ch_trace t;
        const struct trace_params *tp;

        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (!offload_running(adapter))
            return -EAGAIN;
        if (copy_from_user(&t, useraddr, sizeof(t)))
            return -EFAULT;

        tp = (const struct trace_params *)&t.sip;
        if (t.config_tx)
            t3_config_trace_filter(adapter, tp, 0,
                                   t.invert_match, t.trace_tx);
        if (t.config_rx)
            t3_config_trace_filter(adapter, tp, 1,
                                   t.invert_match, t.trace_rx);
        break;
    }
    case CHELSIO_SET_PKTSCHED: {
        struct ch_pktsched_params p;

        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (!adapter->open_device_map)
            return -EAGAIN;    /* uP and SGE must be running */
        if (copy_from_user(&p, useraddr, sizeof(p)))
            return -EFAULT;
        send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
                          p.binding);
        break;
    }
    default:
        return -EOPNOTSUPP;
    }
    return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
    int ret, mmd;
    struct adapter *adapter = dev->priv;
    struct port_info *pi = netdev_priv(dev);
    struct mii_ioctl_data *data = if_mii(req);

    switch (cmd) {
    case SIOCGMIIPHY:
        data->phy_id = pi->phy.addr;
        /* FALLTHRU */
    case SIOCGMIIREG: {
        u32 val;
        struct cphy *phy = &pi->phy;

        if (!phy->mdio_read)
            return -EOPNOTSUPP;
        if (is_10G(adapter)) {
            mmd = data->phy_id >> 8;
            if (!mmd)
                mmd = MDIO_DEV_PCS;
            else if (mmd > MDIO_DEV_XGXS)
                return -EINVAL;

            ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                 mmd, data->reg_num, &val);
        } else
            ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                 0, data->reg_num & 0x1f,
                                 &val);
        if (!ret)
            data->val_out = val;
        break;
    }
    case SIOCSMIIREG: {
        struct cphy *phy = &pi->phy;

        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (!phy->mdio_write)
            return -EOPNOTSUPP;
        if (is_10G(adapter)) {
            mmd = data->phy_id >> 8;
            if (!mmd)
                mmd = MDIO_DEV_PCS;
            else if (mmd > MDIO_DEV_XGXS)
                return -EINVAL;

            ret = phy->mdio_write(adapter,
                                  data->phy_id & 0x1f, mmd,
                                  data->reg_num, data->val_in);
        } else
            ret = phy->mdio_write(adapter,
                                  data->phy_id & 0x1f, 0,
                                  data->reg_num & 0x1f,
                                  data->val_in);
        break;
    }
    case SIOCCHIOCTL:
        return cxgb_extension_ioctl(dev, req->ifr_data);
    default:
        return -EOPNOTSUPP;
    }
    return ret;
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
    int ret;
    struct adapter *adapter = dev->priv;
    struct port_info *pi = netdev_priv(dev);

    if (new_mtu < 81)    /* accommodate SACK */
        return -EINVAL;
    if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
        return ret;
    dev->mtu = new_mtu;
    init_port_mtus(adapter);
    if (adapter->params.rev == 0 && offload_running(adapter))
        t3_load_mtus(adapter, adapter->params.mtus,
                     adapter->params.a_wnd, adapter->params.b_wnd,
                     adapter->port[0]->mtu);
    return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
    struct adapter *adapter = dev->priv;
    struct port_info *pi = netdev_priv(dev);
    struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EINVAL;

    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
    t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
    if (offload_running(adapter))
        write_smt_entry(adapter, pi->port_id);
    return 0;
}
/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning. We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
    int i;

    for (i = 0; i < p->nqsets; i++) {
        struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

        spin_lock_irq(&q->lock);
        spin_unlock_irq(&q->lock);
    }
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
    struct adapter *adapter = dev->priv;
    struct port_info *pi = netdev_priv(dev);

    pi->vlan_grp = grp;
    if (adapter->params.rev > 0)
        t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
    else {
        /* single control for all ports */
        unsigned int i, have_vlans = 0;
        for_each_port(adapter, i)
            have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

        t3_set_vlan_accel(adapter, 1, have_vlans);
    }
    t3_synchronize_rx(adapter, pi);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
    /* nothing */
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
    struct adapter *adapter = dev->priv;
    struct sge_qset *qs = dev2qset(dev);

    t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
                                                adapter);
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
    int i;

    for_each_port(adapter, i) {
        struct net_device *dev = adapter->port[i];
        struct port_info *p = netdev_priv(dev);

        if (netif_running(dev)) {
            spin_lock(&adapter->stats_lock);
            t3_mac_update_stats(&p->mac);
            spin_unlock(&adapter->stats_lock);
        }
    }
}

static void check_link_status(struct adapter *adapter)
{
    int i;

    for_each_port(adapter, i) {
        struct net_device *dev = adapter->port[i];
        struct port_info *p = netdev_priv(dev);

        if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
            t3_link_changed(adapter, i);
    }
}
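/*
 * Periodic adapter check task: polls link state on PHYs that lack
 * interrupts, accumulates MAC statistics, and reschedules itself while
 * any port remains active.
 */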
static void t3_adap_check_task(struct work_struct *work)
{
    struct adapter *adapter = container_of(work, struct adapter,
                                           adap_check_task.work);
    const struct adapter_params *p = &adapter->params;

    adapter->check_task_cnt++;

    /* Check link status for PHYs without interrupts */
    if (p->linkpoll_period)
        check_link_status(adapter);

    /* Accumulate MAC stats if needed */
    if (!p->linkpoll_period ||
        (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
        p->stats_update_period) {
        mac_stats_update(adapter);
        adapter->check_task_cnt = 0;
    }

    /* Schedule the next check update if any port is active. */
    spin_lock(&adapter->work_lock);
    if (adapter->open_device_map & PORT_MASK)
        schedule_chk_task(adapter);
    spin_unlock(&adapter->work_lock);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
    struct adapter *adapter = container_of(work, struct adapter,
                                           ext_intr_handler_task);

    t3_phy_intr_handler(adapter);

    /* Now reenable external interrupts */
    spin_lock_irq(&adapter->work_lock);
    if (adapter->slow_intr_mask) {
        adapter->slow_intr_mask |= F_T3DBG;
        t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
        t3_write_reg(adapter, A_PL_INT_ENABLE0,
                     adapter->slow_intr_mask);
    }
    spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
    /*
     * Schedule a task to handle external interrupts as they may be slow
     * and we use a mutex to protect MDIO registers. We disable PHY
     * interrupts in the meantime and let the task reenable them when
     * it's done.
     */
    spin_lock(&adapter->work_lock);
    if (adapter->slow_intr_mask) {
        adapter->slow_intr_mask &= ~F_T3DBG;
        t3_write_reg(adapter, A_PL_INT_ENABLE0,
                     adapter->slow_intr_mask);
        queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
    }
    spin_unlock(&adapter->work_lock);
}
void t3_fatal_err(struct adapter *adapter)
{
    unsigned int fw_status[4];

    if (adapter->flags & FULL_INIT_DONE) {
        t3_sge_stop(adapter);
        t3_intr_disable(adapter);
    }
    CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
    if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
        CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                 fw_status[0], fw_status[1],
                 fw_status[2], fw_status[3]);
}
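/*
 * Attempt to allocate one MSI-X vector per SGE queue set plus one for
 * asynchronous events; if fewer vectors are available, report it and let
 * the caller fall back to another interrupt scheme.
 */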
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
    struct msix_entry entries[SGE_QSETS + 1];
    int i, err;

    for (i = 0; i < ARRAY_SIZE(entries); ++i)
        entries[i].entry = i;

    err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
    if (!err)
        for (i = 0; i < ARRAY_SIZE(entries); ++i)
            adap->msix_info[i].vec = entries[i].vector;
    else if (err > 0)
        dev_info(&adap->pdev->dev,
                 "only %d MSI-X vectors left, not using MSI-X\n", err);
    return err;
}
static void __devinit print_port_info(struct adapter *adap,
                                      const struct adapter_info *ai)
{
    static const char *pci_variant[] = {
        "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
    };

    int i;
    char buf[80];

    if (is_pcie(adap))
        snprintf(buf, sizeof(buf), "%s x%d",
                 pci_variant[adap->params.pci.variant],
                 adap->params.pci.width);
    else
        snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
                 pci_variant[adap->params.pci.variant],
                 adap->params.pci.speed, adap->params.pci.width);

    for_each_port(adap, i) {
        struct net_device *dev = adap->port[i];
        const struct port_info *pi = netdev_priv(dev);

        if (!test_bit(i, &adap->registered_device_map))
            continue;
        printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
               dev->name, ai->desc, pi->port_type->desc,
               adap->params.rev, buf,
               (adap->flags & USING_MSIX) ? " MSI-X" :
               (adap->flags & USING_MSI) ? " MSI" : "");
        if (adap->name == dev->name && adap->params.vpd.mclk)
            printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
                   adap->name, t3_mc7_size(&adap->cm) >> 20,
                   t3_mc7_size(&adap->pmtx) >> 20,
                   t3_mc7_size(&adap->pmrx) >> 20);
    }
}
static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
    static int version_printed;

    int i, err, pci_using_dac = 0;
    unsigned long mmio_start, mmio_len;
    const struct adapter_info *ai;
    struct adapter *adapter = NULL;
    struct port_info *pi;

    if (!version_printed) {
        printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
        ++version_printed;
    }

    if (!cxgb3_wq) {
        cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
        if (!cxgb3_wq) {
            printk(KERN_ERR DRV_NAME
                   ": cannot initialize work queue\n");
            return -ENOMEM;
        }
    }

    err = pci_request_regions(pdev, DRV_NAME);
    if (err) {
        /* Just info, some other driver may have claimed the device. */
        dev_info(&pdev->dev, "cannot obtain PCI resources\n");
        return err;
    }

    err = pci_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "cannot enable PCI device\n");
        goto out_release_regions;
    }

    if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
        pci_using_dac = 1;
        err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
        if (err) {
            dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                    "coherent allocations\n");
            goto out_disable_device;
        }
    } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
        dev_err(&pdev->dev, "no usable DMA configuration\n");
        goto out_disable_device;
    }

    pci_set_master(pdev);
    mmio_start = pci_resource_start(pdev, 0);
    mmio_len = pci_resource_len(pdev, 0);
    ai = t3_get_adapter_info(ent->driver_data);

    adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
    if (!adapter) {
        err = -ENOMEM;
        goto out_disable_device;
    }

    adapter->regs = ioremap_nocache(mmio_start, mmio_len);
    if (!adapter->regs) {
        dev_err(&pdev->dev, "cannot map device registers\n");
        err = -ENOMEM;
        goto out_free_adapter;
    }

    adapter->pdev = pdev;
    adapter->name = pci_name(pdev);
    adapter->msg_enable = dflt_msg_enable;
    adapter->mmio_len = mmio_len;

    mutex_init(&adapter->mdio_lock);
    spin_lock_init(&adapter->work_lock);
    spin_lock_init(&adapter->stats_lock);

    INIT_LIST_HEAD(&adapter->adapter_list);
    INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
    INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
    for (i = 0; i < ai->nports; ++i) {
        struct net_device *netdev;

        netdev = alloc_etherdev(sizeof(struct port_info));
        if (!netdev) {
            err = -ENOMEM;
            goto out_free_dev;
        }

        SET_MODULE_OWNER(netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);

        adapter->port[i] = netdev;
        pi = netdev_priv(netdev);
        pi->rx_csum_offload = 1;
        pi->nqsets = 1;
        pi->first_qset = i;
        pi->activity = 0;
        pi->port_id = i;
        netif_carrier_off(netdev);
        netdev->irq = pdev->irq;
        netdev->mem_start = mmio_start;
        netdev->mem_end = mmio_start + mmio_len - 1;
        netdev->priv = adapter;
        netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
        netdev->features |= NETIF_F_LLTX;
        if (pci_using_dac)
            netdev->features |= NETIF_F_HIGHDMA;

        netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
        netdev->vlan_rx_register = vlan_rx_register;
        netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;

        netdev->open = cxgb_open;
        netdev->stop = cxgb_close;
        netdev->hard_start_xmit = t3_eth_xmit;
        netdev->get_stats = cxgb_get_stats;
        netdev->set_multicast_list = cxgb_set_rxmode;
        netdev->do_ioctl = cxgb_ioctl;
        netdev->change_mtu = cxgb_change_mtu;
        netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = cxgb_netpoll;
#endif
        netdev->weight = 64;

        SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
    }
    pci_set_drvdata(pdev, adapter->port[0]);
    if (t3_prep_adapter(adapter, ai, 1) < 0) {
        err = -ENODEV;
        goto out_free_dev;
    }

    /*
     * The card is now ready to go. If any errors occur during device
     * registration we do not fail the whole card but rather proceed only
     * with the ports we manage to register successfully. However we must
     * register at least one net device.
     */
    for_each_port(adapter, i) {
        err = register_netdev(adapter->port[i]);
        if (err)
            dev_warn(&pdev->dev,
                     "cannot register net device %s, skipping\n",
                     adapter->port[i]->name);
        else {
            /*
             * Change the name we use for messages to the name of
             * the first successfully registered interface.
             */
            if (!adapter->registered_device_map)
                adapter->name = adapter->port[i]->name;

            __set_bit(i, &adapter->registered_device_map);
        }
    }
    if (!adapter->registered_device_map) {
        dev_err(&pdev->dev, "could not register any net devices\n");
        goto out_free_dev;
    }

    /* Driver's ready. Reflect it on LEDs */
    t3_led_ready(adapter);

    if (is_offload(adapter)) {
        __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
        cxgb3_adapter_ofld(adapter);
    }

    /* See what interrupts we'll be using */
    if (msi > 1 && cxgb_enable_msix(adapter) == 0)
        adapter->flags |= USING_MSIX;
    else if (msi > 0 && pci_enable_msi(pdev) == 0)
        adapter->flags |= USING_MSI;

    err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                             &cxgb3_attr_group);

    print_port_info(adapter, ai);
    return 0;
out_free_dev:
    iounmap(adapter->regs);
    for (i = ai->nports - 1; i >= 0; --i)
        if (adapter->port[i])
            free_netdev(adapter->port[i]);

out_free_adapter:
    kfree(adapter);

out_disable_device:
    pci_disable_device(pdev);
out_release_regions:
    pci_release_regions(pdev);
    pci_set_drvdata(pdev, NULL);
    return err;
}
static void __devexit remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    if (dev) {
        int i;
        struct adapter *adapter = dev->priv;

        t3_sge_stop(adapter);
        sysfs_remove_group(&adapter->port[0]->dev.kobj,
                           &cxgb3_attr_group);

        for_each_port(adapter, i)
            if (test_bit(i, &adapter->registered_device_map))
                unregister_netdev(adapter->port[i]);

        if (is_offload(adapter)) {
            cxgb3_adapter_unofld(adapter);
            if (test_bit(OFFLOAD_DEVMAP_BIT,
                         &adapter->open_device_map))
                offload_close(&adapter->tdev);
        }

        t3_free_sge_resources(adapter);
        cxgb_disable_msi(adapter);

        for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
            if (adapter->dummy_netdev[i]) {
                free_netdev(adapter->dummy_netdev[i]);
                adapter->dummy_netdev[i] = NULL;
            }

        for_each_port(adapter, i)
            if (adapter->port[i])
                free_netdev(adapter->port[i]);

        iounmap(adapter->regs);
        kfree(adapter);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
    }
}
static struct pci_driver driver = {
    .name = DRV_NAME,
    .id_table = cxgb3_pci_tbl,
    .probe = init_one,
    .remove = __devexit_p(remove_one),
};

static int __init cxgb3_init_module(void)
{
    int ret;

    cxgb3_offload_init();

    ret = pci_register_driver(&driver);
    return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
    pci_unregister_driver(&driver);
    if (cxgb3_wq)
        destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);