/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"
#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
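/*
 * Descriptive note (summarizing how these limits are used later in this
 * file): they bound what a user may request through the ethtool ring
 * parameters (get/set_sge_param()) and the CHELSIO_SET_QSET_PARAMS
 * extension ioctl; requests outside [MIN, MAX] are rejected with -EINVAL.
 */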
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1, 1),	/* T302E */
	CH_DEVICE(0x22, 1, 2),	/* T310E */
	CH_DEVICE(0x23, 1, 3),	/* T320X */
	CH_DEVICE(0x24, 1, 1),	/* T302X */
	CH_DEVICE(0x25, 1, 3),	/* T320E */
	CH_DEVICE(0x26, 1, 2),	/* T310X */
	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
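/*
 * Example (hypothetical invocation, not from the original source): loading
 * the driver with "modprobe cxgb3 msi=1" skips MSI-X and falls back to MSI
 * or legacy pin interrupts, while "msi=0" forces pin interrupts.  The value
 * is also visible at /sys/module/cxgb3/parameters/msi (mode 0644 above),
 * but it is only consulted when an adapter is probed.
 */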
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
		link_report(dev);
	}
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);

	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
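/*
 * Worked example of the mapping above (illustrative, not from the original
 * source): with nq0 = 2 queue sets on port 0 and nq1 = 2 on port 1, the
 * first half of rspq_map alternates 0,1,0,1,... and the second half
 * alternates 2,3,2,3,..., so hashed traffic for each port round-robins
 * over that port's own response queues only.
 */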
/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
	int i, j, dummy_idx = 0;
	struct net_device *nd;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets - 1; j++) {
			if (!adap->dummy_netdev[dummy_idx]) {
				nd = alloc_netdev(0, "", ether_setup);
				if (!nd)
					goto free_all;

				nd->priv = adap;
				nd->weight = 64;
				set_bit(__LINK_STATE_START, &nd->state);
				adap->dummy_netdev[dummy_idx] = nd;
			}
			strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
			dummy_idx++;
		}
	}
	return 0;

free_all:
	while (--dummy_idx >= 0) {
		free_netdev(adap->dummy_netdev[dummy_idx]);
		adap->dummy_netdev[dummy_idx] = NULL;
	}
	return -ENOMEM;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers
 * of both netdevices representing interfaces and the dummy ones for the
 * extra queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;
	struct net_device *dev;

	for_each_port(adap, i) {
		dev = adap->port[i];
		while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
			msleep(1);
	}

	for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
		dev = adap->dummy_netdev[i];
		if (dev)
			while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
				msleep(1);
	}
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
	unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq,
				j == 0 ? dev :
					 adap->dummy_netdev[dummy_dev_idx++]);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
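/*
 * Note on interrupt routing (summarizing the code above together with
 * name_msix_vecs() and request_msix_data_irqs()): with MSI-X, vector 0 is
 * reserved for slow-path/asynchronous events and queue set i is tied to
 * vector i + 1; without MSI-X all queue sets share the single device IRQ.
 */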
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
			 char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (ret >= 0)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct adapter *adap = dev->priv; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct adapter *adap = dev->priv;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
			    char *buf, int sched)
{
	ssize_t len;
	unsigned int v, addr, bpt, cpt;
	struct adapter *adap = to_net_dev(d)->priv;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len, int sched)
{
	char *endp;
	ssize_t ret;
	unsigned int val;
	struct adapter *adap = to_net_dev(d)->priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}
static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}
#define FW_FNAME "t3fw-%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	return ret;
}
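/*
 * Illustrative note (assumed version numbers, not from the original source):
 * with FW_VERSION_MAJOR = 4 and FW_VERSION_MINOR = 0 the snprintf() above
 * produces "t3fw-4.0.bin", which request_firmware() asks userspace to
 * supply, typically from the system firmware directory such as
 * /lib/firmware.
 */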
/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL)
			err = upgrade_fw(adap);
		if (err)
			goto out;

		err = init_dummy_netdevs(adap);
		if (err)
			goto out;

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		if (request_msix_data_irqs(adap)) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
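/*
 * Worked example of the timeout above (illustrative): linkpoll_period is
 * expressed in tenths of a second, so linkpoll_period = 5 yields
 * timeo = HZ * 5 / 10, i.e. the check task runs every 0.5s; when link
 * polling is not needed the task instead runs every stats_update_period
 * seconds.
 */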
static int offload_open(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct t3cdev *tdev = T3CDEV(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	int other_ports = adapter->open_device_map & PORT_MASK;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);

	t3_port_intr_disable(adapter, p->port_id);
	netif_stop_queue(dev);
	p->phy.ops->power_down(&p->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(p->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);
	struct net_device_stats *ns = &p->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&p->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->priv;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};
static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	u32 fw_vers = 0;
	struct adapter *adapter = dev->priv;

	t3_get_fw_version(adapter, &fw_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
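	/*
	 * Worked example (illustrative): a T3 rev-2 PCIe adapter reports
	 * version 3 | (2 << 10) | (1 << 31) = 0x80000803.
	 */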
	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	int i;
	struct adapter *adapter = dev->priv;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	struct qset_params *q;
	struct adapter *adapter = dev->priv;
	const struct port_info *pi = netdev_priv(dev);

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	int i, err = 0;
	struct adapter *adapter = dev->priv;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = dev->priv;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_stats_count = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_perm_addr = ethtool_op_get_perm_addr
};
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
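/*
 * Note (summarizing how in_range() is used in cxgb_extension_ioctl()
 * below): the extension ioctl treats negative parameter values as "leave
 * this setting unchanged", which is why in_range() deliberately accepts
 * any negative val.
 */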
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	int ret;
	u32 cmd;
	struct adapter *adapter = dev->priv;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;
		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
			    &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
					    qset[i];
					q->polling = t.polling;
				}
			}
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
		    !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
					     buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	int ret, mmd;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     mmd, data->reg_num, &val);
		} else
			ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
					     0, data->reg_num & 0x1f,
					     &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret = phy->mdio_write(adapter,
					      data->phy_id & 0x1f, mmd,
					      data->reg_num,
					      data->val_in);
		} else
			ret = phy->mdio_write(adapter,
					      data->phy_id & 0x1f, 0,
					      data->reg_num & 0x1f,
					      data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = 0; i < p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing */
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct sge_qset *qs = dev2qset(dev);

	t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
						    adapter);
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	rtnl_lock();	/* synchronize with ifdown */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock(&adapter->work_lock);
}
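/*
 * Worked example of the accumulation condition above (illustrative):
 * linkpoll_period counts tenths of a second, so with linkpoll_period = 5
 * and stats_update_period = 10 the MAC stats are folded in once every
 * 20 runs of the check task (20 * 5 / 10 = 10, i.e. every 10 seconds,
 * since each run occurs every 0.5s).
 */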
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_intr_disable(adapter);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20);
	}
}
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;
		pi->first_qset = i;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;
		netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif
		netdev->weight = 64;

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter->port[0]);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
			if (adapter->dummy_netdev[i]) {
				free_netdev(adapter->dummy_netdev[i]);
				adapter->dummy_netdev[i] = NULL;
			}

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);