/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"
#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
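/*
 * Note (added for clarity): with the standard NETIF_MSG_* bit values this
 * default bitmap evaluates to 0xff, i.e. the low eight message classes.
 */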
#define EEPROM_MAGIC 0x38E2F10C

#define to_net_dev(class) container_of(class, struct net_device, class_dev)

#define CH_DEVICE(devid, ssid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1, 1),	/* T302E */
	CH_DEVICE(0x22, 1, 2),	/* T310E */
	CH_DEVICE(0x23, 1, 3),	/* T320X */
	CH_DEVICE(0x24, 1, 1),	/* T302X */
	CH_DEVICE(0x25, 1, 3),	/* T320E */
	CH_DEVICE(0x26, 1, 2),	/* T310X */
	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
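/*
 * Illustrative usage (hypothetical invocations, not from the original
 * source): loading the module with "modprobe cxgb3 msi=0" forces legacy pin
 * interrupts, while "msi=1" allows MSI but rules out MSI-X.
 */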
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
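/*
 * Illustrative usage (hypothetical invocation): "modprobe cxgb3
 * ofld_disable=1" runs the adapter as a plain NIC without protocol offload.
 */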
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
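/*
 * Note (added for clarity): work is therefore queued below with
 * queue_work(cxgb3_wq, ...) and queue_delayed_work(cxgb3_wq, ...) rather
 * than schedule_work(), keeping it off the shared keventd queue.
 */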
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1Gbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
		link_report(dev);
	}
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
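/*
 * Worked example (added for illustration): with nq0 = 2 and nq1 = 2 the
 * first half of rspq_map reads 0,1,0,1,... and the second half 2,3,2,3,...,
 * so hash results for port 0 land on qsets 0-1 and those for port 1 on
 * qsets 2-3.
 */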
/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
	int i, j, dummy_idx = 0;
	struct net_device *nd;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets - 1; j++) {
			if (!adap->dummy_netdev[dummy_idx]) {
				nd = alloc_netdev(0, "", ether_setup);
				if (!nd)
					goto failed;

				nd->priv = adap;
				nd->weight = 64;
				set_bit(__LINK_STATE_START, &nd->state);
				adap->dummy_netdev[dummy_idx] = nd;
			}
			strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
			dummy_idx++;
		}
	}
	return 0;

failed:
	while (--dummy_idx >= 0) {
		free_netdev(adap->dummy_netdev[dummy_idx]);
		adap->dummy_netdev[dummy_idx] = NULL;
	}
	return -ENOMEM;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;
	struct net_device *dev;

	for_each_port(adap, i) {
		dev = adap->port[i];
		while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
			msleep(1);
	}

	for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
		dev = adap->dummy_netdev[i];
		if (dev)
			while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
				msleep(1);
	}
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
	unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq,
				j == 0 ? dev :
					 adap->dummy_netdev[dummy_dev_idx++]);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct class_device *cd, char *buf,
			 ssize_t(*format) (struct adapter *, char *))
{
	ssize_t len;
	struct adapter *adap = to_net_dev(cd)->priv;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (adap, buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
			  ssize_t(*set) (struct adapter *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;
	struct adapter *adap = to_net_dev(cd)->priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (adap, val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct adapter *adap, char *buf) \
{ \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
	return attr_show(cd, buf, format_##name); \
}
static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
{
	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct class_device *cd, const char *buf,
			      size_t len)
{
	return attr_store(cd, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct adapter *adap, unsigned int val)
{
	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct class_device *cd, const char *buf,
			      size_t len)
{
	return attr_store(cd, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&class_device_attr_cam_size.attr,
	&class_device_attr_nfilters.attr,
	&class_device_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
{
	ssize_t len;
	unsigned int v, addr, bpt, cpt;
	struct adapter *adap = to_net_dev(cd)->priv;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
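/*
 * Worked example (added for illustration, assuming vpd.cclk is in kHz): the
 * scheduler rate is bpt bytes every cpt core-clock ticks, so bytes/sec =
 * bpt * (cclk * 1000) / cpt, and dividing by 125 converts bytes/sec to Kbps
 * (1 Kbps = 125 bytes/sec).
 */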
static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
			     size_t len, int sched)
{
	char *endp;
	ssize_t ret;
	unsigned int val;
	struct adapter *adap = to_net_dev(cd)->priv;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
	return tm_attr_show(cd, buf, sched); \
} \
static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
{ \
	return tm_attr_store(cd, buf, len, sched); \
} \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
	&class_device_attr_sched0.attr,
	&class_device_attr_sched1.attr,
	&class_device_attr_sched2.attr,
	&class_device_attr_sched3.attr,
	&class_device_attr_sched4.attr,
	&class_device_attr_sched5.attr,
	&class_device_attr_sched6.attr,
	&class_device_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}
static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}
/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err)
			goto out;

		err = init_dummy_netdevs(adap);
		if (err)
			goto out;

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		if (request_msix_data_irqs(adap)) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
				      adap->name, adap)))
		goto irq_err;

	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
static int offload_open(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct t3cdev *tdev = T3CDEV(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	int other_ports = adapter->open_device_map & PORT_MASK;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);

	t3_port_intr_disable(adapter, p->port_id);
	netif_stop_queue(dev);
	p->phy.ops->power_down(&p->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(p->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = netdev_priv(dev);
	struct net_device_stats *ns = &p->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&p->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->priv;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            "
};
static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	u32 fw_vers = 0;
	struct adapter *adapter = dev->priv;

	t3_get_fw_version(adapter, &fw_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
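/*
 * Worked example (added for illustration): a rev-2 PCIe adapter reports
 * regs->version = 3 | (2 << 10) | (1 << 31) = 0x80000803.
 */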
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	int i;
	struct adapter *adapter = dev->priv;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->priv;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = adapter->params.sge.qset[0].fl_size;
	e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
	e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
	e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	struct adapter *adapter = dev->priv;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = &adapter->params.sge.qset[i];

		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->priv;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = dev->priv;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
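/*
 * Note (added for clarity): t3_seeprom_read() above transfers aligned 32-bit
 * words, so get_eeprom() reads the surrounding aligned window into a scratch
 * buffer and copies out exactly the byte range the caller requested.
 */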
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = dev->priv;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_stats_count = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_perm_addr = ethtool_op_get_perm_addr
};
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
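/*
 * Note (added for clarity): negative values act as "not specified" sentinels
 * in the ioctl parameter blocks below, so in_range() lets them pass and the
 * corresponding setting is left unchanged.
 */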
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	int ret;
	u32 cmd;
	struct adapter *adapter = dev->priv;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SETREG:{
		struct ch_reg edata;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if ((edata.addr & 3) != 0
		    || edata.addr >= adapter->mmio_len)
			return -EINVAL;
		writel(edata.val, adapter->regs + edata.addr);
		break;
	}
	case CHELSIO_GETREG:{
		struct ch_reg edata;

		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if ((edata.addr & 3) != 0
		    || edata.addr >= adapter->mmio_len)
			return -EINVAL;
		edata.val = readl(adapter->regs + edata.addr);
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS)
		    || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
				 MAX_RX_JUMBO_BUFFERS)
		    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
				 MAX_RSPQ_ENTRIES))
			return -EINVAL;
		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
			    &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
					    qset[i];
					q->polling = t.polling;
				}
			}
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;
		struct port_info *pi = netdev_priv(dev);

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
		    !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
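		/*
		 * Note (added for clarity): 0x14000 == (1 << 14) | (1 << 16),
		 * i.e. the Rx page size must be exactly 16KB or 64KB, while
		 * 0x1554000 sets bits 14, 16, 18, 20, 22 and 24, allowing Tx
		 * page sizes of 16KB, 64KB, 256KB, 1MB, 4MB or 16MB.
		 */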
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret =
			    t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
					   buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	case CHELSIO_SET_PKTSCHED:{
		struct ch_pktsched_params p;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!adapter->open_device_map)
			return -EAGAIN;	/* uP and SGE must be running */
		if (copy_from_user(&p, useraddr, sizeof(p)))
			return -EFAULT;
		send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
				  p.binding);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	int ret, mmd;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
			    phy->mdio_read(adapter, data->phy_id & 0x1f,
					   mmd, data->reg_num, &val);
		} else
			ret =
			    phy->mdio_read(adapter, data->phy_id & 0x1f,
					   0, data->reg_num & 0x1f,
					   &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
			    phy->mdio_write(adapter,
					    data->phy_id & 0x1f, mmd,
					    data->reg_num,
					    data->val_in);
		} else
			ret =
			    phy->mdio_write(adapter,
					    data->phy_id & 0x1f, 0,
					    data->reg_num & 0x1f,
					    data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = 0; i < p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* nothing */
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct sge_qset *qs = dev2qset(dev);

	t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
						    adapter);
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}
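	/*
	 * Illustrative cadence (added for clarity, linkpoll_period being in
	 * tenths of a second): linkpoll_period = 10 and stats_update_period
	 * = 5 runs this task once per second and folds in MAC stats on every
	 * fifth run.
	 */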
	/* Schedule the next check update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock(&adapter->work_lock);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_intr_disable(adapter);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20);
	}
}
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;
		pi->first_qset = i;
		pi->activity = 0;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;
		netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif
		netdev->weight = 64;

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter->port[0]);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		int i;
		struct adapter *adapter = dev->priv;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
				   &cxgb3_attr_group);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
			if (adapter->dummy_netdev[i]) {
				free_netdev(adapter->dummy_netdev[i]);
				adapter->dummy_netdev[i] = NULL;
			}

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);