/*
 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;
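
/*
 * Note: active-open TIDs are handed out from a private number space that
 * starts at ATID_BASE, chosen above the hardware TID range, so an atid
 * returned to a client can never collide with a hardware-assigned TID.
 * lookup_atid() undoes the offset by subtracting atid_base.
 */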

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}

/**
 *	cxgb3_register_client - register an offload client
 *	@client: the client
 *
 *	Add the client to the client list and call back the client for each
 *	activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);
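
/*
 * Illustrative sketch of a client (the names below are hypothetical, not
 * symbols from this driver): an upper-layer module fills in a
 * struct cxgb3_client and registers it; its ->add() callback then runs,
 * under cxgb3_db_lock, for every already-activated offload device.
 *
 *	static void my_add(struct t3cdev *tdev) { ... }
 *	static void my_remove(struct t3cdev *tdev) { ... }
 *
 *	static struct cxgb3_client my_client = {
 *		.name	= "my_ulp",
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 *	cxgb3_register_client(&my_client);
 */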

/**
 *	cxgb3_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Remove the client from the client list and call back the client for
 *	each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 *	cxgb3_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients when an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

/**
 *	cxgb3_remove_clients - deactivate registered clients
 *			       for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients when an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}
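
/*
 * Note that cxgb3_add_clients() and cxgb3_remove_clients() invoke the
 * client callbacks with cxgb3_db_lock held, so a callback must not try
 * to register or unregister a client itself.
 */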

static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		const struct vlan_group *grp;
		struct net_device *dev = adapter->port[i];
		const struct port_info *p = netdev_priv(dev);

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			if (vlan && vlan != VLAN_VID_MASK) {
				grp = p->vlan_grp;
				dev = grp ? grp->vlan_devices[vlan] : NULL;
			}
			return dev;
		}
	}
	return NULL;
}

static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int ret = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
				     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		/* on rx, the iscsi pdu has to be < rx page size and the
		   whole pdu + cpl headers has to fit into one sge buffer */
		uiip->max_rxsz = min_t(unsigned int,
				       adapter->params.tp.rx_pg_size,
				       (adapter->sge.qs[0].fl[1].buf_size -
					sizeof(struct cpl_rx_data) * 2 -
					sizeof(struct cpl_rx_data_ddp)));
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS:{
		struct rdma_info *req = data;
		struct pci_dev *pdev = adapter->pdev;

		req->udbell_physbase = pci_resource_start(pdev, 2);
		req->udbell_len = pci_resource_len(pdev, 2);
		req->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		req->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		break;
	}
	case RDMA_CQ_OP:{
		unsigned long flags;
		struct rdma_cq_op *req = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
					req->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM:{
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP:{
		struct rdma_cq_setup *req = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, req->id,
					  req->base_addr, req->size,
					  ASYNC_NOTIF_RSPQ,
					  req->ovfl_mode, req->credits,
					  req->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP:{
		struct rdma_ctrlqp_setup *req = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_CTRL, 0,
					 req->base_addr, req->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}
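
/*
 * All of the RDMA operations above serialize against other SGE context
 * updates through adapter->sge.reg_lock.  RDMA_CQ_OP is the only one using
 * the irqsave variant because, as noted above, it may be called from any
 * context; the remaining operations expect process context.
 */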

static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
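
/*
 * Minimal sketch of how an offload client uses the control hook (the
 * variable names are illustrative only):
 *
 *	unsigned int wr_max, wr_len;
 *
 *	if (tdev->ctl(tdev, GET_MAX_OUTSTANDING_WR, &wr_max) < 0 ||
 *	    tdev->ctl(tdev, GET_WR_LEN, &wr_len) < 0)
 *		return -EOPNOTSUPP;
 */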

/*
 * Dummy handler for Rx offload packets in case we get an offload packet
 * before proper processing is set up.  This complains and drops the packet
 * as it isn't normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
	       n, ntohl(*(__be32 *)skbs[0]->data));
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);
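
/*
 * Free atid/stid entries double as freelist links: the unions overlay the
 * t3c_tid client/ctx fields with a 'next' pointer, so the free pools cost
 * no extra memory.
 */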

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL | __GFP_NOFAIL);
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		spin_lock_bh(&td->tid_release_lock);
	}
	spin_unlock_bh(&td->tid_release_lock);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	td->tid_release_list = p;
	if (!p->ctx)		/* schedule only when the list was empty */
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);
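
/*
 * The deferred-release path above exists for callers in atomic context:
 * when cxgb3_remove_tid() fails to allocate the CPL_TID_RELEASE skb with
 * GFP_ATOMIC, the tid is queued here and the work item retries from
 * process context, where GFP_KERNEL | __GFP_NOFAIL cannot fail.
 */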

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);
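
/*
 * Typical atid life cycle (sketch; the send step depends on the client):
 * a client calls cxgb3_alloc_atid() before issuing an active-open request,
 * the hardware echoes the atid back in CPL_ACT_OPEN_RPL (see
 * do_act_open_rpl() below), and the client releases it again with
 * cxgb3_free_atid().
 */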

int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
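
/*
 * do_act_open_rpl(), do_stid_rpl() and do_hwtid_rpl() above (and several
 * handlers below) share one dispatch pattern: recover the TID from the CPL
 * header, look up the owning client, and forward the message to that
 * client's per-opcode handler; messages whose TID has no live client
 * context are logged and dropped.
 */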

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		/* No client owns this tid; acknowledge the abort ourselves. */
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb =
		    alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);

		if (!reply_skb) {
			printk(KERN_ERR
			       "do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
		OPCODE_TID(rpl) =
		    htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
		rpl->cmd = req->status;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SET_TCB_RPL status %u for tid %u\n",
		       rpl->status, GET_TID(rpl));
	return CPL_RET_BUF_DONE;
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb->mac.raw = skb->data;
	netif_receive_skb(skb);
	return 0;
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
	unsigned int opcode = G_OPCODE(ntohl(skb->csum));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE):{
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_PMTU_UPDATE):
		break;
	case (NETEVENT_REDIRECT):{
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->new);
		cxgb_neigh_update(nr->new->neighbour);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be
 * supplied to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		printk(KERN_ERR "T3C: handler registration for "
		       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);
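
/*
 * Sketch of handler registration as an upper layer might do it
 * (my_rx_handler is a hypothetical function, shown only to illustrate the
 * cpl_handler_func signature):
 *
 *	static int my_rx_handler(struct t3cdev *dev, struct sk_buff *skb)
 *	{
 *		... consume the CPL message ...
 *		return CPL_RET_BUF_DONE;
 *	}
 *
 *	t3_register_cpl_handler(CPL_ISCSI_HDR, my_rx_handler);
 */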

/*
 * T3CDEV's receive method.
 */
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = G_OPCODE(ntohl(skb->csum));
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			printk(KERN_ERR "%s: CPL message (opcode %u) had "
			       "unknown TID %u\n", dev->name, opcode,
			       G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network
 * taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	if (dev && is_offloading(dev)) {
		struct t3cdev *tdev = T3CDEV(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}

void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old->neighbour->dev;
	newdev = new->neighbour->dev;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __FUNCTION__);
		return;
	}
	tdev = T3CDEV(olddev);
	BUG_ON(!tdev);
	if (tdev != T3CDEV(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __FUNCTION__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new->neighbour, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __FUNCTION__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}
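
/*
 * Reference counting in cxgb_redirect(): t3_l2t_get() returns the entry
 * with one reference held; every connection that adopts it via
 * set_l2t_ix() takes an extra l2t_hold(), and the initial reference is
 * dropped by the final l2t_release() above.
 */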

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
	unsigned long p = (unsigned long)addr;

	if (p >= VMALLOC_START && p < VMALLOC_END)
		vfree(addr);
	else
		kfree(addr);
}

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}
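
/*
 * Layout note: all three tables share the single cxgb_alloc_mem() block,
 * ordered tid_tab | stid_tab | atid_tab, which is why free_tid_maps()
 * below frees only t->tid_tab.
 */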

static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err = -ENOMEM;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kcalloc(1, sizeof(*t), GFP_KERNEL);
	if (!t)
		return err;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;

out_free:
	kfree(t);
	return err;
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adapter->params.rev == 0 ? T3A : T3B;

	register_tdev(tdev);
}

void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}