2 * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.7 $)
4 * Header file for qeth TCP Segmentation Offload support.
6 * Copyright 2004 IBM Corporation
8 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
10 * $Revision: 1.7 $ $Date: 2005/04/01 21:40:41 $
14 #include <linux/skbuff.h>
15 #include <linux/tcp.h>
17 #include <linux/ipv6.h>
18 #include <net/ip6_checksum.h>
24 * skb already partially prepared
25 * classic qdio header in skb->data
/*
 * qeth_tso_prepare_skb - make room for and push the TSO qdio header.
 *
 * Grows the skb headroom by sizeof(struct qeth_hdr_ext_tso) (the skb
 * already carries the classic qdio header in skb->data, per the note
 * above), then pushes the extended TSO header in front of it.
 * Returns the pushed header (via qeth_push_skb).
 *
 * NOTE(review): this excerpt is incomplete -- the declaration of 'rc',
 * the error check after qeth_realloc_headroom, and the braces are not
 * visible here; presumably a failed realloc returns NULL. Confirm
 * against the full source.
 */
27 static inline struct qeth_hdr_tso *
28 qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
32 	QETH_DBF_TEXT(trace, 5, "tsoprsk");
	/* may reallocate the skb, hence the struct sk_buff ** parameter */
33 	rc = qeth_realloc_headroom(card, skb,sizeof(struct qeth_hdr_ext_tso));
37 	return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
41 * fill header for a TSO packet
/*
 * qeth_tso_fill_header - populate the qeth TSO header at skb->data.
 *
 * Marks the qdio header as TYPE_TSO and fills the extension header:
 * fixed identification fields, the MSS taken from the skb's tso_size,
 * the datagram header length (IP header + TCP header), and the TCP
 * payload length derived from skb->len.
 */
44 qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
46 	struct qeth_hdr_tso *hdr;
50 	QETH_DBF_TEXT(trace, 5, "tsofhdr");
	/* TSO header was pushed to the front of the skb by prepare_skb */
52 	hdr = (struct qeth_hdr_tso *) skb->data;
55 	/*fix header to TSO values ...*/
56 	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
57 	/*set values which are fix for the first approach ...*/
58 	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
59 	hdr->ext.imb_hdr_no = 1;
60 	hdr->ext.hdr_type = 1;
61 	hdr->ext.hdr_version = 1;
	/* NOTE(review): 28 is a magic length required by the adapter;
	 * its derivation is not visible in this excerpt -- confirm. */
62 	hdr->ext.hdr_len = 28;
63 	/*insert non-fix values */
64 	hdr->ext.mss = skb_shinfo(skb)->tso_size;
	/* L3 + L4 header length: ihl and doff are in 32-bit words */
65 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
66 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
67 				       sizeof(struct qeth_hdr_tso));
71 * change some header values as requested by hardware
/*
 * qeth_tso_set_tcpip_header - prime the L3/L4 headers for the adapter.
 *
 * The OSA hardware expects the length fields zeroed and tcph->check
 * pre-seeded with (the complement of) the pseudo-header checksum only;
 * it fills in the real per-segment values itself.
 *
 * Fix: skb->protocol is stored in network byte order (__be16), so it
 * must be compared against htons(ETH_P_IPV6), not the raw host-order
 * constant. On big-endian s390 htons() is a no-op, so behavior on the
 * native arch is unchanged, but the comparison is now correct on any
 * endianness and clean under sparse __be16 checking.
 *
 * NOTE(review): this excerpt is incomplete -- the declarations of
 * iph/ip6h/tcph, the remaining csum_*_magic arguments, and the
 * IPv4 tot_len handling are not visible here.
 */
74 qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
85 	if (skb->protocol == htons(ETH_P_IPV6)) {
		/* IPv6: payload_len is filled per segment by the adapter */
86 		ip6h->payload_len = 0;
87 		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
91 	/*OSA want us to set these values ...*/
92 	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
/*
 * qeth_tso_prepare_packet - build the complete TSO qdio header.
 *
 * Pushes headroom for the TSO header, zeroes it, fills the generic
 * qdio part (qeth_fill_header), the TSO extension (qeth_tso_fill_header)
 * and finally adjusts the TCP/IP headers for the hardware.
 * Returns the prepared header.
 *
 * NOTE(review): excerpt is incomplete -- the NULL check on 'hdr' that
 * presumably guards the QETH_DBF_TEXT_ error trace, the 'rc' variable,
 * and the return statement are not visible here.
 */
98 static inline struct qeth_hdr_tso *
99 qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
100 			int ipv, int cast_type)
102 	struct qeth_hdr_tso *hdr;
105 	QETH_DBF_TEXT(trace, 5, "tsoprep");
107 	/*get headroom for tso qdio header */
108 	hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
110 	QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
113 	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
114 	/*fill first 32 bytes of qdio header as used
115 	 *FIXME: TSO has two struct members
116 	 * with different names but same size
118 	qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
119 	qeth_tso_fill_header(card, skb);
120 	qeth_tso_set_tcpip_header(card, skb);
/*
 * qeth_tso_get_queue_buffer - force non-packing and advance the queue.
 *
 * If the current output buffer is in EMPTY state but already holds
 * elements (a packing buffer in progress), prime it for transmission
 * and step next_buf_to_fill to the following SBAL so the TSO packet
 * gets a buffer of its own.
 *
 * NOTE(review): excerpt is incomplete -- the flush-count accounting
 * and the return value (callers add it to flush_cnt) are not visible
 * here; confirm against the full source.
 */
125 qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
127 	struct qeth_qdio_out_buffer *buffer;
130 	QETH_DBF_TEXT(trace, 5, "tsobuf");
132 	/* force to non-packing*/
135 	buffer = &queue->bufs[queue->next_buf_to_fill];
136 	/* get a new buffer if current is already in use*/
137 	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
138 	    (buffer->next_element_to_fill > 0)) {
		/* hand the partially-filled buffer to the hardware */
139 		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		/* ring-buffer advance, wraps at QDIO_MAX_BUFFERS_PER_Q */
140 		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
141 					  QDIO_MAX_BUFFERS_PER_Q;
/*
 * qeth_tso_fill_buffer - map one TSO skb into the SBAL elements of buf.
 *
 * Takes a reference on the skb and queues it on the buffer's skb_list
 * (released on TX completion), places headers (qdio TSO header plus
 * the datagram headers) in the first element with FIRST_FRAG set, then
 * maps the payload: via __qeth_fill_buffer_frag when the skb has page
 * frags, otherwise by slicing the linear data at page boundaries into
 * MIDDLE_FRAG/LAST_FRAG elements. Finally primes the buffer.
 *
 * NOTE(review): excerpt is incomplete -- the 'element' increments, the
 * loop around the page-slicing code, the branch selecting LAST vs
 * MIDDLE flags, and the return value are not visible here.
 */
149 qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
152 	int length, length_here, element;
154 	struct qdio_buffer *buffer;
155 	struct qeth_hdr_tso *hdr;
158 	QETH_DBF_TEXT(trace, 3, "tsfilbuf");
160 	/*increment user count and queue skb ...*/
161 	atomic_inc(&skb->users);
162 	skb_queue_tail(&buf->skb_list, skb);
164 	/*initialize all variables...*/
165 	buffer = buf->buffer;
166 	hdr = (struct qeth_hdr_tso *)skb->data;
	/* everything up to and including the L3/L4 headers */
167 	hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
168 	data = skb->data + hdr_len;
169 	length = skb->len - hdr_len;
170 	element = buf->next_element_to_fill;
171 	/*fill first buffer entry only with header information */
172 	buffer->element[element].addr = skb->data;
173 	buffer->element[element].length = hdr_len;
174 	buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
175 	buf->next_element_to_fill++;
176 	/*check if we have frags ...*/
177 	if (skb_shinfo(skb)->nr_frags > 0) {
		/* element 0 holds the headers; frags start at element 1 */
180 		__qeth_fill_buffer_frag(skb, buffer,1,
181 					(int *)&buf->next_element_to_fill);
185 	/*... if not, use this */
188 	/* length_here is the remaining amount of data in this page */
189 	length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
190 	if (length < length_here)
191 		length_here = length;
192 	buffer->element[element].addr = data;
193 	buffer->element[element].length = length_here;
194 	length -= length_here;
	/* final slice of the payload closes the frag chain ... */
196 	buffer->element[element].flags =
197 		SBAL_FLAGS_LAST_FRAG;
	/* ... intermediate slices are middle frags */
199 	buffer->element[element].flags =
200 		SBAL_FLAGS_MIDDLE_FRAG;
204 	buf->next_element_to_fill = element;
206 	/*prime buffer now ...*/
207 	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
/*
 * qeth_tso_send_packet - transmit one TSO skb on an output queue.
 *
 * Prepares the TSO qdio header, verifies the skb fits in a single
 * SBAL, spins to take the queue lock, forces the queue to non-packing
 * mode, fills a free buffer and flushes it to the hardware. If no
 * empty buffer can be obtained the packet is dropped and counted in
 * tx_dropped; on success tx_packets/tx_bytes are updated.
 *
 * NOTE(review): excerpt is incomplete -- the error-return paths after
 * the tx_dropped increments, the unlock on those paths, the
 * declarations of start_index/flush_cnt, and the final return value
 * are not visible here; confirm against the full source.
 */
212 qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
213 		     struct qeth_qdio_out_q *queue, int ipv, int cast_type)
216 	struct qeth_hdr_tso *hdr;
217 	struct qeth_qdio_out_buffer *buffer;
220 	QETH_DBF_TEXT(trace, 3, "tsosend");
222 	if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
224 	/*check if skb fits in one SBAL ...*/
225 	if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
227 	/*lock queue, force switching to non-packing and send it ...*/
	/* spin until we swap UNLOCKED -> LOCKED on queue->state */
228 	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
231 	start_index = queue->next_buf_to_fill;
232 	buffer = &queue->bufs[queue->next_buf_to_fill];
233 	/*check if card is too busy ...*/
234 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
235 		card->stats.tx_dropped++;
238 	/*let's force to non-packing and get a new SBAL*/
239 	flush_cnt += qeth_tso_get_queue_buffer(queue);
240 	buffer = &queue->bufs[queue->next_buf_to_fill];
	/* second chance failed too -> card really is busy, drop */
241 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
242 		card->stats.tx_dropped++;
245 	flush_cnt += qeth_tso_fill_buffer(buffer, skb);
246 	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
247 				  QDIO_MAX_BUFFERS_PER_Q;
249 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
251 	qeth_flush_buffers(queue, 0, start_index, flush_cnt);
252 	/*do some statistics */
253 	card->stats.tx_packets++;
254 	card->stats.tx_bytes += skb->len;