2 * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $)
4 * Header file for qeth TCP Segmentation Offload support.
6 * Copyright 2004 IBM Corporation
8 * Author(s): Frank Pavlic <pavlic@de.ibm.com>
10 * $Revision: 1.6 $ $Date: 2005/03/24 09:04:18 $
14 #include <linux/skbuff.h>
15 #include <linux/tcp.h>
17 #include <linux/ipv6.h>
18 #include <net/ip6_checksum.h>
24 * skb already partially prepared
25 * classic qdio header in skb->data
/*
 * Grow the skb's headroom to make room for the TSO extension header and
 * push it in front of the existing qdio header, returning a pointer to
 * the newly pushed qeth_hdr_tso.
 *
 * NOTE(review): interior lines are missing from this view (function
 * braces, declaration of rc, and presumably an error check on rc between
 * the realloc and the push) — confirm against the full source.
 */
27 static inline struct qeth_hdr_tso *
28 qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
32 	QETH_DBF_TEXT(trace, 5, "tsoprsk");
	/* make sure there is enough headroom for the extension header */
33 	rc = qeth_realloc_headroom(card, skb,sizeof(struct qeth_hdr_ext_tso));
	/* prepend the extension header; returns pointer to new skb->data */
37 	return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
41  * fill header for a TSO packet
44 qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
46 	struct qeth_hdr_tso *hdr;
50 	QETH_DBF_TEXT(trace, 5, "tsofhdr");
	/* TSO header sits at the front of the skb (pushed by prepare_skb) */
52 	hdr = (struct qeth_hdr_tso *) skb->data;
55 	/*fix header to TSO values ...*/
56 	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
57 	/*set values which are fix for the first approach ...*/
58 	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
59 	hdr->ext.imb_hdr_no  = 1;
60 	hdr->ext.hdr_type    = 1;
61 	hdr->ext.hdr_version = 1;
	/* NOTE(review): 28 is a magic constant here — presumably the fixed
	 * qeth_hdr length expected by the adapter; verify against hw spec. */
62 	hdr->ext.hdr_len     = 28;
63 	/*insert non-fix values */
	/* mss from the stack's segmentation info drives hw segmentation */
64 	hdr->ext.mss = skb_shinfo(skb)->tso_size;
	/* datagram header = IP header + TCP header, both in 32-bit words.
	 * NOTE(review): iph/tcph declarations are not visible in this view —
	 * presumably initialized from skb->nh/skb->h above; confirm. */
65 	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
	/* payload = total skb length minus L3/L4 headers and the TSO header */
66 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
67 				       sizeof(struct qeth_hdr_tso));
71  * change some header values as requested by hardware
74 qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
	/* For IPv6: zero the payload length and seed the TCP checksum with
	 * the pseudo-header checksum, as the adapter completes it in hw.
	 * NOTE(review): ip6h/tcph/iph declarations and the remaining
	 * csum arguments are missing from this view — confirm upstream. */
85 	if (skb->protocol == ETH_P_IPV6) {
86 		ip6h->payload_len = 0;
87 		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
91 	/*OSA want us to set these values ...*/
	/* IPv4 path: same pseudo-header checksum seeding for the OSA card */
92 	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
/*
 * Prepare an outgoing skb for TSO: push the TSO qdio header, fill both
 * the generic qeth header and the TSO extension, then patch the TCP/IP
 * headers as the hardware requires. Returns the header pointer
 * (NULL-check/error path lines are missing from this view).
 */
98 static inline struct qeth_hdr_tso *
99 qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
100 			int ipv, int cast_type)
102 	struct qeth_hdr_tso *hdr;
105 	QETH_DBF_TEXT(trace, 5, "tsoprep");
107 	/*get headroom for tso qdio header */
108 	hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
	/* NOTE(review): the surrounding error check for hdr/rc is not
	 * visible here — this debug line presumably sits in that branch. */
110 		QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
	/* zero the whole TSO header before filling individual fields */
113 	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
114 	/*fill first 32 bytes of qdio header as used
115 	 *FIXME: TSO has two struct members
116 	 * with different names but same size
118 	qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
119 	qeth_tso_fill_header(card, skb);
120 	qeth_tso_set_tcpip_header(card, skb);
/*
 * Obtain an empty output buffer for a TSO frame, forcing the queue to
 * non-packing mode. If the current buffer already has elements filled,
 * prime it and advance to the next buffer slot.
 * NOTE(review): the return value / flush count and closing braces are
 * missing from this view — confirm in the full source.
 */
125 qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
127 	struct qeth_qdio_out_buffer *buffer;
130 	QETH_DBF_TEXT(trace, 5, "tsobuf");
132 	/* force to non-packing*/
135 	buffer = &queue->bufs[queue->next_buf_to_fill];
136 	/* get a new buffer if current is already in use*/
	/* buffer is EMPTY-state but partially filled: prime it for transmit
	 * and move next_buf_to_fill forward (ring wrap via modulo) */
137 	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
138 	    (buffer->next_element_to_fill > 0)) {
139 		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
140 		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
141 			QDIO_MAX_BUFFERS_PER_Q;
/*
 * Map each page fragment of a nonlinear skb into consecutive SBAL
 * elements of the output buffer. All fragments get MIDDLE_FRAG except
 * the last, which is tagged LAST_FRAG.
 * NOTE(review): the declaration of 'addr', the frag->page_offset term,
 * and the element increment are missing from this view.
 */
148 __qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf,
151 	struct skb_frag_struct *frag;
152 	struct qdio_buffer *buffer;
153 	int fragno, cnt, element;
156 	QETH_DBF_TEXT(trace, 6, "tsfilfrg");
158 	/*initialize variables ...*/
159 	fragno = skb_shinfo(skb)->nr_frags;
160 	buffer = buf->buffer;
161 	element = buf->next_element_to_fill;
162 	/*fill buffer elements .....*/
163 	for (cnt = 0; cnt < fragno; cnt++) {
164 		frag = &skb_shinfo(skb)->frags[cnt];
		/* physical address of the fragment page (+offset, cut off
		 * in this view) — the SBAL element stores a raw address */
165 		addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
167 		buffer->element[element].addr = (char *)addr;
168 		buffer->element[element].length = frag->size;
		/* every fragment but the final one is a MIDDLE_FRAG */
169 		if (cnt < (fragno - 1))
170 			buffer->element[element].flags =
171 				SBAL_FLAGS_MIDDLE_FRAG;
173 			buffer->element[element].flags =
174 				SBAL_FLAGS_LAST_FRAG;
	/* record how many elements we consumed for the caller */
177 	buf->next_element_to_fill = element;
/*
 * Fill a qdio output buffer with a TSO skb: the first SBAL element
 * carries only the headers (qeth TSO header + IP/TCP headers), the
 * remaining elements carry the payload — either page fragments (for a
 * nonlinear skb) or page-sized slices of the linear data.
 * NOTE(review): declarations of hdr_len/data, the while-loop header for
 * the linear-data path, and the element increments are missing from this
 * view — behavior of the loop tail cannot be fully confirmed here.
 */
181 qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
184 	int length, length_here, element;
186 	struct qdio_buffer *buffer;
187 	struct qeth_hdr_tso *hdr;
190 	QETH_DBF_TEXT(trace, 3, "tsfilbuf");
192 	/*increment user count and queue skb ...*/
	/* hold a reference so the skb survives until TX completion frees it */
193 	atomic_inc(&skb->users);
194 	skb_queue_tail(&buf->skb_list, skb);
196 	/*initialize all variables...*/
197 	buffer = buf->buffer;
198 	hdr = (struct qeth_hdr_tso *)skb->data;
	/* header region = TSO qdio header + IP/TCP datagram headers */
199 	hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
200 	data = skb->data + hdr_len;
201 	length = skb->len - hdr_len;
202 	element = buf->next_element_to_fill;
203 	/*fill first buffer entry only with header information */
204 	buffer->element[element].addr = skb->data;
205 	buffer->element[element].length = hdr_len;
206 	buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
207 	buf->next_element_to_fill++;
	/* nonlinear skb: payload lives in page frags, delegate */
209 	if (skb_shinfo(skb)->nr_frags > 0) {
210 		__qeth_tso_fill_buffer_frag(buf, skb);
214 	/*start filling buffer entries ...*/
217 		/* length_here is the remaining amount of data in this page */
218 		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
219 		if (length < length_here)
220 			length_here = length;
221 		buffer->element[element].addr = data;
222 		buffer->element[element].length = length_here;
223 		length -= length_here;
		/* last slice of linear data closes the fragment chain */
225 			buffer->element[element].flags =
226 				SBAL_FLAGS_LAST_FRAG;
228 			buffer->element[element].flags =
229 				SBAL_FLAGS_MIDDLE_FRAG;
233 	/*set the buffer to primed ...*/
234 	buf->next_element_to_fill = element;
	/* buffer complete — hand it to the qdio layer */
236 	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
/*
 * Top-level TSO transmit path: prepare the skb's TSO headers, verify it
 * fits into a single SBAL, lock the output queue (spinning on a
 * compare-and-swap of the queue state), fill a buffer, flush it to the
 * adapter and update transmit statistics.
 * NOTE(review): error-path returns, the CAS loop body, and several
 * closing braces are missing from this view — the tx_dropped branches
 * presumably unlock and bail out; confirm in the full source.
 */
241 qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
242 		     struct qeth_qdio_out_q *queue, int ipv, int cast_type)
245 	struct qeth_hdr_tso *hdr;
246 	struct qeth_qdio_out_buffer *buffer;
249 	QETH_DBF_TEXT(trace, 3, "tsosend");
	/* build TSO + qeth headers in front of the payload */
251 	if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
253 	/*check if skb fits in one SBAL ...*/
254 	if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
256 	/*lock queue, force switching to non-packing and send it ...*/
	/* spin until we atomically take the queue lock via its state word */
257 	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
260 	start_index = queue->next_buf_to_fill;
261 	buffer = &queue->bufs[queue->next_buf_to_fill];
262 	/*check if card is too busy ...*/
263 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
264 		card->stats.tx_dropped++;
267 	/*let's force to non-packing and get a new SBAL*/
268 	flush_cnt += qeth_tso_get_queue_buffer(queue);
269 	buffer = &queue->bufs[queue->next_buf_to_fill];
	/* still no empty buffer after forcing non-packing: drop the frame */
270 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
271 		card->stats.tx_dropped++;
274 	flush_cnt += qeth_tso_fill_buffer(buffer, skb);
	/* advance the ring index (wraps at QDIO_MAX_BUFFERS_PER_Q) */
275 	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
276 		QDIO_MAX_BUFFERS_PER_Q;
278 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	/* push the primed buffer(s) out to the adapter */
280 	qeth_flush_buffers(queue, 0, start_index, flush_cnt);
281 	/*do some statistics */
282 	card->stats.tx_packets++;
283 	card->stats.tx_bytes += skb->len;