1 /**
2 * @file
3 * Transmission Control Protocol, outgoing traffic
4 *
5 * The output functions of TCP.
6 *
7 */
8
9 /*
10 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
11 * All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without modification,
14 * are permitted provided that the following conditions are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright notice,
17 * this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright notice,
19 * this list of conditions and the following disclaimer in the documentation
20 * and/or other materials provided with the distribution.
21 * 3. The name of the author may not be used to endorse or promote products
22 * derived from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
25 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
27 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
28 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
29 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
33 * OF SUCH DAMAGE.
34 *
35 * This file is part of the lwIP TCP/IP stack.
36 *
37 * Author: Adam Dunkels <adam@sics.se>
38 *
39 */
40
41 #include "lwip/opt.h"
42
43 #if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
44
45 #include "lwip/tcp_impl.h"
46 #include "lwip/def.h"
47 #include "lwip/mem.h"
48 #include "lwip/memp.h"
49 #include "lwip/sys.h"
50 #include "lwip/ip_addr.h"
51 #include "lwip/netif.h"
52 #include "lwip/inet_chksum.h"
53 #include "lwip/stats.h"
54 #include "lwip/snmp.h"
55
56 #include <string.h>
57
58 /* Define some copy-macros for checksum-on-copy so that the code looks
59 nicer by preventing too many ifdef's. */
60 #if TCP_CHECKSUM_ON_COPY
61 #define TCP_DATA_COPY(dst, src, len, seg) do { \
62 tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \
63 len, &seg->chksum, &seg->chksum_swapped); \
64 seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0)
65 #define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \
66 tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped);
67 #else /* TCP_CHECKSUM_ON_COPY*/
68 #define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len)
69 #define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len)
70 #endif /* TCP_CHECKSUM_ON_COPY*/
71
72 /** Define this to 1 for an extra check that the output checksum is valid
73 * (useful when the checksum is generated by the application, not the stack) */
74 #ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK
75 #define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0
76 #endif
77
78 /* Forward declarations.*/
79 static void tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb);
80
81 /** Allocate a pbuf and create a tcphdr at p->payload, used for output
82 * functions other than the default tcp_output -> tcp_output_segment
83 * (e.g. tcp_send_empty_ack, etc.)
84 *
85 * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr)
86 * @param optlen length of header-options
87 * @param datalen length of tcp data to reserve in pbuf
88 * @param seqno_be seqno in network byte order (big-endian)
89 * @return pbuf with p->payload being the tcp_hdr
90 */
91 static struct pbuf *
92 tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen,
93 u32_t seqno_be /* already in network byte order */)
94 {
95 struct tcp_hdr *tcphdr;
96 struct pbuf *p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM);
97 if (p != NULL) {
98 LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr",
99 (p->len >= TCP_HLEN + optlen));
100 tcphdr = (struct tcp_hdr *)p->payload;
101 tcphdr->src = htons(pcb->local_port);
102 tcphdr->dest = htons(pcb->remote_port);
103 tcphdr->seqno = seqno_be;
104 tcphdr->ackno = htonl(pcb->rcv_nxt);
105 TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), TCP_ACK);
106 tcphdr->wnd = htons(pcb->rcv_ann_wnd);
107 tcphdr->chksum = 0;
108 tcphdr->urgp = 0;
109
110 /* If we're sending a packet, update the announced right window edge */
111 pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;
112 }
113 return p;
114 }
115
116 /**
117 * Called by tcp_close() to send a segment including the FIN flag but no data.
118 *
119 * @param pcb the tcp_pcb over which to send a segment
120 * @return ERR_OK if sent, another err_t otherwise
121 */
122 err_t
123 tcp_send_fin(struct tcp_pcb *pcb)
124 {
125 /* first, try to add the fin to the last unsent segment */
126 if (pcb->unsent != NULL) {
127 struct tcp_seg *last_unsent;
128 for (last_unsent = pcb->unsent; last_unsent->next != NULL;
129 last_unsent = last_unsent->next);
130
131 if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) {
132 /* no SYN/FIN/RST flag in the header, we can add the FIN flag */
133 TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN);
134 return ERR_OK;
135 }
136 }
137 /* could not piggyback the FIN on an existing segment: enqueue a new one carrying only the FIN flag */
138 return tcp_enqueue_flags(pcb, TCP_FIN);
139 }
140
141 /**
142 * Create a TCP segment with prefilled header.
143 *
144 * Called by tcp_write and tcp_enqueue_flags.
145 *
146 * @param pcb Protocol control block for the TCP connection.
147 * @param p pbuf that is used to hold the TCP header.
148 * @param flags TCP flags for header.
149 * @param seqno TCP sequence number of this packet
150 * @param optflags options to include in TCP header
151 * @return a new tcp_seg pointing to p, or NULL.
152 * The TCP header is filled in except ackno and wnd.
153 * p is freed on failure.
154 */
155 static struct tcp_seg *
156 tcp_create_segment(struct tcp_pcb *pcb, struct pbuf *p, u8_t flags, u32_t seqno, u8_t optflags)
157 {
158 struct tcp_seg *seg;
159 u8_t optlen = LWIP_TCP_OPT_LENGTH(optflags);
160
161 if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) {
162 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_create_segment: no memory.\n"));
163 pbuf_free(p);
164 return NULL;
165 }
166 seg->flags = optflags;
167 seg->next = NULL;
168 seg->p = p;
169 seg->len = p->tot_len - optlen;
170 #if TCP_OVERSIZE_DBGCHECK
171 seg->oversize_left = 0;
172 #endif /* TCP_OVERSIZE_DBGCHECK */
173 #if TCP_CHECKSUM_ON_COPY
174 seg->chksum = 0;
175 seg->chksum_swapped = 0;
176 /* check optflags */
177 LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED",
178 (optflags & TF_SEG_DATA_CHECKSUMMED) == 0);
179 #endif /* TCP_CHECKSUM_ON_COPY */
180
181 /* build TCP header */
182 if (pbuf_header(p, TCP_HLEN)) {
183 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_create_segment: no room for TCP header in pbuf.\n"));
184 TCP_STATS_INC(tcp.err);
185 tcp_seg_free(seg);
186 return NULL;
187 }
188 seg->tcphdr = (struct tcp_hdr *)seg->p->payload;
189 seg->tcphdr->src = htons(pcb->local_port);
190 seg->tcphdr->dest = htons(pcb->remote_port);
191 seg->tcphdr->seqno = htonl(seqno);
192 /* ackno is set in tcp_output */
193 TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), flags);
194 /* wnd and chksum are set in tcp_output */
195 seg->tcphdr->urgp = 0;
196 return seg;
197 }
198
199 /**
200 * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end.
201 *
202 * This function is like pbuf_alloc(layer, length, PBUF_RAM) except
203 * there may be extra bytes available at the end.
204 *
205 * @param layer flag to define header size.
206 * @param length size of the pbuf's payload.
207 * @param max_length maximum usable size of payload+oversize.
208 * @param oversize pointer to a u16_t that will receive the number of usable tail bytes.
209 * @param pcb The TCP connection that will enqueue the pbuf.
210 * @param apiflags API flags given to tcp_write.
211 * @param first_seg true when this pbuf will be used in the first enqueued segment.
212 * @return the allocated pbuf, or NULL if allocation failed.
213 */
214 #if TCP_OVERSIZE
215 static struct pbuf *
216 tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length,
217 u16_t *oversize, struct tcp_pcb *pcb, u8_t apiflags,
218 u8_t first_seg)
219 {
220 struct pbuf *p;
221 u16_t alloc = length;
222
223 #if LWIP_NETIF_TX_SINGLE_PBUF
224 LWIP_UNUSED_ARG(max_length);
225 LWIP_UNUSED_ARG(pcb);
226 LWIP_UNUSED_ARG(apiflags);
227 LWIP_UNUSED_ARG(first_seg);
228 /* always create MSS-sized pbufs */
229 alloc = TCP_MSS;
230 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
231 if (length < max_length) {
232 /* Should we allocate an oversized pbuf, or just the minimum
233 * length required? If tcp_write is going to be called again
234 * before this segment is transmitted, we want the oversized
235 * buffer. If the segment will be transmitted immediately, we can
236 * save memory by allocating only length. We use a simple
237 * heuristic based on the following information:
238 *
239 * Did the user set TCP_WRITE_FLAG_MORE?
240 *
241 * Will the Nagle algorithm defer transmission of this segment?
242 */
243 if ((apiflags & TCP_WRITE_FLAG_MORE) ||
244 (!(pcb->flags & TF_NODELAY) &&
245 (!first_seg ||
246 pcb->unsent != NULL ||
247 pcb->unacked != NULL))) {
248 alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(length + TCP_OVERSIZE));
249 }
250 }
251 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
252 p = pbuf_alloc(layer, alloc, PBUF_RAM);
253 if (p == NULL) {
254 return NULL;
255 }
256 LWIP_ASSERT("need unchained pbuf", p->next == NULL);
257 *oversize = p->len - length;
258 /* trim p->len to the currently used size */
259 p->len = p->tot_len = length;
260 return p;
261 }
262 #else /* TCP_OVERSIZE */
263 #define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM)
264 #endif /* TCP_OVERSIZE */
265
266 #if TCP_CHECKSUM_ON_COPY
267 /** Add a checksum of newly added data to the segment */
268 static void
269 tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum,
270 u8_t *seg_chksum_swapped)
271 {
272 u32_t helper;
273 /* add chksum to old chksum and fold to u16_t */
274 helper = chksum + *seg_chksum;
275 chksum = FOLD_U32T(helper);
276 if ((len & 1) != 0) {
277 *seg_chksum_swapped = 1 - *seg_chksum_swapped;
278 chksum = SWAP_BYTES_IN_WORD(chksum);
279 }
280 *seg_chksum = chksum;
281 }
282 #endif /* TCP_CHECKSUM_ON_COPY */
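/* A minimal sketch (not part of lwIP) of the folding step used above: two partial
 * one's-complement sums are combined by adding them in 32 bits and folding the carry
 * back into 16 bits. The helper name 'example_combine_chksum' is made up for
 * illustration only. */
#if 0
static u16_t
example_combine_chksum(u16_t sum_a, u16_t sum_b)
{
  u32_t helper = (u32_t)sum_a + (u32_t)sum_b; /* at most one carry out of bit 15 */
  return FOLD_U32T(helper);                   /* fold the carry back in, as tcp_seg_add_chksum() does */
}
#endif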
283
284 /** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen).
285 *
286 * @param pcb the tcp pcb to check for
287 * @param len length of data to send (checked against snd_buf)
288 * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise
289 */
290 static err_t
291 tcp_write_checks(struct tcp_pcb *pcb, u16_t len)
292 {
293 /* connection is in invalid state for data transmission? */
294 if ((pcb->state != ESTABLISHED) &&
295 (pcb->state != CLOSE_WAIT) &&
296 (pcb->state != SYN_SENT) &&
297 (pcb->state != SYN_RCVD)) {
298 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n"));
299 return ERR_CONN;
300 } else if (len == 0) {
301 return ERR_OK;
302 }
303
304 /* fail on too much data */
305 if (len > pcb->snd_buf) {
306 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"U16_F")\n",
307 len, pcb->snd_buf));
308 pcb->flags |= TF_NAGLEMEMERR;
309 return ERR_MEM;
310 }
311
312 LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen));
313
314 /* If total number of pbufs on the unsent/unacked queues exceeds the
315 * configured maximum, return an error */
316 /* check for configured max queuelen and possible overflow */
317 if ((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
318 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n",
319 pcb->snd_queuelen, TCP_SND_QUEUELEN));
320 TCP_STATS_INC(tcp.memerr);
321 pcb->flags |= TF_NAGLEMEMERR;
322 return ERR_MEM;
323 }
324 if (pcb->snd_queuelen != 0) {
325 LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty",
326 pcb->unacked != NULL || pcb->unsent != NULL);
327 } else {
328 LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty",
329 pcb->unacked == NULL && pcb->unsent == NULL);
330 }
331 return ERR_OK;
332 }
333
334 /**
335 * Write data for sending (but does not send it immediately).
336 *
337 * It waits in the expectation of more data being sent soon (as
338 * it can then send the data more efficiently by combining it into larger segments).
339 * To prompt the system to send data now, call tcp_output() after
340 * calling tcp_write().
341 *
342 * @param pcb Protocol control block for the TCP connection to enqueue data for.
343 * @param arg Pointer to the data to be enqueued for sending.
344 * @param len Data length in bytes
345 * @param apiflags combination of the following flags:
346 * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack
347 * - TCP_WRITE_FLAG_MORE (0x02) more data will follow, so the PSH flag is not set on the last enqueued segment
348 * @return ERR_OK if enqueued, another err_t on error
349 */
350 err_t
351 tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags)
352 {
353 struct pbuf *concat_p = NULL;
354 struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL;
355 u16_t pos = 0; /* position in 'arg' data */
356 u16_t queuelen;
357 u8_t optlen = 0;
358 u8_t optflags = 0;
359 #if TCP_OVERSIZE
360 u16_t oversize = 0;
361 u16_t oversize_used = 0;
362 #endif /* TCP_OVERSIZE */
363 #if TCP_CHECKSUM_ON_COPY
364 u16_t concat_chksum = 0;
365 u8_t concat_chksum_swapped = 0;
366 u16_t concat_chksummed = 0;
367 #endif /* TCP_CHECKSUM_ON_COPY */
368 err_t err;
369
370 #if LWIP_NETIF_TX_SINGLE_PBUF
371 /* Always copy to try to create single pbufs for TX */
372 apiflags |= TCP_WRITE_FLAG_COPY;
373 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
374
375 LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n",
376 (void *)pcb, arg, len, (u16_t)apiflags));
377 LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)",
378 arg != NULL, return ERR_ARG;);
379
380 err = tcp_write_checks(pcb, len);
381 if (err != ERR_OK) {
382 return err;
383 }
384 queuelen = pcb->snd_queuelen;
385
386 #if LWIP_TCP_TIMESTAMPS
387 if ((pcb->flags & TF_TIMESTAMP)) {
388 optflags = TF_SEG_OPTS_TS;
389 optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS);
390 }
391 #endif /* LWIP_TCP_TIMESTAMPS */
392
393
394 /*
395 * TCP segmentation is done in three phases with increasing complexity:
396 *
397 * 1. Copy data directly into an oversized pbuf.
398 * 2. Chain a new pbuf to the end of pcb->unsent.
399 * 3. Create new segments.
400 *
401 * We may run out of memory at any point. In that case we must
402 * return ERR_MEM and not change anything in pcb. Therefore, all
403 * changes are recorded in local variables and committed at the end
404 * of the function. Some pcb fields are maintained in local copies:
405 *
406 * queuelen = pcb->snd_queuelen
407 * oversize = pcb->unsent_oversize
408 *
409 * These variables are set consistently by the phases:
410 *
411 * seg points to the last segment tampered with.
412 *
413 * pos records progress as data is segmented.
414 */
415
416 /* Find the tail of the unsent queue. */
417 if (pcb->unsent != NULL) {
418 u16_t space;
419 u16_t unsent_optlen;
420
421 /* @todo: this could be sped up by keeping last_unsent in the pcb */
422 for (last_unsent = pcb->unsent; last_unsent->next != NULL;
423 last_unsent = last_unsent->next);
424
425 /* Usable space at the end of the last unsent segment */
426 unsent_optlen = LWIP_TCP_OPT_LENGTH(last_unsent->flags);
427 space = pcb->mss - (last_unsent->len + unsent_optlen);
428
429 /*
430 * Phase 1: Copy data directly into an oversized pbuf.
431 *
432 * The number of bytes copied is recorded in the oversize_used
433 * variable. The actual copying is done at the bottom of the
434 * function.
435 */
436 #if TCP_OVERSIZE
437 #if TCP_OVERSIZE_DBGCHECK
438 /* check that pcb->unsent_oversize matches last_unsent->oversize_left */
439 LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)",
440 pcb->unsent_oversize == last_unsent->oversize_left);
441 #endif /* TCP_OVERSIZE_DBGCHECK */
442 oversize = pcb->unsent_oversize;
443 if (oversize > 0) {
444 LWIP_ASSERT("inconsistent oversize vs. space", oversize_used <= space);
445 seg = last_unsent;
446 oversize_used = oversize < len ? oversize : len;
447 pos += oversize_used;
448 oversize -= oversize_used;
449 space -= oversize_used;
450 }
451 /* now we are either finished or oversize is zero */
452 LWIP_ASSERT("inconsistend oversize vs. len", (oversize == 0) || (pos == len));
453 #endif /* TCP_OVERSIZE */
454
455 /*
456 * Phase 2: Chain a new pbuf to the end of pcb->unsent.
457 *
458 * We don't extend segments containing SYN/FIN flags or options
459 * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at
460 * the end.
461 */
462 if ((pos < len) && (space > 0) && (last_unsent->len > 0)) {
463 u16_t seglen = space < len - pos ? space : len - pos;
464 seg = last_unsent;
465
466 /* Create a pbuf with a copy or reference to seglen bytes. We
467 * can use PBUF_RAW here since the data appears in the middle of
468 * a segment. A header will never be prepended. */
469 if (apiflags & TCP_WRITE_FLAG_COPY) {
470 /* Data is copied */
471 if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) {
472 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2,
473 ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n",
474 seglen));
475 goto memerr;
476 }
477 #if TCP_OVERSIZE_DBGCHECK
478 last_unsent->oversize_left = oversize;
479 #endif /* TCP_OVERSIZE_DBGCHECK */
480 TCP_DATA_COPY2(concat_p->payload, (u8_t*)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped);
481 #if TCP_CHECKSUM_ON_COPY
482 concat_chksummed += seglen;
483 #endif /* TCP_CHECKSUM_ON_COPY */
484 } else {
485 /* Data is not copied */
486 if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) {
487 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2,
488 ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
489 goto memerr;
490 }
491 #if TCP_CHECKSUM_ON_COPY
492 /* calculate the checksum of nocopy-data */
493 tcp_seg_add_chksum(~inet_chksum((u8_t*)arg + pos, seglen), seglen,
494 &concat_chksum, &concat_chksum_swapped);
495 concat_chksummed += seglen;
496 #endif /* TCP_CHECKSUM_ON_COPY */
497 /* reference the non-volatile payload data */
498 concat_p->payload = (u8_t*)arg + pos;
499 }
500
501 pos += seglen;
502 queuelen += pbuf_clen(concat_p);
503 }
504 } else {
505 #if TCP_OVERSIZE
506 LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)",
507 pcb->unsent_oversize == 0);
508 #endif /* TCP_OVERSIZE */
509 }
510
511 /*
512 * Phase 3: Create new segments.
513 *
514 * The new segments are chained together in the local 'queue'
515 * variable, ready to be appended to pcb->unsent.
516 */
517 while (pos < len) {
518 struct pbuf *p;
519 u16_t left = len - pos;
520 u16_t max_len = pcb->mss - optlen;
521 u16_t seglen = left > max_len ? max_len : left;
522 #if TCP_CHECKSUM_ON_COPY
523 u16_t chksum = 0;
524 u8_t chksum_swapped = 0;
525 #endif /* TCP_CHECKSUM_ON_COPY */
526
527 if (apiflags & TCP_WRITE_FLAG_COPY) {
528 /* If copy is set, memory should be allocated and data copied
529 * into pbuf */
530 if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, pcb->mss, &oversize, pcb, apiflags, queue == NULL)) == NULL) {
531 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen));
532 goto memerr;
533 }
534 LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen",
535 (p->len >= seglen));
536 TCP_DATA_COPY2((char *)p->payload + optlen, (u8_t*)arg + pos, seglen, &chksum, &chksum_swapped);
537 } else {
538 /* Copy is not set: First allocate a pbuf for holding the data.
539 * Since the referenced data is available at least until it is
540 * sent out on the link (as it has to be ACKed by the remote
541 * party) we can safely use PBUF_ROM instead of PBUF_REF here.
542 */
543 struct pbuf *p2;
544 #if TCP_OVERSIZE
545 LWIP_ASSERT("oversize == 0", oversize == 0);
546 #endif /* TCP_OVERSIZE */
547 if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) {
548 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
549 goto memerr;
550 }
551 #if TCP_CHECKSUM_ON_COPY
552 /* calculate the checksum of nocopy-data */
553 chksum = ~inet_chksum((u8_t*)arg + pos, seglen);
554 #endif /* TCP_CHECKSUM_ON_COPY */
555 /* reference the non-volatile payload data */
556 p2->payload = (u8_t*)arg + pos;
557
558 /* Second, allocate a pbuf for the headers. */
559 if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
560 /* If allocation fails, we have to deallocate the data pbuf as
561 * well. */
562 pbuf_free(p2);
563 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write: could not allocate memory for header pbuf\n"));
564 goto memerr;
565 }
566 /* Concatenate the headers and data pbufs together. */
567 pbuf_cat(p/*header*/, p2/*data*/);
568 }
569
570 queuelen += pbuf_clen(p);
571
572 /* Now that there are more segments queued, we check again if the
573 * length of the queue exceeds the configured maximum or
574 * overflows. */
575 if ((queuelen > TCP_SND_QUEUELEN) || (queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
576 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write: queue too long %"U16_F" (%"U16_F")\n", queuelen, TCP_SND_QUEUELEN));
577 pbuf_free(p);
578 goto memerr;
579 }
580
581 if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) {
582 goto memerr;
583 }
584 #if TCP_OVERSIZE_DBGCHECK
585 seg->oversize_left = oversize;
586 #endif /* TCP_OVERSIZE_DBGCHECK */
587 #if TCP_CHECKSUM_ON_COPY
588 seg->chksum = chksum;
589 seg->chksum_swapped = chksum_swapped;
590 seg->flags |= TF_SEG_DATA_CHECKSUMMED;
591 #endif /* TCP_CHECKSUM_ON_COPY */
592
593 /* first segment of to-be-queued data? */
594 if (queue == NULL) {
595 queue = seg;
596 } else {
597 /* Attach the segment to the end of the queued segments */
598 LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL);
599 prev_seg->next = seg;
600 }
601 /* remember last segment of to-be-queued data for next iteration */
602 prev_seg = seg;
603
604 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n",
605 ntohl(seg->tcphdr->seqno),
606 ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg)));
607
608 pos += seglen;
609 }
610
611 /*
612 * All three segmentation phases were successful. We can commit the
613 * transaction.
614 */
615
616 /*
617 * Phase 1: If data has been added to the preallocated tail of
618 * last_unsent, we update the length fields of the pbuf chain.
619 */
620 #if TCP_OVERSIZE
621 if (oversize_used > 0) {
622 struct pbuf *p;
623 /* Bump tot_len of whole chain, len of tail */
624 for (p = last_unsent->p; p; p = p->next) {
625 p->tot_len += oversize_used;
626 if (p->next == NULL) {
627 TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent);
628 p->len += oversize_used;
629 }
630 }
631 last_unsent->len += oversize_used;
632 #if TCP_OVERSIZE_DBGCHECK
633 last_unsent->oversize_left -= oversize_used;
634 #endif /* TCP_OVERSIZE_DBGCHECK */
635 }
636 pcb->unsent_oversize = oversize;
637 #endif /* TCP_OVERSIZE */
638
639 /*
640 * Phase 2: concat_p can be concatenated onto last_unsent->p
641 */
642 if (concat_p != NULL) {
643 LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty",
644 (last_unsent != NULL));
645 pbuf_cat(last_unsent->p, concat_p);
646 last_unsent->len += concat_p->tot_len;
647 #if TCP_CHECKSUM_ON_COPY
648 if (concat_chksummed) {
649 if (concat_chksum_swapped) {
650 concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum);
651 }
652 tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum,
653 &last_unsent->chksum_swapped);
654 last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED;
655 }
656 #endif /* TCP_CHECKSUM_ON_COPY */
657 }
658
659 /*
660 * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that
661 * is harmless.
662 */
663 if (last_unsent == NULL) {
664 pcb->unsent = queue;
665 } else {
666 last_unsent->next = queue;
667 }
668
669 /*
670 * Finally update the pcb state.
671 */
672 pcb->snd_lbb += len;
673 pcb->snd_buf -= len;
674 pcb->snd_queuelen = queuelen;
675
676 LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n",
677 pcb->snd_queuelen));
678 if (pcb->snd_queuelen != 0) {
679 LWIP_ASSERT("tcp_write: valid queue length",
680 pcb->unacked != NULL || pcb->unsent != NULL);
681 }
682
683 /* Set the PSH flag in the last segment that we enqueued. */
684 if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE)==0)) {
685 TCPH_SET_FLAG(seg->tcphdr, TCP_PSH);
686 }
687
688 return ERR_OK;
689 memerr:
690 pcb->flags |= TF_NAGLEMEMERR;
691 TCP_STATS_INC(tcp.memerr);
692
693 if (concat_p != NULL) {
694 pbuf_free(concat_p);
695 }
696 if (queue != NULL) {
697 tcp_segs_free(queue);
698 }
699 if (pcb->snd_queuelen != 0) {
700 LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL ||
701 pcb->unsent != NULL);
702 }
703 LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen));
704 return ERR_MEM;
705 }
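/* A minimal usage sketch (not part of lwIP): how an application using the raw API
 * typically pairs tcp_write() with tcp_output(). The function name 'example_send'
 * and its error handling are assumptions for illustration only. */
#if 0
static err_t
example_send(struct tcp_pcb *pcb, const void *data, u16_t len)
{
  /* enqueue a copy of the data; ERR_MEM means "try again later" (e.g. from the sent callback) */
  err_t err = tcp_write(pcb, data, len, TCP_WRITE_FLAG_COPY);
  if (err == ERR_OK) {
    /* nothing is transmitted until tcp_output() is called (or the stack decides to send) */
    err = tcp_output(pcb);
  }
  return err;
}
#endif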
706
707 /**
708 * Enqueue SYN or FIN for transmission (along with any needed TCP options).
709 *
710 * Called by tcp_connect(), tcp_listen_input(), and tcp_send_fin().
711 *
712 * @param pcb Protocol control block for the TCP connection.
713 * @param flags TCP header flags to set in the outgoing segment
714 * (must include TCP_SYN or TCP_FIN).
715 * @return ERR_OK if enqueued, another err_t on error
716 */
717 err_t
718 tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags)
719 {
720 struct pbuf *p;
721 struct tcp_seg *seg;
722 u8_t optflags = 0;
723 u8_t optlen = 0;
724
725 LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen));
726
727 LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)",
728 (flags & (TCP_SYN | TCP_FIN)) != 0);
729
730 /* check for configured max queuelen and possible overflow */
731 if ((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
732 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_enqueue_flags: too long queue %"U16_F" (max %"U16_F")\n",
733 pcb->snd_queuelen, TCP_SND_QUEUELEN));
734 TCP_STATS_INC(tcp.memerr);
735 pcb->flags |= TF_NAGLEMEMERR;
736 return ERR_MEM;
737 }
738
739 if (flags & TCP_SYN) {
740 optflags = TF_SEG_OPTS_MSS;
741 }
742 #if LWIP_TCP_TIMESTAMPS
743 if ((pcb->flags & TF_TIMESTAMP)) {
744 optflags |= TF_SEG_OPTS_TS;
745 }
746 #endif /* LWIP_TCP_TIMESTAMPS */
747 optlen = LWIP_TCP_OPT_LENGTH(optflags);
748
749 /* tcp_enqueue_flags is always called with either SYN or FIN in flags.
750 * We need one available snd_buf byte to do that.
751 * This means we can't send FIN while snd_buf==0. A better fix would be to
752 * not include SYN and FIN sequence numbers in the snd_buf count. */
753 if (pcb->snd_buf == 0) {
754 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_enqueue_flags: no send buffer available\n"));
755 TCP_STATS_INC(tcp.memerr);
756 return ERR_MEM;
757 }
758
759 /* Allocate pbuf with room for TCP header + options */
760 if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
761 pcb->flags |= TF_NAGLEMEMERR;
762 TCP_STATS_INC(tcp.memerr);
763 return ERR_MEM;
764 }
765 LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen",
766 (p->len >= optlen));
767
768 /* Allocate memory for tcp_seg, and fill in fields. */
769 if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) {
770 pcb->flags |= TF_NAGLEMEMERR;
771 TCP_STATS_INC(tcp.memerr);
772 return ERR_MEM;
773 }
774 LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % MEM_ALIGNMENT) == 0);
775 LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0);
776
777 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE,
778 ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n",
779 ntohl(seg->tcphdr->seqno),
780 ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg),
781 (u16_t)flags));
782
783 /* Now append seg to pcb->unsent queue */
784 if (pcb->unsent == NULL) {
785 pcb->unsent = seg;
786 } else {
787 struct tcp_seg *useg;
788 for (useg = pcb->unsent; useg->next != NULL; useg = useg->next);
789 useg->next = seg;
790 }
791 #if TCP_OVERSIZE
792 /* The new unsent tail has no space */
793 pcb->unsent_oversize = 0;
794 #endif /* TCP_OVERSIZE */
795
796 /* SYN and FIN bump the sequence number */
797 if ((flags & TCP_SYN) || (flags & TCP_FIN)) {
798 pcb->snd_lbb++;
799 /* optlen does not influence snd_buf */
800 pcb->snd_buf--;
801 }
802 if (flags & TCP_FIN) {
803 pcb->flags |= TF_FIN;
804 }
805
806 /* update number of segments on the queues */
807 pcb->snd_queuelen += pbuf_clen(seg->p);
808 LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen));
809 if (pcb->snd_queuelen != 0) {
810 LWIP_ASSERT("tcp_enqueue_flags: invalid queue length",
811 pcb->unacked != NULL || pcb->unsent != NULL);
812 }
813
814 return ERR_OK;
815 }
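/* A minimal sketch (not part of lwIP) of how connection setup uses tcp_enqueue_flags():
 * queue a SYN and then ask tcp_output() to transmit it. 'example_start_connect' is a
 * made-up name; the real logic lives in tcp_connect(). */
#if 0
static err_t
example_start_connect(struct tcp_pcb *pcb)
{
  err_t err = tcp_enqueue_flags(pcb, TCP_SYN);
  if (err == ERR_OK) {
    err = tcp_output(pcb);
  }
  return err;
}
#endif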
816
817
818 #if LWIP_TCP_TIMESTAMPS
819 /* Build a timestamp option (12 bytes long) at the specified options pointer
820 *
821 * @param pcb tcp_pcb
822 * @param opts option pointer where to store the timestamp option
823 */
824 static void
825 tcp_build_timestamp_option(struct tcp_pcb *pcb, u32_t *opts)
826 {
827 /* Pad with two NOP options to make everything nicely aligned */
828 opts[0] = PP_HTONL(0x0101080A);
829 opts[1] = htonl(sys_now());
830 opts[2] = htonl(pcb->ts_recent);
831 }
832 #endif
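/* An illustrative sketch (not lwIP code) of the 12 bytes written above, as they appear
 * on the wire: an RFC 1323 timestamp option padded with two NOPs. The array is an
 * assumption for illustration only; TSval/TSecr are filled in at send time. */
#if 0
static const u8_t example_ts_option_layout[12] = {
  0x01, 0x01,         /* NOP, NOP - pad the option to a 4-byte boundary */
  0x08, 0x0A,         /* kind = 8 (timestamp), length = 10 */
  0, 0, 0, 0,         /* TSval: htonl(sys_now()) */
  0, 0, 0, 0          /* TSecr: htonl(pcb->ts_recent) */
};
#endif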
833
834 /** Send an ACK without data.
835 *
836 * @param pcb Protocol control block for the TCP connection to send the ACK
837 */
838 err_t
839 tcp_send_empty_ack(struct tcp_pcb *pcb)
840 {
841 struct pbuf *p;
842 struct tcp_hdr *tcphdr;
843 u8_t optlen = 0;
844
845 #if LWIP_TCP_TIMESTAMPS
846 if (pcb->flags & TF_TIMESTAMP) {
847 optlen = LWIP_TCP_OPT_LENGTH(TF_SEG_OPTS_TS);
848 }
849 #endif
850
851 p = tcp_output_alloc_header(pcb, optlen, 0, htonl(pcb->snd_nxt));
852 if (p == NULL) {
853 LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n"));
854 return ERR_BUF;
855 }
856 tcphdr = (struct tcp_hdr *)p->payload;
857 LWIP_DEBUGF(TCP_OUTPUT_DEBUG,
858 ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt));
859 /* remove ACK flags from the PCB, as we send an empty ACK now */
860 pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW);
861
862 /* NB. MSS option is only sent on SYNs, so ignore it here */
863 #if LWIP_TCP_TIMESTAMPS
864 pcb->ts_lastacksent = pcb->rcv_nxt;
865
866 if (pcb->flags & TF_TIMESTAMP) {
867 tcp_build_timestamp_option(pcb, (u32_t *)(tcphdr + 1));
868 }
869 #endif
870
871 #if CHECKSUM_GEN_TCP
872 tcphdr->chksum = inet_chksum_pseudo(p, &(pcb->local_ip), &(pcb->remote_ip),
873 IP_PROTO_TCP, p->tot_len);
874 #endif
875 #if LWIP_NETIF_HWADDRHINT
876 ip_output_hinted(p, &(pcb->local_ip), &(pcb->remote_ip), pcb->ttl, pcb->tos,
877 IP_PROTO_TCP, &(pcb->addr_hint));
878 #else /* LWIP_NETIF_HWADDRHINT*/
879 ip_output(p, &(pcb->local_ip), &(pcb->remote_ip), pcb->ttl, pcb->tos,
880 IP_PROTO_TCP);
881 #endif /* LWIP_NETIF_HWADDRHINT*/
882 pbuf_free(p);
883
884 return ERR_OK;
885 }
886
887 /**
888 * Find out what we can send and send it
889 *
890 * @param pcb Protocol control block for the TCP connection to send data
891 * @return ERR_OK if data has been sent or nothing to send
892 * another err_t on error
893 */
894 err_t
895 tcp_output(struct tcp_pcb *pcb)
896 {
897 struct tcp_seg *seg, *useg;
898 u32_t wnd, snd_nxt;
899 #if TCP_CWND_DEBUG
900 s16_t i = 0;
901 #endif /* TCP_CWND_DEBUG */
902
903 /* First, check if we are invoked by the TCP input processing
904 code. If so, we do not output anything. Instead, we rely on the
905 input processing code to call us again when the input processing
906 is done. */
907 if (tcp_input_pcb == pcb) {
908 return ERR_OK;
909 }
910
911 wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd);
912
913 seg = pcb->unsent;
914
915 /* If the TF_ACK_NOW flag is set and no data will be sent (either
916 * because the ->unsent queue is empty or because the window does
917 * not allow it), construct an empty ACK segment and send it.
918 *
919 * If data is to be sent, we will just piggyback the ACK (see below).
920 */
921 if (pcb->flags & TF_ACK_NOW &&
922 (seg == NULL ||
923 ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd)) {
924 return tcp_send_empty_ack(pcb);
925 }
926
927 /* useg should point to last segment on unacked queue */
928 useg = pcb->unacked;
929 if (useg != NULL) {
930 for (; useg->next != NULL; useg = useg->next);
931 }
932
933 #if TCP_OUTPUT_DEBUG
934 if (seg == NULL) {
935 LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n",
936 (void*)pcb->unsent));
937 }
938 #endif /* TCP_OUTPUT_DEBUG */
939 #if TCP_CWND_DEBUG
940 if (seg == NULL) {
941 LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"U16_F
942 ", cwnd %"U16_F", wnd %"U32_F
943 ", seg == NULL, ack %"U32_F"\n",
944 pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack));
945 } else {
946 LWIP_DEBUGF(TCP_CWND_DEBUG,
947 ("tcp_output: snd_wnd %"U16_F", cwnd %"U16_F", wnd %"U32_F
948 ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n",
949 pcb->snd_wnd, pcb->cwnd, wnd,
950 ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len,
951 ntohl(seg->tcphdr->seqno), pcb->lastack));
952 }
953 #endif /* TCP_CWND_DEBUG */
954 /* data available and window allows it to be sent? */
955 while (seg != NULL &&
956 ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) {
957 LWIP_ASSERT("RST not expected here!",
958 (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0);
959 /* Stop sending if the Nagle algorithm would prevent it.
960 * Don't stop:
961 * - if tcp_write had a memory error before (prevent delayed ACK timeout) or
962 * - if FIN was already enqueued for this PCB (SYN is always alone in a segment -
963 * either seg->next != NULL or pcb->unacked == NULL);
964 * RST is not sent using tcp_write/tcp_output.
965 */
966 if((tcp_do_output_nagle(pcb) == 0) &&
967 ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)){
968 break;
969 }
970 #if TCP_CWND_DEBUG
971 LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"U16_F", cwnd %"U16_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n",
972 pcb->snd_wnd, pcb->cwnd, wnd,
973 ntohl(seg->tcphdr->seqno) + seg->len -
974 pcb->lastack,
975 ntohl(seg->tcphdr->seqno), pcb->lastack, i));
976 ++i;
977 #endif /* TCP_CWND_DEBUG */
978
979 pcb->unsent = seg->next;
980
981 if (pcb->state != SYN_SENT) {
982 TCPH_SET_FLAG(seg->tcphdr, TCP_ACK);
983 pcb->flags &= ~(TF_ACK_DELAY | TF_ACK_NOW);
984 }
985
986 tcp_output_segment(seg, pcb);
987 snd_nxt = ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg);
988 if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) {
989 pcb->snd_nxt = snd_nxt;
990 }
991 /* put segment on unacknowledged list if length > 0 */
992 if (TCP_TCPLEN(seg) > 0) {
993 seg->next = NULL;
994 /* unacked list is empty? */
995 if (pcb->unacked == NULL) {
996 pcb->unacked = seg;
997 useg = seg;
998 /* unacked list is not empty? */
999 } else {
1000 /* In the case of fast retransmit, the packet should not go to the tail
1001 * of the unacked queue, but rather somewhere before it. We need to check for
1002 * this case. -STJ Jul 27, 2004 */
1003 if (TCP_SEQ_LT(ntohl(seg->tcphdr->seqno), ntohl(useg->tcphdr->seqno))) {
1004 /* add segment to before tail of unacked list, keeping the list sorted */
1005 struct tcp_seg **cur_seg = &(pcb->unacked);
1006 while (*cur_seg &&
1007 TCP_SEQ_LT(ntohl((*cur_seg)->tcphdr->seqno), ntohl(seg->tcphdr->seqno))) {
1008 cur_seg = &((*cur_seg)->next );
1009 }
1010 seg->next = (*cur_seg);
1011 (*cur_seg) = seg;
1012 } else {
1013 /* add segment to tail of unacked list */
1014 useg->next = seg;
1015 useg = useg->next;
1016 }
1017 }
1018 /* do not queue empty segments on the unacked list */
1019 } else {
1020 tcp_seg_free(seg);
1021 }
1022 seg = pcb->unsent;
1023 }
1024 #if TCP_OVERSIZE
1025 if (pcb->unsent == NULL) {
1026 /* last unsent has been removed, reset unsent_oversize */
1027 pcb->unsent_oversize = 0;
1028 }
1029 #endif /* TCP_OVERSIZE */
1030
1031 if (seg != NULL && pcb->persist_backoff == 0 &&
1032 ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > pcb->snd_wnd) {
1033 /* prepare for persist timer */
1034 pcb->persist_cnt = 0;
1035 pcb->persist_backoff = 1;
1036 }
1037
1038 pcb->flags &= ~TF_NAGLEMEMERR;
1039 return ERR_OK;
1040 }
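/* A minimal sketch (not part of lwIP) of how an application opts out of the Nagle check
 * performed above: setting TF_NODELAY on the pcb makes tcp_do_output_nagle() always allow
 * sending. 'example_disable_nagle' is a made-up name; lwIP applications normally use the
 * tcp_nagle_disable() macro for this. */
#if 0
static void
example_disable_nagle(struct tcp_pcb *pcb)
{
  pcb->flags |= TF_NODELAY;
}
#endif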
1041
1042 /**
1043 * Called by tcp_output() to actually send a TCP segment over IP.
1044 *
1045 * @param seg the tcp_seg to send
1046 * @param pcb the tcp_pcb for the TCP connection used to send the segment
1047 */
1048 static void
1049 tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb)
1050 {
1051 u16_t len;
1052 struct netif *netif;
1053 u32_t *opts;
1054
1055 /** @bug Exclude retransmitted segments from this count. */
1056 snmp_inc_tcpoutsegs();
1057
1058 /* The TCP header has already been constructed, but the ackno and
1059 wnd fields remain. */
1060 seg->tcphdr->ackno = htonl(pcb->rcv_nxt);
1061
1062 /* advertise our receive window size in this TCP segment */
1063 seg->tcphdr->wnd = htons(pcb->rcv_ann_wnd);
1064
1065 pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;
1066
1067 /* Add any requested options. NB MSS option is only set on SYN
1068 packets, so ignore it here */
1069 LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % MEM_ALIGNMENT) == 0);
1070 opts = (u32_t *)(void *)(seg->tcphdr + 1);
1071 if (seg->flags & TF_SEG_OPTS_MSS) {
1072 TCP_BUILD_MSS_OPTION(*opts);
1073 opts += 1;
1074 }
1075 #if LWIP_TCP_TIMESTAMPS
1076 pcb->ts_lastacksent = pcb->rcv_nxt;
1077
1078 if (seg->flags & TF_SEG_OPTS_TS) {
1079 tcp_build_timestamp_option(pcb, opts);
1080 opts += 3;
1081 }
1082 #endif
1083
1084 /* Set the retransmission timer running if it is not currently enabled.
1085 This must be set before checking the route. */
1086 if (pcb->rtime == -1) {
1087 pcb->rtime = 0;
1088 }
1089
1090 /* If we don't have a local IP address, we get one by
1091 calling ip_route(). */
1092 if (ip_addr_isany(&(pcb->local_ip))) {
1093 netif = ip_route(&(pcb->remote_ip));
1094 if (netif == NULL) {
1095 return;
1096 }
1097 ip_addr_copy(pcb->local_ip, netif->ip_addr);
1098 }
1099
1100 if (pcb->rttest == 0) {
1101 pcb->rttest = tcp_ticks;
1102 pcb->rtseq = ntohl(seg->tcphdr->seqno);
1103
1104 LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq));
1105 }
1106 LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n",
1107 htonl(seg->tcphdr->seqno), htonl(seg->tcphdr->seqno) +
1108 seg->len));
1109
1110 len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload);
1111
1112 seg->p->len -= len;
1113 seg->p->tot_len -= len;
1114
1115 seg->p->payload = seg->tcphdr;
1116
1117 seg->tcphdr->chksum = 0;
1118 #if CHECKSUM_GEN_TCP
1119 #if TCP_CHECKSUM_ON_COPY
1120 {
1121 u32_t acc;
1122 #if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
1123 u16_t chksum_slow = inet_chksum_pseudo(seg->p, &(pcb->local_ip),
1124 &(pcb->remote_ip),
1125 IP_PROTO_TCP, seg->p->tot_len);
1126 #endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
1127 if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) {
1128 LWIP_ASSERT("data included but not checksummed",
1129 seg->p->tot_len == (TCPH_HDRLEN(seg->tcphdr) * 4));
1130 }
1131
1132 /* rebuild TCP header checksum (TCP header changes for retransmissions!) */
1133 acc = inet_chksum_pseudo_partial(seg->p, &(pcb->local_ip),
1134 &(pcb->remote_ip),
1135 IP_PROTO_TCP, seg->p->tot_len, TCPH_HDRLEN(seg->tcphdr) * 4);
1136 /* add payload checksum */
1137 if (seg->chksum_swapped) {
1138 seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum);
1139 seg->chksum_swapped = 0;
1140 }
1141 acc += (u16_t)~(seg->chksum);
1142 seg->tcphdr->chksum = FOLD_U32T(acc);
1143 #if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
1144 if (chksum_slow != seg->tcphdr->chksum) {
1145 LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING,
1146 ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n",
1147 seg->tcphdr->chksum, chksum_slow));
1148 seg->tcphdr->chksum = chksum_slow;
1149 }
1150 #endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
1151 }
1152 #else /* TCP_CHECKSUM_ON_COPY */
1153 seg->tcphdr->chksum = inet_chksum_pseudo(seg->p, &(pcb->local_ip),
1154 &(pcb->remote_ip),
1155 IP_PROTO_TCP, seg->p->tot_len);
1156 #endif /* TCP_CHECKSUM_ON_COPY */
1157 #endif /* CHECKSUM_GEN_TCP */
1158 TCP_STATS_INC(tcp.xmit);
1159
1160 #if LWIP_NETIF_HWADDRHINT
1161 ip_output_hinted(seg->p, &(pcb->local_ip), &(pcb->remote_ip), pcb->ttl, pcb->tos,
1162 IP_PROTO_TCP, &(pcb->addr_hint));
1163 #else /* LWIP_NETIF_HWADDRHINT*/
1164 ip_output(seg->p, &(pcb->local_ip), &(pcb->remote_ip), pcb->ttl, pcb->tos,
1165 IP_PROTO_TCP);
1166 #endif /* LWIP_NETIF_HWADDRHINT*/
1167 }
1168
1169 /**
1170 * Send a TCP RESET packet (empty segment with RST flag set) either to
1171 * abort a connection or to show that there is no matching local connection
1172 * for a received segment.
1173 *
1174 * Called by tcp_abort() (to abort a local connection), tcp_input() (if no
1175 * matching local pcb was found), tcp_listen_input() (if incoming segment
1176 * has ACK flag set) and tcp_process() (received segment in the wrong state)
1177 *
1178 * Since a RST segment is in most cases not sent for an active connection,
1179 * tcp_rst() takes explicit arguments for the values that the other segment
1180 * output functions normally take from a tcp_pcb.
1181 *
1182 * @param seqno the sequence number to use for the outgoing segment
1183 * @param ackno the acknowledge number to use for the outgoing segment
1184 * @param local_ip the local IP address to send the segment from
1185 * @param remote_ip the remote IP address to send the segment to
1186 * @param local_port the local TCP port to send the segment from
1187 * @param remote_port the remote TCP port to send the segment to
1188 */
1189 void
1190 tcp_rst(u32_t seqno, u32_t ackno,
1191 ip_addr_t *local_ip, ip_addr_t *remote_ip,
1192 u16_t local_port, u16_t remote_port)
1193 {
1194 struct pbuf *p;
1195 struct tcp_hdr *tcphdr;
1196 p = pbuf_alloc(PBUF_IP, TCP_HLEN, PBUF_RAM);
1197 if (p == NULL) {
1198 LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n"));
1199 return;
1200 }
1201 LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr",
1202 (p->len >= sizeof(struct tcp_hdr)));
1203
1204 tcphdr = (struct tcp_hdr *)p->payload;
1205 tcphdr->src = htons(local_port);
1206 tcphdr->dest = htons(remote_port);
1207 tcphdr->seqno = htonl(seqno);
1208 tcphdr->ackno = htonl(ackno);
1209 TCPH_HDRLEN_FLAGS_SET(tcphdr, TCP_HLEN/4, TCP_RST | TCP_ACK);
1210 tcphdr->wnd = PP_HTONS(TCP_WND);
1211 tcphdr->chksum = 0;
1212 tcphdr->urgp = 0;
1213
1214 #if CHECKSUM_GEN_TCP
1215 tcphdr->chksum = inet_chksum_pseudo(p, local_ip, remote_ip,
1216 IP_PROTO_TCP, p->tot_len);
1217 #endif
1218 TCP_STATS_INC(tcp.xmit);
1219 snmp_inc_tcpoutrsts();
1220 /* Send output with hardcoded TTL since we have no access to the pcb */
1221 ip_output(p, local_ip, remote_ip, TCP_TTL, 0, IP_PROTO_TCP);
1222 pbuf_free(p);
1223 LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno));
1224 }
1225
1226 /**
1227 * Requeue all unacked segments for retransmission
1228 *
1229 * Called by tcp_slowtmr() for slow retransmission.
1230 *
1231 * @param pcb the tcp_pcb for which to re-enqueue all unacked segments
1232 */
1233 void
1234 tcp_rexmit_rto(struct tcp_pcb *pcb)
1235 {
1236 struct tcp_seg *seg;
1237
1238 if (pcb->unacked == NULL) {
1239 return;
1240 }
1241
1242 /* Move all unacked segments to the head of the unsent queue */
1243 for (seg = pcb->unacked; seg->next != NULL; seg = seg->next);
1244 /* concatenate unsent queue after unacked queue */
1245 seg->next = pcb->unsent;
1246 /* unsent queue is the concatenated queue (of unacked, unsent) */
1247 pcb->unsent = pcb->unacked;
1248 /* unacked queue is now empty */
1249 pcb->unacked = NULL;
1250
1251 /* increment number of retransmissions */
1252 ++pcb->nrtx;
1253
1254 /* Don't take any RTT measurements after retransmitting. */
1255 pcb->rttest = 0;
1256
1257 /* Do the actual retransmission */
1258 tcp_output(pcb);
1259 }
1260
1261 /**
1262 * Requeue the first unacked segment for retransmission
1263 *
1264 * Called by tcp_receive() for fast retransmit.
1265 *
1266 * @param pcb the tcp_pcb for which to retransmit the first unacked segment
1267 */
1268 void
1269 tcp_rexmit(struct tcp_pcb *pcb)
1270 {
1271 struct tcp_seg *seg;
1272 struct tcp_seg **cur_seg;
1273
1274 if (pcb->unacked == NULL) {
1275 return;
1276 }
1277
1278 /* Move the first unacked segment to the unsent queue */
1279 /* Keep the unsent queue sorted. */
1280 seg = pcb->unacked;
1281 pcb->unacked = seg->next;
1282
1283 cur_seg = &(pcb->unsent);
1284 while (*cur_seg &&
1285 TCP_SEQ_LT(ntohl((*cur_seg)->tcphdr->seqno), ntohl(seg->tcphdr->seqno))) {
1286 cur_seg = &((*cur_seg)->next );
1287 }
1288 seg->next = *cur_seg;
1289 *cur_seg = seg;
1290
1291 ++pcb->nrtx;
1292
1293 /* Don't take any rtt measurements after retransmitting. */
1294 pcb->rttest = 0;
1295
1296 /* Do the actual retransmission. */
1297 snmp_inc_tcpretranssegs();
1298 /* No need to call tcp_output: we are always called from tcp_input()
1299 and thus tcp_output directly returns. */
1300 }
1301
1302
1303 /**
1304 * Handle retransmission after three dupacks received
1305 *
1306 * @param pcb the tcp_pcb for which to retransmit the first unacked segment
1307 */
1308 void
1309 tcp_rexmit_fast(struct tcp_pcb *pcb)
1310 {
1311 if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) {
1312 /* This is fast retransmit. Retransmit the first unacked segment. */
1313 LWIP_DEBUGF(TCP_FR_DEBUG,
1314 ("tcp_receive: dupacks %"U16_F" (%"U32_F
1315 "), fast retransmit %"U32_F"\n",
1316 (u16_t)pcb->dupacks, pcb->lastack,
1317 ntohl(pcb->unacked->tcphdr->seqno)));
1318 tcp_rexmit(pcb);
1319
1320 /* Set ssthresh to half of the minimum of the current
1321 * cwnd and the advertised window */
1322 if (pcb->cwnd > pcb->snd_wnd) {
1323 pcb->ssthresh = pcb->snd_wnd / 2;
1324 } else {
1325 pcb->ssthresh = pcb->cwnd / 2;
1326 }
1327
1328 /* The minimum value for ssthresh should be 2 MSS */
1329 if (pcb->ssthresh < 2*pcb->mss) {
1330 LWIP_DEBUGF(TCP_FR_DEBUG,
1331 ("tcp_receive: The minimum value for ssthresh %"U16_F
1332 " should be min 2 mss %"U16_F"...\n",
1333 pcb->ssthresh, 2*pcb->mss));
1334 pcb->ssthresh = 2*pcb->mss;
1335 }
1336
1337 pcb->cwnd = pcb->ssthresh + 3 * pcb->mss;
1338 pcb->flags |= TF_INFR;
1339 }
1340 }
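/* A worked sketch (not lwIP code, values are assumptions) of the window update above for
 * mss = 1460, cwnd = 8 * mss and snd_wnd = 4 * mss: ssthresh becomes
 * min(cwnd, snd_wnd) / 2 = 5840 / 2 = 2920, which already equals 2 * mss, and the new
 * cwnd becomes ssthresh + 3 * mss = 2920 + 4380 = 7300. */
#if 0
static void
example_fast_retransmit_window(void)
{
  u16_t mss = 1460;
  u16_t cwnd = 8 * 1460;                         /* 11680 */
  u16_t snd_wnd = 4 * 1460;                      /* 5840 */
  u16_t ssthresh = LWIP_MIN(cwnd, snd_wnd) / 2;  /* 2920 */
  if (ssthresh < 2 * mss) {
    ssthresh = 2 * mss;                          /* not needed here: 2920 == 2 * mss */
  }
  cwnd = ssthresh + 3 * mss;                     /* 7300 */
  (void)cwnd;
}
#endif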
1341
1342
1343 /**
1344 * Send keepalive packets to keep a connection active even though
1345 * no data is sent over it.
1346 *
1347 * Called by tcp_slowtmr()
1348 *
1349 * @param pcb the tcp_pcb for which to send a keepalive packet
1350 */
1351 void
1352 tcp_keepalive(struct tcp_pcb *pcb)
1353 {
1354 struct pbuf *p;
1355 struct tcp_hdr *tcphdr;
1356
1357 LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
1358 ip4_addr1_16(&pcb->remote_ip), ip4_addr2_16(&pcb->remote_ip),
1359 ip4_addr3_16(&pcb->remote_ip), ip4_addr4_16(&pcb->remote_ip)));
1360
1361 LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
1362 tcp_ticks, pcb->tmr, pcb->keep_cnt_sent));
1363
1364 p = tcp_output_alloc_header(pcb, 0, 0, htonl(pcb->snd_nxt - 1));
1365 if(p == NULL) {
1366 LWIP_DEBUGF(TCP_DEBUG,
1367 ("tcp_keepalive: could not allocate memory for pbuf\n"));
1368 return;
1369 }
1370 tcphdr = (struct tcp_hdr *)p->payload;
1371
1372 #if CHECKSUM_GEN_TCP
1373 tcphdr->chksum = inet_chksum_pseudo(p, &pcb->local_ip, &pcb->remote_ip,
1374 IP_PROTO_TCP, p->tot_len);
1375 #endif
1376 TCP_STATS_INC(tcp.xmit);
1377
1378 /* Send output to IP */
1379 #if LWIP_NETIF_HWADDRHINT
1380 ip_output_hinted(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP,
1381 &(pcb->addr_hint));
1382 #else /* LWIP_NETIF_HWADDRHINT*/
1383 ip_output(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP);
1384 #endif /* LWIP_NETIF_HWADDRHINT*/
1385
1386 pbuf_free(p);
1387
1388 LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F".\n",
1389 pcb->snd_nxt - 1, pcb->rcv_nxt));
1390 }
1391
1392
1393 /**
1394 * Send persist timer zero-window probes to keep a connection active
1395 * when a window update is lost.
1396 *
1397 * Called by tcp_slowtmr()
1398 *
1399 * @param pcb the tcp_pcb for which to send a zero-window probe packet
1400 */
1401 void
1402 tcp_zero_window_probe(struct tcp_pcb *pcb)
1403 {
1404 struct pbuf *p;
1405 struct tcp_hdr *tcphdr;
1406 struct tcp_seg *seg;
1407 u16_t len;
1408 u8_t is_fin;
1409
1410 LWIP_DEBUGF(TCP_DEBUG,
1411 ("tcp_zero_window_probe: sending ZERO WINDOW probe to %"
1412 U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
1413 ip4_addr1_16(&pcb->remote_ip), ip4_addr2_16(&pcb->remote_ip),
1414 ip4_addr3_16(&pcb->remote_ip), ip4_addr4_16(&pcb->remote_ip)));
1415
1416 LWIP_DEBUGF(TCP_DEBUG,
1417 ("tcp_zero_window_probe: tcp_ticks %"U32_F
1418 " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
1419 tcp_ticks, pcb->tmr, pcb->keep_cnt_sent));
1420
1421 seg = pcb->unacked;
1422
1423 if(seg == NULL) {
1424 seg = pcb->unsent;
1425 }
1426 if(seg == NULL) {
1427 return;
1428 }
1429
1430 is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0);
1431 /* we want to send one seqno: either FIN or data (no options) */
1432 len = is_fin ? 0 : 1;
1433
1434 p = tcp_output_alloc_header(pcb, 0, len, seg->tcphdr->seqno);
1435 if(p == NULL) {
1436 LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n"));
1437 return;
1438 }
1439 tcphdr = (struct tcp_hdr *)p->payload;
1440
1441 if (is_fin) {
1442 /* FIN segment, no data */
1443 TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN);
1444 } else {
1445 /* Data segment: copy in one byte from the head of the unacked (or unsent) queue */
1446 struct tcp_hdr *thdr = (struct tcp_hdr *)seg->p->payload;
1447 char *d = ((char *)p->payload + TCP_HLEN);
1448 pbuf_copy_partial(seg->p, d, 1, TCPH_HDRLEN(thdr) * 4);
1449 }
1450
1451 #if CHECKSUM_GEN_TCP
1452 tcphdr->chksum = inet_chksum_pseudo(p, &pcb->local_ip, &pcb->remote_ip,
1453 IP_PROTO_TCP, p->tot_len);
1454 #endif
1455 TCP_STATS_INC(tcp.xmit);
1456
1457 /* Send output to IP */
1458 #if LWIP_NETIF_HWADDRHINT
1459 ip_output_hinted(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP,
1460 &(pcb->addr_hint));
1461 #else /* LWIP_NETIF_HWADDRHINT*/
1462 ip_output(p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl, 0, IP_PROTO_TCP);
1463 #endif /* LWIP_NETIF_HWADDRHINT*/
1464
1465 pbuf_free(p);
1466
1467 LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F
1468 " ackno %"U32_F".\n",
1469 pcb->snd_nxt - 1, pcb->rcv_nxt));
1470 }
1471 #endif /* LWIP_TCP */