
List:       git-commits-head
Subject:    [SCTP] Only consider C-E bundling up until C-E has been sent.
From:       Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
Date:       2003-02-28 21:14:38

ChangeSet 1.914.192.5, 2003/02/28 15:14:38-06:00, jgrimm@touki.austin.ibm.com

	[SCTP] Only consider C-E bundling up until C-E has been sent.
	
	Yes, it is _that_ obvious.  If someone does a connect() (it's not
	required, but one can), the C-E may already have been sent by the
	time the first DATA is available.  Don't factor the C-E bundling
	overhead into the fragmentation math if we've already sent the C-E.
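	
	To illustrate (a minimal sketch, not the code being patched, and
	assuming the usual state ordering CLOSED < COOKIE_WAIT <
	COOKIE_ECHOED < ESTABLISHED), the sizing decision that the
	sm_make_chunk.c hunk below corrects amounts to:
	
		/* Reserve room for a bundled COOKIE-ECHO only while the
		 * C-E has not yet gone out, i.e. strictly before the
		 * COOKIE_ECHOED state. */
		size_t first_fragment_len(sctp_state_t state, size_t max)
		{
			if (state < SCTP_STATE_COOKIE_ECHOED)
				return max - SCTP_ARBITRARY_COOKIE_ECHO_LEN;
			return max;
		}
	
	Here first_fragment_len() is a hypothetical helper; the real code
	inlines this check when computing first_len.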


# This patch includes the following deltas:
#	           ChangeSet	1.914.192.4 -> 1.914.192.5
#	net/sctp/associola.c	1.33    -> 1.34   
#	net/sctp/sm_make_chunk.c	1.30    -> 1.31   
#	 net/sctp/ulpqueue.c	1.12    -> 1.12.1.1
#	   net/sctp/socket.c	1.42    -> 1.43   
#

 associola.c     |    9 ++++-----
 sm_make_chunk.c |    2 +-
 socket.c        |    1 -
 ulpqueue.c      |   43 ++++++++++++++++++++-----------------------
 4 files changed, 25 insertions(+), 30 deletions(-)
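
A note on the associola.c hunk that follows: frag_point is the
association PMTU minus the fixed per-packet overhead, so that each
DATA fragment fits in a single IP datagram.  As a rough worked example
(the exact SCTP_IP_OVERHEAD value is an assumption here): with a
1500-byte PMTU, a 20-byte IPv4 header, the 12-byte SCTP common header,
and a 16-byte DATA chunk header, the usable payload per fragment would
be 1500 - 20 - 12 - 16 = 1452 bytes.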


diff -Nru a/net/sctp/associola.c b/net/sctp/associola.c
--- a/net/sctp/associola.c	Thu Mar 20 23:07:02 2003
+++ b/net/sctp/associola.c	Thu Mar 20 23:07:02 2003
@@ -413,17 +413,16 @@
 	 * If not and the current association PMTU is higher than the new
 	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
 	 */
-	if (asoc->pmtu) {
+	if (asoc->pmtu)
 		asoc->pmtu = min_t(int, peer->pmtu, asoc->pmtu);
-	} else {
+	else
 		asoc->pmtu = peer->pmtu;
-	}
 
 	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
 			  "%d\n", asoc, asoc->pmtu);
 
-	asoc->frag_point = asoc->pmtu -
-		(SCTP_IP_OVERHEAD + sizeof(sctp_data_chunk_t));
+	asoc->frag_point = asoc->pmtu;
+	asoc->frag_point -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
 
 	/* The asoc->peer.port might not be meaningful yet, but
 	 * initialize the packet structure anyway.
diff -Nru a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
--- a/net/sctp/sm_make_chunk.c	Thu Mar 20 23:07:02 2003
+++ b/net/sctp/sm_make_chunk.c	Thu Mar 20 23:07:02 2003
@@ -1153,7 +1153,7 @@
 	first_len = max;
 
 	/* Encourage Cookie-ECHO bundling. */
-	if (asoc->state < SCTP_STATE_ESTABLISHED) {
+	if (asoc->state < SCTP_STATE_COOKIE_ECHOED) {
 		whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN);
 
 		/* Account for the DATA to be bundled with the COOKIE-ECHO. */
diff -Nru a/net/sctp/socket.c b/net/sctp/socket.c
--- a/net/sctp/socket.c	Thu Mar 20 23:07:02 2003
+++ b/net/sctp/socket.c	Thu Mar 20 23:07:02 2003
@@ -1775,7 +1775,6 @@
 				       int *optlen)
 {
 	struct sctp_status status;
-	sctp_endpoint_t *ep;
 	sctp_association_t *assoc = NULL;
 	struct sctp_transport *transport;
 	sctp_assoc_t associd;
diff -Nru a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
--- a/net/sctp/ulpqueue.c	Thu Mar 20 23:07:02 2003
+++ b/net/sctp/ulpqueue.c	Thu Mar 20 23:07:02 2003
@@ -220,7 +220,7 @@
 	if (sctp_event2skb(event)->list)
 		sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
 	else
-		skb_queue_tail(queue, sctp_event2skb(event));
+		__skb_queue_tail(queue, sctp_event2skb(event));
 
 	/* Did we just complete partial delivery and need to get
 	 * rolling again?  Move pending data to the receive
@@ -247,14 +247,14 @@
 static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
 					 struct sctp_ulpevent *event)
 {
-	struct sk_buff *pos, *tmp;
+	struct sk_buff *pos;
 	struct sctp_ulpevent *cevent;
 	__u32 tsn, ctsn;
 
 	tsn = event->sndrcvinfo.sinfo_tsn;
 
 	/* Find the right place in this list. We store them by TSN.  */
-	sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
 		ctsn = cevent->sndrcvinfo.sinfo_tsn;
 
@@ -334,7 +334,7 @@
  */
 static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
 {
-	struct sk_buff *pos, *tmp;
+	struct sk_buff *pos;
 	struct sctp_ulpevent *cevent;
 	struct sk_buff *first_frag = NULL;
 	__u32 ctsn, next_tsn;
@@ -355,7 +355,7 @@
 	 * fragment in order. If not, first_frag is reset to NULL and we
 	 * start the next pass when we find another first fragment.
 	 */
-	sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
 		ctsn = cevent->sndrcvinfo.sinfo_tsn;
 
@@ -374,29 +374,26 @@
 
 		case SCTP_DATA_LAST_FRAG:
 			if (first_frag && (ctsn == next_tsn))
-				retval = sctp_make_reassembled_event(
-						first_frag, pos);
+				goto found;
 			else
 				first_frag = NULL;
 			break;
 		};
 
-		/* We have the reassembled event. There is no need to look
-		 * further.
-		 */
-		if (retval) {
-			retval->msg_flags |= MSG_EOR;
-			break;
-		}
 	}
-
+done:
 	return retval;
+found:
+	retval = sctp_make_reassembled_event(first_frag, pos);
+	if (retval)
+		retval->msg_flags |= MSG_EOR;
+	goto done;
 }
 
 /* Retrieve the next set of fragments of a partial message. */
 static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 {
-	struct sk_buff *pos, *tmp, *last_frag, *first_frag;
+	struct sk_buff *pos, *last_frag, *first_frag;
 	struct sctp_ulpevent *cevent;
 	__u32 ctsn, next_tsn;
 	int is_last;
@@ -415,7 +412,7 @@
 	next_tsn = 0;
 	is_last = 0;
 
-	sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
 		ctsn = cevent->sndrcvinfo.sinfo_tsn;
 
@@ -448,7 +445,7 @@
 	 */
 done:
 	retval = sctp_make_reassembled_event(first_frag, last_frag);
-	if (is_last)
+	if (retval && is_last)
 		retval->msg_flags |= MSG_EOR;
 
 	return retval;
@@ -490,7 +487,7 @@
 /* Retrieve the first part (sequential fragments) for partial delivery.  */
 static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 {
-	struct sk_buff *pos, *tmp, *last_frag, *first_frag;
+	struct sk_buff *pos, *last_frag, *first_frag;
 	struct sctp_ulpevent *cevent;
 	__u32 ctsn, next_tsn;
 	struct sctp_ulpevent *retval;
@@ -507,7 +504,7 @@
 	retval = NULL;
 	next_tsn = 0;
 
-	sctp_skb_for_each(pos, &ulpq->reasm, tmp) {
+	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
 		ctsn = cevent->sndrcvinfo.sinfo_tsn;
 
@@ -590,7 +587,7 @@
 static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 					   struct sctp_ulpevent *event)
 {
-	struct sk_buff *pos, *tmp;
+	struct sk_buff *pos;
 	struct sctp_ulpevent *cevent;
 	__u16 sid, csid;
 	__u16 ssn, cssn;
@@ -601,7 +598,7 @@
 	/* Find the right place in this list.  We store them by
 	 * stream ID and then by SSN.
 	 */
-	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+	skb_queue_walk(&ulpq->lobby, pos) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
 		csid = cevent->sndrcvinfo.sinfo_stream;
 		cssn = cevent->sndrcvinfo.sinfo_ssn;
@@ -786,7 +783,7 @@
 					      SCTP_PARTIAL_DELIVERY_ABORTED,
 					      priority);
 	if (ev)
-		skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));
+		__skb_queue_tail(&sk->receive_queue, sctp_event2skb(ev));
 
 	/* If there is data waiting, send it up the socket now. */
 	if (sctp_ulpq_clear_pd(ulpq) || ev)
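
A side note on the ulpqueue.c hunks above: these loops only inspect
the reassembly and lobby queues and never unlink an skb mid-walk, so
the removal-safe sctp_skb_for_each() (which carries a tmp cursor) can
become the plain read-only walker.  A minimal usage sketch, assuming
skb_queue_walk() from <linux/skbuff.h>:

	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;

	/* Read-only traversal: safe as long as nothing is unlinked. */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		/* e.g. compare cevent->sndrcvinfo.sinfo_tsn against tsn */
	}

Likewise, skb_queue_tail() becomes __skb_queue_tail(), the unlocked
variant, presumably because the callers already serialize access to
the socket receive queue.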