How to run a TCP test until all data is acknowledged

295 views
Skip to first unread message

Debobroto Das

unread,
May 6, 2020, 4:57:22 PM5/6/20
to iperf-dev
I am running an iPerf3 test with version 3.6. 
Client Side Command is : 

client side output is : iperf3 --client 2001:1:1:1:0:0003:0001:0001 --port 22001 --cport 32005 --connect-timeout 9999   --set-mss 1400 -w 32M -k 512 -b 1M -l 1024  --json --logfile /home/deba/Desktop/PyDcnTE/testAndMeasurement/TEST_RESULTS/ECMP2-h0p0l0-h1p3l1

{
"start": {
"connected": [{
"socket": 6,
"local_host": "2001:1:1:1::",
"local_port": 32003,
"remote_host": "2001:1:1:1:0:1:1:1",
"remote_port": 22003
}],
"version": "iperf 3.6",
"system_info": "Linux deba 5.3.0-51-generic #44-Ubuntu SMP Wed Apr 22 21:09:44 UTC 2020 x86_64",
"timestamp": {
"time": "Wed, 06 May 2020 20:45:16 GMT",
"timesecs": 1588797916
},
"connecting_to": {
"host": "2001:1:1:1:0:0001:0001:0001",
"port": 22003
},
"cookie": "wsplstq7rfg6wvs2tg4f4yvqddwknamorn5g",
"tcp_mss": 1400,
"sock_bufsize": 33554432,
"sndbuf_actual": 33554432,
"rcvbuf_actual": 33554432,
"test_start": {
"protocol": "TCP",
"num_streams": 1,
"blksize": 1024,
"omit": 0,
"duration": 0,
"bytes": 0,
"blocks": 512,
"reverse": 0,
"tos": 0
}
},
"intervals": [{
"streams": [{
"socket": 6,
"start": 0,
"end": 3.8311259746551514,
"seconds": 3.8311259746551514,
"bytes": 1024,
"bits_per_second": 2138.2747667902986,
"retransmits": 2,
"snd_cwnd": 13880,
"rtt": 4130742,
"rttvar": 1675515,
"pmtu": 1500,
"omitted": false
}],
"sum": {
"start": 0,
"end": 3.8311259746551514,
"seconds": 3.8311259746551514,
"bytes": 1024,
"bits_per_second": 2138.2747667902986,
"retransmits": 2,
"omitted": false
}
}, {
"streams": [{
"socket": 6,
"start": 3.8311560153961182,
"end": 4.0000650882720947,
"seconds": 0.16890907287597656,
"bytes": 499712,
"bits_per_second": 23667739.878812518,
"retransmits": 0,
"snd_cwnd": 13880,
"rtt": 4130742,
"rttvar": 1675515,
"pmtu": 1500,
"omitted": false
}],
"sum": {
"start": 3.8311560153961182,
"end": 4.0000650882720947,
"seconds": 0.16890907287597656,
"bytes": 499712,
"bits_per_second": 23667739.878812518,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 6,
"start": 4.0000650882720947,
"end": 4.1870839595794678,
"seconds": 0.18701887130737305,
"bytes": 23552,
"bits_per_second": 1007470.5225352938,
"retransmits": 0,
"snd_cwnd": 13880,
"rtt": 4130742,
"rttvar": 1675515,
"pmtu": 1500,
"omitted": false
}],
"sum": {
"start": 4.0000650882720947,
"end": 4.1870839595794678,
"seconds": 0.18701887130737305,
"bytes": 23552,
"bits_per_second": 1007470.5225352938,
"retransmits": 0,
"omitted": false
}
}],
"end": {
"streams": [{
"sender": {
"socket": 6,
"start": 0,
"end": 4.1870839595794678,
"seconds": 4.1870839595794678,
"bytes": 524288,
"bits_per_second": 1001724.3600773788,
"retransmits": 2,
"max_snd_cwnd": 13880,
"max_rtt": 4130742,
"min_rtt": 4130742,
"mean_rtt": 4130742
},
"receiver": {
"socket": 6,
"start": 0,
"end": 7.5810551643371582,
"seconds": 4.1870839595794678,
"bytes": 9352,
"bits_per_second": 9868.8109211960145
}
}],
"sum_sent": {
"start": 0,
"end": 4.1870839595794678,
"seconds": 4.1870839595794678,
"bytes": 524288,
"bits_per_second": 1001724.3600773788,
"retransmits": 2
},
"sum_received": {
"start": 0,
"end": 7.5810551643371582,
"seconds": 7.5810551643371582,
"bytes": 9352,
"bits_per_second": 9868.8109211960145
},
"cpu_utilization_percent": {
"host_total": 9.8738826562728317,
"host_user": 3.9232641193602875,
"host_system": 5.9506142940230751,
"remote_total": 0.001013123231655008,
"remote_user": 0,
"remote_system": 0.001013123231655008
},
"sender_tcp_congestion": "cubic",
"receiver_tcp_congestion": "cubic"
}
}
{
"start": {
"connected": [{
"socket": 6,
"local_host": "2001:1:1:1::",
"local_port": 32004,
"remote_host": "2001:1:1:1:0:1:1:1",
"remote_port": 22004
}],
"version": "iperf 3.6",
"system_info": "Linux deba 5.3.0-51-generic #44-Ubuntu SMP Wed Apr 22 21:09:44 UTC 2020 x86_64",
"timestamp": {
"time": "Wed, 06 May 2020 20:45:17 GMT",
"timesecs": 1588797917
},
"connecting_to": {
"host": "2001:1:1:1:0:0001:0001:0001",
"port": 22004
},
"cookie": "vwlipthb26v7l64caw5rdue2d234cm5c4k2h",
"tcp_mss": 1400,
"sock_bufsize": 33554432,
"sndbuf_actual": 33554432,
"rcvbuf_actual": 33554432,
"test_start": {
"protocol": "TCP",
"num_streams": 1,
"blksize": 1024,
"omit": 0,
"duration": 0,
"bytes": 0,
"blocks": 512,
"reverse": 0,
"tos": 0
}
},
"intervals": [{
"streams": [{
"socket": 6,
"start": 0,
"end": 3.7933268547058105,
"seconds": 3.7933268547058105,
"bytes": 1024,
"bits_per_second": 2159.5818957276028,
"retransmits": 2,
"snd_cwnd": 13880,
"rtt": 3608222,
"rttvar": 1381055,
"pmtu": 1500,
"omitted": false
}],
"sum": {
"start": 0,
"end": 3.7933268547058105,
"seconds": 3.7933268547058105,
"bytes": 1024,
"bits_per_second": 2159.5818957276028,
"retransmits": 2,
"omitted": false
}
}, {
"streams": [{
"socket": 6,
"start": 3.7933530807495117,
"end": 4.00177788734436,
"seconds": 0.20842480659484863,
"bytes": 499712,
"bits_per_second": 19180519.177695647,
"retransmits": 0,
"snd_cwnd": 13880,
"rtt": 3608222,
"rttvar": 1381055,
"pmtu": 1500,
"omitted": false
}],
"sum": {
"start": 3.7933530807495117,
"end": 4.00177788734436,
"seconds": 0.20842480659484863,
"bytes": 499712,
"bits_per_second": 19180519.177695647,
"retransmits": 0,
"omitted": false
}
}, {
"streams": [{
"socket": 6,
"start": 4.00177788734436,
"end": 4.1870839595794678,
"seconds": 0.18530607223510742,
"bytes": 23552,
"bits_per_second": 1016782.6543802993,
"retransmits": 0,
"snd_cwnd": 13880,
"rtt": 3608222,
"rttvar": 1381055,
"pmtu": 1500,
"omitted": false
}],
"sum": {
"start": 4.00177788734436,
"end": 4.1870839595794678,
"seconds": 0.18530607223510742,
"bytes": 23552,
"bits_per_second": 1016782.6543802993,
"retransmits": 0,
"omitted": false
}
}],
"end": {
"streams": [{
"sender": {
"socket": 6,
"start": 0,
"end": 4.1870839595794678,
"seconds": 4.1870839595794678,
"bytes": 524288,
"bits_per_second": 1001724.3600773788,
"retransmits": 2,
"max_snd_cwnd": 13880,
"max_rtt": 3608222,
"min_rtt": 3608222,
"mean_rtt": 3608222
},
"receiver": {
"socket": 6,
"start": 0,
"end": 7.54690408706665,
"seconds": 4.1870839595794678,
"bytes": 13516,
"bits_per_second": 14327.464448011484
}
}],
"sum_sent": {
"start": 0,
"end": 4.1870839595794678,
"seconds": 4.1870839595794678,
"bytes": 524288,
"bits_per_second": 1001724.3600773788,
"retransmits": 2
},
"sum_received": {
"start": 0,
"end": 7.54690408706665,
"seconds": 7.54690408706665,
"bytes": 13516,
"bits_per_second": 14327.464448011484
},

"cpu_utilization_percent": {
"host_total": 9.73222675724573,
"host_user": 3.529030571195038,
"host_system": 6.2031918362791219,
"remote_total": 0.001029517691090115,
"remote_user": 0,
"remote_system": 0.001029517691090115
},
"sender_tcp_congestion": "cubic",
"receiver_tcp_congestion": "cubic"
}
}


I am running the test in a mininet based environment, where I expect packet loss. 

Now, here in the report number of bytes sent (found in sum_sent section) is 524288 and number of bytes received (found in sum_received section) is 13516
So, the total packet loss is  (524288-13516) bytes. Am I correctly interpreting the report?

Next, this is a TCP protocol test, so TCP is supposed to retransmit if some part of the sent data is not acknowledged. But the difference between the number of bytes sent and the number of bytes received shows that iPerf is not retransmitting all of the data. I want to measure the total time (flow completion time) needed to completely deliver the data to the server.
If some packet is lost along the way, I want it to be sent again, and I want to find the total number of actual retransmits for the whole flow.
How can I do these tests?

DavidBO

unread,
May 11, 2020, 5:21:47 AM5/11/20
to iperf-dev
I have submitted enhancement request #994 that may help to improve iperf3 behavior in such cases and allow more data to be received by the server.

However, regarding this specific case, the network throughput is 14327bps, which is about 1.8KB/sec.  That means it will take about 5 minutes until all the 500KB sent will be received.  Is that a real case?  With such low network throughput, data may need to be sent at a lower rate.

Debobroto Das

unread,
May 11, 2020, 2:55:57 PM5/11/20
to iperf-dev
Thanks for your reply.

How sending at lower rate can help here? Can you kindly explain a little bit more detail. 

DavidBO

unread,
May 11, 2020, 4:04:10 PM5/11/20
to iperf-dev
I now see that in addition to limiting data rate it would also help to reduce the TCP window size.  Following is the explanation.
The test was run at a rate of 1Mbps (-b 1M) for 4 seconds, meaning about 500KBytes were sent.  As the TCP window size was set to 32MB (-w 32M) it means that all data could be sent without waiting for an ack from the server.  Therefore, the client sent all the data, which was buffered at the sending machine.  Since the network throughput is only 1.8KB/sec it would take about 5 minutes for all the data to be sent. Currently the iperf client closes the connection with the server almost immediately after it finishes sending all the data.  That means that all the data that is still buffered at the client machine will never be sent.  The proposal is to make sure that only a small amount of data is buffered when the test ends.  (It may be possible to add to iperf a mechanism for making sure all data has arrived, but that would require setting a timeout for the response of more than 5 minutes, which is not common.)

With current iperf implementation I see two possible approaches:
  • Reduce the sending rate (-b parameter), e.g. to 10Kbps.  As this is below the network throughput, all or most of the data will be sent immediately to the server and will not be buffered at the client machine.  Therefore, when the test ends, almost all the data will already have arrived at the server.
  • Reduce the window size (-w parameter) to a few packets (-l parameter).  E.g. with "-l 1024" set "-w 5K".  The window size defines the buffer size that can be used for packets that were not yet acked by the server.  Therefore, when the buffer is full (about 5 packets in this example) the client cannot send more packets until it receives an ack that a packet(s) was received by the server. This will ensure that the client buffer will include only a few packets (that may be lost) when the test ends.
I believe that limiting the window size is more important, but that also both approaches should be used. 


On Monday, May 11, 2020 at 9:55:57 PM UTC+3, Debobroto Das wrote:
Thanks for your reply.

How sending at lower rate can help here? Can you kindly explain a little bit more detail. 

On Monday, May 11, 2020 at 5:21:47 AM UTC-4, DavidBO wrote:
I have submitted enhancement request #994 that may help to improve iperf3 behavior in such cases and allow more data to be received by the server.

However, regarding this specific case, the network throughput is 14327bps, which is about 1.8KB/sec.  That means it will take about 5 minutes until all the 500KB sent will be received.  Is that a real case?  With such low network throughput, data may need to be sent at a lower rate.

On Wednesday, May 6, 2020 at 11:57:22 PM UTC+3, Debobroto Das wrote:
I am running a IPerf3 test with version 3.6. 
Client Side Command is : 

client side output is : iperf3 --client 2001:1:1:1:0:0003:0001:0001 --port 22001 --cport 32005 --connect-timeout 9999   --set-mss 1400 -w 32M -k 512 -b 1M -l 1024  --json --logfile /home/deba/Desktop/PyDcnTE/testAndMeasurement/TEST_RESULTS/ECMP2-h0p0l0-h1p3l1

[DELETED]

Debobroto Das

unread,
May 11, 2020, 4:10:42 PM5/11/20
to iper...@googlegroups.com
Currently the iperf client closes the connection with the server almost immediately after it finishes sending all the data.  That means that all the data that is still buffered at the client machine will never be sent.  The proposal is to make sure that only a small amount of data is buffered when the test ends.  (It may be possible to add to iperf a mechanism for making sure all data has arrived, but that would require setting a timeout for the response of more than 5 minutes, which is not common.)

-- I believe this is the most natural requirement. 

I am actually working on simulation and my project is a research based project. Here we are trying to find various protocol's performance.

Regarding the rate and window size, reducing the window size is more helpful in my case.

Another question, can I access your patch for my research purpose? 


--
You received this message because you are subscribed to the Google Groups "iperf-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email to iperf-dev+...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/iperf-dev/a4172a39-f0fe-462b-ad32-8a1fd011941c%40googlegroups.com.
Reply all
Reply to author
Forward
0 new messages