I have an issue when using TRex in a VM.
I use one VM as a traffic generator and the other as a DUT.
On the host I use OVS-DPDK with:
- bridge 0 to connect the ens3 interface of each VM
- bridge 1 to manage the VMs
- bridge 2 to connect the ens5 interface of each VM
I configured multi-queue when starting QEMU, but I get the following message when I try to use more than 1 CPU (TRex v2.18):
root@vm-test-ans-2:~/v2.18# ./t-rex-64 -i -c 3
Killing Scapy server... Scapy server is killed
Starting Scapy server.... Scapy server is started
Starting TRex v2.18 please wait ...
zmq publisher at: tcp://*:4500
Number of ports found: 2
set driver name net_virtio
ERROR the number of cores should be 1 when the driver support only one tx queue and one rx queue
root@vm-test-ans-2:~/v2.18#
My script to start QEMU:
# Tear down any ports left over from a previous run.
ovs-vsctl del-port br1 tap2
ovs-vsctl del-port br1 tap3
ovs-vsctl del-port br0 dpdkvhostuser3
ovs-vsctl del-port br2 dpdkvhostuser5
ovs-vsctl del-port br0 dpdkvhostuser7
ovs-vsctl del-port br2 dpdkvhostuser9

# Re-create the vhost-user client ports. With type=dpdkvhostuserclient,
# OVS connects to the socket at vhost-server-path, so QEMU must be the
# vhost-user *server* (chardev ...,server on the QEMU side).
# NOTE: each add-port must be issued as a single shell command — in the
# original posting the commands were wrapped mid-line by the mail client,
# which would make the shell run the "Interface ... options:n_rxq=2"
# fragment as a separate, invalid command. Explicit '\' continuations
# keep each command intact.
# NOTE(review): options:n_rxq applies to DPDK phy ports; for vhost-user
# ports the queue count is negotiated with QEMU (queues=2) — verify
# against the OVS documentation for your OVS version.
ovs-vsctl add-port br0 dpdkvhostuser3 \
  -- set Interface dpdkvhostuser3 type=dpdkvhostuserclient \
       options:vhost-server-path=/tmp/dpdkvhostuser3 \
  -- set Interface dpdkvhostuser3 options:n_rxq=2
ovs-vsctl add-port br2 dpdkvhostuser5 \
  -- set Interface dpdkvhostuser5 type=dpdkvhostuserclient \
       options:vhost-server-path=/tmp/dpdkvhostuser5 \
  -- set Interface dpdkvhostuser5 options:n_rxq=2
ovs-vsctl add-port br0 dpdkvhostuser7 \
  -- set Interface dpdkvhostuser7 type=dpdkvhostuserclient \
       options:vhost-server-path=/tmp/dpdkvhostuser7 \
  -- set Interface dpdkvhostuser7 options:n_rxq=2
ovs-vsctl add-port br2 dpdkvhostuser9 \
  -- set Interface dpdkvhostuser9 type=dpdkvhostuserclient \
       options:vhost-server-path=/tmp/dpdkvhostuser9 \
  -- set Interface dpdkvhostuser9 options:n_rxq=2
# Launch VM 1 (the TRex traffic generator), pinned to host cores 14-17.
# - 4 vCPUs, 4 GiB RAM backed by hugepages (share=on, required so OVS-DPDK
#   can map the guest memory for vhost-user).
# - Two vhost-user NICs (sockets /tmp/dpdkvhostuser3 and /tmp/dpdkvhostuser5)
#   with mq=on,queues=2; QEMU is the vhost-user server ("server" on the
#   chardev), matching the dpdkvhostuserclient ports on the OVS side.
#   NOTE(review): vectors=6 is presumably sized for 2 queue pairs
#   (2*queues + 2) — confirm against the QEMU virtio-net multiqueue docs.
# - net1 is a plain tap (tap2) attached to the management bridge via the
#   ovs-ifupbr1/ovs-ifdownbr1 scripts.
# - Monitor on telnet :55422, serial on :55522 (used below for vCPU pinning).
taskset -c 14-17 qemu-system-x86_64 -display vnc=:11 -k fr -m 4096M \
-cpu host -smp cores=4,threads=1,sockets=1 -machine accel=kvm \
-object memory-backend-file,id=mem,size=4G,mem-path=/dev/hugepages/,share=on \
-numa node,memdev=mem -mem-prealloc \
-chardev socket,path=/tmp/dpdkvhostuser3,id=chr0,server \
-netdev type=vhost-user,id=net0,chardev=chr0,vhostforce=on,queues=2 \
-device virtio-net-pci,mac=00:6d:10:72:75:01,netdev=net0,mq=on,vectors=6,ioeventfd=on \
-netdev type=tap,ifname=tap2,id=net1,script=/etc/ovs-scripts/ovs-ifupbr1,downscript=/etc/ovs-scripts/ovs-ifdownbr1 \
-device virtio-net-pci,mac=00:6d:10:72:75:02,netdev=net1 \
-chardev socket,path=/tmp/dpdkvhostuser5,id=chr2,server \
-netdev type=vhost-user,id=net2,chardev=chr2,vhostforce=on,queues=2 \
-device virtio-net-pci,mac=00:6d:10:72:75:03,netdev=net2,mq=on,vectors=6,ioeventfd=on \
-monitor telnet::55422,server,nowait -serial telnet::55522,server,nowait \
-drive file=/u0/qemu/pfsvcpe_ubuntu1610-1.img,format=raw,if=virtio &
# Launch VM 2 (the DUT), pinned to host cores 30-39.
# Same layout as VM 1 but with 10 vCPUs and 8 GiB hugepage-backed RAM:
# two vhost-user NICs (sockets /tmp/dpdkvhostuser7 and /tmp/dpdkvhostuser9,
# mq=on,queues=2, QEMU as vhost-user server) plus a tap (tap3) on the
# management bridge. Monitor on telnet :55432, serial on :55532.
# NOTE(review): -m 8192 (no suffix) is MiB, matching the 8G memory backend.
taskset -c 30-39 qemu-system-x86_64 -display vnc=:12 -k fr -m 8192 \
-cpu host -smp cores=10,threads=1,sockets=1 -machine accel=kvm \
-object memory-backend-file,id=mem,size=8G,mem-path=/dev/hugepages/,share=on \
-numa node,memdev=mem -mem-prealloc \
-chardev socket,path=/tmp/dpdkvhostuser7,id=chr0,server \
-netdev type=vhost-user,id=net0,chardev=chr0,vhostforce=on,queues=2 \
-device virtio-net-pci,mac=00:6d:10:72:76:04,netdev=net0,mq=on,vectors=6,ioeventfd=on \
-netdev type=tap,ifname=tap3,id=net1,script=/etc/ovs-scripts/ovs-ifupbr1,downscript=/etc/ovs-scripts/ovs-ifdownbr1 \
-device virtio-net-pci,mac=00:6d:10:72:76:02,netdev=net1 \
-chardev socket,path=/tmp/dpdkvhostuser9,id=chr2,server \
-netdev type=vhost-user,id=net2,chardev=chr2,vhostforce=on,queues=2 \
-device virtio-net-pci,mac=00:6d:10:72:76:06,netdev=net2,mq=on,vectors=6,ioeventfd=on \
-monitor telnet::55432,server,nowait -serial telnet::55532,server,nowait \
-drive file=/u0/qemu/pfsvcpe_ubuntu1610-2.img,format=raw,if=virtio &
# Give both guests time to boot before pinning their vCPU threads.
sleep 10

# Pin VM 1's vCPUs (QEMU monitor on telnet :55422) to host cores 14-17:
# vCPU 0 -> core 14, vCPU 1 -> core 15, ...
for vcpu in 0 1 2 3; do
  /root/taskset_qemu.py 55422 "${vcpu}=$((14 + vcpu))"
done

# Pin VM 2's vCPUs (QEMU monitor on telnet :55432) to host cores 30-39:
# vCPU 0 -> core 30, vCPU 1 -> core 31, ...
for vcpu in 0 1 2 3 4 5 6 7 8 9; do
  /root/taskset_qemu.py 55432 "${vcpu}=$((30 + vcpu))"
done
The output of `ovs-appctl dpif-netdev/pmd-rxq-show`:
root@vcpeserver:/u0/qemu# ovs-appctl dpif-netdev/pmd-rxq-show
pmd thread numa_id 0 core_id 9:
isolated : false
port: dpdkvhostuser7 queue-id: 0
port: dpdkvhostuser9 queue-id: 0
port: dpdkvhostuser3 queue-id: 0
port: dpdkvhostuser5 queue-id: 0
pmd thread numa_id 0 core_id 8:
isolated : false
port: dpdkvhostuser7 queue-id: 1
port: dpdkvhostuser9 queue-id: 1
port: dpdkvhostuser3 queue-id: 1
port: dpdkvhostuser5 queue-id: 1
root@vcpeserver:/u0/qemu#
I can't generate more than 800 kpps with a single CPU...
Regards,
Olivier
--
You received this message because you are subscribed to the Google Groups "TRex Traffic Generator" group.
To unsubscribe from this group and stop receiving emails from it, send an email to trex-tgn+u...@googlegroups.com.
To post to this group, send email to trex...@googlegroups.com.
To view this discussion on the web visit https://groups.google.com/d/msgid/trex-tgn/90fee07c-19b5-492e-8ce8-6a592ebea277%40googlegroups.com.
For more options, visit https://groups.google.com/d/optout.