From ac3303db957814b6202530af6ce2e3da47525b5d Mon Sep 17 00:00:00 2001 From: Miruna Paun Date: Mon, 3 Jul 2017 19:45:58 +0200 Subject: Updating the benchmarks chapter with all the needed info LXCR-7844 Signed-off-by: Miruna Paun --- .../doc/benchmarks.xml | 1037 +++++++++++++++++++- 1 file changed, 1034 insertions(+), 3 deletions(-) diff --git a/doc/book-enea-nfv-access-platform-guide/doc/benchmarks.xml b/doc/book-enea-nfv-access-platform-guide/doc/benchmarks.xml index 5d6e268..7155e44 100644 --- a/doc/book-enea-nfv-access-platform-guide/doc/benchmarks.xml +++ b/doc/book-enea-nfv-access-platform-guide/doc/benchmarks.xml @@ -1,14 +1,1045 @@ - + Benchmarks -
- +
+ Hardware Setup
+ +
+ Bios + + +
+ +
+ Use Cases + +
+ Docker related benchmarks + +
+ Use Case - Forward traffic in Docker + + Benchmarking traffic forwarding using testpmd in a Docker + container. + + Pktgen is used to generate UDP traffic that reaches testpmd + running in a Docker container. The traffic is then forwarded back to the source on + the return trip (forwarding). + + This test measures: + + + + pktgen TX, RX in packets per second (pps) and Mbps + + + + testpmd TX, RX in packets per second (pps) + + + + throughput as a percentage (%), obtained by dividing testpmd RX by + pktgen TX in pps (a worked sketch follows this list) + + + +
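The throughput figure used in the result tables below can be reproduced with a one-line calculation. A minimal sketch, assuming the two pps values have been read from the testpmd and pktgen consoles (the values below are placeholders, not measured results):

# Hypothetical sample values; replace with the observed pps numbers.
testpmd_rx_pps=7500000
pktgen_tx_pps=14880000
awk -v rx="$testpmd_rx_pps" -v tx="$pktgen_tx_pps" 'BEGIN { printf "throughput: %.2f%%\n", 100 * rx / tx }'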
+ Test Setup for Target 1 + + Start by following the steps below: + + SSD boot using the following grub.cfg + entry: linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / +isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / +clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / +intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / +hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1 + + Kill unnecessary services: killall ovsdb-server ovs-vswitchd +rm -rf /etc/openvswitch/* +mkdir -p /var/run/openvswitch Mount hugepages and configure + DPDK: mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:03:00.0 Run + pktgen: cd /usr/share/apps/pktgen/ +./pktgen -c 0xF -n 1 -- -P -m "[3:2].0" In the pktgen console, + run: str To change the frame size for + pktgen, choose one of [64, 128, 256, 512]: set 0 size <number> +
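Before launching pktgen it can be worth confirming that the interface really was bound to the igb_uio driver; if the binding failed, pktgen will not see the port. A quick check using the same dpdk-devbind tool as above:

# 0000:03:00.0 should now be listed under "Network devices using DPDK-compatible driver".
dpdk-devbind --status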
+ +
+ Test Setup for Target 2 + + Start by following the steps below: + + SSD boot using the following grub.cfg + entry: + + linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / +isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / +clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / +intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / +hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1 + + It is expected to have docker/guest image on target. Configure + the OVS bridge:# OVS old config clean-up +killall ovsdb-server ovs-vswitchd +rm -rf /etc/openvswitch/* +mkdir -p /var/run/openvswitch + +# Mount hugepages and bind interfaces to dpdk +mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:03:00.0 + +# configure openvswitch with DPDK +export DB_SOCK=/var/run/openvswitch/db.sock +ovsdb-tool create /etc/openvswitch/conf.db / +/usr/share/openvswitch/vswitch.ovsschema +ovsdb-server --remote=punix:$DB_SOCK / +--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach +ovs-vsctl --no-wait init +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10 +ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048 +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true +ovs-vswitchd unix:$DB_SOCK --pidfile --detach / +--log-file=/var/log/openvswitch/ovs-vswitchd.log + +ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev +ovs-vsctl add-port ovsbr0 vhost-user1 / +-- set Interface vhost-user1 type=dpdkvhostuser ofport_request=1 +ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface / +dpdk0 type=dpdk options:dpdk-devargs=0000:03:00.0 ofport_request=2 + +# configure static flows +ovs-ofctl del-flows ovsbr0 +ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2 +ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1Import a + Docker container:docker import enea-image-virtualization-guest-qemux86-64.tar.gz el7_guestStart + the Docker container:docker run -it --rm -v /var/run/openvswitch/:/var/run/openvswitch/ / +-v /mnt/huge:/mnt/huge el7_guest /bin/bashStart the testpmd + application in Docker:testpmd -c 0x30 -n 2 --file-prefix prog1 --socket-mem 512 --no-pci / +--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user1 / +-d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan / +--disable-rss -i --portmask=0x1 --coremask=0x20 --nb-cores=1 / +--rxq=1 --txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chainedTo + start traffic forwarding, run the + following command in testpmd CLI:startTo + start traffic but in termination + mode (no traffic sent on TX), run following command in testpmd + CLI:set fwd rxonly +start + Results in forwarding mode + + + + + Bytes + + pktgen pps + TX + + pktgen MBits/s + TX + + pktgen pps + RX + + pktgen MBits/s + RX + + testpmd pps + RX + + testpmd pps + TX + + throughput + (%) + + + + 64 + + 14890993 + + 10006 + + 7706039 + + 5178 + + 7692807 + + 7692864 + + 51.74% + + + + 128 + + 8435104 + + 9999 + + 7689458 + + 9060 + + 7684962 + + 7684904 + + 90.6% + + + + 256 + + 4532384 + + 9999 + + 4532386 + + 9998 + + 4532403 + + 4532403 + + 99.9% + + + +
+ Results in termination mode + + + + + Bytes + + pktgen pps + TX + + testpmd pps + RX + + throughput + (%) + + + + 64 + + 14890993 + + 7330403 + + 49.2% + + + + 128 + + 8435104 + + 7330379 + + 86.9% + + + + 256 + + 4532484 + + 4532407 + + 99.9% + + + +
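For reference when reading the pktgen TX columns above: the theoretical maximum packet rate on a 10 Gbit/s link follows from the frame size plus 20 bytes of per-frame overhead (preamble, start-of-frame delimiter and inter-frame gap). A small sketch of that calculation, which for 64-byte frames gives roughly the 14.88 Mpps seen in the tables:

# Theoretical line rate in packets per second on a 10 Gbit/s link.
# Each frame occupies (frame_size + 20) * 8 bits on the wire.
frame_size=64
awk -v fs="$frame_size" 'BEGIN { printf "max pps at 10 Gbit/s: %d\n", 10e9 / ((fs + 20) * 8) }'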
+
+
+ +
+ Use Case - Forward traffic from Docker to another Docker on the + same host + + This benchmarks a combined test using testpmd running in two Docker + instances: one forwards traffic to the second, which + terminates it. + + Packets are generated with pktgen and transmitted to the first testpmd, + which receives and forwards them to the second testpmd, where they are received + and terminated. + + Measurements are made of: + + + + pktgen TX in pps and Mbits/s + + + + testpmd TX and RX pps in Docker1 + + + + testpmd RX pps in Docker2 + + + + throughput as a percentage, obtained by dividing Docker2 testpmd RX pps by pktgen + TX pps. + +
+ Test Setup for Target 1 + + Start by following the steps below: + + SSD boot using the following grub.cfg + entry: + + linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / +isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / +clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / +intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / +hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1 + + Configure DPDK:mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:03:00.0Run + pktgen:cd /usr/share/apps/pktgen/ +./pktgen -c 0xF -n 1 -- -P -m "[3:2].0"Choose one of the + values from [64, 128, 256, 512] to change the packet + size:set 0 size <number> +
+ +
+ Test Setup for Target 2 + + Start by following the steps below: + + SSD boot using the following grub.cfg + entry: + + linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / +isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / +clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 / +iommu=pt intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / +hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1 + + killall ovsdb-server ovs-vswitchd +rm -rf /etc/openvswitch/* +mkdir -p /var/run/openvswitchConfigure DPDK:mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:03:00.0Configure the OVS + bridge:export DB_SOCK=/var/run/openvswitch/db.sock +ovsdb-tool create /etc/openvswitch/conf.db / +/usr/share/openvswitch/vswitch.ovsschema +ovsdb-server --remote=punix:$DB_SOCK / +--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach +ovs-vsctl --no-wait init +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10 +ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xcc +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048 +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true +ovs-vswitchd unix:$DB_SOCK --pidfile --detach / +--log-file=/var/log/openvswitch/ovs-vswitchd.log +ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev +ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface / +vhost-user1 type=dpdkvhostuser ofport_request=1 +ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface / +vhost-user2 type=dpdkvhostuser ofport_request=2 +ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 / +type=dpdk options:dpdk-devargs=0000:03:00.0 ofport_request=3 +ovs-ofctl del-flows ovsbr0 +ovs-ofctl add-flow ovsbr0 in_port=3,action=output:2 +ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1Import a + Docker container:docker import enea-image-virtualization-guest-qemux86-64.tar.gz el7_guestStart + the first Docker:docker run -it --rm --cpuset-cpus=4,5 / +-v /var/run/openvswitch/:/var/run/openvswitch/ / +-v /mnt/huge:/mnt/huge el7_guest /bin/bashStart the testpmd + application in Docker1:testpmd -c 0x30 -n 2 --file-prefix prog1 --socket-mem 512 --no-pci / +--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user1 / +-d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan / +--disable-rss -i --portmask=0x1 --coremask=0x20 --nb-cores=1 / +--rxq=1 --txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chainedConfigure + it in termination mode:set fwd rxonlyRun + the testpmd application:startOpen a + new console to the host and start the second docker + instance:docker run -it --rm --cpuset-cpus=0,1 -v /var/run/openvswitch/:/var/run/openvswitch/ / +-v /mnt/huge:/mnt/huge el7_guest /bin/bashIn the second + container start testpmd:testpmd -c 0x0F --file-prefix prog2 --socket-mem 512 --no-pci / +--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user2 / +-d /usr/lib/librte_pmd_virtio.so.1.1 -- -i --disable-hw-vlanRun + the TestPmd application in the second docker:testpmd -c 0x3 -n 2 --file-prefix prog2 --socket-mem 512 --no-pci / +--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user2 / +-d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan / +--disable-rss -i --portmask=0x1 --coremask=0x2 --nb-cores=1 --rxq=1 / +--txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chainedIn + the testpmd 
shell, run: start Start + pktgen traffic by running the following command in the pktgen + CLI: start 0 To record traffic + results: show port stats 0 Run this command in both testpmd instances as + well, to read the port statistics used in the results below. + + + Results + + + + + Bytes + + Target 1 - + pktgen pps TX + + Target 2 - + (forwarding) testpmd pps RX + + Target 2 - + (forwarding) testpmd pps TX + + Target 2 - + (termination) testpmd pps RX + + + + 64 + + 14844628 + + 5643565 + + 3459922 + + 3457326 + + + + 128 + + 8496962 + + 5667860 + + 3436811 + + 3438918 + + + + 256 + + 4532372 + + 4532362 + + 3456623 + + 3457115 + + + + 512 + + 2367641 + + 2349450 + + 2349450 + + 2349446 + + + +
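When validating this setup, the per-flow packet counters on the host give a quick indication of whether traffic is actually following the static flows configured above (from dpdk0 into one vhost-user port and across to the other). A simple check, run on Target 2 outside the containers:

# The n_packets counter of each flow should keep increasing while pktgen traffic is running.
ovs-ofctl dump-flows ovsbr0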
+
+
+
+ +
+ VM related benchmarks + +
+ Use Case - Forward/termination traffic in one VM + + Benchmarking traffic (UDP) forwarding and termination using + testpmd in a virtual machine. + + The Pktgen application is used to generate traffic that will + reach testpmd running on a virtual machine, and be forwarded back to + source on the return trip. With the same setup a second measurement + will be done with traffic termination in the virtual machine. + + This test case measures: + + + + pktgen TX, RX in packets per second (pps) and Mbps + + + + testpmd TX, RX in packets per second (pps) + + + + divide testpmd RX by + pktgen TX in pps to obtain the + throughput in percentages (%) + + + +
+ Test Setup for Target 1 + + Start with the steps below: + + SSD boot using the following grub.cfg + entry: linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / +isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / +clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / +intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / +hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1 + + Kill unnecessary services: killall ovsdb-server ovs-vswitchd +rm -rf /etc/openvswitch/* +mkdir -p /var/run/openvswitchConfigure DPDK:mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:03:00.0Run + pktgen:cd /usr/share/apps/pktgen/ +./pktgen -c 0x7 -n 4 --proc-type auto --socket-mem 256 / +-w 0000:03:00.0 -- -P -m "[1:2].0"Set pktgen frame size to + use from [64, 128, 256, 512]:set 0 size 64 +
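Since this setup relies on the hugepages reserved on the kernel command line (and pktgen is started here with --socket-mem 256), it can be useful to confirm that the pools were actually allocated before launching pktgen. A quick check:

# HugePages_Total, HugePages_Free and Hugepagesize report the default hugepage pool.
grep -i huge /proc/meminfo
# The per-size pools (1 GB and 2 MB) are visible here as well.
ls /sys/kernel/mm/hugepages/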
+ +
+ Test Setup for Target 2 + + Start by following the steps below: + + SSD boot using the following grub.cfg + entry: linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / +isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / +clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / +intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / +hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1Kill + unnecessary services: killall ovsdb-server ovs-vswitchd +rm -rf /etc/openvswitch/* +mkdir -p /var/run/openvswitchConfigure DPDK:mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:03:00.0Configure + OVS:export DB_SOCK=/var/run/openvswitch/db.sock +ovsdb-tool create /etc/openvswitch/conf.db / +/usr/share/openvswitch/vswitch.ovsschema +ovsdb-server --remote=punix:$DB_SOCK / +--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach +ovs-vsctl --no-wait init +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10 +ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048 +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true +ovs-vswitchd unix:$DB_SOCK --pidfile --detach / +--log-file=/var/log/openvswitch/ovs-vswitchd.log + +ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev +ovs-vsctl add-port ovsbr0 vhost-user1 / +-- set Interface vhost-user1 type=dpdkvhostuser -- set Interface / +vhost-user1 ofport_request=2 +ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 / +type=dpdk options:dpdk-devargs=0000:03:00.0 / +-- set Interface dpdk0 ofport_request=1 +chmod 777 /var/run/openvswitch/vhost-user1 + +ovs-ofctl del-flows ovsbr0 +ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2 +ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1Launch + QEMU:taskset -c 0,1 qemu-system-x86_64 -cpu host,+invtsc,migratable=no / +-M q35 -smp cores=2,sockets=1 -vcpu 0,affinity=0 -vcpu 1,affinity=1 / +-enable-kvm -nographic -realtime mlock=on -kernel /mnt/qemu/bzImage / +-drive file=/mnt/qemu/enea-image-virtualization-guest-qemux86-64.ext4,/ +if=virtio,format=raw -m 4096 -object memory-backend-file,id=mem,/ +size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem / +-mem-prealloc -chardev socket,id=char0,path=/var/run/openvswitch/vhost-user1 / +-netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce / +-device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,/ +mrg_rxbuf=on,rx_queue_size=1024,csum=off,gso=off,guest_tso4=off,/ +guest_tso6=off,guest_ecn=off -append 'root=/dev/vda console=ttyS0 / +hugepagesz=2M hugepages=1024 isolcpus=1 nohz_full=1 rcu_nocbs=1 / +irqaffinity=0 rcu_nocb_poll intel_pstate=disable intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0'Inside QEMU, + configure DPDK: mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:00:02.0Inside QEMU, run + testpmd: testpmd -c 0x3 -n 2 -d librte_pmd_virtio.so.1.1 / +-- --burst 64 --disable-hw-vlan --disable-rss -i --portmask=0x1 / +--coremask=0x2 --nb-cores=1 --rxq=1 --txq=1 --txd=512 --rxd=512 / +--txqflags=0xf00 --port-topology=chainedFor the Forwarding test, start testpmd + directly:startFor the Termination test, set testpmd to only + receive, then start it:set fwd rxonly +startOn target 1, you may start pktgen traffic + now:start 0On 
target 2, use this + command to refresh the testpmd display and note the highest + values:show port stats 0To stop + traffic from pktgen, in order to choose a different frame + size:stop 0To clear numbers in + testpmd:clear port stats +show port stats 0 + Results in forwarding mode + + + + + Bytes + + pktgen pps + RX + + pktgen pps + TX + + testpmd pps + RX + + testpmd pps + TX + + pktgen MBits/s + RX + + pktgen MBits/s + TX + + throughput + (%) + + + + 64 + + 7755769 + + 14858714 + + 7755447 + + 7755447 + + 5207 + + 9984 + + 52.2 + + + + 128 + + 7714626 + + 8435184 + + 7520349 + + 6932520 + + 8204 + + 9986 + + 82.1 + + + + 256 + + 4528847 + + 4528854 + + 4529030 + + 4529034 + + 9999 + + 9999 + + 99.9 + + + +
+ Results in termination mode + + + + + Bytes + + pktgen pps + TX + + testpmd pps + RX + + pktgen MBits/s + TX + + throughput + (%) + + + + 64 + + 15138992 + + 7290663 + + 10063 + + 48.2 + + + + 128 + + 8426825 + + 6902646 + + 9977 + + 81.9 + + + + 256 + + 4528957 + + 4528912 + + 9999 + + 100 + + + +
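Because QEMU is launched above with taskset -c 0,1, a quick way to confirm from the host that the virtual machine is confined to the intended cores is to query the affinity of the running QEMU process. A small sketch, assuming a single qemu-system-x86_64 instance is running:

# Prints the current CPU affinity list of the QEMU process (expected: 0,1 for this setup).
taskset -cp $(pidof qemu-system-x86_64)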
+
+
+ +
+ Use Case - Forward traffic between two VMs + + This benchmarks a combined test using two virtual machines: the first + forwards traffic to the second, which terminates it. + + Measurements are made of: + + + + pktgen TX in pps and Mbits/s + + + + testpmd TX and RX pps in VM1 + + + + testpmd RX pps in VM2 + + + + throughput as a percentage, obtained by dividing + VM2 testpmd RX pps by pktgen TX + pps + + + +
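Before launching the two QEMU instances in the Target 2 setup below, it is worth confirming that Open vSwitch has created both vhost-user sockets the virtual machines will attach to; QEMU will fail to connect its vhost-user netdev if a socket path is missing. A quick check on Target 2:

# Both sockets are created by ovs-vswitchd when the vhost-user ports are added.
ls -l /var/run/openvswitch/vhost-user1 /var/run/openvswitch/vhost-user2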
+ Test Setup for Target 1 + + Start by doing the following: + + SSD boot using the following grub.cfg + entry: linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / +isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / +clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / +intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / +hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1Kill + Services:killall ovsdb-server ovs-vswitchd +rm -rf /etc/openvswitch/* +mkdir -p /var/run/openvswitchConfigure DPDK:mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:03:00.0Run + pktgen:cd /usr/share/apps/pktgen/ +./pktgen -c 0x7 -n 4 --proc-type auto --socket-mem 256 / +-w 0000:03:00.0 -- -P -m "[1:2].0"Set pktgen frame size to + use from [64, 128, 256, 512]:set 0 size 64 +
+ +
+ Test Setup for Target 2 + + Start by doing the following: + + SSD boot using the following grub.cfg + entry: linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / +isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / +clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / +intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / +hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1Kill + Services:killall ovsdb-server ovs-vswitchd +rm -rf /etc/openvswitch/* +mkdir -p /var/run/openvswitchConfigure DPDK:mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:03:00.0Configure + OVS:export DB_SOCK=/var/run/openvswitch/db.sock +ovsdb-tool create /etc/openvswitch/conf.db / +/usr/share/openvswitch/vswitch.ovsschema +ovsdb-server --remote=punix:$DB_SOCK / +--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach +ovs-vsctl --no-wait init +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10 +ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048 +ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true +ovs-vswitchd unix:$DB_SOCK --pidfile / +--detach --log-file=/var/log/openvswitch/ovs-vswitchd.log + + +ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev +ovs-vsctl add-port ovsbr0 dpdk0 / +-- set Interface dpdk0 type=dpdk options:dpdk-devargs=0000:03:00.0 ofport_request=1 +ovs-vsctl add-port ovsbr0 vhost-user1 / +-- set Interface vhost-user1 type=dpdkvhostuser ofport_request=2 +ovs-vsctl add-port ovsbr0 vhost-user2 / +-- set Interface vhost-user2 type=dpdkvhostuser ofport_request=3 + + +ovs-ofctl del-flows ovsbr0 +ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2 +ovs-ofctl add-flow ovsbr0 in_port=2,action=output:3Launch + first QEMU instance, VM1:taskset -c 0,1 qemu-system-x86_64 -cpu host,+invtsc,migratable=no -M q35 / +-smp cores=2,sockets=1 -vcpu 0,affinity=0 -vcpu 1,affinity=1 -enable-kvm / +-nographic -realtime mlock=on -kernel /home/root/qemu/bzImage / +-drive file=/home/root/qemu/enea-image-virtualization-guest-qemux86-64.ext4,/ +if=virtio,format=raw -m 2048 -object memory-backend-file,id=mem,/ +size=2048M,mem-path=/mnt/huge,share=on -numa node,memdev=mem / +-mem-prealloc -chardev socket,id=char0,path=/var/run/openvswitch/vhost-user1 / +-netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce / +-device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,/ +mrg_rxbuf=on,rx_queue_size=1024,csum=off,gso=off,guest_tso4=off,/ +guest_tso6=off,guest_ecn=off -append 'root=/dev/vda console=ttyS0 / +hugepagesz=2M hugepages=512 isolcpus=1 nohz_full=1 rcu_nocbs=1 / +irqaffinity=0 rcu_nocb_poll intel_pstate=disable intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0'Connect to + Target 2 through a new SSH session and run a second QEMU instance + (to get its own console, separate from instance VM1). 
We shall call + this VM2:taskset -c 4,5 qemu-system-x86_64 -cpu host,+invtsc,migratable=no / +-M q35 -smp cores=2,sockets=1 -vcpu 0,affinity=4 -vcpu 1,affinity=5 / +-enable-kvm -nographic -realtime mlock=on -kernel /home/root/qemu2/bzImage / +-drive file=/home/root/qemu2/enea-image-virtualization-guest-qemux86-64.ext4,/ +if=virtio,format=raw -m 2048 -object memory-backend-file,id=mem,size=2048M,/ +mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc / +-chardev socket,id=char1,path=/var/run/openvswitch/vhost-user2 / +-netdev type=vhost-user,id=mynet1,chardev=char1,vhostforce / +-device virtio-net-pci,mac=52:54:00:00:00:02,netdev=mynet1,/ +mrg_rxbuf=on,rx_queue_size=1024,csum=off,gso=off,guest_tso4=off,/ +guest_tso6=off,guest_ecn=off -append 'root=/dev/vda console=ttyS0 / +hugepagesz=2M hugepages=512 isolcpus=1 nohz_full=1 rcu_nocbs=1 / +irqaffinity=0 rcu_nocb_poll intel_pstate=disable intel_idle.max_cstate=0 / +processor.max_cstate=0 mce=ignore_ce audit=0'Configure DPDK + inside VM1:mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:00:02.0Run testpmd inside + VM1:testpmd -c 0x3 -n 2 -d librte_pmd_virtio.so.1.1 / +-- --burst 64 --disable-hw-vlan --disable-rss -i / +--portmask=0x1 --coremask=0x2 --nb-cores=1 --rxq=1 / +--txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chainedStart + testpmd inside VM1:startConfigure + DPDK inside VM2:mkdir -p /mnt/huge +mount -t hugetlbfs nodev /mnt/huge +modprobe igb_uio +dpdk-devbind --bind=igb_uio 0000:00:02.0Run testpmd inside + VM2:testpmd -c 0x3 -n 2 -d librte_pmd_virtio.so.1.1 / +-- --burst 64 --disable-hw-vlan --disable-rss -i --portmask=0x1 / +--coremask=0x2 --nb-cores=1 --rxq=1 --txq=1 --txd=512 / +--rxd=512 --txqflags=0xf00 --port-topology=chainedSet VM2 for + termination and start testpmd:set fwd rxonly +startOn target 1, start pktgen traffic:start 0Use + this command to refresh testpmd display in VM1 and VM2 and note the + highest values:show port stats 0To + stop traffic from pktgen, in order to choose a different frame + size:stop 0To clear numbers in + testpmd:clear port stats +show port stats 0For VM1, we record the stats relevant for + forwarding: + + + + RX, TX in pps + + + + Only Rx-pps and Tx-pps numbers are important here, they change + every time stats are displayed as long as there is traffic. Run the + command a few times and pick the best (maximum) values seen. + + For VM2, we record the stats relevant for termination: + + + + RX in pps (TX will be 0) + + + + For pktgen, we record only the TX side, because flow is + terminated, with no RX traffic reaching pktgen: + + + + TX in pps and Mbit/s + + + + + Results in forwarding mode + + + + + Bytes + + pktgen pps + TX + + VM1 testpmd pps + RX + + VM1 testpmd pps + TX + + VM2 testpmd pps + RX + + pktgen MBits/s + TX + + throughput + (%) + + + + 64 + + 14845113 + + 6826540 + + 5389680 + + 5383577 + + 9975 + + 36.2 + + + + 128 + + 8426683 + + 6825857 + + 5386971 + + 5384530 + + 9976 + + 63.9 + + + + 256 + + 4528894 + + 4507975 + + 4507958 + + 4532457 + + 9999 + + 100 + + + +
+
+
+
+
\ No newline at end of file -- cgit v1.2.3-54-g00ecf