summaryrefslogtreecommitdiffstats
path: root/doc/book-enea-nfv-access-guide/doc/benchmarks.xml
diff options
context:
space:
mode:
Diffstat (limited to 'doc/book-enea-nfv-access-guide/doc/benchmarks.xml')
-rw-r--r--doc/book-enea-nfv-access-guide/doc/benchmarks.xml1637
1 files changed, 1637 insertions, 0 deletions
diff --git a/doc/book-enea-nfv-access-guide/doc/benchmarks.xml b/doc/book-enea-nfv-access-guide/doc/benchmarks.xml
new file mode 100644
index 0000000..cba6150
--- /dev/null
+++ b/doc/book-enea-nfv-access-guide/doc/benchmarks.xml
@@ -0,0 +1,1637 @@
1<?xml version="1.0" encoding="ISO-8859-1"?>
2<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
3"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
4<chapter id="benchmarks">
5 <title>Benchmarks</title>
6
7 <section id="hw-setup">
8 <title>Hardware Setup</title>
9
 10 <para>The following table describes all the needed prerequisites for a
 11 suitable hardware setup:</para>
12
13 <table>
14 <title>Hardware Setup</title>
15
16 <tgroup cols="2">
17 <colspec align="left" />
18
19 <thead>
20 <row>
21 <entry align="center">Item</entry>
22
23 <entry align="center">Description</entry>
24 </row>
25 </thead>
26
27 <tbody>
28 <row>
29 <entry align="left">Server Platform</entry>
30
31 <entry align="left">Supermicro X10SDV-4C-TLN2F
32 http://www.supermicro.com/products/motherboard/xeon/d/X10SDV-4C-TLN2F.cfm</entry>
33 </row>
34
35 <row>
36 <entry align="left">ARCH</entry>
37
38 <entry>x86-64</entry>
39 </row>
40
41 <row>
42 <entry align="left">Processor</entry>
43
44 <entry>1 x Intel Xeon D-1521 (Broadwell), 4 cores, 8
45 hyper-threaded cores per processor</entry>
46 </row>
47
48 <row>
49 <entry align="left">CPU freq</entry>
50
51 <entry>2.40 GHz</entry>
52 </row>
53
54 <row>
55 <entry align="left">RAM</entry>
56
57 <entry>16GB</entry>
58 </row>
59
60 <row>
61 <entry align="left">Network</entry>
62
63 <entry>Dual integrated 10G ports</entry>
64 </row>
65
66 <row>
67 <entry align="left">Storage</entry>
68
69 <entry>Samsung 850 Pro 128GB SSD</entry>
70 </row>
71 </tbody>
72 </tgroup>
73 </table>
74
75 <para>Generic tests configuration:</para>
76
77 <itemizedlist>
78 <listitem>
79 <para>All tests use one port, one core and one Rx/TX queue for fast
80 path traffic.</para>
81 </listitem>
82 </itemizedlist>
83 </section>
84
85 <section id="bios">
86 <title>BIOS Settings</title>
87
88 <para>The table below details the BIOS settings for which the default
89 values were changed when doing performance measurements.</para>
90
91 <table>
92 <title>BIOS Settings</title>
93
94 <tgroup cols="4">
95 <colspec align="left" />
96
97 <thead>
98 <row>
99 <entry align="center">Menu Path</entry>
100
101 <entry align="center">Setting Name</entry>
102
103 <entry align="center">Enea NFV Access value</entry>
104
105 <entry align="center">BIOS Default value</entry>
106 </row>
107 </thead>
108
109 <tbody>
110 <row>
111 <entry align="left">CPU Configuration</entry>
112
113 <entry align="left">Direct Cache Access (DCA)</entry>
114
115 <entry>Enable</entry>
116
117 <entry>Auto</entry>
118 </row>
119
120 <row>
121 <entry>CPU Configuration / Advanced Power Management
122 Configuration</entry>
123
124 <entry align="left">EIST (P-States)</entry>
125
126 <entry>Disable</entry>
127
128 <entry>Enable</entry>
129 </row>
130
131 <row>
132 <entry>CPU Configuration / Advanced Power Management Configuration
133 / CPU C State Control</entry>
134
135 <entry align="left">CPU C State</entry>
136
137 <entry>Disable</entry>
138
139 <entry>Enable</entry>
140 </row>
141
142 <row>
143 <entry>CPU Configuration / Advanced Power Management Configuration
 144 / CPU Advanced PM Tuning / Energy Perf BIAS</entry>
145
146 <entry align="left">Energy Performance Tuning</entry>
147
148 <entry>Disable</entry>
149
150 <entry>Enable</entry>
151 </row>
152
153 <row>
154 <entry>CPU Configuration / Advanced Power Management Configuration
 155 / CPU Advanced PM Tuning / Energy Perf BIAS</entry>
156
157 <entry align="left">Energy Performance BIAS Setting</entry>
158
159 <entry>Performance</entry>
160
161 <entry>Balanced Performance</entry>
162 </row>
163
164 <row>
165 <entry>CPU Configuration / Advanced Power Management Configuration
 166 / CPU Advanced PM Tuning / Energy Perf BIAS</entry>
167
168 <entry align="left">Power/Performance Switch</entry>
169
170 <entry>Disable</entry>
171
172 <entry>Enable</entry>
173 </row>
174
175 <row>
176 <entry>CPU Configuration / Advanced Power Management Configuration
 177 / CPU Advanced PM Tuning / Program PowerCTL_MSR</entry>
178
179 <entry align="left">Energy Efficient Turbo</entry>
180
181 <entry>Disable</entry>
182
183 <entry>Enable</entry>
184 </row>
185
186 <row>
187 <entry>Chipset Configuration / North Bridge / IIO
188 Configuration</entry>
189
190 <entry align="left">EV DFX Features</entry>
191
192 <entry>Enable</entry>
193
194 <entry>Disable</entry>
195 </row>
196
197 <row>
198 <entry>Chipset Configuration / North Bridge / Memory
199 Configuration</entry>
200
201 <entry align="left">Enforce POR</entry>
202
203 <entry>Disable</entry>
204
205 <entry>Enable</entry>
206 </row>
207
208 <row>
209 <entry>Chipset Configuration / North Bridge / Memory
210 Configuration</entry>
211
212 <entry align="left">Memory Frequency</entry>
213
214 <entry>2400</entry>
215
216 <entry>Auto</entry>
217 </row>
218
219 <row>
220 <entry>Chipset Configuration / North Bridge / Memory
221 Configuration</entry>
222
223 <entry align="left">DRAM RAPL Baseline</entry>
224
225 <entry>Disable</entry>
226
227 <entry>DRAM RAPL Mode 1</entry>
228 </row>
229 </tbody>
230 </tgroup>
231 </table>
232 </section>
233
234 <section id="use-cases">
235 <title>Use Cases</title>
236
237 <section id="docker-benchmarks">
238 <title>Docker related benchmarks</title>
239
240 <section>
241 <title>Forward traffic in Docker</title>
242
243 <para>Benchmarking traffic forwarding using testpmd in a Docker
244 container.</para>
245
246 <para>Pktgen is used to generate UDP traffic that will reach testpmd,
247 running in a Docker image. It will then be forwarded back to source on
248 the return trip (<emphasis role="bold">Forwarding</emphasis>).</para>
249
250 <para>This test measures:</para>
251
252 <itemizedlist>
253 <listitem>
254 <para>pktgen TX, RX in packets per second (pps) and Mbps</para>
255 </listitem>
256
257 <listitem>
258 <para>testpmd TX, RX in packets per second (pps)</para>
259 </listitem>
260
261 <listitem>
262 <para>divide testpmd RX / pktgen TX in pps to obtain throughput in
263 percentages (%)</para>
264 </listitem>
265 </itemizedlist>
266
267 <section id="usecase-one">
268 <title>Test Setup for Target 1</title>
269
270 <para>Start by following the steps below:</para>
271
272 <para>SSD boot using the following <literal>grub.cfg</literal>
273 entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
274isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
275clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
276processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt /
277intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
278hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting></para>
279
280 <para>Kill unnecessary services:<programlisting>killall ovsdb-server ovs-vswitchd
281rm -rf /etc/openvswitch/*
282mkdir -p /var/run/openvswitch</programlisting>Mount hugepages and configure
283 DPDK:<programlisting>mkdir -p /mnt/huge
284mount -t hugetlbfs nodev /mnt/huge
285modprobe igb_uio
286dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
287 pktgen:<programlisting>cd /usr/share/apps/pktgen/
288./pktgen -c 0xF -n 1 -- -P -m "[3:2].0"</programlisting>In the pktgen console
 289 run:<programlisting>str</programlisting>To change the frame size for
 290 pktgen, from [64, 128, 256, 512]:<programlisting>set 0 size &lt;number&gt;</programlisting></para>
291 </section>
292
293 <section id="usecase-two">
294 <title>Test Setup for Target 2</title>
295
296 <para>Start by following the steps below:</para>
297
298 <para>SSD boot using the following <literal>grub.cfg</literal>
299 entry:</para>
300
301 <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
302isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
303clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
304processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt /
305intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
306hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>
307
 308 <para>It is expected that the Docker guest image is present on the
 309 target. Configure the OVS bridge:<programlisting># OVS old config clean-up
310killall ovsdb-server ovs-vswitchd
311rm -rf /etc/openvswitch/*
312mkdir -p /var/run/openvswitch
313
314# Mount hugepages and bind interfaces to dpdk
315mkdir -p /mnt/huge
316mount -t hugetlbfs nodev /mnt/huge
317modprobe igb_uio
318dpdk-devbind --bind=igb_uio 0000:03:00.0
319
320# configure openvswitch with DPDK
321export DB_SOCK=/var/run/openvswitch/db.sock
322ovsdb-tool create /etc/openvswitch/conf.db /
323/usr/share/openvswitch/vswitch.ovsschema
324ovsdb-server --remote=punix:$DB_SOCK /
325--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
326ovs-vsctl --no-wait init
327ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10
328ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc
329ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048
330ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
331ovs-vswitchd unix:$DB_SOCK --pidfile --detach /
332--log-file=/var/log/openvswitch/ovs-vswitchd.log
333
334ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev
335ovs-vsctl add-port ovsbr0 vhost-user1 /
336-- set Interface vhost-user1 type=dpdkvhostuser ofport_request=1
337ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface /
338dpdk0 type=dpdk options:dpdk-devargs=0000:03:00.0 ofport_request=2
339
340# configure static flows
341ovs-ofctl del-flows ovsbr0
342ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2
343ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Import a
344 Docker container:<programlisting>docker import enea-image-virtualization-guest-qemux86-64.tar.gz el7_guest</programlisting>Start
345 the Docker container:<programlisting>docker run -it --rm -v /var/run/openvswitch/:/var/run/openvswitch/ /
346-v /mnt/huge:/mnt/huge el7_guest /bin/bash</programlisting>Start the testpmd
347 application in Docker:<programlisting>testpmd -c 0x30 -n 2 --file-prefix prog1 --socket-mem 512 --no-pci /
348--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user1 /
349-d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan /
350--disable-rss -i --portmask=0x1 --coremask=0x20 --nb-cores=1 /
351--rxq=1 --txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>To
352 start traffic <emphasis role="bold">forwarding</emphasis>, run the
353 following command in testpmd CLI:<programlisting>start</programlisting>To
354 start traffic but in <emphasis role="bold">termination</emphasis>
355 mode (no traffic sent on TX), run following command in testpmd
356 CLI:<programlisting>set fwd rxonly
357start</programlisting><table>
358 <title>Results in forwarding mode</title>
359
360 <tgroup cols="8">
361 <tbody>
362 <row>
363 <entry align="center"><emphasis
364 role="bold">Bytes</emphasis></entry>
365
366 <entry align="center"><emphasis role="bold">pktgen pps
367 TX</emphasis></entry>
368
369 <entry align="center"><emphasis role="bold">pktgen MBits/s
370 TX</emphasis></entry>
371
372 <entry align="center"><emphasis role="bold">pktgen pps
373 RX</emphasis></entry>
374
375 <entry align="center"><emphasis role="bold">pktgen MBits/s
376 RX</emphasis></entry>
377
378 <entry align="center"><emphasis role="bold">testpmd pps
379 RX</emphasis></entry>
380
381 <entry align="center"><emphasis role="bold">testpmd pps
382 TX</emphasis></entry>
383
384 <entry align="center"><emphasis role="bold">throughput
385 (%)</emphasis></entry>
386 </row>
387
388 <row>
389 <entry role="bold"><emphasis
390 role="bold">64</emphasis></entry>
391
392 <entry>14890993</entry>
393
394 <entry>10006</entry>
395
396 <entry>7706039</entry>
397
398 <entry>5178</entry>
399
400 <entry>7692807</entry>
401
402 <entry>7692864</entry>
403
404 <entry>51.74%</entry>
405 </row>
406
407 <row>
408 <entry><emphasis role="bold">128</emphasis></entry>
409
410 <entry>8435104</entry>
411
412 <entry>9999</entry>
413
414 <entry>7689458</entry>
415
416 <entry>9060</entry>
417
418 <entry>7684962</entry>
419
420 <entry>7684904</entry>
421
422 <entry>90.6%</entry>
423 </row>
424
425 <row>
426 <entry role="bold"><emphasis
427 role="bold">256</emphasis></entry>
428
429 <entry>4532384</entry>
430
431 <entry>9999</entry>
432
433 <entry>4532386</entry>
434
435 <entry>9998</entry>
436
437 <entry>4532403</entry>
438
439 <entry>4532403</entry>
440
441 <entry>99.9%</entry>
442 </row>
443 </tbody>
444 </tgroup>
445 </table><table>
446 <title>Results in termination mode</title>
447
448 <tgroup cols="4">
449 <tbody>
450 <row>
451 <entry align="center"><emphasis
452 role="bold">Bytes</emphasis></entry>
453
454 <entry align="center"><emphasis role="bold">pktgen pps
455 TX</emphasis></entry>
456
457 <entry align="center"><emphasis role="bold">testpmd pps
458 RX</emphasis></entry>
459
460 <entry align="center"><emphasis role="bold">throughput
461 (%)</emphasis></entry>
462 </row>
463
464 <row>
465 <entry role="bold"><emphasis
466 role="bold">64</emphasis></entry>
467
468 <entry>14890993</entry>
469
470 <entry>7330403</entry>
471
 472 <entry>49.2%</entry>
473 </row>
474
475 <row>
476 <entry><emphasis role="bold">128</emphasis></entry>
477
478 <entry>8435104</entry>
479
480 <entry>7330379</entry>
481
 482 <entry>86.9%</entry>
483 </row>
484
485 <row>
486 <entry role="bold"><emphasis
487 role="bold">256</emphasis></entry>
488
489 <entry>4532484</entry>
490
491 <entry>4532407</entry>
492
 493 <entry>99.9%</entry>
494 </row>
495 </tbody>
496 </tgroup>
497 </table></para>
498 </section>
499 </section>
500
501 <section id="usecase-three-four">
502 <title>Forward traffic from Docker to another Docker on the same
503 host</title>
504
505 <para>Benchmark a combo test using testpmd running in two Docker
506 instances, one which Forwards traffic to the second one, which
507 Terminates it.</para>
508
509 <para>Packets are generated with pktgen and TX-d to the first testpmd,
510 which will RX and Forward them to the second testpmd, which will RX
511 and terminate them.</para>
512
513 <para>Measurements are made in:</para>
514
515 <itemizedlist>
516 <listitem>
517 <para>pktgen TX in pps and Mbits/s</para>
518 </listitem>
519
520 <listitem>
521 <para>testpmd TX and RX pps in Docker1</para>
522 </listitem>
523
524 <listitem>
525 <para>testpmd RX pps in Docker2</para>
526 </listitem>
527 </itemizedlist>
528
529 <para>Throughput found as a percent, by dividing Docker2 <emphasis
530 role="bold">testpmd RX pps</emphasis> by <emphasis role="bold">pktgen
531 TX pps</emphasis>.</para>
532
533 <section id="target-one-usecase-three">
534 <title>Test Setup for Target 1</title>
535
536 <para>Start by following the steps below:</para>
537
538 <para>SSD boot using the following <literal>grub.cfg</literal>
539 entry:</para>
540
541 <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
542isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
543clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
544processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt /
545intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
546hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>
547
548 <para>Configure DPDK:<programlisting>mkdir -p /mnt/huge
549mount -t hugetlbfs nodev /mnt/huge
550modprobe igb_uio
551dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
552 pktgen:<programlisting>cd /usr/share/apps/pktgen/
553./pktgen -c 0xF -n 1 -- -P -m "[3:2].0"</programlisting>Choose one of the
554 values from [64, 128, 256, 512] to change the packet
555 size:<programlisting>set 0 size &lt;number&gt;</programlisting></para>
556 </section>
557
558 <section id="target-two-usecase-four">
559 <title>Test Setup for Target 2</title>
560
561 <para>Start by following the steps below:</para>
562
563 <para>SSD boot using the following <literal>grub.cfg</literal>
564 entry:</para>
565
566 <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
567isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
568clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
569processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 /
570iommu=pt intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
571hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>
572
573 <para><programlisting>killall ovsdb-server ovs-vswitchd
574rm -rf /etc/openvswitch/*
575mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge
576mount -t hugetlbfs nodev /mnt/huge
577modprobe igb_uio
578dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Configure the OVS
579 bridge:<programlisting>export DB_SOCK=/var/run/openvswitch/db.sock
580ovsdb-tool create /etc/openvswitch/conf.db /
581/usr/share/openvswitch/vswitch.ovsschema
582ovsdb-server --remote=punix:$DB_SOCK /
583--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
584ovs-vsctl --no-wait init
585ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10
586ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xcc
587ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048
588ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
589ovs-vswitchd unix:$DB_SOCK --pidfile --detach /
590--log-file=/var/log/openvswitch/ovs-vswitchd.log
591ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev
592ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface /
593vhost-user1 type=dpdkvhostuser ofport_request=1
594ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface /
595vhost-user2 type=dpdkvhostuser ofport_request=2
596ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 /
597type=dpdk options:dpdk-devargs=0000:03:00.0 ofport_request=3
598ovs-ofctl del-flows ovsbr0
599ovs-ofctl add-flow ovsbr0 in_port=3,action=output:2
600ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Import a
601 Docker container:<programlisting>docker import enea-image-virtualization-guest-qemux86-64.tar.gz el7_guest</programlisting>Start
602 the first Docker:<programlisting>docker run -it --rm --cpuset-cpus=4,5 /
603-v /var/run/openvswitch/:/var/run/openvswitch/ /
604-v /mnt/huge:/mnt/huge el7_guest /bin/bash</programlisting>Start the testpmd
605 application in Docker1:<programlisting>testpmd -c 0x30 -n 2 --file-prefix prog1 --socket-mem 512 --no-pci /
606--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user1 /
607-d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan /
608--disable-rss -i --portmask=0x1 --coremask=0x20 --nb-cores=1 /
609--rxq=1 --txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>Configure
610 it in termination mode:<programlisting>set fwd rxonly</programlisting>Run
611 the testpmd application:<programlisting>start</programlisting>Open a
612 new console to the host and start the second Docker
613 instance:<programlisting>docker run -it --rm --cpuset-cpus=0,1 -v /var/run/openvswitch/:/var/run/openvswitch/ /
614-v /mnt/huge:/mnt/huge el7_guest /bin/bash</programlisting>In the second
615 container start testpmd:<programlisting>testpmd -c 0x0F --file-prefix prog2 --socket-mem 512 --no-pci /
616--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user2 /
617-d /usr/lib/librte_pmd_virtio.so.1.1 -- -i --disable-hw-vlan</programlisting>Run
618 the TestPmd application in the second Docker:<programlisting>testpmd -c 0x3 -n 2 --file-prefix prog2 --socket-mem 512 --no-pci /
619--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user2 /
620-d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan /
621--disable-rss -i --portmask=0x1 --coremask=0x2 --nb-cores=1 --rxq=1 /
622--txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>In
623 the testpmd shell, run:<programlisting>start</programlisting>Start
624 pktgen traffic by running the following command in pktgen
625 CLI:<programlisting>start 0</programlisting>To record traffic
626 results:<programlisting>show port stats 0</programlisting>This
627 should be used in testpmd applications.</para>
628
629 <table>
630 <title>Results</title>
631
632 <tgroup cols="5">
633 <tbody>
634 <row>
635 <entry align="center"><emphasis
636 role="bold">Bytes</emphasis></entry>
637
638 <entry align="center"><emphasis role="bold">Target 1 -
639 pktgen pps TX</emphasis></entry>
640
641 <entry align="center"><emphasis role="bold">Target 2 -
642 (forwarding) testpmd pps RX</emphasis></entry>
643
644 <entry align="center"><emphasis role="bold">Target 2 -
645 (forwarding) testpmd pps TX</emphasis></entry>
646
647 <entry align="center"><emphasis role="bold">Target 2 -
648 (termination) testpmd pps RX</emphasis></entry>
649 </row>
650
651 <row>
652 <entry role="bold"><emphasis
653 role="bold">64</emphasis></entry>
654
655 <entry>14844628</entry>
656
657 <entry>5643565</entry>
658
659 <entry>3459922</entry>
660
661 <entry>3457326</entry>
662 </row>
663
664 <row>
665 <entry><emphasis role="bold">128</emphasis></entry>
666
667 <entry>8496962</entry>
668
669 <entry>5667860</entry>
670
671 <entry>3436811</entry>
672
673 <entry>3438918</entry>
674 </row>
675
676 <row>
677 <entry role="bold"><emphasis
678 role="bold">256</emphasis></entry>
679
680 <entry>4532372</entry>
681
682 <entry>4532362</entry>
683
684 <entry>3456623</entry>
685
686 <entry>3457115</entry>
687 </row>
688
689 <row>
690 <entry><emphasis role="bold">512</emphasis></entry>
691
692 <entry>2367641</entry>
693
694 <entry>2349450</entry>
695
696 <entry>2349450</entry>
697
698 <entry>2349446</entry>
699 </row>
700 </tbody>
701 </tgroup>
702 </table>
703 </section>
704 </section>
705
706 <section id="pxe-config-docker">
 707 <title>SR-IOV in Docker</title>
708
709 <para>PCI passthrough tests using pktgen and testpmd in Docker.</para>
710
711 <para>pktgen[DPDK]Docker - PHY - Docker[DPDK] testpmd</para>
712
713 <para>Measurements:</para>
714
715 <itemizedlist>
716 <listitem>
717 <para>RX packets per second in testpmd (with testpmd configured in
718 rxonly mode).</para>
719 </listitem>
720 </itemizedlist>
721
722 <section id="target-setup">
723 <title>Test Setup</title>
724
725 <para>Boot Enea NFV Access from SSD:<programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
726isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable clocksource=tsc /
727tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 processor.max_cstate=0 /
728mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt intel_iommu=on hugepagesz=1GB /
729hugepages=8 default_hugepagesz=1GB hugepagesz=2M hugepages=2048 /
 730vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Allow unsafe
731 interrupts:<programlisting>echo 1 &gt; /sys/module/vfio_iommu_type1/parameters/allow_unsafe_interrupts</programlisting>Configure
732 DPDK:<programlisting>mkdir -p /mnt/huge
733mount -t hugetlbfs nodev /mnt/huge
734dpdk-devbind.py --bind=ixgbe 0000:03:00.0
735ifconfig eno3 192.168.1.2
736echo 2 &gt; /sys/class/net/eno3/device/sriov_numvfs
737modprobe vfio-pci
738dpdk-devbind.py --bind=vfio-pci 0000:03:10.0
739dpdk-devbind.py --bind=vfio-pci 0000:03:10.2</programlisting>Start two docker
740 containers:<programlisting>docker run --privileged -it --rm -v /mnt/huge:/mnt/huge/ /
741--device /dev/vfio/vfio el7_guest /bin/bash
742docker run --privileged -it --rm -v /mnt/huge:/mnt/huge/ /
743--device /dev/vfio/vfio el7_guest /bin/bash</programlisting>In the first
744 container start pktgen:<programlisting>cd /usr/share/apps/pktgen/
745./pktgen -c 0x1f -w 0000:03:10.0 -n 1 --file-prefix pg1 /
746--socket-mem 1024 -- -P -m "[3:4].0"</programlisting>In the pktgen prompt set
747 the destination MAC address:<programlisting>set mac 0 XX:XX:XX:XX:XX:XX
748str</programlisting>In the second container start testpmd:<programlisting>testpmd -c 0x7 -n 1 -w 0000:03:10.2 -- -i --portmask=0x1 /
749--txd=256 --rxd=256 --port-topology=chained</programlisting>In the testpmd
750 prompt set <emphasis role="bold">forwarding</emphasis>
751 rxonly:<programlisting>set fwd rxonly
752start</programlisting><table>
753 <title>Results</title>
754
755 <tgroup cols="5">
756 <tbody>
757 <row>
758 <entry align="center"><emphasis
759 role="bold">Bytes</emphasis></entry>
760
761 <entry align="center"><emphasis role="bold">pktgen pps
762 TX</emphasis></entry>
763
764 <entry align="center"><emphasis role="bold">testpmd pps
765 RX</emphasis></entry>
766
767 <entry align="center"><emphasis role="bold">pktgen MBits/s
768 TX</emphasis></entry>
769
770 <entry align="center"><emphasis role="bold">throughput
771 (%)</emphasis></entry>
772 </row>
773
774 <row>
775 <entry role="bold"><emphasis
776 role="bold">64</emphasis></entry>
777
778 <entry>14525286</entry>
779
780 <entry>14190869</entry>
781
782 <entry>9739</entry>
783
784 <entry>97.7</entry>
785 </row>
786
787 <row>
788 <entry><emphasis role="bold">128</emphasis></entry>
789
790 <entry>8456960</entry>
791
792 <entry>8412172</entry>
793
794 <entry>10013</entry>
795
796 <entry>99.4</entry>
797 </row>
798
799 <row>
800 <entry role="bold"><emphasis
801 role="bold">256</emphasis></entry>
802
803 <entry>4566624</entry>
804
805 <entry>4526587</entry>
806
807 <entry>10083</entry>
808
809 <entry>99.1</entry>
810 </row>
811
812 <row>
813 <entry><emphasis role="bold">512</emphasis></entry>
814
815 <entry>2363744</entry>
816
817 <entry>2348015</entry>
818
819 <entry>10060</entry>
820
821 <entry>99.3</entry>
822 </row>
823 </tbody>
824 </tgroup>
825 </table></para>
826 </section>
827 </section>
828 </section>
829
830 <section id="vm-benchmarks">
831 <title>VM related benchmarks</title>
832
833 <section id="usecase-four">
834 <title>Forward/termination traffic in one VM</title>
835
836 <para>Benchmarking traffic (UDP) forwarding and termination using
837 testpmd in a virtual machine.</para>
838
839 <para>The Pktgen application is used to generate traffic that will
840 reach testpmd running on a virtual machine, and be forwarded back to
841 source on the return trip. With the same setup a second measurement
842 will be done with traffic termination in the virtual machine.</para>
843
844 <para>This test case measures:</para>
845
846 <itemizedlist>
847 <listitem>
848 <para>pktgen TX, RX in packets per second (pps) and Mbps</para>
849 </listitem>
850
851 <listitem>
852 <para>testpmd TX, RX in packets per second (pps)</para>
853 </listitem>
854
855 <listitem>
856 <para>divide <emphasis role="bold">testpmd RX</emphasis> by
857 <emphasis role="bold">pktgen TX</emphasis> in pps to obtain the
858 throughput in percentages (%)</para>
859 </listitem>
860 </itemizedlist>
861
862 <section id="targetone-usecasefour">
863 <title>Test Setup for Target 1</title>
864
865 <para>Start with the steps below:</para>
866
867 <para>SSD boot using the following <literal>grub.cfg
868 </literal>entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
869isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
870clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
871processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt /
872intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
873hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting></para>
874
875 <para>Kill unnecessary services: <programlisting>killall ovsdb-server ovs-vswitchd
876rm -rf /etc/openvswitch/*
877mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge
878mount -t hugetlbfs nodev /mnt/huge
879modprobe igb_uio
880dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
881 pktgen:<programlisting>cd /usr/share/apps/pktgen/
882./pktgen -c 0x7 -n 4 --proc-type auto --socket-mem 256 /
883-w 0000:03:00.0 -- -P -m "[1:2].0"</programlisting>Set pktgen frame size to
884 use from [64, 128, 256, 512]:<programlisting>set 0 size 64</programlisting></para>
885 </section>
886
887 <section id="targettwo-usecasefive">
888 <title>Test Setup for Target 2</title>
889
890 <para>Start by following the steps below:</para>
891
892 <para>SSD boot using the following <literal>grub.cfg</literal>
893 entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
894isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
895clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
896processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt /
897intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
898hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Kill
899 unnecessary services: <programlisting>killall ovsdb-server ovs-vswitchd
900rm -rf /etc/openvswitch/*
901mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge
902mount -t hugetlbfs nodev /mnt/huge
903modprobe igb_uio
904dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Configure
905 OVS:<programlisting>export DB_SOCK=/var/run/openvswitch/db.sock
906ovsdb-tool create /etc/openvswitch/conf.db /
907/usr/share/openvswitch/vswitch.ovsschema
908ovsdb-server --remote=punix:$DB_SOCK /
909--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
910ovs-vsctl --no-wait init
911ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10
912ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc
913ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048
914ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
915ovs-vswitchd unix:$DB_SOCK --pidfile --detach /
916--log-file=/var/log/openvswitch/ovs-vswitchd.log
917
918ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev
919ovs-vsctl add-port ovsbr0 vhost-user1 /
920-- set Interface vhost-user1 type=dpdkvhostuser -- set Interface /
921vhost-user1 ofport_request=2
922ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 /
923type=dpdk options:dpdk-devargs=0000:03:00.0 /
924-- set Interface dpdk0 ofport_request=1
925chmod 777 /var/run/openvswitch/vhost-user1
926
927ovs-ofctl del-flows ovsbr0
928ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2
929ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Launch
930 QEMU:<programlisting>taskset -c 0,1 qemu-system-x86_64 -cpu host,+invtsc,migratable=no /
931-M q35 -smp cores=2,sockets=1 -vcpu 0,affinity=0 -vcpu 1,affinity=1 /
932-enable-kvm -nographic -realtime mlock=on -kernel /mnt/qemu/bzImage /
933-drive file=/mnt/qemu/enea-image-virtualization-guest-qemux86-64.ext4,/
934if=virtio,format=raw -m 4096 -object memory-backend-file,id=mem,/
935size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem /
936-mem-prealloc -chardev socket,id=char0,path=/var/run/openvswitch/vhost-user1 /
937-netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce /
938-device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,/
939mrg_rxbuf=on,rx_queue_size=1024,csum=off,gso=off,guest_tso4=off,/
940guest_tso6=off,guest_ecn=off -append 'root=/dev/vda console=ttyS0 /
941hugepagesz=2M hugepages=1024 isolcpus=1 nohz_full=1 rcu_nocbs=1 /
942irqaffinity=0 rcu_nocb_poll intel_pstate=disable intel_idle.max_cstate=0 /
943processor.max_cstate=0 mce=ignore_ce audit=0'</programlisting>Inside QEMU,
944 configure DPDK: <programlisting>mkdir -p /mnt/huge
945mount -t hugetlbfs nodev /mnt/huge
946modprobe igb_uio
947dpdk-devbind --bind=igb_uio 0000:00:02.0</programlisting>Inside QEMU, run
948 testpmd: <programlisting>testpmd -c 0x3 -n 2 -d librte_pmd_virtio.so.1.1 /
949-- --burst 64 --disable-hw-vlan --disable-rss -i --portmask=0x1 /
950--coremask=0x2 --nb-cores=1 --rxq=1 --txq=1 --txd=512 --rxd=512 /
951--txqflags=0xf00 --port-topology=chained</programlisting>For the <emphasis
952 role="bold">Forwarding test</emphasis>, start testpmd
953 directly:<programlisting>start</programlisting>For the <emphasis
954 role="bold">Termination test</emphasis>, set testpmd to only
955 receive, then start it:<programlisting>set fwd rxonly
956start</programlisting>On target 1, you may start pktgen traffic
957 now:<programlisting>start 0</programlisting>On target 2, use this
958 command to refresh the testpmd display and note the highest
959 values:<programlisting>show port stats 0</programlisting>To stop
960 traffic from pktgen, in order to choose a different frame
961 size:<programlisting>stop 0</programlisting>To clear numbers in
962 testpmd:<programlisting>clear port stats
963show port stats 0</programlisting><table>
964 <title>Results in forwarding mode</title>
965
966 <tgroup cols="8">
967 <tbody>
968 <row>
969 <entry align="center"><emphasis
970 role="bold">Bytes</emphasis></entry>
971
972 <entry align="center"><emphasis role="bold">pktgen pps
973 RX</emphasis></entry>
974
975 <entry align="center"><emphasis role="bold">pktgen pps
976 TX</emphasis></entry>
977
978 <entry align="center"><emphasis role="bold">testpmd pps
979 RX</emphasis></entry>
980
981 <entry align="center"><emphasis role="bold">testpmd pps
982 TX</emphasis></entry>
983
984 <entry align="center"><emphasis role="bold">pktgen MBits/s
985 RX</emphasis></entry>
986
987 <entry align="center"><emphasis role="bold">pktgen MBits/s
988 TX</emphasis></entry>
989
990 <entry align="center"><emphasis role="bold">throughput
991 (%)</emphasis></entry>
992 </row>
993
994 <row>
995 <entry role="bold"><emphasis
996 role="bold">64</emphasis></entry>
997
998 <entry>7755769</entry>
999
1000 <entry>14858714</entry>
1001
1002 <entry>7755447</entry>
1003
1004 <entry>7755447</entry>
1005
1006 <entry>5207</entry>
1007
1008 <entry>9984</entry>
1009
1010 <entry>52.2</entry>
1011 </row>
1012
1013 <row>
1014 <entry><emphasis role="bold">128</emphasis></entry>
1015
1016 <entry>7714626</entry>
1017
1018 <entry>8435184</entry>
1019
1020 <entry>7520349</entry>
1021
1022 <entry>6932520</entry>
1023
1024 <entry>8204</entry>
1025
1026 <entry>9986</entry>
1027
1028 <entry>82.1</entry>
1029 </row>
1030
1031 <row>
1032 <entry role="bold"><emphasis
1033 role="bold">256</emphasis></entry>
1034
1035 <entry>4528847</entry>
1036
1037 <entry>4528854</entry>
1038
1039 <entry>4529030</entry>
1040
1041 <entry>4529034</entry>
1042
1043 <entry>9999</entry>
1044
1045 <entry>9999</entry>
1046
1047 <entry>99.9</entry>
1048 </row>
1049 </tbody>
1050 </tgroup>
1051 </table><table>
1052 <title>Results in termination mode</title>
1053
1054 <tgroup cols="5">
1055 <tbody>
1056 <row>
1057 <entry align="center"><emphasis
1058 role="bold">Bytes</emphasis></entry>
1059
1060 <entry align="center"><emphasis role="bold">pktgen pps
1061 TX</emphasis></entry>
1062
1063 <entry align="center"><emphasis role="bold">testpmd pps
1064 RX</emphasis></entry>
1065
1066 <entry align="center"><emphasis role="bold">pktgen MBits/s
1067 TX</emphasis></entry>
1068
1069 <entry align="center"><emphasis role="bold">throughput
1070 (%)</emphasis></entry>
1071 </row>
1072
1073 <row>
1074 <entry role="bold"><emphasis
1075 role="bold">64</emphasis></entry>
1076
1077 <entry>15138992</entry>
1078
1079 <entry>7290663</entry>
1080
1081 <entry>10063</entry>
1082
1083 <entry>48.2</entry>
1084 </row>
1085
1086 <row>
1087 <entry><emphasis role="bold">128</emphasis></entry>
1088
1089 <entry>8426825</entry>
1090
1091 <entry>6902646</entry>
1092
1093 <entry>9977</entry>
1094
1095 <entry>81.9</entry>
1096 </row>
1097
1098 <row>
1099 <entry role="bold"><emphasis
1100 role="bold">256</emphasis></entry>
1101
1102 <entry>4528957</entry>
1103
1104 <entry>4528912</entry>
1105
1106 <entry>9999</entry>
1107
1108 <entry>100</entry>
1109 </row>
1110 </tbody>
1111 </tgroup>
1112 </table></para>
1113 </section>
1114 </section>
1115
1116 <section id="usecase-six">
1117 <title>Forward traffic between two VMs</title>
1118
1119 <para>Benchmark a combo test using two virtual machines, the first
1120 forwarding traffic to the second, which terminates it.</para>
1121
1122 <para>Measurements are made in:</para>
1123
1124 <itemizedlist>
1125 <listitem>
1126 <para>pktgen TX in pps and Mbits/s</para>
1127 </listitem>
1128
1129 <listitem>
1130 <para>testpmd TX and RX pps in VM1</para>
1131 </listitem>
1132
1133 <listitem>
1134 <para>testpmd RX pps in VM2</para>
1135 </listitem>
1136
1137 <listitem>
1138 <para>throughput as a percentage, by dividing<emphasis role="bold">
1139 VM2 testpmd RX pps</emphasis> by <emphasis role="bold">pktgen TX
1140 pps</emphasis></para>
1141 </listitem>
1142 </itemizedlist>
1143
1144 <section id="targetone-usecase-six">
1145 <title>Test Setup for Target 1</title>
1146
1147 <para>Start by doing the following:</para>
1148
1149 <para>SSD boot using the following <literal>grub.cfg</literal>
1150 entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
1151isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
1152clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
1153processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt /
1154intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
1155hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Kill
1156 Services:<programlisting>killall ovsdb-server ovs-vswitchd
1157rm -rf /etc/openvswitch/*
1158mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge
1159mount -t hugetlbfs nodev /mnt/huge
1160modprobe igb_uio
1161dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
1162 pktgen:<programlisting>cd /usr/share/apps/pktgen/
1163./pktgen -c 0x7 -n 4 --proc-type auto --socket-mem 256 /
1164-w 0000:03:00.0 -- -P -m "[1:2].0"</programlisting>Set pktgen frame size to
1165 use from [64, 128, 256, 512]:<programlisting>set 0 size 64</programlisting></para>
1166 </section>
1167
1168 <section id="targettwo-usecase-six">
1169 <title>Test Setup for Target 2</title>
1170
1171 <para>Start by doing the following:</para>
1172
1173 <para>SSD boot using the following <literal>grub.cfg</literal>
1174 entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
1175isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
1176clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
1177processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt /
1178intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
1179hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Kill
1180 Services:<programlisting>killall ovsdb-server ovs-vswitchd
1181rm -rf /etc/openvswitch/*
1182mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge
1183mount -t hugetlbfs nodev /mnt/huge
1184modprobe igb_uio
1185dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Configure
1186 OVS:<programlisting>export DB_SOCK=/var/run/openvswitch/db.sock
1187ovsdb-tool create /etc/openvswitch/conf.db /
1188/usr/share/openvswitch/vswitch.ovsschema
1189ovsdb-server --remote=punix:$DB_SOCK /
1190--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
1191ovs-vsctl --no-wait init
1192ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10
1193ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc
1194ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048
1195ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
1196ovs-vswitchd unix:$DB_SOCK --pidfile /
1197--detach --log-file=/var/log/openvswitch/ovs-vswitchd.log
1198
1199
1200ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev
1201ovs-vsctl add-port ovsbr0 dpdk0 /
1202-- set Interface dpdk0 type=dpdk options:dpdk-devargs=0000:03:00.0 ofport_request=1
1203ovs-vsctl add-port ovsbr0 vhost-user1 /
1204-- set Interface vhost-user1 type=dpdkvhostuser ofport_request=2
1205ovs-vsctl add-port ovsbr0 vhost-user2 /
1206-- set Interface vhost-user2 type=dpdkvhostuser ofport_request=3
1207
1208
1209ovs-ofctl del-flows ovsbr0
1210ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2
1211ovs-ofctl add-flow ovsbr0 in_port=2,action=output:3</programlisting>Launch
1212 first QEMU instance, VM1:<programlisting>taskset -c 0,1 qemu-system-x86_64 -cpu host,+invtsc,migratable=no -M q35 /
1213-smp cores=2,sockets=1 -vcpu 0,affinity=0 -vcpu 1,affinity=1 -enable-kvm /
1214-nographic -realtime mlock=on -kernel /home/root/qemu/bzImage /
1215-drive file=/home/root/qemu/enea-image-virtualization-guest-qemux86-64.ext4,/
1216if=virtio,format=raw -m 2048 -object memory-backend-file,id=mem,/
1217size=2048M,mem-path=/mnt/huge,share=on -numa node,memdev=mem /
1218-mem-prealloc -chardev socket,id=char0,path=/var/run/openvswitch/vhost-user1 /
1219-netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce /
1220-device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,/
1221mrg_rxbuf=on,rx_queue_size=1024,csum=off,gso=off,guest_tso4=off,/
1222guest_tso6=off,guest_ecn=off -append 'root=/dev/vda console=ttyS0 /
1223hugepagesz=2M hugepages=512 isolcpus=1 nohz_full=1 rcu_nocbs=1 /
1224irqaffinity=0 rcu_nocb_poll intel_pstate=disable intel_idle.max_cstate=0 /
1225processor.max_cstate=0 mce=ignore_ce audit=0'</programlisting>Connect to
1226 Target 2 through a new SSH session and run a second QEMU instance
1227 (to get its own console, separate from instance VM1). We shall call
1228 this VM2:<programlisting>taskset -c 4,5 qemu-system-x86_64 -cpu host,+invtsc,migratable=no /
1229-M q35 -smp cores=2,sockets=1 -vcpu 0,affinity=4 -vcpu 1,affinity=5 /
1230-enable-kvm -nographic -realtime mlock=on -kernel /home/root/qemu2/bzImage /
1231-drive file=/home/root/qemu2/enea-image-virtualization-guest-qemux86-64.ext4,/
1232if=virtio,format=raw -m 2048 -object memory-backend-file,id=mem,size=2048M,/
1233mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc /
1234-chardev socket,id=char1,path=/var/run/openvswitch/vhost-user2 /
1235-netdev type=vhost-user,id=mynet1,chardev=char1,vhostforce /
1236-device virtio-net-pci,mac=52:54:00:00:00:02,netdev=mynet1,/
1237mrg_rxbuf=on,rx_queue_size=1024,csum=off,gso=off,guest_tso4=off,/
1238guest_tso6=off,guest_ecn=off -append 'root=/dev/vda console=ttyS0 /
1239hugepagesz=2M hugepages=512 isolcpus=1 nohz_full=1 rcu_nocbs=1 /
1240irqaffinity=0 rcu_nocb_poll intel_pstate=disable intel_idle.max_cstate=0 /
1241processor.max_cstate=0 mce=ignore_ce audit=0'</programlisting>Configure DPDK
1242 inside VM1:<programlisting>mkdir -p /mnt/huge
1243mount -t hugetlbfs nodev /mnt/huge
1244modprobe igb_uio
1245dpdk-devbind --bind=igb_uio 0000:00:02.0</programlisting>Run testpmd inside
1246 VM1:<programlisting>testpmd -c 0x3 -n 2 -d librte_pmd_virtio.so.1.1 /
1247-- --burst 64 --disable-hw-vlan --disable-rss -i /
1248--portmask=0x1 --coremask=0x2 --nb-cores=1 --rxq=1 /
1249--txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>Start
1250 testpmd inside VM1:<programlisting>start</programlisting>Configure
1251 DPDK inside VM2:<programlisting>mkdir -p /mnt/huge
1252mount -t hugetlbfs nodev /mnt/huge
1253modprobe igb_uio
1254dpdk-devbind --bind=igb_uio 0000:00:02.0</programlisting>Run testpmd inside
1255 VM2:<programlisting>testpmd -c 0x3 -n 2 -d librte_pmd_virtio.so.1.1 /
1256-- --burst 64 --disable-hw-vlan --disable-rss -i --portmask=0x1 /
1257--coremask=0x2 --nb-cores=1 --rxq=1 --txq=1 --txd=512 /
1258--rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>Set VM2 for
1259 termination and start testpmd:<programlisting>set fwd rxonly
1260start</programlisting>On target 1, start pktgen traffic:<programlisting>start 0</programlisting>Use
1261 this command to refresh testpmd display in VM1 and VM2 and note the
1262 highest values:<programlisting>show port stats 0</programlisting>To
1263 stop traffic from pktgen, in order to choose a different frame
1264 size:<programlisting>stop 0</programlisting>To clear numbers in
1265 testpmd:<programlisting>clear port stats
1266show port stats 0</programlisting>For VM1, we record the stats relevant for
1267 <emphasis role="bold">forwarding</emphasis>:</para>
1268
1269 <itemizedlist>
1270 <listitem>
1271 <para>RX, TX in pps</para>
1272 </listitem>
1273 </itemizedlist>
1274
1275 <para>Only Rx-pps and Tx-pps numbers are important here; they change
1276 every time stats are displayed as long as there is traffic. Run the
1277 command a few times and pick the best (maximum) values seen.</para>
1278
1279 <para>For VM2, we record the stats relevant for <emphasis
1280 role="bold">termination</emphasis>:</para>
1281
1282 <itemizedlist>
1283 <listitem>
1284 <para>RX in pps (TX will be 0)</para>
1285 </listitem>
1286 </itemizedlist>
1287
1288 <para>For pktgen, we record only the TX side, because the flow is
1289 terminated, with no RX traffic reaching pktgen:</para>
1290
1291 <itemizedlist>
1292 <listitem>
1293 <para>TX in pps and Mbit/s</para>
1294 </listitem>
1295 </itemizedlist>
1296
1297 <table>
1298 <title>Results in forwarding mode</title>
1299
1300 <tgroup cols="7">
1301 <tbody>
1302 <row>
1303 <entry align="center"><emphasis
1304 role="bold">Bytes</emphasis></entry>
1305
1306 <entry align="center"><emphasis role="bold">pktgen pps
1307 TX</emphasis></entry>
1308
1309 <entry align="center"><emphasis role="bold">VM1 testpmd pps
1310 RX</emphasis></entry>
1311
1312 <entry align="center"><emphasis role="bold">VM1 testpmd pps
1313 TX</emphasis></entry>
1314
1315 <entry align="center"><emphasis role="bold">VM2 testpmd pps
1316 RX</emphasis></entry>
1317
1318 <entry align="center"><emphasis role="bold">pktgen MBits/s
1319 TX</emphasis></entry>
1320
1321 <entry align="center"><emphasis role="bold">throughput
1322 (%)</emphasis></entry>
1323 </row>
1324
1325 <row>
1326 <entry role="bold"><emphasis
1327 role="bold">64</emphasis></entry>
1328
1329 <entry>14845113</entry>
1330
1331 <entry>6826540</entry>
1332
1333 <entry>5389680</entry>
1334
1335 <entry>5383577</entry>
1336
1337 <entry>9975</entry>
1338
1339 <entry>36.2</entry>
1340 </row>
1341
1342 <row>
1343 <entry><emphasis role="bold">128</emphasis></entry>
1344
1345 <entry>8426683</entry>
1346
1347 <entry>6825857</entry>
1348
1349 <entry>5386971</entry>
1350
1351 <entry>5384530</entry>
1352
1353 <entry>9976</entry>
1354
1355 <entry>63.9</entry>
1356 </row>
1357
1358 <row>
1359 <entry role="bold"><emphasis
1360 role="bold">256</emphasis></entry>
1361
1362 <entry>4528894</entry>
1363
1364 <entry>4507975</entry>
1365
1366 <entry>4507958</entry>
1367
1368 <entry>4532457</entry>
1369
1370 <entry>9999</entry>
1371
1372 <entry>100</entry>
1373 </row>
1374 </tbody>
1375 </tgroup>
1376 </table>
1377 </section>
1378 </section>
1379
1380 <section id="pxe-config-vm">
1381 <title>SR-IOV in Virtual Machines</title>
1382
1383 <para>PCI passthrough tests using pktgen and testpmd in virtual
1384 machines.</para>
1385
1386 <para>pktgen[DPDK]VM - PHY - VM[DPDK] testpmd.</para>
1387
1388 <para>Measurements:</para>
1389
1390 <itemizedlist>
1391 <listitem>
1392 <para>pktgen to testpmd in <emphasis
1393 role="bold">forwarding</emphasis> mode.</para>
1394 </listitem>
1395
1396 <listitem>
1397 <para>pktgen to testpmd in <emphasis
1398 role="bold">termination</emphasis> mode.</para>
1399 </listitem>
1400 </itemizedlist>
1401
1402 <section id="test-setup-target-four">
1403 <title>Test Setup</title>
1404
1405 <para>SSD boot using the following <literal>grub.cfg</literal>
1406 entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
1407isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
1408clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
1409processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt /
1410intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
1411hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Stop
1412 other services and mount hugepages: <programlisting>systemctl stop openvswitch
1413mkdir -p /mnt/huge
1414mount -t hugetlbfs hugetlbfs /mnt/huge</programlisting>Configure SR-IOV
1415 interfaces:<programlisting>/usr/share/usertools/dpdk-devbind.py --bind=ixgbe 0000:03:00.0
1416echo 2 &gt; /sys/class/net/eno3/device/sriov_numvfs
1417ifconfig eno3 10.0.0.1
1418modprobe vfio_pci
1419/usr/share/usertools/dpdk-devbind.py --bind=vfio-pci 0000:03:10.0
1420/usr/share/usertools/dpdk-devbind.py --bind=vfio-pci 0000:03:10.2
1421ip link set eno3 vf 0 mac 0c:c4:7a:E5:0F:48
1422ip link set eno3 vf 1 mac 0c:c4:7a:BF:52:E7</programlisting>Launch two QEMU
1423 instances: <programlisting>taskset -c 4,5 qemu-system-x86_64 -cpu host,+invtsc,migratable=no -M /
1424q35 -smp cores=2,sockets=1 -vcpu 0,affinity=4 -vcpu 1,affinity=5 -enable-kvm /
1425-nographic -kernel /mnt/qemu/bzImage /
1426-drive file=/mnt/qemu/enea-image-virtualization-guest-qemux86-64.ext4,if=virtio,/
1427format=raw -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,/
1428share=on -numa node,memdev=mem -mem-prealloc -device vfio-pci,host=03:10.0 /
1429-append 'root=/dev/vda console=ttyS0 hugepagesz=2M hugepages=1024 /
1430isolcpus=1 nohz_full=1 rcu_nocbs=1 irqaffinity=0 rcu_nocb_poll /
1431intel_pstate=disable intel_idle.max_cstate=0 /
1432processor.max_cstate=0 mce=ignore_ce audit=0'
1433
1434
1435taskset -c 2,3 qemu-system-x86_64 -cpu host,+invtsc,migratable=no -M /
1436q35 -smp cores=2,sockets=1 -vcpu 0,affinity=2 -vcpu 1,affinity=3 -enable-kvm /
1437-nographic -kernel /mnt/qemu/bzImage /
1438-drive file=/mnt/qemu/enea-image2-virtualization-guest-qemux86-64.ext4,if=virtio,/
1439format=raw -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,/
1440share=on -numa node,memdev=mem -mem-prealloc -device vfio-pci,host=03:10.2 /
1441-append 'root=/dev/vda console=ttyS0 hugepagesz=2M hugepages=1024 /
1442isolcpus=1 nohz_full=1 rcu_nocbs=1 irqaffinity=0 rcu_nocb_poll /
1443intel_pstate=disable intel_idle.max_cstate=0 processor.max_cstate=0 /
1444mce=ignore_ce audit=0'</programlisting>In the first VM, mount hugepages and
1445 start pktgen:<programlisting>mkdir -p /mnt/huge &amp;&amp; \
1446mount -t hugetlbfs hugetlbfs /mnt/huge
1447modprobe igb_uio
1448/usr/share/usertools/dpdk-devbind.py --bind=igb_uio 0000:00:03.0
1449cd /usr/share/apps/pktgen
1450./pktgen -c 0x3 -- -P -m "1.0"</programlisting>In the pktgen console set the
1451 MAC of the destination and start generating
1452 packets:<programlisting>set mac 0 0C:C4:7A:BF:52:E7
1453str</programlisting>In the second VM, mount hugepages and start
1454 testpmd:<programlisting>mkdir -p /mnt/huge &amp;&amp; \
1455mount -t hugetlbfs hugetlbfs /mnt/huge
1456modprobe igb_uio
1457/usr/share/usertools/dpdk-devbind.py --bind=igb_uio 0000:00:03.0
1458testpmd -c 0x3 -n 2 -- -i --txd=512 --rxd=512 --port-topology=chained /
1459--eth-peer=0,0c:c4:7a:e5:0f:48</programlisting>In order to enable <emphasis
1460 role="bold">forwarding</emphasis> mode, in the testpmd console,
1461 run:<programlisting>set fwd mac
1462start</programlisting>In order to enable <emphasis
1463 role="bold">termination</emphasis> mode, in the testpmd console,
1464 run:<programlisting>set fwd rxonly
1465start</programlisting><table>
1466 <title>Results in forwarding mode</title>
1467
1468 <tgroup cols="5">
1469 <tbody>
1470 <row>
1471 <entry align="center"><emphasis
1472 role="bold">Bytes</emphasis></entry>
1473
1474 <entry align="center"><emphasis role="bold">VM1 pktgen pps
1475 TX</emphasis></entry>
1476
1477 <entry align="center"><emphasis role="bold">VM1 pktgen pps
1478 RX</emphasis></entry>
1479
1480 <entry align="center"><emphasis role="bold">VM2 testpmd
1481 pps RX</emphasis></entry>
1482
1483 <entry align="center"><emphasis role="bold">VM2 testpmd
1484 pps TX</emphasis></entry>
1485 </row>
1486
1487 <row>
1488 <entry role="bold"><emphasis
1489 role="bold">64</emphasis></entry>
1490
1491 <entry>7105645</entry>
1492
1493 <entry>7103976</entry>
1494
1495 <entry>7101487</entry>
1496
1497 <entry>7101487</entry>
1498 </row>
1499
1500 <row>
1501 <entry><emphasis role="bold">128</emphasis></entry>
1502
1503 <entry>5722795</entry>
1504
1505 <entry>5722252</entry>
1506
1507 <entry>5704219</entry>
1508
1509 <entry>5704219</entry>
1510 </row>
1511
1512 <row>
1513 <entry role="bold"><emphasis
1514 role="bold">256</emphasis></entry>
1515
1516 <entry>3454075</entry>
1517
1518 <entry>3455144</entry>
1519
1520 <entry>3452020</entry>
1521
1522 <entry>3452020</entry>
1523 </row>
1524
1525 <row>
1526 <entry role="bold"><emphasis
1527 role="bold">512</emphasis></entry>
1528
1529 <entry>1847751</entry>
1530
1531 <entry>1847751</entry>
1532
1533 <entry>1847751</entry>
1534
1535 <entry>1847751</entry>
1536 </row>
1537
1538 <row>
1539 <entry role="bold"><emphasis
1540 role="bold">1024</emphasis></entry>
1541
1542 <entry>956214</entry>
1543
1544 <entry>956214</entry>
1545
1546 <entry>956214</entry>
1547
1548 <entry>956214</entry>
1549 </row>
1550
1551 <row>
1552 <entry role="bold"><emphasis
1553 role="bold">1500</emphasis></entry>
1554
1555 <entry>797174</entry>
1556
1557 <entry>797174</entry>
1558
1559 <entry>797174</entry>
1560
1561 <entry>797174</entry>
1562 </row>
1563 </tbody>
1564 </tgroup>
1565 </table><table>
1566 <title>Results in termination mode</title>
1567
1568 <tgroup cols="3">
1569 <tbody>
1570 <row>
1571 <entry align="center"><emphasis
1572 role="bold">Bytes</emphasis></entry>
1573
1574 <entry align="center"><emphasis role="bold">VM1 pktgen pps
1575 TX</emphasis></entry>
1576
1577 <entry align="center"><emphasis role="bold">VM2 testpmd
1578 RX</emphasis></entry>
1579 </row>
1580
1581 <row>
1582 <entry role="bold"><emphasis
1583 role="bold">64</emphasis></entry>
1584
1585 <entry>14204580</entry>
1586
1587 <entry>14205063</entry>
1588 </row>
1589
1590 <row>
1591 <entry><emphasis role="bold">128</emphasis></entry>
1592
1593 <entry>8424611</entry>
1594
1595 <entry>8424611</entry>
1596 </row>
1597
1598 <row>
1599 <entry role="bold"><emphasis
1600 role="bold">256</emphasis></entry>
1601
1602 <entry>4529024</entry>
1603
1604 <entry>4529024</entry>
1605 </row>
1606
1607 <row>
1608 <entry><emphasis role="bold">512</emphasis></entry>
1609
1610 <entry>2348640</entry>
1611
1612 <entry>2348640</entry>
1613 </row>
1614
1615 <row>
1616 <entry><emphasis role="bold">1024</emphasis></entry>
1617
1618 <entry>1197101</entry>
1619
1620 <entry>1197101</entry>
1621 </row>
1622
1623 <row>
1624 <entry><emphasis role="bold">1500</emphasis></entry>
1625
1626 <entry>822244</entry>
1627
1628 <entry>822244</entry>
1629 </row>
1630 </tbody>
1631 </tgroup>
1632 </table></para>
1633 </section>
1634 </section>
1635 </section>
1636 </section>
1637</chapter> \ No newline at end of file