author Gabriel Ionescu <gabriel.ionescu@enea.com> 2017-12-08 11:56:28 +0100
committer Gabriel Ionescu <gabriel.ionescu@enea.com> 2017-12-08 11:56:28 +0100
commit 9d3e535588314681acd6e46e3de640290bd6ef0c (patch)
tree 1952775f9461ac725739ed9b3053a42fedf8feed /doc/book-enea-nfv-access-guide/doc
parent 7253de7a4dc7b8e83dd3b3c79bbb837c510ba4d4 (diff)
download el_releases-nfv-access-9d3e535588314681acd6e46e3de640290bd6ef0c.tar.gz
Chapter 7: Update benchmarks chapter for Cavium board
Signed-off-by: Gabriel Ionescu <gabriel.ionescu@enea.com>
Diffstat (limited to 'doc/book-enea-nfv-access-guide/doc')
-rw-r--r-- doc/book-enea-nfv-access-guide/doc/benchmarks.xml | 1645
1 file changed, 742 insertions, 903 deletions
diff --git a/doc/book-enea-nfv-access-guide/doc/benchmarks.xml b/doc/book-enea-nfv-access-guide/doc/benchmarks.xml
index 3279601..def0f89 100644
--- a/doc/book-enea-nfv-access-guide/doc/benchmarks.xml
+++ b/doc/book-enea-nfv-access-guide/doc/benchmarks.xml
@@ -14,7 +14,7 @@
14 <title>Hardware Setup</title> 14 <title>Hardware Setup</title>
15 15
16 <tgroup cols="2"> 16 <tgroup cols="2">
17 <colspec align="left" /> 17 <colspec align="left"/>
18 18
19 <thead> 19 <thead>
20 <row> 20 <row>
@@ -28,27 +28,25 @@
28 <row> 28 <row>
29 <entry align="left">Server Platform</entry> 29 <entry align="left">Server Platform</entry>
30 30
31 <entry align="left">Supermicro X10SDV-4C-TLN2F 31 <entry align="left">Cavium CN8304</entry>
32 http://www.supermicro.com/products/motherboard/xeon/d/X10SDV-4C-TLN2F.cfm</entry>
33 </row> 32 </row>
34 33
35 <row> 34 <row>
36 <entry align="left">ARCH</entry> 35 <entry align="left">ARCH</entry>
37 36
38 <entry>x86-64</entry> 37 <entry>aarch64</entry>
39 </row> 38 </row>
40 39
41 <row> 40 <row>
42 <entry align="left">Processor</entry> 41 <entry align="left">Processor</entry>
43 42
44 <entry>1 x Intel Xeon D-1521 (Broadwell), 4 cores, 8 43 <entry>Cavium OcteonTX CN83XX</entry>
45 hyper-threaded cores per processor</entry>
46 </row> 44 </row>
47 45
48 <row> 46 <row>
49 <entry align="left">CPU freq</entry> 47 <entry align="left">CPU freq</entry>
50 48
51 <entry>2.40 GHz</entry> 49 <entry>1.8 GHz</entry>
52 </row> 50 </row>
53 51
54 <row> 52 <row>
@@ -60,13 +58,13 @@
60 <row> 58 <row>
61 <entry align="left">Network</entry> 59 <entry align="left">Network</entry>
62 60
63 <entry>Dual integrated 10G ports</entry> 61 <entry>3x10G ports</entry>
64 </row> 62 </row>
65 63
66 <row> 64 <row>
67 <entry align="left">Storage</entry> 65 <entry align="left">Storage</entry>
68 66
69 <entry>Samsung 850 Pro 128GB SSD</entry> 67 <entry>Seagate 500GB HDD</entry>
70 </row> 68 </row>
71 </tbody> 69 </tbody>
72 </tgroup> 70 </tgroup>
@@ -82,155 +80,6 @@
82 </itemizedlist> 80 </itemizedlist>
83 </section> 81 </section>
84 82
85 <section id="bios">
86 <title>BIOS Settings</title>
87
88 <para>The table below details the BIOS settings for which the default
89 values were changed when doing performance measurements.</para>
90
91 <table>
92 <title>BIOS Settings</title>
93
94 <tgroup cols="4">
95 <colspec align="left" />
96
97 <thead>
98 <row>
99 <entry align="center">Menu Path</entry>
100
101 <entry align="center">Setting Name</entry>
102
103 <entry align="center">Enea NFV Access value</entry>
104
105 <entry align="center">BIOS Default value</entry>
106 </row>
107 </thead>
108
109 <tbody>
110 <row>
111 <entry align="left">CPU Configuration</entry>
112
113 <entry align="left">Direct Cache Access (DCA)</entry>
114
115 <entry>Enable</entry>
116
117 <entry>Auto</entry>
118 </row>
119
120 <row>
121 <entry>CPU Configuration / Advanced Power Management
122 Configuration</entry>
123
124 <entry align="left">EIST (P-States)</entry>
125
126 <entry>Disable</entry>
127
128 <entry>Enable</entry>
129 </row>
130
131 <row>
132 <entry>CPU Configuration / Advanced Power Management Configuration
133 / CPU C State Control</entry>
134
135 <entry align="left">CPU C State</entry>
136
137 <entry>Disable</entry>
138
139 <entry>Enable</entry>
140 </row>
141
142 <row>
143 <entry>CPU Configuration / Advanced Power Management Configuration
144 / CPU Advanced PM Turning / Energy Perf BIAS</entry>
145
146 <entry align="left">Energy Performance Tuning</entry>
147
148 <entry>Disable</entry>
149
150 <entry>Enable</entry>
151 </row>
152
153 <row>
154 <entry>CPU Configuration / Advanced Power Management Configuration
155 / CPU Advanced PM Turning / Energy Perf BIAS</entry>
156
157 <entry align="left">Energy Performance BIAS Setting</entry>
158
159 <entry>Performance</entry>
160
161 <entry>Balanced Performance</entry>
162 </row>
163
164 <row>
165 <entry>CPU Configuration / Advanced Power Management Configuration
166 / CPU Advanced PM Turning / Energy Perf BIAS</entry>
167
168 <entry align="left">Power/Performance Switch</entry>
169
170 <entry>Disable</entry>
171
172 <entry>Enable</entry>
173 </row>
174
175 <row>
176 <entry>CPU Configuration / Advanced Power Management Configuration
177 / CPU Advanced PM Turning / Program PowerCTL _MSR</entry>
178
179 <entry align="left">Energy Efficient Turbo</entry>
180
181 <entry>Disable</entry>
182
183 <entry>Enable</entry>
184 </row>
185
186 <row>
187 <entry>Chipset Configuration / North Bridge / IIO
188 Configuration</entry>
189
190 <entry align="left">EV DFX Features</entry>
191
192 <entry>Enable</entry>
193
194 <entry>Disable</entry>
195 </row>
196
197 <row>
198 <entry>Chipset Configuration / North Bridge / Memory
199 Configuration</entry>
200
201 <entry align="left">Enforce POR</entry>
202
203 <entry>Disable</entry>
204
205 <entry>Enable</entry>
206 </row>
207
208 <row>
209 <entry>Chipset Configuration / North Bridge / Memory
210 Configuration</entry>
211
212 <entry align="left">Memory Frequency</entry>
213
214 <entry>2400</entry>
215
216 <entry>Auto</entry>
217 </row>
218
219 <row>
220 <entry>Chipset Configuration / North Bridge / Memory
221 Configuration</entry>
222
223 <entry align="left">DRAM RAPL Baseline</entry>
224
225 <entry>Disable</entry>
226
227 <entry>DRAM RAPL Mode 1</entry>
228 </row>
229 </tbody>
230 </tgroup>
231 </table>
232 </section>
233
234 <section id="use-cases"> 83 <section id="use-cases">
235 <title>Use Cases</title> 84 <title>Use Cases</title>
236 85
@@ -251,7 +100,7 @@
251 100
252 <itemizedlist> 101 <itemizedlist>
253 <listitem> 102 <listitem>
254 <para>pktgen TX, RX in packets per second (pps) and Mbps</para> 103 <para>pktgen TX, RX in packets per second (pps) and Mbit/s</para>
255 </listitem> 104 </listitem>
256 105
257 <listitem> 106 <listitem>
@@ -269,25 +118,21 @@
269 118
270 <para>Start by following the steps below:</para> 119 <para>Start by following the steps below:</para>
271 120
272 <para>SSD boot using the following <literal>grub.cfg</literal> 121 <para>Boot the board using the following U-Boot commands:
273 entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / 122 <programlisting>setenv boot_board 'setenv userbootparams nohz_full=1-23 isolcpus=1-23 \
274isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / 123rcu-nocbs=1-23 rcu_nocb_poll clocksource=tsc tsc=reliable nohpet \
275clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / 124nosoftlockup audit=0 nmi_watchdog=0; setenv satapart 2; run bootsata'
276processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / 125run boot_board</programlisting></para>
277intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
278hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting></para>
279 126
280 <para>Kill unnecessary services:<programlisting>killall ovsdb-server ovs-vswitchd 127 <para>Configure hugepages and set up DPDK:<programlisting>echo 4 &gt; /proc/sys/vm/nr_hugepages
281rm -rf /etc/openvswitch/* 128modprobe vfio-pci
282mkdir -p /var/run/openvswitch</programlisting>Mount hugepages and configure 129ifconfig enP1p1s0f1 down
283 DPDK:<programlisting>mkdir -p /mnt/huge 130dpdk-devbind -b vfio-pci 0001:01:00.1</programlisting>Run
284mount -t hugetlbfs nodev /mnt/huge
285modprobe igb_uio
286dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
287 pktgen:<programlisting>cd /usr/share/apps/pktgen/ 131 pktgen:<programlisting>cd /usr/share/apps/pktgen/
288./pktgen -c 0xF -n 1 -- -P -m "[3:2].0"</programlisting>In the pktgen console 132./pktgen -v -c 0x7 -n 4 --proc-type auto -d /usr/lib/librte_pmd_thunderx_nicvf.so.1.1 \
289 run:<programlisting>str</programlisting>To change framesize for 133-w 0001:01:00.1 -- -P -m "[1:2].0"</programlisting>In the pktgen console
290 pktgen, from [64, 128, 256, 512]:<programlisting>set 0 size &amp;lt;number&amp;gt;</programlisting></para> 134 run:<programlisting>str</programlisting>Choose one of the values
135 from [64, 128, 256, 512] to change the packet size:<programlisting>set 0 size &lt;number&gt;</programlisting></para>
291 </section> 136 </section>
292 137
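Before running pktgen it can help to sanity-check the setup; a minimal sketch, assuming the same PCI address as above and the standard DPDK helper tools:

    grep -i hugepages /proc/meminfo    # the pool configured via nr_hugepages should show up here
    dpdk-devbind --status              # 0001:01:00.1 should be listed under the vfio-pci driver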
293 <section id="usecase-two"> 138 <section id="usecase-two">
@@ -295,64 +140,64 @@ dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
295 140
296 <para>Start by following the steps below:</para> 141 <para>Start by following the steps below:</para>
297 142
298 <para>SSD boot using the following <literal>grub.cfg</literal> 143 <para>Boot the board using the following U-Boot commands:</para>
299 entry:</para>
300 144
301 <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / 145 <programlisting>setenv boot_board 'setenv userbootparams nohz_full=1-23 isolcpus=1-23 \
302isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / 146rcu-nocbs=1-23 rcu_nocb_poll clocksource=tsc tsc=reliable nohpet \
303clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / 147nosoftlockup audit=0 nmi_watchdog=0; setenv satapart 2; run bootsata'
304processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / 148run boot_board</programlisting>
305intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / 149
306hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting> 150 <para>It is expected that an NFV Access guest image is present on the
151 target.</para>
307 152
308 <para>It is expected to have Docker/guest image on target. Configure 153 <para>Set up DPDK and configure the OVS bridge:<programlisting># Clean up old OVS config
309 the OVS bridge:<programlisting># OVS old config clean-up
310killall ovsdb-server ovs-vswitchd 154killall ovsdb-server ovs-vswitchd
311rm -rf /etc/openvswitch/* 155rm -rf /etc/openvswitch/*
156rm -rf /var/run/openvswitch/*
157rm -rf /var/log/openvswitch/*
312mkdir -p /var/run/openvswitch 158mkdir -p /var/run/openvswitch
313 159
314# Mount hugepages and bind interfaces to dpdk 160# Configure hugepages and bind interfaces to dpdk
315mkdir -p /mnt/huge 161echo 20 &gt; /proc/sys/vm/nr_hugepages
316mount -t hugetlbfs nodev /mnt/huge 162modprobe vfio-pci
317modprobe igb_uio 163ifconfig enP1p1s0f1 down
318dpdk-devbind --bind=igb_uio 0000:03:00.0 164dpdk-devbind -b vfio-pci 0001:01:00.1
319 165
320# configure openvswitch with DPDK 166# configure openvswitch with DPDK
321export DB_SOCK=/var/run/openvswitch/db.sock 167export DB_SOCK=/var/run/openvswitch/db.sock
322ovsdb-tool create /etc/openvswitch/conf.db / 168ovsdb-tool create /etc/openvswitch/conf.db /usr/share/openvswitch/vswitch.ovsschema
323/usr/share/openvswitch/vswitch.ovsschema 169ovsdb-server --remote=punix:$DB_SOCK \
324ovsdb-server --remote=punix:$DB_SOCK / 170 --remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
325--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
326ovs-vsctl --no-wait init 171ovs-vsctl --no-wait init
327ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10 172ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10
328ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc 173ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc
329ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048 174ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048
330ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true 175ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
331ovs-vswitchd unix:$DB_SOCK --pidfile --detach / 176ovs-vswitchd unix:$DB_SOCK --pidfile --detach \
332--log-file=/var/log/openvswitch/ovs-vswitchd.log 177 --log-file=/var/log/openvswitch/ovs-vswitchd.log
333 178
334ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev 179ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev
335ovs-vsctl add-port ovsbr0 vhost-user1 / 180ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 \
336-- set Interface vhost-user1 type=dpdkvhostuser ofport_request=1 181 type=dpdkvhostuser -- set Interface vhost-user1 ofport_request=2
337ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface / 182ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 type=dpdk \
338dpdk0 type=dpdk options:dpdk-devargs=0000:03:00.0 ofport_request=2 183 options:dpdk-devargs=0001:01:00.1 -- set Interface dpdk0 ofport_request=1
339 184
340# configure static flows 185# configure static flows
341ovs-ofctl del-flows ovsbr0 186ovs-ofctl del-flows ovsbr0
342ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2 187ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2
343ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Import a 188ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Import a
344 Docker container:<programlisting>docker import enea-nfv-access-guest-qemux86-64.tar.gz el7_guest</programlisting>Start 189 Docker container:<programlisting>docker import enea-nfv-access-guest-qemuarm64.tar.gz nfv_container</programlisting>Start
345 the Docker container:<programlisting>docker run -it --rm -v /var/run/openvswitch/:/var/run/openvswitch/ / 190 the Docker container:<programlisting>docker run -it -v /var/run/openvswitch/:/var/run/openvswitch/ \
346-v /mnt/huge:/mnt/huge el7_guest /bin/bash</programlisting>Start the testpmd 191 -v /dev/hugepages/:/dev/hugepages/ nfv_container /bin/bash</programlisting>Start
347 application in Docker:<programlisting>testpmd -c 0x30 -n 2 --file-prefix prog1 --socket-mem 512 --no-pci / 192 the testpmd application in Docker:<programlisting>testpmd -c 0x30 -n 2 --file-prefix prog1 --socket-mem 512 \
348--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user1 / 193 --no-pci --vdev=virtio_user0,path=/var/run/openvswitch/vhost-user1 \
349-d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan / 194 -d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan \
350--disable-rss -i --portmask=0x1 --coremask=0x20 --nb-cores=1 / 195 --disable-rss -i --portmask=0x1 --coremask=0x20 --nb-cores=1 --rxq=1 \
351--rxq=1 --txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>To 196 --txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>To
352 start traffic <emphasis role="bold">forwarding</emphasis>, run the 197 start traffic <emphasis role="bold">forwarding</emphasis>, run the
353 following command in testpmd CLI:<programlisting>start</programlisting>To 198 following command in testpmd CLI:<programlisting>start</programlisting>To
354 start traffic but in <emphasis role="bold">termination</emphasis> 199 start traffic but in <emphasis role="bold">termination</emphasis>
355 mode (no traffic sent on TX), run following command in testpmd 200 mode (no traffic sent on TX), run the following commands in testpmd
356 CLI:<programlisting>set fwd rxonly 201 CLI:<programlisting>set fwd rxonly
357start</programlisting><table> 202start</programlisting><table>
358 <title>Results in forwarding mode</title> 203 <title>Results in forwarding mode</title>
@@ -389,56 +234,92 @@ start</programlisting><table>
389 <entry role="bold"><emphasis 234 <entry role="bold"><emphasis
390 role="bold">64</emphasis></entry> 235 role="bold">64</emphasis></entry>
391 236
392 <entry>14877658</entry> 237 <entry>14682363</entry>
393 238
394 <entry>9997</entry> 239 <entry>9867</entry>
395 240
396 <entry>7832352</entry> 241 <entry>1666666</entry>
397 242
398 <entry>5264</entry> 243 <entry>1119</entry>
399 244
400 <entry>7831250</entry> 245 <entry>1976488</entry>
401 246
402 <entry>7831250</entry> 247 <entry>1666694</entry>
403 248
404 <entry>52,65%</entry> 249 <entry>13.46%</entry>
405 </row> 250 </row>
406 251
407 <row> 252 <row>
408 <entry><emphasis role="bold">128</emphasis></entry> 253 <entry><emphasis role="bold">128</emphasis></entry>
409 254
410 <entry>8441305</entry> 255 <entry>8445993</entry>
411 256
412 <entry>9994</entry> 257 <entry>10000</entry>
413 258
414 <entry>7533893</entry> 259 <entry>1600567</entry>
415 260
416 <entry>8922</entry> 261 <entry>1894</entry>
417 262
418 <entry>7535127</entry> 263 <entry>1886851</entry>
419 264
420 <entry>7682007</entry> 265 <entry>1600573</entry>
421 266
422 <entry>89,27%</entry> 267 <entry>22.34%</entry>
423 </row> 268 </row>
424 269
425 <row> 270 <row>
426 <entry role="bold"><emphasis 271 <entry role="bold"><emphasis
427 role="bold">256</emphasis></entry> 272 role="bold">256</emphasis></entry>
428 273
429 <entry>4528831</entry> 274 <entry>4529011</entry>
430 275
431 <entry>9999</entry> 276 <entry>10000</entry>
277
278 <entry>1491449</entry>
279
280 <entry>3292</entry>
281
282 <entry>1715763</entry>
283
284 <entry>1491445</entry>
285
286 <entry>37.88%</entry>
287 </row>
288
289 <row>
290 <entry><emphasis role="bold">512</emphasis></entry>
291
292 <entry>2349638</entry>
293
294 <entry>10000</entry>
295
296 <entry>1422338</entry>
432 297
433 <entry>4528845</entry> 298 <entry>6052</entry>
299
300 <entry>1555351</entry>
301
302 <entry>1422330</entry>
303
304 <entry>66.20%</entry>
305 </row>
306
307 <row>
308 <entry><emphasis role="bold">1024</emphasis></entry>
309
310 <entry>1197323</entry>
311
312 <entry>10000</entry>
313
314 <entry>1197325</entry>
434 315
435 <entry>9999</entry> 316 <entry>9999</entry>
436 317
437 <entry>4528738</entry> 318 <entry>1197320</entry>
438 319
439 <entry>4528738</entry> 320 <entry>1197320</entry>
440 321
441 <entry>100%</entry> 322 <entry>100.00%</entry>
442 </row> 323 </row>
443 </tbody> 324 </tbody>
444 </tgroup> 325 </tgroup>
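For reference, the pktgen TX figures for 64-byte frames sit close to 10G line rate; counting the 20 bytes of per-frame overhead (preamble, SFD and inter-frame gap), each 64-byte frame occupies 84 bytes on the wire:

    10,000,000,000 / ((64 + 20) * 8) ≈ 14,880,952 pps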
@@ -465,32 +346,52 @@ start</programlisting><table>
465 <entry role="bold"><emphasis 346 <entry role="bold"><emphasis
466 role="bold">64</emphasis></entry> 347 role="bold">64</emphasis></entry>
467 348
468 <entry>14877775</entry> 349 <entry>14676922</entry>
469 350
470 <entry>8060974</entry> 351 <entry>1984693</entry>
471 352
472 <entry>54,1%</entry> 353 <entry>13.52%</entry>
473 </row> 354 </row>
474 355
475 <row> 356 <row>
476 <entry><emphasis role="bold">128</emphasis></entry> 357 <entry><emphasis role="bold">128</emphasis></entry>
477 358
478 <entry>8441403</entry> 359 <entry>8445991</entry>
479 360
480 <entry>8023555</entry> 361 <entry>1895099</entry>
481 362
482 <entry>95,0%</entry> 363 <entry>22.44%</entry>
483 </row> 364 </row>
484 365
485 <row> 366 <row>
486 <entry role="bold"><emphasis 367 <entry role="bold"><emphasis
487 role="bold">256</emphasis></entry> 368 role="bold">256</emphasis></entry>
488 369
489 <entry>4528864</entry> 370 <entry>4528379</entry>
490 371
491 <entry>4528840</entry> 372 <entry>1722004</entry>
492 373
493 <entry>99,9%</entry> 374 <entry>38.03%</entry>
375 </row>
376
377 <row>
378 <entry><emphasis role="bold">512</emphasis></entry>
379
380 <entry>2349639</entry>
381
382 <entry>1560988</entry>
383
384 <entry>66.44%</entry>
385 </row>
386
387 <row>
388 <entry><emphasis role="bold">1024</emphasis></entry>
389
390 <entry>1197325</entry>
391
392 <entry>1197325</entry>
393
394 <entry>100.00%</entry>
494 </row> 395 </row>
495 </tbody> 396 </tbody>
496 </tgroup> 397 </tgroup>
@@ -503,56 +404,56 @@ start</programlisting><table>
503 host</title> 404 host</title>
504 405
505 <para>Benchmark a combo test using testpmd running in two Docker 406 <para>Benchmark a combo test using testpmd running in two Docker
506 instances, one which Forwards traffic to the second one, which 407 instances, one which forwards traffic to the second one, which
507 Terminates it.</para> 408 terminates it.</para>
508 409
509 <para>Packets are generated with pktgen and TX-d to the first testpmd, 410 <para>Packets are generated with pktgen and transmitted to the first
510 which will RX and Forward them to the second testpmd, which will RX 411 testpmd instance, which will forward them to the second testpmd
511 and terminate them.</para> 412 instance, which terminates them.</para>
512 413
513 <para>Measurements are made in:</para> 414 <para>This test measures:</para>
514 415
515 <itemizedlist> 416 <itemizedlist>
516 <listitem> 417 <listitem>
517 <para>pktgen TX in pps and Mbits/s</para> 418 <para>pktgen TX, RX in packets per second (pps) and Mbit/s</para>
518 </listitem> 419 </listitem>
519 420
520 <listitem> 421 <listitem>
521 <para>testpmd TX and RX pps in Docker1</para> 422 <para>testpmd TX, RX in packets per second in the first Docker
423 container</para>
522 </listitem> 424 </listitem>
523 425
524 <listitem> 426 <listitem>
525 <para>testpmd RX pps in Docker2</para> 427 <para>testpmd TX, RX in packets per second in the second Docker
428 container</para>
526 </listitem> 429 </listitem>
527 </itemizedlist>
528 430
529 <para>Throughput found as a percent, by dividing Docker2 <emphasis 431 <listitem>
530 role="bold">testpmd RX pps</emphasis> by <emphasis role="bold">pktgen 432 <para>divide testpmd RX pps for the second Docker container by
531 TX pps</emphasis>.</para> 433 pktgen TX pps to obtain the throughput as a percentage (%)</para>
434 </listitem>
435 </itemizedlist>
532 436
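As a worked example, taking the values from the 64-byte row of the results table further below:

    throughput = Target 2 testpmd RX pps / pktgen TX pps
               = 1366690 / 14683140
               ≈ 9.31 %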
533 <section id="target-one-usecase-three"> 437 <section id="target-one-usecase-three">
534 <title>Test Setup for Target 1</title> 438 <title>Test Setup for Target 1</title>
535 439
536 <para>Start by following the steps below:</para> 440 <para>Start by following the steps below:</para>
537 441
538 <para>SSD boot using the following <literal>grub.cfg</literal> 442 <para>Boot the board using the following U-Boot commands: </para>
539 entry:</para>
540 443
541 <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / 444 <programlisting>setenv boot_board 'setenv userbootparams nohz_full=1-23 isolcpus=1-23 \
542isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / 445rcu-nocbs=1-23 rcu_nocb_poll clocksource=tsc tsc=reliable nohpet \
543clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / 446nosoftlockup audit=0 nmi_watchdog=0; setenv satapart 2; run bootsata'
544processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / 447run boot_board</programlisting>
545intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
546hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>
547 448
548 <para>Configure DPDK:<programlisting>mkdir -p /mnt/huge 449 <para>Configure hugepages and set up DPDK:<programlisting>echo 4 &gt; /proc/sys/vm/nr_hugepages
549mount -t hugetlbfs nodev /mnt/huge 450modprobe vfio-pci
550modprobe igb_uio 451ifconfig enP1p1s0f1 down
551dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run 452dpdk-devbind -b vfio-pci 0001:01:00.1</programlisting>Run
552 pktgen:<programlisting>cd /usr/share/apps/pktgen/ 453 pktgen:<programlisting>cd /usr/share/apps/pktgen/
553./pktgen -c 0xF -n 1 -- -P -m "[3:2].0"</programlisting>Choose one of the 454./pktgen -v -c 0x7 -n 4 --proc-type auto -d /usr/lib/librte_pmd_thunderx_nicvf.so.1.1 \
554 values from [64, 128, 256, 512] to change the packet 455-w 0001:01:00.1 -- -P -m "[1:2].0"</programlisting>Choose one of the values
555 size:<programlisting>set 0 size &lt;number&gt;</programlisting></para> 456 from [64, 128, 256, 512] to change the packet size:<programlisting>set 0 size &lt;number&gt;</programlisting></para>
556 </section> 457 </section>
557 458
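For convenience, a sketch of the pktgen console commands used throughout these tests:

    set 0 size 256     # select the frame size for port 0
    start 0            # start generating traffic on port 0
    stop 0             # stop traffic before choosing another frame size
    str                # shorthand for starting traffic on all ports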
558 <section id="target-two-usecase-four"> 459 <section id="target-two-usecase-four">
@@ -563,73 +464,79 @@ dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
563 <para>SSD boot using the following <literal>grub.cfg</literal> 464 <para>Boot the board using the following U-Boot
564 entry:</para> 465 commands:</para>
565 466
566 <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / 467 <programlisting>setenv boot_board 'setenv userbootparams nohz_full=1-23 isolcpus=1-23 \
567isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / 468rcu-nocbs=1-23 rcu_nocb_poll clocksource=tsc tsc=reliable nohpet \
568clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / 469nosoftlockup audit=0 nmi_watchdog=0; setenv satapart 2; run bootsata'
569processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 / 470run boot_board</programlisting>
570iommu=pt intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
571hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>
572 471
573 <para><programlisting>killall ovsdb-server ovs-vswitchd 472 <para>Set up DPDK and configure the OVS bridge:<programlisting># Clean up old OVS config
473killall ovsdb-server ovs-vswitchd
574rm -rf /etc/openvswitch/* 474rm -rf /etc/openvswitch/*
575mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge 475rm -rf /var/run/openvswitch/*
576mount -t hugetlbfs nodev /mnt/huge 476rm -rf /var/log/openvswitch/*
577modprobe igb_uio 477mkdir -p /var/run/openvswitch
578dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Configure the OVS 478
579 bridge:<programlisting>export DB_SOCK=/var/run/openvswitch/db.sock 479# Configure hugepages and bind interfaces to dpdk
580ovsdb-tool create /etc/openvswitch/conf.db / 480echo 20 &gt; /proc/sys/vm/nr_hugepages
581/usr/share/openvswitch/vswitch.ovsschema 481modprobe vfio-pci
582ovsdb-server --remote=punix:$DB_SOCK / 482ifconfig enP1p1s0f1 down
583--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach 483dpdk-devbind -b vfio-pci 0001:01:00.1
484
485# configure openvswitch with DPDK
486export DB_SOCK=/var/run/openvswitch/db.sock
487ovsdb-tool create /etc/openvswitch/conf.db /usr/share/openvswitch/vswitch.ovsschema
488ovsdb-server --remote=punix:$DB_SOCK \
489 --remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
584ovs-vsctl --no-wait init 490ovs-vsctl --no-wait init
585ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10 491ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10
586ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xcc 492ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc
587ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048 493ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048
588ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true 494ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
589ovs-vswitchd unix:$DB_SOCK --pidfile --detach / 495ovs-vswitchd unix:$DB_SOCK --pidfile --detach \
590--log-file=/var/log/openvswitch/ovs-vswitchd.log 496 --log-file=/var/log/openvswitch/ovs-vswitchd.log
497
591ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev 498ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev
592ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface / 499ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 \
593vhost-user1 type=dpdkvhostuser ofport_request=1 500 type=dpdkvhostuser -- set Interface vhost-user1 ofport_request=1
594ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface / 501ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 \
595vhost-user2 type=dpdkvhostuser ofport_request=2 502 type=dpdkvhostuser -- set Interface vhost-user2 ofport_request=2
596ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 / 503ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 type=dpdk \
597type=dpdk options:dpdk-devargs=0000:03:00.0 ofport_request=3 504 options:dpdk-devargs=0001:01:00.1 -- set Interface dpdk0 ofport_request=3
505
506# configure static flows
598ovs-ofctl del-flows ovsbr0 507ovs-ofctl del-flows ovsbr0
599ovs-ofctl add-flow ovsbr0 in_port=3,action=output:2 508ovs-ofctl add-flow ovsbr0 in_port=3,action=output:2
600ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Import a 509ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Import a
601 Docker container:<programlisting>docker import enea-nfv-access-guest-qemux86-64.tar.gz el7_guest</programlisting>Start 510 Docker container:<programlisting>docker import enea-nfv-access-guest-qemuarm64.tar.gz nfv_container</programlisting>Start
602 the first Docker:<programlisting>docker run -it --rm --cpuset-cpus=4,5 / 511 the first Docker container:<programlisting>docker run -it -v /var/run/openvswitch/:/var/run/openvswitch/ \
603-v /var/run/openvswitch/:/var/run/openvswitch/ / 512 -v /dev/hugepages/:/dev/hugepages/ nfv_container /bin/bash</programlisting>Start
604-v /mnt/huge:/mnt/huge el7_guest /bin/bash</programlisting>Start the testpmd 513 testpmd in the first Docker container:<programlisting>testpmd -c 0x300 -n 4 --file-prefix prog2 --socket-mem 512 \
605 application in Docker1:<programlisting>testpmd -c 0x30 -n 2 --file-prefix prog1 --socket-mem 512 --no-pci / 514 --no-pci --vdev=virtio_user0,path=/var/run/openvswitch/vhost-user1 \
606--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user1 / 515 -d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan \
607-d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan / 516 --disable-rss -i --portmask=0x1 --coremask=0x200 --nb-cores=1 --rxq=1 \
608--disable-rss -i --portmask=0x1 --coremask=0x20 --nb-cores=1 / 517 --txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>Configure
609--rxq=1 --txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>Configure
610 it in termination mode:<programlisting>set fwd rxonly</programlisting>Run 518 it in termination mode:<programlisting>set fwd rxonly</programlisting>Run
611 the testpmd application:<programlisting>start</programlisting>Open a 519 the testpmd application:<programlisting>start</programlisting>Open a
612 new console to the host and start the second Docker 520 new console to the host and start the second Docker
613 instance:<programlisting>docker run -it --rm --cpuset-cpus=0,1 -v /var/run/openvswitch/:/var/run/openvswitch/ / 521 instance:<programlisting>docker run -it -v /var/run/openvswitch/:/var/run/openvswitch/ \
614-v /mnt/huge:/mnt/huge el7_guest /bin/bash</programlisting>In the second 522 -v /dev/hugepages/:/dev/hugepages/ nfv_container /bin/bash</programlisting>In
615 container start testpmd:<programlisting>testpmd -c 0x0F --file-prefix prog2 --socket-mem 512 --no-pci / 523 the second container start testpmd:<programlisting>testpmd -c 0x0F --file-prefix prog2 --socket-mem 512 --no-pci \
616--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user2 / 524--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user2 \
617-d /usr/lib/librte_pmd_virtio.so.1.1 -- -i --disable-hw-vlan</programlisting>Run 525-d /usr/lib/librte_pmd_virtio.so.1.1 -- -i --disable-hw-vlan</programlisting>Start
618 the TestPmd application in the second Docker:<programlisting>testpmd -c 0x3 -n 2 --file-prefix prog2 --socket-mem 512 --no-pci / 526 testpmd in the second Docker container:<programlisting>testpmd -c 0x30 -n 4 --file-prefix prog1 --socket-mem 512 \
619--vdev=virtio_user0,path=/var/run/openvswitch/vhost-user2 / 527 --no-pci --vdev=virtio_user0,path=/var/run/openvswitch/vhost-user2 \
620-d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan / 528 -d /usr/lib/librte_pmd_virtio.so.1.1 -- --burst 64 --disable-hw-vlan \
621--disable-rss -i --portmask=0x1 --coremask=0x2 --nb-cores=1 --rxq=1 / 529 --disable-rss -i --portmask=0x1 --coremask=0x20 --nb-cores=1 --rxq=1 \
622--txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>In 530 --txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>In
623 the testpmd shell, run:<programlisting>start</programlisting>Start 531 the testpmd shell, run:<programlisting>start</programlisting>Start
624 pktgen traffic by running the following command in pktgen 532 pktgen traffic by running the following command in pktgen
625 CLI:<programlisting>start 0</programlisting>To record traffic 533 CLI:<programlisting>start 0</programlisting>To record traffic
626 results:<programlisting>show port stats 0</programlisting>This 534 results, run:<programlisting>show port stats 0</programlisting></para>
627 should be used in testpmd applications.</para>
628 535
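If no traffic reaches the containers, the OVS state can be inspected on the host first; a sketch using the standard Open vSwitch tools:

    ovs-vsctl show                 # bridge and port summary
    ovs-ofctl dump-flows ovsbr0    # the two static flows should be listed
    ovs-ofctl dump-ports ovsbr0    # per-port packet counters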
629 <table> 536 <table>
630 <title>Results</title> 537 <title>Results</title>
631 538
632 <tgroup cols="5"> 539 <tgroup cols="6">
633 <tbody> 540 <tbody>
634 <row> 541 <row>
635 <entry align="center"><emphasis 542 <entry align="center"><emphasis
@@ -646,183 +553,99 @@ ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Import a
646 553
647 <entry align="center"><emphasis role="bold">Target 2 - 554 <entry align="center"><emphasis role="bold">Target 2 -
648 (termination) testpmd pps RX</emphasis></entry> 555 (termination) testpmd pps RX</emphasis></entry>
556
557 <entry align="center"><emphasis role="bold">Throughput
558 (%)</emphasis></entry>
649 </row> 559 </row>
650 560
651 <row> 561 <row>
652 <entry role="bold"><emphasis 562 <entry role="bold"><emphasis
653 role="bold">64</emphasis></entry> 563 role="bold">64</emphasis></entry>
654 564
655 <entry>14877713</entry> 565 <entry>14683140</entry>
566
567 <entry>1979807</entry>
656 568
657 <entry>5031270</entry> 569 <entry>1366712</entry>
658 570
659 <entry>5031214</entry> 571 <entry>1366690</entry>
660 572
661 <entry>5031346</entry> 573 <entry>9.31%</entry>
662 </row> 574 </row>
663 575
664 <row> 576 <row>
665 <entry><emphasis role="bold">128</emphasis></entry> 577 <entry><emphasis role="bold">128</emphasis></entry>
666 578
667 <entry>8441271</entry> 579 <entry>8446005</entry>
668 580
669 <entry>4670165</entry> 581 <entry>1893514</entry>
670 582
671 <entry>4670165</entry> 583 <entry>1286628</entry>
672 584
673 <entry>4670261</entry> 585 <entry>1286621</entry>
586
587 <entry>15.23%</entry>
674 </row> 588 </row>
675 589
676 <row> 590 <row>
677 <entry role="bold"><emphasis 591 <entry role="bold"><emphasis
678 role="bold">256</emphasis></entry> 592 role="bold">256</emphasis></entry>
679 593
680 <entry>4528844</entry> 594 <entry>4529011</entry>
595
596 <entry>1716427</entry>
681 597
682 <entry>4490268</entry> 598 <entry>1140234</entry>
683 599
684 <entry>4490268</entry> 600 <entry>1140232</entry>
685 601
686 <entry>4490234</entry> 602 <entry>25.18%</entry>
687 </row> 603 </row>
688 604
689 <row> 605 <row>
690 <entry><emphasis role="bold">512</emphasis></entry> 606 <entry><emphasis role="bold">512</emphasis></entry>
691 607
692 <entry>2349458</entry> 608 <entry>2349638</entry>
693
694 <entry>2349553</entry>
695 609
696 <entry>2349553</entry> 610 <entry>1556898</entry>
697 611
698 <entry>2349545</entry> 612 <entry>1016661</entry>
699 </row>
700 </tbody>
701 </tgroup>
702 </table>
703 </section>
704 </section>
705
706 <section id="pxe-config-docker">
707 <title>SR-IOV in Docker</title>
708
709 <para>PCI passthrough tests using pktgen and testpmd in Docker.</para>
710
711 <para>pktgen[DPDK]Docker - PHY - Docker[DPDK] testpmd</para>
712
713 <para>Measurements:</para>
714
715 <itemizedlist>
716 <listitem>
717 <para>RX packets per second in testpmd (with testpmd configured in
718 rxonly mode).</para>
719 </listitem>
720 </itemizedlist>
721
722 <section id="target-setup">
723 <title>Test Setup</title>
724
725 <para>Boot Enea NFV Access from SSD:<programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
726isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable clocksource=tsc /
727tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 processor.max_cstate=0 /
728mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt intel_iommu=on hugepagesz=1GB /
729hugepages=8 default_hugepagesz=1GB hugepagesz=2M hugepages=2048 /
730vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Allow unsafe
731 interrupts:<programlisting>echo 1 &gt; /sys/module/vfio_iommu_type1/parameters/allow_unsafe_interrupts</programlisting>Configure
732 DPDK:<programlisting>mkdir -p /mnt/huge
733mount -t hugetlbfs nodev /mnt/huge
734dpdk-devbind.py --bind=ixgbe 0000:03:00.0
735ifconfig eno3 192.168.1.2
736echo 2 &gt; /sys/class/net/eno3/device/sriov_numvfs
737modprobe vfio-pci
738dpdk-devbind.py --bind=vfio-pci 0000:03:10.0
739dpdk-devbind.py --bind=vfio-pci 0000:03:10.2</programlisting>Start two docker
740 containers:<programlisting>docker run --privileged -it --rm -v /mnt/huge:/mnt/huge/ /
741--device /dev/vfio/vfio el7_guest /bin/bash
742docker run --privileged -it --rm -v /mnt/huge:/mnt/huge/ /
743--device /dev/vfio/vfio el7_guest /bin/bash</programlisting>In the first
744 container start pktgen:<programlisting>cd /usr/share/apps/pktgen/
745./pktgen -c 0x1f -w 0000:03:10.0 -n 1 --file-prefix pg1 /
746--socket-mem 1024 -- -P -m "[3:4].0"</programlisting>In the pktgen prompt set
747 the destination MAC address:<programlisting>set mac 0 XX:XX:XX:XX:XX:XX
748str</programlisting>In the second container start testpmd:<programlisting>testpmd -c 0x7 -n 1 -w 0000:03:10.2 -- -i --portmask=0x1 /
749--txd=256 --rxd=256 --port-topology=chained</programlisting>In the testpmd
750 prompt set <emphasis role="bold">forwarding</emphasis>
751 rxonly:<programlisting>set fwd rxonly
752start</programlisting><table>
753 <title>Results</title>
754
755 <tgroup cols="5">
756 <tbody>
757 <row>
758 <entry align="center"><emphasis
759 role="bold">Bytes</emphasis></entry>
760
761 <entry align="center"><emphasis role="bold">pktgen pps
762 TX</emphasis></entry>
763
764 <entry align="center"><emphasis role="bold">testpmd pps
765 RX</emphasis></entry>
766
767 <entry align="center"><emphasis role="bold">pktgen MBits/s
768 TX</emphasis></entry>
769 613
770 <entry align="center"><emphasis role="bold">throughput 614 <entry>1016659</entry>
771 (%)</emphasis></entry>
772 </row>
773 615
774 <row> 616 <entry>43.27%</entry>
775 <entry role="bold"><emphasis 617 </row>
776 role="bold">64</emphasis></entry>
777
778 <entry>14204211</entry>
779
780 <entry>14204561</entry>
781
782 <entry>9545</entry>
783
784 <entry>100</entry>
785 </row>
786
787 <row>
788 <entry><emphasis role="bold">128</emphasis></entry>
789 618
790 <entry>8440340</entry> 619 <row>
620 <entry><emphasis role="bold">1024</emphasis></entry>
791 621
792 <entry>8440201</entry> 622 <entry>1197326</entry>
793 623
794 <entry>9993</entry> 624 <entry>1197319</entry>
795 625
796 <entry>99.9</entry> 626 <entry>869654</entry>
797 </row>
798 627
799 <row> 628 <entry>869652</entry>
800 <entry role="bold"><emphasis
801 role="bold">256</emphasis></entry>
802 629
803 <entry>4533828</entry> 630 <entry>72.63%</entry>
631 </row>
804 632
805 <entry>4533891</entry> 633 <row>
634 <entry><emphasis role="bold">1500</emphasis></entry>
806 635
807 <entry>10010</entry> 636 <entry>822373</entry>
808 637
809 <entry>100</entry> 638 <entry>822369</entry>
810 </row>
811 639
812 <row> 640 <entry>760826</entry>
813 <entry><emphasis role="bold">512</emphasis></entry>
814 641
815 <entry>2349886</entry> 642 <entry>760821</entry>
816 643
817 <entry>2349715</entry> 644 <entry>92.52%</entry>
818 645 </row>
819 <entry>10000</entry> 646 </tbody>
820 647 </tgroup>
821 <entry>99.9</entry> 648 </table>
822 </row>
823 </tbody>
824 </tgroup>
825 </table></para>
826 </section> 649 </section>
827 </section> 650 </section>
828 </section> 651 </section>
@@ -836,16 +659,16 @@ start</programlisting><table>
836 <para>Benchmarking traffic (UDP) forwarding and termination using 659 <para>Benchmarking traffic (UDP) forwarding and termination using
837 testpmd in a virtual machine.</para> 660 testpmd in a virtual machine.</para>
838 661
839 <para>The Pktgen application is used to generate traffic that will 662 <para>The pktgen application is used to generate traffic that will
840 reach testpmd running on a virtual machine, and be forwarded back to 663 reach testpmd running in a virtual machine, from where it will be
841 source on the return trip. With the same setup a second measurement 664 forwarded back to the source. Within the same setup, a second measurement
842 will be done with traffic termination in the virtual machine.</para> 665 will be done with traffic termination in the virtual machine.</para>
843 666
844 <para>This test case measures:</para> 667 <para>This test case measures:</para>
845 668
846 <itemizedlist> 669 <itemizedlist>
847 <listitem> 670 <listitem>
848 <para>pktgen TX, RX in packets per second (pps) and Mbps</para> 671 <para>pktgen TX, RX in packets per second (pps) and Mbit/s</para>
849 </listitem> 672 </listitem>
850 673
851 <listitem> 674 <listitem>
@@ -864,24 +687,20 @@ start</programlisting><table>
864 687
865 <para>Start with the steps below:</para> 688 <para>Start with the steps below:</para>
866 689
867 <para>SSD boot using the following <literal>grub.cfg 690 <para>Boot the board using the following U-Boot commands:
868 </literal>entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / 691 <programlisting>setenv boot_board 'setenv userbootparams nohz_full=1-23 isolcpus=1-23 \
869isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / 692rcu-nocbs=1-23 rcu_nocb_poll clocksource=tsc tsc=reliable nohpet \
870clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / 693nosoftlockup audit=0 nmi_watchdog=0; setenv satapart 2; run bootsata'
871processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / 694run boot_board</programlisting></para>
872intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
873hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting></para>
874 695
875 <para>Kill unnecessary services: <programlisting>killall ovsdb-server ovs-vswitchd 696 <para>Configure hugepages and set up DPDK:<programlisting>echo 4 &gt; /proc/sys/vm/nr_hugepages
876rm -rf /etc/openvswitch/* 697modprobe vfio-pci
877mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge 698ifconfig enP1p1s0f1 down
878mount -t hugetlbfs nodev /mnt/huge 699dpdk-devbind -b vfio-pci 0001:01:00.1</programlisting>Run
879modprobe igb_uio
880dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
881 pktgen:<programlisting>cd /usr/share/apps/pktgen/ 700 pktgen:<programlisting>cd /usr/share/apps/pktgen/
882./pktgen -c 0x7 -n 4 --proc-type auto --socket-mem 256 / 701./pktgen -v -c 0x7 -n 4 --proc-type auto -d /usr/lib/librte_pmd_thunderx_nicvf.so.1.1 \
883-w 0000:03:00.0 -- -P -m "[1:2].0"</programlisting>Set pktgen frame size to 702-w 0001:01:00.1 -- -P -m "[1:2].0"</programlisting>Choose one of the values
884 use from [64, 128, 256, 512]:<programlisting>set 0 size 64</programlisting></para> 703 from [64, 128, 256, 512] to change the packet size:<programlisting>set 0 size &lt;number&gt;</programlisting></para>
885 </section> 704 </section>
886 705
887 <section id="targettwo-usecasefive"> 706 <section id="targettwo-usecasefive">
@@ -889,76 +708,137 @@ dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
889 708
890 <para>Start by following the steps below:</para> 709 <para>Start by following the steps below:</para>
891 710
892 <para>SSD boot using the following <literal>grub.cfg</literal> 711 <para>Boot the board using the following U-Boot commands:
893 entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / 712 <programlisting>setenv boot_board 'setenv userbootparams nohz_full=1-23 isolcpus=1-23 \
894isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / 713rcu-nocbs=1-23 rcu_nocb_poll clocksource=tsc tsc=reliable nohpet \
895clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / 714nosoftlockup audit=0 nmi_watchdog=0; setenv satapart 2; run bootsata'
896processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / 715run boot_board</programlisting>Kill unnecessary services: <programlisting>killall ovsdb-server ovs-vswitchd
897intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
898hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Kill
899 unnecessary services: <programlisting>killall ovsdb-server ovs-vswitchd
900rm -rf /etc/openvswitch/* 716rm -rf /etc/openvswitch/*
901mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge 717rm -rf /var/run/openvswitch/*
902 mount -t hugetlbfs nodev /mnt/huge 718 mkdir -p /var/run/openvswitch</programlisting>Configure hugepages and set up
903modprobe igb_uio 719 DPDK:<programlisting>echo 20 &gt; /proc/sys/vm/nr_hugepages
904dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Configure 720modprobe vfio-pci
721dpdk-devbind --bind=vfio-pci 0001:01:00.1</programlisting>Configure
905 OVS:<programlisting>export DB_SOCK=/var/run/openvswitch/db.sock 722 OVS:<programlisting>export DB_SOCK=/var/run/openvswitch/db.sock
906ovsdb-tool create /etc/openvswitch/conf.db / 723ovsdb-tool create /etc/openvswitch/conf.db /usr/share/openvswitch/vswitch.ovsschema
907/usr/share/openvswitch/vswitch.ovsschema 724ovsdb-server --remote=punix:$DB_SOCK \
908ovsdb-server --remote=punix:$DB_SOCK / 725 --remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
909--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
910ovs-vsctl --no-wait init 726ovs-vsctl --no-wait init
911ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10 727ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10
912ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc 728ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc
913ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048 729ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048
914ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true 730ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
915ovs-vswitchd unix:$DB_SOCK --pidfile --detach / 731ovs-vswitchd unix:$DB_SOCK --pidfile --detach \
916--log-file=/var/log/openvswitch/ovs-vswitchd.log 732 --log-file=/var/log/openvswitch/ovs-vswitchd.log
917 733
918ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev 734ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 \
919ovs-vsctl add-port ovsbr0 vhost-user1 / 735 datapath_type=netdev
920-- set Interface vhost-user1 type=dpdkvhostuser -- set Interface / 736ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface \
921vhost-user1 ofport_request=2 737 vhost-user1 type=dpdkvhostuser -- set Interface vhost-user1 ofport_request=2
922ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 / 738ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 \
923type=dpdk options:dpdk-devargs=0000:03:00.0 / 739 type=dpdk options:dpdk-devargs=0001:01:00.1 -- set Interface dpdk0 ofport_request=1
924-- set Interface dpdk0 ofport_request=1
925chmod 777 /var/run/openvswitch/vhost-user1
926 740
927ovs-ofctl del-flows ovsbr0 741ovs-ofctl del-flows ovsbr0
928ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2 742ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2
929ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Launch 743ovs-ofctl add-flow ovsbr0 in_port=2,action=output:1</programlisting>Create an
930 QEMU:<programlisting>taskset -c 0,1 qemu-system-x86_64 -cpu host,+invtsc,migratable=no / 744 XML file with the content below (e.g.
931-M q35 -smp cores=2,sockets=1 -vcpu 0,affinity=0 -vcpu 1,affinity=1 / 745 /home/root/guest.xml):<programlisting>&lt;domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'&gt;
932-enable-kvm -nographic -realtime mlock=on -kernel /mnt/qemu/bzImage / 746 &lt;name&gt;nfv-ovs-vm&lt;/name&gt;
933-drive file=/mnt/qemu/enea-nfv-access-guest-qemux86-64.ext4,/ 747 &lt;uuid&gt;ed204646-1ad5-11e7-93ae-92361f002671&lt;/uuid&gt;
934if=virtio,format=raw -m 4096 -object memory-backend-file,id=mem,/ 748 &lt;memory unit='KiB'&gt;4194304&lt;/memory&gt;
935size=4096M,mem-path=/mnt/huge,share=on -numa node,memdev=mem / 749 &lt;currentMemory unit='KiB'&gt;4194304&lt;/currentMemory&gt;
936-mem-prealloc -chardev socket,id=char0,path=/var/run/openvswitch/vhost-user1 / 750
937-netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce / 751 &lt;memoryBacking&gt;
938-device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,/ 752 &lt;hugepages&gt;
939mrg_rxbuf=on,rx_queue_size=1024,csum=off,gso=off,guest_tso4=off,/ 753 &lt;page size='512' unit='M' nodeset='0'/&gt;
940guest_tso6=off,guest_ecn=off -append 'root=/dev/vda console=ttyS0 / 754 &lt;/hugepages&gt;
941hugepagesz=2M hugepages=1024 isolcpus=1 nohz_full=1 rcu_nocbs=1 / 755 &lt;/memoryBacking&gt;
942irqaffinity=0 rcu_nocb_poll intel_pstate=disable intel_idle.max_cstate=0 / 756
943processor.max_cstate=0 mce=ignore_ce audit=0'</programlisting>Inside QEMU, 757 &lt;os&gt;
944 configure DPDK: <programlisting>mkdir -p /mnt/huge 758 &lt;type arch='aarch64' machine='virt,gic_version=3'&gt;hvm&lt;/type&gt;
945mount -t hugetlbfs nodev /mnt/huge 759 &lt;kernel&gt;Image&lt;/kernel&gt;
946modprobe igb_uio 760 &lt;cmdline&gt;root=/dev/vda console=ttyAMA0,115200n8 maxcpus=24 coherent_pool=16M debug hugepagesz=512M hugepages=3 audit=0 isolcpus=1 nohz_full=1 rcu_nocbs=1 irqaffinity=0&lt;/cmdline&gt;
947dpdk-devbind --bind=igb_uio 0000:00:02.0</programlisting>Inside QEMU, run 761 &lt;boot dev='hd'/&gt;
948 testpmd: <programlisting>testpmd -c 0x3 -n 2 -d librte_pmd_virtio.so.1.1 / 762 &lt;/os&gt;
949-- --burst 64 --disable-hw-vlan --disable-rss -i --portmask=0x1 / 763
950--coremask=0x2 --nb-cores=1 --rxq=1 --txq=1 --txd=512 --rxd=512 / 764 &lt;features&gt;
951--txqflags=0xf00 --port-topology=chained</programlisting>For the <emphasis 765 &lt;acpi/&gt;
952 role="bold">Forwarding test</emphasis>, start testpmd 766 &lt;apic/&gt;
953 directly:<programlisting>start</programlisting>For the <emphasis 767 &lt;/features&gt;
954 role="bold">Termination test</emphasis>, set testpmd to only 768
955 receive, then start it:<programlisting>set fwd rxonly 769 &lt;vcpu placement='static'&gt;2&lt;/vcpu&gt;
770
771 &lt;cpu mode='host-model'&gt;
772 &lt;model fallback='allow'/&gt;
773 &lt;topology sockets='1' cores='2' threads='1'/&gt;
774 &lt;numa&gt;
775 &lt;cell id='0' cpus='0' memory='4194304' unit='KiB' memAccess='shared'/&gt;
776 &lt;/numa&gt;
777 &lt;/cpu&gt;
778
779 &lt;cputune&gt;
780 &lt;vcpupin vcpu="0" cpuset="4"/&gt;
781 &lt;vcpupin vcpu="1" cpuset="5"/&gt;
782 &lt;/cputune&gt;
783
784 &lt;on_poweroff&gt;destroy&lt;/on_poweroff&gt;
785 &lt;on_reboot&gt;restart&lt;/on_reboot&gt;
786 &lt;on_crash&gt;destroy&lt;/on_crash&gt;
787
788 &lt;devices&gt;
789 &lt;emulator&gt;/usr/bin/qemu-system-aarch64&lt;/emulator&gt;
790 &lt;disk type='file' device='disk'&gt;
791 &lt;driver name='qemu' type='raw' cache='none'/&gt;
792 &lt;source file='enea-nfv-access-guest-qemuarm64.ext4'/&gt;
793 &lt;target dev='vda' bus='virtio'/&gt;
794 &lt;/disk&gt;
795
796 &lt;serial type='pty'&gt;
797 &lt;target port='0'/&gt;
798 &lt;/serial&gt;
799
800 &lt;console type='pty'&gt;
801 &lt;target type='serial' port='0'/&gt;
802 &lt;/console&gt;
803 &lt;/devices&gt;
804
805 &lt;qemu:commandline&gt;
806 &lt;qemu:arg value='-chardev'/&gt;
807 &lt;qemu:arg value='socket,id=charnet0,path=/var/run/openvswitch/vhost-user1'/&gt;
808
809 &lt;qemu:arg value='-netdev'/&gt;
810 &lt;qemu:arg value='type=vhost-user,id=hostnet0,chardev=charnet0'/&gt;
811
812 &lt;qemu:arg value='-device'/&gt;
813 &lt;qemu:arg value='virtio-net-pci,netdev=hostnet0,id=net0,mac=00:00:00:00:00:01,bus=pcie.0,addr=0x2'/&gt;
814 &lt;/qemu:commandline&gt;
815&lt;/domain&gt;</programlisting></para>
816
817 <para>Start the virtual machine, by running:</para>
818
819 <para><programlisting>virsh create /home/root/guest.xml</programlisting></para>
820
821 <para>Connect to the virtual machines console:</para>
822
823 <para><programlisting>virsh console nfv-ovs-vm</programlisting></para>
824
825 <para>Inside the VM, configure DPDK: <programlisting>ifconfig enp0s2 down
826echo 1 &gt; /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
827modprobe vfio-pci
828dpdk-devbind -b vfio-pci 0000:00:02.0</programlisting>Inside the VM, start
829 testpmd: <programlisting>testpmd -v -c 0x3 -n 4 -d /usr/lib/librte_pmd_virtio.so.1.1 \
830-w 0000:00:02.0 -- -i --disable-hw-vlan-filter --no-flush-rx \
831--port-topology=chained</programlisting>For the <emphasis
832 role="bold">Forwarding test</emphasis>, run:<programlisting>set fwd io
833start</programlisting>For the <emphasis role="bold">Termination
834 test</emphasis>, set testpmd to only receive, then start
835 it:<programlisting>set fwd rxonly
956start</programlisting>On target 1, you may start pktgen traffic 836start</programlisting>On target 1, you may start pktgen traffic
957 now:<programlisting>start 0</programlisting>On target 2, use this 837 now:<programlisting>start 0</programlisting>On target 2, use this
958 command to refresh the testpmd display and note the highest 838 command to display the testpmd traffic
959 values:<programlisting>show port stats 0</programlisting>To stop 839 statistics:<programlisting>show port stats 0</programlisting>To stop
960 traffic from pktgen, in order to choose a different frame 840 generating traffic in order to choose a different frame size,
961 size:<programlisting>stop 0</programlisting>To clear numbers in 841 run:<programlisting>stop 0</programlisting>To clear numbers in
962 testpmd:<programlisting>clear port stats 842 testpmd:<programlisting>clear port stats
963show port stats 0</programlisting><table> 843show port stats 0</programlisting><table>
964 <title>Results in forwarding mode</title> 844 <title>Results in forwarding mode</title>
@@ -995,56 +875,92 @@ show port stats 0</programlisting><table>
995 <entry role="bold"><emphasis 875 <entry role="bold"><emphasis
996 role="bold">64</emphasis></entry> 876 role="bold">64</emphasis></entry>
997 877
998 <entry>7926325</entry> 878 <entry>1555163</entry>
999 879
1000 <entry>14877576</entry> 880 <entry>14686542</entry>
1001 881
1002 <entry>7926515</entry> 882 <entry>1978791</entry>
1003 883
1004 <entry>7926515</entry> 884 <entry>1554707</entry>
1005 885
1006 <entry>5326</entry> 886 <entry>1044</entry>
1007 887
1008 <entry>9997</entry> 888 <entry>9867</entry>
1009 889
1010 <entry>53.2</entry> 890 <entry>13.47%</entry>
1011 </row> 891 </row>
1012 892
1013 <row> 893 <row>
1014 <entry><emphasis role="bold">128</emphasis></entry> 894 <entry><emphasis role="bold">128</emphasis></entry>
1015 895
1016 <entry>7502802</entry> 896 <entry>1504275</entry>
1017 897
1018 <entry>8441253</entry> 898 <entry>8447999</entry>
1019 899
1020 <entry>7785983</entry> 900 <entry>1901468</entry>
1021 901
1022 <entry>7494959</entry> 902 <entry>1504266</entry>
1023 903
1024 <entry>8883</entry> 904 <entry>1781</entry>
1025 905
1026 <entry>9994</entry> 906 <entry>10000</entry>
1027 907
1028 <entry>88.8</entry> 908 <entry>22.51%</entry>
1029 </row> 909 </row>
1030 910
1031 <row> 911 <row>
1032 <entry role="bold"><emphasis 912 <entry role="bold"><emphasis
1033 role="bold">256</emphasis></entry> 913 role="bold">256</emphasis></entry>
1034 914
1035 <entry>4528631</entry> 915 <entry>1423564</entry>
1036 916
1037 <entry>4528782</entry> 917 <entry>4529012</entry>
1038 918
1039 <entry>4529515</entry> 919 <entry>1718299</entry>
1040 920
1041 <entry>4529515</entry> 921 <entry>1423553</entry>
1042 922
1043 <entry>9999</entry> 923 <entry>3142</entry>
924
925 <entry>10000</entry>
926
927 <entry>37.94%</entry>
928 </row>
929
930 <row>
931 <entry><emphasis role="bold">512</emphasis></entry>
932
933 <entry>1360379</entry>
934
935 <entry>2349636</entry>
936
937 <entry>1554844</entry>
938
939 <entry>1360456</entry>
940
941 <entry>5789</entry>
942
943 <entry>10000</entry>
944
945 <entry>66.17%</entry>
946 </row>
947
948 <row>
949 <entry><emphasis role="bold">1024</emphasis></entry>
950
951 <entry>1197327</entry>
952
953 <entry>1197329</entry>
954
955 <entry>1197319</entry>
956
957 <entry>1197329</entry>
1044 958
1045 <entry>9999</entry> 959 <entry>9999</entry>
1046 960
1047 <entry>99.9</entry> 961 <entry>10000</entry>
962
963 <entry>100.00%</entry>
1048 </row> 964 </row>
1049 </tbody> 965 </tbody>
1050 </tgroup> 966 </tgroup>
@@ -1074,38 +990,62 @@ show port stats 0</programlisting><table>
1074 <entry role="bold"><emphasis 990 <entry role="bold"><emphasis
1075 role="bold">64</emphasis></entry> 991 role="bold">64</emphasis></entry>
1076 992
1077 <entry>14877764</entry> 993 <entry>14695621</entry>
1078 994
1079 <entry>8090855</entry> 995 <entry>1983227</entry>
1080 996
1081 <entry>9997</entry> 997 <entry>9875</entry>
1082 998
1083 <entry>54.3</entry> 999 <entry>13.50%</entry>
1084 </row> 1000 </row>
1085 1001
1086 <row> 1002 <row>
1087 <entry><emphasis role="bold">128</emphasis></entry> 1003 <entry><emphasis role="bold">128</emphasis></entry>
1088 1004
1089 <entry>8441309</entry> 1005 <entry>8446022</entry>
1006
1007 <entry>1897546</entry>
1008
1009 <entry>10000</entry>
1010
1011 <entry>22.47%</entry>
1012 </row>
1013
1014 <row>
1015 <entry><emphasis role="bold">256</emphasis></entry>
1016
1017 <entry>4529011</entry>
1090 1018
1091 <entry>8082971</entry> 1019 <entry>1724323</entry>
1092 1020
1093 <entry>9994</entry> 1021 <entry>10000</entry>
1094 1022
1095 <entry>95.7</entry> 1023 <entry>38.07%</entry>
1024 </row>
1025
1026 <row>
1027 <entry><emphasis role="bold">512</emphasis></entry>
1028
1029 <entry>2349638</entry>
1030
1031 <entry>1562212</entry>
1032
1033 <entry>10000</entry>
1034
1035 <entry>66.49%</entry>
1096 </row> 1036 </row>
1097 1037
1098 <row> 1038 <row>
1099 <entry role="bold"><emphasis 1039 <entry role="bold"><emphasis
1100 role="bold">256</emphasis></entry> 1040 role="bold">1024</emphasis></entry>
1101 1041
1102 <entry>4528867</entry> 1042 <entry>1197323</entry>
1103 1043
1104 <entry>4528780</entry> 1044 <entry>1197324</entry>
1105 1045
1106 <entry>9999</entry> 1046 <entry>10000</entry>
1107 1047
1108 <entry>99.9</entry> 1048 <entry>100.00%</entry>
1109 </row> 1049 </row>
1110 </tbody> 1050 </tbody>
1111 </tgroup> 1051 </tgroup>
@@ -1123,7 +1063,7 @@ show port stats 0</programlisting><table>
1123 1063
1124 <itemizedlist> 1064 <itemizedlist>
1125 <listitem> 1065 <listitem>
1126 <para>pktgen TX in pps and Mbits/s</para> 1066 <para>pktgen TX in pps and MBps</para>
1127 </listitem> 1067 </listitem>
1128 1068
1129 <listitem> 1069 <listitem>
@@ -1135,9 +1075,9 @@ show port stats 0</programlisting><table>
1135 </listitem> 1075 </listitem>
1136 1076
1137 <listitem> 1077 <listitem>
1138 <para>throughput in percents, by dividing<emphasis role="bold"> 1078 <para>divide <emphasis role="bold">VM2 testpmd RX pps</emphasis>
1139 VM2 testpmd RX pps</emphasis> by <emphasis role="bold">pktgen TX 1079 by <emphasis role="bold">pktgen TX pps</emphasis> to obtain the
1140 pps</emphasis></para> 1080 throughput as a percentage (%), as in the example below</para>
1141 </listitem> 1081 </listitem>
1142 </itemizedlist> 1082 </itemizedlist>
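 <para>As a worked example, in the 64-byte row of the VM-to-VM
 results table later in this section, VM2 testpmd receives 1278792
 pps while pktgen transmits 14692306 pps. A quick sketch of the
 arithmetic (any shell calculator works; awk is used here only for
 illustration):<programlisting>awk 'BEGIN { printf "%.2f%%\n", 1278792 / 14692306 * 100 }'
# prints 8.70%, matching the throughput column</programlisting></para>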
1143 1083
@@ -1146,23 +1086,19 @@ show port stats 0</programlisting><table>
1146 1086
1147 <para>Start by doing the following:</para> 1087 <para>Start by doing the following:</para>
1148 1088
1149 <para>SSD boot using the following <literal>grub.cfg</literal> 1089 <para>Boot the board using the following U-Boot commands:
1150 entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 / 1090 <programlisting>setenv boot_board 'setenv userbootparams nohz_full=1-23 isolcpus=1-23 \
1151isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable / 1091rcu-nocbs=1-23 rcu_nocb_poll clocksource=tsc tsc=reliable nohpet \
1152clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 / 1092nosoftlockup audit=0 nmi_watchdog=0; setenv satapart 2; run bootsata'
1153processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt / 1093run boot_board</programlisting>Configure hugepages and set up
1154intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB / 1094 DPDK:<programlisting>echo 4 &gt; /proc/sys/vm/nr_hugepages
1155hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Kill 1095modprobe vfio-pci
1156 Services:<programlisting>killall ovsdb-server ovs-vswitchd 1096ifconfig enP1p1s0f1 down
1157rm -rf /etc/openvswitch/* 1097dpdk-devbind -b vfio-pci 0001:01:00.1</programlisting>Run
1158mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge
1159mount -t hugetlbfs nodev /mnt/huge
1160modprobe igb_uio
1161dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Run
1162 pktgen:<programlisting>cd /usr/share/apps/pktgen/ 1098 pktgen:<programlisting>cd /usr/share/apps/pktgen/
1163./pktgen -c 0x7 -n 4 --proc-type auto --socket-mem 256 / 1099./pktgen -v -c 0x7 -n 4 --proc-type auto -d /usr/lib/librte_pmd_thunderx_nicvf.so.1.1 \
1164-w 0000:03:00.0 -- -P -m "[1:2].0"</programlisting>Set pktgen frame size to 1100-w 0001:01:00.1 -- -P -m "[1:2].0"</programlisting>Choose one of the values
1165 use from [64, 128, 256, 512]:<programlisting>set 0 size 64</programlisting></para> 1101 from [64, 128, 256, 512] to change the packet size:<programlisting>set 0 size &lt;number&gt;</programlisting></para>
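 <para>Before starting traffic, the binding can be double-checked (a
 minimal sanity check using the same <command>dpdk-devbind</command>
 tool as above):<programlisting>dpdk-devbind --status
# 0001:01:00.1 should be listed under "Network devices using DPDK-compatible driver"</programlisting></para>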
1166 </section> 1102 </section>
1167 1103
1168 <section id="targettwo-usecase-six"> 1104 <section id="targettwo-usecase-six">
@@ -1179,83 +1115,210 @@ intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
1179hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Kill 1115hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Kill
1180 Services:<programlisting>killall ovsdb-server ovs-vswitchd 1116 Services:<programlisting>killall ovsdb-server ovs-vswitchd
1181rm -rf /etc/openvswitch/* 1117rm -rf /etc/openvswitch/*
1182mkdir -p /var/run/openvswitch</programlisting>Configure DPDK:<programlisting>mkdir -p /mnt/huge 1118mkdir -p /var/run/openvswitch</programlisting>Configure hugepages and set up
1183mount -t hugetlbfs nodev /mnt/huge 1119 DPDK:<programlisting>echo 20 &gt; /proc/sys/vm/nr_hugepages
1184modprobe igb_uio 1120modprobe vfio-pci
1185dpdk-devbind --bind=igb_uio 0000:03:00.0</programlisting>Configure 1121dpdk-devbind --bind=vfio-pci 0001:01:00.1</programlisting>Configure
1186 OVS:<programlisting>export DB_SOCK=/var/run/openvswitch/db.sock 1122 OVS:<programlisting>export DB_SOCK=/var/run/openvswitch/db.sock
1187ovsdb-tool create /etc/openvswitch/conf.db / 1123ovsdb-tool create /etc/openvswitch/conf.db /usr/share/openvswitch/vswitch.ovsschema
1188/usr/share/openvswitch/vswitch.ovsschema 1124ovsdb-server --remote=punix:$DB_SOCK --remote=db:Open_vSwitch,Open_vSwitch,manager_options \
1189ovsdb-server --remote=punix:$DB_SOCK / 1125 --pidfile --detach
1190--remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach
1191ovs-vsctl --no-wait init 1126ovs-vsctl --no-wait init
1192ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10 1127ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x10
1193ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc 1128ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=0xc
1194ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048 1129ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048
1195ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true 1130ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
1196ovs-vswitchd unix:$DB_SOCK --pidfile / 1131ovs-vswitchd unix:$DB_SOCK --pidfile --detach --log-file=/var/log/openvswitch/ovs-vswitchd.log
1197--detach --log-file=/var/log/openvswitch/ovs-vswitchd.log
1198
1199 1132
1200ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev 1133ovs-vsctl add-br ovsbr0 -- set bridge ovsbr0 datapath_type=netdev
1201ovs-vsctl add-port ovsbr0 dpdk0 / 1134ovs-vsctl add-port ovsbr0 dpdk0 -- set Interface dpdk0 type=dpdk \
1202-- set Interface dpdk0 type=dpdk options:dpdk-devargs=0000:03:00.0 ofport_request=1 1135 options:dpdk-devargs=0001:01:00.1 -- set Interface dpdk0 ofport_request=1
1203ovs-vsctl add-port ovsbr0 vhost-user1 / 1136ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 type=dpdkvhostuser \
1204-- set Interface vhost-user1 type=dpdkvhostuser ofport_request=2 1137 -- set Interface vhost-user1 ofport_request=2
1205ovs-vsctl add-port ovsbr0 vhost-user2 / 1138ovs-vsctl add-port ovsbr0 vhost-user2 -- set Interface vhost-user2 type=dpdkvhostuser \
1206-- set Interface vhost-user2 type=dpdkvhostuser ofport_request=3 1139 -- set Interface vhost-user2 ofport_request=3
1207
1208 1140
1209ovs-ofctl del-flows ovsbr0 1141ovs-ofctl del-flows ovsbr0
1210ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2 1142ovs-ofctl add-flow ovsbr0 in_port=1,action=output:2
1211ovs-ofctl add-flow ovsbr0 in_port=2,action=output:3</programlisting>Launch 1143ovs-ofctl add-flow ovsbr0 in_port=2,action=output:3</programlisting>Create an
1212 first QEMU instance, VM1:<programlisting>taskset -c 0,1 qemu-system-x86_64 -cpu host,+invtsc,migratable=no -M q35 / 1144 XML file with the content below, then run <command>virsh create
1213-smp cores=2,sockets=1 -vcpu 0,affinity=0 -vcpu 1,affinity=1 -enable-kvm / 1145 &lt;XML_FILE&gt;</command><programlisting>&lt;domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'&gt;
1214-nographic -realtime mlock=on -kernel /home/root/qemu/bzImage / 1146 &lt;name&gt;nfv-ovs-vm1&lt;/name&gt;
1215-drive file=/home/root/qemu/enea-nfv-access-guest-qemux86-64.ext4,/ 1147 &lt;uuid&gt;ed204646-1ad5-11e7-93ae-92361f002671&lt;/uuid&gt;
1216if=virtio,format=raw -m 2048 -object memory-backend-file,id=mem,/ 1148 &lt;memory unit='KiB'&gt;4194304&lt;/memory&gt;
1217size=2048M,mem-path=/mnt/huge,share=on -numa node,memdev=mem / 1149 &lt;currentMemory unit='KiB'&gt;4194304&lt;/currentMemory&gt;
1218-mem-prealloc -chardev socket,id=char0,path=/var/run/openvswitch/vhost-user1 / 1150
1219-netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce / 1151 &lt;memoryBacking&gt;
1220-device virtio-net-pci,mac=52:54:00:00:00:01,netdev=mynet1,/ 1152 &lt;hugepages&gt;
1221mrg_rxbuf=on,rx_queue_size=1024,csum=off,gso=off,guest_tso4=off,/ 1153 &lt;page size='512' unit='M' nodeset='0'/&gt;
1222guest_tso6=off,guest_ecn=off -append 'root=/dev/vda console=ttyS0 / 1154 &lt;/hugepages&gt;
1223hugepagesz=2M hugepages=512 isolcpus=1 nohz_full=1 rcu_nocbs=1 / 1155 &lt;/memoryBacking&gt;
1224irqaffinity=0 rcu_nocb_poll intel_pstate=disable intel_idle.max_cstate=0 / 1156
1225processor.max_cstate=0 mce=ignore_ce audit=0'</programlisting>Connect to 1157 &lt;os&gt;
1226 Target 2 through a new SSH session and run a second QEMU instance 1158 &lt;type arch='aarch64' machine='virt,gic_version=3'&gt;hvm&lt;/type&gt;
1227 (to get its own console, separate from instance VM1). We shall call 1159 &lt;kernel&gt;Image&lt;/kernel&gt;
1228 this VM2:<programlisting>taskset -c 4,5 qemu-system-x86_64 -cpu host,+invtsc,migratable=no / 1160 &lt;cmdline&gt;root=/dev/vda console=ttyAMA0,115200n8 maxcpus=24 coherent_pool=16M debug hugepagesz=512M hugepages=3 audit=0 isolcpus=1 nohz_full=1 rcu_nocbs=1 irqaffinity=0&lt;/cmdline&gt;
1229-M q35 -smp cores=2,sockets=1 -vcpu 0,affinity=4 -vcpu 1,affinity=5 / 1161 &lt;boot dev='hd'/&gt;
1230-enable-kvm -nographic -realtime mlock=on -kernel /home/root/qemu2/bzImage / 1162 &lt;/os&gt;
1231-drive file=/home/root/qemu2/enea-nfv-access-guest-qemux86-64.ext4,/ 1163
1232if=virtio,format=raw -m 2048 -object memory-backend-file,id=mem,size=2048M,/ 1164 &lt;features&gt;
1233mem-path=/mnt/huge,share=on -numa node,memdev=mem -mem-prealloc / 1165 &lt;acpi/&gt;
1234-chardev socket,id=char1,path=/var/run/openvswitch/vhost-user2 / 1166 &lt;apic/&gt;
1235-netdev type=vhost-user,id=mynet1,chardev=char1,vhostforce / 1167 &lt;/features&gt;
1236-device virtio-net-pci,mac=52:54:00:00:00:02,netdev=mynet1,/ 1168
1237mrg_rxbuf=on,rx_queue_size=1024,csum=off,gso=off,guest_tso4=off,/ 1169 &lt;vcpu placement='static'&gt;2&lt;/vcpu&gt;
1238guest_tso6=off,guest_ecn=off -append 'root=/dev/vda console=ttyS0 / 1170
1239hugepagesz=2M hugepages=512 isolcpus=1 nohz_full=1 rcu_nocbs=1 / 1171 &lt;cpu mode='host-model'&gt;
1240irqaffinity=0 rcu_nocb_poll intel_pstate=disable intel_idle.max_cstate=0 / 1172 &lt;model fallback='allow'/&gt;
1241processor.max_cstate=0 mce=ignore_ce audit=0'</programlisting>Configure DPDK 1173 &lt;topology sockets='1' cores='2' threads='1'/&gt;
1242 inside VM1:<programlisting>mkdir -p /mnt/huge 1174 &lt;numa&gt;
1243mount -t hugetlbfs nodev /mnt/huge 1175 &lt;cell id='0' cpus='0' memory='4194304' unit='KiB' memAccess='shared'/&gt;
1244modprobe igb_uio 1176 &lt;/numa&gt;
1245dpdk-devbind --bind=igb_uio 0000:00:02.0</programlisting>Run testpmd inside 1177 &lt;/cpu&gt;
1246 VM1:<programlisting>testpmd -c 0x3 -n 2 -d librte_pmd_virtio.so.1.1 / 1178
1247-- --burst 64 --disable-hw-vlan --disable-rss -i / 1179 &lt;cputune&gt;
1248--portmask=0x1 --coremask=0x2 --nb-cores=1 --rxq=1 / 1180 &lt;vcpupin vcpu="0" cpuset="4"/&gt;
1249--txq=1 --txd=512 --rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>Start 1181 &lt;vcpupin vcpu="1" cpuset="5"/&gt;
1250 testpmd inside VM1:<programlisting>start</programlisting>Configure 1182 &lt;/cputune&gt;
1251 DPDK inside VM2:<programlisting>mkdir -p /mnt/huge 1183
1252mount -t hugetlbfs nodev /mnt/huge 1184 &lt;on_poweroff&gt;destroy&lt;/on_poweroff&gt;
1253modprobe igb_uio 1185 &lt;on_reboot&gt;restart&lt;/on_reboot&gt;
1254dpdk-devbind --bind=igb_uio 0000:00:02.0</programlisting>Run testpmd inside 1186 &lt;on_crash&gt;destroy&lt;/on_crash&gt;
1255 VM2:<programlisting>testpmd -c 0x3 -n 2 -d librte_pmd_virtio.so.1.1 / 1187
1256-- --burst 64 --disable-hw-vlan --disable-rss -i --portmask=0x1 / 1188 &lt;devices&gt;
1257--coremask=0x2 --nb-cores=1 --rxq=1 --txq=1 --txd=512 / 1189 &lt;emulator&gt;/usr/bin/qemu-system-aarch64&lt;/emulator&gt;
1258--rxd=512 --txqflags=0xf00 --port-topology=chained</programlisting>Set VM2 for 1190 &lt;disk type='file' device='disk'&gt;
1191 &lt;driver name='qemu' type='raw' cache='none'/&gt;
1192 &lt;source file='enea-nfv-access-guest-qemuarm64.ext4'/&gt;
1193 &lt;target dev='vda' bus='virtio'/&gt;
1194 &lt;/disk&gt;
1195
1196 &lt;serial type='pty'&gt;
1197 &lt;target port='0'/&gt;
1198 &lt;/serial&gt;
1199
1200 &lt;console type='pty'&gt;
1201 &lt;target type='serial' port='0'/&gt;
1202 &lt;/console&gt;
1203 &lt;/devices&gt;
1204
1205 &lt;qemu:commandline&gt;
1206 &lt;qemu:arg value='-chardev'/&gt;
1207 &lt;qemu:arg value='socket,id=charnet0,path=/var/run/openvswitch/vhost-user1'/&gt;
1208
1209 &lt;qemu:arg value='-netdev'/&gt;
1210 &lt;qemu:arg value='type=vhost-user,id=hostnet0,chardev=charnet0'/&gt;
1211
1212 &lt;qemu:arg value='-device'/&gt;
1213 &lt;qemu:arg value='virtio-net-pci,netdev=hostnet0,id=net0,mac=00:00:00:00:00:01,bus=pcie.0,addr=0x2'/&gt;
1214 &lt;/qemu:commandline&gt;
1215&lt;/domain&gt;
1216</programlisting></para>
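 <para>If the domain was created successfully, it appears in the
 running domain list (a quick check with standard virsh
 commands):<programlisting>virsh list --all
# nfv-ovs-vm1 should be shown with state "running"</programlisting></para>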
1217
1218 <para>Connect to the first virtual machine's console by
1219 running:</para>
1220
1221 <para><programlisting>virsh console nfv-ovs-vm1</programlisting></para>
1222
1223 <para>The first virtual machine shall be called VM1.</para>
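 <para>(To detach from a virsh console without stopping the guest,
 use the escape sequence Ctrl+].)</para>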
1224
1225 <para>Connect to Target 2 through a new SSH session and launch a
1226 second VM by creating another XML file and running <command>virsh
1227 create &lt;XML_FILE2&gt;</command><programlisting>&lt;domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'&gt;
1228 &lt;name&gt;nfv-ovs-vm2&lt;/name&gt;
1229 &lt;uuid&gt;ed204646-1ad5-11e7-93ae-92361f002623&lt;/uuid&gt;
1230 &lt;memory unit='KiB'&gt;4194304&lt;/memory&gt;
1231 &lt;currentMemory unit='KiB'&gt;4194304&lt;/currentMemory&gt;
1232
1233 &lt;memoryBacking&gt;
1234 &lt;hugepages&gt;
1235 &lt;page size='512' unit='M' nodeset='0'/&gt;
1236 &lt;/hugepages&gt;
1237 &lt;/memoryBacking&gt;
1238
1239 &lt;os&gt;
1240 &lt;type arch='aarch64' machine='virt,gic_version=3'&gt;hvm&lt;/type&gt;
1241 &lt;kernel&gt;Image&lt;/kernel&gt;
1242 &lt;cmdline&gt;root=/dev/vda console=ttyAMA0,115200n8 maxcpus=24 coherent_pool=16M debug hugepagesz=512M hugepages=3 audit=0 isolcpus=1 nohz_full=1 rcu_nocbs=1 irqaffinity=0&lt;/cmdline&gt;
1243 &lt;boot dev='hd'/&gt;
1244 &lt;/os&gt;
1245
1246 &lt;features&gt;
1247 &lt;acpi/&gt;
1248 &lt;apic/&gt;
1249 &lt;/features&gt;
1250
1251 &lt;vcpu placement='static'&gt;2&lt;/vcpu&gt;
1252
1253 &lt;cpu mode='host-model'&gt;
1254 &lt;model fallback='allow'/&gt;
1255 &lt;topology sockets='1' cores='2' threads='1'/&gt;
1256 &lt;numa&gt;
1257 &lt;cell id='0' cpus='0' memory='4194304' unit='KiB' memAccess='shared'/&gt;
1258 &lt;/numa&gt;
1259 &lt;/cpu&gt;
1260
1261 &lt;cputune&gt;
1262 &lt;vcpupin vcpu="0" cpuset="6"/&gt;
1263 &lt;vcpupin vcpu="1" cpuset="7"/&gt;
1264 &lt;/cputune&gt;
1265
1266 &lt;on_poweroff&gt;destroy&lt;/on_poweroff&gt;
1267 &lt;on_reboot&gt;restart&lt;/on_reboot&gt;
1268 &lt;on_crash&gt;destroy&lt;/on_crash&gt;
1269
1270 &lt;devices&gt;
1271 &lt;emulator&gt;/usr/bin/qemu-system-aarch64&lt;/emulator&gt;
1272 &lt;disk type='file' device='disk'&gt;
1273 &lt;driver name='qemu' type='raw' cache='none'/&gt;
1274 &lt;source file='enea-nfv-access-guest-qemuarm64.ext4'/&gt;
1275 &lt;target dev='vda' bus='virtio'/&gt;
1276 &lt;/disk&gt;
1277
1278 &lt;serial type='pty'&gt;
1279 &lt;target port='0'/&gt;
1280 &lt;/serial&gt;
1281
1282 &lt;console type='pty'&gt;
1283 &lt;target type='serial' port='0'/&gt;
1284 &lt;/console&gt;
1285 &lt;/devices&gt;
1286
1287 &lt;qemu:commandline&gt;
1288 &lt;qemu:arg value='-chardev'/&gt;
1289 &lt;qemu:arg value='socket,id=charnet1,path=/var/run/openvswitch/vhost-user2'/&gt;
1290
1291 &lt;qemu:arg value='-netdev'/&gt;
1292 &lt;qemu:arg value='type=vhost-user,id=hostnet0,chardev=charnet1'/&gt;
1293
1294 &lt;qemu:arg value='-device'/&gt;
1295 &lt;qemu:arg value='virtio-net-pci,netdev=hostnet0,id=net0,mac=00:00:00:00:00:02,bus=pcie.0,addr=0x2'/&gt;
1296 &lt;/qemu:commandline&gt;
1297&lt;/domain&gt;
1298</programlisting></para>
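 <para>Note that this second domain XML differs from the first only
 in the domain name and UUID, the vhost-user socket path
 (<literal>vhost-user2</literal>), the vCPU pinning (host CPUs 6 and
 7 instead of 4 and 5), and the guest MAC address
 (00:00:00:00:00:02).</para>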
1299
1300 <para>Connect to the second virtual machine's console by
1301 running:</para>
1302
1303 <para><programlisting>virsh console nfv-ovs-vm2</programlisting></para>
1304
1305 <para>The second virtual machine shall be called VM2.</para>
1306
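 <para>At this point both guests should be attached to their
 vhost-user ports. This can be verified on Target 2 (a sketch based
 on the socket paths configured earlier):<programlisting>ls /var/run/openvswitch/
# the vhost-user1 and vhost-user2 sockets should be present
ovs-vsctl show</programlisting></para>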
1307 <para>Configure DPDK inside VM1:<programlisting>ifconfig enp0s2 down
1308echo 1 &gt; /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
1309modprobe vfio-pci
1310dpdk-devbind -b vfio-pci 0000:00:02.0</programlisting>Run testpmd inside
1311 VM1:<programlisting>testpmd -v -c 0x3 -n 4 -d /usr/lib/librte_pmd_virtio.so.1.1 \
1312 -w 0000:00:02.0 -- -i --disable-hw-vlan-filter \
1313 --no-flush-rx --port-topology=chained</programlisting>Start testpmd inside
1314 VM1:<programlisting>start</programlisting>Configure DPDK inside
1315 VM2:<programlisting>ifconfig enp0s2 down
1316echo 1 &gt; /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
1317modprobe vfio-pci
1318dpdk-devbind -b vfio-pci 0000:00:02.0</programlisting>Run testpmd inside
1319 VM2:<programlisting>testpmd -v -c 0x3 -n 4 -d /usr/lib/librte_pmd_virtio.so.1.1 \
1320 -w 0000:00:02.0 -- -i --disable-hw-vlan-filter \
1321 --no-flush-rx --port-topology=chained</programlisting>Set VM2 for
1259 termination and start testpmd:<programlisting>set fwd rxonly 1322 termination and start testpmd:<programlisting>set fwd rxonly
1260start</programlisting>On target 1, start pktgen traffic:<programlisting>start 0</programlisting>Use 1323start</programlisting>On target 1, start pktgen traffic:<programlisting>start 0</programlisting>Use
1261 this command to refresh the testpmd display in VM1 and VM2 and note the 1324 this command to refresh the testpmd display in VM1 and VM2 and note the
@@ -1326,312 +1389,88 @@ show port stats 0</programlisting>For VM1, we record the stats relevant for
1326 <entry role="bold"><emphasis 1389 <entry role="bold"><emphasis
1327 role="bold">64</emphasis></entry> 1390 role="bold">64</emphasis></entry>
1328 1391
1329 <entry>14877757</entry> 1392 <entry>14692306</entry>
1330 1393
1331 <entry>7712835</entry> 1394 <entry>1986888</entry>
1332 1395
1333 <entry>6024320</entry> 1396 <entry>1278884</entry>
1334 1397
1335 <entry>6015525</entry> 1398 <entry>1278792</entry>
1336 1399
1337 <entry>9997</entry> 1400 <entry>9870</entry>
1338 1401
1339 <entry>40.0</entry> 1402 <entry>8.70%</entry>
1340 </row> 1403 </row>
1341 1404
1342 <row> 1405 <row>
1343 <entry><emphasis role="bold">128</emphasis></entry> 1406 <entry><emphasis role="bold">128</emphasis></entry>
1344 1407
1345 <entry>8441333</entry> 1408 <entry>8445997</entry>
1346 1409
1347 <entry>7257432</entry> 1410 <entry>1910675</entry>
1348 1411
1349 <entry>5717540</entry> 1412 <entry>1205371</entry>
1350 1413
1351 <entry>5716752</entry> 1414 <entry>1205371</entry>
1352 1415
1353 <entry>9994</entry> 1416 <entry>10000</entry>
1354 1417
1355 <entry>67.7</entry> 1418 <entry>14.27%</entry>
1356 </row> 1419 </row>
1357 1420
1358 <row> 1421 <row>
1359 <entry role="bold"><emphasis 1422 <entry role="bold"><emphasis
1360 role="bold">256</emphasis></entry> 1423 role="bold">256</emphasis></entry>
1361 1424
1362 <entry>4528865</entry> 1425 <entry>4529126</entry>
1363 1426
1364 <entry>4528717</entry> 1427 <entry>1723468</entry>
1365 1428
1366 <entry>4528717</entry> 1429 <entry>1080976</entry>
1367 1430
1368 <entry>4528621</entry> 1431 <entry>1080977</entry>
1369 1432
1370 <entry>9999</entry> 1433 <entry>10000</entry>
1371 1434
1372 <entry>99.9</entry> 1435 <entry>23.87%</entry>
1373 </row> 1436 </row>
1374 </tbody>
1375 </tgroup>
1376 </table>
1377 </section>
1378 </section>
1379
1380 <section id="pxe-config-vm">
1381 <title>SR-IOV in Virtual Machines</title>
1382
1383 <para>PCI passthrough tests using pktgen and testpmd in virtual
1384 machines.</para>
1385 1437
1386 <para>pktgen[DPDK]VM - PHY - VM[DPDK] testpmd.</para> 1438 <row>
1387 1439 <entry><emphasis role="bold">512</emphasis></entry>
1388 <para>Measurements:</para>
1389
1390 <itemizedlist>
1391 <listitem>
1392 <para>pktgen to testpmd in <emphasis
1393 role="bold">forwarding</emphasis> mode.</para>
1394 </listitem>
1395
1396 <listitem>
1397 <para>pktgen to testpmd in <emphasis
1398 role="bold">termination</emphasis> mode.</para>
1399 </listitem>
1400 </itemizedlist>
1401
1402 <section id="test-setup-target-four">
1403 <title>Test Setup</title>
1404
1405 <para>SSD boot using the following <literal>grub.cfg</literal>
1406 entry: <programlisting>linux (hd0,gpt3)/boot/bzImage root=/dev/sda3 ip=dhcp nohz_full=1-7 /
1407isolcpus=1-7 rcu-nocbs=1-7 rcu_nocb_poll intel_pstate=disable /
1408clocksource=tsc tsc=reliable nohpet nosoftlockup intel_idle.max_cstate=0 /
1409processor.max_cstate=0 mce=ignore_ce audit=0 nmi_watchdog=0 iommu=pt /
1410intel_iommu=on hugepagesz=1GB hugepages=8 default_hugepagesz=1GB /
1411hugepagesz=2M hugepages=2048 vfio_iommu_type1.allow_unsafe_interrupts=1</programlisting>Stop
1412 other services and mount hugepages: <programlisting>systemctl stop openvswitch
1413mkdir -p /mnt/huge
1414mount -t hugetlbfs hugetlbfs /mnt/huge</programlisting>Configure SR-IOV
1415 interfaces:<programlisting>/usr/share/usertools/dpdk-devbind.py --bind=ixgbe 0000:03:00.0
1416echo 2 &gt; /sys/class/net/eno3/device/sriov_numvfs
1417ifconfig eno3 10.0.0.1
1418modprobe vfio_pci
1419/usr/share/usertools/dpdk-devbind.py --bind=vfio-pci 0000:03:10.0
1420/usr/share/usertools/dpdk-devbind.py --bind=vfio-pci 0000:03:10.2
1421ip link set eno3 vf 0 mac 0c:c4:7a:E5:0F:48
1422ip link set eno3 vf 1 mac 0c:c4:7a:BF:52:E7</programlisting>Launch two QEMU
1423 instances: <programlisting>taskset -c 4,5 qemu-system-x86_64 -cpu host,+invtsc,migratable=no -M /
1424q35 -smp cores=2,sockets=1 -vcpu 0,affinity=4 -vcpu 1,affinity=5 -enable-kvm /
1425-nographic -kernel /mnt/qemu/bzImage /
1426-drive file=/mnt/qemu/enea-nfv-access-guest-qemux86-64.ext4,if=virtio,/
1427format=raw -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,/
1428share=on -numa node,memdev=mem -mem-prealloc -device vfio-pci,host=03:10.0 /
1429-append 'root=/dev/vda console=ttyS0 hugepagesz=2M hugepages=1024 /
1430isolcpus=1 nohz_full=1 rcu_nocbs=1 irqaffinity=0 rcu_nocb_poll /
1431intel_pstate=disable intel_idle.max_cstate=0 /
1432processor.max_cstate=0 mce=ignore_ce audit=0'
1433
1434
1435taskset -c 2,3 qemu-system-x86_64 -cpu host,+invtsc,migratable=no -M /
1436q35 -smp cores=2,sockets=1 -vcpu 0,affinity=2 -vcpu 1,affinity=3 -enable-kvm /
1437-nographic -kernel /mnt/qemu/bzImage /
1438-drive file=/mnt/qemu/enea-nfv-access-guest-qemux86-64.ext4,if=virtio,/
1439format=raw -m 4096 -object memory-backend-file,id=mem,size=4096M,mem-path=/mnt/huge,/
1440share=on -numa node,memdev=mem -mem-prealloc -device vfio-pci,host=03:10.2 /
1441-append 'root=/dev/vda console=ttyS0 hugepagesz=2M hugepages=1024 /
1442isolcpus=1 nohz_full=1 rcu_nocbs=1 irqaffinity=0 rcu_nocb_poll /
1443intel_pstate=disable intel_idle.max_cstate=0 processor.max_cstate=0 /
1444mce=ignore_ce audit=0'</programlisting>In the first VM, mount hugepages and
1445 start pktgen:<programlisting>mkdir -p /mnt/huge &amp;&amp; \
1446mount -t hugetlbfs hugetlbfs /mnt/huge
1447modprobe igb_uio
1448/usr/share/usertools/dpdk-devbind.py --bind=igb_uio 0000:00:03.0
1449cd /usr/share/apps/pktgen
1450./pktgen -c 0x3 -- -P -m "1.0"</programlisting>In the pktgen console set the
1451 MAC of the destination and start generating
1452 packets:<programlisting>set mac 0 0C:C4:7A:BF:52:E7
1453str</programlisting>In the second VM, mount hugepages and start
1454 testpmd:<programlisting>mkdir -p /mnt/huge &amp;&amp; \
1455mount -t hugetlbfs hugetlbfs /mnt/huge
1456modprobe igb_uio
1457/usr/share/usertools/dpdk-devbind.py --bind=igb_uio 0000:00:03.0
1458testpmd -c 0x3 -n 2 -- -i --txd=512 --rxd=512 --port-topology=chained /
1459--eth-peer=0,0c:c4:7a:e5:0f:48</programlisting>In order to enable <emphasis
1460 role="bold">forwarding</emphasis> mode, in the testpmd console,
1461 run:<programlisting>set fwd mac
1462start</programlisting>In order to enable <emphasis
1463 role="bold">termination</emphasis> mode, in the testpmd console,
1464 run:<programlisting>set fwd rxonly
1465start</programlisting><table>
1466 <title>Results in forwarding mode</title>
1467
1468 <tgroup cols="5">
1469 <tbody>
1470 <row>
1471 <entry align="center"><emphasis
1472 role="bold">Bytes</emphasis></entry>
1473
1474 <entry align="center"><emphasis role="bold">VM1 pktgen pps
1475 TX</emphasis></entry>
1476
1477 <entry align="center"><emphasis role="bold">VM1 pktgen pps
1478 RX</emphasis></entry>
1479
1480 <entry align="center"><emphasis role="bold">VM2 testpmd
1481 pps RX</emphasis></entry>
1482
1483 <entry align="center"><emphasis role="bold">VM2 testpmd
1484 pps TX</emphasis></entry>
1485 </row>
1486
1487 <row>
1488 <entry role="bold"><emphasis
1489 role="bold">64</emphasis></entry>
1490
1491 <entry>7102096</entry>
1492
1493 <entry>7101897</entry>
1494
1495 <entry>7103853</entry>
1496
1497 <entry>7103793</entry>
1498 </row>
1499
1500 <row>
1501 <entry><emphasis role="bold">128</emphasis></entry>
1502
1503 <entry>5720016</entry>
1504
1505 <entry>5720256</entry>
1506
1507 <entry>5722081</entry>
1508
1509 <entry>5722083</entry>
1510 </row>
1511
1512 <row>
1513 <entry role="bold"><emphasis
1514 role="bold">256</emphasis></entry>
1515
1516 <entry>3456619</entry>
1517
1518 <entry>3456164</entry>
1519
1520 <entry>3456319</entry>
1521
1522 <entry>3456321</entry>
1523 </row>
1524
1525 <row>
1526 <entry role="bold"><emphasis
1527 role="bold">512</emphasis></entry>
1528
1529 <entry>1846671</entry>
1530
1531 <entry>1846628</entry>
1532
1533 <entry>1846652</entry>
1534
1535 <entry>1846657</entry>
1536 </row>
1537
1538 <row>
1539 <entry role="bold"><emphasis
1540 role="bold">1024</emphasis></entry>
1541
1542 <entry>940799</entry>
1543
1544 <entry>940748</entry>
1545
1546 <entry>940788</entry>
1547
1548 <entry>940788</entry>
1549 </row>
1550
1551 <row>
1552 <entry role="bold"><emphasis
1553 role="bold">1500</emphasis></entry>
1554
1555 <entry>649594</entry>
1556
1557 <entry>649526</entry>
1558
1559 <entry>649563</entry>
1560
1561 <entry>649563</entry>
1562 </row>
1563 </tbody>
1564 </tgroup>
1565 </table><table>
1566 <title>Results in termination mode</title>
1567
1568 <tgroup cols="3">
1569 <tbody>
1570 <row>
1571 <entry align="center"><emphasis
1572 role="bold">Bytes</emphasis></entry>
1573
1574 <entry align="center"><emphasis role="bold">VM1 pktgen pps
1575 TX</emphasis></entry>
1576
1577 <entry align="center"><emphasis role="bold">VM2 testpmd
1578 RX</emphasis></entry>
1579 </row>
1580
1581 <row>
1582 <entry role="bold"><emphasis
1583 role="bold">64</emphasis></entry>
1584
1585 <entry>14202904</entry>
1586
1587 <entry>14203944</entry>
1588 </row>
1589
1590 <row>
1591 <entry><emphasis role="bold">128</emphasis></entry>
1592
1593 <entry>8434766</entry>
1594 1440
1595 <entry>8437525</entry> 1441 <entry>2349638</entry>
1596 </row>
1597 1442
1598 <row> 1443 <entry>1559367</entry>
1599 <entry role="bold"><emphasis
1600 role="bold">256</emphasis></entry>
1601 1444
1602 <entry>4532131</entry> 1445 <entry>972923</entry>
1603 1446
1604 <entry>4532348</entry> 1447 <entry>972921</entry>
1605 </row>
1606 1448
1607 <row> 1449 <entry>10000</entry>
1608 <entry><emphasis role="bold">512</emphasis></entry>
1609 1450
1610 <entry>2349344</entry> 1451 <entry>41.41%</entry>
1452 </row>
1611 1453
1612 <entry>2349032</entry> 1454 <row>
1613 </row> 1455 <entry><emphasis role="bold">1024</emphasis></entry>
1614 1456
1615 <row> 1457 <entry>1197322</entry>
1616 <entry><emphasis role="bold">1024</emphasis></entry>
1617 1458
1618 <entry>1197293</entry> 1459 <entry>1197318</entry>
1619 1460
1620 <entry>1196699</entry> 1461 <entry>839508</entry>
1621 </row>
1622 1462
1623 <row> 1463 <entry>839508</entry>
1624 <entry><emphasis role="bold">1500</emphasis></entry>
1625 1464
1626 <entry>822321</entry> 1465 <entry>10000</entry>
1627 1466
1628 <entry>822276</entry> 1467 <entry>70.12%</entry>
1629 </row> 1468 </row>
1630 </tbody> 1469 </tbody>
1631 </tgroup> 1470 </tgroup>
1632 </table></para> 1471 </table>
1633 </section> 1472 </section>
1634 </section> 1473 </section>
1635 </section> 1474 </section>
1636 </section> 1475 </section>
1637</chapter> \ No newline at end of file 1476</chapter>