<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<chapter id="hypervisor_virt">
  <title>Hypervisor Virtualization</title>

  <para>KVM (Kernel-based Virtual Machine) is a virtualization
  infrastructure for the Linux kernel which turns it into a hypervisor. KVM
  requires a processor with hardware virtualization extensions.</para>
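
  <para>Whether the processor provides the required extensions can be checked
  on the host, for example by looking for the <literal>vmx</literal> (Intel)
  or <literal>svm</literal> (AMD) CPU flags and for the KVM device
  node:</para>

  <programlisting>$ egrep -c '(vmx|svm)' /proc/cpuinfo
$ ls /dev/kvm</programlisting>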

  <para>KVM uses QEMU, an open source machine emulator and virtualizer, to
  virtualize a complete system. With KVM it is possible to run multiple guests
  of a variety of operating systems, each with a complete set of virtualized
  hardware.</para>

  <section id="launch_virt_machine">
    <title>Launching a Virtual Machine</title>

    <para>QEMU can make use of KVM when running a target architecture that is
    the same as the host architecture. For instance, when running
    qemu-system-x86_64 on an x86-64 compatible processor (with the Intel VT or
    AMD-V virtualization extensions), you can take advantage of KVM
    acceleration, benefiting both the host and the guest system.</para>

    <para>Enea NFV Access includes an optimized version of QEMU with KVM-only
    support. To use KVM, pass <command>--enable-kvm</command> to QEMU.</para>

    <para>The following is an example of starting a guest:</para>

    <programlisting>taskset -c 0,1 qemu-system-x86_64 \
-cpu host -M q35 -smp cores=2,sockets=1 \
-vcpu 0,affinity=0 -vcpu 1,affinity=1  \
-enable-kvm -nographic \
-kernel bzImage \
-drive file=enea-image-virtualization-guest-qemux86-64.ext4,if=virtio,format=raw \
-append 'root=/dev/vda console=ttyS0,115200' \
-m 4096 \
-object memory-backend-file,id=mem,size=4096M,mem-path=/dev/hugepages,share=on \
-numa node,memdev=mem -mem-prealloc</programlisting>
  </section>

  <section id="qemu_boot">
    <title>Main QEMU boot options</title>

    <para>The pertinent boot options for the QEMU emulator are detailed
    below:</para>

    <itemizedlist>
      <listitem>
        <para>SMP - at least 2 cores should be enabled in order to isolate
        application(s) running in virtual machine(s) on specific cores for
        better performance.</para>

        <programlisting>-smp cores=2,threads=1,sockets=1 \</programlisting>
      </listitem>

      <listitem>
        <para>CPU affinity - associate virtual CPUs with physical CPUs and
        optionally assign a default real time priority to the virtual CPU
        process in the host kernel. This option allows you to start qemu vCPUs
        on isolated physical CPUs.</para>

        <programlisting>-vcpu 0,affinity=0   \</programlisting>
      </listitem>

      <listitem>
        <para>Hugepages - KVM guests can be deployed with huge page memory
        support in order to improve performance. By using huge pages for a KVM
        guest, less memory is used for page tables and TLB (Translation
        Lookaside Buffer) misses are reduced, thereby significantly increasing
        performance, especially in memory-intensive situations. Huge pages
        must be reserved and mounted on the host beforehand; a minimal sketch
        is shown after this list.</para>

        <programlisting>-object memory-backend-file,id=mem,size=4096M,mem-path=/dev/hugepages,share=on \</programlisting>
      </listitem>

      <listitem>
        <para>Memory preallocation - preallocate huge pages at startup time
        can improve performance but it may affect the qemu boot time.</para>

        <programlisting>-mem-prealloc \</programlisting>
      </listitem>

      <listitem>
        <para>Enable realtime characteristics - run QEMU with realtime
        features. The <literal>-realtime</literal> option by itself does not
        enable any realtime behavior; it is only an umbrella for options that
        affect realtime performance. In a realtime or low latency environment
        pages should not be swapped out, which is what
        <literal>mlock=on</literal> ensures. If VM density is more important,
        swappable VMs may be acceptable, hence
        <literal>mlock=off</literal>.</para>

        <programlisting>-realtime mlock=on \</programlisting>
      </listitem>
    </itemizedlist>
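
    <para>Backing guest memory with huge pages requires that huge pages are
    reserved and a hugetlbfs mount point exists on the host. A minimal sketch
    follows; the page size, page count and mount point are examples and should
    be adapted to the deployment:</para>

    <programlisting>$ echo 2048 &gt; /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
$ mkdir -p /dev/hugepages
$ mount -t hugetlbfs hugetlbfs /dev/hugepages</programlisting>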

    <para>If the hardware does not have an IOMMU (known as "Intel VT-d" on
    Intel-based machines and "AMD I/O Virtualization Technology" on AMD-based
    machines), it will not be possible to assign devices to guests in KVM.
    Virtualization Technology features (VT-d, VT-x, etc.) must be enabled in
    the BIOS of the host target before starting a virtual machine.</para>
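
    <para>Whether the IOMMU was detected and enabled by the kernel can be
    verified after boot, for example with:</para>

    <programlisting>$ dmesg | grep -i -e DMAR -e IOMMU
$ ls /sys/kernel/iommu_groups/</programlisting>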
  </section>

  <section id="net_in_guest">
    <title>Networking in guest</title>

    <section id="vhost-user-support">
      <title>Using vhost-user support</title>

      <para>The goal of vhost-user is to implement a Virtio transport, staying
      as close as possible to the vhost paradigm of using shared memory,
      ioeventfds and irqfds. A UNIX domain socket based mechanism allows
      setting up the resources used by a number of Vrings, which are shared
      between two userspace processes and placed in shared memory.</para>

      <para>To run QEMU with the vhost-user backend, you have to provide the
      named UNIX domain socket which needs to be already opened by the
      backend:</para>

      <programlisting>-object memory-backend-file,id=mem,size=4096M,mem-path=/dev/hugepages,share=on \
-chardev socket,id=char0,path=/var/run/openvswitch/vhost-user1 \
-netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce \
-device virtio-net-pci,netdev=mynet1,mac=52:54:00:00:00:01  \</programlisting>
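
      <para>In this setup the backend (the virtual switch on the host) owns
      the socket. Assuming an OVS-DPDK bridge is used as the backend, a port
      of type <literal>dpdkvhostuser</literal> could be created as in the
      sketch below; the bridge and port names are examples:</para>

      <programlisting>$ ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 \
  type=dpdkvhostuser</programlisting>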

      <para>The vhost-user standard uses a client-server model. The server
      creates and manages the vhost-user sockets, and the client connects to
      the sockets created by the server. It is recommended to use QEMU as the
      server, so that the vhost-user client can be restarted without affecting
      the server; otherwise, if the server side dies, all clients need to be
      restarted.</para>

      <para>Using vhost-user in QEMU as the server offers the flexibility to
      stop and start the virtual machine with no impact on the virtual switch
      on the host (vhost-user-client):</para>

      <programlisting>-chardev socket,id=char0,path=/var/run/openvswitch/vhost-user1,server \</programlisting>
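
      <para>When QEMU owns the socket as server, the host side connects as a
      client. With an OVS-DPDK backend this is typically a port of type
      <literal>dpdkvhostuserclient</literal> pointing at the socket path, as
      sketched below; names and paths are examples:</para>

      <programlisting>$ ovs-vsctl add-port ovsbr0 vhost-user1 -- set Interface vhost-user1 \
  type=dpdkvhostuserclient options:vhost-server-path=/var/run/openvswitch/vhost-user1</programlisting>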
    </section>

    <section id="tap-interface">
      <title>Using TAP Interfaces</title>

      <para>QEMU can use TAP interfaces to provide full networking capability
      for the guest OS:</para>

      <programlisting>-netdev tap,id=net0,ifname=tap0,script=no,downscript=no \
-device virtio-net-pci,netdev=net0,mac=22:EA:FB:A8:25:AE \</programlisting>
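
      <para>With <literal>script=no</literal> QEMU does not configure the
      interface itself, so the TAP device is expected to exist and be
      configured on the host beforehand. A minimal sketch, where the interface
      and bridge names are examples:</para>

      <programlisting>$ ip tuntap add dev tap0 mode tap
$ ip link set tap0 up
$ ip link set tap0 master br0</programlisting>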
    </section>

    <section id="vfio-passthrough">
      <title>VFIO passthrough VF (SR-IOV) to guest</title>

      <para>The KVM hypervisor supports attaching PCI devices on the host
      system to guests. PCI passthrough allows guests to have exclusive access
      to PCI devices for a range of tasks, and lets PCI devices appear and
      behave as if they were physically attached to the guest operating
      system.</para>

      <para>Preparing an Intel system for PCI passthrough:</para>

      <itemizedlist>
        <listitem>
          <para>Enable the Intel VT-d extensions in BIOS</para>
        </listitem>

        <listitem>
          <para>Activate Intel VT-d in the kernel by using
          <literal>intel_iommu=on</literal> as a kernel boot parameter</para>
        </listitem>

        <listitem>
          <para>Allow unsafe interrupts in case the system doesn't support
          interrupt remapping. This can be done using
          <literal>vfio_iommu_type1.allow_unsafe_interrupts=1</literal> as a
          boot kernel parameter.</para>
        </listitem>
      </itemizedlist>

      <para>Create the guest with direct passthrough via the VFIO framework
      like so:</para>

      <programlisting>-device vfio-pci,host=0000:03:10.2 \</programlisting>

      <para>On the host, one or more Virtual Functions (VFs) must be created
      and bound to the VFIO driver before starting QEMU, so that they can be
      allocated to the guest network:</para>

      <programlisting>$ echo 2 &gt; /sys/class/net/eno3/device/sriov_numvfs
$ modprobe vfio_pci
$ dpdk-devbind.py --bind=vfio-pci 0000:03:10.2</programlisting>
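
      <para>The PCI addresses of the newly created VFs, and which driver they
      are bound to, can be checked for example with:</para>

      <programlisting>$ dpdk-devbind.py --status</programlisting>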
    </section>

    <section id="multiqueue">
      <title>Multi-queue</title>

      <section id="qemu-multiqueue-support">
        <title>QEMU multi queue support configuration</title>

        <programlisting>-chardev socket,id=char0,path=/var/run/openvswitch/vhost-user1 \
-netdev type=vhost-user,id=net0,chardev=char0,queues=2 \
-device virtio-net-pci,netdev=net0,mac=22:EA:FB:A8:25:AE,mq=on,vectors=6</programlisting>

        <para>where <literal>vectors</literal> is calculated as: 2 + 2 *
        number of queues.</para>
      </section>

      <section id="inside-guest">
        <title>Inside guest</title>

        <para>Linux kernel virtio-net driver (one queue is enabled by
        default):</para>

        <programlisting>$ ethtool -L eth0 combined 2</programlisting>

        <para>DPDK Virtio PMD:</para>

        <programlisting>$ testpmd -c 0x7 -- -i --rxq=2 --txq=2 --nb-cores=2 ...</programlisting>
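
        <para>The number of queues currently enabled, and the supported
        maximum, can be verified inside the guest with, for example:</para>

        <programlisting>$ ethtool -l eth0</programlisting>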

        <para>For QEMU documentation please see: <ulink
        url="https://qemu.weilnetz.de/doc/qemu-doc.html">https://qemu.weilnetz.de/doc/qemu-doc.html</ulink>.</para>
      </section>
    </section>
  </section>

  <section id="libvirt">
    <title>Libvirt</title>

    <para>One way to manage guests in Enea NFV Access is by using
    <literal>libvirt</literal>. Libvirt is used in conjunction with a daemon
    (<literal>libvirtd</literal>) and a command line utility (virsh) to manage
    virtualized environments.</para>

    <para>The libvirt library is a hypervisor-independent virtualization API
    and toolkit that is able to interact with the virtualization capabilities
    of a range of operating systems. Libvirt provides a common, generic and
    stable layer to securely manage domains on a node. As nodes may be
    remotely located, libvirt provides all methods required to provision,
    create, modify, monitor, control, migrate and stop the domains, within the
    limits of hypervisor support for these operations.</para>

    <para>The libvirt daemon runs on the Enea NFV Access host. All tools built
    on libvirt API connect to the daemon to request the desired operation, and
    to collect information about the configuration and resources of the host
    system and guests. <literal>virsh</literal> is a command line interface
    tool for managing guests and the hypervisor. The virsh tool is built on
    the libvirt management API.</para>

    <para><emphasis role="bold">Major functionality provided by
    libvirt</emphasis></para>

    <para>The following is a summary from the libvirt <ulink
    url="http://wiki.libvirt.org/page/FAQ#What_is_libvirt.3F">home
    page</ulink> describing the major libvirt features:</para>

    <itemizedlist>
      <listitem>
        <para><emphasis role="bold">VM management:</emphasis> Various domain
        lifecycle operations such as start, stop, pause, save, restore, and
        migrate. Hotplug operations for many device types including disk and
        network interfaces, memory, and cpus.</para>
      </listitem>

      <listitem>
        <para><emphasis role="bold">Remote machine support:</emphasis> All
        libvirt functionality is accessible on any machine running the libvirt
        daemon, including remote machines. A variety of network transports are
        supported for connecting remotely, with the simplest being
        <literal>SSH</literal>, which requires no extra explicit
        configuration. For more information, see: <ulink
        url="http://libvirt.org/remote.html">http://libvirt.org/remote.html</ulink>.</para>
      </listitem>

      <listitem>
        <para><emphasis role="bold">Network interface management:</emphasis>
        Any host running the libvirt daemon can be used to manage physical and
        logical network interfaces. Enumerate existing interfaces, as well as
        configure (and create) interfaces, bridges, vlans, and bond devices.
        For more details see: <ulink
        url="https://fedorahosted.org/netcf/">https://fedorahosted.org/netcf/</ulink>.</para>
      </listitem>

      <listitem>
        <para><emphasis role="bold">Virtual NAT and Route based
        networking:</emphasis> Any host running the libvirt daemon can manage
        and create virtual networks. Libvirt virtual networks use firewall
        rules to act as a router, providing VMs transparent access to the host
        machines network. For more information, see: <ulink
        url="http://libvirt.org/archnetwork.html">http://libvirt.org/archnetwork.html</ulink>.</para>
      </listitem>

      <listitem>
        <para><emphasis role="bold">Storage management:</emphasis> Any host
        running the libvirt daemon can be used to manage various types of
        storage: create file images of various formats (raw, qcow2, etc.),
        mount NFS shares, enumerate existing LVM volume groups, create new LVM
        volume groups and logical volumes, partition raw disk devices, mount
        iSCSI shares, and much more. For more details, see: <ulink
        url="http://libvirt.org/storage.html">http://libvirt.org/storage.html</ulink>.</para>
      </listitem>

      <listitem>
        <para><emphasis role="bold">Libvirt Configuration:</emphasis> A
        properly running libvirt requires that the following elements be in
        place:</para>

        <itemizedlist>
          <listitem>
            <para>Configuration files, located in the directory
            <literal>/etc/libvirt</literal>. They include the daemon's
            configuration file <literal>libvirtd.conf</literal>, and
            hypervisor-specific configuration files, such as
            <literal>qemu.conf</literal> for the QEMU driver.</para>
          </listitem>

          <listitem>
            <para>A running libvirtd daemon. The daemon is started
            automatically on the Enea NFV Access host.</para>
          </listitem>

          <listitem>
            <para>Configuration files for the libvirt domains, or guests, to
            be managed by the KVM host. The specifics for guest domains shall
            be defined in an XML file of a format specified at <ulink
            url="http://libvirt.org/formatdomain.html">http://libvirt.org/formatdomain.html</ulink>.
            XML formats for other structures are specified at <ulink type=""
            url="http://libvirt.org/format.html">http://libvirt.org/format.html</ulink>.</para>
          </listitem>
        </itemizedlist>
      </listitem>
    </itemizedlist>

    <section id="boot-kvm-guest">
      <title>Booting a KVM Guest</title>

      <para>There are several ways to boot a KVM guest. Here we describe how
      to boot using a raw image. A direct kernel boot can be performed by
      transferring the guest kernel and the file system files to the host and
      specifying a <literal>&lt;kernel&gt;</literal> and an
      <literal>&lt;initrd&gt;</literal> element inside the
      <literal>&lt;os&gt;</literal> element of the guest XML file, as in the
      following example:</para>

      <programlisting>&lt;os&gt;
  &lt;kernel&gt;bzImage&lt;/kernel&gt;
&lt;/os&gt;
&lt;devices&gt;
  &lt;disk type='file' device='disk'&gt;
    &lt;driver name='qemu' type='raw' cache='none'/&gt;
    &lt;source file='enea-image-virtualization-guest-qemux86-64.ext4'/&gt;
    &lt;target dev='vda' bus='virtio'/&gt;
  &lt;/disk&gt;
&lt;/devices&gt;</programlisting>
    </section>

    <section id="start-guest">
      <title>Starting a Guest</title>

      <para>Command <command>virsh create</command> starts a guest:</para>

      <programlisting>virsh create example-guest-x86.xml</programlisting>
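
      <para>The running guest should then show up in the list of active
      domains:</para>

      <programlisting>virsh list</programlisting>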

      <para>If further configurations are needed before the guest is reachable
      through <literal>ssh</literal>, a console can be started using command
      <command>virsh console</command>. The example below shows how to start a
      console where kvm-example-guest is the name of the guest defined in the
      guest XML file:</para>

      <programlisting>virsh console kvm-example-guest</programlisting>

      <para>This requires that the guest domain has a console configured in
      the guest XML file:</para>

      <programlisting>&lt;os&gt;
  &lt;cmdline&gt;console=ttyS0,115200&lt;/cmdline&gt;
&lt;/os&gt;
&lt;devices&gt;
  &lt;console type='pty'&gt;
     &lt;target type='serial' port='0'/&gt;
  &lt;/console&gt;
&lt;/devices&gt;</programlisting>
    </section>

    <section id="isolation">
      <title>Isolation</title>

      <para>It may be desirable to isolate execution in a guest to a specific
      guest core. It might also be desirable to run a guest on a specific host
      core.</para>

      <para>To pin the virtual CPUs of the guest to specific cores, configure
      the <literal>&lt;cputune&gt;</literal> contents as follows:</para>

      <orderedlist>
        <listitem>
          <para>First explicitly state on which host core each guest core
          shall run, by mapping <literal>vcpu</literal> to
          <literal>cpuset</literal> in the <literal>&lt;vcpupin&gt;</literal>
          tag.</para>
        </listitem>

        <listitem>
          <para>In the <literal>&lt;cputune&gt;</literal> tag it is further
          possible to specify on which CPU the emulator shall run by adding
          the cpuset to the <literal>&lt;emulatorpin&gt;</literal> tag.</para>

          <programlisting>&lt;vcpu placement='static'&gt;2&lt;/vcpu&gt;
&lt;cputune&gt;
  &lt;vcpupin vcpu='0' cpuset='2'/&gt;
  &lt;vcpupin vcpu='1' cpuset='3'/&gt;
  &lt;emulatorpin cpuset="2"/&gt;
&lt;/cputune&gt;</programlisting>

          <para><literal>libvirt</literal> will group all threads belonging to
          a qemu instance into cgroups that will be created for that purpose.
          It is possible to supply a base name for those cgroups using the
          <literal>&lt;resource&gt;</literal> tag:</para>

          <programlisting>&lt;resource&gt;
  &lt;partition&gt;/rt&lt;/partition&gt;
&lt;/resource&gt;</programlisting>
        </listitem>
      </orderedlist>
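
      <para>Once the guest is running, the effective pinning of its virtual
      CPUs can be verified with, for example (kvm-example-guest being the
      guest name):</para>

      <programlisting>virsh vcpupin kvm-example-guest</programlisting>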
    </section>

    <section id="network-libvirt">
      <title>Networking using libvirt</title>

      <para>Command <command>virsh net-create</command> starts a network. If
      any networks are listed in the guest XML file, those networks must be
      started before the guest is started. As an example, if the network is
      defined in a file named example-net.xml, it is started as
      follows:</para>

      <programlisting>virsh net-create example-net.xml</programlisting>

      <para>where example-net.xml defines the network, in this case an SR-IOV
      network:</para>

      <programlisting>&lt;network&gt;
   &lt;name&gt;sriov&lt;/name&gt;
   &lt;forward mode='hostdev' managed='yes'&gt;
     &lt;pf dev='eno3'/&gt;
   &lt;/forward&gt;
&lt;/network&gt;</programlisting>

      <para><literal>libvirt</literal> is a virtualization API that supports
      virtual network creation. These networks can be connected to guests and
      containers by referencing the network in the guest XML file. It is
      possible to have a virtual network persistently running on the host by
      starting the network with command <command>virsh net-define</command>
      instead of the previously mentioned <command>virsh
      net-create</command>.</para>

      <para>An example for the sample network defined in
      <literal>meta-vt/recipes-example/virt-example/files/example-net.xml</literal>:</para>

      <programlisting>virsh net-define example-net.xml</programlisting>

      <para>Command <command>virsh net-autostart</command> enables a
      persistent network to start automatically when the libvirt daemon
      starts:</para>

      <programlisting>virsh net-autostart example-net</programlisting>
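
      <para>The defined networks, their state and autostart setting can be
      listed with:</para>

      <programlisting>virsh net-list --all</programlisting>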

      <para>The guest configuration file (XML) must be updated to access the
      newly created network, like so:</para>

      <programlisting>    &lt;interface type='network'&gt;
      &lt;source network='sriov'/&gt;
    &lt;/interface&gt;</programlisting>

      <para>Presented below are a few modes of network access from the guest
      using <command>virsh</command>:</para>

      <itemizedlist>
        <listitem>
          <para><emphasis role="bold">vhost-user interface</emphasis></para>

          <para>See the Open vSwitch chapter on how to create a vhost-user
          interface using Open vSwitch. Currently there is no Open vSwitch
          support for networks that are managed by libvirt (e.g. NAT). As of
          now, only bridged networks are supported (those where the user has
          to manually create the bridge).</para>

          <programlisting>    &lt;interface type='vhostuser'&gt;
      &lt;mac address='00:00:00:00:00:01'/&gt;
      &lt;source type='unix' path='/var/run/openvswitch/vhost-user1' mode='client'/&gt;
       &lt;model type='virtio'/&gt;
      &lt;driver queues='1'&gt;
        &lt;host mrg_rxbuf='off'/&gt;
      &lt;/driver&gt;
    &lt;/interface&gt;</programlisting>
        </listitem>

        <listitem>
          <para><emphasis role="bold">PCI passthrough
          (SR-IOV)</emphasis></para>

          <para>The KVM hypervisor supports attaching PCI devices on the host
          system to guests. PCI passthrough allows guests to have exclusive
          access to PCI devices for a range of tasks, and lets PCI devices
          appear and behave as if they were physically attached to the guest
          operating system.</para>

          <para>Preparing an Intel system for PCI passthrough is done like
          so:</para>

          <itemizedlist>
            <listitem>
              <para>Enable the Intel VT-d extensions in BIOS</para>
            </listitem>

            <listitem>
              <para>Activate Intel VT-d in the kernel by using
              <literal>intel_iommu=on</literal> as a kernel boot
              parameter</para>
            </listitem>

            <listitem>
              <para>Allow unsafe interrupts in case the system doesn't support
              interrupt remapping. This can be done using
              <literal>vfio_iommu_type1.allow_unsafe_interrupts=1</literal> as
              a boot kernel parameter.</para>
            </listitem>
          </itemizedlist>

          <para>VFs must be created on the host before starting the
          guest:</para>

          <programlisting>$ echo 2 &gt; /sys/class/net/eno3/device/sriov_numvfs
$ modprobe vfio_pci
$ dpdk-devbind.py --bind=vfio-pci 0000:03:10.0</programlisting>

          <para>The VF is then referenced from the guest XML file as a
          hostdev interface:</para>

          <programlisting>   &lt;interface type='hostdev' managed='yes'&gt;
     &lt;source&gt;
       &lt;address type='pci' domain='0x0' bus='0x03' slot='0x10' function='0x0'/&gt;
     &lt;/source&gt;
     &lt;mac address='52:54:00:6d:90:02'/&gt;
   &lt;/interface&gt;</programlisting>
        </listitem>

        <listitem>
          <para><emphasis role="bold">Bridge interface</emphasis></para>

          <para>If an OVS bridge exists on the host, it can be used to
          connect the guest:</para>

          <programlisting>    &lt;interface type='bridge'&gt;
      &lt;mac address='52:54:00:71:b1:b6'/&gt;
      &lt;source bridge='ovsbr0'/&gt;
      &lt;virtualport type='openvswitch'/&gt;
      &lt;address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/&gt;
    &lt;/interface&gt;</programlisting>

          <para>For further details on the network XML format, see <ulink
          url="http://libvirt.org/formatnetwork.html">http://libvirt.org/formatnetwork.html</ulink>.</para>
        </listitem>
      </itemizedlist>
    </section>

    <section id="libvirt-guest-config-ex">
      <title>Libvirt guest configuration examples</title>

      <section id="guest-config-vhost-user-interface">
        <title>Guest configuration with vhost-user interface</title>

        <programlisting>&lt;domain type='kvm'&gt;
  &lt;name&gt;vm_vhost&lt;/name&gt;
  &lt;uuid&gt;4a9b3f53-fa2a-47f3-a757-dd87720d9d1d&lt;/uuid&gt;
  &lt;memory unit='KiB'&gt;4194304&lt;/memory&gt;
  &lt;currentMemory unit='KiB'&gt;4194304&lt;/currentMemory&gt;
  &lt;memoryBacking&gt;
    &lt;hugepages&gt;
      &lt;page size='1' unit='G' nodeset='0'/&gt;
    &lt;/hugepages&gt;
  &lt;/memoryBacking&gt;
  &lt;vcpu placement='static'&gt;2&lt;/vcpu&gt;
  &lt;cputune&gt;
    &lt;shares&gt;4096&lt;/shares&gt;
    &lt;vcpupin vcpu='0' cpuset='4'/&gt;
    &lt;vcpupin vcpu='1' cpuset='5'/&gt;
    &lt;emulatorpin cpuset='4,5'/&gt;
  &lt;/cputune&gt;
  &lt;os&gt;
    &lt;type arch='x86_64' machine='pc'&gt;hvm&lt;/type&gt;
    &lt;kernel&gt;/mnt/qemu/bzImage&lt;/kernel&gt;
    &lt;cmdline&gt;root=/dev/vda console=ttyS0,115200&lt;/cmdline&gt;
    &lt;boot dev='hd'/&gt;
  &lt;/os&gt;
  &lt;features&gt;
    &lt;acpi/&gt;
    &lt;apic/&gt;
  &lt;/features&gt;
  &lt;cpu mode='host-model'&gt;
    &lt;model fallback='allow'/&gt;
    &lt;topology sockets='2' cores='1' threads='1'/&gt;
    &lt;numa&gt;
      &lt;cell id='0' cpus='0-1' memory='4194304' unit='KiB' memAccess='shared'/&gt;
    &lt;/numa&gt;
  &lt;/cpu&gt;
  &lt;on_poweroff&gt;destroy&lt;/on_poweroff&gt;
  &lt;on_reboot&gt;restart&lt;/on_reboot&gt;
  &lt;on_crash&gt;destroy&lt;/on_crash&gt;
  &lt;devices&gt;
    &lt;emulator&gt;/usr/bin/qemu-system-x86_64&lt;/emulator&gt;
    &lt;disk type='file' device='disk'&gt;
      &lt;driver name='qemu' type='raw' cache='none'/&gt;
      &lt;source file='/mnt/qemu/enea-image-virtualization-guest-qemux86-64.ext4'/&gt;
      &lt;target dev='vda' bus='virtio'/&gt;
    &lt;/disk&gt;
    &lt;interface type='vhostuser'&gt;
      &lt;mac address='00:00:00:00:00:01'/&gt;
      &lt;source type='unix' path='/var/run/openvswitch/vhost-user1' mode='client'/&gt;
       &lt;model type='virtio'/&gt;
      &lt;driver queues='1'&gt;
        &lt;host mrg_rxbuf='off'/&gt;
      &lt;/driver&gt;
    &lt;/interface&gt;
    &lt;serial type='pty'&gt;
      &lt;target port='0'/&gt;
    &lt;/serial&gt;
    &lt;console type='pty'&gt;
      &lt;target type='serial' port='0'/&gt;
    &lt;/console&gt;
  &lt;/devices&gt;
&lt;/domain&gt;</programlisting>
      </section>

      <section id="guest-config-pci-passthrough">
        <title>Guest configuration with PCI passthrough</title>

        <programlisting>&lt;domain type='kvm'&gt;
  &lt;name&gt;vm_sriov1&lt;/name&gt;
  &lt;uuid&gt;4a9b3f53-fa2a-47f3-a757-dd87720d9d1d&lt;/uuid&gt;
  &lt;memory unit='KiB'&gt;4194304&lt;/memory&gt;
  &lt;currentMemory unit='KiB'&gt;4194304&lt;/currentMemory&gt;
  &lt;memoryBacking&gt;
    &lt;hugepages&gt;
      &lt;page size='1' unit='G' nodeset='0'/&gt;
    &lt;/hugepages&gt;
  &lt;/memoryBacking&gt;
  &lt;vcpu&gt;2&lt;/vcpu&gt;
  &lt;os&gt;
    &lt;type arch='x86_64' machine='q35'&gt;hvm&lt;/type&gt;
    &lt;kernel&gt;/mnt/qemu/bzImage&lt;/kernel&gt;
    &lt;cmdline&gt;root=/dev/vda console=ttyS0,115200&lt;/cmdline&gt;
    &lt;boot dev='hd'/&gt;
  &lt;/os&gt;
  &lt;features&gt;
    &lt;acpi/&gt;
    &lt;apic/&gt;
  &lt;/features&gt;
  &lt;cpu mode='host-model'&gt;
    &lt;model fallback='allow'/&gt;
    &lt;topology sockets='1' cores='2' threads='1'/&gt;
    &lt;numa&gt;
      &lt;cell id='0' cpus='0' memory='4194304' unit='KiB' memAccess='shared'/&gt;
    &lt;/numa&gt;
  &lt;/cpu&gt;
  &lt;on_poweroff&gt;destroy&lt;/on_poweroff&gt;
  &lt;on_reboot&gt;restart&lt;/on_reboot&gt;
  &lt;on_crash&gt;destroy&lt;/on_crash&gt;
  &lt;devices&gt;
    &lt;emulator&gt;/usr/bin/qemu-system-x86_64&lt;/emulator&gt;
    &lt;disk type='file' device='disk'&gt;
      &lt;driver name='qemu' type='raw' cache='none'/&gt;
      &lt;source file='/mnt/qemu/enea-image-virtualization-guest-qemux86-64.ext4'/&gt;
      &lt;target dev='vda' bus='virtio'/&gt;
    &lt;/disk&gt;
   &lt;interface type='hostdev' managed='yes'&gt;
     &lt;source&gt;
       &lt;address type='pci' domain='0x0' bus='0x03' slot='0x10' function='0x0'/&gt;
     &lt;/source&gt;
     &lt;mac address='52:54:00:6d:90:02'/&gt;
   &lt;/interface&gt;
    &lt;serial type='pty'&gt;
      &lt;target port='0'/&gt;
    &lt;/serial&gt;
    &lt;console type='pty'&gt;
      &lt;target type='serial' port='0'/&gt;
    &lt;/console&gt;
  &lt;/devices&gt;
&lt;/domain&gt;</programlisting>
      </section>

      <section id="guest-config-bridge-interface">
        <title>Guest configuration with bridge interface</title>

        <programlisting>&lt;domain type='kvm'&gt;
  &lt;name&gt;vm_bridge&lt;/name&gt;
  &lt;uuid&gt;4a9b3f53-fa2a-47f3-a757-dd87720d9d1d&lt;/uuid&gt;
  &lt;memory unit='KiB'&gt;4194304&lt;/memory&gt;
  &lt;currentMemory unit='KiB'&gt;4194304&lt;/currentMemory&gt;
  &lt;memoryBacking&gt;
    &lt;hugepages&gt;
      &lt;page size='1' unit='G' nodeset='0'/&gt;
    &lt;/hugepages&gt;
  &lt;/memoryBacking&gt;
  &lt;vcpu placement='static'&gt;2&lt;/vcpu&gt;
  &lt;cputune&gt;
    &lt;shares&gt;4096&lt;/shares&gt;
    &lt;vcpupin vcpu='0' cpuset='4'/&gt;
    &lt;vcpupin vcpu='1' cpuset='5'/&gt;
    &lt;emulatorpin cpuset='4,5'/&gt;
  &lt;/cputune&gt;
  &lt;os&gt;
    &lt;type arch='x86_64' machine='q35'&gt;hvm&lt;/type&gt;
    &lt;kernel&gt;/mnt/qemu/bzImage&lt;/kernel&gt;
    &lt;cmdline&gt;root=/dev/vda console=ttyS0,115200&lt;/cmdline&gt;
    &lt;boot dev='hd'/&gt;
  &lt;/os&gt;
  &lt;features&gt;
    &lt;acpi/&gt;
    &lt;apic/&gt;
  &lt;/features&gt;
  &lt;cpu mode='host-model'&gt;
    &lt;model fallback='allow'/&gt;
    &lt;topology sockets='2' cores='1' threads='1'/&gt;
    &lt;numa&gt;
      &lt;cell id='0' cpus='0-1' memory='4194304' unit='KiB' memAccess='shared'/&gt;
    &lt;/numa&gt;
  &lt;/cpu&gt;
  &lt;on_poweroff&gt;destroy&lt;/on_poweroff&gt;
  &lt;on_reboot&gt;restart&lt;/on_reboot&gt;
  &lt;on_crash&gt;destroy&lt;/on_crash&gt;
  &lt;devices&gt;
    &lt;emulator&gt;/usr/bin/qemu-system-x86_64&lt;/emulator&gt;
    &lt;disk type='file' device='disk'&gt;
      &lt;driver name='qemu' type='raw' cache='none'/&gt;
      &lt;source file='/mnt/qemu/enea-image-virtualization-guest-qemux86-64.ext4'/&gt;
      &lt;target dev='vda' bus='virtio'/&gt;
    &lt;/disk&gt;
    &lt;interface type='bridge'&gt;
      &lt;mac address='52:54:00:71:b1:b6'/&gt;
      &lt;source bridge='ovsbr0'/&gt;
      &lt;virtualport type='openvswitch'/&gt;
      &lt;address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/&gt;
    &lt;/interface&gt;
    &lt;serial type='pty'&gt;
      &lt;target port='0'/&gt;
    &lt;/serial&gt;
    &lt;console type='pty'&gt;
      &lt;target type='serial' port='0'/&gt;
    &lt;/console&gt;
  &lt;/devices&gt;
&lt;/domain&gt;</programlisting>
      </section>
    </section>
  </section>
</chapter>