summaryrefslogtreecommitdiffstats
path: root/doc/book-enea-edge-getting-started/doc/advanced_configurations.xml
blob: 14981e98e21c82abe46528ea067966547ed7e57a (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<chapter id="advanced_conf">
  <title>Advanced Configurations</title>

  <para>This chapter describes possible configurations for advanced features
  such as the Hugepage Reservation Service customization, UEFI Secure Boot and
  Bare Metal Provisioning.</para>

  <section id="bare_meta_prov">
    <title>Bare Metal Provisioning</title>

    <para>Bare Metal Provisioning can be used for automated deployment of the
    Enea Edge Runtime on a large number of uCPE devices. The uCPE devices may
    have no previous operating system installed, or are reinstalled without
    preserving any existing data. Enea Edge Bare Metal Provisioning is based
    on standardized Pre-Boot Execution environment (PXE) booting.</para>

    <para>The Bare Metal Provisioning process begins by PXE booting an Enea
    Edge installer <literal>initramfs</literal> image. The installer downloads
    a configuration file, as well as the Enea Edge Runtime image and then
    proceeds to install the system by dividing the disk into 2 partitions: a
    GPT partition containing the GRUB boot loader and a second partition
    containing the Enea Edge Runtime root filesystem. When the installation is
    complete, the uCPE device is automatically rebooted into the Enea Edge
    Runtime.</para>

    <note>
      <para>The <literal>.hddimg</literal>, <literal>initramfs</literal>, and
      <literal>bzImage</literal> files are available in the
      <filename>Enea_Edge_Runtime_&lt;processor&gt;_&lt;version&gt;-&lt;build_number&gt;.tar.gz</filename>
      file downloaded with your release.</para>
    </note>

    <section id="bare_meta_prov_prereq">
      <title>Prerequisites</title>

      <itemizedlist>
        <listitem>
          <para>The uCPE devices to be installed are connected in a working
          PXE network boot environment. The PXE server can be set up using any
          Linux distribution that includes TFTP and DHCP software packages.
          Refer to the documentation for your distribution for setup
          instructions.</para>
        </listitem>

        <listitem>
          <para>An HTTP server must be available and accessible from the uCPE
          devices in the provisioning network. Note that the installer will
          use the same interface that the uCPE device is PXE-booted from, to
          obtain an IP address using DHCP and access the HTTP server.</para>
        </listitem>

        <listitem>
          <para>The uCPE devices are preconfigured in BIOS to boot from the
          hard drive where the Enea Edge Runtime will be installed.</para>
        </listitem>

        <listitem>
          <para>CSM support and Dual Boot must be enabled in BIOS (i.e. PXE
          booting in legacy mode), while the final Enea Edge Runtime image
          will boot in UEFI mode. Doing a mass deployment using legacy PXE
          booting usually means that Secure Boot is disabled; Secure Boot will
          need to be enabled afterwards.</para>
        </listitem>

        <listitem>
          <para>A remote management tool such as <literal>ipmitool</literal>
          (or a vendor-specific tool) is available for use to set the next
          boot option to PXE and reboot the uCPE devices in order to begin the
          installation.</para>
        </listitem>
      </itemizedlist>
    </section>

    <section id="bare_meta_prov_server">
      <title>Server Configuration</title>

      <para>The following images provided with your Enea Edge release need to
      be made available on the PXE and HTTP servers:</para>

      <orderedlist>
        <listitem>
          <para>Copy the Enea Edge installer <literal>initramfs</literal>
          image and kernel <literal>bzImage</literal> for your uCPE device
          architecture to the TFTP directory on the PXE server (e.g.
          <literal>/var/lib/tftpboot</literal>).</para>
        </listitem>

        <listitem>
          <para>Compress the Enea Edge Runtime <literal>.hddimg</literal>
          image for the uCPE device architecture using <literal>gzip</literal>
          and copy the resulting <literal>hddimg.gz</literal> file to the HTTP
          server.</para>
        </listitem>
      </orderedlist>

      <section id="bare_meta_prov_install_config">
        <title>Installation Configuration File</title>

        <para>An installation configuration file needs to be prepared on the
        HTTP server. The format of the configuration file is a list of
        "<literal>name = value</literal>" pairs and the available parameters
        are described below:</para>

        <itemizedlist>
          <listitem>
            <para><literal>image_url</literal> (mandatory). The HTTP server
            URL used for downloading the Enea Edge Runtime image.</para>
          </listitem>

          <listitem>
            <para><literal>install_drive</literal> (optional). The name of the
            drive where the Enea Edge Runtime will be installed (e.g.
            <literal>/dev/sda</literal>). If not set, the installer will use
            the largest detected (non-USB) drive on the uCPE device.</para>
          </listitem>

          <listitem>
            <para><literal>prompt_user</literal> (optional). If the parameter
            is set to "yes", the installer will ask for confirmation before
            formatting and partitioning the drive. The default behavior is to
            proceed automatically without any user interaction.</para>
          </listitem>
        </itemizedlist>

        <para>Installation Configuration File Example:</para>

        <programlisting>image_url = http://192.168.1.100/enea-edge-runtime-xeon-d.hddimg.gz
install_drive = /dev/sda</programlisting>

        <note>
          <para>The installation configuration file needs to use the Linux
          end-of-line format (\n), not the Windows format (\r\n).</para>
        </note>
      </section>

      <section id="bare_meta_prov_pxe">
        <title>PXE Configuration</title>

        <para>A PXE entry for the Enea Edge installation needs to be added as
        the default boot selection in the pxelinux configuration file (e.g.
        <literal>/var/lib/tftpboot/pxelinux.cfg/default</literal>). The PXE
        entry should have the following settings:</para>

        <programlisting>default enea_edge_runtime
label enea_edge_runtime
menu label ^ENEA_EDGE_INSTALLER
kernel &lt;Path to kernel&gt;
append root=/dev/ram0 initrd=&lt;Path to initramfs&gt; LABEL=pxe-installer \
   INSTALL_CFG=http://&lt;Server IP&gt;/&lt;Path to install config file&gt; \
   console=ttyS0,115200 earlyprintk=ttyS0,115200
ipappend 2</programlisting>
      </section>
    </section>

    <section id="bare_meta_prov_inst">
      <title>Starting the Installation</title>

      <para>To initiate the installation, set the boot device (for next boot
      only) to PXE and reboot the uCPE devices. How to do this depends on the
      remote management capabilities of the uCPE devices and may require
      vendor-specific tools.</para>

      <para>Example initiation using <literal>ipmitool</literal>:</para>

      <programlisting>ipmitool -U &lt;user&gt; -P &lt;password&gt; -H &lt;uCPE device IPMI IP address&gt; chassis bootdev pxe
ipmitool -U &lt;user&gt; -P &lt;password&gt; -H &lt;uCPE device IPMI IP address&gt; power reset   </programlisting>

      <para>The uCPE devices should be configured in BIOS to boot from the
      installation drive first in order to automatically start Enea Edge
      Runtime when the installation is finished.</para>
    </section>
  </section>

  <section id="uefi_secure_boot">
    <title>UEFI Secure Boot</title>

    <para>Secure Boot was designed to enhance security in the pre-boot
    environment. It prevents malicious software and applications from being
    loaded during the system start-up process.</para>

    <para>The basic principle of UEFI Secure Boot is that it requires all
    artifacts involved in the boot process (bootloaders, kernel, initramfs) to
    be signed using a set of private keys. On a Secure Boot enabled uCPE
    device these artifacts are checked against a set of public certificates
    which correspond to these keys. If there are any mismatches the boot
    process will fail at the stage(s) they are detected.</para>

    <para>For more information about Secure Boot please refer to <ulink
    url="https://www.uefi.org/sites/default/files/resources/UEFI_Secure_Boot_in_Modern_Computer_Security_Solutions_2013.pdf">Secure
    Boot in Modern Computer Security Solutions</ulink>.</para>

    <section id="secure_boot_keys">
      <title>Enabling UEFI Secure Boot</title>

      <para>All Enea Edge image artifacts delivered with the release are
      signed using the Enea UEFI Secure Boot private keys. These artifacts can
      be used on a uCPE device that doesn't have Secure Boot enabled. To use
      the Secure Boot feature, however, the user must make the Enea UEFI
      Secure Boot public certificates available on the uCPE device before
      enabling the feature in BIOS. This process is called
      "Provisioning".</para>

      <section id="manual_key_provisioning">
        <title>Provisioning the Enea UEFI Secure Boot Certificates</title>

        <para>The UEFI firmware is normally shipped with factory preloaded
        certificates. If these do not already include Certificates from Enea,
        they will need to be appended or replaced with the Enea
        Certificates.</para>

        <para><emphasis role="bold">UEFI Secure Boot certificates provided
        with your release:</emphasis></para>

        <itemizedlist>
          <listitem>
            <para><literal>Platform Key (PK)</literal>: this key protects the
            next key from uncontrolled modification. Once this key is
            enrolled, Secure Boot enters into <literal>User Mode</literal>.
            The drivers and loaders signed with the <literal>Platform
            Key</literal> can then be loaded by the firmware.</para>
          </listitem>

          <listitem>
            <para><literal>Key Exchange Key (KEK)</literal>: this key allows
            other certificates which have a connection to the private portion
            of the <literal>Platform Key</literal> to be used.</para>
          </listitem>

          <listitem>
            <para><literal>Authorized Signature (DB)</literal>: contains the
            <literal>trusted keys</literal> used for authenticating any
            drivers or applications executed in the UEFI environment.</para>
          </listitem>
        </itemizedlist>

        <para>The Enea UEFI Secure Boot certificates are installed together
        with the Enea Edge Runtime onto the hard drive. They can be found on
        the EFI partition (usually the first partition of the drive) under
        <literal>/uefi_sb_keys</literal>.</para>

        <para>These certificates need to be manually enrolled in BIOS. The
        exact details on how to proceed may vary depending on the version of
        the UEFI firmware.</para>
      </section>

      <section id="enable_secure_boot">
        <title>Enabling Secure Boot in BIOS</title>

        <para>Once the certificates are enrolled, Secure Boot needs to be
        enabled in BIOS and the device rebooted.</para>
      </section>
    </section>
  </section>

  <section id="hugepage_reservation">
    <title>Hugepage Reservation Service</title>

    <para>Huge page memory support is a mechanism that allows the Linux kernel
    to support larger page sizes than the default page unit (4KB). Huge pages
    are contiguous blocks of memory that come in 2MB or 1GB sizes. By using
    huge page allocations, less memory is used for page tables and the
    Translation Lookaside Buffer (TLB) miss rate is significantly reduced.
    Enea Edge Runtime uses huge page backed memory in KVM guests in order to
    improve performance.</para>

    <para>Enea Edge Runtime implements an automatic hugepage allocation
    service that is triggered at each startup. The service is skipped if
    hugepages have been allocated in the kernel boot command line.</para>

    <para>There are two strategies outlined for hugepage allocation:</para>

    <itemizedlist>
      <listitem>
        <para>If a system has an amount of memory up to 8GB, the allocation
        algorithm will reserve up to 30% (no more than 2GB), for the OS and
        the rest as 2MB hugepages.</para>
      </listitem>

      <listitem>
        <para>If a system has an amount of memory that's higher than 8GB, the
        allocation algorithm will reserve all but 2GB of memory as 1GB
        hugepages, leaving the rest (2GB) to be used by the OS.</para>
      </listitem>
    </itemizedlist>

    <note>
      <para>This is a best effort reservation after kernel boot, so the
      results may vary accordingly.</para>
    </note>

    <section id="hugepage_customizing_auto">
      <title>Customizing Automatic Hugepage Reservation</title>

      <para>Configuration of Hugepage reservation is done in
      <literal>/etc/enea/hugepages.cfg</literal>.</para>

      <para><emphasis role="bold">Parameters used by the automatic algorithm:
      </emphasis></para>

      <itemizedlist spacing="compact">
        <listitem>
          <para><literal>hugepage_setup</literal>: Enables the automatic
          configuration algorithm. It has only one value,
          <literal>auto</literal>. For manual configuration comment or remove
          this parameter. Use the other parameter descriptions as a
          template/example.</para>
        </listitem>

        <listitem>
          <para><literal>threshold_to_use_1g</literal>: Decides the threshold
          which instructs the algorithm to use 1GB hugepages. If a system's
          memory is higher than <literal>threshold_to_use_1g</literal>, then
          the algorithm will use 1GB hugepages, otherwise it will use 2MB
          hugepages.</para>
        </listitem>

        <listitem>
          <para><literal>percent_os_alloc</literal>: Decides how much memory
          to try to reserve for userspace applications. The algorithm will try
          to reserve at least the value of <literal>percent_os_alloc</literal>
          of the total system memory for userspace applications.</para>
        </listitem>

        <listitem>
          <para><literal>maximum_os_alloc_mb</literal>: Maximum amount of
          memory to allocate for userspace applications. If
          <literal>percent_os_alloc</literal> of the total system memory
          exceeds <literal>maximum_os_alloc_mb</literal> then the maximum
          allocated memory for userspace applications is
          <literal>maximum_os_alloc_mb</literal>.</para>
        </listitem>
      </itemizedlist>

      <para><emphasis role="bold">Example of automatic Hugepage
      Configuration:</emphasis></para>

      <programlisting>hugepage_setup = auto
threshold_to_use_1g = 8192
percent_os_alloc = 30
maximum_os_alloc_mb = 2048</programlisting>

      <para>The following possible allocations can result (based on total
      system memory available):</para>

      <itemizedlist>
        <listitem>
          <para>2GB of memory: approximately 30% will be allocated for the OS
          and the rest will be allocated as 2MB hugepages.</para>
        </listitem>

        <listitem>
          <para>4GB of memory: approximately 30% will be allocated for the OS
          and the rest will be allocated as 2MB hugepages.</para>
        </listitem>

        <listitem>
          <para>16GB of memory: approximately 2GB will be allocated for the OS
          and the rest as 1GB hugepages.</para>
        </listitem>
      </itemizedlist>

      <note>
        <para>The memory allocated for the kernel and hugepages might vary
        slightly depending on how much memory is available.</para>
      </note>
    </section>

    <section id="hugepage_customizing_man">
      <title>Customizing Manual Hugepage Reservation</title>

      <para>The automatic algorithm can be disabled and hugepages in turn,
      configured manually. To do this, comment the line which defines
      <literal>hugepage_setup</literal> as <literal>auto</literal> and
      configure memory for each CPU socket in the following manner:</para>

      <programlisting>&lt;NUMA node&gt;.&lt;hugepage size&gt; = &lt;number of pages&gt;</programlisting>

      <para>Where <literal>&lt;NUMA node&gt;</literal> refers to a node which
      is part of the system's NUMA topology, <literal>&lt;hugepage
      size&gt;</literal> decides what type of hugepages should be set and
      <literal>&lt;number of pages&gt;</literal> is how many hugepages of
      <literal>&lt;hugepage size&gt;</literal> should be allocated.</para>

      <para>To list the available system nodes, run:</para>

      <programlisting>ls -d /sys/devices/system/node/node* </programlisting>

      <para>To list available hugepage sizes, per node, run:</para>

      <programlisting>ls -d /sys/devices/system/node/node*/hugepages/hugepages-*</programlisting>

      <para>Example of Manual Hugepage Configuration, configuring the system
      to allocate three 1GB hugepages and 512 2MB hugepages on node0:</para>

      <programlisting>node0.2048kB = 512
node0.1048576kB = 3 </programlisting>

      <note>
        <para>Make sure there are no hugepages reserved in the kernel boot
        command line, these will override any manual configuration done in the
        service.</para>
      </note>
    </section>
  </section>

  <section id="create_certificate">
    <title>Tomcat Certificate Generation</title>

    <para>The self-signed Tomcat Certificate the Enea Edge Management
    application uses is generated at installation time. It can be regenerated
    anytime after installation by using the
    <filename>createCertificate.sh</filename> script.</para>

    <para>On the CentOS 7 server open a terminal, log into a bash shell with
    the root account and perform the following:</para>

    <orderedlist>
      <listitem>
        <para>Extract
        <literal>Enea_Edge_Management_&lt;version&gt;-build&lt;build_number&gt;.tar.gz</literal></para>

        <para>The directory in which the archive has been unpacked will be
        denoted as: <literal>&lt;EEMg-installerdir&gt;</literal>.</para>
      </listitem>

      <listitem>
        <para>Enter <literal>&lt;EEMg-installerdir&gt;/dist</literal>.</para>
      </listitem>

      <listitem>
        <para>Run the following command and change <literal>/opt/ems</literal>
        to the location where the Enea Edge Management application is
        installed:</para>

        <programlisting>./createCertificate.sh EneaEdgeManagement &lt;IP_or_domain&gt; \
            /opt/ems [&lt;service_username&gt;]</programlisting>

        <para>Where the following need to be included:</para>

        <itemizedlist>
          <listitem>
            <para>The IP or domain name of the server the Enea Edge Management
            application is running on.</para>
          </listitem>

          <listitem>
            <para>The service username, which is the one set when installing
            the Enea Edge Management application. For more details see <olink
            targetdoc="book_enea_edge_getting_started"
            targetptr="fresh_ucpemg_install">Fresh Installation of the Enea
            Edge Management application in the <ns:include
            href="../../s_docbuild/olinkdb/pardoc-names.xml"
            xmlns:ns="http://www.w3.org/2001/XInclude"
            xpointer="element(book_enea_edge_getting_started/1)" /></olink>
            Manual. Providing the service username is optional. If it is not
            provided, the default value will be used.</para>
          </listitem>
        </itemizedlist>
      </listitem>

      <listitem>
        <para>Restart the Enea Edge Management service:</para>

        <programlisting>service ucpemanager restart</programlisting>
      </listitem>
    </orderedlist>
  </section>

  <section id="nfvi_backup">
    <title>NFV Infrastructure Backup Operations</title>

    <para>The following sections describe the operations that can be
    performed in order to back up and restore the NFV Infrastructure (NFVi)
    configuration. This allows for recovery from unintended configuration
    errors.</para>

    <section id="nfvi_snapshot">
      <title>NFVi Snapshotting</title>

      <para>To perform a snapshot (a backup of the current state) of the NFVi,
      perform the following steps in the Enea Edge Management
      application:</para>

      <orderedlist>
        <listitem>
          <para>Select the uCPE device, access the <emphasis
          role="bold">Configuration</emphasis> menu, and select <emphasis
          role="bold">NFVI Backup</emphasis>. The <emphasis role="bold">NFVI
          Config Archive Table</emphasis> will be shown.</para>
        </listitem>

        <listitem>
          <para>Press the <emphasis role="bold">Create</emphasis>
          button.</para>
        </listitem>

        <listitem>
          <para>Edit the default file name, if needed, and press the <emphasis
          role="bold">Create</emphasis> button. The current window will close
          and the success message <emphasis role="bold">Created
          successfully</emphasis> will be shown.</para>
        </listitem>
      </orderedlist>

      <para>A new entry will be added in the <emphasis role="bold">NFVI Config
      Archive Table</emphasis> and the <emphasis
      role="bold">BackupArchiveCreated</emphasis> event can be seen by
      accessing the <emphasis role="bold">Fault</emphasis> menu and triggering
      the <emphasis role="bold">Events</emphasis> window. The backup file will
      be created on the uCPE device.</para>
    </section>

    <section id="restore_nfvi_backup">
      <title>Restoring an NFVi backup</title>

      <para>When needed, an NFVi backup can be restored in the following
      way:</para>

      <orderedlist>
        <listitem>
          <para>Select the uCPE device, access the <emphasis
          role="bold">Configuration</emphasis> menu and select <emphasis
          role="bold">NFVI Backup</emphasis>. The <emphasis role="bold">NFVI
          Config Archive Table</emphasis> will be shown.</para>
        </listitem>

        <listitem>
          <para>Select an existing backup from the NFVI Config Archive
          Table.</para>
        </listitem>

        <listitem>
          <para>Press the <emphasis role="bold">Restore</emphasis> button and
          <emphasis role="bold">Confirm</emphasis> the action.</para>
        </listitem>
      </orderedlist>

      <para>A success message will be shown in the same window: <emphasis
      role="bold">Restored NFVI configuration</emphasis>. Two events will
      appear in the <emphasis role="bold">Events</emphasis> window (accessible
      through the <emphasis role="bold">Fault</emphasis> menu): <emphasis
      role="bold">Restore started</emphasis> and <emphasis role="bold">Restore
      complete</emphasis>.</para>

      <note>
        <para>When restoring an NFVi backup, connectivity with the uCPE device
        may be lost for a period of time. After a successful restoration the
        connection with the device will be restored. Any saved custom scripts
        of type <filename>once-before-startup</filename> or
        <filename>once-after-startup</filename> will be re-executed as part of
        the NFVi restore process.</para>
      </note>

      <para>When restoring an NFVi backup all VNF instances will be removed.
      Instances that were present at the moment the backup was taken will be
      restored to their initial state, as if newly created. All snapshots
      associated with previously instantiated VNFs will be deleted.</para>

      <note>
        <para>If the VNF descriptor associated with the VNFs that were saved
        as part of the backup is deleted after the backup is performed, then a
        backup restore will not be able to recreate those VNFs.</para>
      </note>
    </section>

    <section id="delete_nfvi_backup">
      <title>Deleting an NFVi backup</title>

      <para>Deleting an older NFVi backup can be performed in the following
      way:</para>

      <orderedlist>
        <listitem>
          <para>Select the uCPE device, access the <emphasis
          role="bold">Configuration</emphasis> menu, and select <emphasis
          role="bold">NFVI Backup</emphasis>. The <emphasis role="bold">NFVI
          Config Archive Table</emphasis> will be shown.</para>
        </listitem>

        <listitem>
          <para>Select an existing backup from the NFVI Config Archive
          Table.</para>
        </listitem>

        <listitem>
          <para>Press the <emphasis role="bold">Delete</emphasis>
          button.</para>
        </listitem>
      </orderedlist>

      <para>A success message will be shown in the same window: <emphasis
      role="bold">NFVI configuration backup deleted</emphasis>. The backup
      file will be deleted from the uCPE device and the table will be
      updated.</para>
    </section>
  </section>

  <section id="mgmt_db_backup">
    <title>Database backup for the Enea Edge Management application</title>

    <para>The following handles advanced configuration for a database backup.
    A backup can be used to restore the database in case of corruption.</para>

    <section id="db_backup_once">
      <title>One-time backup of the database</title>

      <para>The following steps are used to perform a database backup from the
      Enea Edge Management application:</para>

      <orderedlist>
        <listitem>
          <para>In the <emphasis role="bold">System Backup</emphasis> window,
          select the <emphasis role="bold">Backups</emphasis> tab.</para>
        </listitem>

        <listitem>
          <para>Press the <emphasis role="bold">Backup Now</emphasis> button
          in the <emphasis role="bold">Backups</emphasis> tab.</para>
        </listitem>

        <listitem>
          <para>In the new window select the <emphasis role="bold">Database
          Export</emphasis> option from the <emphasis role="bold">Backup
          Type</emphasis> drop-down.</para>
        </listitem>

        <listitem>
          <para>Set the <emphasis role="bold">Execution</emphasis> option to
          <emphasis role="bold">Now</emphasis>.</para>
        </listitem>

        <listitem>
          <para>Press the <emphasis role="bold">Execute</emphasis> button and
          wait until the Status message shows: <emphasis role="bold">System
          Backup Completed</emphasis>.</para>
        </listitem>
      </orderedlist>

      <para>Once the backup is successfully executed it will be visible in the
      <emphasis role="bold">System Backup</emphasis> window with a name that
      follows the format:
      <literal>DatabaseExport_[date]_[timestamp].zip</literal>.</para>
    </section>

    <section id="db_backup_restore">
      <title>Restoring the database from a backup</title>

      <para>The following steps are used to perform a database restore from
      the Enea Edge Management application:</para>

      <orderedlist>
        <listitem>
          <para>In the <emphasis role="bold">System Backup</emphasis> window,
          select the <emphasis role="bold">Backups</emphasis> tab.</para>
        </listitem>

        <listitem>
          <para>Select the backup you want to restore from the list.</para>

          <note>
            <para>Database backups follow the naming convention:
            <literal>DatabaseExport_[date]_[timestamp].zip</literal>.</para>
          </note>
        </listitem>

        <listitem>
          <para>Click the <emphasis role="bold">Restore</emphasis>
          button.</para>
        </listitem>

        <listitem>
          <para>In the window that appears, click <emphasis
          role="bold">OK</emphasis> to proceed with the restore
          operation.</para>
        </listitem>

        <listitem>
          <para>Follow the instructions in the pop-up window to restart the
          server using the CentOS 7 command:</para>

          <programlisting>sudo systemctl restart ucpemanager.service</programlisting>
        </listitem>
      </orderedlist>
    </section>

    <section id="db_scheduled_backup">
      <title>Scheduled backup of the database</title>

      <para>The following steps are used to perform a scheduled database
      backup from the Enea Edge Management application:</para>

      <orderedlist>
        <listitem>
          <para>In the <emphasis role="bold">System Backup</emphasis> window,
          select the <emphasis role="bold">Backups</emphasis> tab.</para>
        </listitem>

        <listitem>
          <para>Press the <emphasis role="bold">Backup Now</emphasis> button
          in the <emphasis role="bold">Backups</emphasis> tab.</para>
        </listitem>

        <listitem>
          <para>In the new window select the <emphasis role="bold">Database
          Export</emphasis> option from the <emphasis role="bold">Backup
          Type</emphasis> drop-down.</para>
        </listitem>

        <listitem>
          <para>Set the <emphasis role="bold">Execution</emphasis> option to
          <emphasis role="bold">Scheduled</emphasis>.</para>
        </listitem>

        <listitem>
          <para>Fill in the <emphasis role="bold">Description</emphasis>
          field.</para>
        </listitem>

        <listitem>
          <para>Set the <emphasis role="bold">Begin Date</emphasis>.</para>
        </listitem>

        <listitem>
          <para>Fill in the non-mandatory fields accordingly.</para>

          <note>
            <para>The <emphasis role="bold">End Date</emphasis> field becomes
            available once the <emphasis role="bold">Periodicity</emphasis>
            option is set to something other than <emphasis
            role="bold">Once</emphasis>.</para>
          </note>
        </listitem>

        <listitem>
          <para>Click the <emphasis role="bold">Schedule</emphasis> button to
          schedule the backup.</para>
        </listitem>
      </orderedlist>

      <figure>
        <title>Scheduled Backup Configuration Example</title>

        <mediaobject>
          <imageobject>
            <imagedata align="center" contentwidth="615"
                       fileref="images/scheduled_backup.png" />
          </imageobject>
        </mediaobject>
      </figure>

      <para>The backups created will be visible in the <emphasis
      role="bold">System Backup</emphasis> window with a name following the
      format: <literal>DatabaseExport_[date]_[timestamp].zip</literal>.</para>
    </section>
  </section>

  <section condition="hidden" id="high_availability_ig">
    <title>Installing the Enea Edge Management application in High
    Availability Mode</title>

    <para>The following describes the setup needed for running the Enea Edge
    Management application in High Availability (HA) mode, with a MariaDB
    database cluster. A setup example is illustrated in the diagram
    below.</para>

    <figure>
      <title>The High Availability setup</title>

      <mediaobject>
        <imageobject>
          <imagedata align="center" contentwidth="600"
                     fileref="images/high_av_setup.png" />
        </imageobject>
      </mediaobject>
    </figure>

    <para>The setup consists of two sub-clusters, placed in different
    geographical locations, each consisting of three MariaDB servers. In this
    example, two of the machines are running only MariaDB, while the rest are
    hosting a MariaDB server and an Enea Edge Management instance. The entire
    network is hidden behind a firewall that performs NAT. While in protection
    (cluster) mode, the Enea Edge Management application instances promote a
    single Virtual IP (VIP). The cluster can be reached either from the
    primary server IP or via VIP (recommended), since the VIP interface is
    spawned on the primary server. In case of a primary failure, a newly
    elected primary (from one of the backups) will re-spawn the VIP interface.
    The firewall in this scenario will allocate a public IP for the outside
    world that will be translated into the VIP. Also in this example, the
    traffic towards the cluster from outside the firewall is done through
    HTTPS, which will be translated to HTTP inside the private network.</para>

    <note>
      <para>By default, communication to the Enea Edge Management application
      is done with HTTPS. To enable HTTP, go to <emphasis
      role="bold">System</emphasis>, <emphasis
      role="bold">Configuration</emphasis>, click on the <emphasis
      role="bold">Web Access</emphasis> tab and check the <emphasis
      role="bold">Enable HTTP</emphasis> checkbox.</para>
    </note>

    <para><emphasis role="bold">High-Availability - Design
    Limitations</emphasis></para>

    <para>Enea's solution for Edge Management High-Availability is designed to
    maximize resiliency of the management function. However, the current
    design does not provide guaranteed consistency across all Enea Edge
    Management instances, i.e. some information might become out-of-sync in
    certain conditions.</para>

    <para>In order to reach full consistency, design would rely on
    transactional data updates across the Primary instance and all Backups,
    but the latency would significantly increase for all management
    operations, which is considered to be a severe limitation.</para>

    <para>In the current version of the Enea Edge Management application when
    in High Availability mode, there are two kinds of data that need to be
    synchronized: data in the database and data residing in the Primary
    instance filesystem. This means that there are intervals when data is
    being synchronized from the Primary instance to the Backup(s) and Primary
    failure at this time will result in incomplete/inconsistent data on the
    Backup(s). For example, a VNF which has been onboarded on the Primary
    instance will not be visible in a Backup instance(s) until both database
    and filesystem have been synchronized. If a Backup instance becomes the
    active one before synchronization takes place, this would result in an
    out-of-sync state.</para>

    <section id="ha_reqs">
      <title>Requirements for High Availability</title>

      <para>The following hardware is needed for deploying the base
      configuration:</para>

      <itemizedlist>
        <listitem>
          <para>Machines running the Enea Edge Management application and
          MariaDB:</para>

          <itemizedlist spacing="compact">
            <listitem>
              <para>4 CPU cores</para>
            </listitem>

            <listitem>
              <para>12 - 16 GB memory</para>
            </listitem>

            <listitem>
              <para>256 - 512 GB hard disk</para>
            </listitem>
          </itemizedlist>
        </listitem>

        <listitem>
          <para>Machines running only MariaDB:</para>

          <itemizedlist spacing="compact">
            <listitem>
              <para>2 CPU cores</para>
            </listitem>

            <listitem>
              <para>8 GB memory</para>
            </listitem>

            <listitem>
              <para>256 - 512 GB hard disk</para>
            </listitem>
          </itemizedlist>
        </listitem>
      </itemizedlist>
    </section>

    <section id="firewall_rules">
      <title>Firewall Rules</title>

      <para>Please refer to <olink targetdoc="book_enea_edge_getting_started"
      targetptr="firewall_config">Firewall Configuration in the <xi:include
      href="../../s_docbuild/olinkdb/pardoc-names.xml"
      xmlns:xi="http://www.w3.org/2001/XInclude"
      xpointer="element(book_enea_edge_getting_started/1)" /></olink> Manual.
      In addition, the following configuration is needed:</para>

      <orderedlist>
        <listitem>
          <para>Disable <literal>SELINUX</literal> on all database servers by
          editing <literal>/etc/sysconfig/selinux</literal> and changing the
          following:</para>

          <programlisting>SELINUX=disabled
SELINUXTYPE=targeted</programlisting>
        </listitem>

        <listitem>
          <para>Reboot the server:</para>

          <programlisting>[root@localhost ~]# sudo shutdown -r now</programlisting>
        </listitem>
      </orderedlist>

      <para>The following ports should be opened in the local firewall (not
      the one doing NAT), for each Enea Edge Management machine:</para>

      <table>
        <title>Ports for Enea Edge Management Machines</title>

        <tgroup cols="2">
          <colspec align="left" />

          <thead>
            <row>
              <entry align="center">Port (Protocol)</entry>

              <entry align="center">Usage</entry>
            </row>
          </thead>

          <tbody>
            <row>
              <entry>80 (TCP)</entry>

              <entry>HTTP</entry>
            </row>

            <row>
              <entry>443 (TCP)</entry>

              <entry>HTTPS</entry>
            </row>

            <row>
              <entry>54327 (UDP)</entry>

              <entry>Cluster multicasting (Hazelcast)</entry>
            </row>

            <row>
              <entry>5701 - 5708 (TCP)</entry>

              <entry>Hazelcast communications</entry>
            </row>

            <row>
              <entry>4334 (TCP)</entry>

              <entry>NETCONF call-home</entry>
            </row>

            <row>
              <entry>7000 - 7009 (TCP)</entry>

              <entry>Reverse SSH connection pool</entry>
            </row>
          </tbody>
        </tgroup>
      </table>

      <para>For MariaDB, the following ports must be opened (on each MariaDB
      server, including the ones hosting the Enea Edge Management
      application):</para>

      <table>
        <title>Ports for MariaDB Machines</title>

        <tgroup cols="2">
          <colspec align="left" />

          <thead>
            <row>
              <entry align="center">Port (Protocol)</entry>

              <entry align="center">Usage</entry>
            </row>
          </thead>

          <tbody>
            <row>
              <entry>3306 (TCP)</entry>

              <entry>Client connections</entry>
            </row>

            <row>
              <entry>4567 (UDP/TCP)</entry>

              <entry>Galera cluster replication with multicasting</entry>
            </row>

            <row>
              <entry>4568 (TCP)</entry>

              <entry>Incremental state transfer</entry>
            </row>

            <row>
              <entry>4444 (TCP)</entry>

              <entry>State snapshot transfer</entry>
            </row>
          </tbody>
        </tgroup>
      </table>

      <para>The following ports should be accessible externally and translated
      to the Virtual IP side as shown below:</para>

      <table>
        <title>Ports for Virtual IP</title>

        <tgroup cols="3">
          <colspec align="left" />

          <thead>
            <row>
              <entry align="center">External Port (Protocol)</entry>

              <entry align="center">Usage</entry>

              <entry align="center">Local Port (Protocol)</entry>
            </row>
          </thead>

          <tbody>
            <row>
              <entry>443 (TCP)</entry>

              <entry>HTTPS to/back HTTP</entry>

              <entry>80 (TCP)</entry>
            </row>

            <row>
              <entry>4334 (TCP)</entry>

              <entry>NETCONF call-home</entry>

              <entry>4334 (TCP)</entry>
            </row>

            <row>
              <entry>7000 - 7009 (TCP)</entry>

              <entry>Reverse SSH connection pool</entry>

              <entry>7000 - 7009 (TCP)</entry>
            </row>
          </tbody>
        </tgroup>
      </table>
    </section>

    <section id="ha_installation">
      <title>Installing High Availability</title>

      <para>The Enea Edge Management application can be installed in High
      Availability mode with a MariaDB database cluster by performing the
      following steps. The mandatory Java configuration is also
      detailed.</para>

      <section id="ha_mariadb">
        <title>Installing and configuring the MariaDB cluster</title>

        <para>Install the latest MariaDB packages on all servers.</para>

        <note>
          <para>The setup was tested using MariaDB 10.5.8, built for CentOS
          7.</para>
        </note>

        <para><emphasis role="bold">How to install MariaDB</emphasis></para>

        <orderedlist>
          <listitem>
            <para>Install the MariaDB official yum repository. Create a file
            named <literal>mariadb.repo</literal> in
            <literal>/etc/yum.repos.d/</literal> with the following
            content:</para>

            <programlisting># MariaDB 10.5 CentOS repository list - created 2021-02-16 08:46 UTC
# http://downloads.mariadb.org/mariadb/repositories/
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.5/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1
</programlisting>

            <note>
              <para>For more information about setting the repository, consult
              <ulink
              url="https://downloads.mariadb.org/mariadb/repositories/#mirror=nxtHost">Downloads
              - Setting up MariaDB Repositories</ulink>.</para>
            </note>
          </listitem>

          <listitem>
            <para>Make sure the following packages are installed:</para>

            <programlisting>MariaDB-compat-10.5.8-1.el7.centos.x86_64
MariaDB-common-10.5.8-1.el7.centos.x86_64
MariaDB-server-10.5.8-1.el7.centos.x86_64
MariaDB-client-10.5.8-1.el7.centos.x86_64
galera-4-26.4.6-1.el7.centos.x86_64</programlisting>

            <para>These provide the MariaDB server, client and the Galera
            <literal>wsrep</literal> provider library.</para>
          </listitem>

          <listitem>
            <para>Copy the <literal>wsrep</literal> template:</para>

            <programlisting>[root@localhost ~]# cp /usr/share/mysql/wsrep.cnf /etc/my.cnf.d
            </programlisting>
          </listitem>

          <listitem>
            <para>Change the following configuration in
            <filename>/etc/my.cnf.d/wsrep.cnf</filename>:</para>

            <programlisting># Full path to wsrep provider library or 'none'
<emphasis role="bold">wsrep_provider=/usr/lib64/galera-4/libgalera_smm.so</emphasis>

# Provider specific configuration options
#wsrep_provider_options=

# Logical cluster name. Should be the same for all nodes.
<emphasis role="bold">wsrep_cluster_name="EneaEdgeManagement"</emphasis>

# Group communication system handle
<emphasis role="bold">wsrep_cluster_address="gcomm://192.168.10.11,192.168.10.12,..,192.168.10.16"</emphasis>

# Human-readable node name (non-unique). Hostname by default.
<emphasis role="bold">wsrep_node_name=Node1</emphasis> # current node's name. set node name for each server in \
the cluster

# Base replication &lt;address|hostname&gt;[:port] of the node.
# The values supplied will be used as defaults for state transfer receiving,
# listening ports and so on. Default: address of the first network interface.\
<emphasis role="bold">wsrep_node_address=192.168.10.11</emphasis> 
#current node's interface IP . must be set for each node in the cluster</programlisting>

            <note>
              <para>Steps 2, 3 and 4 must be performed for each MariaDB node
              in the cluster.</para>
            </note>
          </listitem>

          <listitem>
            <para>Bootstrap the first node in the cluster (referred to as
            <literal>Node1</literal> in this example), by running:</para>

            <programlisting>[root@localhost ~]# galera_new_cluster</programlisting>

            <para>This script passes the
            <literal>--wsrep-new-cluster</literal> option to
            <literal>mysqld</literal>, which tells the node that there is no
            pre-existing cluster to connect to. The node will create a new
            UUID to identify the new cluster.</para>

            <note>
              <para>Do not execute this script when connecting to an existing
              cluster. It will create a new UUID to identify the cluster
              again, and the node won't reconnect to the old cluster.</para>
            </note>
          </listitem>

          <listitem>
            <para>Go to <literal>Node1</literal> and start the service:</para>

            <programlisting>[root@localhost ~]# systemctl start mariadb</programlisting>

            <para>Subsequently, start the service on the other servers.</para>
          </listitem>

          <listitem>
            <para>Verify that the nodes have entered the cluster:</para>

            <programlisting>[root@localhost ~]# mysql --host=localhost --user=root -p
MariaDB [(none)]&gt; show status like 'wsrep_cluster_conf_%';
+-----------------------+-------+
| Variable_name         | Value |
+-----------------------+-------+
| wsrep_cluster_conf_id | 3     |
+-----------------------+-------+
1 row in set (0.001 sec)</programlisting>
          </listitem>

          <listitem>
            <para>Run the initial configuration script (only once, on one of
            the machines in the cluster):</para>

            <programlisting>[root@localhost ~]# mysql_secure_installation

Switch to unix_socket authentication [Y/n] <emphasis role="bold">Y</emphasis>
Enabled successfully!
Reloading privilege tables..
 ... Success!
…
Change the root password? [Y/n] <emphasis role="bold">Y</emphasis>
New password: 
Re-enter new password: 
Password updated successfully!
Reloading privilege tables..
 ... Success!
…
Remove anonymous users? [Y/n] <emphasis role="bold">Y</emphasis>
 ... Success!
…
Disallow root login remotely? [Y/n] <emphasis role="bold">Y</emphasis>
 ... Success!
…
Remove test database and access to it? [Y/n] <emphasis role="bold">Y (optional)</emphasis>
 - Dropping test database...
 ... Success!
 - Removing privileges on test database...
 ... Success!
Reload privilege tables now? [Y/n] <emphasis role="bold">Y</emphasis>
 ... Success!

Cleaning up...

All done!  If you've completed all of the above steps, your MariaDB
installation should now be secure.

Thanks for using MariaDB!</programlisting>
          </listitem>

          <listitem>
            <para>Create the initial database and grant access to it:</para>

            <programlisting>[root@localhost application]# mysql --host=localhost --user=root -p
MariaDB [(none)]&gt; CREATE DATABASE EneaEdgeManagement CHARACTER SET='utf8' \
COLLATE='utf8_bin';
Query OK, 1 row affected (0.004 sec)

MariaDB [(none)]&gt; GRANT ALL PRIVILEGES ON EneaEdgeManagement.* \
TO 'enea'@'%' IDENTIFIED BY 'somepassword' WITH GRANT OPTION;</programlisting>
          </listitem>
        </orderedlist>
      </section>

      <section id="ha_java_sdk_install">
        <title>Installing the Java SDK</title>

        <para>Please refer to <olink
        targetdoc="book_enea_edge_getting_started"
        targetptr="openjdk_postgresql_config">Configuring OpenJDK and
        PostgreSQL in the <xi:include
        href="../../s_docbuild/olinkdb/pardoc-names.xml"
        xmlns:xi="http://www.w3.org/2001/XInclude"
        xpointer="element(book_enea_edge_getting_started/1)" /></olink>
        Manual, for details on how to install and configure Java
        OpenJDK.</para>

        <para>Make sure the <literal>JAVA_HOME</literal> variable points to
        the OpenJDK 11 installation:</para>

        <programlisting>[root@localhost ~]# echo $JAVA_HOME
/usr/lib/jvm/java-11-openjdk-11.0.10.0.9-0.el7_9.x86_64
</programlisting>
      </section>

      <section id="ha_ucpe_mg">
        <title>Installing the Enea Edge Management application in High
        Availability mode</title>

        <para>These steps must be taken on each of the CentOS 7 machines that
        will host the Enea Edge Management application. It will be installed
        and the setup for the external database cluster will be
        prepared.</para>

        <para>As the root user, go to the distribution folder of the Enea Edge
        Management application, and run:</para>

        <programlisting>[root@localhost distro]#./install.sh /opt/ \
Enea_Edge_Management_&lt;version&gt;-build&lt;build_number&gt;.tar.gz
This will install the Enea Edge Management application into the /opt/ucpemanager folder.
Select the following options, while asked by the installation script:
Are you using the embedded PostgreSQL database? [Y/N]: <emphasis role="bold">N</emphasis>
External database selected, getting user information ...
Press 1 for PostgreSQL, 2 for MariaDB, 3 for SQL Server, 4 for Oracle and 5 \
for MySQL: <emphasis role="bold">2</emphasis>
Specify database server name(s) or IP Address(es): \
<emphasis role="bold">192.168.10.11,192.168.10.12,…,192.168.10.16 *(see note)</emphasis>
Specify database ID (or name) [ucpemanager]: 
Specify database server port [3306]: 
Specify database user name [root]: enea
Specify database password [root]: somepassword
Specify database startup thread pool size [1]: 
Creating database configuration file \
/opt/ucpemanager/application/config/databaseConfig.xml ...
Done .
…
Installing ucpemanager service ..
Specify service username [EneaEdgeManagement]: 
Specify service password [EneaEdgeManagement]: somepassword
…
Specify the IP address of the local interface: <emphasis role="bold">192.168.10.11</emphasis>
Is this server part of a cluster? [Y/N]: <emphasis role="bold">Y</emphasis>
Specify the name of the cluster [EneaEdgeManagement]: 
Specify the shared (virtual) cluster IP address: <emphasis role="bold">192.168.10.10</emphasis>
Specify the netmask for the cluster IP address [255.255.255.0]: 
Specify the IP address of the (optional) arbiter: <emphasis role="bold">192.168.10.13</emphasis>
Specify the port associated with the (optional) arbiter [3306]: 
HA Configuration files modified successfully.
Configuration complete.</programlisting>

        <note>
          <para>For each Enea Edge Management installation, place the local
          interface IP first in the list of IPs. This will optimize database
          communication, since the Enea Edge Management application uses the
          list of IPs sequentially, therefore using the internal loopback
          interface for communicating with the database.</para>
        </note>

        <para>The arbiter IP and port are used to mitigate split-brain
        situations. Providing an empty arbiter IP implies that no arbiter
        checking is enabled - in other words, arbiter connectivity is not
        checked. It is highly recommended to introduce an arbiter to help
        with split-brain scenarios. For instance, one of the MariaDB cluster
        nodes can be used, with the default port <literal>3306</literal>
        (the MariaDB listening port).</para>

        <para>Once the servers are up and running, log into the <emphasis
        role="bold">Primary</emphasis> and go to <emphasis
        role="bold">System</emphasis> and select <emphasis role="bold">Cluster
        View</emphasis>. The list of Enea Edge Management servers should be
        displayed, with one listed as Primary and the rest as Backup.</para>

        <note>
          <para>To avoid split-brain (network-partitioning) scenarios, it is
          possible for the user to specify an IP:port tuple for a system that
          acts as an Arbiter. In case both Primary and Backup servers cannot
          reach the Arbiter, there will be no Primary system to connect to and
          the management services will be unavailable.</para>

          <para>This can be manually remedied by either fixing the network
          situation that caused the partitioning or modifying Arbiter
          information in the configuration file on the server that the user
          decides should be the (one and only) Primary server.</para>
        </note>

        <para><remark>ELCCR-907</remark>At times the VIP address is not
        acquired by any of the backup nodes if a power failure happens on the
        primary node in a High Availability setup. To recover, follow the
        official documentation available at <ulink
        url="https://mariadb.com/kb/en/galera-cluster-recovery/">Galera
        Cluster Recovery</ulink>.</para>

        <para>To change the configuration at a later point after the
        installation, either uninstall and then reinstall the product, or
        contact Enea Support.</para>
      </section>
    </section>

    <section id="ha_upgrade">
      <title>Upgrading a High Availability Deployment</title>

      <para>Upgrading a High Availability deployment is a highly complex,
      multi-step process that requires care to ensure both consistency and
      high-availability. Some steps need to be done manually.</para>

      <orderedlist>
        <listitem>
          <para>We start with the assumption that
          <literal>EneaEdgeManagement-1</literal> is the "PRIMARY"
          server.</para>
        </listitem>

        <listitem>
          <para>Shut down database services on one side of the network, for
          example: MariaDB-4, MariaDB-5 and MariaDB-6.</para>
        </listitem>

        <listitem>
          <para>Disconnect the network interfaces towards the VPN for machines
          MariaDB-4, MariaDB-5 and MariaDB-6. This will prevent any attempts
          at failover/synchronization.</para>
        </listitem>

        <listitem>
          <para>Run the upgrade process on
          <literal>EneaEdgeManagement-3</literal> and
          <literal>EneaEdgeManagement-4</literal>. This will upgrade the
          service to the current release. Once the upgrade process completes,
          shut down the Enea Edge Management service on both machines.</para>
        </listitem>

        <listitem>
          <para>Disconnect the <literal>EneaEdgeManagement-2</literal> machine
          from the network (which will take MariaDB-2 offline as well). At
          this point, only the "PRIMARY" server is running; this is the start
          of the interval when we are susceptible to single-server
          failure.</para>
        </listitem>

        <listitem>
          <para>Shut down the MariaDB-2 process and run the Enea Edge
          Management upgrade process on
          <literal>EneaEdgeManagement-2</literal>. This will upgrade the
          service to the current release. Once the upgrade process completes,
          shut down the Enea Edge Management service on the machine.</para>
        </listitem>

        <listitem>
          <para>Reconnect the network interfaces towards the VPN for MariaDB-4
          (<literal>EneaEdgeManagement-3</literal>), MariaDB-5
          (<literal>EneaEdgeManagement-4</literal>) and MariaDB-6
          (<literal>EneaEdgeManagement-2</literal>). Restart database services
          on MariaDB-2, MariaDB-4, MariaDB-5 and MariaDB-6. This will allow
          database services on all machines to synchronize, any data that has
          been modified during the upgrade process will be made
          consistent.</para>
        </listitem>

        <listitem>
          <para>Shut down the "Primary" server
          (<literal>EneaEdgeManagement-1</literal>). At this point, the
          service is no longer available.</para>
        </listitem>

        <listitem>
          <para>Start the Enea Edge Management service on
          <literal>EneaEdgeManagement-2</literal>. This machine will come up
          as the new "PRIMARY" with the upgraded software. As part of the
          startup process, it will upgrade the database and perform any other
          upgrade-related functionality.</para>
        </listitem>

        <listitem>
          <para>At this point (once startup completes), service is available.
          However, the system is still susceptible to single-server
          failure.</para>
        </listitem>

        <listitem>
          <para>Start the Enea Edge Management services on
          <literal>EneaEdgeManagement-3</literal> and
          <literal>EneaEdgeManagement-4</literal>. At this point, they will
          operate in highly-available mode.</para>
        </listitem>

        <listitem>
          <para>Upgrade the Enea Edge Management application on
          <literal>EneaEdgeManagement-1</literal> (the one that has been shut
          down). Once that upgrade is complete and the service restarts, the
          entire setup has been upgraded to the new version.</para>
        </listitem>
      </orderedlist>
    </section>
  </section>
</chapter>