From 380e975b1b93e83705c8ed30197b1c23f8193814 Mon Sep 17 00:00:00 2001 From: Miruna Paun Date: Mon, 25 Sep 2017 13:57:48 +0200 Subject: Create new version of NFV Core 1.0 Installation Guide USERDOCAP-240 Signed-off-by: Miruna Paun --- Makefile | 6 +- README | 21 + .../doc/about_release.xml | 31 +- book-enea-nfv-core-installation-guide/doc/book.xml | 11 +- .../doc/hardware_requirements.xml | 77 - .../doc/help_hw_req.xml | 37 - .../doc/high_availability.xml | 794 ++++++++ .../doc/hw_features.xml | 13 + .../doc/images/DNS_Hostname.png | Bin 0 -> 17676 bytes .../doc/images/DNS_Hostname.svg | 373 ++++ .../doc/images/additional_services.png | Bin 0 -> 40675 bytes .../doc/images/additional_services.svg | 776 ++++++++ .../doc/images/allocate_nodes.png | Bin 0 -> 32846 bytes .../doc/images/allocate_nodes.svg | 639 ++++++ .../doc/images/assign_roles.png | Bin 0 -> 75425 bytes .../doc/images/assign_roles.svg | 3 + .../doc/images/compute_kvm.png | Bin 0 -> 63466 bytes .../doc/images/compute_kvm.svg | 1176 +++++++++++ .../doc/images/config_nodes.png | Bin 0 -> 73785 bytes .../doc/images/config_nodes.svg | 3 + .../doc/images/config_nodes_2.png | Bin 0 -> 78067 bytes .../doc/images/config_nodes_2.svg | 3 + .../doc/images/dr_fault_mg.png | Bin 0 -> 119096 bytes .../doc/images/dr_fault_mg.svg | 3 + .../doc/images/dr_fault_mg_2.png | Bin 0 -> 91761 bytes .../doc/images/dr_fault_mg_2.svg | 3 + .../doc/images/features_groups.png | Bin 0 -> 41533 bytes .../doc/images/features_groups.svg | 791 ++++++++ .../doc/images/fuel_install_static_ip.png | Bin 0 -> 315240 bytes .../doc/images/fuel_install_static_ip.svg | 3 + .../doc/images/fuel_user.png | Bin 0 -> 18465 bytes .../doc/images/fuel_user.svg | 386 ++++ .../doc/images/functional_blocks.png | Bin 0 -> 176800 bytes .../doc/images/functional_blocks.svg | 3 + .../doc/images/general_settings.png | Bin 0 -> 115081 bytes .../doc/images/general_settings.svg | 2081 +++++++++++++++++++ .../doc/images/hugepages.png | Bin 0 -> 131041 bytes .../doc/images/hugepages.svg | 3 + .../doc/images/net_setup_1.png | Bin 0 -> 19800 bytes .../doc/images/net_setup_1.svg | 410 ++++ .../doc/images/net_setup_2.png | Bin 0 -> 19721 bytes .../doc/images/net_setup_2.svg | 408 ++++ .../doc/images/networks-tab.png | Bin 0 -> 115308 bytes .../doc/images/networks_tab.svg | 2085 ++++++++++++++++++++ .../doc/images/neutron_L3.png | Bin 0 -> 83588 bytes .../doc/images/neutron_L3.svg | 1529 ++++++++++++++ .../doc/images/neutron_vlan.png | Bin 0 -> 64831 bytes .../doc/images/neutron_vlan.svg | 1200 +++++++++++ .../doc/images/newton_debian.png | Bin 0 -> 51821 bytes .../doc/images/newton_debian.svg | 972 +++++++++ .../doc/images/nodes.png | Bin 0 -> 11591 bytes .../doc/images/nodes.svg | 266 +++ .../doc/images/openstack_services.png | Bin 0 -> 92276 bytes .../doc/images/openstack_services.svg | 1681 ++++++++++++++++ .../doc/images/other.png | Bin 0 -> 78056 bytes .../doc/images/other.svg | 1432 ++++++++++++++ .../doc/images/other_networks.png | Bin 0 -> 101959 bytes .../doc/images/other_networks.svg | 1851 +++++++++++++++++ .../doc/images/storage_backends.png | Bin 0 -> 44247 bytes .../doc/images/storage_backends.svg | 839 ++++++++ .../doc/images/time_sync.png | Bin 0 -> 20531 bytes .../doc/images/time_sync.svg | 423 ++++ .../doc/installation_deployment.xml | 869 -------- .../doc/installation_health_check.xml | 27 - .../doc/installation_instructions.xml | 1259 ++++++++++++ .../doc/post_deploy_config.xml | 117 ++ .../doc/preface.xml | 130 -- .../doc/reference_index.xml-NOTES | 121 -- 
.../doc/tor_config_req.xml | 28 - .../doc/use_cases_per_target_node.xml | 2 +- book-enea-nfv-core-release-info/doc/book.xml | 5 +- .../doc/build_boot_generated.xml | 55 + .../doc/build_boot_template.xml | 34 + .../doc/machine_list_generated.xml | 8 + .../doc/pkgdiff_generated.xml | 0 gen_known_issues.py | 2 +- init.mk | 49 +- initbuildboot.sh | 115 ++ manifest_conf.mk | 8 + 79 files changed, 21841 insertions(+), 1320 deletions(-) create mode 100644 README delete mode 100644 book-enea-nfv-core-installation-guide/doc/hardware_requirements.xml delete mode 100644 book-enea-nfv-core-installation-guide/doc/help_hw_req.xml create mode 100644 book-enea-nfv-core-installation-guide/doc/high_availability.xml create mode 100644 book-enea-nfv-core-installation-guide/doc/hw_features.xml create mode 100644 book-enea-nfv-core-installation-guide/doc/images/DNS_Hostname.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/DNS_Hostname.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/additional_services.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/additional_services.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/allocate_nodes.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/allocate_nodes.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/assign_roles.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/assign_roles.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/compute_kvm.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/compute_kvm.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/config_nodes.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/config_nodes.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/config_nodes_2.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/config_nodes_2.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg_2.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg_2.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/features_groups.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/features_groups.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/fuel_install_static_ip.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/fuel_install_static_ip.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/fuel_user.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/fuel_user.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/functional_blocks.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/functional_blocks.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/general_settings.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/general_settings.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/hugepages.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/hugepages.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/net_setup_1.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/net_setup_1.svg 
create mode 100644 book-enea-nfv-core-installation-guide/doc/images/net_setup_2.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/net_setup_2.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/networks-tab.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/networks_tab.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/neutron_L3.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/neutron_L3.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/neutron_vlan.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/neutron_vlan.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/newton_debian.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/newton_debian.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/nodes.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/nodes.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/openstack_services.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/openstack_services.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/other.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/other.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/other_networks.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/other_networks.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/storage_backends.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/storage_backends.svg create mode 100644 book-enea-nfv-core-installation-guide/doc/images/time_sync.png create mode 100644 book-enea-nfv-core-installation-guide/doc/images/time_sync.svg delete mode 100644 book-enea-nfv-core-installation-guide/doc/installation_deployment.xml delete mode 100644 book-enea-nfv-core-installation-guide/doc/installation_health_check.xml create mode 100644 book-enea-nfv-core-installation-guide/doc/installation_instructions.xml create mode 100644 book-enea-nfv-core-installation-guide/doc/post_deploy_config.xml delete mode 100644 book-enea-nfv-core-installation-guide/doc/preface.xml delete mode 100644 book-enea-nfv-core-installation-guide/doc/reference_index.xml-NOTES delete mode 100644 book-enea-nfv-core-installation-guide/doc/tor_config_req.xml create mode 100644 book-enea-nfv-core-release-info/doc/build_boot_generated.xml create mode 100644 book-enea-nfv-core-release-info/doc/build_boot_template.xml create mode 100644 book-enea-nfv-core-release-info/doc/machine_list_generated.xml create mode 100644 book-enea-nfv-core-release-info/doc/pkgdiff_generated.xml create mode 100644 initbuildboot.sh create mode 100644 manifest_conf.mk diff --git a/Makefile b/Makefile index 78dcfa9..52d72c2 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ DOCBOOKMAKE = $(SUBSYSROOT)/s_docbuild/docmake DOCBOOKTEMPLATE = $(SUBSYSROOT)/s_docbuild/template #Path to the OLINK database including leading part of file name (will add -$(FORMAT).db) -DOCBOOKOLINKDB_BASE = $(SUBSYSROOT)/s_docbuild/olinkdb/olink-targetdb-master +DOCBOOKOLINKDB_BASE = $(SUBSYSROOT)/s_docbuild/olinkdb/olink-targetdb-ose5-master DOCBOOK_OLINKS ?= yes DOCBOOK_FO_USEFOP ?= yes @@ -132,7 +132,7 @@ dist: doc # Default FORMATs ifeq ($(FORMAT),) -FORMAT=pdf +FORMAT=pdf html eclipse endif @@ -186,4 +186,4 @@ $(TMPCLONEROOT_DOCSRC_COMMON): $(VERB)if [ ! 
-d "$(TMPCLONEROOT)" ] ; then mkdir -p "$(TMPCLONEROOT)" ; fi $(VERB)if [ ! -d $(TMPCLONEROOT_DOCSRC_COMMON) ]; then mkdir -p $(TMPCLONEROOT_DOCSRC_COMMON) ; fi @echo "**** Copy docsrc_common/ files to $(TMPCLONEROOT_DOCSRC_COMMON)" - $(VERB)cat docsrc_common/pardoc-distro.xml >$(TMPPARDOC) \ No newline at end of file + $(VERB)cat docsrc_common/pardoc-distro.xml >$(TMPPARDOC) diff --git a/README b/README new file mode 100644 index 0000000..08661a9 --- /dev/null +++ b/README @@ -0,0 +1,21 @@ + +virtualization profile + +___Prerequisites-commands:Ubuntu14.04.5LTS +sudo apt-get update +sudo apt-get install sed wget subversion git-core coreutils unzip texi2html texinfo libsdl1.2-dev docbook-utils fop gawk python-pysqlite2 diffstat make gcc build-essential xsltproc g++ desktop-file-utils chrpath libgl1-mesa-dev libglu1-mesa-dev autoconf automake groff libtool xterm libxml-parser-perl +___END + +___RepoInstall-commands:Ubuntu14.04.5LTS +mkdir ./bin +curl https://storage.googleapis.com/git-repo-downloads/repo > ./bin/repo +chmod a+x ./bin/repo +export PATH=./bin/repo:$PATH +___END + +___RepoClone-commands: +mkdir enea-linux +cd enea-linux +repo init -u git@git.enea.com:linux/manifests/el_manifests-virtualization.git -b refs/tags/EL6 -m /default.xml +repo sync +___END diff --git a/book-enea-nfv-core-installation-guide/doc/about_release.xml b/book-enea-nfv-core-installation-guide/doc/about_release.xml index 9622958..54cf634 100644 --- a/book-enea-nfv-core-installation-guide/doc/about_release.xml +++ b/book-enea-nfv-core-installation-guide/doc/about_release.xml @@ -4,18 +4,18 @@ About This Release -
- Abstract - - This document describes how to install the 1.0 release of Enea NFV - Core using Fuel as a deployment tool, and different target node pool(s). - It covers usage, limitations, dependencies and required system - resources. -
-
Introduction + This document is intended as both a User Manual and an + Installation Manual for the ENEA NFV Core 1.0 Release, which was designed + specifically for testing and integration work by Cavium. + + This document describes how to install the Enea NFV Core 1.0 release + using Fuel as a deployment tool with different target node pools. It + covers usage, limitations, dependencies and required system + resources. + This document provides guidelines on how to install and configure the 1.0 release of ENFV Core when using Fuel as a deployment tool, including the required software and hardware configurations, resulting in @@ -29,4 +29,17 @@ The audience of this document is assumed to have a good grasp of networking and Unix/Linux administration.
+ +
+ Definitions and Acronyms + + Definitions: + + + + Jump server - physical server which runs the virtualized + OpenStack/OPNFV Installer + + +
\ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/book.xml b/book-enea-nfv-core-installation-guide/doc/book.xml index d0352da..62d46f6 100644 --- a/book-enea-nfv-core-installation-guide/doc/book.xml +++ b/book-enea-nfv-core-installation-guide/doc/book.xml @@ -9,12 +9,9 @@ xmlns:xi="http://www.w3.org/2001/XInclude" /> - - - - - - + + + + - diff --git a/book-enea-nfv-core-installation-guide/doc/hardware_requirements.xml b/book-enea-nfv-core-installation-guide/doc/hardware_requirements.xml deleted file mode 100644 index 934276b..0000000 --- a/book-enea-nfv-core-installation-guide/doc/hardware_requirements.xml +++ /dev/null @@ -1,77 +0,0 @@ - - - - Hardware requirements - - The following minimum hardware requirements must be met for the - installation of ENFV Core using Fuel, to be successful: - - - - - - Hardware Aspect - - Requirement - - - - Nr. of nodes - - Minimum 5 (3 for non-redundant deployment): - - 1 Fuel deployment master (may be virtualized) - - - - 3 Controllers (1 co-located mongo/ceilometer role, 2 - Ceph-OSD roles) - - - - 1 Compute (1 co-located Ceph-OSD role) - - - - - - CPU - - Minimum 1 socket x86_AMD64 with Virtualization - support - - - - RAM - - Minimum 16GB/server (depending on VNF work load) - - - - Disk - - Minimum 256GB 10kRPM spinning disks - - - - Networks - - - - 4 Tagged VLANs (PUBLIC, MGMT, STORAGE, PRIVATE) - - - - 1 Un-Tagged VLAN for PXE Boot - ADMIN Network - - Note: These can be allocated to a single NIC - - or spread out over multiple NICs as supported by your - hardware. - - - - - \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/help_hw_req.xml b/book-enea-nfv-core-installation-guide/doc/help_hw_req.xml deleted file mode 100644 index 4b5f676..0000000 --- a/book-enea-nfv-core-installation-guide/doc/help_hw_req.xml +++ /dev/null @@ -1,37 +0,0 @@ - - - - Help with Hardware Requirements - - For information on compatible hardware types available for use, please - see the Fuel - Plugin Developers Guideupdate this link with the correct - ENFV guide if needed. . When choosing the hardware on which you - will deploy your OpenStack environment, you should consider the - following: - - - - CPU – the number of virtual machines that you plan to deploy - in your cloud environment and the CPU per virtual machine. - - - - Memory – depends on the amount of RAM assigned per virtual - machine and the controller node. - - - - Storage – depends on the local-drive space per virtual - machine, remote volumes that can be attached to a virtual machine, and - object storage. - - - - Networking – depends on the chosen Network Topology, the - network bandwidth per virtual machine, and network storage. - - - \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/high_availability.xml b/book-enea-nfv-core-installation-guide/doc/high_availability.xml new file mode 100644 index 0000000..e489101 --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/high_availability.xml @@ -0,0 +1,794 @@ + + + High Availability Guide + + ENEA NFV Core 1.0 has been designed to provide high availability + characteristics that are needed for developing and deploying telco-grade NFV + solutions on top of our OPNFV based platform. + + The High Availability subject in general is very wide and still an + important focus in both opensource communities and independent/proprietary + solutions market. 
ENEA NFV Core 1.0 aims initially to leverage the efforts + of the upstream OPNFV and OpenStack open source projects, combining solutions + from both worlds to provide flexibility and wide use + case coverage. ENEA has long-standing expertise and proprietary solutions + addressing High Availability for telco applications, which may eventually be + integrated with NFV based solutions; however, the initial scope of ENEA + NFV Core is to leverage the OPNFV Reference Platform and + open source projects in general as much as possible, as will be seen later in + this chapter. +
+ High Availability Levels + + The base for the feature set in ENEA NFV Core is divided into three + levels: + + + + Hardware Fault + + + + NFV Platform HA + + + + VNF High Availability + + + + The same division into levels of fault management can be seen in the + scope of the High Availability for OPNFV (Availability) project. OPNFV + also hosts the Doctor Project, a fault management and maintenance + project that develops and realizes the consequent implementation for the OPNFV + reference platform. + + These two projects complement each other. + + The Availability project addresses HA requirements and solutions from + the perspective of the three levels mentioned above. It produces high + level requirements and API definitions for High Availability of OPNFV and an HA + Gap Analysis Report for OpenStack; more recently, it works on optimizing + existing OPNFV test frameworks, such as Yardstick, and develops test cases + which realize HA specific use cases and scenarios derived from the + HA requirements. + + The Doctor Project, on the other hand, aims to build a fault management + and maintenance framework for high availability of Network Services on top + of virtualized infrastructure; its key feature is immediate notification + of the unavailability of virtualized resources from the VIM, to enable recovery + of the VNFs running on them. The Doctor project has also collaborated with the + Availability project on identifying gaps in upstream projects, mainly but not + exclusively OpenStack, and has worked towards implementing missing + features or improving existing functionality, one good example being the Aodh + event based alarms, which allow fast notifications when certain + predefined events occur. The Doctor project also produced an architecture + design and a reference implementation based on open source components, + which will be presented later in this document.
+ +
+ Doctor Architecture + + The Doctor documentation shows the detailed architecture for both Fault + Management and NFVI Maintenance. The two are very similar, so we will + focus on Fault Management. + + The architecture specifies a set of functional blocks: + + + + Monitor - monitors the virtualized infrastructure, capturing + fault events in software and hardware. For this particular + component we chose Zabbix, which is integrated into the platform by + means of the Fuel Zabbix Plugin, available upstream. + + + + Inspector - this component is able to receive notifications from + Monitor components and also from OpenStack core components, which allows it + to create logical relationships between entities, identify affected + resources when faults occur, and communicate with Controllers to + update the states of the virtual and physical resources. For this + component ENEA NFV Core 1.0 makes use of Vitrage, an OpenStack + related project used for Root Cause Analysis, which has been adapted + to serve as a Doctor Inspector. The integration into the platform is + realized with the help of a Fuel Plugin developed + internally by ENEA. + + + + Controller - OpenStack core components act as Controllers. They + are responsible for maintaining the resource map between physical and + virtual resources, they accept update requests from the Inspector, and they + are responsible for sending failure event notifications to the + Notifier. Components such as Nova, Neutron, Glance and Heat act as + Controllers in the Doctor Architecture. + + + + Notifier - the focus of this component is on selecting and + aggregating failure events received from the Controller, based on + policies mandated by the Consumer. The role of the Notifier is + fulfilled by the Aodh component in OpenStack. + + + + Besides the Doctor components, there are a couple of other blocks + mentioned in the architecture: + + + + Administrator - this represents the human role of administering + the platform by means of dedicated interfaces, either visual + dashboards, like OpenStack Horizon or the Fuel Dashboard, or CLI + tools, like the OpenStack unified CLI, which can be accessed + traditionally from one of the servers that act as OpenStack Controller + nodes. In the case of ENEA NFV Core 1.0, the Administrator can also + access the Zabbix dashboard for further configuration. The same + applies to the Vitrage tool, which comes with its own Horizon + dashboard enabling the user to visually inspect the faults + reported by the monitoring tools; it also creates visual + representations of the virtual and physical resources, the + relationships between them, and the fault correlation. For Vitrage, + users will usually want to configure additional use cases and describe + relationships between components via template files written in YAML + format. More information about using Vitrage is presented in a + following section. + + + + Consumer - this block is only vaguely described in the Doctor + Architecture, being outside its scope. Doctor only deals with fault + detection and management, making sure faults are handled as soon as + possible after detection; it identifies affected virtual resources and + updates their states, but since the actual VNFs are managed, + according to the ETSI architecture, by a different entity, Doctor does + not deal with recovery actions for the VNFs. The role of the Consumer + thus falls to a VNF Manager and Orchestrator.
ENEA NFV + Core 1.0 provides VNF management capabilities using Tacker, which is + an OpenStack project that implements a generic VNF Manager and + Orchestrator according to the ETSI MANO Architectural + Framework. + + + + The functional blocks overview in the picture below has been + complemented to show the components used for realizing the Doctor + Architecture: + + + + + + + +
+ Doctor Fault Management + + The architecture described in the Doctor project has been + demonstrated in various PoCs and demos, but always using sample + components for either the consumer or the monitor. ENEA has worked with + the upstream projects, Doctor and Vitrage, to realize the goals of the + Doctor project using real components, as described before. + + The two pictures below show a typical fault management scenario, + as described in the Doctor documentation. + + + + + + + + + + + + + + ENEA NFV Core 1.0 uses the same approach described above, but it is + worth going through each step and detailing it. + + + + When creating a VNF, the user has to enable the + monitoring capabilities of Tacker by passing a template which + specifies that an alarm will be created when the VM represented by + this VNF changes state. The support for alarm monitoring in Tacker + is captured in the Alarm Monitoring Framework spec in the OpenStack + documentation; a sketch of such a template is shown at the end of this + section. In a few words, Tacker should be able to create a VNF + and then create an Aodh alarm of type event which triggers when the + instance is in state ERROR. The action to take when this event + triggers is to perform an HTTP call to a URL managed by Tacker. As + a result of this action, Tacker can detect when an instance has + failed (for whatever reason) and will respawn it somewhere + else. + + + + The subscribe response in this case is an empty operation; the + Notifier (Aodh) only has to confirm that the alarm has been + created. + + + + The NFVI sends monitoring events for resources to which the VIM has + subscribed. Note: this subscription message exchange between + the VIM and NFVI is not shown in this message flow. This step relates + to Vitrage's capability of receiving notifications from + OpenStack services; at this moment Vitrage supports notifications + from the nova.host, nova.instances, nova.zone, cinder.volume, + neutron.network, neutron.port and heat.stack OpenStack + datasources. + + + + This step describes faults being detected by Zabbix, which are + sent to the Inspector (Vitrage) as soon as they are detected, using a push + approach by means of an AMQP message sent to a dedicated message + queue managed by Vitrage. For example, if nova-compute fails on one + of the compute nodes, Zabbix will format a message specifying all + the details needed for processing the fault, e.g. a + timestamp, what host failed, and what event occurred. + + + + Database lookup to find the virtual resources affected by the + detected fault. In this step Vitrage performs various + calculations to detect which virtual resources are affected by the + raw failure reported by Zabbix. Vitrage can be configured via + templates to correlate instances with the physical hosts they are + running on, so that if a compute node fails, the instances running + on that host are deemed affected. A typical use case is to mark the + compute node down (a.k.a. mark_host_down) and update the states of + all instances running on it, by issuing Nova API calls for each of + these instances. Step 5c) shows the Controller (Nova in this case) + acting upon the state change of the instance and issuing an event + alarm to Aodh. + + + + The Notifier will acknowledge the alarm event request from + Nova and will trigger the alarm(s) created by Tacker in step 1). + Since Tacker has configured the alarm to send an HTTP request, Aodh + will perform that HTTP call at the URL managed by Tacker. 
+ + + + The Consumer (Tacker) will react to the HTTP call and perform + the action configured by the user (e.g. respawn the VNF). + + + + The action is sent to the Controller (Nova) so that the VNF is + recreated. + + + + + The ENEA NFV Core 1.0 Pre-Release fully covers the required + Doctor functionality only for the Vitrage and Zabbix + components. + +
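+ As an illustration of step 1), the sketch below shows the general shape
+ of a Tacker VNFD carrying an alarm-monitoring policy which respawns the
+ VNF when its alarm fires. It is a minimal sketch modeled on the upstream
+ Tacker alarm-monitoring samples; the VDU name, image, flavor, metric and
+ threshold are illustrative only, and the exact schema depends on the
+ Tacker version shipped with the platform.
+
+ tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: Sketch of a VNFD with an alarm-monitoring policy (illustrative values)
+topology_template:
+  node_templates:
+    VDU1:
+      type: tosca.nodes.nfv.VDU.Tacker
+      properties:
+        image: cirros
+        flavor: m1.tiny
+  policies:
+    - vdu1_monitoring_policy:
+        type: tosca.policies.tacker.Alarming
+        triggers:
+          vdu1_respawn_trigger:
+            event_type:
+              type: tosca.events.resource.utilization
+              implementation: ceilometer
+            metric: cpu_util
+            condition:
+              threshold: 50
+              evaluations: 1
+              comparison_operator: gt
+            # respawn the VNF when the alarm fires
+            action: [respawn]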
+ +
+ Zabbix Configuration for Push Notifications + + Vitrage supports a Zabbix datasource by means of regularly polling + the Zabbix agents, which need to be configured in advance. The Vitrage + plugin developed internally by ENEA can automatically configure Zabbix + so that everything works as expected. + + However, polling is not fast enough for a telco use case, so it is + necessary to configure push notifications for Zabbix. This requires + manual configuration on one of the controller nodes; since Zabbix uses a + centralized database, the configuration becomes available on all the + other nodes. + + The Zabbix configuration dashboard is available at the same IP + address where OpenStack can be reached, e.g. + http://<vip__zbx_vip_mgmt>/zabbix. + + To forward Zabbix events to Vitrage, a new media script needs to be + created and associated with a user. Follow the steps below as a Zabbix + Admin user: + + + + Create a new media type [Administration > Media Types > Create + Media Type] + + + + Name: Vitrage Notifications + + + + Type: Script + + + + Script name: zabbix_vitrage.py + + + + + + Modify the Media for the Admin user [Administration > + Users] + + + + Type: Vitrage Notifications + + + + Send to: rabbit://rabbit_user:rabbit_pass@127.0.0.1:5672/ + --- the Vitrage message bus URL (search for transport_url in + /etc/vitrage/vitrage.conf or /etc/nova/nova.conf; see the lookup + example at the end of this section) + + + + When active: 1-7,00:00-24:00 + + + + Use if severity: (all) + + + + Status: Enabled + + + + + + Configure Action [Configuration > Actions > Create + Action] + + + + Name: Forward to Vitrage + + + + Default Subject: {TRIGGER.STATUS} + + + + Default Message: host={HOST.NAME1} hostid={HOST.ID1} + hostip={HOST.IP1} triggerid={TRIGGER.ID} + description={TRIGGER.NAME} rawtext={TRIGGER.NAME.ORIG} + expression={TRIGGER.EXPRESSION} value={TRIGGER.VALUE} + priority={TRIGGER.NSEVERITY} lastchange={EVENT.DATE} + {EVENT.TIME} + + + + + + To send events, add under the Conditions tab: Maintenance + status not in "maintenance". + + + + Finally, add an operation: + + + + Send to Users: Admin + + + + Send only to: Vitrage Notifications + + + + + + With this configuration, Zabbix will call the zabbix_vitrage.py + script, which is made readily available by the Fuel Vitrage Plugin, + passing the arguments described in step 3). The zabbix_vitrage.py script + will then interpret the parameters and format an AMQP message that will be + sent to the vitrage.notifications queue, which is managed by the + vitrage-graph service.
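+ The Vitrage message bus URL needed for the Send to field above can be
+ read from the service configuration on a controller node. A minimal
+ lookup sketch (the node name and the credentials in the output are
+ examples):
+
+ root@node-1:~# grep -m1 transport_url /etc/vitrage/vitrage.conf
+transport_url = rabbit://rabbit_user:rabbit_pass@192.168.0.2:5672/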
+ +
+ Vitrage Configuration + + The Vitrage team has been collaborating with the OPNFV Doctor Project + in order to support Vitrage as an Inspector component. The Doctor + use case for Vitrage is described in an OpenStack blueprint. + Additionally, ENEA NFV Core has complemented Vitrage with the capability + of setting the states of failed instances, by implementing an action type in + Vitrage which calls Nova APIs to set instances in error state. There is + also an action type which allows fencing failed hosts. + + In order to make use of these features, Vitrage supports + additional configuration via YAML templates that must be placed in + /etc/vitrage/templates on the nodes that have the Vitrage role. + + The example below shows how to program Vitrage to mark failed + compute hosts as down and then to change the state of the instances to + Error, by creating Vitrage deduced alarms. + + metadata: + name: test_nova_mark_instance_err + description: test description +definitions: + entities: + - entity: + category: ALARM + type: zabbix + rawtext: Nova Compute process is not running on {HOST.NAME} + template_id: zabbix_alarm + - entity: + category: RESOURCE + type: nova.host + template_id: host + - entity: + category: RESOURCE + type: nova.instance + template_id: instance + relationships: + - relationship: + source: zabbix_alarm + relationship_type: on + target: host + template_id: nova_process_not_running + - relationship: + source: host + target: instance + relationship_type: contains + template_id: host_contains_instance +scenarios: + - scenario: + condition: nova_process_not_running and host_contains_instance + actions: + - action: + action_type: mark_down + action_target: + target: host + - action: + action_type: set_instance_state + action_target: + target: instance + - action: + action_type: set_state + action_target: + target: instance + properties: + state: ERROR + + For the fence action type, a similar action item must be + added: + + - scenario: + condition: critical_problem_on_host + actions: + - action: + action_type: fence + action_target: + target: host + + After a template is configured, the + vitrage-api and vitrage-graph services must be restarted: + + root@node-6:~# systemctl restart vitrage-api +root@node-6:~# systemctl restart vitrage-graph
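+ A template can also be checked for syntax errors before restarting the
+ services, using the Vitrage client. A minimal sketch, assuming the
+ python-vitrageclient CLI is available on the node and using the template
+ file name from the example above:
+
+ root@node-6:~# vitrage template validate --path /etc/vitrage/templates/test_nova_mark_instance_err.yaml
+root@node-6:~# vitrage alarm list    # after the restart, deduced alarms show up here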
+ +
+ Vitrage Customizations + + ENEA NFV Core 1.0 has added custom features to Vitrage which + allow two kinds of actions: + + + + Perform actions Northbound of the VIM (see the verification + example after this list): + + + + Nova force host down on compute + + + + Setting instance state to error in Nova; this is used in + conjunction with an alarm created by Tacker, as described + before, and should allow Tacker to detect when an instance is + affected and take the proper actions. + + + + + + Perform actions Southbound of the VIM. + + Vitrage templates allow us to program fencing actions for + hosts with failed services. In the event that systemd is unable + to recover a critical process, or another type of software error + occurs on the hardware supporting them, we can program the fencing of that + node, which performs a reboot, thus attempting to recover the failed + node. + + +
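+ The effect of the Northbound actions can be verified with the standard
+ OpenStack clients once a fault has occurred. A quick sketch (the host
+ name is illustrative):
+
+ root@node-1:~# openstack compute service list --service nova-compute
+# the failed compute host should now be reported as down
+root@node-1:~# openstack server list --all-projects --host node-4.domain.tld
+# instances on that host should show status ERROR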
+
+ +
+ Pacemaker High Availability + + Many of the OpenStack solutions which offer High Availability + characteristics employ Pacemaker to achieve highly available OpenStack + services. Traditionally, Pacemaker has been used to manage only the + control plane services, so it can effectively provide redundancy and + recovery for the Controller nodes only. One reason for this is that + Controller nodes and Compute nodes have essentially very different High + Availability requirements. Typically, the services that run on + Controller nodes are stateless, with a few + exceptions where only one instance of a given service is allowed but + redundancy is still desired, one good example being an AMQP service + (e.g. RabbitMQ). Compute node HA requirements depend on the type of + services that run on the node, but typically it is desired that failures on + these nodes are detected as soon as possible, so that the instances running + on them can be either migrated, resurrected or restarted. Another aspect + is that failures on the physical hosts do not necessarily cause + a failure of the services (VNFs) themselves, yet an incapacitated host + can prevent accessing and controlling those services. + + Controller High Availability is thus a subject which is in general + well understood and well exercised, and the basis for achieving it is + Pacemaker using Corosync underneath. + + Extending the use of Pacemaker to Compute nodes was considered as a + possible solution for providing VNF high availability, but this turns out + to be a problem which is not easy to solve. On the one hand, Pacemaker as a + clustering tool can only scale properly up to a limited number of nodes, + usually fewer than 128. This poses a problem for large scale deployments + where hundreds of compute nodes are required. On the other hand, Compute + node HA requires other considerations and calls for specially designed + solutions. +
+ Pacemaker Remote + + As mentioned earlier, Pacemaker and Corosync do not scale well + over a large cluster, because each node has to talk to every other node, + essentially creating a mesh configuration. One solution to this problem + could be partitioning the cluster into smaller groups, but this + has its limitations and is generally difficult to manage. + + A better solution is using pacemaker-remote, a feature of + Pacemaker which allows extending the cluster beyond the usual limits by + using the Pacemaker monitoring capabilities; it essentially creates a new + type of resource which enables adding lightweight nodes to the cluster. + More information about pacemaker-remote can be found on the official + Clusterlabs website. + + Please note that at this moment pacemaker-remote must be + configured manually after deployment. Here are the manual steps for + doing so: + + + + Log on to the Fuel Master using the default credentials, if not + changed (root/r00tme) + + + + Type fuel node to obtain the list of nodes, their roles and + their IP addresses + + [root@fuel ~]# fuel node +id | status | name | cluster | ip | mac | roles / + | pending_roles | online | group_id +---+--------+------------------+---------+-----------+-------------------+----------/ +-----------------+---------------+--------+--------- + 1 | ready | Untitled (8c:d4) | 1 | 10.20.0.4 | 68:05:ca:46:8c:d4 | ceph-osd,/ + controller | | 1 | 1 + 4 | ready | Untitled (8c:c2) | 1 | 10.20.0.6 | 68:05:ca:46:8c:c2 | ceph-osd,/ + compute | | 1 | 1 + 5 | ready | Untitled (8c:c9) | 1 | 10.20.0.7 | 68:05:ca:46:8c:c9 | ceph-osd,/ + compute | | 1 | 1 + 2 | ready | Untitled (8b:64) | 1 | 10.20.0.3 | 68:05:ca:46:8b:64 | / +controller, mongo, tacker | | 1 | 1 + 3 | ready | Untitled (8c:45) | 1 | 10.20.0.5 | 68:05:ca:46:8c:45 | / +controller, vitrage | | 1 | 1 + + + + Each controller has its own pacemaker authkey; we need to + keep one and propagate it to the other servers. Assuming node-1, + node-2 and node-3 are the controllers, execute the following from + the Fuel console: + + [root@fuel ~]# scp node-1:/etc/pacemaker/authkey . +[root@fuel ~]# scp authkey node-2:/etc/pacemaker/ +[root@fuel ~]# scp authkey node-3:/etc/pacemaker/ +[root@fuel ~]# scp authkey node-4:~ +[root@fuel ~]# scp authkey node-5:~ + + + + For each compute node, log on to it using the corresponding + IP. + + + + Install the required packages: + + root@node-4:~# apt-get install pacemaker-remote resource-agents crmsh + + + + Copy the authkey from the Fuel master and make sure the right + permissions are set: + + [root@node-4:~]# cp authkey /etc/pacemaker +[root@node-4:~]# chown root:haclient /etc/pacemaker/authkey + + + + Add an iptables rule for the default port (3121). 
Also save it to + /etc/iptables/rules.v4 to make it persistent: + + root@node-4:~# iptables -A INPUT -s 192.168.0.0/24 -p tcp -m multiport / +--dports 3121 -m comment --comment "pacemaker_remoted from 192.168.0.0/24" -j ACCEPT + + + + Start the pacemaker-remote service: + + [root@node-4:~]# systemctl start pacemaker-remote.service + + + + Log on to one of the controller nodes and configure the + pacemaker-remote resources: + + [root@node-1:~]# pcs resource create node-4.domain.tld remote +[root@node-1:~]# pcs constraint location node-4.domain.tld prefers / +node-1.domain.tld=100 node-2.domain.tld=100 node-3.domain.tld=100 +[root@node-1:~]# pcs constraint location node-4.domain.tld avoids node-5.domain.tld +[root@node-1:~]# pcs resource create node-5.domain.tld remote +[root@node-1:~]# pcs constraint location node-5.domain.tld prefers / +node-1.domain.tld=100 node-2.domain.tld=100 node-3.domain.tld=100 +[root@node-1:~]# pcs constraint location node-5.domain.tld avoids node-4.domain.tld + + + + The remote nodes should now appear online: + + [root@node-1:~]# pcs status +Cluster name: OpenStack +Last updated: Thu Aug 24 12:00:21 2017 Last change: Thu Aug 24 11:57:32 2017 / +by root via cibadmin on node-1.domain.tld +Stack: corosync +Current DC: node-1.domain.tld (version 1.1.14-70404b0) - partition with quorum +5 nodes and 78 resources configured + +Online: [ node-1.domain.tld node-2.domain.tld node-3.domain.tld ] +RemoteOnline: [ node-4.domain.tld node-5.domain.tld ] + +
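+ If a remote node does not come online, it is worth verifying that
+ pacemaker_remoted is listening on its default port and that the port is
+ reachable from the controllers. A minimal sketch (hostnames are
+ examples):
+
+ root@node-4:~# ss -nlt | grep 3121          # pacemaker_remoted should be listening
+root@node-1:~# nc -zv node-4.domain.tld 3121  # and reachable from the cluster nodes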
+ +
+ Pacemaker Fencing + + ENEA NFV Core 1.0 makes use of the fencing capabilities of + Pacemaker to isolate faulty nodes and trigger recovery actions by means + of power cycling the failed nodes. Fencing is configured by creating + STONITH type resources for each of the servers in the cluster, both + Controller nodes and Compute nodes. The STONITH adapter for fencing the + nodes is fence_ipmilan, which makes use of the IPMI capabilities of the + Cavium ThunderX servers. + + Here are the steps for enabling the fencing capabilities in the + cluster: + + + + Log on to the Fuel Master using the default credentials, if not + changed (root/r00tme). + + + + Type fuel node to obtain the list of nodes, their roles and + their IP addresses: + + [root@fuel ~]# fuel node +id | status | name | cluster | ip | mac | roles / + | pending_roles | online | group_id +---+--------+------------------+---------+-----------+-------------------+----------/ +-----------------+---------------+--------+--------- + 1 | ready | Untitled (8c:d4) | 1 | 10.20.0.4 | 68:05:ca:46:8c:d4 | ceph-osd,/ + controller | | 1 | 1 + 4 | ready | Untitled (8c:c2) | 1 | 10.20.0.6 | 68:05:ca:46:8c:c2 | ceph-osd,/ + compute | | 1 | 1 + 5 | ready | Untitled (8c:c9) | 1 | 10.20.0.7 | 68:05:ca:46:8c:c9 | ceph-osd,/ + compute | | 1 | 1 + 2 | ready | Untitled (8b:64) | 1 | 10.20.0.3 | 68:05:ca:46:8b:64 | / +controller, mongo, tacker | | 1 | 1 + 3 | ready | Untitled (8c:45) | 1 | 10.20.0.5 | 68:05:ca:46:8c:45 | / +controller, vitrage | | 1 | 1 + + + + Log on to each server to install the additional packages: + + [root@node-1:~]# apt-get install fence-agents ipmitool + + + + Configure the pacemaker fencing resources; this needs to be done + once, on one of the controllers. The parameters will vary, depending + on the BMC address and credentials of each node. + + [root@node-1:~]# crm configure primitive ipmi-fencing-node-1 / +stonith::fence_ipmilan params pcmk_host_list="node-1.domain.tld" / +ipaddr=10.0.100.151 login=ADMIN passwd=ADMIN op monitor interval="60s" +[root@node-1:~]# crm configure primitive ipmi-fencing-node-2 / +stonith::fence_ipmilan params pcmk_host_list="node-2.domain.tld" / +ipaddr=10.0.100.152 login=ADMIN passwd=ADMIN op monitor interval="60s" +[root@node-1:~]# crm configure primitive ipmi-fencing-node-3 / +stonith::fence_ipmilan params pcmk_host_list="node-3.domain.tld" / +ipaddr=10.0.100.153 login=ADMIN passwd=ADMIN op monitor interval="60s" +[root@node-1:~]# crm configure primitive ipmi-fencing-node-4 / +stonith::fence_ipmilan params pcmk_host_list="node-4.domain.tld" / +ipaddr=10.0.100.154 login=ADMIN passwd=ADMIN op monitor interval="60s" +[root@node-1:~]# crm configure primitive ipmi-fencing-node-5 / +stonith::fence_ipmilan params pcmk_host_list="node-5.domain.tld" / +ipaddr=10.0.100.155 login=ADMIN passwd=ADMIN op monitor interval="60s" + + + + Activate fencing by enabling the stonith property in Pacemaker (by + default it is disabled); this also needs to be done only once, on + one of the controllers. + + [root@node-1:~]# pcs property set stonith-enabled=true + +
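+ Once fencing is enabled, the STONITH configuration can be listed and,
+ on a node that is safe to reboot, tested manually. A short sketch (the
+ node name and BMC address are examples taken from the configuration
+ above):
+
+ [root@node-1:~]# pcs stonith show
+[root@node-1:~]# ipmitool -I lanplus -H 10.0.100.155 -U ADMIN -P ADMIN chassis power status
+[root@node-1:~]# pcs stonith fence node-5.domain.tld   # power cycles the node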
+
+ +
+ OpenStack Resource Agents + + The OpenStack community has been working for some time on + identifying possible solutions for enabling High Availability for Compute + nodes, although initially the subject of HA on Compute nodes was quite + controversial, seen as not being something that should concern the cloud + platform. Over time it became obvious that even on a true cloud platform, + where services are designed to run without being affected by the + availability of the cloud platform, fault management and recovery are still + very important and desirable. This is very much the case for NFV + applications where, in the good tradition of telecom applications, the + operators must have complete engineering control over the resources they + own and manage. + + The work on Compute node high availability is captured in an + OpenStack user story and documented upstream, showing proposed solutions, + summit talks and presentations. + + A number of these solutions make use of OpenStack Resource Agents, + which are basically a set of specialized pacemaker resources + capable of identifying failures in compute nodes and performing automatic + evacuation of the instances affected by these failures. + + ENEA NFV Core 1.0 aims to validate and integrate this work and to + make this feature available in the platform, to be used as an alternative + to the Doctor framework where simple, autonomous recovery of the running + instances is desired.
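+ As an indication of how such agents plug into the cluster, the sketch
+ below follows the upstream instance-HA instructions for the NovaEvacuate
+ agent from the openstack-resource-agents package. It is illustrative
+ only: the agent is not yet integrated into the platform, and the auth
+ URL and credentials are examples.
+
+ [root@node-1:~]# pcs resource create nova-evacuate ocf:openstack:NovaEvacuate /
+auth_url=http://192.168.0.2:5000/v2.0 username=admin password=secret /
+tenant_name=admin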
+
\ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/hw_features.xml b/book-enea-nfv-core-installation-guide/doc/hw_features.xml new file mode 100644 index 0000000..dcf00d2 --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/hw_features.xml @@ -0,0 +1,13 @@ + + + Hardware Features + + ENEA NFV Core 1.0 leverages certain Hardware features within the ThunderX + SoCs, the most important of them being DPDK and SR-IOV. + + ENEA's mission, through the OPNFV Armband project, is to extend the + OPNFV capabilities on aarch64 commercial servers, including the Cavium + ThunderX networking servers. ENEA NFV Core further extends the support for + Cavium ThunderX by enabling DPDK and SR-IOV capabilities which do not work + by default. + \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/images/DNS_Hostname.png b/book-enea-nfv-core-installation-guide/doc/images/DNS_Hostname.png new file mode 100644 index 0000000..0c6445a Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/DNS_Hostname.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/DNS_Hostname.svg b/book-enea-nfv-core-installation-guide/doc/images/DNS_Hostname.svg new file mode 100644 index 0000000..fbc9e04 --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/DNS_Hostname.svg @@ -0,0 +1,373 @@ + + + + + + + + image/svg+xml + + + + + + + + + diff --git a/book-enea-nfv-core-installation-guide/doc/images/additional_services.png b/book-enea-nfv-core-installation-guide/doc/images/additional_services.png new file mode 100644 index 0000000..27c0d92 Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/additional_services.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/additional_services.svg b/book-enea-nfv-core-installation-guide/doc/images/additional_services.svg new file mode 100644 index 0000000..086b468 --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/additional_services.svg @@ -0,0 +1,776 @@ + + + + + + + + image/svg+xml + + + + + + + + + diff --git a/book-enea-nfv-core-installation-guide/doc/images/allocate_nodes.png b/book-enea-nfv-core-installation-guide/doc/images/allocate_nodes.png new file mode 100644 index 0000000..c2e5dd6 Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/allocate_nodes.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/allocate_nodes.svg b/book-enea-nfv-core-installation-guide/doc/images/allocate_nodes.svg new file mode 100644 index 0000000..a90fd44 --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/allocate_nodes.svg @@ -0,0 +1,639 @@ + + + + + + + + image/svg+xml + + + + + + + + + diff --git a/book-enea-nfv-core-installation-guide/doc/images/assign_roles.png b/book-enea-nfv-core-installation-guide/doc/images/assign_roles.png new file mode 100644 index 0000000..387ac63 Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/assign_roles.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/assign_roles.svg b/book-enea-nfv-core-installation-guide/doc/images/assign_roles.svg new file mode 100644 index 0000000..bebd83e --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/assign_roles.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/images/compute_kvm.png b/book-enea-nfv-core-installation-guide/doc/images/compute_kvm.png new file mode 100644 index 
0000000..229ce04 Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/compute_kvm.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/compute_kvm.svg b/book-enea-nfv-core-installation-guide/doc/images/compute_kvm.svg new file mode 100644 index 0000000..2adcc32 --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/compute_kvm.svg @@ -0,0 +1,1176 @@ + + + + + + + + image/svg+xml + + + + + + + + + diff --git a/book-enea-nfv-core-installation-guide/doc/images/config_nodes.png b/book-enea-nfv-core-installation-guide/doc/images/config_nodes.png new file mode 100644 index 0000000..fcb40d1 Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/config_nodes.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/config_nodes.svg b/book-enea-nfv-core-installation-guide/doc/images/config_nodes.svg new file mode 100644 index 0000000..18fb3fe --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/config_nodes.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/images/config_nodes_2.png b/book-enea-nfv-core-installation-guide/doc/images/config_nodes_2.png new file mode 100644 index 0000000..b50a893 Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/config_nodes_2.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/config_nodes_2.svg b/book-enea-nfv-core-installation-guide/doc/images/config_nodes_2.svg new file mode 100644 index 0000000..f0eb66f --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/config_nodes_2.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg.png b/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg.png new file mode 100644 index 0000000..ab7437b Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg.svg b/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg.svg new file mode 100644 index 0000000..6621ee2 --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg_2.png b/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg_2.png new file mode 100644 index 0000000..8c9a68e Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg_2.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg_2.svg b/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg_2.svg new file mode 100644 index 0000000..74f78c6 --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/dr_fault_mg_2.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/images/features_groups.png b/book-enea-nfv-core-installation-guide/doc/images/features_groups.png new file mode 100644 index 0000000..cd99a42 Binary files /dev/null and b/book-enea-nfv-core-installation-guide/doc/images/features_groups.png differ diff --git a/book-enea-nfv-core-installation-guide/doc/images/features_groups.svg b/book-enea-nfv-core-installation-guide/doc/images/features_groups.svg new file mode 100644 index 0000000..9de72f7 --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/images/features_groups.svg @@ -0,0 +1,791 
@@ [inline SVG markup omitted]

[Image assets added under book-enea-nfv-core-installation-guide/doc/images/, each as a binary .png plus an .svg source whose inline SVG markup is omitted here: fuel_install_static_ip, fuel_user, functional_blocks, general_settings, hugepages, net_setup_1, net_setup_2, networks-tab.png with networks_tab.svg, neutron_L3, neutron_vlan, newton_debian, nodes, openstack_services, other, other_networks, storage_backends, time_sync]

diff --git a/book-enea-nfv-core-installation-guide/doc/installation_deployment.xml b/book-enea-nfv-core-installation-guide/doc/installation_deployment.xml deleted file mode 100644 index 2dbef3f..0000000 --- a/book-enea-nfv-core-installation-guide/doc/installation_deployment.xml +++ /dev/null @@ -1,869 +0,0 @@ - ENFV Core Software Installation and Deployment - This section describes the installation of the ENFV Core installation - server (Fuel master) as well as the deployment of the full ENFV Core - reference platform stack across a server cluster. -
- Install Fuel master - - - - Mount the ENFV Core Fuel ISO file/media as a boot device to the - jump host server. - - - - Reboot the jump host to establish the Fuel server: - - - - The system now boots from the ISO image - - - - Select ”Fuel Install (Static IP)” (See figure - below) - - Insert the appropriate figure/screenshot - - - - Press [Enter] - - - - - - Wait until the Fuel setup screen is shown, this can take up to - 30 minutes. - - - - In the ”Fuel User” section, confirm/change the - default password (see figure below) Insert the appropriate - figure/screenshot - - - - Enter ”admin” in the Fuel password input - - - - Enter ”admin” in the Confirm password - input - - - - Select ”Check” and press [Enter] - - - - - - In the ”Network Setup” section, configure - DHCP/Static IP information for your FUEL node. E.g.: ETH0 is - 10.20.0.2/24 for FUEL booting and ETH1 is DHCP in your corporate/lab - network (see figure below) Insert the appropriate - figure/screenshot. - - - - Configure eth1 or other network interfaces here as well (if - you have them present on your FUEL server). - - - - - - In the ”PXE Setup” section (see figure below), - change the following fields to appropriate values, for example: - - - - DHCP Pool Start 10.20.0.3 - - - - DHCP Pool End 10.20.0.254 - - - - DHCP Pool Gateway 10.20.0.2 (IP address of Fuel node) - - Insert the appropriate figure/screenshot - - - - - - In the ”DNS & Hostname” section (see figure - below), change the following fields to appropriate values: - - - - Hostname - - - - Domain - - - - Search Domain - - - - External DNS - - - - Hostname to test DNS - - - - Select <Check> and press [Enter] - - Insert the appropriate figure/screenshot - - - - - - You have the option to enable PROXY SUPPORT. In the - ”Bootstrap Image” section (see figure below), edit the - following fields to define a proxy. This cannot be used in tandem with - local repository support. - - - - Navigate to ”HTTP proxy” and enter your http - proxy address - - - - Select <Check> and press [Enter] - - Insert the appropriate figure/screenshot - - - - - - In the ”Time Sync” section (see figure below), - change the following fields to appropriate values: - - - - NTP Server 1 <Customer NTP server 1> - - - - NTP Server 2 <Customer NTP server 2> - - - - NTP Server 3 <Customer NTP server 3> - - Insert the appropriate figure/screenshot - - - - - - Start the installation - - - - Select "Quit Setup" and press [Save and Quit]. - - - - The installation will now start. Wait until the login screen - is shown. - - - - -
- -
- Boot the Node Servers - - After the Fuel Master node has rebooted from the steps mentioned - above and is at the login prompt, you should boot the Node Servers (the - Compute/Control/Storage blades, nested or real) with a PXE booting scheme - so that the FUEL Master can pick them up for control. - - - - Enable PXE booting - - - - For every controller and compute server: enable PXE Booting - as the first boot device in the BIOS boot order menu, and hard - disk as the second boot device in the same menu. - - - - - - Reboot all the control and compute blades - - - - Wait for the availability of nodes to show up in the Fuel - GUI. - - - - Connect to the FUEL UI via the URL provided in the Console - (default: https://10.20.0.2:8443). - - - - Wait until all nodes are displayed in top right corner of - the Fuel GUI: Total nodes and Unallocated nodes (see figure - below): Insert the appropriate - figure/screenshot - - - - -
- -
- Install additional Plugins/Features on the FUEL node - - Use the following procedure to install additional plugins and - features: - - - - SSH to your FUEL node (e.g. root@10.20.0.2 pwd: r00tme) - - - - Select wanted plugins/features from the /opt/enfv/ - directory. - - - - Install each wanted plugin with the command: - - $ fuel plugins --install /opt/enfv/<plugin-name>-<version>.<arch>.rpm - - Expected output (see figure below): - - Plugin ....... was successfully installed. - - Insert the appropriate figure/screenshot - - -
- -
- Create an OpenStack Environment - - Follow the procedure below to create an OpenStack - environment: - - - - Connect to Fuel WEB UI with a browser (default: - https://10.20.0.2:8443) (login: admin/admin) - - - - Create and name a new OpenStack environment that you want to - install - - - - Select ”Mitaka on Ubuntu 14.04” and press - [Next] - - - - Select ”compute virtualization method”, then select - ”QEMU-KVM as hypervisor” and press [Next] - - - - Select ”network mode” - - - - Select ”Neutron with ML2 plugin” - - - - Select ”Neutron with tunneling segmentation”, - required when using the ODL or ONOS plugins. - - - - Press [Next] - - - - - - Select ”Storage Back-ends”, then ”Ceph for - block storage” and press [Next] - - - - Select the ”additional services” you wish to - install - - - - Check option ”Install Ceilometer and Aodh” and - press [Next] - - - - - - Create the new environment by clicking the [Create] - Button. - - -
- -
- Configure the network environment - - To configure the network environment please follow these - steps: - - - - Open the environment you previously created - - - - Open the networks tab and select the ”default” Node - Networks group on the left pane (see figure below) . - - Insert the appropriate figure/screenshot - - - - Update the Public Network configuration and change the following - fields to appropriate values: - - - - CIDR to <CIDR for Public IP Addresses> - - - - IP Range Start to <Public IP Address start> - - - - IP Range End to <Public IP Address end> - - - - Gateway to <Gateway for Public IP Addresses> - - - - Check <VLAN tagging> - - - - Set appropriate VLAN ID - - - - - - Update the Storage Network Configuration: - - - - Set CIDR to an appropriate value (default - 192.168.1.0/24) - - - - Set IP Range Start to an appropriate value (default - 192.168.1.1) - - - - Set IP Range End to an appropriate value (default - 192.168.1.254) - - - - Set VLAN to an appropriate value (default 102) - - - - - - Update the Management Network configuration: - - - - Set CIDR to an appropriate value (default - 192.168.0.0/24) - - - - Set IP Range Start to an appropriate value (default - 192.168.0.1) - - - - Set IP Range End to an appropriate value (default - 192.168.0.254) - - - - Check <VLAN tagging> - - - - Set appropriate VLAN ID (default 101) - - - - - - Update the Private Network information: - - - - Set CIDR to an appropriate value (default - 192.168.2.0/24) - - - - Set IP Range Start to an appropriate value (default - 192.168.2.1) - - - - Set IP Range End to an appropriate value (default - 192.168.2.254) - - - - Check <VLAN tagging> - - - - Set appropriate VLAN tag (default 103) - - - - - - Select the ”Neutron L3” Node Networks group on the - left pane - - - - Update the Floating Network configuration: - - - - Set the Floating IP range start (default - 172.16.0.130) - - - - Set the Floating IP range end (default 172.16.0.254) - - - - Set the Floating network name (default - admin_floating_net) - - - - - - Update the Internal Network configuration: - - - - Set Internal network CIDR to an appropriate value (default - 192.168.111.0/24) - - - - Set Internal network gateway to an appropriate value - - - - Set the Internal network name (default - admin_internal_net) - - - - - - Update the Guest OS DNS servers by setting Guest OS DNS Server - values appropriately - - - - Save Settings - - - - Select the ”Other” Node Networks group on the left - pane (see figure below) Insert the appropriate - figure/screenshot - - - - Update the Public Network assignment by checking the box for - ”Assign public network to all nodes” (required by - OpenDaylight). - - - - Update Host OS DNS Servers by providing the DNS server - settings - - - - Update Host OS NTP Servers by providing the NTP server - settings - - -
- -
- Select Hypervisor type - - Select the Hypervisor type in the following way: - - - - In the FUEL UI of your Environment, click the - ”Settings” Tab - - - - Select ”Compute” on the left side pane (see figure - below), check the KVM box and press [Save settings] - - Insert the appropriate figure/screenshot - - -
- -
- Enable Plugins - - To enable needed plugins, follow these steps: - - - - In the FUEL UI of your Environment, click the - ”Settings” Tab - - - - Select "Other" on the left side pane (see figure below), then - enable and configure the plugins of your choice Insert the - appropriate figure/screenshot - - -
- -
- Allocate Nodes to Environment and assign Functional Roles - - This is accomplished in the following way: - - - - Click on the ”Nodes” Tab in the FUEL WEB UI (see - figure below) Insert the appropriate - figure/screenshot - - - - Assign roles (see figure below): - - - - Click on the [+Add Nodes>] button - - - - Check <Controller>, <Telemetry - MongoDB> and - optionally an SDN Controller role (OpenDaylight controller/ONOS) - in the ”Assign Roles” Section. - - - - Check one node which you want to act as a Controller from - the bottom half of the screen - - - - Click [Apply Changes] - - - - Click on the [+Add Nodes] button - - - - Check the <Controller> and <Storage - Ceph OSD> - roles - - - - Check the next two nodes you want to act as Controllers from - the bottom half of the screen - - - - Click [Apply Changes] - - - - Click on the [+Add Nodes] button - - - - Check the <Compute> and <Storage - Ceph OSD> - roles - - - - Check the Nodes you want to act as Computes from the bottom - half of the screen - - - - Click [Apply Changes] Insert the appropriate - figure/screenshot - - - - - - Configure Interfaces (see figure below): - - - - Check [Select <All>] to select all allocated - nodes - - - - Click [Configure Interfaces] - - - - Assign interfaces (bonded) for mgmt-, admin-, private-, - public- and storage networks, and click [Apply] Insert the - appropriate figure/screenshot - - - - -
- -
- OPTIONAL - Set Local Mirror Repos - - The following steps must be executed if you are in an environment - with no connection to the Internet. The Fuel server delivers a local repo - that can be used for installation/deployment of Openstack. - - - - In the Fuel UI of your environment, click the Settings Tab and - select "General" from the left pane - - - - Replace the URI values for the ”Name” values - outlined below: - - - - ”ubuntu” URI=”deb - http://<ip-of-fuel-server>:8080/mirrors/ubuntu/trusty main - universe multiverse” - - - - ”ubuntu-security” URI=”deb - http://<ip-of-fuelserver>:8080/mirrors/ubuntu/trusty-security - main universe multiverse” - - - - ”ubuntu-updates” URI=”deb - http://<ip-of-fuelserver>:8080/mirrors/ubuntu/trusty-updates - main universe multiverse” - - - - ”mos” URI=”deb - http://<ip-of-fuel-server>::8080/mitaka-9.0/ubuntu/x86_64 - mos9.0 main restricted” - - - - ”Auxiliary” URI=”deb - http://<ip-of-fuel-server>:8080/mitaka-9.0/ubuntu/auxiliary - auxiliary main restricted” - - - - - - Click [Save Settings] at the bottom to save your changes. - - -
- -
- Target Specific Configuration - - - - Set up targets for provisioning with non-default - ”Offloading Modes”. - - Some target nodes may require additional configuration after - they are PXE booted (bootstrapped). The most frequent changes occur in - the defaults of ethernet device ”Offloading Modes” - settings (e.g. certain target ethernet drivers may strip VLAN traffic - by default). - - If your target ethernet drivers have incorrect - ”Offloading Modes” defaults, in the ”Configure - interfaces” page (described above), expand the affected - interface’s ”Offloading Modes” and (un)check the - settings you need (see figure below): Insert the appropriate - figure/screenshot - - - - Set up targets for ”Verify Networks” with - non-default ”Offloading Modes”. - - Please check the Release - Notes for the 1.0 release of ENFV Core when using Fuel as a deployment - tool, Change this to an Olink to the Release notes - once the ID has been created for that book. Add the ID to - pardoc-names.xml and pardoc-common if needed, then updated the - OlinkDBfor an updated and comprehensive list of known issues - and limitations, including the ”Offloading Modes” not - being applied during the ”Verify Networks” step. - - Setting custom ”Offloading Modes” in Fuel GUI will - only apply during provisioning and not during ”Verify - Networks”. If your targets need this change, you have to apply - ”Offloading Modes” settings manually to bootstrapped - nodes. E.g.: Our driver has the ”rx-vlan-filter” default - ”on” (expected ”off”) on the Openstack - interface ”eth1”, preventing VLAN traffic from passing - during ”Verify Networks”. - - - - From Fuel master console identify target nodes' admin IPs - (see figure below): - - $ fuel nodes - - Insert the appropriate figure/screenshot - - - - SSH into each of the target nodes and disable the - ”rx-vlan-filter” on the affected physical - interface(s) allocated for OpenStack traffic (eth1): - - $ ssh root@10.20.0.6 ethtool -K eth1 rx-vlan-filter off - - - - Repeat the step above for all affected nodes/interfaces in - the POD - - - - -
- -
- Verify Networks - - It is important that the Verify Networks action is performed as it - will verify that Communicate what is Communicate and does this - apply to our settings works for the networks you have setup. - Also, check that packages needed for a successful deployment can be - fetched: - - - - From the FUEL UI in your environment, select the Networks Tab, - then select ”Connectivity check” on the left pane (see - figure below): Insert the appropriate - figure/screenshot - - - - Select [Verify Networks] - - - - Continue to fix your topology (physical switch, etc) until the - ”Verification Succeeded” and ”Your network is - configured correctly” messages are shown. - - -
- -
- Deploy your Environment - - In order to deploy your environment, do the following steps: - - - - In the Fuel GUI, click on the ”Dashboard” - Tab - - - - Click on [Deploy Changes] in the ”Ready to Deploy?” - section - - - - Examine any information notice that pops up and click - [Deploy] - - - - Wait for your deployment to complete. You can view the - ”Dashboard” Tab to see the progress and status of your - deployment. -
-
\ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/installation_health_check.xml b/book-enea-nfv-core-installation-guide/doc/installation_health_check.xml deleted file mode 100644 index 9cfaaa5..0000000 --- a/book-enea-nfv-core-installation-guide/doc/installation_health_check.xml +++ /dev/null @@ -1,27 +0,0 @@ - Installation Health-Check - To ensure apt performance, the system health-check must be performed. - This is done in the following way (see figure below): - insert figure - Click the ”Health Check” tab inside your Environment - in the FUEL Web UI - Check the [Select All] option, then click [Run Tests] - Allow tests to run and investigate results where - appropriate - \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/installation_instructions.xml b/book-enea-nfv-core-installation-guide/doc/installation_instructions.xml new file mode 100644 index 0000000..0e539de --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/installation_instructions.xml @@ -0,0 +1,1259 @@

Installation Instructions

ENEA NFV Core 1.0 leverages the work in the OPNFV Project, delivering selected Installer DVD images together with instructions on how to set up the Installers and deploy OPNFV releases on a Pharos compliant test lab. These images can be accessed on the OPNFV Downloads page.

ENEA NFV Core uses the Fuel@OPNFV Installer as a deployment facility, hereafter referred to as Fuel. Fuel is an automated deployment tool capable of automatically provisioning and deploying OpenStack on a cluster of servers. ENEA NFV Core 1.0 is based on the OPNFV release Danube, which is also available for aarch64 servers through the OPNFV Armband project driven by ENEA, which likewise uses the Fuel Installer. The Armband project is out of the scope of this document, but information is available online on the OPNFV wiki. The OPNFV download page provides general instructions for building and installing the Fuel Installer ISO, and on how to deploy OPNFV Danube using Fuel on a Pharos compliant test lab. Throughout the remainder of this document there will be references to this Installation Guide, sometimes referred to simply as ”the guide”, ”the installation guide” or the ”Fuel Installation Guide”. The next sections of this chapter explain in detail the procedure for configuring and installing the Fuel Master, followed by the configuration and deployment of ENEA NFV Core 1.0 on the test lab.

Covering chapters 1-6 of the Fuel Installation Guide is not mandatory, but is useful for better understanding the hardware requirements and how the deployment process works. Also note that since an ISO is provided, it is not necessary to build an ISO image from scratch.

The following sections refer to specific sub-chapters in the installation guide, indicated at the end of each title in round parentheses. Chapters of the installation guide which are not mentioned can be safely skipped.

Before starting the installation of this release of ENFV Core, certain preparations must be done to ensure optimal performance.
Retrieving the ISO image

First, the Fuel deployment ISO image needs to be retrieved. The .iso image download link for this release can be found in the OPNFV documentation and software downloads.

Update this url link as appropriate

This refers to chapter 3.1 in the installation guide. In this case the ENEA-provided ISO image is to be used, which should have been made available.
+ +
Other Preparations

Chapter 3.3 of the installation guide provides links to further documentation, which is useful but not mandatory.

Next, familiarize yourself with Fuel by reading the following documents:

Should any of these 4 documents be included/referenced as part of our guide or should we include something else?

Fuel Installation Guide

Fuel User Guide

Fuel Developer Guide

Fuel Plugin Developers Guide

Prior to installation, a number of deployment-specific parameters must be collected, such as:

Change the following parameters as appropriate

Provider sub-net and gateway information

Provider VLAN information

Provider DNS addresses

Provider NTP addresses

Network overlay you plan to deploy (VLAN, VXLAN, FLAT)

How many nodes and what roles you want to deploy (Controllers, Storage, Computes)

Monitoring options you want to deploy (Ceilometer, Syslog, etc.)

Other options not covered in this document are available via the links above.

This information will be needed for the configuration procedures provided in this document.
+ +
Hardware Requirements

The following minimum hardware requirements must be met for the installation of ENFV Core using Fuel to be successful:

Hardware Aspect | Requirement
Nr. of nodes | Minimum 6 (3 for non-redundant deployment): 1 Fuel deployment master (may be virtualized), 3 Controllers (1 co-located mongo/ceilometer role, 2 Ceph-OSD roles), 1 Compute (1 co-located Ceph-OSD role)
CPU | Minimum 1 socket x86_AMD64 with Virtualization support
RAM | Minimum 16GB/server (depending on VNF work load)
Disk | Minimum 256GB 10kRPM spinning disks
Networks | 4 Tagged VLANs (PUBLIC, MGMT, STORAGE, PRIVATE) and 1 Un-Tagged VLAN for PXE Boot - ADMIN Network. Note: These can be allocated to a single NIC or spread out over multiple NICs, as supported by your hardware.

The reference lab deployment used 6 physical nodes:

1 x Fuel deployment master (virtualized), x86 based

3 x Cavium ThunderX 1U 48 cores R120-T30 (https://www.avantek.co.uk/arm-server-r120-t30/) as Controller nodes (for an HA configuration: 1 collocated mongo/ceilometer role, 1 Ceph-OSD role, 1 Vitrage Controller role)

2 x Cavium ThunderX 2U 96 cores R270-T60 (https://www.avantek.co.uk/arm-server-r270-t60/) as Compute nodes (with collocated Ceph-OSD roles)

RAM – 128 GB on the Controller nodes, 256 GB on the Compute nodes

Disk – 1 x 120GB SSD and 1 x 2TB SATA 5400 rpm

Networks – Apart from the integrated NICs, one Intel ® 82574L PCIe card was also installed on each server, used for Fuel Admin
+ +
Install Fuel Master

This section describes the installation of the ENFV Core installation server (Fuel master) as well as the deployment of the full ENFV Core reference platform stack across a server cluster. It is recommended to install the Fuel Master on a VM using virt-manager, with a minimum of 8GB of RAM, 4 CPUs and at least 100GB of disk.

Mount the ENEA NFV Core 1.0 ISO file/media as a boot device to the Fuel Master VM.

Reboot the VM and make sure it boots from the ISO:

The system now boots from the ISO image

Select Fuel Install (Static IP) (see figure below)

Press [Enter]

Wait until the Fuel setup screen is shown; this can take up to 30 minutes.

In the Fuel User section, confirm/change the default password (see figure below).

Enter ”admin” in the Fuel password input

Enter ”admin” in the Confirm password input

Select ”Check” and press [Enter]

In the ”Network Setup” section, configure DHCP/Static IP information for your FUEL node. E.g.: ETH0 is 10.20.0.2/24 for FUEL booting and ETH1 is DHCP in your corporate/lab network (see figure below).

Configure eth1 here; it should be the interface to the outside world. In this example ETH1 is configured with 10.0.6.10/24 and the default gateway is 10.0.6.254

In the PXE setup menu, the default values can be left unchanged.

In the DNS & Hostname section the recommended values are as presented in the figure below:

The Bootstrap Image section should be skipped; the ISO is configured in advance to use the proper repositories.

In the Time Sync section (see figure below), change the following fields to appropriate values. It is strongly advised to avoid the fuel.pool.ntp.org values and instead set them to pool.ntp.org:

Enable experimental and advanced features

In the ”Features groups” section (see figure below), enable the checkboxes for Experimental and Advanced features

Move to the <Apply> button and press <Enter>

Start the installation

Select "Quit Setup" and press [Save and Quit].

The installation will now start. Wait until the login screen is shown.
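For reference, the Fuel Master VM recommended at the start of this section could also be created on the command line instead of through the virt-manager GUI. The sketch below only mirrors the sizing given above (8GB RAM, 4 vCPUs, 100GB disk); the disk path, bridge names and ISO file name are illustrative assumptions, not values taken from this guide:

# Sketch only: disk path, bridge names and ISO name are assumptions.
# Sizing matches the recommendation above: 8GB RAM, 4 vCPUs, 100GB disk.
$ virt-install --name fuel-master --ram 8192 --vcpus 4 \
    --disk path=/var/lib/libvirt/images/fuel-master.qcow2,size=100 \
    --cdrom /path/to/enea-nfv-core-1.0.iso \
    --network bridge=br-pxe --network bridge=br-lab \
    --graphics vnc

In such a setup the first bridge would carry the 10.20.0.0/24 Fuel/PXE network and the second the corporate/lab network, mirroring the ETH0/ETH1 split described above.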
+ +
Boot the Servers

Follow the same steps as indicated in the installation guide. Wait until the Fuel Master installation is complete, which should be indicated by the VM restarting and prompting for user login.

After the Fuel Master node has rebooted from the above steps and is at the login prompt, you should boot the Node Servers (your Compute/Control/Storage blades, nested or real) with a PXE booting scheme so that the FUEL Master can pick them up for control.

Enable PXE booting

For every controller and compute server: enable PXE Booting as the first boot device in the UEFI (EDK2) boot order menu, and hard disk as the second boot device in the same menu.

Reboot all the control and compute blades.

Wait for the nodes to show up in the Fuel GUI.

Connect to the FUEL UI via the URL provided in the Console (default: https://10.20.0.2:8443)

Wait until all nodes are displayed in the top right corner of the Fuel GUI: Total nodes and Unallocated nodes (see figure below).
+ +
Installing additional Plugins/Features on FUEL

In order to obtain the set of extra features used by ENEA NFV Core 1.0, a few extra Fuel plugins have to be installed at this stage. Further configuration steps will also need to be performed after the installation is complete; details on these follow later on.

The following extra plugins need to be installed:

Fuel Vitrage Plugin

Zabbix for Fuel

Tacker VNF Manager

Log in to the Fuel master via SSH using the default credentials (e.g. root@10.20.0.2 pwd: r00tme) and install the additional plugins:

$ fuel plugins --install /opt/opnfv/vitrage-1.0-1.0.4-1.noarch.rpm
$ fuel plugins --install zabbix_monitoring-2.5-2.5.3-1.noarch.rpm
$ fuel plugins --install tacker-1.0-1.0.0-1.noarch.rpm

Expected output: Plugin ....... was successfully installed.
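As a quick sanity check, the list of registered plugins can be read back from the same console. This is a generic Fuel CLI listing, suggested here only as a verification step:

# Lists the plugins known to the Fuel master; the three plugins installed
# above should appear with their name and version.
$ fuel plugins --list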
+ +
Create an OpenStack Environment

Follow the procedure below to create an OpenStack environment:

Connect to the Fuel WEB UI with a browser (default: https://10.20.0.2:8443) (login: admin/admin)

Create and name a new OpenStack environment that you want to install.

Select ”<Newton on Debian 9> (aarch64)” and press <Next>

Select ”compute virtualization method”, then select ”QEMU-KVM as hypervisor” and press [Next].

Select ”network mode”

Select ”Neutron with ML2 plugin”

Select ”Neutron with VLAN segmentation” (recommended when enabling DPDK).

Press [Next]

Select ”Storage Back-ends”, then ”Ceph for block storage” and press [Next]

In the Additional Services section, select ”Install Vitrage”:

Create the new environment by clicking the [Create] Button.
+ +
Configure the Network Environment

To configure the network environment specifically for a DPDK-based scenario, please follow these steps:

Open the environment you previously created.

Open the networks tab and select the ”default” Node Networks group on the left pane (see figure below).

Update the Public network configuration and change the following fields to appropriate values:

CIDR to <CIDR for Public IP Addresses>

IP Range Start to <Public IP Address start> (recommended to start with x.x.x.41)

IP Range End to <Public IP Address end> (recommended to end with x.x.x.100)

Gateway to <Gateway for Public IP Addresses>

Check <Use VLAN tagging> if needed. For simplicity it’s recommended to use the public network in untagged mode.

Set appropriate VLAN ID

Update the Storage Network Configuration:

It’s recommended to keep the default CIDR

Set IP Range Start to an appropriate value (default 192.168.1.1)

Set IP Range End to an appropriate value (default 192.168.1.254)

Set VLAN tagging as needed

Update the Management Network configuration:

It’s recommended to keep the default CIDR

Set IP Range Start to an appropriate value (default 192.168.0.1)

Set IP Range End to an appropriate value (default 192.168.0.254)

Check <VLAN tagging>

Set appropriate VLAN ID (default 101)

Update the Private Network information:

It’s recommended to keep the default CIDR

Set IP Range Start to an appropriate value (default 192.168.2.1)

Set IP Range End to an appropriate value (default 192.168.2.254)

Check <VLAN tagging>

Set appropriate VLAN tag (default 103)

Select the ”Neutron L3” Node Networks group on the left pane

Update the Floating Network configuration:

Set the Floating IP range start (recommended to start with x.x.x.101)

Set the Floating IP range end (recommended to end with x.x.x.200)

Update the Internal Network configuration:

It’s recommended to keep the default CIDR and mask

Set Internal network gateway to an appropriate value

Update the Guest OS DNS servers with appropriate values.

Save Settings

Select the ”Other” Node Networks group on the left pane (see figure below).

Make sure ”Public Gateway is Available” and ”Assign public networks to all nodes” are checked.

Update Host OS DNS Servers settings

Update Host OS NTP Servers settings
+ +
+ Select Hypervisor type + + Select the Hypervisor type in the following way: + + + + In the FUEL UI of your Environment, click the + ”Settings” Tab + + + + Select ”Compute” on the left side pane, check the + KVM box and press [Save settings]: + + + + + + + + +
+ +
Add/Remove Repositories

By default OPNFV Fuel uses a set of repositories as package sources. These hold both OpenStack components and other needed packages.

In order to speed up the deployment process, Fuel will create its own local mirror, which is reachable on the Admin interface (e.g. 10.20.0.2:8080/newton-10.0/ubuntu/x86-64). However, more repositories are added that need an external connection.

It is possible to avoid using external repositories and make the entire process completely offline. This way only the most basic packages will be installed, but the process will be quicker and will not depend on an Internet connection. To do this, just make sure that the Repositories list contains only ubuntu-local, mos and Auxiliary.

In the FUEL UI of your Environment, click the ”Settings” Tab

Select ”General” and scroll down to the Repositories list (see figure below).

Remove any extra repositories that point to external repositories by clicking the delete button on the right of the repository entry.
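For orientation, each entry in the Repositories list pairs a repository name with a deb-style URI. A local-mirror entry would look roughly like the line below; the suite and component names are assumptions extrapolated from the mirror path mentioned above, not values confirmed by this guide:

# Hypothetical sketch of a local-mirror repository URI (suite and
# components are assumed, only the host/path comes from the text above):
deb http://10.20.0.2:8080/newton-10.0/ubuntu/x86-64 mos10.0 main restricted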
+ +
+ Enable Plugins + + In the FUEL UI of your Environment, click the + ”Settings” Tab and select OpenStack Services on the left side + pane. Enable the Tacker VNF manager plugin: + + + + + + + + Select "Other" on the left pane and do the following: + + + + Enable and configure Zabbix for Fuel + + + + Enable and configure Fuel Vitrage Plugin + + + + Check ”Use Zabbix Datasource in Vitrage” + + + + + + + + +
+ +
Allocate Nodes and assign Functional Roles

This is accomplished in the following way:

Click on the ”Nodes” Tab in the FUEL WEB UI (see figure below):

Assign roles (see figure below):

Click on the <+Add Nodes> button

Check <Controller>, <Telemetry - MongoDB>

Check one node which you want to act as a Controller from the bottom half of the screen.

Click <Apply Changes>

Click on the <+Add Nodes> button

Check the <Controller> and <Storage - Ceph OSD> roles.

Check one node to assign these roles

Click <Apply Changes>

Click on the <+Add Nodes> button

Check <Controller>

Check one node to assign as a Controller

Click <Apply Changes>

Click on the <+Add Nodes> button

Check the <Compute> and <Storage - Ceph OSD> roles.

Check the Nodes you want to act as Computes from the bottom half of the screen.

Click <Apply Changes>

Internally, for testing, the Controller nodes had a different network configuration compared to the Compute nodes, but that is not mandatory. The 5 nodes in the cluster can have the exact same configuration.

Configure interfaces for Controller nodes (see figure below):

Select all allocated nodes

Click <Configure Interfaces>

Assign interfaces (in this case Public, Storage and Management were set on the first 10GbE Port and Private on the second 10GbE port, with Admin on a 1Gb port), and click [Apply]

Configure Compute nodes interfaces:

Select the Compute nodes

Click <Configure Interfaces>

Assign interfaces (in this case Public, Storage and Management were set on the first 10GbE Port and Private on the second 10GbE port; Admin is on a 1Gb port)

For the Private network enable DPDK

Click Apply
+ +
Configure hugepages

This step is needed for DPDK-based scenarios and is not covered in the installation guide.

does the comment made above still apply and if so, should this section be removed?

Click on the gear on the right of a Compute node

In the menu that shows up, click on Node Attributes

Set Huge Pages for Nova and DPDK to appropriate values (see figure below); it’s recommended to use at least 2048 pages of 2MB for each of Nova and DPDK

Click <Save Settings>

Perform the same configuration for the other Compute nodes
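After deployment, the hugepage pools configured here can be checked directly on a Compute node. This is a generic Linux check, not a Fuel-specific one; node-4 below stands for one of the Compute nodes in the example cluster listed later in this guide:

# On a Compute node, show the hugepage counters; HugePages_Total should
# reflect the values set in the Node Attributes above.
root@node-4:~# grep Huge /proc/meminfo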
+ +
Target Specific Configuration

Follow the guide for setting custom target configuration, as needed. Skip this step if no specific configurations are required.

Set up targets for provisioning with non-default ”Offloading Modes”.

Some target nodes may require additional configuration after they are PXE booted (bootstrapped). The most frequent changes occur in the defaults of ethernet device ”Offloading Modes” settings (e.g. certain target ethernet drivers may strip VLAN traffic by default).

If your target ethernet drivers have incorrect ”Offloading Modes” defaults, in the ”Configure interfaces” page (described above), expand the affected interface’s ”Offloading Modes” and (un)check the settings you need (see figure below): Insert the appropriate figure/screenshot

Set up targets for ”Verify Networks” with non-default ”Offloading Modes”.

Please check the Release Notes for the 1.0 release of ENFV Core when using Fuel as a deployment tool, Change this to an Olink to the Release notes once the ID has been created for that book. Add the ID to pardoc-names.xml and pardoc-common if needed, then update the OlinkDB for an updated and comprehensive list of known issues and limitations, including the ”Offloading Modes” not being applied during the ”Verify Networks” step.

Setting custom ”Offloading Modes” in the Fuel GUI will only apply during provisioning and not during ”Verify Networks”. If your targets need this change, you have to apply the ”Offloading Modes” settings manually to bootstrapped nodes. E.g.: Our driver has the ”rx-vlan-filter” default ”on” (expected ”off”) on the OpenStack interface ”eth1”, preventing VLAN traffic from passing during ”Verify Networks”.

From the Fuel master console, identify the target nodes' admin IPs (see figure below):

$ fuel nodes

Insert the appropriate figure/screenshot

SSH into each of the target nodes and disable ”rx-vlan-filter” on the affected physical interface(s) allocated for OpenStack traffic (eth1):

$ ssh root@10.20.0.6 ethtool -K eth1 rx-vlan-filter off

Repeat the step above for all affected nodes/interfaces in the POD
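To read a driver's offload defaults back, before or after the change, ethtool's lowercase -k (show) option can be used in the same way, e.g.:

# Show the offload settings for eth1 and filter for the VLAN-related ones;
# rx-vlan-filter should report "off" after the fix above.
$ ssh root@10.20.0.6 "ethtool -k eth1 | grep -i vlan"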
+ +
Verify Networks

It is important that the Verify Networks action is performed, as it will verify that communication works for the networks you have set up. What is ”Communicate” and does this apply to our settings? Also, check that packages needed for a successful deployment can be fetched:

From the FUEL UI in your environment, select the Networks Tab, then select ”Connectivity check” on the left pane.

Select [Verify Networks]

Continue to fix your topology (physical switch, etc.) until the ”Verification Succeeded” and ”Your network is configured correctly” messages are shown.
+ +
Deploy your Environment

As instructed in the installation guide, after the configuration is complete and the network connectivity has been checked, it’s time to deploy. From the Dashboard tab, click on Deploy. The process should take around 2 hours the first time after a fresh Fuel Master installation. Part of the deploy process is to build the target image, which can take between 30 and 60 minutes.

The entire deploy process goes through two phases:

Provisioning – at this stage the nodes have been booted from PXE and are running a small bootstrap image in ramdisk. The provisioning process will write the target image to disk and make other preparations for running it after reboot.

OpenStack installation – at this stage the nodes have been rebooted on the newly written target image, and the OpenStack components are installed and configured.
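If the Web UI is not convenient, deployment of pending changes can normally also be triggered from the Fuel master CLI. This is a sketch; the environment id 1 is an assumption and should be checked first:

# List environments to find the id of the environment created earlier,
# then trigger deployment of all pending changes for it.
[root@fuel ~]# fuel env
[root@fuel ~]# fuel --env 1 deploy-changes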
+ +
Installation Health-Check

Once the deploy process is complete, it is recommended to run a health check from the Fuel menu, as described in the installation guide. To ensure proper performance, the system health-check must be performed. This is done in the following way:

Click the ”Health Check” tab inside your Environment in the FUEL Web UI

Check the [Select All] option, then click [Run Tests]

Allow tests to run and investigate results where appropriate
+
\ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/post_deploy_config.xml b/book-enea-nfv-core-installation-guide/doc/post_deploy_config.xml new file mode 100644 index 0000000..74e18ce --- /dev/null +++ b/book-enea-nfv-core-installation-guide/doc/post_deploy_config.xml @@ -0,0 +1,117 @@

Post-Deploy Configurations

For running DPDK applications it is useful to isolate the available CPUs between the Linux kernel, ovs-dpdk and nova-compute.

All of the hardware nodes can be accessed through SSH from the Fuel console. Simply create an SSH connection to Fuel (e.g. root@10.20.0.2 pwd: r00tme) and run the following command to get a list of the servers and the IPs where they can be reached:

[root@fuel ~]# fuel node
id | status | name             | cluster | ip        | mac               | roles                     | pending_roles | online | group_id
---+--------+------------------+---------+-----------+-------------------+---------------------------+---------------+--------+---------
 4 | ready  | Untitled (8c:c2) | 1       | 10.20.0.6 | 68:05:ca:46:8c:c2 | ceph-osd, compute         |               | 1      | 1
 2 | ready  | Untitled (8c:45) | 1       | 10.20.0.5 | 68:05:ca:46:8c:45 | controller, mongo, tacker |               | 1      | 1
 1 | ready  | Untitled (8c:d4) | 1       | 10.20.0.4 | 68:05:ca:46:8c:d4 | ceph-osd, controller      |               | 1      | 1
 5 | ready  | Untitled (8c:c9) | 1       | 10.20.0.7 | 68:05:ca:46:8c:c9 | ceph-osd, compute         |               | 1      | 1
 3 | ready  | Untitled (8b:64) | 1       | 10.20.0.3 | 68:05:ca:46:8b:64 | controller, vitrage       |               | 1      | 1
[root@fuel ~]# ssh node-3
Warning: Permanently added 'node-3' (ECDSA) to the list of known hosts.

The programs included with the Debian GNU/Linux system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.

Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent
permitted by applicable law.
Last login: Thu Aug 24 19:40:06 2017 from 10.20.0.2
root@node-3:~#
CPU isolation configuration

It is a good idea to isolate the cores that will perform packet processing and run QEMU. The example below shows how to set isolcpus on a compute node that has 1 x Intel Xeon processor E5-2660 v4, 14 cores, 28 hyper-threaded cores.

root@node-3:~# cat /etc/default/grub | head -n 10
# If you change this file, run 'update-grub' afterwards to update
# /boot/grub/grub.cfg.
# For full documentation of the options in this file, see:
# info -f grub -n 'Simple configuration'

GRUB_DEFAULT="Advanced options for Ubuntu>Ubuntu, with Linux 4.4.50-rt62nfv"
GRUB_TIMEOUT=10
GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"
GRUB_CMDLINE_LINUX=" console=tty0 net.ifnames=1 biosdevname=0 rootdelay=90 nomodeset hugepagesz=2M hugepages=1536 isolcpus=10-47,58-95"
root@node-3:~# update-grub
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-4.10.0-9924-generic
Found initrd image: /boot/initrd.img-4.10.0-9924-generic
done
root@node-3:~# reboot
Connection to node-3 closed by remote host.
Connection to node-3 closed.
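After the node comes back up, the isolation can be verified with standard Linux tools:

# Confirm the isolcpus argument is on the running kernel command line.
root@node-3:~# cat /proc/cmdline
# PID 1's CPU affinity should exclude the isolated CPUs (10-47,58-95 above).
root@node-3:~# taskset -cp 1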
+ +
Nova Compute configurations

In order to isolate the OpenStack instances on dedicated CPUs, nova must be configured with vcpu_pin_set. Please refer to the Nova configuration guide for more information.

The example below applies again to an Intel Xeon processor E5-2660 v4. Here vcpu_pin_set is configured so that pairs of thread siblings are chosen.

root@node-3:~# cat /etc/nova/nova.conf | grep vcpu_pin_set
vcpu_pin_set = "16-47,64-95"
root@node-3:~#

After modifying nova configuration options on the Compute nodes, it is necessary to restart nova-compute for the changes to take effect.

root@node-3:~# systemctl restart nova-compute
root@node-3:~#
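To see which host CPUs are thread siblings, and therefore which pairs to keep together in vcpu_pin_set, the CPU topology can be read from sysfs. The "16,64" pairing mentioned in the comment is an assumption based on the ranges above, not a value confirmed by this guide:

# Prints the sibling set for CPU 16; on the layout above it is expected
# to print "16,64", matching the paired ranges in vcpu_pin_set.
root@node-3:~# cat /sys/devices/system/cpu/cpu16/topology/thread_siblings_list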
+ +
OpenvSwitch with DPDK configuration

OPNFV Danube 1.0 comes with OpenvSwitch as the virtual switch option. In the selected scenario, OpenvSwitch also uses DPDK for passing traffic to and from the VMs.

One of the features that comes with OpenvSwitch v2.7.0 is the ability to set pmd-cpu-mask. This effectively isolates the userspace PMDs (poll mode drivers) on the specified set of CPUs.

By default, the OpenvSwitch that comes installed on the compute nodes has no pmd-cpu-mask. There is an option to set it from the Fuel menu before deploy, but it can always be set manually post-deploy as follows:

root@node-3:~# ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=7e0
root@node-3:~# ovs-vsctl get Open_vSwitch . other_config:pmd-cpu-mask
"7e0"
root@node-3:~#

No restart is required; OpenvSwitch automatically spawns new pmd threads and sets the affinity as necessary.
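The pmd-cpu-mask value is a hexadecimal bitmap with one bit per CPU core, so 7e0 (binary 11111100000) pins the PMD threads to cores 5-10. A mask for a different core range can be computed the same way, for example directly in the shell:

# Build a mask for 6 consecutive cores starting at core 5:
# ((1 << 6) - 1) gives six set bits, shifted left by the starting core.
$ printf "pmd-cpu-mask=%x\n" $(( ((1 << 6) - 1) << 5 ))
pmd-cpu-mask=7e0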
+
\ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/preface.xml b/book-enea-nfv-core-installation-guide/doc/preface.xml deleted file mode 100644 index 7befbd9..0000000 --- a/book-enea-nfv-core-installation-guide/doc/preface.xml +++ /dev/null @@ -1,130 +0,0 @@ - - - - Preface - - Before starting the installation of this release of ENFV Core, certain - preparations must be done to ensure optimal performance. - -
- Retrieving the ISO image - - First, the Fuel deployment ISO image needs to be retrieved. The .iso - image download link for this release can be found in OPNFV documentation and - software downloads - - Update this url link as appropriate -
- -
- Building the ISO image - - Alternatively, you may build the Fuel .iso from source by cloning - the Fuel git repository. To retrieve the repository for the 1.0 release, - use the following command: - - $ git clone {insert link to our repo here} - - Check-out the ENFV Core 1.0 release tag to set the HEAD to the - baseline required to replicate the current release: - - $ git checkout {insert ENFV Core 1.0 release tag} - - Go to the fuel directory and build the .iso image: - - $ cd fuel/build; make all - - Make sure this command works, if not, update it to the appropriate - one and verify it. - - For more information on how to build, please see the Build - instruction for the 1.0 release of ENFV Core when using Fuel as a - deployment tool - - Change the link above to what is needed -
- -
- Other Preparations - - Next, familiarize yourself with Fuel by reading the following - documents: - - Should any of these 4 documents be included/referenced as part of - our guide or should we include something else ? - - - - Fuel - Installation Guide - - - - Fuel - User Guide - - - - Fuel - Developer Guide - - - - Fuel - Plugin Developers Guide - - - - Prior to installation, a number of deployment specific parameters - must be collected, such as: - - Change the following parameters as appropriate - - - - Provider sub-net and gateway information - - - - Provider VLAN information - - - - Provider DNS addresses - - - - Provider NTP addresses - - - - Network overlay you plan to deploy (VLAN, VXLAN, FLAT) - - - - How many nodes and what roles you want to deploy (Controllers, - Storage, Computes) - - - - Monitoring options you want to deploy (Ceilometer, Syslog, - etc.). - - - - Other options not covered in the document are available in the - links above. - - This information will be needed for the configuration procedures - provided in this document. - - -
-
\ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/reference_index.xml-NOTES b/book-enea-nfv-core-installation-guide/doc/reference_index.xml-NOTES deleted file mode 100644 index d13d4b2..0000000 --- a/book-enea-nfv-core-installation-guide/doc/reference_index.xml-NOTES +++ /dev/null @@ -1,121 +0,0 @@ - - - - Reference Index - - The following is a collection of relevant references on which to draw - on: - -
- OPNFV - - Should these external remain as they are now, leading to opnfv, or - be changed to something referencing our own products if possible, or - omitted partially/entirely? - - - - OPNFV Home Page: http://www.opnfv.org - - - - OPNFV documentation and software downloads: https://www.opnfv.org/software/download - - -
- -
- OpenStack - - - - OpenStack Mitaka Release artifacts: http://www.openstack.org/software/mitaka - - - - OpenStack documentation: http://docs.openstack.org - - -
- -
- OpenDaylight - - - - OpenDaylight artifacts: http://www.opendaylight.org/software/downloads - - -
- -
- Fuel - - - - The Fuel OpenStack project: https://wiki.openstack.org/wiki/Fuel - - - - Fuel documentation overview: http://docs.openstack.org/developer/fuel-docs - - - - Fuel Installation Guide: http://docs.openstack.org/developer/fueldocs/userdocs/fuel-install-guide.html - - - - Fuel User Guide: http://docs.openstack.org/developer/fuel-docs/userdocs/fueluser-guide.html - - - - Fuel Developer Guide: http://docs.openstack.org/developer/fueldocs/devdocs/develop.html - - - - Fuel Plugin Developers Guide: http://docs.openstack.org/developer/fueldocs/plugindocs/fuel-plugin-sdk-guide.html - - - - Fuel OpenStack Hardware Compatibility List: https://www.mirantis.com/products/openstack-drivers-and-plugins/hardwarecompatibility-list - - -
- -
- Fuel in ENFV - - - - ENFV Installation instruction for the 1.0 release of ENFV when - using Fuel as a deployment tool: http://artifacts.opnfv.org/fuel/colorado/3.0/docs/installationprocedure/index.html - - - - ENFV Build instruction for the 1.0 release of ENFV when using - Fuel as a deployment tool: http://artifacts.opnfv.org/fuel/colorado/3.0/docs/buildprocedure/index.html - - - - ENFV Release Notes for the 1.0 release of ENFV when using Fuel - as a deployment tool: http://artifacts.opnfv.org/fuel/colorado/3.0/docs/releasenotes/index.html - - -
-
\ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/tor_config_req.xml b/book-enea-nfv-core-installation-guide/doc/tor_config_req.xml deleted file mode 100644 index 5afc26c..0000000 --- a/book-enea-nfv-core-installation-guide/doc/tor_config_req.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - - Top of the Rack (TOR) Configuration Requirements - - The switching infrastructure provides connectivity for the ENFV Core - infrastructure operations, tenant networks (East/West) and provider - connectivity (North/South). It also provides needed connectivity for the - Storage Area Network (SAN). - - To avoid traffic congestion, it is strongly encouraged that 3 - physically separated networks be used: 1 physical network for administration - and control, 1 physical network for tenant private and public networks, and - 1 for SAN. The switching connectivity can (but does not need to) be fully - redundant, in such case it comprises a redundant 10GE switch pair for each - of the 3 networks. - - The physical TOR switches are not automatically configured from the - Fuel ENFV reference platform. All networks involved in the ENFV Core - infrastructure as well as the provider networks and the private tenant VLANs - need to be manually configured. - - Manual configuration of the ENFV Core 1.0 hardware platform should be - carried out according to the ENFV Core - Pharos specification. - \ No newline at end of file diff --git a/book-enea-nfv-core-installation-guide/doc/use_cases_per_target_node.xml b/book-enea-nfv-core-installation-guide/doc/use_cases_per_target_node.xml index d1bb720..3d2582c 100644 --- a/book-enea-nfv-core-installation-guide/doc/use_cases_per_target_node.xml +++ b/book-enea-nfv-core-installation-guide/doc/use_cases_per_target_node.xml @@ -7,6 +7,6 @@ This chapter contains use-case examples for each target node pool(s) used/compatible with the Fuel Deployment Tool. - FIXME Team COSNOS/Enea NFV will need to fill this chapter with any + FIXME Team COSNOS/Enea NFV Core will need to fill this chapter with any and all appropriate use cases. \ No newline at end of file diff --git a/book-enea-nfv-core-release-info/doc/book.xml b/book-enea-nfv-core-release-info/doc/book.xml index d8ad5de..1f865b8 100644 --- a/book-enea-nfv-core-release-info/doc/book.xml +++ b/book-enea-nfv-core-release-info/doc/book.xml @@ -9,11 +9,8 @@ - \ No newline at end of file diff --git a/book-enea-nfv-core-release-info/doc/build_boot_generated.xml b/book-enea-nfv-core-release-info/doc/build_boot_generated.xml new file mode 100644 index 0000000..7affc4e --- /dev/null +++ b/book-enea-nfv-core-release-info/doc/build_boot_generated.xml @@ -0,0 +1,55 @@ + + + + Target Specific Instructions + + If the source has been fetched there will be a target specific README + file containing the build and boot instructions. To make it easier for the + reader, the contents of this file(s) have been extracted into the following + sections. + + + The build process duration may vary and be longer than expected in + some cases, depending on the individual build resources and parameters of + each target/machine supported in this release. + + + NOTE: Only EDIT THE TEMPLATE build_boot_template.xml file here + and also only edit the manifest template ".README" (name starting by a dot)! + A new build_boot_generated.xml file is created from the template and + sections are added below automatically from the README files for each target + when building the book! 
Only in the template file you see a line below with + SCRIPT_...._HERE and the text telling that this is a template + file. + +
+ Target inteld1521 + + NOTE: DO NOT EDIT THIS GENERATED FILE! Only edit the template + file. + +
+ Build Instructions for inteld1521 +
+ +
+ Boot Instructions for inteld1521 +
+
+ +
+ Target qemux86-64 + + NOTE: DO NOT EDIT THIS GENERATED FILE! Only edit the template + file. + +
+ Build Instructions for qemux86-64 +
+ +
+ Boot Instructions for qemux86-64 +
+
+
\ No newline at end of file diff --git a/book-enea-nfv-core-release-info/doc/build_boot_template.xml b/book-enea-nfv-core-release-info/doc/build_boot_template.xml new file mode 100644 index 0000000..e39dd81 --- /dev/null +++ b/book-enea-nfv-core-release-info/doc/build_boot_template.xml @@ -0,0 +1,34 @@ + + + + Target Specific Instructions + + If the source has been fetched (), there will be a target specific README + file containing the build and boot instructions. To make it easier for the + reader, the contents of this file(s) have been extracted into the following + sections. + + + The build process duration may vary and be longer than expected in + some cases, depending on the individual build resources and parameters of + each target/machine supported in this release. + + + NOTE: Only EDIT THE TEMPLATE build_boot_template.xml file here + and also only edit the manifest template ".README" (name starting by a dot)! + A new build_boot_generated.xml file is created from the template and + sections are added below automatically from the README files for each target + when building the book! Only in the template file you see a line below with + SCRIPT_...._HERE and the text telling that this is a template + file. + + SCRIPT_INCLUDES_BUILD_BOOT_SECTIONS_HERE + + This is a template file which you can edit. When + the book is built, a new XML chapter file is created where the above part of + this file is copied to the new XML file and all from the line above is + replaced automatically by build and boot sections from the README + files! + \ No newline at end of file diff --git a/book-enea-nfv-core-release-info/doc/machine_list_generated.xml b/book-enea-nfv-core-release-info/doc/machine_list_generated.xml new file mode 100644 index 0000000..2b9d17f --- /dev/null +++ b/book-enea-nfv-core-release-info/doc/machine_list_generated.xml @@ -0,0 +1,8 @@ + + + +# Set MACHINE to ONE of the targets in this release! +# export MACHINE=inteld1521 +# export MACHINE=qemux86-64 + diff --git a/book-enea-nfv-core-release-info/doc/pkgdiff_generated.xml b/book-enea-nfv-core-release-info/doc/pkgdiff_generated.xml new file mode 100644 index 0000000..e69de29 diff --git a/gen_known_issues.py b/gen_known_issues.py index be45982..aef605b 100644 --- a/gen_known_issues.py +++ b/gen_known_issues.py @@ -56,7 +56,7 @@ def jira_query(query): conditions = ("project=COSNOSCR", "issueType=bug", "resolution=Unresolved", - 'affectedversion="Cosnos 1.0"' + 'affectedversion="ENC1.0"' ) bugs = [] diff --git a/init.mk b/init.mk index 1c3035c..0950c27 100644 --- a/init.mk +++ b/init.mk @@ -1,5 +1,10 @@ -# ver R1.00/2017-03-16 Creation of this file -# ver R2.00/2017-04-05 Updated the docbuild.git clone command. 
+# A Makefile including this file should first set a non-default BRANCH_DOCENEACOMMON before including it,
+# and should have the targets init: s_docbuild and optionally initcommon: s_doceneacommon.
+# Typically, let doc: depend on init and initcommon inside the Makefile.
+# Optionally, set BOOK_GLOBALCLONEROOT manually to yes or to a parent dir for tmpcommon.
+# Versions of this init.mk file:
+# R1.00/2016-04-29
+# R2.00/2017-03-30 Changed linux/documentation.git and its old branch to linux/el_releases-common.git and its new branch
 ifeq ($(VERBOSE),yes)
 VERB :=
@@ -10,6 +15,15 @@ endif
 #Git repositories to be cloned
 REPO_DOCBUILD := build/docbuild.git
 NAME_DOCBUILD := docbuild
+REPO_DOCENEACOMMON := linux/el_releases-common.git
+NAME_DOCENEACOMMON := doceneacommon
+
+# Set a non-default BRANCH_DOCENEACOMMON in the Makefile
+BRANCH_DOCENEACOMMON ?= master
+# Use separate clones of el_releases-common.git for different branches; needed when cloning globally, where different distros risk ending up in the same place
+ifneq ($(BRANCH_DOCENEACOMMON),master)
+NAME_DOCENEACOMMON := doceneacommon_$(BRANCH_DOCENEACOMMON)
+endif
 GLOBALTMPCLONEROOT = ../../tmpcommon
 ifeq ($(BOOK_GLOBALCLONEROOT),)
@@ -26,26 +40,38 @@ ifeq ($(wildcard $(DIR_TMPCLONEROOT)),)
 $(error Parent of selected clone root does not exist ($(DIR_TMPCLONEROOT)))
 endif
-.PHONY: initbuild usageinit cleaninit
+.PHONY: initbuild initcommon usageinit cleaninit
 # Keep usageinit as default target here to avoid init by mistake
 usageinit: docusage
-	@echo 'make initbuild   Create s_docbuild and, if it does not exist, clone docbuild.git'
+	@echo 'make initbuild   Create s_docbuild and, if it does not exist, clone docbuild.git'
+	@echo 'make initcommon  Create s_doceneacommon and, if it does not exist, clone el_releases-common.git'
	@echo '                 Default clone in tmpcommon'
	@echo '                 BOOK_GLOBALCLONEROOT=yes       Clone in $(GLOBALTMPCLONEROOT)'
	@echo '                 BOOK_GLOBALCLONEROOT=parentdir Clone in parentdir/tmpcommon'
-
+	@echo '                 The default branch for el_releases-common.git is master'
+	@echo '                 BRANCH_DOCENEACOMMON=...       If another branch is wanted, the Makefile should set this'
	@echo 'make pullbuild   git pull in s_docbuild'
+	@echo 'make pullcommon  git pull in s_doceneacommon'
	@echo 'make cleaninit   Delete all s_* symlinks and local tmpcommon/'
+# MOVE these to the Makefile, and also set a non-default BRANCH_DOCENEACOMMON there
 initbuild: s_docbuild
+initcommon: s_doceneacommon
 pullbuild:
	$(VERB)if [ ! -d s_docbuild ]; then echo "ERROR: No s_docbuild exists?" ; exit 10 ; fi
	$(VERB)cd s_docbuild ; git pull
+pullcommon:
+	$(VERB)if [ ! -d s_doceneacommon ]; then echo "ERROR: No s_doceneacommon exists?" ; exit 10 ; fi
+	$(VERB)cd s_doceneacommon ; git pull
+
 s_docbuild: $(TMPCLONEROOT)/$(NAME_DOCBUILD)
	$(VERB)rm s_docbuild 2>/dev/null; ln -s $(TMPCLONEROOT)/$(NAME_DOCBUILD) s_docbuild
+s_doceneacommon: $(TMPCLONEROOT)/$(NAME_DOCENEACOMMON)
+	$(VERB)rm s_doceneacommon 2>/dev/null; ln -s $(TMPCLONEROOT)/$(NAME_DOCENEACOMMON) s_doceneacommon
+
 $(TMPCLONEROOT)/$(NAME_DOCBUILD):
	$(VERB)if [ ! -d "$(TMPCLONEROOT)" ] ; then mkdir -p "$(TMPCLONEROOT)" ; fi
	$(VERB)if [ -d "$(TMPCLONEROOT)/$(NAME_DOCBUILD)" ] ; then \
@@ -57,7 +83,18 @@ $(TMPCLONEROOT)/$(NAME_DOCBUILD):
 	git clone -b master git@git.enea.se:$(REPO_DOCBUILD) $(NAME_DOCBUILD) ; \
 	fi
+$(TMPCLONEROOT)/$(NAME_DOCENEACOMMON):
+	$(VERB)if [ ! -d "$(TMPCLONEROOT)" ] ; then mkdir -p "$(TMPCLONEROOT)" ; fi
+	$(VERB)if [ -d "$(TMPCLONEROOT)/$(NAME_DOCENEACOMMON)" ] ; then \
+	echo "Already exists $(TMPCLONEROOT)/$(NAME_DOCENEACOMMON)" ; \
+	echo "Doing git pull instead"; cd $(TMPCLONEROOT)/$(NAME_DOCENEACOMMON) ; git pull ; \
+	else \
+	echo "Cloning $(REPO_DOCENEACOMMON) in $(TMPCLONEROOT) as $(NAME_DOCENEACOMMON)" ; \
+	cd "$(TMPCLONEROOT)"; \
+	git clone -b $(BRANCH_DOCENEACOMMON) git@git.enea.com:$(REPO_DOCENEACOMMON) $(NAME_DOCENEACOMMON) ; \
+	fi
+
 cleaninit:
-	@echo "Clean only local tmpcommon/ and all local temporary symlinks s_docbuild etc."
+	@echo "Clean only local tmpcommon/ and all local temporary symlinks s_docbuild s_targets etc."
	$(VERB)rm -rf tmpcommon 2>/dev/null ; true
	$(VERB)rm s_* 2>/dev/null ; true
-d "$BB_TARGETREADME_BASE" ]; then echo "ERROR: Missing basedir for README files '$BB_TARGETREADME_BASE'"; exit; fi + +echo "`basename $0` Creating $BBXML from" +TARGETREADMES=`cd $BB_TARGETREADME_BASE ; ls -d */README | tr '\n' ' '` +echo " $TARGETREADMES" + +# README file formats: +# a) Sequence starts: ___ XXXX:yyyy or ___ XXXX:yyyy conffile +# where XXXX is a type, yyyy is text to be in title +# b) Inside sequence: ___ END ends the sequence (ignore rest of line) +# c) Inside sequence: # Documentation line +# d) Inside sequence: Anything else is command or config lines +# Conv.to XML: ">" "<" "&" and put all inside +# *) Anywhere ____xxxx Leading 4 underlines or more, always ignored +# unless one of the recognized XXXX +# *) Anywhere outside sequence, ignore all +# *) There can be multiple of each type of sequence in each README file +# with different yyyy + + +cat $BBTEMPLATE | awk ' + />SCRIPT_INCLUDES_BUILD_BOOT_SECTIONS_HERE$BBXML + + +# Long command lines: The awk code below breaks too long lines, but this is not perfect. +extractcmds_filter() { + echo " " | tr -d '\n' + sed '/^___/d;s/\&/\&/g' | sed 's//\>/g;/^$/d' | \ + awk 'BEGIN { MAX=90; } + ( length($0) > MAX ) { + LINE=$0; + while (length(LINE) > MAX) { + if (index(LINE," ") == 0 ) { + print "ERROR: PROBLEM: No space in too long line:" LINE > "/dev/stderr"; + print $LINE; + next; + } + i=MAX; while ( substr(LINE,i,1) != " " ) { i=i-1; if (i==0) {break;} } + print substr(LINE,0,i) "\\"; + REST=substr(LINE,i+1); + if ( length(REST) == 0 ) { next ; } + LINE=" " REST; + } + if ( length(LINE) > 0 ) { print LINE; next ; } + } + { print;}' + echo "" +} + +extractcmds_for_type() { # target/README BOOTorBUILD + README=$BB_TARGETREADME_BASE/"$1" + CMDTYPE="$2" + COMMANDSFOR=`egrep "___$CMDTYPE:" $README` + for CMDS in $COMMANDSFOR + do + cmdsfor=`echo "$CMDS" | sed 's/[^:]*://'` +#-- echo " $CMDTYPE for $cmdsfor" + cat "$README" | sed -n "/$COMMANDSFOR/,/___END/p" | extractcmds_filter + done +} + +for targetreadme in $TARGETREADMES +do + TARGET=`dirname $targetreadme` + echo "" >>$BBXML + echo "
" >>$BBXML + echo " Target $TARGET" >>$BBXML + echo " NOTE: DO NOT EDIT THIS GENERATED FILE! Only edit the template file." >>$BBXML + echo "
" >>$BBXML + echo " Build Instructions for $TARGET" >>$BBXML + extractcmds_for_type $targetreadme Build-command >>$BBXML + echo "
" >>$BBXML + echo "" >>$BBXML + echo "
" >>$BBXML + echo " Boot Instructions for $TARGET" >>$BBXML + extractcmds_for_type $targetreadme Boot-command >>$BBXML + echo "
" >>$BBXML + echo "
" >>$BBXML +done + +echo "" >>$BBXML +echo "Ready created $BBXML" diff --git a/manifest_conf.mk b/manifest_conf.mk new file mode 100644 index 0000000..31a89f8 --- /dev/null +++ b/manifest_conf.mk @@ -0,0 +1,8 @@ +# To be included in Makefile +# NOTE: MANIFESTHASH needs to be changed to final release tag in format refs/tags/ELnnn before a release +# The values are shown in the release info +# The manifest is used to fetch information into the release info from the distro files +#MANIFESTHASH ?= 0d0f06384afa65eaae4b170e234ee5a813edf44d + #change the above value later to refs/tags/ELnnn (?) +MANIFESTURL := git@git.enea.com:linux/manifests/el_manifests-virtualization.git +PROFILE_NAME := Enea NFV Core -- cgit v1.2.3-54-g00ecf