diff options
author | Amy Fong <amy.fong@windriver.com> | 2014-05-21 14:35:15 -0400 |
---|---|---|
committer | Bruce Ashfield <bruce.ashfield@windriver.com> | 2014-05-23 23:42:55 -0400 |
commit | fb1d6f23fa01c0217ed3f6778d8033dd0030db2a (patch) | |
tree | 36dc89d6b66050a56cbca2f2f7c90229ebcb8854 /meta-openstack/Documentation/testsystem | |
parent | 6350b155270f7f086624db36ecc6e6008ebcd378 (diff) | |
download | meta-cloud-services-fb1d6f23fa01c0217ed3f6778d8033dd0030db2a.tar.gz |
Testing documentation
Add documentation for testing swift, ceph, heat.
Create a script, and instructions for using it, that launches a controller
and a specified number of compute nodes.
Signed-off-by: Amy Fong <amy.fong@windriver.com>
Diffstat (limited to 'meta-openstack/Documentation/testsystem')
-rw-r--r-- | meta-openstack/Documentation/testsystem/README | 116 | ||||
-rw-r--r-- | meta-openstack/Documentation/testsystem/README.multi-compute | 150 | ||||
-rw-r--r-- | meta-openstack/Documentation/testsystem/README.tests | 9 | ||||
-rwxr-xr-x | meta-openstack/Documentation/testsystem/launch.py | 304 | ||||
-rw-r--r-- | meta-openstack/Documentation/testsystem/sample.cfg | 15 |
5 files changed, 594 insertions, 0 deletions
diff --git a/meta-openstack/Documentation/testsystem/README b/meta-openstack/Documentation/testsystem/README new file mode 100644 index 0000000..ddbc51d --- /dev/null +++ b/meta-openstack/Documentation/testsystem/README | |||
@@ -0,0 +1,116 @@ | |||
1 | OpenStack: Minimally Viable Test System | ||
2 | |||
3 | Usage: | ||
4 | <script> [config file] [start|stop|restart] | ||
5 | |||
6 | This test harness creates a virtual network and the specified virtual | ||
7 | domains enabling the user to create a test system for openstack. | ||
8 | |||
9 | Arguments: | ||
10 | config file: this configuration file specifies the test system | ||
11 | to create, see below for details | ||
12 | start|stop|restart: | ||
13 | start - starts the specified test system | ||
14 | stop - stops the specified test system | ||
15 | restart - reboots the specified test system | ||
16 | |||
17 | Note: On some systems, there may be issues with restart, to workaround, use start with | ||
18 | auto_destroy enabled. | ||
19 | |||
20 | Virtual Network | ||
21 | --------------- | ||
22 | |||
23 | This test harness creates a virtual network (ops_default) using the | ||
24 | network specified in the configuration file. | ||
25 | e.g. | ||
26 | [main] | ||
27 | network: 192.168.122.1 | ||
28 | |||
29 | The script tries to create the virtual network using virbr0, but if this is | ||
30 | in use, then it will retry at virbr1, virbr2, .... etc until it finds one or | ||
31 | it gives up after a number of attempts. | ||
32 | |||
33 | |||
34 | Virtual Domains | ||
35 | --------------- | ||
36 | |||
37 | The script then creates a controller using the specified kernel and disk image | ||
38 | e.g. | ||
39 | [controller] | ||
40 | kernel: /root/images/bzImage | ||
41 | disk: /root/images/controller.ext3 | ||
42 | |||
43 | The script then creates compute nodes by using a section header starting with | ||
44 | the string "compute" along with kernel(s)/disk image(s). | ||
45 | |||
46 | e.g. | ||
47 | [compute0] | ||
48 | kernel: /root/images/bzImage | ||
49 | disk: /root/images/compute1.ext3 | ||
50 | |||
51 | [compute1] | ||
52 | kernel: /root/images/bzImage | ||
53 | disk: /root/images/compute2.ext3 | ||
54 | |||
55 | |||
56 | IP address assignments | ||
57 | ---------------------- | ||
58 | There is an auto_assign_ip variable under the section main. | ||
59 | ie | ||
60 | [main] | ||
61 | auto_assign_ip: False | ||
62 | |||
63 | This value, if True, causes ip=dhcp to be passed in the kernel's | ||
64 | boot parameters. | ||
65 | |||
66 | If the value is False, then each controller and compute section will be | ||
67 | required to have a value defined for ip. | ||
68 | |||
69 | ie. | ||
70 | [compute0] | ||
71 | kernel: /root/images/bzImage | ||
72 | disk: /root/images/compute1.ext3 | ||
73 | ip: 192.168.122.10 | ||
74 | |||
75 | |||
76 | Other | ||
77 | ----- | ||
78 | |||
79 | The configuration file also specifies the emulator to be used | ||
80 | for the domains: | ||
81 | e.g. | ||
82 | [main] | ||
83 | emulator: /usr/bin/qemu-system-x86_64 | ||
84 | |||
85 | The configuration file also specifies an auto_destroy option | ||
86 | e.g. | ||
87 | [main] | ||
88 | auto_destroy: True | ||
89 | |||
90 | If auto_destroy is enabled (True), if the required controller/compute nodes | ||
91 | are running, then the script will automatically destroy the running domains. | ||
92 | Otherwise, if disabled (False), then the script will display a message that the | ||
93 | domain is active and exit. (auto_destroy is only used when starting systems) | ||
94 | |||
95 | |||
96 | Example configuration file | ||
97 | -------------------------- | ||
98 | [main] | ||
99 | network: 192.168.122.1 | ||
100 | emulator: /usr/bin/qemu-system-x86_64 | ||
101 | auto_destroy: True | ||
102 | auto_assign_ip: True | ||
103 | |||
104 | [controller] | ||
105 | kernel: /root/images/bzImage | ||
106 | disk: /root/images/controller.ext3 | ||
107 | |||
108 | [compute0] | ||
109 | kernel: /root/images/bzImage | ||
110 | disk: /root/images/compute1.ext3 | ||
111 | |||
112 | [compute1] | ||
113 | kernel: /root/images/bzImage | ||
114 | disk: /root/images/compute2.ext3 | ||
115 | ------------------------------------------------- | ||
116 | |||
diff --git a/meta-openstack/Documentation/testsystem/README.multi-compute b/meta-openstack/Documentation/testsystem/README.multi-compute new file mode 100644 index 0000000..f7e6b4e --- /dev/null +++ b/meta-openstack/Documentation/testsystem/README.multi-compute | |||
@@ -0,0 +1,150 @@ | |||
1 | 0. configure configuration files with auto_destroy enabled and auto_assign_ip disabled and specify the | ||
2 | IP addresses of the controller and the compute node as configured during the build | ||
3 | |||
4 | e.g. if the DHCP is supported | ||
5 | |||
6 | [main] | ||
7 | network: 192.168.7.1 | ||
8 | emulator: /usr/bin/qemu-system-x86_64 | ||
9 | auto_destroy: True | ||
10 | auto_assign_ip: True | ||
11 | |||
12 | [controller] | ||
13 | kernel: $HOME/images/bzImage | ||
14 | disk: $HOME/images/controller.ext3 | ||
15 | |||
16 | [computeA] | ||
17 | kernel: $HOME/images/bzImage | ||
18 | disk: $HOME/images/compute.ext3 | ||
19 | |||
20 | [computeB] | ||
21 | kernel: $HOME/images/bzImage | ||
22 | disk: $HOME/images/computeB.ext3 | ||
23 | |||
24 | Start instances: | ||
25 | <launch.py> <config file> start | ||
26 | |||
27 | e.g. if the IP addresses are specified at build time | ||
28 | |||
29 | For the controller: | ||
30 | |||
31 | The build time IP in layers/meta-cloud-services - | ||
32 | layers//meta-cloud-services/meta-openstack-controller-deploy/classes/hosts.bbclass | ||
33 | layers//meta-cloud-services/meta-openstack-compute-deploy/classes/hosts.bbclass | ||
34 | CONTROLLER_IP ?= "128.224.149.121" | ||
35 | |||
36 | Use the controller's ip in the test system's configuration file: | ||
37 | |||
38 | [controller] | ||
39 | ip: 128.224.149.121 | ||
40 | |||
41 | For each compute, use the controller's IP in the bitbake build's build time IP and | ||
42 | use the compute node's ip accordingly | ||
43 | |||
44 | computeA | ||
45 | The build time IP in layers/meta-cloud-services - | ||
46 | layers//meta-cloud-services/meta-openstack-controller-deploy/classes/hosts.bbclass | ||
47 | layers//meta-cloud-services/meta-openstack-compute-deploy/classes/hosts.bbclass | ||
48 | CONTROLLER_IP ?= "128.224.149.121" | ||
49 | COMPUTE_IP ?= "128.224.149.122" | ||
50 | |||
51 | computeB | ||
52 | The build time IP in layers/meta-cloud-services - | ||
53 | layers//meta-cloud-services/meta-openstack-controller-deploy/classes/hosts.bbclass | ||
54 | layers//meta-cloud-services/meta-openstack-compute-deploy/classes/hosts.bbclass | ||
55 | CONTROLLER_IP ?= "128.224.149.121" | ||
56 | COMPUTE_IP ?= "128.224.149.123" | ||
57 | |||
58 | And in the test system's configuration file: | ||
59 | |||
60 | [controller] | ||
61 | ip: 128.224.149.121 | ||
62 | |||
63 | [computeA] | ||
64 | ip: 128.224.149.122 | ||
65 | |||
66 | [computeB] | ||
67 | ip: 128.224.149.123 | ||
68 | |||
69 | Start instances: | ||
70 | <launch.py> <config file> start | ||
71 | |||
72 | |||
73 | 1./etc/hosts - adjust for hostnames | ||
74 | |||
75 | On controller/compute nodes, configure your DNS or /etc/hosts and ensure | ||
76 | it is consistent across all hosts. Make sure that the three hosts can | ||
77 | perform name resolution with each other. As a test, use the ping command | ||
78 | to ping each host from one another. | ||
79 | |||
80 | $ ping HostA | ||
81 | $ ping HostB | ||
82 | $ ping HostC | ||
83 | |||
84 | e.g. /etc/hosts | ||
85 | 127.0.0.1 localhost.localdomain localhost | ||
86 | |||
87 | 192.168.7.2 controller | ||
88 | 192.168.7.4 computeA | ||
89 | 192.168.7.6 computeB | ||
90 | |||
91 | 2. Configure NFS host on controller | ||
92 | |||
93 | /etc/nova/instances needs to be a shared directory for migration to work. | ||
94 | |||
95 | Configure the controller to export this as an NFS export. | ||
96 | |||
97 | cat >> /etc/exports << 'EOF' | ||
98 | /etc/nova/instances *(rw,no_subtree_check,insecure,no_root_squash) | ||
99 | EOF | ||
100 | exportfs -a | ||
101 | |||
102 | On compute nodes: | ||
103 | mount controller:/etc/nova/instances /etc/nova/instances/ | ||
104 | |||
105 | |||
106 | 3. Make sure the controller can see the compute nodes | ||
107 | |||
108 | nova service-list | ||
109 | |||
110 | root@controller:/etc/nova/instances# nova service-list | ||
111 | +------------------+------------+----------+---------+-------+----------------------------+-----------------+ | ||
112 | | Binary | Host | Zone | Status | State | Updated_at | Disabled Reason | | ||
113 | +------------------+------------+----------+---------+-------+----------------------------+-----------------+ | ||
114 | | nova-compute | computeA | nova | enabled | up | 2014-05-16T17:14:24.617143 | - | | ||
115 | | nova-compute | computeB | nova | enabled | up | 2014-05-16T17:14:25.228089 | - | | ||
116 | | nova-conductor | controller | internal | enabled | up | 2014-05-16T17:14:26.932751 | - | | ||
117 | | nova-scheduler | controller | internal | enabled | up | 2014-05-16T17:14:26.984656 | - | | ||
118 | | nova-consoleauth | controller | internal | enabled | up | 2014-05-16T17:14:27.007947 | - | | ||
119 | | nova-cert | controller | internal | enabled | up | 2014-05-16T17:14:27.030701 | - | | ||
120 | | nova-network | controller | internal | enabled | up | 2014-05-16T17:14:27.031366 | - | | ||
121 | +------------------+------------+----------+---------+-------+----------------------------+-----------------+ | ||
122 | |||
123 | root@controller:~# nova hypervisor-list | ||
124 | +----+---------------------+ | ||
125 | | ID | Hypervisor hostname | | ||
126 | +----+---------------------+ | ||
127 | | 1 | computeA | | ||
128 | | 2 | computeB | | ||
129 | +----+---------------------+ | ||
130 | |||
131 | Login to horizon, and select hypervisors, both nodes will be seen | ||
132 | |||
133 | |||
134 | 4. Bootup a guest from the controller: | ||
135 | |||
136 | On controller: | ||
137 | glance image-create --name myFirstImage --is-public true --container-format bare --disk-format qcow2 --file images/cirros-0.3.0-x86_64-disk.img | ||
138 | neutron net-create mynetwork | ||
139 | nova boot --image myFirstImage --flavor 1 myinstance | ||
140 | |||
141 | 5. Do migration from horizon | ||
142 | |||
143 | From horizon, goto instances, myinstance should be running | ||
144 | wait til myinstance is in running state | ||
145 | |||
146 | In Actions, select: Migrate Instance | ||
147 | Select: Confirm migrate/resize when prompted | ||
148 | |||
149 | myinstance is now running from the other compute node (computeB in this case) | ||
150 | |||
diff --git a/meta-openstack/Documentation/testsystem/README.tests b/meta-openstack/Documentation/testsystem/README.tests new file mode 100644 index 0000000..924f883 --- /dev/null +++ b/meta-openstack/Documentation/testsystem/README.tests | |||
@@ -0,0 +1,9 @@ | |||
1 | This test system enables the user to run different tests on the system. | ||
2 | |||
3 | Multiple compute node testing can be performed as per: README.multi-compute | ||
4 | |||
5 | Other tests described in Documentation.ND: | ||
6 | ie. | ||
7 | Ceph testing: Documentation.ND/README.ceph-openstack | ||
8 | Swift/cinder: Documentation.ND/README.swift | ||
9 | |||
diff --git a/meta-openstack/Documentation/testsystem/launch.py b/meta-openstack/Documentation/testsystem/launch.py new file mode 100755 index 0000000..e177773 --- /dev/null +++ b/meta-openstack/Documentation/testsystem/launch.py | |||
@@ -0,0 +1,304 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | import sys | ||
4 | import grp | ||
5 | import pwd | ||
6 | import os | ||
7 | import libvirt | ||
8 | import ConfigParser | ||
9 | import subprocess | ||
10 | import shutil | ||
11 | import distutils.spawn | ||
12 | import platform | ||
13 | |||
# this does a very basic test to see if the required packages
# are installed, extend list as required
def checkPackages():
    """Sanity-check that the host has the virtualization tools we need.

    Looks for virsh, qemu-system-x86_64 and libvirtd on PATH. If any
    are missing, prints a distribution-specific hint about which
    packages to install and exits with status 1; otherwise returns None.
    """
    sys_ok = True
    check_apps = [ "virsh", "qemu-system-x86_64", "libvirtd" ]
    for app in check_apps:
        # find_executable() returns None when the app is not on PATH;
        # compare with 'is None' rather than '== None'
        if distutils.spawn.find_executable(app) is None:
            print( "Missing: " + app)
            sys_ok = False
    if not sys_ok:
        print("The required libvirt/qemu packages are missing...")
        # platform.dist() gives (distname, version, id); we only need the name.
        # NOTE(review): platform.dist() was removed in Python 3.8 — fine for
        # this py2-era script, but needs replacing if ever ported.
        distro = platform.dist()[0]
        if distro == "debian" or distro == "Ubuntu":
            print( "This appears to be a Debian/Ubuntu distribution\nPlease install " +
                   "packages like libvirt-bin, qemu-system-x86,..." )
        elif distro == "redhat" or distro == "fedora":
            print( "This appears to be a Redhat/Fedora distribution\nPlease install " +
                   "packages like libvirt-client, libvirt-daemon, qemu-system-x86, ..." )
        exit(1)
    return
34 | |||
def networkInterfaces():
    """Return the names of all network interfaces on this host.

    Parses /proc/net/dev; interface lines look like "eth0: <stats>",
    so anything left of the first colon is an interface name. Header
    lines without a colon are skipped.
    """
    ifaces = []
    # use a context manager so the file handle is closed deterministically
    # (the original left closing to the garbage collector)
    with open('/proc/net/dev', 'r') as f:
        for line in f:
            if_info = line.split(":", 1)
            if len(if_info) > 1:
                ifaces.append( if_info[0].strip() )
    return ifaces
42 | |||
def destroyNetwork(conn, network_name):
    """Tear down the named libvirt virtual network if it exists.

    Looks the network up among both active and inactive networks on
    *conn*, destroys it if running, then removes its definition.
    Exits with status 1 if libvirt rejects any of those operations.
    """
    networks = conn.listNetworks() + conn.listDefinedNetworks()
    if network_name in networks:
        try:
            nw = conn.networkLookupByName(network_name)
            if nw.isActive():
                nw.destroy()
            nw.undefine()
        # narrow catch: the original bare 'except:' would also swallow
        # KeyboardInterrupt/SystemExit
        except libvirt.libvirtError:
            print( "Failed to destroy network: %s" % network_name )
            exit( 1 )
54 | |||
def restartDomain(conn, domain_name):
    """Reboot the named domain if it is defined and currently running.

    A missing domain is only warned about (not fatal) so that
    'restart' can be applied to partially-started test systems.
    """
    try:
        domain = conn.lookupByName(domain_name)
    # narrow catch: lookupByName raises libvirtError for unknown domains;
    # the original bare 'except:' would also swallow SystemExit
    except libvirt.libvirtError:
        print( "restartDomain: Warning domain " + domain_name + " doesn't exist." )
        return
    if domain.isActive():
        domain.reboot()
63 | |||
def destroyDomain(conn, auto_destroy, domain_name):
    """Stop and undefine the named domain.

    If the domain does not exist this is a no-op. If it is running:
    destroy it when auto_destroy is True, otherwise tell the user how
    to stop it manually and exit(0). The definition is removed in
    either case so the caller can redefine it.
    """
    try:
        domain = conn.lookupByName(domain_name)
    # narrow catch: unknown domain means nothing to clean up
    except libvirt.libvirtError:
        return
    if domain.isActive():
        if auto_destroy:
            print( "Auto destroy enabled, destroying old instance of domain %s" % domain_name )
            domain.destroy()
        else:
            print( "Domain %s is active, abort..." % domain_name )
            # NOTE: relies on the module-level 'uri' global set by the script body
            print( "To stop: virsh -c %s destroy %s " % ( uri , domain_name ) )
            exit(0)
    domain.undefine()
78 | |||
def startDomain(conn, auto_destroy, domain_name, xml_desc):
    """Define and boot a domain from xml_desc, replacing any prior instance.

    Any existing domain of the same name is first removed via
    destroyDomain() (honouring auto_destroy). Exits with status 1 if
    libvirt fails to define or start the new domain.
    """
    print( "Starting %s...\n%s" % ( domain_name, xml_desc ) )
    destroyDomain(conn, auto_destroy, domain_name)
    try:
        conn.defineXML(xml_desc)
        conn.lookupByName(domain_name).create()
        print( "Starting domain %s..." % domain_name )
        print( "To connect to the console: virsh -c %s console %s" % ( uri, domain_name ) )
        print( "To stop: virsh -c %s destroy %s" % ( uri, domain_name ) )
    except Exception as e:
        print( e )
        exit(1)
92 | |||
def make_nw_spec(network_name, bridge_nw_interface, network, auto_assign_ip):
    """Build the libvirt XML definition for the test virtual network.

    *network* is the bridge's IPv4 address (e.g. 192.168.122.1). When
    auto_assign_ip is True, a DHCP range covering .2 through .254 of
    that /24 is included so guests can boot with ip=dhcp.
    """
    parts = [
        '<network>',
        '<name>' + network_name + '</name>',
        '<bridge name="' + bridge_nw_interface + '"/>',
        '<forward/>',
        '<ip address="' + network + '" netmask="255.255.255.0">',
    ]
    if auto_assign_ip:
        # reuse the first three octets of the bridge address for the pool
        prefix = network.rsplit('.', 1)[0]
        parts.append('<dhcp>')
        parts.append('<range start="' + prefix + '.2" end="' + prefix + '.254"/>')
        parts.append('</dhcp>')
    parts.append('</ip>')
    parts.append('</network>')
    return ''.join(parts)
111 | |||
def make_spec(name, network, kernel, disk, bridge_nw_interface, emulator, auto_assign_ip, ip):
    """Build the libvirt domain XML for a single controller/compute guest.

    name:                domain name (also used as the guest hostname)
    network:             gateway address used in the static ip= kernel arg
    kernel, disk:        paths to the guest kernel image and root filesystem;
                         both must exist or we exit(1)
    bridge_nw_interface: host bridge (virbrN) the guest NIC attaches to
    emulator:            qemu binary path embedded in the XML
    auto_assign_ip:      if True the guest boots with ip=dhcp, otherwise a
                         static ip= spec is built from *ip*/*network*/*name*
    ip:                  guest address, only used when auto_assign_ip is False
    """
    if not os.path.exists(kernel):
        print( "Kernel image %s does not exist!" % kernel )
        exit(1)
    if not os.path.exists(disk):
        print( "Disk %s does not exist!" % disk )
        exit(1)
    if auto_assign_ip:
        ip_spec = 'dhcp'
    else:
        # kernel ip= syntax: client-ip::gateway:netmask:hostname:device:autoconf
        ip_spec = ip + '::' + network + ':255.255.255.0:' + name + ':eth0:off'
    spec = '<domain type=\'kvm\'>'
    spec += ' <name>' + name + '</name>'
    spec += ' <memory>4096000</memory>'
    spec += ' <currentMemory>4096000</currentMemory>'
    spec += ' <vcpu cpuset=\'1\'>1</vcpu>'
    spec += ' <cpu>'
    spec += ' <model>kvm64</model>'
    spec += ' </cpu>'
    spec += ' <os>'
    spec += ' <type arch=\'x86_64\' machine=\'pc\'>hvm</type>'
    # the guest boots a host-side kernel directly (direct kernel boot)
    spec += ' <kernel>' + kernel + '</kernel>'
    spec += ' <boot dev=\'hd\'/>'
    spec += ' <cmdline>root=/dev/vda rw console=ttyS0 ip=' + ip_spec + '</cmdline>'
    spec += ' </os>'
    spec += ' <features>'
    spec += ' <acpi/>'
    spec += ' <apic/>'
    spec += ' <pae/>'
    spec += ' </features>'
    spec += ' <clock offset=\'utc\'/>'
    spec += ' <on_poweroff>destroy</on_poweroff>'
# spec += ' <on_reboot>destroy</on_reboot>'
    spec += ' <on_crash>destroy</on_crash>'
    spec += ' <devices>'
    spec += ' <emulator>' + emulator + '</emulator>'
    # root disk is exposed as virtio /dev/vda, matching the cmdline above
    spec += ' <disk type=\'file\' device=\'disk\'>'
    spec += ' <source file=\'' + disk + '\'/>'
    spec += ' <target dev=\'vda\' bus=\'virtio\'/>'
    spec += ' </disk>'
    spec += ' <interface type=\'bridge\'>'
    spec += ' <source bridge=\'' + bridge_nw_interface + '\'/>'
    spec += ' <model type=\'virtio\' />'
    spec += ' </interface>'
    # serial/console pair gives 'virsh console <name>' access on ttyS0
    spec += ' <serial type=\'pty\'>'
    spec += ' <target port=\'0\'/>'
    spec += ' <alias name=\'serial0\'/>'
    spec += ' </serial>'
    spec += ' <console type=\'pty\'>'
    spec += ' <target type=\'serial\' port=\'0\'/>'
    spec += ' <alias name=\'serial0\'/>'
    spec += ' </console>'
    spec += ' </devices>'
    spec += '</domain>'
    return spec
167 | |||
def getConfig(config, section, key):
    """Fetch a required value from the parsed config file.

    Environment variables in the value (e.g. $HOME in image paths) are
    expanded. Exits with status 1 when the section/key is absent, so
    callers may rely on the returned value being present.
    """
    try:
        return os.path.expandvars(config.get(section, key))
    # narrow from a bare 'except:' (which also caught SystemExit and
    # KeyboardInterrupt); ConfigParser raises NoSectionError/NoOptionError
    except Exception:
        print( "Configuration file error! Missing item (section: %s, key: %s)" % ( section, key ) )
        exit(1)
174 | |||
# does the user have access to libvirt?
# root always qualifies; otherwise the user must be a member of one of
# the distro-dependent libvirt groups below.
eligible_groups = [ "libvirt", "libvirtd" ]
eligible_user = False
euid = os.geteuid()
if euid == 0:
    eligible_user = True
else:
    username = pwd.getpwuid(euid)[0]
    # all groups that list this user as a supplementary member
    groups = [g.gr_name for g in grp.getgrall() if username in g.gr_mem]
    for v in eligible_groups:
        if v in groups:
            eligible_user = True

checkPackages()

if not eligible_user:
    sys.stderr.write("You need to be the 'root' user or in group [" + '|'.join(eligible_groups) + "] to run this script.\n")
    exit(1)

# usage: <script> [config file] [start|stop|restart]
if len(sys.argv) != 3:
    sys.stderr.write("Usage: "+sys.argv[0]+" [config file] [start|stop|restart]\n")
    sys.exit(1)

if not os.path.exists(sys.argv[1]):
    sys.stderr.write("Error: config file \"" + sys.argv[1] + "\" was not found!\n")
    sys.exit(1)

command = sys.argv[2]
command_options = ["start", "stop", "restart"]
if not command in command_options:
    sys.stderr.write("Usage: "+sys.argv[0]+" [config file] [start|stop|restart]\n")
    sys.exit(1)

Config = ConfigParser.ConfigParser()
Config.read(sys.argv[1])

network_addr = getConfig(Config, "main", "network")
# the bare getConfig() calls only validate that the key exists (exiting
# with a clear message if not); getboolean() then does the parsing
getConfig(Config, "main", "auto_destroy")
auto_destroy = Config.getboolean("main", "auto_destroy")
getConfig(Config, "main", "auto_assign_ip")
auto_assign_ip = Config.getboolean("main", "auto_assign_ip")
network_name = 'ops_default'
uri = 'qemu:///system'

# Connect to libvirt
conn = libvirt.open(uri)
if conn is None:
    print( "Failed to open connection to the hypervisor" )
    exit(1)

if command == "start":
    # starting from scratch: remove any stale copy of our network first
    destroyNetwork(conn, network_name)

    # Change the default bridge device from virbr0 to virbr%d.
    # This will make libvirt try virbr0, virbr1, etc. until it finds a free one.
    cnt = 0
    ifaces = networkInterfaces()
    found_virbr = False
    while found_virbr == False:
        if cnt > 254:
            print( "Giving up on looking for a free virbr network interface!" )
            exit(1)
        bridge_nw_interface = 'virbr' + str(cnt)
        if bridge_nw_interface not in ifaces:
            print( "bridge_nw_interface: %s" % bridge_nw_interface )
            network_spec = make_nw_spec(network_name, bridge_nw_interface, network_addr, auto_assign_ip)
            try:
                conn.networkDefineXML(network_spec)
                nw = conn.networkLookupByName(network_name)
                nw.create()
                found_virbr = True
            except:
                # definition/creation failed (e.g. bridge name raced into
                # use); clean up and retry with the next virbrN
                print( "Network Name: %s" % network_name )
                destroyNetwork( conn, network_name )
                print( "Error creating network interface" )
        cnt += 1
else:
    # verify network exists
    try:
        nw = conn.networkLookupByName(network_name)
    except:
        print( "Error! Virtual network " + network_name + " is not defined!" )
        exit(1)
    if not nw.isActive():
        print( "Error! Virtual network " + network_name + " is not running!" )
        exit(1)

emulator = getConfig(Config, "main", "emulator")
if not os.path.exists(emulator):
    print( "Emulator %s does not exist!" % emulator )
    exit(1)

controller_name = 'controller'
if command == "start":
    # Define the controller xml
    controller_kernel = getConfig(Config, "controller", "kernel")
    controller_disk = getConfig(Config, "controller", "disk")

    controller_ip = None
    if not auto_assign_ip:
        # static addressing: each node must declare its own ip
        controller_ip = getConfig(Config, "controller", "ip")
    controller_spec = make_spec(controller_name, network_addr, controller_kernel,
                                controller_disk, bridge_nw_interface, emulator,
                                auto_assign_ip, controller_ip)

    # Now that network is setup let's actually run the virtual images
    startDomain(conn, auto_destroy, controller_name, controller_spec)
elif command == "stop":
    # stop always destroys, regardless of the auto_destroy setting
    destroyDomain(conn, True, controller_name)
elif command == "restart":
    restartDomain(conn, controller_name)

# any config section whose name starts with "compute" is a compute node
for i in Config.sections():
    if i.startswith("compute"):
        if command == "start":
            # Define the compute xml
            kernel = getConfig(Config, i, "kernel")
            disk = getConfig(Config, i, "disk")
            compute_ip = None
            if not auto_assign_ip:
                compute_ip = getConfig(Config, i, "ip")
            spec = make_spec(i, network_addr, kernel, disk, bridge_nw_interface,
                             emulator, auto_assign_ip, compute_ip)
            startDomain(conn, auto_destroy, i, spec)
        elif command == "stop":
            destroyDomain(conn, True, i)
        elif command == "restart":
            restartDomain(conn, i)

conn.close()
diff --git a/meta-openstack/Documentation/testsystem/sample.cfg b/meta-openstack/Documentation/testsystem/sample.cfg new file mode 100644 index 0000000..60154cf --- /dev/null +++ b/meta-openstack/Documentation/testsystem/sample.cfg | |||
@@ -0,0 +1,15 @@ | |||
1 | [main] | ||
2 | network: 192.168.122.1 | ||
3 | emulator: /usr/bin/qemu-system-x86_64 | ||
4 | auto_destroy: True | ||
5 | auto_assign_ip: False | ||
6 | |||
7 | [controller] | ||
8 | kernel: /root/images/bzImage | ||
9 | disk: /root/images/controller.ext3 | ||
10 | ip: 192.168.122.2 | ||
11 | |||
12 | [compute0] | ||
13 | kernel: /root/images/bzImage | ||
14 | disk: /root/images/compute.ext3 | ||
15 | ip: 192.168.122.3 | ||