From 649327f80dc331943d448e87f73ecaadcc78a22a Mon Sep 17 00:00:00 2001 From: Bruce Ashfield Date: Fri, 23 May 2014 23:49:49 -0400 Subject: docs: move more READMEs into Documentation Signed-off-by: Bruce Ashfield --- meta-openstack/Documentation/README.networking | 208 ++++++++++ .../Documentation/README.networking_flat | 249 ++++++++++++ .../Documentation/README.networking_l3_router | 450 +++++++++++++++++++++ .../Documentation/README.networking_vlan | 382 +++++++++++++++++ meta-openstack/Documentation/README.spice | 82 ++++ meta-openstack/Documentation/README.tempest | 55 +++ 6 files changed, 1426 insertions(+) create mode 100644 meta-openstack/Documentation/README.networking create mode 100644 meta-openstack/Documentation/README.networking_flat create mode 100644 meta-openstack/Documentation/README.networking_l3_router create mode 100644 meta-openstack/Documentation/README.networking_vlan create mode 100644 meta-openstack/Documentation/README.spice create mode 100644 meta-openstack/Documentation/README.tempest (limited to 'meta-openstack/Documentation') diff --git a/meta-openstack/Documentation/README.networking b/meta-openstack/Documentation/README.networking new file mode 100644 index 0000000..2299de3 --- /dev/null +++ b/meta-openstack/Documentation/README.networking @@ -0,0 +1,208 @@ +Networking +============== + +Description +----------- +OpenStack provides tools to setup many different network topologies using +tunnels, Vlans, GREs... the list goes on. In this document we describe how to +setup 3 basic network configurations which can be used as building blocks for a +larger network deployment. Going through these setups also tests that the +Open vSwitch plugin and DHCP and l3 agents are operating correctly. + + +Assumptions +----------- +The following assumes you have built the controller and compute nodes for the +qemux86-64 machine as described in README.setup and have been able to spin-up an +instance successfully. + + +Prerequisites +------------- + +1. 
Following the instructions in README.setup to spin-up your controller and +compute nodes in VMs will result in NATed tap interfaces on the host. While this +is fine for basic use it will not allow you to use things like GRE tunnels as +the packet will appear to be coming from the host when it arrives at the other +end of the tunnel and will therefore be rejected (since the src IP will not +match the GRE's remote_ip). To get around this we must setup an Open vSwitch +bridge on the host and attach the taps. Open vSwitch must therefore be installed +and running on the host. + +On Ubuntu systems this may be done via: +sudo apt-get install openvswitch-switch openvswitch-common + +2. Also since we will be using an Open vSwitch on the host we need to ensure the +controller and compute network interfaces have different MAC addresses. We +therefore must modify the runqemu script as per the following: + +--- a/scripts/runqemu-internal ++++ b/scripts/runqemu-internal +@@ -252,7 +252,7 @@ else + KERNEL_NETWORK_CMD="ip=192.168.7.$n2::192.168.7.$n1:255.255.255.0" + QEMU_TAP_CMD="-net tap,vlan=0,ifname=$TAP,script=no,downscript=no" + if [ "$KVM_ACTIVE" = "yes" ]; then +- QEMU_NETWORK_CMD="-net nic,model=virtio $QEMU_TAP_CMD,vhost=on" ++ QEMU_NETWORK_CMD="-net nic,macaddr=52:54:00:12:34:$(printf '%x' $((RANDOM % 170))),model=virtio $QEMU_TAP_CMD,vhost=on" + DROOT="/dev/vda" + ROOTFS_OPTIONS="-drive file=$ROOTFS,if=virtio" + else +--- +this will not guarantee distinct MAC addresses but most of the time they will be. + + +Host Open vSwitch bridge +------------------------ +As per the prerequisites we need to setup a bridge on the host to avoid NATed +tap interfaces. 
After you have used 'runqemu' to boot your controller and +compute nodes perform the following instructions on your host + +(I will assume tap0 - controller, tap1 - compute, use 'ip a s' or 'ifconfig' to +identify the tap interfaces) + +sudo ovs-vsctl add-br br-int +sudo ovs-vsctl add-port br-int tap0 +sudo ovs-vsctl add-port br-int tap1 +sudo ip address del 192.168.7.1/24 dev tap0 +sudo ip address del 192.168.7.3/24 dev tap1 +sudo ip address add 192.168.7.1/24 broadcast 192.168.7.255 dev br-int +sudo route del 192.168.7.2 tap0 +sudo route del 192.168.7.4 tap1 + + +NOTE: Any time you reboot the controller or compute nodes you will +want to remove and re-add the port via: +# ovs-vsctl del-port br-int tapX +# ovs-vsctl add-port br-int tapX +# ip address del 192.168.7.Y/24 dev tapX +(where X and Y are substituted accordingly) +This will also ensure the ARP tables in the vSwitch are updated since +chances are the MAC address will have changed on a reboot due to the +MAC randomizer of prerequisite 2. + + +Controller/Compute network setup +-------------------------------- +The neutron Open vSwitch plugin expects several bridges to exist on +the controller and compute nodes. When the controller and compute +nodes are first booted however these do not exist and depending on how +you are setting up your network this is subject to change and as such +is not 'baked' in to our images. This would normally be setup by +cloud-init, chef, cobbler or some other deployment scripts. Here we +will accomplish it by hand. 
+ +On first boot your network will look like this: (controller node) +---snip--- +root@controller:~# ip a show eth0 +2: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 52:54:00:12:34:a9 brd ff:ff:ff:ff:ff:ff + inet 192.168.7.2/24 brd 192.168.7.255 scope global eth0 + valid_lft forever preferred_lft forever + inet6 fe80::5054:ff:fe12:34a9/64 scope link + valid_lft forever preferred_lft forever + +root@controller:~# ovs-vsctl show +524a6c84-226d-427b-8efa-732ed7e7fa43 + Bridge br-int + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port br-int + Interface br-int + type: internal + Bridge br-tun + Port br-tun + Interface br-tun + type: internal + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + ovs_version: "2.0.0" +---snip--- + +To complete the expected network configuration you must add a bridge +which will contain the physical interface as one of its ports and move +the IP address from the interface to the bridge. 
The following will +accomplish this: + +ovs-vsctl add-br br-eth0 +ovs-vsctl add-port br-eth0 eth0 +ip address del 192.168.7.2/24 dev eth0 +ip address add 192.168.7.2/24 broadcast 192.168.7.255 dev br-eth0 +route add default gw 192.168.7.1 + +And now you network will look like the following: +---snip--- +root@controller:~# ip a s +...skip +2: eth0: mtu 1500 qdisc pfifo_fast master ovs-system state UP group default qlen 1000 + link/ether 52:54:00:12:34:a9 brd ff:ff:ff:ff:ff:ff +...skip +7: br-eth0: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether ae:f8:be:7c:78:42 brd ff:ff:ff:ff:ff:ff + inet 192.168.7.2/24 scope global br-eth0 + valid_lft forever preferred_lft forever + inet6 fe80::e453:1fff:fec1:79ff/64 scope link + valid_lft forever preferred_lft forever + +root@controller:~# ovs-vsctl show +524a6c84-226d-427b-8efa-732ed7e7fa43 + Bridge "br-eth0" + Port "eth0" + Interface "eth0" + Port "br-eth0" + Interface "br-eth0" + type: internal + Bridge br-int + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port br-int + Interface br-int + type: internal + Bridge br-tun + Port br-tun + Interface br-tun + type: internal + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + ovs_version: "2.0.0" + +At this point you will want to restart the neutron network services + +(controller) +/etc/init.d/neutron-openvswitch-agent stop +/etc/init.d/neutron-dhcp-agent stop +/etc/init.d/neutron-server reload +/etc/init.d/neutron-dhcp-agent start +/etc/init.d/neutron-openvswitch-agent start + +(Compute) +/etc/init.d/neutron-openvswitch-agent stop +/etc/init.d/nova-compute reload +/etc/init.d/neutron-openvswitch-agent start + + +NOTE: on a reboot the Open vSwitch configuration will remain but at +this point in time you will need to manually move the IP address from +the eth0 interface to the br-eth0 interface using + +ip address del 192.168.7.2/24 dev eth0 +ip address add 192.168.7.2/24 broadcast 192.168.7.255 dev 
br-eth0 + +With this network configuration on the controller and similar +configuration on the compute node (just replace 192.168.7.2 with +192.168.7.4) everything is ready to configure any of the 3 network +sample configurations. + +Further reading +--------------- + +README.networking_flat +README.networking_vlan +README.networking_l3_router \ No newline at end of file diff --git a/meta-openstack/Documentation/README.networking_flat b/meta-openstack/Documentation/README.networking_flat new file mode 100644 index 0000000..ab18f6f --- /dev/null +++ b/meta-openstack/Documentation/README.networking_flat @@ -0,0 +1,249 @@ +Networking - FLAT network +========================= + +Description +----------- +The flat network will have the VMs share the management network +(192.168.7.0/24). The dhcp-agent will provide the VMs addresses +within the subnet and within its provisioned range. This type of +network will not typically be deployed as everything is accessible by +everything else (VMs can access VMs and the compute and controller +nodes) + + +Assumptions +----------- +It is assumed you have completed the steps described in +README.networking and have provisioned the host vSwitch as well as +created the br-eth0 bridges on the controller and compute nodes. + +At this point you should be able to ping 192.168.7.4 from 192.168.7.2 +and vice versa. + +You have built your controller image including the cirros image (for +which you have already added the image to glance as myFirstImage). 
+ +You have run 'source /etc/nova/openrc' + +Configuration updates +--------------------- +On the controller and (all) compute nodes you must edit the file +/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini + +In the [OVS] section set +network_vlan_ranges = ph-eth0:1:1 +bridge_mappings = ph-eth0:br-eth0 + +(*** on compute nodes edit local_ip as well [192.168.7.4]***) + +Restart some services to allow these changes to take effect: +/etc/init.d/neutron-openvswitch-agent reload +(on controller) +/etc/init.d/neutron-server reload +/etc/init.d/neutron-dhcp-agent reload +(on compute) +/etc/init.d/nova-compute reload + + +Create the net and subnet +------------------------- +neutron net-create --provider:physical_network=ph-eth0 \ + --provider:network_type=flat \ + --shared MY_FLAT_NET +Created a new network: ++---------------------------+--------------------------------------+ +| Field | Value | ++---------------------------+--------------------------------------+ +| admin_state_up | True | +| id | 3263aa7f-b86c-4ad3-a28c-c78d4c711583 | +| name | MY_FLAT_NET | +| provider:network_type | flat | +| provider:physical_network | ph-eth0 | +| provider:segmentation_id | | +| shared | True | +| status | ACTIVE | +| subnets | | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++---------------------------+--------------------------------------+ + + +neutron subnet-create MY_FLAT_NET 192.168.7.0/24 --name MY_FLAT_SUBNET \ + --no-gateway --host-route destination=0.0.0.0/0,nexthop=192.168.7.1 \ + --allocation-pool start=192.168.7.230,end=192.168.7.234 +Created a new subnet: ++------------------+--------------------------------------------------------+ +| Field | Value | ++------------------+--------------------------------------------------------+ +| allocation_pools | {"start": "192.168.7.230", "end": "192.168.7.234"} | +| cidr | 192.168.7.0/24 | +| dns_nameservers | | +| enable_dhcp | True | +| gateway_ip | | +| host_routes | {"destination": "0.0.0.0/0", "nexthop": 
"192.168.7.1"} | +| id | bfa99d99-2ba5-47e9-b71e-0bd8a2961e08 | +| ip_version | 4 | +| name | MY_FLAT_SUBNET | +| network_id | 3263aa7f-b86c-4ad3-a28c-c78d4c711583 | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++------------------+--------------------------------------------------------+ + +Boot the image and test connectivity +------------------------------------ +nova boot --image myFirstImage --flavor m1.small \ + --nic net-id=3263aa7f-b86c-4ad3-a28c-c78d4c711583 myinstance ++--------------------------------------+-----------------------------------------------------+ +| Property | Value | ++--------------------------------------+-----------------------------------------------------+ +| OS-DCF:diskConfig | MANUAL | +| OS-EXT-AZ:availability_zone | nova | +| OS-EXT-SRV-ATTR:host | - | +| OS-EXT-SRV-ATTR:hypervisor_hostname | - | +| OS-EXT-SRV-ATTR:instance_name | instance-00000003 | +| OS-EXT-STS:power_state | 0 | +| OS-EXT-STS:task_state | scheduling | +| OS-EXT-STS:vm_state | building | +| OS-SRV-USG:launched_at | - | +| OS-SRV-USG:terminated_at | - | +| accessIPv4 | | +| accessIPv6 | | +| adminPass | 7Qe9nFekCjYD | +| config_drive | | +| created | 2014-04-10T04:13:38Z | +| flavor | m1.small (2) | +| hostId | | +| id | f85da1da-c318-49fb-8da9-c07644400d4c | +| image | myFirstImage (1da089b1-164d-45d6-9b6c-002f3edb8a7b) | +| key_name | - | +| metadata | {} | +| name | myinstance | +| os-extended-volumes:volumes_attached | [] | +| progress | 0 | +| security_groups | default | +| status | BUILD | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | +| updated | 2014-04-10T04:13:38Z | +| user_id | 1dfcb72ef6a7428d8dd7300bc7f303d9 | ++--------------------------------------+-----------------------------------------------------+ + +nova list ++--------------------------------------+------------+--------+------------+-------------+---------------------------+ +| ID | Name | Status | Task State | Power State | Networks | 
++--------------------------------------+------------+--------+------------+-------------+---------------------------+ +| f85da1da-c318-49fb-8da9-c07644400d4c | myinstance | ACTIVE | - | Running | MY_FLAT_NET=192.168.7.231 | ++--------------------------------------+------------+--------+------------+-------------+---------------------------+ + +nova console-log myinstance +--- +...skip +Starting logging: OK +Initializing random number generator... done. +Starting network... +udhcpc (v1.18.5) started +Sending discover... +Sending select for 192.168.7.231... +Lease of 192.168.7.231 obtained, lease time 86400 +deleting routers +...skip + +ping +--- +root@controller:~# ping -c 1 192.168.7.231 +PING 192.168.7.231 (192.168.7.231) 56(84) bytes of data. +64 bytes from 192.168.7.231: icmp_seq=1 ttl=64 time=2.98 ms + +--- 192.168.7.231 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 2.988/2.988/2.988/0.000 ms + +You should also be able to ping the compute or controller or other VMs +(if you start them) from within a VM. 
Pinging targets outside the +subnet requires that you ensure the various interfaces, such as eth0 +have promisc on 'ip link set eth0 promisc on' + +The final Open vSwitch configs +------------------------------ + +Controller +--- +root@controller:~# ovs-vsctl show +524a6c84-226d-427b-8efa-732ed7e7fa43 + Bridge "br-eth0" + Port "eth0" + Interface "eth0" + Port "br-eth0" + Interface "br-eth0" + type: internal + Port "phy-br-eth0" + Interface "phy-br-eth0" + Bridge br-int + Port "tap549fb0c7-1a" + tag: 1 + Interface "tap549fb0c7-1a" + type: internal + Port "int-br-eth0" + Interface "int-br-eth0" + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port br-int + Interface br-int + type: internal + Bridge br-tun + Port "gre-2" + Interface "gre-2" + type: gre + options: {in_key=flow, local_ip="192.168.7.2", out_key=flow, remote_ip="192.168.7.4"} + Port br-tun + Interface br-tun + type: internal + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + ovs_version: "2.0.0" + + +Compute +--- +root@compute:~# ovs-vsctl show +99d365d2-f74e-40a8-b9a0-5bb60353675d + Bridge br-tun + Port "gre-1" + Interface "gre-1" + type: gre + options: {in_key=flow, local_ip="192.168.7.4", out_key=flow, remote_ip="192.168.7.2"} + Port br-tun + Interface br-tun + type: internal + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Bridge br-int + Port br-int + Interface br-int + type: internal + Port "int-br-eth0" + Interface "int-br-eth0" + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port "tap93a74250-ef" + tag: 1 + Interface "tap93a74250-ef" + Bridge "br-eth0" + Port "phy-br-eth0" + Interface "phy-br-eth0" + Port "eth0" + Interface "eth0" + Port "br-eth0" + Interface "br-eth0" + type: internal + ovs_version: "2.0.0" + + +References +---------- +http://developer.rackspace.com/blog/neutron-networking-simple-flat-network.html \ No newline at end of file diff --git 
a/meta-openstack/Documentation/README.networking_l3_router b/meta-openstack/Documentation/README.networking_l3_router new file mode 100644 index 0000000..a16f8c4 --- /dev/null +++ b/meta-openstack/Documentation/README.networking_l3_router @@ -0,0 +1,450 @@ +Networking - l3 router +========================= + +Description +----------- +Using provider networks (such as we did for the flat and vlan use cases) +does not scale to large deployments; their downsides quickly become +apparent. The l3-agent provides the ability to create routers that can +handle routing between directly connected LAN interfaces and a single +WAN interface. + +Here we setup a virtual router with a connection to a provider network +(vlan) and 2 attached subnets. We don't use floating IPs for this +demo. + + +Assumptions +----------- +It is assumed you have completed the steps described in +README.networking and have provisioned the host vSwitch as well as +created the br-eth0 bridges on the controller and compute nodes. + +At this point you should be able to ping 192.168.7.4 from 192.168.7.2 +and vice versa. + +You have built your controller image including the cirros image (for +which you have already added the image to glance as myFirstImage). 
+ +You have run 'source /etc/nova/openrc' + +Configuration updates +--------------------- +On the host Open vSwitch add an IP for 192.168.100.1/22 +sudo ip address add 192.168.100.1/22 broadcast 192.168.255.255 dev br-int + +On the controller and (all) compute nodes you must edit the file +/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini + +In the [OVS] section set +network_vlan_ranges = ph-eth0:1998:1998 +bridge_mappings = ph-eth0:br-eth0 + +(*** on compute nodes edit local_ip as well [192.168.7.4]***) + +Restart some services to allow these changes to take effect: +/etc/init.d/neutron-openvswitch-agent reload +(on controller) +/etc/init.d/neutron-server reload +/etc/init.d/neutron-dhcp-agent reload +(on compute) +/etc/init.d/nova-compute reload + + +** edit /etc/neutron/l3-agent.ini +use_namespaces = True +external_network_bridge = + +/etc/init.d/neutron-l3-agent restart + + +Create the provider network +--------------------------- +neutron net-create --provider:physical_network=ph-eth0 \ + --provider:network_type=vlan --provider:segmentation_id=1998 \ + --shared --router:external=true GATEWAY_NET + +neutron subnet-create GATEWAY_NET 192.168.100.0/22 \ + --name GATEWAY_SUBNET --gateway=192.168.100.1 \ + --allocation-pool start=192.168.101.1,end=192.168.103.254 + + +Create the router +----------------- +neutron router-create NEUTRON-ROUTER +Created a new router: ++-----------------------+--------------------------------------+ +| Field | Value | ++-----------------------+--------------------------------------+ +| admin_state_up | True | +| external_gateway_info | | +| id | b27d1a20-8a31-46d5-bdef-32a5ccf4ec91 | +| name | NEUTRON-ROUTER | +| status | ACTIVE | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++-----------------------+--------------------------------------+ + +neutron router-gateway-set NEUTRON-ROUTER GATEWAY_NET +Set gateway for router NEUTRON-ROUTER + +Inspect the created network namespaces +-------------------------------------- 
+root@controller:~# ip netns +qrouter-b27d1a20-8a31-46d5-bdef-32a5ccf4ec91 +qdhcp-498fa1f2-87de-4874-8ca9-f4ba3e394d2a + +ip netns exec qrouter-b27d1a20-8a31-46d5-bdef-32a5ccf4ec91 ip a +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +2: sit0: mtu 1480 qdisc noop state DOWN group default + link/sit 0.0.0.0 brd 0.0.0.0 +20: qg-19f6d85f-a6: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether fa:16:3e:b8:1e:9d brd ff:ff:ff:ff:ff:ff + inet 192.168.101.1/22 brd 192.168.103.255 scope global qg-19f6d85f-a6 + valid_lft forever preferred_lft forever + inet6 fe80::f816:3eff:feb8:1e9d/64 scope link + valid_lft forever preferred_lft forever + + +Attach tenant networks +---------------------- +neutron net-create --provider:network_type=gre --provider:segmentation_id=10 \ + --shared APPS_NET +Created a new network: ++---------------------------+--------------------------------------+ +| Field | Value | ++---------------------------+--------------------------------------+ +| admin_state_up | True | +| id | 52f4549f-aeed-4fcf-997b-4349f591cd5f | +| name | APPS_NET | +| provider:network_type | gre | +| provider:physical_network | | +| provider:segmentation_id | 10 | +| shared | True | +| status | ACTIVE | +| subnets | | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++---------------------------+--------------------------------------+ + +neutron net-create --provider:network_type=gre --provider:segmentation_id=20 \ + --shared DMZ_NET +Created a new network: ++---------------------------+--------------------------------------+ +| Field | Value | ++---------------------------+--------------------------------------+ +| admin_state_up | True | +| id | eeb07b09-4b4a-4c2c-9060-0b8e414a9279 | +| name | DMZ_NET | +| provider:network_type | gre | +| 
provider:physical_network | | +| provider:segmentation_id | 20 | +| shared | True | +| status | ACTIVE | +| subnets | | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++---------------------------+--------------------------------------+ + +neutron subnet-create APPS_NET 10.241.0.0/22 --name APPS_SUBNET +Created a new subnet: ++------------------+------------------------------------------------+ +| Field | Value | ++------------------+------------------------------------------------+ +| allocation_pools | {"start": "10.241.0.2", "end": "10.241.3.254"} | +| cidr | 10.241.0.0/22 | +| dns_nameservers | | +| enable_dhcp | True | +| gateway_ip | 10.241.0.1 | +| host_routes | | +| id | 45e7d887-1c4c-485a-9247-2a2bec9e3714 | +| ip_version | 4 | +| name | APPS_SUBNET | +| network_id | 52f4549f-aeed-4fcf-997b-4349f591cd5f | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++------------------+------------------------------------------------+ + +neutron subnet-create DMZ_NET 10.242.0.0/22 --name DMZ_SUBNET +Created a new subnet: ++------------------+------------------------------------------------+ +| Field | Value | ++------------------+------------------------------------------------+ +| allocation_pools | {"start": "10.242.0.2", "end": "10.242.3.254"} | +| cidr | 10.242.0.0/22 | +| dns_nameservers | | +| enable_dhcp | True | +| gateway_ip | 10.242.0.1 | +| host_routes | | +| id | 2deda040-be04-432b-baa6-3a2219d22f20 | +| ip_version | 4 | +| name | DMZ_SUBNET | +| network_id | eeb07b09-4b4a-4c2c-9060-0b8e414a9279 | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++------------------+------------------------------------------------+ + +neutron router-interface-add NEUTRON-ROUTER APPS_SUBNET +Added interface 58f3db35-f5df-4fd1-9735-4ff13dd342de to router NEUTRON-ROUTER. + +neutron router-interface-add NEUTRON-ROUTER DMZ_SUBNET +Added interface 9252ec29-7aac-4550-821c-f910f10680cf to router NEUTRON-ROUTER. 
+ +ip netns exec qrouter-b27d1a20-8a31-46d5-bdef-32a5ccf4ec91 ip a +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +2: sit0: mtu 1480 qdisc noop state DOWN group default + link/sit 0.0.0.0 brd 0.0.0.0 +20: qg-19f6d85f-a6: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether fa:16:3e:b8:1e:9d brd ff:ff:ff:ff:ff:ff + inet 192.168.101.1/22 brd 192.168.103.255 scope global qg-19f6d85f-a6 + valid_lft forever preferred_lft forever + inet6 fe80::f816:3eff:feb8:1e9d/64 scope link + valid_lft forever preferred_lft forever +21: qr-58f3db35-f5: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether fa:16:3e:76:ec:23 brd ff:ff:ff:ff:ff:ff + inet 10.241.0.1/22 brd 10.241.3.255 scope global qr-58f3db35-f5 + valid_lft forever preferred_lft forever + inet6 fe80::f816:3eff:fe76:ec23/64 scope link + valid_lft forever preferred_lft forever +22: qr-9252ec29-7a: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether fa:16:3e:fb:98:06 brd ff:ff:ff:ff:ff:ff + inet 10.242.0.1/22 brd 10.242.3.255 scope global qr-9252ec29-7a + valid_lft forever preferred_lft forever + inet6 fe80::f816:3eff:fefb:9806/64 scope link + valid_lft forever preferred_lft forever + +Note the two new interfaces. 
+1 connection to the provider network +2 connections to the subnets (1 to APPS_SUBNET, 1 to DMZ_SUBNET) + +Boot an instance +--------------- +nova boot --flavor=m1.small --image=myFirstImage \ + --nic net-id=52f4549f-aeed-4fcf-997b-4349f591cd5f APPS_INSTANCE ++--------------------------------------+-----------------------------------------------------+ +| Property | Value | ++--------------------------------------+-----------------------------------------------------+ +| OS-DCF:diskConfig | MANUAL | +| OS-EXT-AZ:availability_zone | nova | +| OS-EXT-SRV-ATTR:host | - | +| OS-EXT-SRV-ATTR:hypervisor_hostname | - | +| OS-EXT-SRV-ATTR:instance_name | instance-0000000e | +| OS-EXT-STS:power_state | 0 | +| OS-EXT-STS:task_state | scheduling | +| OS-EXT-STS:vm_state | building | +| OS-SRV-USG:launched_at | - | +| OS-SRV-USG:terminated_at | - | +| accessIPv4 | | +| accessIPv6 | | +| adminPass | jdLkr4i6ATvQ | +| config_drive | | +| created | 2014-04-10T16:27:31Z | +| flavor | m1.small (2) | +| hostId | | +| id | fc849bb9-54d3-4a9a-99a4-6346a6eef404 | +| image | myFirstImage (f22d3ab8-96a5-46db-a029-7d59156c8e31) | +| key_name | - | +| metadata | {} | +| name | APPS_INSTANCE | +| os-extended-volumes:volumes_attached | [] | +| progress | 0 | +| security_groups | default | +| status | BUILD | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | +| updated | 2014-04-10T16:27:31Z | +| user_id | 1dfcb72ef6a7428d8dd7300bc7f303d9 | ++--------------------------------------+-----------------------------------------------------+ + +nova boot --flavor=m1.small --image=myFirstImage \ + --nic net-id=eeb07b09-4b4a-4c2c-9060-0b8e414a9279 DMZ_INSTANCE ++--------------------------------------+-----------------------------------------------------+ +| Property | Value | ++--------------------------------------+-----------------------------------------------------+ +| OS-DCF:diskConfig | MANUAL | +| OS-EXT-AZ:availability_zone | nova | +| OS-EXT-SRV-ATTR:host | - | +| 
OS-EXT-SRV-ATTR:hypervisor_hostname | - | +| OS-EXT-SRV-ATTR:instance_name | instance-0000000f | +| OS-EXT-STS:power_state | 0 | +| OS-EXT-STS:task_state | scheduling | +| OS-EXT-STS:vm_state | building | +| OS-SRV-USG:launched_at | - | +| OS-SRV-USG:terminated_at | - | +| accessIPv4 | | +| accessIPv6 | | +| adminPass | 4d7UsUJhSpBd | +| config_drive | | +| created | 2014-04-10T16:29:25Z | +| flavor | m1.small (2) | +| hostId | | +| id | f281c349-d49c-4d6c-bf56-74f04f2e8aec | +| image | myFirstImage (f22d3ab8-96a5-46db-a029-7d59156c8e31) | +| key_name | - | +| metadata | {} | +| name | DMZ_INSTANCE | +| os-extended-volumes:volumes_attached | [] | +| progress | 0 | +| security_groups | default | +| status | BUILD | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | +| updated | 2014-04-10T16:29:25Z | +| user_id | 1dfcb72ef6a7428d8dd7300bc7f303d9 | ++--------------------------------------+-----------------------------------------------------+ + +Check connectivity +------------------ +nova console-log APPS_INSTANCE +...skip +Starting network... +udhcpc (v1.18.5) started +Sending discover... +Sending select for 10.241.0.2... +Lease of 10.241.0.2 obtained, lease time 86400 +..skip + +nova console-log DMZ_INSTANCE +...skip +Starting network... +udhcpc (v1.18.5) started +Sending discover... +Sending select for 10.242.0.2... 
+Lease of 10.242.0.2 obtained, lease time 86400 +...skip + +root@controller:~# nova list ++--------------------------------------+---------------+--------+------------+-------------+---------------------+ +| ID | Name | Status | Task State | Power State | Networks | ++--------------------------------------+---------------+--------+------------+-------------+---------------------+ +| fc849bb9-54d3-4a9a-99a4-6346a6eef404 | APPS_INSTANCE | ACTIVE | - | Running | APPS_NET=10.241.0.2 | +| f281c349-d49c-4d6c-bf56-74f04f2e8aec | DMZ_INSTANCE | ACTIVE | - | Running | DMZ_NET=10.242.0.2 | ++--------------------------------------+---------------+--------+------------+-------------+---------------------+ + + +ping +--- +Since we are not using floating IPs you will only be able ping from inside the route namespace + +# ip netns exec qrouter-b27d1a20-8a31-46d5-bdef-32a5ccf4ec91 \ + ping 10.241.0.2 -c 1 +PING 10.241.0.2 (10.241.0.2) 56(84) bytes of data. +64 bytes from 10.241.0.2: icmp_seq=1 ttl=64 time=6.32 ms + +--- 10.241.0.2 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 6.328/6.328/6.328/0.000 ms + +# ping 10.241.0.2 -c 1 +connect: Network is unreachable + + +The final Open vSwitch configs +------------------------------ + +Controller +--- +root@controller:~# ovs-vsctl show +524a6c84-226d-427b-8efa-732ed7e7fa43 + Bridge "br-eth0" + Port "eth0" + Interface "eth0" + Port "br-eth0" + Interface "br-eth0" + type: internal + Port "phy-br-eth0" + Interface "phy-br-eth0" + Bridge br-tun + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Port "gre-2" + Interface "gre-2" + type: gre + options: {in_key=flow, local_ip="192.168.7.2", out_key=flow, remote_ip="192.168.7.4"} + Port br-tun + Interface br-tun + type: internal + Bridge br-int + Port "qr-58f3db35-f5" + tag: 2 + Interface "qr-58f3db35-f5" + type: internal + Port "tap6e65f2e5-39" + tag: 3 + Interface "tap6e65f2e5-39" + type: internal + Port 
"qr-9252ec29-7a" + tag: 3 + Interface "qr-9252ec29-7a" + type: internal + Port "int-br-eth0" + Interface "int-br-eth0" + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port "tapcf2a0e68-6b" + tag: 2 + Interface "tapcf2a0e68-6b" + type: internal + Port br-int + Interface br-int + type: internal + Port "qg-19f6d85f-a6" + tag: 1 + Interface "qg-19f6d85f-a6" + type: internal + ovs_version: "2.0.0" + + +Compute +--- +root@compute:~# ovs-vsctl show +99d365d2-f74e-40a8-b9a0-5bb60353675d + Bridge br-int + Port br-int + Interface br-int + type: internal + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port "tapc2db0bfa-ae" + tag: 1 + Interface "tapc2db0bfa-ae" + Port "tap57fae225-16" + tag: 2 + Interface "tap57fae225-16" + Port "int-br-eth0" + Interface "int-br-eth0" + Bridge "br-eth0" + Port "eth0" + Interface "eth0" + Port "phy-br-eth0" + Interface "phy-br-eth0" + Port "br-eth0" + Interface "br-eth0" + type: internal + Bridge br-tun + Port br-tun + Interface br-tun + type: internal + Port "gre-1" + Interface "gre-1" + type: gre + options: {in_key=flow, local_ip="192.168.7.4", out_key=flow, remote_ip="192.168.7.2"} + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + ovs_version: "2.0.0" + + +References +---------- +http:// developer.rackspace.com/blog/neutron-networking-l3-agent.html \ No newline at end of file diff --git a/meta-openstack/Documentation/README.networking_vlan b/meta-openstack/Documentation/README.networking_vlan new file mode 100644 index 0000000..6d48e2b --- /dev/null +++ b/meta-openstack/Documentation/README.networking_vlan @@ -0,0 +1,382 @@ +Networking - VLAN network +========================= + +Description +----------- +The vlan network will have the VMs on one of two vlan networks +(DMZ_SUBNET - 172.16.0.0/24, INSIDE_SUBNET - 192.168.100.0/241). We +will continue to use the management network (192.168.7.0/24) for +controller/compute communications. 
At this point you should be able to ping 192.168.7.4 from 192.168.7.2 +and vice versa. + +You have built your controller image including the cirros image (for +which you have already added the image to glance as myFirstImage). + +You have run 'source /etc/nova/openrc' + +Configuration updates +--------------------- +On the host Open vSwitch add an IP for 192.168.100.1/24
INSIDE_NET | +| provider:network_type | vlan | +| provider:physical_network | ph-eth0 | +| provider:segmentation_id | 200 | +| shared | True | +| status | ACTIVE | +| subnets | | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++---------------------------+--------------------------------------+ + +neutron net-create --provider:physical_network=ph-eth0 \ + --provider:network_type=vlan --provider:segmentation_id=300 \ + --shared DMZ_NET +Created a new network: ++---------------------------+--------------------------------------+ +| Field | Value | ++---------------------------+--------------------------------------+ +| admin_state_up | True | +| id | 498fa1f2-87de-4874-8ca9-f4ba3e394d2a | +| name | DMZ_NET | +| provider:network_type | vlan | +| provider:physical_network | ph-eth0 | +| provider:segmentation_id | 300 | +| shared | True | +| status | ACTIVE | +| subnets | | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++---------------------------+--------------------------------------+ + +neutron subnet-create INSIDE_NET 192.168.100.0/24 \ + --name INSIDE_SUBNET --no-gateway \ + --host-route destination=0.0.0.0/0,nexthop=192.168.100.1 \ + --allocation-pool start=192.168.100.100,end=192.168.100.199 +Created a new subnet: ++------------------+----------------------------------------------------------+ +| Field | Value | ++------------------+----------------------------------------------------------+ +| allocation_pools | {"start": "192.168.100.100", "end": "192.168.100.199"} | +| cidr | 192.168.100.0/24 | +| dns_nameservers | | +| enable_dhcp | True | +| gateway_ip | | +| host_routes | {"destination": "0.0.0.0/0", "nexthop": "192.168.100.1"} | +| id | 2c1a77aa-614c-4a97-9855-a62bb4b4d899 | +| ip_version | 4 | +| name | INSIDE_SUBNET | +| network_id | 587e29d0-eb89-4c0d-948b-845009380097 | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++------------------+----------------------------------------------------------+ + +neutron subnet-create DMZ_NET 172.16.0.0/24 
--name DMZ_SUBNET \ + --no-gateway --host-route destination=0.0.0.0/0,nexthop=172.16.0.1 \ + --allocation-pool start=172.16.0.100,end=172.16.0.199 +Created a new subnet: ++------------------+-------------------------------------------------------+ +| Field | Value | ++------------------+-------------------------------------------------------+ +| allocation_pools | {"start": "172.16.0.100", "end": "172.16.0.199"} | +| cidr | 172.16.0.0/24 | +| dns_nameservers | | +| enable_dhcp | True | +| gateway_ip | | +| host_routes | {"destination": "0.0.0.0/0", "nexthop": "172.16.0.1"} | +| id | bfae1a19-e15f-4e5e-94f2-018f24abbc2e | +| ip_version | 4 | +| name | DMZ_SUBNET | +| network_id | 498fa1f2-87de-4874-8ca9-f4ba3e394d2a | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | ++------------------+-------------------------------------------------------+ + + +Boot the image and test connectivity +------------------------------------ +(note with our current config you might only be able to run 2 instances at + any one time, so you will end up juggling them to test connectivity) + +nova boot --flavor=m1.small --image=myFirstImage \ + --nic net-id=587e29d0-eb89-4c0d-948b-845009380097 INSIDE_INSTANCE ++--------------------------------------+-----------------------------------------------------+ +| Property | Value | ++--------------------------------------+-----------------------------------------------------+ +| OS-DCF:diskConfig | MANUAL | +| OS-EXT-AZ:availability_zone | nova | +| OS-EXT-SRV-ATTR:host | - | +| OS-EXT-SRV-ATTR:hypervisor_hostname | - | +| OS-EXT-SRV-ATTR:instance_name | instance-00000009 | +| OS-EXT-STS:power_state | 0 | +| OS-EXT-STS:task_state | scheduling | +| OS-EXT-STS:vm_state | building | +| OS-SRV-USG:launched_at | - | +| OS-SRV-USG:terminated_at | - | +| accessIPv4 | | +| accessIPv6 | | +| adminPass | 7itgDwsdY8d4 | +| config_drive | | +| created | 2014-04-10T14:31:21Z | +| flavor | m1.small (2) | +| hostId | | +| id | 
630affe0-d497-4211-87bb-383254d60428 | +| image | myFirstImage (f22d3ab8-96a5-46db-a029-7d59156c8e31) | +| key_name | - | +| metadata | {} | +| name | INSIDE_INSTANCE | +| os-extended-volumes:volumes_attached | [] | +| progress | 0 | +| security_groups | default | +| status | BUILD | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | +| updated | 2014-04-10T14:31:21Z | +| user_id | 1dfcb72ef6a7428d8dd7300bc7f303d9 | ++--------------------------------------+-----------------------------------------------------+ + +nova boot --flavor=m1.small --image=myFirstImage \ + --nic net-id=587e29d0-eb89-4c0d-948b-845009380097 INSIDE_INSTANCE2 ++--------------------------------------+-----------------------------------------------------+ +| Property | Value | ++--------------------------------------+-----------------------------------------------------+ +| OS-DCF:diskConfig | MANUAL | +| OS-EXT-AZ:availability_zone | nova | +| OS-EXT-SRV-ATTR:host | - | +| OS-EXT-SRV-ATTR:hypervisor_hostname | - | +| OS-EXT-SRV-ATTR:instance_name | instance-0000000a | +| OS-EXT-STS:power_state | 0 | +| OS-EXT-STS:task_state | scheduling | +| OS-EXT-STS:vm_state | building | +| OS-SRV-USG:launched_at | - | +| OS-SRV-USG:terminated_at | - | +| accessIPv4 | | +| accessIPv6 | | +| adminPass | BF9p6tftS2xJ | +| config_drive | | +| created | 2014-04-10T14:32:07Z | +| flavor | m1.small (2) | +| hostId | | +| id | ff94ee07-ae24-4785-9d51-26de2c23da60 | +| image | myFirstImage (f22d3ab8-96a5-46db-a029-7d59156c8e31) | +| key_name | - | +| metadata | {} | +| name | INSIDE_INSTANCE2 | +| os-extended-volumes:volumes_attached | [] | +| progress | 0 | +| security_groups | default | +| status | BUILD | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | +| updated | 2014-04-10T14:32:08Z | +| user_id | 1dfcb72ef6a7428d8dd7300bc7f303d9 | ++--------------------------------------+-----------------------------------------------------+ + +root@controller:~# nova list 
++--------------------------------------+------------------+--------+------------+-------------+----------------------------+ +| ID | Name | Status | Task State | Power State | Networks | ++--------------------------------------+------------------+--------+------------+-------------+----------------------------+ +| 630affe0-d497-4211-87bb-383254d60428 | INSIDE_INSTANCE | ACTIVE | - | Running | INSIDE_NET=192.168.100.100 | +| ff94ee07-ae24-4785-9d51-26de2c23da60 | INSIDE_INSTANCE2 | ACTIVE | - | Running | INSIDE_NET=192.168.100.102 | ++--------------------------------------+------------------+--------+------------+-------------+----------------------------+ + +nova boot --flavor=m1.small --image=myFirstImage \ + --nic net-id=498fa1f2-87de-4874-8ca9-f4ba3e394d2a DMZ_INSTANCE ++--------------------------------------+-----------------------------------------------------+ +| Property | Value | ++--------------------------------------+-----------------------------------------------------+ +| OS-DCF:diskConfig | MANUAL | +| OS-EXT-AZ:availability_zone | nova | +| OS-EXT-SRV-ATTR:host | - | +| OS-EXT-SRV-ATTR:hypervisor_hostname | - | +| OS-EXT-SRV-ATTR:instance_name | instance-0000000d | +| OS-EXT-STS:power_state | 0 | +| OS-EXT-STS:task_state | scheduling | +| OS-EXT-STS:vm_state | building | +| OS-SRV-USG:launched_at | - | +| OS-SRV-USG:terminated_at | - | +| accessIPv4 | | +| accessIPv6 | | +| adminPass | SvzSpnmB6mXJ | +| config_drive | | +| created | 2014-04-10T14:42:53Z | +| flavor | m1.small (2) | +| hostId | | +| id | 0dab2712-5f1d-4559-bfa4-d09c6304418c | +| image | myFirstImage (f22d3ab8-96a5-46db-a029-7d59156c8e31) | +| key_name | - | +| metadata | {} | +| name | DMZ_INSTANCE | +| os-extended-volumes:volumes_attached | [] | +| progress | 0 | +| security_groups | default | +| status | BUILD | +| tenant_id | b5890ba3fb234347ae317ca2f8358663 | +| updated | 2014-04-10T14:42:54Z | +| user_id | 1dfcb72ef6a7428d8dd7300bc7f303d9 | 
++--------------------------------------+-----------------------------------------------------+ + +nova boot --flavor=m1.small --image=myFirstImage \ + --nic net-id=498fa1f2-87de-4874-8ca9-f4ba3e394d2a DMZ_INSTANCE2 +... + +nova console-log INSIDE_INSTANCE2 +--- +...skip +Starting network... +udhcpc (v1.18.5) started +Sending discover... +Sending select for 192.168.100.102... +...skip + +ping +--- + +You should also be able to ping instances on the same subnet but not +those on the other subnet. The controller and compute can not ping +instances on either network (with metadata implemented the controller +should be able to, but currently the metadata agent is not available.) + +dump-flows +---------- +(note the 'vlan' tags) +root@compute:~# ovs-ofctl dump-flows br-int +NXST_FLOW reply (xid=0x4): + cookie=0x0, duration=1640.378s, table=0, n_packets=3, n_bytes=788, idle_age=1628, priority=3,in_port=6,dl_vlan=300 actions=mod_vlan_vid:2,NORMAL + cookie=0x0, duration=2332.714s, table=0, n_packets=6, n_bytes=1588, idle_age=2274, priority=3,in_port=6,dl_vlan=200 actions=mod_vlan_vid:1,NORMAL + cookie=0x0, duration=2837.737s, table=0, n_packets=22, n_bytes=1772, idle_age=1663, priority=2,in_port=6 actions=drop + cookie=0x0, duration=2837.976s, table=0, n_packets=53, n_bytes=5038, idle_age=1535, priority=1 actions=NORMAL + + + +The final Open vSwitch configs +------------------------------ + +Controller +--- +root@controller:~# ovs-vsctl show +524a6c84-226d-427b-8efa-732ed7e7fa43 + Bridge br-tun + Port "gre-2" + Interface "gre-2" + type: gre + options: {in_key=flow, local_ip="192.168.7.2", out_key=flow, remote_ip="192.168.7.4"} + Port br-tun + Interface br-tun + type: internal + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Bridge "br-eth0" + Port "eth0" + Interface "eth0" + Port "br-eth0" + Interface "br-eth0" + type: internal + Port "phy-br-eth0" + Interface "phy-br-eth0" + Bridge br-int + Port patch-tun + Interface patch-tun + type: 
patch + options: {peer=patch-int} + Port "tapafbbdd15-e7" + tag: 1 + Interface "tapafbbdd15-e7" + type: internal + Port "int-br-eth0" + Interface "int-br-eth0" + Port "tapa50c1a18-34" + tag: 2 + Interface "tapa50c1a18-34" + type: internal + Port br-int + Interface br-int + type: internal + ovs_version: "2.0.0" + + +Compute +--- +root@compute:~# ovs-vsctl show +99d365d2-f74e-40a8-b9a0-5bb60353675d + Bridge br-tun + Port br-tun + Interface br-tun + type: internal + Port "gre-1" + Interface "gre-1" + type: gre + options: {in_key=flow, local_ip="192.168.7.4", out_key=flow, remote_ip="192.168.7.2"} + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Bridge br-int + Port br-int + Interface br-int + type: internal + Port "int-br-eth0" + Interface "int-br-eth0" + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port "tap78e1ac37-6c" + tag: 2 + Interface "tap78e1ac37-6c" + Port "tap315398a4-cd" + tag: 1 + Interface "tap315398a4-cd" + Bridge "br-eth0" + Port "phy-br-eth0" + Interface "phy-br-eth0" + Port "eth0" + Interface "eth0" + Port "br-eth0" + Interface "br-eth0" + type: internal + ovs_version: "2.0.0" + + +References +---------- +http://developer.rackspace.com/blog/neutron-networking-vlan-provider-networks.html \ No newline at end of file diff --git a/meta-openstack/Documentation/README.spice b/meta-openstack/Documentation/README.spice new file mode 100644 index 0000000..a6b93b2 --- /dev/null +++ b/meta-openstack/Documentation/README.spice @@ -0,0 +1,82 @@ +OpenStack offers two types of console support, VNC support and SPICE. +The VNC protocol is fairly limited, lacking support for multiple monitors, +bi-directional audio, reliable cut+paste, video streaming and more. +SPICE is a new protocol which aims to address all the limitations in VNC, +to provide good remote desktop support. + +The Controller will have both the proxy for vnc and for spice html5 +running. 
This will give you a URL which will directly give you access to the console +(instead of from Horizon). + +To enable or disable VNC/SPICE, on the compute node, modify +/etc/nova/nova.conf.
use nosetests directly
+ 2) all run_tests.sh are provided, even though they are similar + + + + + + -- cgit v1.2.3-54-g00ecf