(Installation guide)



http://docs.openstack.org/newton/install-guide-ubuntu/



 



(Chinese documentation:)



http://docs.openstack.org/zh_CN/



(Glossary:)



http://docs.openstack.org/mitaka/zh_CN/install-guide-ubuntu/common/glossary.html#term-virtual-network-computing-vnc




Prepare the environment


First, set the host names of the controller node and the compute node to controller and compute.
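A minimal /etc/hosts sketch for name resolution between the nodes, assuming the management IPs used later in this guide (192.168.5.1 for the controller, 192.168.5.13 for the compute node); add the entries on every node:

192.168.5.1   controller
192.168.5.13  compute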


 


Update the package sources on all nodes


# apt install software-properties-common


# add-apt-repository cloud-archive:newton


# apt update && apt dist-upgrade


 


The following steps are performed on the controller node


 


1. Install the OpenStack client


# apt install python-openstackclient


 


2. Install the SQL database


# apt install mariadb-server python-pymysql


vi /etc/mysql/mariadb.conf.d/99-openstack.cnf


[mysqld]


bind-address = 192.168.5.1 (the controller node's management-network IP)


 


default-storage-engine = innodb


innodb_file_per_table


max_connections = 4096


collation-server = utf8_general_ci


character-set-server = utf8

(controller node IP: 192.168.5.1)


# service mysql restart


# mysql_secure_installation


(sets the SQL database root password)


 


3. Install the message queue


# apt install rabbitmq-server


# rabbitmqctl add_user openstack RABBIT_PASS (sets the RABBIT_PASS password)


# rabbitmqctl set_permissions openstack ".*" ".*" ".*"


 


4. Install Memcached


# apt install memcached python-memcache


vi /etc/memcached.conf


-l 192.168.5.1  (the controller node's management-network IP)

# service memcached restart


 


Keystone


Main functions: manage users and their permissions; maintain the list of OpenStack services and provide the API endpoints for those services.


Every other OpenStack component must create a user in Keystone and register its endpoints there.


 


1. Create the keystone database:


$ mysql -u root -p


mysql> CREATE DATABASE keystone;


mysql> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';


mysql> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';


(sets the KEYSTONE_DBPASS password)


 


2. Install and configure keystone


# apt install keystone


# vi /etc/keystone/keystone.conf


[database]


connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone


[token]


provider = fernet


(supported providers: `fernet`, `pkiz`, `pki`, `uuid`; fernet is used here as the example)


 


3. Populate the keystone database


# su -s /bin/sh -c "keystone-manage db_sync" keystone


 


4. Initialize the Fernet keys


# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone


# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone


5. Bootstrap the Identity service


# keystone-manage bootstrap --bootstrap-password ADMIN_PASS \


  --bootstrap-admin-url http://controller:35357/v3/ \


  --bootstrap-internal-url http://controller:35357/v3/ \


  --bootstrap-public-url http://controller:5000/v3/ \


  --bootstrap-region-id RegionOne


(sets the ADMIN_PASS password)


 


 


6. Configure the Apache HTTP server


# vi /etc/apache2/apache2.conf


Set ServerName to controller (controller here is the controller node's host name)
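For example, add or edit the following line in /etc/apache2/apache2.conf, assuming the controller node's host name is controller:

ServerName controller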


# service apache2 restart


 


7. Create domains, projects, users, and roles


# rm -f /var/lib/keystone/keystone.db


$ export OS_USERNAME=admin


$ export OS_PASSWORD=ADMIN_PASS


$ export OS_PROJECT_NAME=admin


$ export OS_USER_DOMAIN_NAME=default


$ export OS_PROJECT_DOMAIN_NAME=default


$ export OS_AUTH_URL=http://controller:35357/v3


$ export OS_IDENTITY_API_VERSION=3


 


$ openstack project create --domain default --description "Service Project" service


$ openstack project create --domain default --description "Demo Project" demo


$ openstack user create --domain default --password-prompt demo


(sets the DEMO_PASS password)


$ openstack role create user


$ openstack role add --project demo --user demo user


 


8. Clean up after configuration:


1. For security reasons, disable the temporary admin token authentication mechanism


# vi /etc/keystone/keystone-paste.ini 


Remove admin_token_auth from the [pipeline:public_api], [pipeline:admin_api], and [pipeline:api_v3] sections.
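Illustrative sketch only (the exact filter list in your keystone-paste.ini will be longer and may differ); the edit simply deletes the admin_token_auth entry from each of the three pipeline lines, e.g.:

pipeline = ... admin_token_auth build_auth_context token_auth ... public_service

becomes

pipeline = ... build_auth_context token_auth ... public_service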


2. Unset the temporary credential environment variables


$ unset OS_AUTH_URL OS_PASSWORD


 


9. Verify the installation


1. Request tokens as the admin and demo users to test authentication


$ openstack --os-auth-url http://controller:35357/v3 \


  --os-project-domain-name default --os-user-domain-name default \


  --os-project-name admin --os-username admin token issue


$ openstack --os-auth-url http://controller:5000/v3 \


  --os-project-domain-name default --os-user-domain-name default \


  --os-project-name demo --os-username demo token issue


2. Create and use client environment scripts

admin-openrc

export OS_PROJECT_DOMAIN_NAME=default


export OS_USER_DOMAIN_NAME=default


export OS_PROJECT_NAME=admin


export OS_USERNAME=admin


export OS_PASSWORD=ADMIN_PASS


export OS_AUTH_URL=http://controller:35357/v3


export OS_IDENTITY_API_VERSION=3


export OS_IMAGE_API_VERSION=2

$ . admin-openrc


$ openstack token issue


demo-openrc

export OS_PROJECT_DOMAIN_NAME=default


export OS_USER_DOMAIN_NAME=default


export OS_PROJECT_NAME=demo


export OS_USERNAME=demo


export OS_PASSWORD=DEMO_PASS


export OS_AUTH_URL=http://controller:5000/v3


export OS_IDENTITY_API_VERSION=3


export OS_IMAGE_API_VERSION=2

$ . demo-openrc


$ openstack token issue


 


 


Glance


1. Create the glance database:


$ mysql -u root -p


mysql> CREATE DATABASE glance;


mysql> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';


mysql> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';


(sets the GLANCE_DBPASS password)


 


2. Load the admin credentials


$ . admin-openrc


 


3. Create the user, service, and API endpoints


$ openstack user create --domain default --password-prompt glance (sets the GLANCE_PASS password)


$ openstack role add --project service --user glance admin


$ openstack service create --name glance --description "OpenStack Image" image


$ openstack endpoint create --region RegionOne image public http://controller:9292


$ openstack endpoint create --region RegionOne image internal http://controller:9292


$ openstack endpoint create --region RegionOne image admin http://controller:9292


 


4. Install and configure glance


# apt install glance


# vi /etc/glance/glance-api.conf


[database]


connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance


(controller here is the controller node's host name)


[keystone_authtoken]


auth_uri = http://controller:5000


auth_url = http://controller:35357


memcached_servers = controller:11211


auth_type = password


project_domain_name = default


user_domain_name = default


project_name = service


username = glance


password = GLANCE_PASS


(remove all other content from the [keystone_authtoken] section)


[paste_deploy]


flavor = keystone


[glance_store]


stores = file,http


default_store = file


filesystem_store_datadir = /var/lib/glance/images/


 


# vi /etc/glance/glance-registry.conf


[database]


connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance


[keystone_authtoken]


auth_uri = http://controller:5000


auth_url = http://controller:35357


memcached_servers = controller:11211


auth_type = password


project_domain_name = default


user_domain_name = default


project_name = service


username = glance


password = GLANCE_PASS


(remove all other content from the [keystone_authtoken] section)


[paste_deploy]


flavor = keystone 


 


5. Populate the glance database


# su -s /bin/sh -c "glance-manage db_sync" glance


 


6. Start the glance services


# service glance-registry restart


# service glance-api restart


 


7. Create an image


$ . admin-openrc


$ wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img


$ openstack image create "cirros" \


  --file cirros-0.3.4-x86_64-disk.img \


  --disk-format qcow2 --container-format bare \


  --public


 


$ openstack image list


 


 


 


Nova-Controller


1. Create the nova databases:


$ mysql -u root -p


mysql> CREATE DATABASE nova_api;


mysql> CREATE DATABASE nova;


mysql> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';


mysql> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';


mysql> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';


mysql> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';


 


2. Load the admin credentials


$ . admin-openrc


 


3. Create the user, service, and API endpoints


$ openstack user create --domain default --password-prompt nova  (sets the NOVA_PASS password)


$ openstack role add --project service --user nova admin


$ openstack service create --name nova --description "OpenStack Compute" compute


$ openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s


$ openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s


$ openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s


 


4. Install and configure nova


# apt install nova-api nova-conductor nova-consoleauth nova-novncproxy nova-scheduler


# vi /etc/nova/nova.conf


[api_database]


connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api


[database]


connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova


[DEFAULT]


transport_url = rabbit://openstack:RABBIT_PASS@controller


[DEFAULT]


auth_strategy = keystone


[keystone_authtoken]


auth_uri = http://controller:5000


auth_url = http://controller:35357


memcached_servers = controller:11211


auth_type = password


project_domain_name = default


user_domain_name = default


project_name = service


username = nova


password = NOVA_PASS


(remove all other content from the [keystone_authtoken] section)


[DEFAULT]


my_ip = 192.168.5.1 (the controller node's management-network IP)


[DEFAULT]


use_neutron = True


firewall_driver = nova.virt.firewall.NoopFirewallDriver


[vnc]


vncserver_listen = $my_ip


vncserver_proxyclient_address = $my_ip


[glance]


api_servers = http://controller:9292


[oslo_concurrency]


lock_path = /var/lib/nova/tmp


 


5. Populate the nova databases


# su -s /bin/sh -c "nova-manage api_db sync" nova


# su -s /bin/sh -c "nova-manage db sync" nova


 


6. Start the nova services


# service nova-api restart


# service nova-consoleauth restart


# service nova-scheduler restart


# service nova-conductor restart


# service nova-novncproxy restart


 


 


Nova-Compute


 


 


# apt install nova-compute


# vi /etc/nova/nova.conf


[DEFAULT]


transport_url = rabbit://openstack:RABBIT_PASS@controller


[DEFAULT]


auth_strategy = keystone


[keystone_authtoken]


auth_uri = http://controller:5000


auth_url = http://controller:35357


memcached_servers = controller:11211


auth_type = password


project_domain_name = default


user_domain_name = default


project_name = service


username = nova


password = NOVA_PASS


(remove all other content from the [keystone_authtoken] section)


[DEFAULT]


my_ip = 192.168.5.13 (the compute node's management-network IP)


[DEFAULT]


use_neutron = True


firewall_driver = nova.virt.firewall.NoopFirewallDriver


[vnc]


enabled = True


vncserver_listen = 0.0.0.0


vncserver_proxyclient_address = $my_ip


novncproxy_base_url = http://controller:6080/vnc_auto.html


[glance]


api_servers = http://controller:9292


[oslo_concurrency]


lock_path = /var/lib/nova/tmp


 


# service nova-compute restart


(On the controller node, with the admin credentials loaded, run $ openstack compute service list to verify that this compute node is listed.)


 


 


Neutron-Controller


1. Create the neutron database:


$ mysql -u root -p


mysql> CREATE DATABASE neutron;


mysql> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS';


mysql> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS';


 


2. Load the admin credentials


$ . admin-openrc


 


3. Create the user, service, and API endpoints


$ openstack user create --domain default --password-prompt neutron    (sets the NEUTRON_PASS password)


$ openstack role add --project service --user neutron admin


$ openstack service create --name neutron --description "OpenStack Networking" network


$ openstack endpoint create --region RegionOne network public http://controller:9696


$ openstack endpoint create --region RegionOne network internal http://controller:9696


$ openstack endpoint create --region RegionOne network admin http://controller:9696


 


4. Networking option 1: provider networks (this is the only step that differs from option 2, self-service networks)


# apt install neutron-server neutron-plugin-ml2 \


  neutron-linuxbridge-agent neutron-dhcp-agent \


  neutron-metadata-agent


 


# vi /etc/neutron/neutron.conf


[database]


connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron


[DEFAULT]


core_plugin = ml2


service_plugins =


[DEFAULT]


transport_url = rabbit://openstack:RABBIT_PASS@controller


[DEFAULT]


auth_strategy = keystone


[keystone_authtoken]


auth_uri = http://controller:5000


auth_url = http://controller:35357


memcached_servers = controller:11211


auth_type = password


project_domain_name = default


user_domain_name = default


project_name = service


username = neutron


password = NEUTRON_PASS


[DEFAULT]


notify_nova_on_port_status_changes = True


notify_nova_on_port_data_changes = True


[nova]


auth_url = http://controller:35357


auth_type = password


project_domain_name = default


user_domain_name = default


region_name = RegionOne


project_name = service


username = nova


password = NOVA_PASS


 


# vi /etc/neutron/plugins/ml2/ml2_conf.ini


[ml2]


type_drivers = flat,vlan


[ml2]


tenant_network_types =


[ml2]


mechanism_drivers = linuxbridge


[ml2]


extension_drivers = port_security


[ml2_type_flat]


flat_networks = provider


[securitygroup]


enable_ipset = True


 


# vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini 


[linux_bridge]


physical_interface_mappings = provider:enp2s0 (enp2s0 is the provider physical network interface)


[vxlan]


enable_vxlan = False


[securitygroup]


enable_security_group = True


firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver


 


# vi /etc/neutron/dhcp_agent.ini


 [DEFAULT]


interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver


dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq


enable_isolated_metadata = True


 


5. Configure the metadata agent


# vi /etc/neutron/metadata_agent.ini


[DEFAULT]


nova_metadata_ip = controller


metadata_proxy_shared_secret = METADATA_SECRET  (replace METADATA_SECRET with a secret of your choice; the same value must be configured in nova.conf below)


 


6. Configure the Compute service to use the Networking service


# vi /etc/nova/nova.conf


[neutron]


url = http://controller:9696


auth_url = http://controller:35357


auth_type = password


project_domain_name = default


user_domain_name = default


region_name = RegionOne


project_name = service


username = neutron


password = NEUTRON_PASS


service_metadata_proxy = True


metadata_proxy_shared_secret = METADATA_SECRET


 


7. Populate the neutron database


# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \


  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron


 


8. Start the neutron services


# service nova-api restart


# service neutron-server restart


# service neutron-linuxbridge-agent restart


# service neutron-dhcp-agent restart


# service neutron-metadata-agent restart


(for self-service networks, the following additional service must also be started)


# service neutron-l3-agent restart 


 


Neutron-Compute


 


1. Install and configure the common components

# apt install neutron-linuxbridge-agent


# vi /etc/neutron/neutron.conf


[database]


(comment out the connection option; the compute node does not need direct database access)


[DEFAULT]


transport_url = rabbit://openstack:RABBIT_PASS@controller


[DEFAULT]


auth_strategy = keystone


[keystone_authtoken]


auth_uri = http://controller:5000


auth_url = http://controller:35357


memcached_servers = controller:11211


auth_type = password


project_domain_name = default


user_domain_name = default


project_name = service


username = neutron


password = NEUTRON_PASS


(remove all other content from the [keystone_authtoken] section)


 


2. Networking option 1: provider networks (this is the only step that differs from option 2, self-service networks)


# vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini


[linux_bridge]


physical_interface_mappings = provider:enp2s0 (enp2s0 is the provider physical network interface)


[vxlan]


enable_vxlan = False


[securitygroup]


enable_security_group = True


firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver


 


3. Configure the Compute service to use the Networking service


# vi  /etc/nova/nova.conf


[neutron]


url = http://controller:9696


auth_url = http://controller:35357


auth_type = password


project_domain_name = default


user_domain_name = default


region_name = RegionOne


project_name = service


username = neutron


password = NEUTRON_PASS


 


4. Restart the services


# service nova-compute restart


# service neutron-linuxbridge-agent restart


 


(On the controller node, with the admin credentials loaded, run $ neutron ext-list to list the loaded extensions and verify that the neutron-server process started correctly.)


 


 


With keystone, glance, nova, and neutron in place, your OpenStack environment now contains the core components required to launch a basic instance. You can continue with the "Launch an instance" section below, or add more OpenStack services to your environment.


If Ceph is used as the storage backend, install cinder before creating instances; otherwise, file-system-backed instances can be created right away.


 


 


 


Cinder-Controller


1. Create the cinder database:


$ mysql -u root -p


mysql> CREATE DATABASE cinder;


mysql> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'CINDER_DBPASS';


mysql> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'CINDER_DBPASS';


 


2. Load the admin credentials


$ . admin-openrc


 


3. Create the user, services, and API endpoints


$ openstack user create --domain default --password-prompt cinder  (sets the CINDER_PASS password)


$ openstack role add --project service --user cinder admin


$ openstack service create --name cinder \


  --description "OpenStack Block Storage" volume


$ openstack service create --name cinderv2 \


  --description "OpenStack Block Storage" volumev2


$ openstack endpoint create --region RegionOne \


  volume public http://controller:8776/v1/%\(tenant_id\)s


$ openstack endpoint create --region RegionOne \


  volume internal http://controller:8776/v1/%\(tenant_id\)s


$ openstack endpoint create --region RegionOne \


  volume admin http://controller:8776/v1/%\(tenant_id\)s


$ openstack endpoint create --region RegionOne \


  volumev2 public http://controller:8776/v2/%\(tenant_id\)s


$ openstack endpoint create --region RegionOne \


  volumev2 internal http://controller:8776/v2/%\(tenant_id\)s


$ openstack endpoint create --region RegionOne \


  volumev2 admin http://controller:8776/v2/%\(tenant_id\)s


 


4. Install and configure cinder


# apt install cinder-api cinder-scheduler


# vi /etc/cinder/cinder.conf


[database]


connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder


[DEFAULT]


transport_url = rabbit://openstack:RABBIT_PASS@controller


[DEFAULT]


auth_strategy = keystone


[keystone_authtoken]


auth_uri = http://controller:5000


auth_url = http://controller:35357


memcached_servers = controller:11211


auth_type = password


project_domain_name = default


user_domain_name = default


project_name = service


username = cinder


password = CINDER_PASS


(remove all other content from the [keystone_authtoken] section)


[DEFAULT]


my_ip = 192.168.5.1  (the controller node's management-network IP)


[oslo_concurrency]


lock_path = /var/lib/cinder/tmp


 


5. Populate the cinder database


# su -s /bin/sh -c "cinder-manage db sync" cinder


 


6. Configure nova on the controller node to use Block Storage


# vi /etc/nova/nova.conf


[cinder]


os_region_name = RegionOne


 


7. Restart the nova and cinder services


# service nova-api restart


# service cinder-scheduler restart


# service cinder-api restart


 


Cinder-Storage


 


1. Create a volume group named cinder-volumes
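A minimal sketch, assuming the backing disk on the storage node is /dev/sdb (replace it with the actual device):

# apt install lvm2

# pvcreate /dev/sdb

# vgcreate cinder-volumes /dev/sdb

The official guide additionally recommends adjusting the filter in /etc/lvm/lvm.conf so that LVM only scans the devices backing cinder-volumes.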



 


2. Install and configure cinder-volume


# apt install cinder-volume


# vi /etc/cinder/cinder.conf


 [database]


connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder


[DEFAULT]


transport_url = rabbit://openstack:RABBIT_PASS@controller


[DEFAULT]


auth_strategy = keystone


[keystone_authtoken]


auth_uri = http://controller:5000


auth_url = http://controller:35357


memcached_servers = controller:11211


auth_type = password


project_domain_name = default


user_domain_name = default


project_name = service


username = cinder


password = CINDER_PASS


[DEFAULT]


my_ip = 192.168.5.13  (the storage node's management-network IP)


[oslo_concurrency]


lock_path = /var/lib/cinder/tmp


(The settings above are identical to those on the controller node except for the IP; the storage node additionally needs the following.)


[lvm1]


volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver


volume_group = cinder-volumes


iscsi_protocol = iscsi


iscsi_helper = tgtadm


[DEFAULT]


enabled_backends = lvm1  (lvm1 is the backend section defined above; open question: can the backend name be the same on different nodes?)


[DEFAULT]


glance_api_servers = http://controller:9292


 


3. Start the cinder-volume service


# service tgt restart


# service cinder-volume restart


 


4. Check the volume service status


# cinder service-list   (or: openstack volume service list)


+------------------+-----------------+------+---------+-------+----------------------------+-----------------+


| Binary           | Host            | Zone | Status  | State | Updated_at                 | Disabled Reason |


+------------------+-----------------+------+---------+-------+----------------------------+-----------------+


| cinder-scheduler | controller      | nova | enabled | up    | 2016-12-26T02:14:51.000000 | -               |


| cinder-volume    | compute1@lvm2   | nova | enabled | up    | 2016-12-26T02:14:54.000000 | -               |


| cinder-volume    | compute2@lvm3   | nova | enabled | up    | 2016-12-26T02:14:50.000000 | -               |


| cinder-volume    | controller@lvm1 | nova | enabled | up    | 2016-12-26T02:14:52.000000 | -               |


+------------------+-----------------+------+---------+-------+----------------------------+-----------------+


 


5. Notes on the configuration


1. Cinder runs one cinder-volume service for each backend.


2. Setting storage_availability_zone = az1 in cinder.conf assigns the cinder-volume host to that availability zone. Users can choose an AZ when creating a volume, and together with the cinder-scheduler AvailabilityZoneFilter this places the volume in the specified AZ; see the sketch below. By default the zone is nova.
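A minimal sketch of how this is used (the zone name az1, the volume size, and the volume name are illustrative, not part of the original setup). On the storage node, in /etc/cinder/cinder.conf:

[DEFAULT]

storage_availability_zone = az1

Restart cinder-volume, then create a volume in that zone:

$ openstack volume create --size 1 --availability-zone az1 test-volume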


 


Launch an instance


 


1. Create the virtual network (option 1)


(on the controller node)


$ . admin-openrc


$ openstack network create  --share \


  --provider-physical-network provider \


  --provider-network-type flat provider


$ openstack subnet create --network provider \


  --allocation-pool start=192.168.5.10,end=192.168.5.100 \


  --dns-nameserver 219.146.1.66 --gateway 192.168.5.1 \


  --subnet-range 192.168.5.0/24 provider


 


2. Create the m1.nano flavor


$ openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano


 


3. Generate a key pair


$ . demo-openrc


$ ssh-keygen -q -N ""


$ openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey


$ openstack keypair list


 


4. Add security group rules


$ openstack security group rule create --proto icmp default  (allow ICMP (ping) in the default security group)


$ openstack security group rule create --proto tcp --dst-port 22 default (allow secure shell (SSH) in the default security group)


 


5. Launch an instance (option 1 networking)


1. Verify that the environment is in order


$ . demo-openrc


$ openstack flavor list


$ openstack image list


$ openstack network list


$ openstack security group list


2. Create an instance using the m1.nano flavor


$ openstack server create --flavor m1.nano --image cirros \


  --nic net-id=56708ee8-b6c7-4112-b3d1-231bd8db659f --security-group default \


  --key-name mykey instance-a


(the net-id comes from openstack network list; instance-a is an instance name of your choosing)


3. List the existing instances


$ openstack server list


(Once the build completes successfully, the status changes from BUILD to ACTIVE, and the corresponding qemu process can be found on the compute node.)


 


6. Access an instance


1. Get the console URL of an instance (for example, the instance-a instance created above)


$ openstack console url show 5b08017b-00d4-4476-9380-4f5b6165c6d7


+-------+---------------------------------------------------------------------------------+


| Field | Value                                                                           |


+-------+---------------------------------------------------------------------------------+


| type  | novnc                                                                           |


| url   | http://controller:6080/vnc_auto.html?token=6643713d-f4c8-411c-ac9e-2c5b5a419935 |


+-------+---------------------------------------------------------------------------------+


(5b08017b-00d4-4476-9380-4f5b6165c6d7 is the ID of the instance to access, obtained with openstack server list)


2. Access the instance in a browser


(Open http://controller:6080/vnc_auto.html?token=6643713d-f4c8-411c-ac9e-2c5b5a419935 in a browser to reach the virtual machine, provided the controller host name can be resolved; otherwise replace it with the IP address.)


 


3. Access an instance over SSH


root@controller:~# ssh cirros@192.168.5.17


$ uname -a


Linux instance-a 3.2.0-80-virtual #116-Ubuntu SMP Mon Mar 23 17:28:52 UTC 2015 x86_64 GNU/Linux


Create an image

. admin-openrc


openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public


openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.raw --disk-format raw      --container-format bare --public

Create a flavor

. admin-openrc


openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano

Create networks

Create the provider network


# . admin-openrc


# openstack network create  --share --provider-physical-network provider --provider-network-type flat provider


# openstack subnet create --network provider   --allocation-pool start=192.168.5.10,end=192.168.5.100   --dns-nameserver 219.146.1.66 --gateway 192.168.5.254   --subnet-range 192.168.5.0/24 provider


# neutron net-update provider --router:external


 


Create the self-service network


# openstack network create selfservice


# openstack subnet create --network selfservice --dns-nameserver 219.146.1.66 --gateway 172.16.1.1 --subnet-range 172.16.1.0/24  selfservice


 


Create a router


# openstack router create router 


 


Add the self-service subnet to the router and set the router's external gateway


# neutron router-interface-add router selfservice


# neutron router-gateway-set router provider


 

Create a virtual machine

nova boot --flavor m1.nano --image cirros  --nic net-id=2a1132f6-d3e8-4842-a200-a17dab5be38c instance-a



 


 


Swift-Controller


1. Create the swift database:


(swift does not need a SQL database)


 


2. Load the admin credentials


$ . admin-openrc


 


3. Create the user, service, and API endpoints


$ openstack user create --domain default --password-prompt swift (sets the SWIFT_PASS password)


$ openstack role add --project service --user swift admin


$ openstack service create --name swift --description "OpenStack Object Storage" object-store


$ openstack endpoint create --region RegionOne object-store public http://controller:8080/v1/AUTH_%\(tenant_id\)s


$ openstack endpoint create --region RegionOne object-store internal http://controller:8080/v1/AUTH_%\(tenant_id\)s


$ openstack endpoint create --region RegionOne object-store admin http://controller:8080/v1


 


4. Install and configure the swift proxy


# apt-get install swift swift-proxy python-swiftclient python-keystoneclient python-keystonemiddleware memcached


# mkdir /etc/swift


# curl -o /etc/swift/proxy-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/proxy-server.conf-sample?h=stable/newton


# vi  /etc/swift/proxy-server.conf


[DEFAULT]


bind_port = 8080


user = swift


swift_dir = /etc/swift


[pipeline:main]


pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server


[app:proxy-server]


use = egg:swift#proxy


account_autocreate = True


[filter:keystoneauth]


use = egg:swift#keystoneauth


operator_roles = admin,user


[filter:authtoken]


paste.filter_factory = keystonemiddleware.auth_token:filter_factory


auth_uri = http://controller:5000


auth_url = http://controller:35357


memcached_servers = controller:11211


auth_type = password


project_domain_name = default


user_domain_name = default


project_name = service


username = swift


password = SWIFT_PASS


delay_auth_decision = True


(remove all other content from the [filter:authtoken] section)


[filter:cache]


use = egg:swift#memcache


memcache_servers = controller:11211


 


Swift-Storage


 


1. Configure rsync


(The mounted disk device name and the name of its directory under /srv/node must match; that name is used as the --device value when building the rings.)
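A sketch of the rsync setup following the official guide, assuming the storage node's management IP is 192.168.105.221 (the address used when building the rings below); adjust it on each storage node:

# apt install rsync

# vi /etc/rsyncd.conf

uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = 192.168.105.221

[account]
max connections = 2
path = /srv/node/
read only = False
lock file = /var/lock/account.lock

[container]
max connections = 2
path = /srv/node/
read only = False
lock file = /var/lock/container.lock

[object]
max connections = 2
path = /srv/node/
read only = False
lock file = /var/lock/object.lock

Then enable and start the service:

# vi /etc/default/rsync   (set RSYNC_ENABLE=true)

# service rsync start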


2. Install and configure swift


# apt-get install swift swift-account swift-container swift-object


# curl -o /etc/swift/account-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/account-server.conf-sample?h=stable/newton


# curl -o /etc/swift/container-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/container-server.conf-sample?h=stable/newton


# curl -o /etc/swift/object-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/object-server.conf-sample?h=stable/newton


 


# vi /etc/swift/account-server.conf


# vi /etc/swift/container-server.conf


# vi /etc/swift/object-server.conf
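A sketch of the key settings for account-server.conf, following the official Newton guide; bind_ip is the storage node's management IP (assumed here to be 192.168.105.221, the address used for the rings later). container-server.conf and object-server.conf are configured the same way, except that they bind to ports 6201 and 6200 and their pipelines end in container-server and object-server respectively:

[DEFAULT]
bind_ip = 192.168.105.221
bind_port = 6202
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = True

[pipeline:main]
pipeline = healthcheck recon account-server

[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift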


 


3. Fix ownership and permissions


# chown -R swift:swift /srv/node


# mkdir -p /var/cache/swift


# chown -R root:swift /var/cache/swift


# chmod -R 775 /var/cache/swift


 


Swift-Controller (continued)


5. Build the three rings


#!/bin/bash


cd /etc/swift/


swift-ring-builder account.builder create 10 3 1


swift-ring-builder account.builder add --region 1 --zone 1 --ip 192.168.105.221 --port 6202 --device sda8 --weight 100


swift-ring-builder account.builder add --region 1 --zone 1 --ip 192.168.105.221 --port 6202 --device sda9 --weight 100


swift-ring-builder account.builder add --region 1 --zone 1 --ip 192.168.105.222 --port 6202 --device sda8 --weight 100


swift-ring-builder account.builder add --region 1 --zone 1 --ip 192.168.105.222 --port 6202 --device sda9 --weight 100


swift-ring-builder account.builder


swift-ring-builder account.builder rebalance


cd /etc/swift/


swift-ring-builder container.builder create 10 3 1


swift-ring-builder container.builder add --region 1 --zone 1 --ip 192.168.105.221 --port 6201 --device sda8 --weight 100


swift-ring-builder container.builder add --region 1 --zone 1 --ip 192.168.105.221 --port 6201 --device sda9 --weight 100


swift-ring-builder container.builder add --region 1 --zone 1 --ip 192.168.105.222 --port 6201 --device sda8 --weight 100


swift-ring-builder container.builder add --region 1 --zone 1 --ip 192.168.105.222 --port 6201 --device sda9 --weight 100


swift-ring-builder container.builder


swift-ring-builder container.builder rebalance


cd /etc/swift/


swift-ring-builder object.builder create 10 3 1


swift-ring-builder object.builder add --region 1 --zone 1 --ip 192.168.105.221 --port 6200 --device sda8 --weight 100


swift-ring-builder object.builder add --region 1 --zone 1 --ip 192.168.105.221 --port 6200 --device sda9 --weight 100


swift-ring-builder object.builder add --region 1 --zone 1 --ip 192.168.105.222 --port 6200 --device sda8 --weight 100


swift-ring-builder object.builder add --region 1 --zone 1 --ip 192.168.105.222 --port 6200 --device sda9 --weight 100


swift-ring-builder object.builder


swift-ring-builder object.builder rebalance


 


6. Distribute the ring files


(to all storage nodes and proxy service nodes)


# scp *.ring.gz 192.168.105.221:/etc/swift/


# scp *.ring.gz 192.168.105.222:/etc/swift/


 


7. Configure swift


# curl -o /etc/swift/swift.conf \


https://git.openstack.org/cgit/openstack/swift/plain/etc/swift.conf-sample?h=stable/newton


# vi /etc/swift/swift.conf


[swift-hash]


swift_hash_path_suffix = HASH_PATH_SUFFIX


swift_hash_path_prefix = HASH_PATH_PREFIX


(replace these with your own unique values; see the note after this excerpt)


[storage-policy:0]


name = Policy-0


default = yes
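The hash path suffix and prefix above must be unique, identical on every node, and kept secret. One way to generate suitable random values (a suggestion, not part of the original guide):

# openssl rand -hex 16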


 


8. Distribute swift.conf


(to all storage nodes and proxy service nodes)


 


 


Swift - Finalize


 


On all nodes:


# chown -R root:swift /etc/swift


On the controller node:


# service memcached restart


# service swift-proxy restart


On the storage nodes:


# swift-init all start
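To verify the object storage installation (as the official guide does), load the demo credentials and show the account status:

$ . demo-openrc

$ swift stat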


 


============================================================