System environment

[root@controller ~]# cat /etc/redhat-release 
Fedora release 25 (Twenty Five)
[root@controller ~]# uname -a
Linux controller 4.8.6-300.fc25.x86_64 #1 SMP Tue Nov 1 12:36:38 UTC 2016 x86_64 x86_64 x86_64 GNU/Linux

Network configuration

[root@controller ml2]# vi /etc/sysconfig/network-scripts/ifcfg-eno1

HWADDR=2C:59:E5:47:A8:C8
TYPE=Ethernet
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eno1
UUID=fcd7ea2e-098c-3a09-bfa7-9a089b36ccf3
ONBOOT=yes
AUTOCONNECT_PRIORITY=-999
IPADDR=10.0.100.210
PREFIX=24
GATEWAY=10.0.100.1
DNS1=10.0.100.10
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPV6_PRIVACY=no


[root@controller ml2]# vi /etc/sysconfig/network-scripts/ifcfg-eno2

HWADDR=2C:59:E5:47:A8:C9
TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
PEERDNS=yes
PEERROUTES=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eno2
UUID=90075ab3-240d-3371-a2f6-76b1076e82e0
ONBOOT=yes
AUTOCONNECT_PRIORITY=-999
IPADDR=10.0.0.17
NETMASK=255.255.255.0
GATEWAY=10.0.0.254
DNS1=10.0.100.10  # note: NetworkManager expects DNS1=, not DNS=

  

一、NIC teaming (link aggregation)

nmcli con add con-name team1 ifname teamif type team config '{"runner":{"name":"roundrobin"}}'
# note: ifname takes the kernel device name, not the ifcfg- file name
nmcli connection add con-name team-subif1 ifname enp2s0f1 type team-slave master team1
nmcli connection add con-name team-subif2 ifname enp3s0f0 type team-slave master team1
nmcli connection add con-name team-subif3 ifname enp3s0f1 type team-slave master team1

nmcli con mod team1 ipv4.addresses 10.0.0.13/24
nmcli con mod team1 ipv4.gateway 10.0.0.1
nmcli con mod team1 ipv4.method manual
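To verify the team after bringing it up, a quick check looks like this (a sketch, assuming the team device is named teamif as above):

# activate the team connection
nmcli con up team1

# show the runner and per-port link state
teamdctl teamif state

# confirm the address landed on the team device
ip addr show teamif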


[root@control3 network-scripts]# nmcli connection show
NAME         UUID                                  TYPE            DEVICE   
enp2s0f0     23dae73f-1caf-3db3-988c-6bdfc184668c  802-3-ethernet  enp2s0f0 
enp2s0f1     2462d65e-7d69-3e6f-aa12-2fd821067e8a  802-3-ethernet  enp2s0f1 
enp3s0f1     30ea3af7-4f31-3564-b14c-bce6b96dab79  802-3-ethernet  enp3s0f1 
team1        18659422-e906-4f5c-bece-39646d28414a  team            teamif   
dhcp         06da1e4f-f3c0-49dd-a5de-dd0d94161015  802-3-ethernet  --       
enp3s0f0     a95ceb14-7539-3c92-8eb3-d082bd964a1a  802-3-ethernet  --       
team-subif1  3b72f586-01f3-4f3e-b2e8-e4820bb4250c  802-3-ethernet  --       
team-subif2  dfd4f154-374c-46e2-8c37-0e2083dc71cf  802-3-ethernet  --       
team-subif3  523762a7-6fd2-4541-93af-97a7497d58ba  802-3-ethernet  --       
team1        2feb427b-ecc7-44ee-9c42-472f8457e7c9  team            --    


[root@control3 network-scripts]# nmcli con del static
Connection 'static' (25569dd0-3853-42b1-9619-6866b0d4dc88) successfully deleted.
Connection 'static' (6a03b679-8d9d-43f4-8c1f-688a2496ef5c) successfully deleted.
Connection 'static' (c0a34979-cda7-424f-bf39-87aa8857e3de) successfully deleted.

 

二、Partitioning

1. Check the current partition table

[root@comput4 ~]# fdisk -l
Disk /dev/sda: 447.1 GiB, 480103981056 bytes, 937703088 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x26ec1cfd

Device     Boot   Start       End   Sectors  Size Id Type
/dev/sda1  *       2048    976895    974848  476M 83 Linux
/dev/sda2        976896   8976383   7999488  3.8G 82 Linux swap / Solaris
/dev/sda3       8976384 218691583 209715200  100G 83 Linux

2. Create a new partition

[root@comput4 ~]# fdisk /dev/sda

Welcome to fdisk (util-linux 2.29.1).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.


Command (m for help): n
Partition type
   p   primary (3 primary, 0 extended, 1 free)
   e   extended (container for logical partitions)
Select (default e): p

Selected partition 4
First sector (218691584-937703087, default 218691584): (press Enter)
Last sector, +sectors or +size{K,M,G,T,P} (218691584-937703087, default 937703087): (press Enter)

Created a new partition 4 of type 'Linux' and of size 342.9 GiB.

Command (m for help): w
The partition table has been altered.
Calling ioctl() to re-read partition table.
Re-reading the partition table failed.: Device or resource busy

The kernel still uses the old table. The new table will be used at the next reboot or after you run partprobe(8) or kpartx(8).

3. Run partprobe so the kernel re-reads the partition table; this avoids rebooting the system.

[root@comput4 ~]# partprobe

4. Check the new partition

[root@comput4 ~]# fdisk -l
Disk /dev/sda: 447.1 GiB, 480103981056 bytes, 937703088 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x26ec1cfd

Device     Boot     Start       End   Sectors   Size Id Type
/dev/sda1  *         2048    976895    974848   476M 83 Linux
/dev/sda2          976896   8976383   7999488   3.8G 82 Linux swap / Solaris
/dev/sda3         8976384 218691583 209715200   100G 83 Linux
/dev/sda4       218691584 937703087 719011504 342.9G 83 Linux

5. Format the partition as XFS and mount it (su=64k,sw=4 aligns the filesystem to a RAID stripe: a 64 KiB stripe unit across 4 data disks)

[root@comput4 ~]# mkfs.xfs -d su=64k,sw=4 /dev/sda4 -f
meta-data=/dev/sda4              isize=512    agcount=16, agsize=5617264 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=0, rmapbt=0, reflink=0
data     =                       bsize=4096   blocks=89876224, imaxpct=25
         =                       sunit=16     swidth=64 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=43888, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

[root@comput4 ~]# echo "/dev/sda4 /export/sda4 xfs defaults 0 0"  >> /etc/fstab

[root@comput4 ~]# mkdir -p /export/sda4 && mount -a && mkdir -p /export/sda4/brick
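Mounting by /dev/sda4 works, but device names can shift when disks are added or removed; a more robust fstab entry references the filesystem UUID instead (a sketch, with a placeholder UUID):

# look up the UUID of the new filesystem
blkid /dev/sda4

# /etc/fstab entry using the UUID instead of the device name:
# UUID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx /export/sda4 xfs defaults 0 0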

6. Verify the mount

[root@comput4 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs         32G     0   32G   0% /dev
tmpfs            32G     0   32G   0% /dev/shm
tmpfs            32G  1.8M   32G   1% /run
tmpfs            32G     0   32G   0% /sys/fs/cgroup
/dev/sda3       100G  1.3G   99G   2% /
tmpfs            32G     0   32G   0% /tmp
/dev/sda1       453M  113M  314M  27% /boot
tmpfs           6.3G     0  6.3G   0% /run/user/0
/dev/sda4       343G  383M  343G   1% /export/sda4

7. Convert the existing partition to LVM

# Check the current mounts

[root@control4 cinder]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs         32G     0   32G   0% /dev
tmpfs            32G     0   32G   0% /dev/shm
tmpfs            32G  1.6M   32G   1% /run
tmpfs            32G     0   32G   0% /sys/fs/cgroup
/dev/sda3       100G  3.4G   97G   4% /
tmpfs            32G   24M   32G   1% /tmp
/dev/sda1       477M  127M  321M  29% /boot
/dev/sda4       343G   33M  343G   1% /export/sda4  # this is the partition we will re-format as LVM
tmpfs           6.3G     0  6.3G   0% /run/user/0

# Unmount sda4

[root@control4 cinder]# umount /export/sda4 

[root@control4 cinder]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs         32G     0   32G   0% /dev
tmpfs            32G     0   32G   0% /dev/shm
tmpfs            32G  1.6M   32G   1% /run
tmpfs            32G     0   32G   0% /sys/fs/cgroup
/dev/sda3       100G  3.4G   97G   4% /
tmpfs            32G   24M   32G   1% /tmp
/dev/sda1       477M  127M  321M  29% /boot
tmpfs           6.3G     0  6.3G   0% /run/user/0

# Remove the automatic mount at boot

[root@control4 cinder]# vi /etc/fstab 
/dev/sda4 /export/sda4 xfs defaults 0 0   # delete this line to disable the automatic mount

# Confirm it is unmounted

[root@control4 cinder]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs         32G     0   32G   0% /dev
tmpfs            32G     0   32G   0% /dev/shm
tmpfs            32G  1.6M   32G   1% /run
tmpfs            32G     0   32G   0% /sys/fs/cgroup
/dev/sda3       100G  3.4G   97G   4% /
tmpfs            32G   24M   32G   1% /tmp
/dev/sda1       477M  127M  321M  29% /boot
tmpfs           6.3G     0  6.3G   0% /run/user/0

# Re-partition
# (note: running fdisk on /dev/sda4 writes a nested partition table; the pvcreate step below uses /dev/sda4 directly and wipes that signature again)

[root@control4 cinder]# fdisk /dev/sda4

Welcome to fdisk (util-linux 2.28.2).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device /dev/sda4 already contains a xfs signature.
The signature will be removed by a write command.

Device does not contain a recognized partition table.
Created a new DOS disklabel with disk identifier 0x1b5bc4ac.

Command (m for help): n
Partition type
   p   primary (0 primary, 0 extended, 4 free)
   e   extended (container for logical partitions)
Select (default p): p
Partition number (1-4, default 1): 
First sector (2048-718573231, default 2048): 
Last sector, +sectors or +size{K,M,G,T,P} (2048-718573231, default 718573231): 

Created a new partition 1 of type 'Linux' and of size 342.7 GiB.

Command (m for help): t
Selected partition 1
Partition type (type L to list all types): 8e
Changed type of partition 'Linux' to 'Linux LVM'.

Command (m for help): p
Disk /dev/sda4: 342.7 GiB, 367909494784 bytes, 718573232 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x1b5bc4ac

Device      Boot Start       End   Sectors   Size Id Type
/dev/sda4p1       2048 718573231 718571184 342.7G 8e Linux LVM

Command (m for help): w
The partition table has been altered.
Calling ioctl() to re-read partition table.
Re-reading the partition table failed.: Invalid argument

The kernel still uses the old table. The new table will be used at the next reboot or after you run partprobe(8) or kpartx(8).

# Refresh so the change takes effect without a reboot

[root@control4 cinder]# partprobe

# Create the LVM physical volume /dev/sda4:

[root@control4 cinder]# pvcreate /dev/sda4
WARNING: dos signature detected on /dev/sda4 at offset 510. Wipe it? [y/n]: y
  Wiping dos signature on /dev/sda4.
  Physical volume "/dev/sda4" successfully created.

#Create the LVM volume group cinder-volumes:

[root@control4 cinder]# vgcreate cinder-volumes /dev/sda4
Volume group "cinder-volumes" successfully created

 

三、OpenStack overview

As a cloud-platform management project, OpenStack has components covering networking, virtualization, operating systems, servers, and more; each component is developed and maintained by its own project team. The current core projects are:

Function         | Project    | Description
---------------- | ---------- | -----------
Compute          | Nova       | Creates, starts, stops, suspends, and migrates virtual machines, and adjusts CPU, memory, and other settings.
Object storage   | Swift      | Object storage for large, scalable systems, with built-in redundancy and fault tolerance.
Image service    | Glance     | A lookup and registry system for virtual machine images; creates, uploads, deletes, and edits image metadata.
Identity service | Keystone   | Provides authentication, service policy, and service tokens for the other services.
Networking       | Neutron    | Network virtualization for the other services; supports user-defined network policies and mainstream vendor technologies.
Block storage    | Cinder     | Creates, deletes, attaches, detaches, and manages stable block storage for instances.
Dashboard        | Horizon    | A simple, easy-to-use web management UI that lowers the barrier to operating the services.
Telemetry        | Ceilometer | Collects all events in the deployment, for monitoring, billing, and data for other services.
Orchestration    | Heat       | Template-driven, automated deployment of resource environments.
Database service | Trove      | Scalable relational and non-relational database services.

 

四、Installing OpenStack

1. Configure /etc/hosts (the NTP time-server method is used here)

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.17   controller computer ntpserver

2. Configure time synchronization (method 1: cron + ntpdate)

[root@control4 ~]# service crond restart
Redirecting to /bin/systemctl restart crond.service

[root@control4 ~]# service crond status
Redirecting to /bin/systemctl status crond.service
● crond.service - Command Scheduler
Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; vendor preset: enabled)
Active: active (running) since Wed 2017-08-02 16:47:02 CST; 1min 7s ago
Main PID: 4445 (crond)
Tasks: 1 (limit: 9830)
CGroup: /system.slice/crond.service
└─4445 /usr/sbin/crond -n

Aug 02 16:47:02 control4 systemd[1]: Started Command Scheduler.
Aug 02 16:47:02 control4 crond[4445]: (CRON) INFO (Syslog will be used instead of sendmail.)
Aug 02 16:47:02 control4 crond[4445]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 19% if used.)
Aug 02 16:47:02 control4 crond[4445]: (CRON) INFO (running with inotify support)
Aug 02 16:47:02 control4 crond[4445]: (CRON) INFO (@reboot jobs will be run at computer's startup.)

[root@control4 sbin]# find / -name ntpdate
/usr/share/bash-completion/completions/ntpdate

[root@control4 sbin]# crontab -l
*/30 * * * * /usr/share/bash-completion/completions/ntpdate 10.0.100.208
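Caution: the path that find located above is the bash-completion script for ntpdate, not the ntpdate binary itself, so the cron job as written will not actually sync the clock. A corrected entry (a sketch, assuming the ntpdate package is installed, which places the binary at /usr/sbin/ntpdate):

# sync against the local NTP server every 30 minutes
*/30 * * * * /usr/sbin/ntpdate 10.0.100.208 >/dev/null 2>&1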

Method 2: run a dedicated time server (chrony)

1. Configure /etc/hosts

[root@control4 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.17 controller computer ntpserver

2. Install chrony

[root@control4 ~]# yum install chrony
[root@control4 ~]# cp /etc/chrony.conf /etc/chrony.conf.bak
[root@control4 ~]# vi /etc/chrony.conf
server controller iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 10.0.0.0/24
logdir /var/log/chrony

# Enable at boot
[root@control4 ~]# systemctl enable chronyd.service
Created symlink /etc/systemd/system/multi-user.target.wants/chronyd.service → /usr/lib/systemd/system/chronyd.service.

# Start the service
[root@control4 ~]# systemctl start chronyd.service

# Verify
[root@control4 ~]# chronyc sources
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^? controller                    0   6     0     -     +0ns[   +0ns] +/-    0ns
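On the other nodes, point chrony at the controller instead (a sketch; install and enable chronyd the same way as on the server):

# /etc/chrony.conf on the compute/storage nodes: keep only the controller as a source
server controller iburst

# restart and verify
systemctl restart chronyd.service
chronyc sources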

五、Install the OpenStack environment (repositories and base services)

Official reference: https://docs.openstack.org/newton/install-guide-rdo/environment-packages.html

1. Install the OpenStack repository

yum install https://repos.fedorapeople.org/repos/openstack/openstack-newton/rdo-release-newton-5.noarch.rpm

yum upgrade

yum install python-openstackclient

2. Install the MariaDB database (on the controller node)

Install and configure the components. Install the packages:

# yum install mariadb mariadb-server python2-PyMySQL

Create and edit the /etc/my.cnf.d/openstack.cnf file: add a [mysqld] section and set the bind-address key to the management IP address of the controller node so other nodes can reach the database over the management network, and set additional keys for useful options and the UTF-8 character set:

[mysqld]
bind-address = 10.0.0.17

default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
Finalize the installation. Start the database service and configure it to start when the system boots:

# systemctl enable mariadb.service
# systemctl start mariadb.service

Secure the database service by running the mysql_secure_installation script. In particular, choose a suitable password for the database root account.

# Check the service

[root@control4 my.cnf.d]# netstat -lntup|grep mysqld
tcp        0      0 10.0.0.17:3306          0.0.0.0:*               LISTEN      40376/mysqld
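Because bind-address is set to the management IP, the daemon accepts TCP connections there; a quick check from any node on the management network (a sketch):

# connect over TCP to the management address rather than the local socket
mysql -h 10.0.0.17 -u root -p -e "SELECT VERSION();"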

# Initialize MySQL (mysql_secure_installation)

[root@control4 my.cnf.d]# mysql_secure_installation

NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB
      SERVERS IN PRODUCTION USE!  PLEASE READ EACH STEP CAREFULLY!

In order to log into MariaDB to secure it, we'll need the current
password for the root user.  If you've just installed MariaDB, and
you haven't set the root password yet, the password will be blank,
so you should just press enter here.

Enter current password for root (enter for none): 
OK, successfully used password, moving on...

Setting the root password ensures that nobody can log into the MariaDB
root user without the proper authorisation.

Set root password? [Y/n] y
New password: 
Re-enter new password: 
Password updated successfully!
Reloading privilege tables..
 ... Success!


By default, a MariaDB installation has an anonymous user, allowing anyone
to log into MariaDB without having to have a user account created for
them.  This is intended only for testing, and to make the installation
go a bit smoother.  You should remove them before moving into a
production environment.

Remove anonymous users? [Y/n] y
 ... Success!

Normally, root should only be allowed to connect from 'localhost'.  This
ensures that someone cannot guess at the root password from the network.

Disallow root login remotely? [Y/n] y
 ... Success!

By default, MariaDB comes with a database named 'test' that anyone can
access.  This is also intended only for testing, and should be removed
before moving into a production environment.

Remove test database and access to it? [Y/n] y
 - Dropping test database...
 ... Success!
 - Removing privileges on test database...
 ... Success!

Reloading the privilege tables will ensure that all changes made so far
will take effect immediately.

Reload privilege tables now? [Y/n] y
 ... Success!

Cleaning up...

All done!  If you've completed all of the above steps, your MariaDB
installation should now be secure.

Thanks for using MariaDB!
 
[root@control4 my.cnf.d]# mysql -uroot -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 11
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
+--------------------+
3 rows in set (0.00 sec)

MariaDB [(none)]> exit
Bye

3. Install RabbitMQ

yum install rabbitmq-server -y

# Start the service

systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service

# Add the openstack user

[root@control4 ~]# rabbitmqctl add_user openstack hotdoor
Creating user "openstack"

# Grant the openstack user configure, write, and read permissions

[root@control4 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/"

# Tag the user as an administrator

[root@control4 ~]# rabbitmqctl set_user_tags openstack administrator
Setting tags for user "openstack" to [administrator]
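To confirm the user, its tags, and its permissions took effect (a sketch):

# list users and their tags
rabbitmqctl list_users

# list permissions on the default vhost
rabbitmqctl list_permissions -p /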

[root@control4 ~]# systemctl restart rabbitmq-server

# Enable the management plugin (web UI)

[root@control4 ~]# rabbitmq-plugins enable rabbitmq_management
The following plugins have been enabled:
  amqp_client
  cowlib
  cowboy
  rabbitmq_web_dispatch
  rabbitmq_management_agent
  rabbitmq_management

Applying plugin configuration to rabbit@control4... started 6 plugins.

[root@control4 ~]# systemctl restart rabbitmq-server

# Check that the port is open

[root@control4 ~]# lsof -i:15672
COMMAND    PID     USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
beam.smp 43523 rabbitmq   54u  IPv4 147406      0t0  TCP *:15672 (LISTEN)

# Access the RabbitMQ management UI

(screenshot: RabbitMQ management console, served on port 15672, omitted)

Log in to test. If you cannot connect, the usual cause is that the firewall is still blocking the port; check this yourself.

4. Install memcached

[root@control4 ~]#  yum install memcached python-memcached -y

[root@control4 ~]# cp /etc/sysconfig/memcached /etc/sysconfig/memcached.bak

[root@control4 ~]# ll /etc/sysconfig/memcached*
-rw-r--r--. 1 root root 87 Nov  2  2016 /etc/sysconfig/memcached
-rw-r--r--. 1 root root 87 Aug 18 12:03 /etc/sysconfig/memcached.bak

[root@control4 ~]# vi /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 10.0.0.17,::1"

# Start the service

[root@control4 ~]# systemctl enable memcached.service
Created symlink /etc/systemd/system/multi-user.target.wants/memcached.service → /usr/lib/systemd/system/memcached.service.

[root@control4 ~]# systemctl start memcached.service

# Check that the service is listening

[root@control4 ~]# ss -lntup|grep memcached
udp    UNCONN     0      0      10.0.0.17:11211                 *:*                   users:(("memcached",pid=44897,fd=28))
udp    UNCONN     0      0       ::1:11211                :::*                   users:(("memcached",pid=44897,fd=29))
tcp    LISTEN     0      128    10.0.0.17:11211                 *:*                   users:(("memcached",pid=44897,fd=26))
tcp    LISTEN     0      128     ::1:11211                :::*                   users:(("memcached",pid=44897,fd=27))
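A quick functional check of memcached (a sketch, assuming nc is available, e.g. from the nmap-ncat package):

# ask the daemon for its stats over TCP, then quit so the connection closes
printf 'stats\r\nquit\r\n' | nc 10.0.0.17 11211 | head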

 

六、Install the Identity service (Keystone)

1. Create the keystone database

mysql -u root -p

CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
  IDENTIFIED BY '123456';
 
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
  IDENTIFIED BY '123456';

# Check the users that were added

MariaDB [mysql]> select user,host from user;
+----------+-----------+
| user     | host      |
+----------+-----------+
| keystone | %         |
| root     | 127.0.0.1 |
| root     | ::1       |
| keystone | localhost |
| root     | localhost |
+----------+-----------+
5 rows in set (0.00 sec)

2. Install openstack-keystone

yum install openstack-keystone httpd mod_wsgi -y

# Edit the configuration file

cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak

[root@control4 ~]# vi /etc/keystone/keystone.conf

[database]
connection = mysql+pymysql://keystone:123456@controller/keystone

[token]
provider = fernet

# Sync the database and initialize the keys

su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
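The two keystone-manage setup commands create key repositories owned by the keystone user; a quick check that they exist (a sketch):

# each directory should hold key files named 0 and 1 initially
ls -l /etc/keystone/fernet-keys/ /etc/keystone/credential-keys/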

#Bootstrap the Identity service

keystone-manage bootstrap --bootstrap-password 123456 \
  --bootstrap-admin-url http://controller:35357/v3/ \
  --bootstrap-internal-url http://controller:35357/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne

# Configure Apache

cp /etc/httpd/conf/httpd.conf /etc/httpd/conf/httpd.conf.bak

vi /etc/httpd/conf/httpd.conf
ServerName controller

# Create the symlink

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

# Start the service and check it

[root@control4 ~]# systemctl enable httpd.service
Created symlink /etc/systemd/system/multi-user.target.wants/httpd.service → /usr/lib/systemd/system/httpd.service.

[root@control4 ~]# systemctl start httpd.service

[root@control4 ~]# lsof -i:80
COMMAND   PID   USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
httpd   45766   root    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45777 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45778 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45779 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45781 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45786 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
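With Apache now serving Keystone, the version endpoints give a quick sanity check (a sketch):

# both the public (5000) and admin (35357) endpoints should return version JSON
curl http://controller:5000/v3
curl http://controller:35357/v3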

# Set the environment variables

export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3

The result looks like this:

(screenshot omitted)

3、Create a domain, projects, users, and roles

[root@control4 ~]# openstack project create --domain default \
>   --description "Service Project" service
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Service Project                  |
| domain_id   | default                          |
| enabled     | True                             |
| id          | 4d53bb5db064416ba02284862df47c00 |
| is_domain   | False                            |
| name        | service                          |
| parent_id   | default                          |
+-------------+----------------------------------+
[root@control4 ~]# openstack project create --domain default \
>   --description "Demo Project" demo
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Demo Project                     |
| domain_id   | default                          |
| enabled     | True                             |
| id          | 03cc1f27dd3c42d4a5bf599432a5eefe |
| is_domain   | False                            |
| name        | demo                             |
| parent_id   | default                          |
+-------------+----------------------------------+
[root@control4 ~]# openstack user create --domain default \
>   --password-prompt demo
User Password:
Repeat User Password:   # enter the password
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | 6d74d170ba314f7696c807bb2ce02ca2 |
| name                | demo                             |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@control4 ~]# openstack role create user
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | None                             |
| id        | 2981e73e3c35458b81496dcf247741cf |
| name      | user                             |
+-----------+----------------------------------+
[root@control4 ~]#  openstack role add --project demo --user demo user

4. Verify operation

#For security reasons, disable the temporary authentication token mechanism:

vi /etc/keystone/keystone-paste.ini

Remove admin_token_auth from the [pipeline:public_api], [pipeline:admin_api], and [pipeline:api_v3] sections.

# Unset the temporary OS_AUTH_URL and OS_PASSWORD environment variables:

unset OS_AUTH_URL OS_PASSWORD

# This step prompts for the admin password

[root@control4 ~]# openstack --os-auth-url http://controller:35357/v3 \
>   --os-project-domain-name Default --os-user-domain-name Default \
>   --os-project-name admin --os-username admin token issue
Password: 
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2017-08-18 06:27:42+00:00        |
| id         | 1a839e5dc8cb42e9b9061416e4c135be |
| project_id | f02a7283e7574e538aee9f0763780979 |
| user_id    | 9afda0f2d36e445695b2717e676d0548 |
+------------+----------------------------------+

# This step prompts for the demo password

[root@control4 ~]# openstack --os-auth-url http://controller:5000/v3 \
>   --os-project-domain-name Default --os-user-domain-name Default \
>   --os-project-name demo --os-username demo token issue
Password: 
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2017-08-18 06:29:35+00:00        |
| id         | 7c4ec617883a406cb98e27d1b18cc6d6 |
| project_id | 03cc1f27dd3c42d4a5bf599432a5eefe |
| user_id    | 6d74d170ba314f7696c807bb2ce02ca2 |
+------------+----------------------------------+

5. Create the OpenStack client environment scripts

[root@control4 ~]# vi admin 
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@control4 ~]# vi demo 
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

# Use the scripts

[root@control4 ~]# source admin
[root@control4 ~]# openstack token issue
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2017-08-18 06:39:40+00:00        |
| id         | 7fb684b09abc405699936998636bf58e |
| project_id | f02a7283e7574e538aee9f0763780979 |
| user_id    | 9afda0f2d36e445695b2717e676d0548 |
+------------+----------------------------------+

七、Install and configure Glance

1. Create the database

[root@control4 ~]# mysql -u root -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 22
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE glance;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
 IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
 IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)

exit  # leave the database

2. Create the glance user

[root@control4 ~]# source admin
[root@control4 ~]# openstack user create --domain default --password-prompt glance
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | e403ada42383417aaf9f74d0e02ae5f1 |
| name                | glance                           |
| password_expires_at | None                             |
+---------------------+----------------------------------+

3. Add the admin role to the glance user in the service project

[root@control4 ~]# openstack role add --project service --user glance admin

4. Create the glance service entity and endpoints

[root@control4 ~]# openstack service create --name glance \
>   --description "OpenStack Image" image
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Image                  |
| enabled     | True                             |
| id          | 7f2abaeca7f44539a7d31bcbb01666b9 |
| name        | glance                           |
| type        | image                            |
+-------------+----------------------------------+
[root@control4 ~]# 
[root@control4 ~]# openstack endpoint create --region RegionOne \
>   image public http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 817e69c34b73446db70576c6bd69e700 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 7f2abaeca7f44539a7d31bcbb01666b9 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+
[root@control4 ~]# 
[root@control4 ~]# openstack endpoint create --region RegionOne \
>   image internal http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | c6a8a58436a04c488c462b20cc440f28 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 7f2abaeca7f44539a7d31bcbb01666b9 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+
[root@control4 ~]# 
[root@control4 ~]# openstack endpoint create --region RegionOne \
>   image admin http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | d9cb57549ea945748caa96faa14b3cda |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 7f2abaeca7f44539a7d31bcbb01666b9 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

5. Install and configure the Glance components

[root@control4 ~]# yum install openstack-glance

# Back up the configuration file

[root@control4 ~]# cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak

[root@control4 ~]# ll -ld /etc/glance/glance-api.conf*
-rw-r-----. 1 root glance 140377 Oct  6  2016 /etc/glance/glance-api.conf
-rw-r-----. 1 root root   140377 Aug 18 14:01 /etc/glance/glance-api.conf.bak

# Edit the configuration file

# Set the hostname first
[root@control4 ~]# hostnamectl set-hostname controller
[root@control4 ~]# hostname
controller

[root@control4 ~]# vi /etc/glance/glance-api.conf

[database]
connection = mysql+pymysql://glance:hotdoor#899@controller/glance  # note: the host name follows the @; the password must match the GRANT statements above (which used '123456')

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = hotdoor#899


[paste_deploy]
flavor = keystone

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

6. Edit the glance-registry.conf configuration file

[root@control4 ~]# cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak
[root@control4 ~]# vi /etc/glance/glance-registry.conf

[database]
connection = mysql+pymysql://glance:hotdoor#899@controller/glance

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = hotdoor#899

[paste_deploy]
flavor = keystone

# Sync the database (deprecation warnings are printed here; they can be ignored)

[root@control4 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
Option "verbose" from group "DEFAULT" is deprecated for removal.  Its value may be silently ignored in the future.
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:1171: OsloDBDeprecationWarning: EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade
  expire_on_commit=expire_on_commit, _conf=conf)
/usr/lib/python2.7/site-packages/pymysql/cursors.py:166: Warning: (1831, u'Duplicate index `ix_image_properties_image_id_name`. This is deprecated and will be disallowed in a future release.')
  result = self._query(query)

# Start the services

[root@control4 ~]# systemctl enable openstack-glance-api.service \
>   openstack-glance-registry.service
Created symlink /etc/systemd/system/multi-user.target.wants/openstack-glance-api.service → /usr/lib/systemd/system/openstack-glance-api.service.
Created symlink /etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service → /usr/lib/systemd/system/openstack-glance-registry.service.
[root@control4 ~]# 
[root@control4 ~]# systemctl start openstack-glance-api.service \
>   openstack-glance-registry.service

7. Verify the service

# Download a test image

wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img

# Check the downloaded image

[root@control4 ~]# ll
total 12992
-rw-r--r--. 1 root root      267 Aug 18 13:32 admin
-rw-------. 1 root root     1777 Aug 17 20:00 anaconda-ks.cfg
-rw-r--r--. 1 root root 13287936 May  8  2015 cirros-0.3.4-x86_64-disk.img
-rw-r--r--. 1 root root      264 Aug 18 13:33 demo

# Verify

[root@control4 ~]# source admin

[root@control4 ~]# openstack image create "cirros" \
>   --file cirros-0.3.4-x86_64-disk.img \
>   --disk-format qcow2 --container-format bare \
>   --public
+------------------+------------------------------------------------------+
| Field            | Value                                                |
+------------------+------------------------------------------------------+
| checksum         | ee1eca47dc88f4879d8a229cc70a07c6                     |
| container_format | bare                                                 |
| created_at       | 2017-08-18T06:32:49Z                                 |
| disk_format      | qcow2                                                |
| file             | /v2/images/6a4ba2ce-ba0a-4e8e-a9dc-efb8c72b7863/file |
| id               | 6a4ba2ce-ba0a-4e8e-a9dc-efb8c72b7863                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | cirros                                               |
| owner            | f02a7283e7574e538aee9f0763780979                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 13287936                                             |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2017-08-18T06:32:49Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+------------------+------------------------------------------------------+

# List the images

[root@control4 ~]# openstack image list
+--------------------------------------+--------+--------+
| ID                                   | Name   | Status |
+--------------------------------------+--------+--------+
| 6a4ba2ce-ba0a-4e8e-a9dc-efb8c72b7863 | cirros | active |
+--------------------------------------+--------+--------+
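For details on a single image, and to confirm the file landed in the glance_store datadir configured above (a sketch):

# checksum, size, format, and visibility of the uploaded image
openstack image show cirros

# the image file itself is stored under filesystem_store_datadir
ls -lh /var/lib/glance/images/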

 

八、Install and configure Nova on the controller node

This section describes how to install and configure the Compute service, code-named Nova, on the controller node.

1. Create the databases

[root@control4 ~]# mysql -uroot -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 31
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.


MariaDB [(none)]>  CREATE DATABASE nova_api;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]>  CREATE DATABASE nova;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| glance             |
| information_schema |
| keystone           |
| mysql              |
| nova               |
| nova_api           |
| performance_schema |
+--------------------+
7 rows in set (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
    ->   IDENTIFIED BY 'hotdoor#899';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
    ->   IDENTIFIED BY 'hotdoor#899';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
    ->   IDENTIFIED BY 'hotdoor#899';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
    ->   IDENTIFIED BY 'hotdoor#899';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)

exit  # leave the database

2. Create the nova user

[root@control4 ~]# openstack user create --domain default \
>   --password-prompt nova
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | baef83a42538487390a525b3f1a9793b |
| name                | nova                             |
| password_expires_at | None                             |
+---------------------+----------------------------------+

3. Add the admin role and create the service entity and endpoints

[root@control4 ~]# openstack role add --project service --user nova admin
[root@control4 ~]# 
[root@control4 ~]# openstack service create --name nova \
>   --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Compute                |
| enabled     | True                             |
| id          | f31c3500c136482fbb962c01feda5350 |
| name        | nova                             |
| type        | compute                          |
+-------------+----------------------------------+
[root@control4 ~]# openstack endpoint create --region RegionOne \
>   compute public http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | be0735691ed540249fff9c6c1baf8355          |
| interface    | public                                    |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | f31c3500c136482fbb962c01feda5350          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
[root@control4 ~]# 
[root@control4 ~]# openstack endpoint create --region RegionOne \
>   compute internal http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | 06dcbfc21fbd45a0bcc6fe74b215ffa8          |
| interface    | internal                                  |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | f31c3500c136482fbb962c01feda5350          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
[root@control4 ~]# 
[root@control4 ~]# openstack endpoint create --region RegionOne \
>   compute admin http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | 72645b92a9284611bf699a90f12630ae          |
| interface    | admin                                     |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | f31c3500c136482fbb962c01feda5350          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+

4. Install the Nova packages

yum install openstack-nova-api openstack-nova-conductor \
  openstack-nova-console openstack-nova-novncproxy \
  openstack-nova-scheduler

5. Edit the configuration file

[root@control4 nova]# cat nova.conf|grep -v "^#"|grep -v "^$"
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hotdoor@controller
auth_strategy = keystone
my_ip = 10.0.0.17
use_neutron = True 
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:hotdoor@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[cors]
[cors.subdomain]
[crypto]
[database]
connection = mysql+pymysql://nova:hotdoor@controller/nova
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hotdoor#899
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[placement]
[placement_database]
[rdp]
[remote_debug]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
vncserver_listen = 10.0.0.17
vncserver_proxyclient_address = 10.0.0.17
[workarounds]
[wsgi]
[xenserver]
[xvp]

6. Sync the databases

su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova

7. Start the services and enable them at boot

systemctl enable openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service

 systemctl start openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service

# Problems encountered

Pitfall 1: the RabbitMQ password must not contain special characters, or the database sync below fails with a traceback. (When deploying OpenStack, never use passwords containing special characters.)

[root@control4 nova]# su -s /bin/sh -c "nova-manage api_db sync" nova
Traceback (most recent call last):
  File "/usr/bin/nova-manage", line 10, in <module>
    sys.exit(main())
  File "/usr/lib/python2.7/site-packages/nova/cmd/manage.py", line 1580, in main
    config.parse_args(sys.argv)
  File "/usr/lib/python2.7/site-packages/nova/config.py", line 50, in parse_args
    rpc.init(CONF)
  File "/usr/lib/python2.7/site-packages/nova/rpc.py", line 76, in init
    aliases=TRANSPORT_ALIASES)
  File "/usr/lib/python2.7/site-packages/oslo_messaging/transport.py", line 182, in get_transport
    url = TransportURL.parse(conf, url, aliases)
  File "/usr/lib/python2.7/site-packages/oslo_messaging/transport.py", line 459, in parse
    port = int(port)
ValueError: invalid literal for int() with base 10: 'hotdoor'

# Solution

The RabbitMQ password must not contain special characters, or the error above is raised. The fix is to change the RabbitMQ password.

transport_url = rabbit://openstack:hotdoor#899@controller  # this password caused trouble for a long time; never use special characters

# How to change the RabbitMQ password

[root@control4 nova]# rabbitmqctl  change_password  openstack hotdoor
Error: unable to connect to node rabbit@control4: nodedown

DIAGNOSTICS
===========
attempted to contact: [rabbit@control4]

rabbit@control4:
  * unable to connect to epmd (port 4369) on control4: nxdomain (non-existing domain)

current node details:
- node name: 'rabbitmq-cli-94@controller'
- home dir: /var/lib/rabbitmq
- cookie hash: fFocvmbatbiHNvfPZ4D/Yw==

# After changing the password, remember to update it in nova.conf as well

[root@control4 ~]# vi /etc/nova/nova.conf

transport_url = rabbit://openstack:hotdoor@controller  # remember to update this password

Pitfall 2: the following warning is normal; it can be ignored.

[root@control4 nova]# su -s /bin/sh -c "nova-manage db sync" nova
WARNING: cell0 mapping not found - not syncing cell0.

 

九、Install and configure a compute node

Note: in a multi-node environment, multiple compute nodes can be deployed separately from the controller node.

Reference: https://docs.openstack.org/newton/install-guide-rdo/nova-compute-install.html

# Handle the following issue up front, or the compute node installation will fail.

Pitfall 1: the installed iptables is too new; uninstall it first, then install the older version required by iptables-services-1.6.0-2.fc25.x86_64.

[root@controller ~]# yum install openstack-nova-compute -y

Error: transaction check vs depsolve:
iptables = 1.6.0-2.fc25 is needed by iptables-services-1.6.0-2.fc25.x86_64
To diagnose the problem, try running: 'rpm -Va --nofiles --nodigest'.
You probably have corrupted RPMDB, running 'rpm --rebuilddb' might fix the issue.
The downloaded packages were saved in cache until the next successful transaction.
You can remove cached packages by executing 'dnf clean packages'.
[root@controller ~]# rpm -qa iptables*
iptables-1.6.0-3.fc25.x86_64
iptables-libs-1.6.0-3.fc25.x86_64

Solution:

# Check the installed version

[root@controller ~]# rpm -qa iptables*
iptables-1.6.0-3.fc25.x86_64
iptables-libs-1.6.0-3.fc25.x86_64

# Remove the installed (newer) version

[root@controller nova]# rpm -e iptables --nodeps
[root@controller nova]# rpm -e iptables-libs --nodeps
[root@controller nova]# rpm -qa iptables*

# Install the version named in the error message

# Install the dependency first
yum install iptables-libs-1.6.0-2.fc25.x86_64 -y

# Then install iptables
yum install iptables-1.6.0-2.fc25.x86_64 -y

# Install the compute node packages; back up the original configuration file before editing it.

yum install openstack-nova-compute -y

# Back up the configuration file

[root@controller ~]# cp /etc/nova/nova.conf /etc/nova/nova.conf.01.bak

[root@controller ~]# ll -ld /etc/nova/nova.conf*
-rw-r-----. 1 root nova 290463 Aug 18 18:54 /etc/nova/nova.conf 
-rw-r-----. 1 root root 290463 Aug 19 12:07 /etc/nova/nova.conf.01.bak  # config completed in stage one (controller)
-rw-r-----. 1 root root 289748 Aug 18 15:32 /etc/nova/nova.conf.old     # original configuration file

# Edit the configuration file

[root@controller ~]# vi /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hotdoor@controller
auth_strategy = keystone
my_ip = 10.0.0.17
use_neutron = True 
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:hotdoor@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[cors]
[cors.subdomain]
[crypto]
[database]
connection = mysql+pymysql://nova:hotdoor@controller/nova
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hotdoor#899
[libvirt]
virt_type=kvm
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[placement]
[placement_database]
[rdp]
[remote_debug]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.0.0.17
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]

# Pitfall 2:

Note: if the nova-compute service fails to start, check /var/log/nova/nova-compute.log. The error message below usually means the firewall on the controller node is blocking access to port 5672:

AMQP server on controller:5672 is unreachable

Solution: add a firewall rule.

# Add an AMQP firewall rule
iptables -A INPUT -p tcp --dport 5672 -j ACCEPT
# iptables-save alone only prints the rules; redirect it to persist them across reboots
iptables-save > /etc/sysconfig/iptables

# Check that the rule was added
[root@controller ~]# iptables -L
Chain INPUT (policy ACCEPT)
target     prot opt source               destination         
ACCEPT     udp  --  anywhere             anywhere             udp dpt:domain
ACCEPT     tcp  --  anywhere             anywhere             tcp dpt:domain
ACCEPT     udp  --  anywhere             anywhere             udp dpt:bootps
ACCEPT     tcp  --  anywhere             anywhere             tcp dpt:bootps
nova-api-INPUT  all  --  anywhere             anywhere            
ACCEPT     tcp  --  anywhere             anywhere             tcp dpt:amqp

# Start the services

systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
# Note: inspect the status output carefully for red (error) lines; any red output means a configuration problem
[root@controller ~]# systemctl status libvirtd.service openstack-nova-compute.service
● libvirtd.service - Virtualization daemon
   Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled)
   Active: active (running) since Sat 2017-08-19 12:52:51 CST; 7s ago
     Docs: man:libvirtd(8)
           http://libvirt.org
 Main PID: 16902 (libvirtd)
    Tasks: 18 (limit: 9830)
   CGroup: /system.slice/libvirtd.service
           ├─ 3192 /sbin/dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/default.conf --leasefile-ro --dhcp-sc
           ├─ 3193 /sbin/dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/default.conf --leasefile-ro --dhcp-sc
           └─16902 /usr/sbin/libvirtd

Aug 19 12:52:51 controller systemd[1]: Starting Virtualization daemon...
Aug 19 12:52:51 controller systemd[1]: Started Virtualization daemon.
Aug 19 12:52:51 controller dnsmasq[3192]: read /etc/hosts - 3 addresses
Aug 19 12:52:51 controller dnsmasq[3192]: read /var/lib/libvirt/dnsmasq/default.addnhosts - 0 addresses
Aug 19 12:52:51 controller dnsmasq-dhcp[3192]: read /var/lib/libvirt/dnsmasq/default.hostsfile

● openstack-nova-compute.service - OpenStack Nova Compute Server
   Loaded: loaded (/usr/lib/systemd/system/openstack-nova-compute.service; enabled; vendor preset: disabled
   Active: active (running) since Sat 2017-08-19 12:52:54 CST; 4s ago
 Main PID: 16921 (nova-compute)
    Tasks: 22 (limit: 9830)
   CGroup: /system.slice/openstack-nova-compute.service
           └─16921 /usr/bin/python2 /usr/bin/nova-compute

Aug 19 12:52:51 controller systemd[1]: Starting OpenStack Nova Compute Server...
Aug 19 12:52:54 controller systemd[1]: Started OpenStack Nova Compute Server.

# Verify

[root@controller ~]# source admin
[root@controller ~]# openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| ID | Binary           | Host       | Zone     | Status  | State | Updated At                 |
+----+------------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-consoleauth | controller | internal | enabled | up    | 2017-08-19T05:00:45.000000 |
|  2 | nova-conductor   | controller | internal | enabled | up    | 2017-08-19T05:00:47.000000 |
|  6 | nova-scheduler   | controller | internal | enabled | up    | 2017-08-19T05:00:42.000000 |
| 14 | nova-compute     | controller | nova     | enabled | up    | 2017-08-19T05:00:40.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+

 

十、Install and configure Neutron (Networking service) on the controller node

Reference: https://docs.openstack.org/newton/install-guide-rdo/neutron-controller-install.html

1. Networking service overview

OpenStack Networking (Neutron) is one of the core OpenStack components; it provides virtual networking for the cloud environment.

2. Networking service components

neutron-server: this part consists of the neutron-server daemon and its various plug-ins. neutron-server receives API requests and hands them to the configured plug-ins for further processing. The plug-ins access the database to maintain configuration data and relationships: routers, networks, subnets, ports, floating IPs, security groups, and so on.

OpenStack network plug-ins and agents: these plug and unplug ports, create networks or subnets, and provide IP addressing. The plug-ins and agents differ depending on the vendor and technologies used in the particular cloud. OpenStack Networking ships with plug-ins and agents for Cisco virtual and physical switches, NEC OpenFlow products, Open vSwitch, Linux bridging, and the VMware NSX product.

Common agents are the L3 (layer 3) agent, the DHCP (dynamic host IP addressing) agent, and the plug-in agents.

The OpenStack network plug-ins and agents in detail:

Plug-in agent (neutron-*-agent): packet handling on the virtual networks is done by these plug-in agents, which run on every compute and network node. In general, the plug-in you choose dictates which agent you need. The agents talk to the Neutron server and its plug-ins through the message queue.

 

Tip: a plug-in agent must be deployed on every host running a hypervisor. It applies the local vSwitch configuration, and the details depend on the plug-in in use. (The most common plug-in is Open vSwitch; others include Big Switch, Floodlight REST Proxy, Brocade, NSX, PLUMgrid, and Ryu.)

DHCP agent (neutron-dhcp-agent): provides DHCP service to the tenant networks and is deployed on the network nodes; it is shared by all plug-ins. Its main job is to dynamically assign IP addresses to the virtual machines on tenant networks.

L3 agent (neutron-l3-agent): provides layer-3 forwarding so instances can reach external networks. Also deployed on the network nodes, it supplies layer-3 routing and network address translation (NAT) so tenant VMs can communicate with the outside world.

The following figure shows the relationships between the internal Neutron services.

(figure omitted)

The Neutron service processes at runtime:

(figure omitted)

Message queue: used by most OpenStack Networking installations to route information between neutron-server and the various agent processes. For some plug-ins it also acts as a database for storing network state.


 

3. Install and configure the controller node

1. Prerequisites

Create the database and grant it privileges:

[root@controller ~]# mysql -uroot -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 762
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE neutron;
Query OK, 1 row affected (0.01 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
    ->   IDENTIFIED BY 'hotdoor';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
    ->   IDENTIFIED BY 'hotdoor';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> exit
Bye
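
As a quick check (my addition, using the 'hotdoor' password granted above), confirm that the neutron account can log in and see its database:

[root@controller ~]# mysql -uneutron -photdoor -e "SHOW DATABASES;"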

2. Obtain admin credentials

source openrc

3. Create the neutron user and add the admin role to it

[root@controller ~]# source admin
[root@controller ~]# openstack user create --domain default --password-prompt neutron
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | ef2ecdc2aa5442c083e22dd05149f7b2 |
| name                | neutron                          |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@controller ~]# openstack role add --project service --user neutron admin
[root@controller ~]# openstack service create --name neutron \
>   --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Networking             |
| enabled     | True                             |
| id          | d4beb86cafd845968641bb85876cb255 |
| name        | neutron                          |
| type        | network                          |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
>   network public http://controller:9696

+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 7147205d0858453fbce949213d71ac6b |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d4beb86cafd845968641bb85876cb255 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller ~]# 
[root@controller ~]# openstack endpoint create --region RegionOne \
>   network internal http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 4bf1515393574b54962faa0d861ba676 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d4beb86cafd845968641bb85876cb255 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller ~]# 
[root@controller ~]# openstack endpoint create --region RegionOne \
>   network admin http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 2ee404d255b34d5d89b1a6bfb7cff3ce |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d4beb86cafd845968641bb85876cb255 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller ~]#
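A quick verification (my addition) that all three endpoints were registered:

[root@controller ~]# openstack endpoint list --service network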

  

4. Install the packages

[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 \
>   openstack-neutron-linuxbridge ebtables -y

# Watch out for a gotcha here: after running the command once, the packages may still not be installed. Run the command above a second time for the installation to succeed; otherwise the configuration files will not be found.

Installed:
  conntrack-tools.x86_64 1.4.3-1.fc25                       dibbler-client.x86_64 1.0.1-3.fc25                          
  dnsmasq-utils.x86_64 2.76-2.fc25                          keepalived.x86_64 1.3.5-1.fc25                              
  libnetfilter_cthelper.x86_64 1.0.0-9.fc24                 libnetfilter_cttimeout.x86_64 1.0.0-7.fc24                  
  libnetfilter_queue.x86_64 1.0.2-7.fc24                    libsodium.x86_64 1.0.13-1.fc25                              
  libxslt-python.x86_64 1.1.28-13.fc25                      lm_sensors-libs.x86_64 3.4.0-5.fc25                         
  net-snmp-agent-libs.x86_64 1:5.7.3-15.fc25                openpgm.x86_64 5.2.122-6.fc24                               
  openstack-neutron.noarch 1:9.4.0-1.el7                    openstack-neutron-common.noarch 1:9.4.0-1.el7               
  openstack-neutron-linuxbridge.noarch 1:9.4.0-1.el7        openstack-neutron-ml2.noarch 1:9.4.0-1.el7                  
  python-libxml2.x86_64 2.9.4-2.fc25                        python-logutils.noarch 0.3.3-7.fc25                         
  python-ncclient.noarch 0.4.7-2.fc25                       python-neutron.noarch 1:9.4.0-1.el7                         
  python-neutron-lib.noarch 0.4.0-1.el7                     python-openvswitch.noarch 1:2.6.1-4.1.git20161206.el7       
  python-ryu-common.noarch 4.9-2.el7                        python-webtest.noarch 2.0.23-1.fc25                         
  python-werkzeug.noarch 0.11.10-2.fc25                     python2-designateclient.noarch 2.3.0-1.el7                  
  python2-gevent.x86_64 1.1.2-2.el7                         python2-pecan.noarch 1.1.2-1.fc25                           
  python2-ryu.noarch 4.9-2.el7                              python2-tinyrpc.noarch 0.5-4.20170523git1f38ac.el7          
  python2-waitress.noarch 0.9.0-4.fc25                      python2-zmq.x86_64 15.3.0-2.fc25                            
  zeromq.x86_64 4.1.4-5.fc25                               

Complete!   # Only when you see this has the installation actually succeeded.

 

5. Configure the service components

Note: default configuration files vary among distributions. You may need to add these sections and options rather than modify existing ones. An ellipsis (...) in a configuration snippet indicates default options that you should keep.

This deployment uses networking option 2 (self-service networks); the tenant network type is vxlan.

 Configuration reference: https://docs.openstack.org/newton/install-guide-rdo/neutron-controller-install-option2.html

 1. Back up the configuration file first, then edit /etc/neutron/neutron.conf

[root@controller ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.old

# Edit the configuration file

[root@controller ~]# vi /etc/neutron/neutron.conf

[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
transport_url = rabbit://openstack:hotdoor@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
[agent]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://neutron:hotdoor@controller/neutron
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = neutron
password = hotdoor#899
[matchmaker_redis]
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = Default
user_domain_name = Default
region_name = RegionOne
project_name = service
username = nova
password = hotdoor
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[qos]
[quotas]
[ssl]
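
If you would rather not hand-edit the file, the same options can be set non-interactively with crudini (a sketch; the crudini package is not part of the steps above and may need to be installed first):

crudini --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins router
crudini --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:hotdoor@controller/neutron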

2、Configure the Modular Layer 2 (ML2) plug-in

[root@controller ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.old

[root@controller ~]# vi /etc/neutron/plugins/ml2/ml2_conf.ini

[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = True

3、Configure the Linux bridge agent

Note: the Linux bridge agent builds layer-2 (bridging and switching) virtual networks for instances and handles security group rules.

Edit the configuration file /etc/neutron/plugins/ml2/linuxbridge_agent.ini.

In the [linux_bridge] section, map the provider virtual network to the provider physical network interface:

[linux_bridge]
physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME

Replace PROVIDER_INTERFACE_NAME with the name of the underlying provider physical network interface; see the host networking part of the environment setup for reference.

Put simply, this is the physical NIC that provides Internet access.

[root@controller ~]# ifconfig

# The physical NIC that provides Internet access; here we use eno1

eno1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.100.210  netmask 255.255.255.0  broadcast 10.0.100.255
        inet6 fe80::55d6:7864:68db:e9e5  prefixlen 64  scopeid 0x20<link>
        ether 2c:59:e5:47:a8:c8  txqueuelen 1000  (Ethernet)
        RX packets 13005  bytes 9435494 (8.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 4556  bytes 448987 (438.4 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
        device memory 0xf5e00000-f5efffff  

eno2: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.17  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::8ce1:1156:c667:b371  prefixlen 64  scopeid 0x20<link>
        ether 2c:59:e5:47:a8:c9  txqueuelen 1000  (Ethernet)
        RX packets 4870  bytes 463595 (452.7 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 10  bytes 716 (716.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
        device memory 0xf5c00000-f5cfffff

[root@controller ml2]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.old
[root@controller ml2]# vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eno1  # eno1 is the interface on the externally routable network
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = True
local_ip = 10.0.0.17  # the internal (management) network address of this node
l2_population = True
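
Note (my addition): the Linux bridge agent depends on the bridge netfilter sysctls being enabled; the agent's startup script normally sets them (the neutron-enable-bridge-firewall.sh lines in the service log further below show this happening). If they read 0, load the module and re-check:

modprobe br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables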

4、Configure the layer-3 agent

[root@controller ~]# cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.old
[root@controller ~]# vi /etc/neutron/l3_agent.ini

[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
[AGENT]

5、Configure the DHCP agent

[root@controller ~]# cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.old

[root@controller ~]# vi /etc/neutron/dhcp_agent.ini

[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
[AGENT]

# Configuration done; return to the controller-node configuration steps

6、Configure the metadata agent

[root@controller ~]# cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.old
[root@controller ~]# vi /etc/neutron/metadata_agent.ini

[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = METADATA_SECRET
[AGENT]
[cache]
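
Note: METADATA_SECRET is a placeholder. Replace it with a real secret, and use the same value for metadata_proxy_shared_secret in the [neutron] section of /etc/nova/nova.conf below. One way to generate a suitable secret (my addition):

openssl rand -hex 10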

7、Configure the Compute service to use the Networking service

# After configuring, review the effective settings:  cat /etc/nova/nova.conf|grep -v "^#"|grep -v "^$"

vi /etc/nova/nova.conf

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hotdoor@controller
auth_strategy = keystone
my_ip = 10.0.0.17
use_neutron = True 
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:hotdoor@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[cors]
[cors.subdomain]
[crypto]
[database]
connection = mysql+pymysql://nova:hotdoor@controller/nova
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hotdoor#899
[libvirt]
virt_type=kvm
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = Default
user_domain_name = Default
region_name = RegionOne
project_name = service
username = neutron
password = hotdoor
service_metadata_proxy = True
metadata_proxy_shared_secret = METADATA_SECRET
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[placement]
[placement_database]
[rdp]
[remote_debug]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.0.0.17
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]

Configuration complete.

8、The Networking service initialization scripts expect a symbolic link /etc/neutron/plugin.ini pointing to the ML2 plug-in configuration file /etc/neutron/plugins/ml2/ml2_conf.ini. If the link does not exist, create it with the following command:

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
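
Verify the link (my addition):

ls -l /etc/neutron/plugin.ini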

9、Sync the database

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

# Check whether the sync succeeded: the neutron database should now contain tables

[root@controller ~]# mysql -uneutron -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 104
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [neutron]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| neutron            |
+--------------------+
2 rows in set (0.00 sec)

MariaDB [neutron]> use neutron
Database changed
MariaDB [neutron]> show tables;
+-----------------------------------------+
| Tables_in_neutron                       |
+-----------------------------------------+
| address_scopes                          |
| agents                                  |
| alembic_version                         |
| allowedaddresspairs                     |
| arista_provisioned_nets                 |
| arista_provisioned_tenants              |
| arista_provisioned_vms                  |
| auto_allocated_topologies               |
| bgp_peers                               |
| bgp_speaker_dragent_bindings            |
| bgp_speaker_network_bindings            |
| bgp_speaker_peer_bindings               |
| bgp_speakers                            |
| brocadenetworks                         |
| brocadeports                            |
| cisco_csr_identifier_map                |
| cisco_hosting_devices                   |
| cisco_ml2_apic_contracts                |
| cisco_ml2_apic_host_links               |
| cisco_ml2_apic_names                    |
| cisco_ml2_n1kv_network_bindings         |
| cisco_ml2_n1kv_network_profiles         |
| cisco_ml2_n1kv_policy_profiles          |
| cisco_ml2_n1kv_port_bindings            |
| cisco_ml2_n1kv_profile_bindings         |
| cisco_ml2_n1kv_vlan_allocations         |
| cisco_ml2_n1kv_vxlan_allocations        |
| cisco_ml2_nexus_nve                     |
| cisco_ml2_nexusport_bindings            |
| cisco_port_mappings                     |
| cisco_router_mappings                   |
| consistencyhashes                       |
| default_security_group                  |
| dnsnameservers                          |
| dvr_host_macs                           |
| externalnetworks                        |
| extradhcpopts                           |
| firewall_policies                       |
| firewall_rules                          |
| firewalls                               |
| flavors                                 |
| flavorserviceprofilebindings            |
| floatingipdnses                         |
| floatingips                             |
| ha_router_agent_port_bindings           |
| ha_router_networks                      |
| ha_router_vrid_allocations              |
| healthmonitors                          |
| ikepolicies                             |
| ipallocationpools                       |
| ipallocations                           |
| ipamallocationpools                     |
| ipamallocations                         |
| ipamsubnets                             |
| ipsec_site_connections                  |
| ipsecpeercidrs                          |
| ipsecpolicies                           |
| lsn                                     |
| lsn_port                                |
| maclearningstates                       |
| members                                 |
| meteringlabelrules                      |
| meteringlabels                          |
| ml2_brocadenetworks                     |
| ml2_brocadeports                        |
| ml2_distributed_port_bindings           |
| ml2_flat_allocations                    |
| ml2_geneve_allocations                  |
| ml2_geneve_endpoints                    |
| ml2_gre_allocations                     |
| ml2_gre_endpoints                       |
| ml2_nexus_vxlan_allocations             |
| ml2_nexus_vxlan_mcast_groups            |
| ml2_port_binding_levels                 |
| ml2_port_bindings                       |
| ml2_ucsm_port_profiles                  |
| ml2_vlan_allocations                    |
| ml2_vxlan_allocations                   |
| ml2_vxlan_endpoints                     |
| multi_provider_networks                 |
| networkconnections                      |
| networkdhcpagentbindings                |
| networkdnsdomains                       |
| networkgatewaydevicereferences          |
| networkgatewaydevices                   |
| networkgateways                         |
| networkqueuemappings                    |
| networkrbacs                            |
| networks                                |
| networksecuritybindings                 |
| networksegments                         |
| neutron_nsx_network_mappings            |
| neutron_nsx_port_mappings               |
| neutron_nsx_router_mappings             |
| neutron_nsx_security_group_mappings     |
| nexthops                                |
| nsxv_edge_dhcp_static_bindings          |
| nsxv_edge_vnic_bindings                 |
| nsxv_firewall_rule_bindings             |
| nsxv_internal_edges                     |
| nsxv_internal_networks                  |
| nsxv_port_index_mappings                |
| nsxv_port_vnic_mappings                 |
| nsxv_router_bindings                    |
| nsxv_router_ext_attributes              |
| nsxv_rule_mappings                      |
| nsxv_security_group_section_mappings    |
| nsxv_spoofguard_policy_network_mappings |
| nsxv_tz_network_bindings                |
| nsxv_vdr_dhcp_bindings                  |
| nuage_net_partition_router_mapping      |
| nuage_net_partitions                    |
| nuage_provider_net_bindings             |
| nuage_subnet_l2dom_mapping              |
| poolloadbalanceragentbindings           |
| poolmonitorassociations                 |
| pools                                   |
| poolstatisticss                         |
| portbindingports                        |
| portdnses                               |
| portqueuemappings                       |
| ports                                   |
| portsecuritybindings                    |
| providerresourceassociations            |
| provisioningblocks                      |
| qos_bandwidth_limit_rules               |
| qos_dscp_marking_rules                  |
| qos_minimum_bandwidth_rules             |
| qos_network_policy_bindings             |
| qos_policies                            |
| qos_port_policy_bindings                |
| qospolicyrbacs                          |
| qosqueues                               |
| quotas                                  |
| quotausages                             |
| reservations                            |
| resourcedeltas                          |
| router_extra_attributes                 |
| routerl3agentbindings                   |
| routerports                             |
| routerroutes                            |
| routerrules                             |
| routers                                 |
| securitygroupportbindings               |
| securitygrouprules                      |
| securitygroups                          |
| segmenthostmappings                     |
| serviceprofiles                         |
| sessionpersistences                     |
| standardattributes                      |
| subnet_service_types                    |
| subnetpoolprefixes                      |
| subnetpools                             |
| subnetroutes                            |
| subnets                                 |
| subports                                |
| tags                                    |
| trunks                                  |
| tz_network_bindings                     |
| vcns_router_bindings                    |
| vips                                    |
| vpnservices                             |
+-----------------------------------------+
162 rows in set (0.00 sec)

MariaDB [neutron]> exit

10、Restart the Compute API service

systemctl restart openstack-nova-api.service

11、Start the Networking services and configure them to start when the system boots.
For both networking options:

systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
  
systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service

12、For networking option 2, also enable and start the layer-3 service:

systemctl enable neutron-l3-agent.service
systemctl start neutron-l3-agent.service

Installation complete.
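
Once the services are up, the agents register themselves within a minute or so; you can confirm with the openstack client (the neutron CLI used in the verification section below works as well):

[root@controller ~]# openstack network agent list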

 

11. Install and configure a compute node

Reference: https://docs.openstack.org/newton/install-guide-rdo/neutron-compute-install.html

 If all OpenStack services are installed on a single server, this step is unnecessary: the /etc/neutron/neutron.conf parameters are identical on the controller node and the compute node, so there is nothing more to configure.

# Check that the required packages are installed and the services are healthy

[root@controller ~]# rpm -qa openstack-neutron-linuxbridge ebtables ipset

ebtables-2.0.10-21.fc25.x86_64
ipset-6.29-1.fc25.x86_64
openstack-neutron-linuxbridge-9.4.0-1.el7.noarch


[root@controller ~]# systemctl status openstack-nova-compute.service
● openstack-nova-compute.service - OpenStack Nova Compute Server
   Loaded: loaded (/usr/lib/systemd/system/openstack-nova-compute.service; enabled; vendor preset: disabled)
   Active: activating (start) since Sat 2017-08-19 14:00:11 CST; 2h 3min ago
 Main PID: 1587 (nova-compute)
    Tasks: 1 (limit: 9830)
   CGroup: /system.slice/openstack-nova-compute.service
           └─1587 /usr/bin/python2 /usr/bin/nova-compute

Aug 19 14:00:11 controller systemd[1]: Starting OpenStack Nova Compute Server...

[root@controller ~]# systemctl status neutron-linuxbridge-agent.service
● neutron-linuxbridge-agent.service - OpenStack Neutron Linux Bridge Agent
   Loaded: loaded (/usr/lib/systemd/system/neutron-linuxbridge-agent.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat 2017-08-19 15:48:21 CST; 16min ago
 Main PID: 3510 (neutron-linuxbr)
    Tasks: 1 (limit: 9830)
   CGroup: /system.slice/neutron-linuxbridge-agent.service
           └─3510 /usr/bin/python2 /usr/bin/neutron-linuxbridge-agent --config-file /usr/share/neutron/neutron-dis

Aug 19 15:48:21 controller systemd[1]: Starting OpenStack Neutron Linux Bridge Agent...
Aug 19 15:48:21 controller neutron-enable-bridge-firewall.sh[3502]: net.bridge.bridge-nf-call-arptables = 1
Aug 19 15:48:21 controller neutron-enable-bridge-firewall.sh[3502]: net.bridge.bridge-nf-call-iptables = 1
Aug 19 15:48:21 controller neutron-enable-bridge-firewall.sh[3502]: net.bridge.bridge-nf-call-ip6tables = 1
Aug 19 15:48:21 controller systemd[1]: Started OpenStack Neutron Linux Bridge Agent.
Aug 19 15:48:21 controller neutron-linuxbridge-agent[3510]: Guru meditation now registers SIGUSR1 and SIGUSR2 by d
Aug 19 15:48:22 controller neutron-linuxbridge-agent[3510]: Option "verbose" from group "DEFAULT" is deprecated fo
Aug 19 15:48:22 controller neutron-linuxbridge-agent[3510]: Option "notification_driver" from group "DEFAULT" is d
Aug 19 15:48:22 controller sudo[3533]:  neutron : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/neutron-rootwrap-

 

12. Verify operation

[root@controller ~]# . admin-openrc
[root@controller ~]# neutron ext-list
+---------------------------+-----------------------------------------------+
| alias                     | name                                          |
+---------------------------+-----------------------------------------------+
| default-subnetpools       | Default Subnetpools                           |
| network-ip-availability   | Network IP Availability                       |
| network_availability_zone | Network Availability Zone                     |
| auto-allocated-topology   | Auto Allocated Topology Services              |
| ext-gw-mode               | Neutron L3 Configurable external gateway mode |
| binding                   | Port Binding                                  |
| agent                     | agent                                         |
| subnet_allocation         | Subnet Allocation                             |
| l3_agent_scheduler        | L3 Agent Scheduler                            |
| tag                       | Tag support                                   |
| external-net              | Neutron external network                      |
| flavors                   | Neutron Service Flavors                       |
| net-mtu                   | Network MTU                                   |
| availability_zone         | Availability Zone                             |
| quotas                    | Quota management support                      |
| l3-ha                     | HA Router extension                           |
| provider                  | Provider Network                              |
| multi-provider            | Multi Provider Network                        |
| address-scope             | Address scope                                 |
| extraroute                | Neutron Extra Route                           |
| subnet-service-types      | Subnet service types                          |
| standard-attr-timestamp   | Resource timestamps                           |
| service-type              | Neutron Service Type Management               |
| l3-flavors                | Router Flavor Extension                       |
| port-security             | Port Security                                 |
| extra_dhcp_opt            | Neutron Extra DHCP opts                       |
| standard-attr-revisions   | Resource revision numbers                     |
| pagination                | Pagination support                            |
| sorting                   | Sorting support                               |
| security-group            | security-group                                |
| dhcp_agent_scheduler      | DHCP Agent Scheduler                          |
| router_availability_zone  | Router Availability Zone                      |
| rbac-policies             | RBAC Policies                                 |
| standard-attr-description | standard-attr-description                     |
| router                    | Neutron L3 Router                             |
| allowed-address-pairs     | Allowed Address Pairs                         |
| project-id                | project_id field enabled                      |
| dvr                       | Distributed Virtual Router                    |
+---------------------------+-----------------------------------------------+

 

13. Dashboard

1、Install and configure components

yum install openstack-dashboard -y

2、Configure openstack-dashboard

[root@controller ~]# cp /etc/openstack-dashboard/local_settings /etc/openstack-dashboard/local_settings.old
[root@controller ~]# vi /etc/openstack-dashboard/local_settings

[root@controller ~]# cat /etc/openstack-dashboard/local_settings |grep -v "^#"|grep -v "^$"
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
from openstack_dashboard.settings import HORIZON_CONFIG
DEBUG = False
WEBROOT = '/dashboard/'
ALLOWED_HOSTS = ['*', ]
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
LOCAL_PATH = '/tmp'
SECRET_KEY='1b8a3d57c5bde2cf7506'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    },
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_KEYSTONE_BACKEND = {
    'name': 'native',
    'can_edit_user': True,
    'can_edit_group': True,
    'can_edit_project': True,
    'can_edit_domain': True,
    'can_edit_role': True,
}
OPENSTACK_HYPERVISOR_FEATURES = {
    'can_set_mount_point': False,
    'can_set_password': False,
    'requires_keypair': False,
    'enable_quotas': True
}
OPENSTACK_CINDER_FEATURES = {
    'enable_backup': False,
}
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    'enable_quotas': True,
    'enable_ipv6': True,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': True,
    'enable_firewall': True,
    'enable_vpn': True,
    'enable_fip_topology_check': True,
    # Default dns servers you would like to use when a subnet is
    # created.  This is only a default, users can still choose a different
    # list of dns servers when creating a new subnet.
    # The entries below are examples only, and are not appropriate for
    # real deployments
    # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"],
    # The profile_support option is used to detect if an external router can be
    # configured via the dashboard. When using specific plugins the
    # profile_support can be turned on if needed.
    'profile_support': None,
    #'profile_support': 'cisco',
    # Set which provider network types are supported. Only the network types
    # in this list will be available to choose from when creating a network.
    # Network types include local, flat, vlan, gre, vxlan and geneve.
    # 'supported_provider_types': ['*'],
    # You can configure available segmentation ID range per network type
    # in your deployment.
    # 'segmentation_id_range': {
    #     'vlan': [1024, 2048],
    #     'vxlan': [4094, 65536],
    # },
    # You can define additional provider network types here.
    # 'extra_provider_types': {
    #     'awesome_type': {
    #         'display_name': 'Awesome New Type',
    #         'require_physical_network': False,
    #         'require_segmentation_id': True,
    #     }
    # },
    # Set which VNIC types are supported for port binding. Only the VNIC
    # types in this list will be available to choose from when creating a
    # port.
    # VNIC types include 'normal', 'macvtap' and 'direct'.
    # Set to empty list or None to disable VNIC type selection.
    'supported_vnic_types': ['*'],
}
OPENSTACK_HEAT_STACK = {
    'enable_user_pass': True,
}
IMAGE_CUSTOM_PROPERTY_TITLES = {
    "architecture": _("Architecture"),
    "kernel_id": _("Kernel ID"),
    "ramdisk_id": _("Ramdisk ID"),
    "image_state": _("Euca2ools state"),
    "project_id": _("Project ID"),
    "image_type": _("Image Type"),
}
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
INSTANCE_LOG_LENGTH = 35
DROPDOWN_MAX_ITEMS = 30
TIME_ZONE = "UTC"
POLICY_FILES_PATH = '/etc/openstack-dashboard'
LOGGING = {
    'version': 1,
    # When set to True this will disable all logging except
    # for loggers specified in this configuration dictionary. Note that
    # if nothing is specified here and disable_existing_loggers is True,
    # django.db.backends will still log unless it is disabled explicitly.
    'disable_existing_loggers': False,
    'formatters': {
        'operation': {
            # The format of "%(message)s" is defined by
            # OPERATION_LOG_OPTIONS['format']
            'format': '%(asctime)s %(message)s'
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'console': {
            # Set the level to "DEBUG" for verbose output logging.
            'level': 'INFO',
            'class': 'logging.StreamHandler',
        },
        'operation': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'operation',
        },
    },
    'loggers': {
        # Logging from django.db.backends is VERY verbose, send to null
        # by default.
        'django.db.backends': {
            'handlers': ['null'],
            'propagate': False,
        },
        'requests': {
            'handlers': ['null'],
            'propagate': False,
        },
        'horizon': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'horizon.operation_log': {
            'handlers': ['operation'],
            'level': 'INFO',
            'propagate': False,
        },
        'openstack_dashboard': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'novaclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'cinderclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'keystoneclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'glanceclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'neutronclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'heatclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'ceilometerclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'swiftclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'openstack_auth': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'nose.plugins.manager': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'django': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'iso8601': {
            'handlers': ['null'],
            'propagate': False,
        },
        'scss': {
            'handlers': ['null'],
            'propagate': False,
        },
    },
}
SECURITY_GROUP_RULES = {
    'all_tcp': {
        'name': _('All TCP'),
        'ip_protocol': 'tcp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_udp': {
        'name': _('All UDP'),
        'ip_protocol': 'udp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_icmp': {
        'name': _('All ICMP'),
        'ip_protocol': 'icmp',
        'from_port': '-1',
        'to_port': '-1',
    },
    'ssh': {
        'name': 'SSH',
        'ip_protocol': 'tcp',
        'from_port': '22',
        'to_port': '22',
    },
    'smtp': {
        'name': 'SMTP',
        'ip_protocol': 'tcp',
        'from_port': '25',
        'to_port': '25',
    },
    'dns': {
        'name': 'DNS',
        'ip_protocol': 'tcp',
        'from_port': '53',
        'to_port': '53',
    },
    'http': {
        'name': 'HTTP',
        'ip_protocol': 'tcp',
        'from_port': '80',
        'to_port': '80',
    },
    'pop3': {
        'name': 'POP3',
        'ip_protocol': 'tcp',
        'from_port': '110',
        'to_port': '110',
    },
    'imap': {
        'name': 'IMAP',
        'ip_protocol': 'tcp',
        'from_port': '143',
        'to_port': '143',
    },
    'ldap': {
        'name': 'LDAP',
        'ip_protocol': 'tcp',
        'from_port': '389',
        'to_port': '389',
    },
    'https': {
        'name': 'HTTPS',
        'ip_protocol': 'tcp',
        'from_port': '443',
        'to_port': '443',
    },
    'smtps': {
        'name': 'SMTPS',
        'ip_protocol': 'tcp',
        'from_port': '465',
        'to_port': '465',
    },
    'imaps': {
        'name': 'IMAPS',
        'ip_protocol': 'tcp',
        'from_port': '993',
        'to_port': '993',
    },
    'pop3s': {
        'name': 'POP3S',
        'ip_protocol': 'tcp',
        'from_port': '995',
        'to_port': '995',
    },
    'ms_sql': {
        'name': 'MS SQL',
        'ip_protocol': 'tcp',
        'from_port': '1433',
        'to_port': '1433',
    },
    'mysql': {
        'name': 'MYSQL',
        'ip_protocol': 'tcp',
        'from_port': '3306',
        'to_port': '3306',
    },
    'rdp': {
        'name': 'RDP',
        'ip_protocol': 'tcp',
        'from_port': '3389',
        'to_port': '3389',
    },
}
REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
                              'LAUNCH_INSTANCE_DEFAULTS',
                              'OPENSTACK_IMAGE_FORMATS']
ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}

3、Restart the services and check their status

[root@controller ~]# systemctl restart httpd.service memcached.service

[root@controller ~]# systemctl status httpd.service memcached.service

 4、Verify operation

Configure name resolution: add these entries to the Windows hosts file (C:\Windows\System32\drivers\etc\hosts):

10.0.0.17 controller computer ntpserver
10.0.100.214 controller
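
To confirm the entry works, test resolution from a Windows command prompt (my addition; assumes the client can reach the controller's address):

C:\> ping controller -n 1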

 Open the site:

http://controller/dashboard
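
If the page does not load in the browser, first check from the controller itself (my addition):

[root@controller ~]# curl -sI http://controller/dashboard/ | head -1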


14. Block Storage service

# Create the Cinder database

1、Create the database

mysql -u root -p

CREATE DATABASE cinder;

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
  IDENTIFIED BY '123456';

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
  IDENTIFIED BY '123456';

2、Create the service credentials

. admin-openrc

openstack user create --domain default --password-prompt cinder

User Password: # enter a password
Repeat User Password: # enter it again

openstack role add --project service --user cinder admin

3、Create the service entities and API endpoints

openstack service create --name cinder \
  --description "OpenStack Block Storage" volume

openstack service create --name cinderv2 \
  --description "OpenStack Block Storage" volumev2

openstack endpoint create --region RegionOne \
  volume public http://controller:8776/v1/%\(tenant_id\)s

openstack endpoint create --region RegionOne \
  volume internal http://controller:8776/v1/%\(tenant_id\)s

openstack endpoint create --region RegionOne \
  volume admin http://controller:8776/v1/%\(tenant_id\)s

openstack endpoint create --region RegionOne \
  volumev2 public http://controller:8776/v2/%\(tenant_id\)s

openstack endpoint create --region RegionOne \
  volumev2 internal http://controller:8776/v2/%\(tenant_id\)s

openstack endpoint create --region RegionOne \
  volumev2 admin http://controller:8776/v2/%\(tenant_id\)s
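
A quick check (my addition) that both service entities exist:

[root@controller ~]# openstack service list | grep volume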

4、Install and configure the components

yum install openstack-cinder -y

# Back up the configuration file

cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
vi /etc/cinder/cinder.conf

[root@controller ~]# cat /etc/cinder/cinder.conf|grep -v "^#"|grep -v "^$"
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
auth_strategy = keystone
my_ip = 10.0.0.17
[BACKEND]
[BRCD_FABRIC_EXAMPLE]
[CISCO_FABRIC_EXAMPLE]
[COORDINATION]
[FC-ZONE-MANAGER]
[KEY_MANAGER]
[barbican]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://cinder:123456@controller/cinder
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = cinder
password = 123456
[matchmaker_redis]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[ssl]
#Configure Compute to use Block Storage (skip this step for now; we configure it later when setting up Ceph storage)
#Edit the /etc/nova/nova.conf file and add the following to it:
[cinder]
os_region_name = RegionOne
#Restart the Compute API service (not needed at this step):
systemctl restart openstack-nova-api.service

 # Enable and start the services

systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
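
Verify that the controller-side services are up (this is the same check the Newton verification guide uses):

[root@controller ~]# openstack volume service list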

15. Install and configure a storage node

1、This step is required only when using local storage.

# lvm2 usually ships with the system:

[root@controller ~]# rpm -qa lvm2
lvm2-2.02.167-3.fc25.x86_64

# Install it if it is missing

yum install lvm2 -y

systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service

2、Create the LVM physical volume /dev/sdb:

pvcreate /dev/sdb
# Output
Physical volume "/dev/sdb" successfully created

3、Create the LVM volume group cinder-volumes (the Block Storage service creates logical volumes in this group):

vgcreate cinder-volumes /dev/sdb
# Output
Volume group "cinder-volumes" successfully created
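
Confirm the physical volume and the volume group (my addition):

pvs
vgs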

4、Only instances may access Block Storage volumes, but the underlying operating system manages the devices associated with the volumes. By default, the LVM volume-scanning tool scans the /dev directory for block devices that contain volumes. If projects use LVM on their own volumes, the scanning tool detects those volumes and tries to cache them, which can cause a variety of problems on both the underlying OS and the project volumes. You must reconfigure LVM to scan only the devices that contain the cinder-volumes volume group. Edit the /etc/lvm/lvm.conf file and, in the devices section, add a filter that accepts /dev/sdb and rejects every other device:

devices {
    filter = [ "a/sdb/", "r/.*/" ]

If the storage node's operating system disk (/dev/sda) also uses LVM, accept that disk as well:

    filter = [ "a/sda/", "a/sdb/", "r/.*/" ]

Similarly, if the operating system disk of a compute node uses LVM, its filter needs to accept only that device:

    filter = [ "a/sda/", "r/.*/" ]

16. Install and configure components (we perform this step directly, on the controller node)

1、Install the packages
yum install openstack-cinder targetcli python-keystone

 2、vi /etc/cinder/cinder.conf

 

 

 While editing the configuration file above, I hit a snag: the default file does not contain an [lvm] section, so it has to be added by hand.
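
For reference, the official Newton storage-node guide has you add the [lvm] section to /etc/cinder/cinder.conf yourself, together with enabling the backend; a sketch, assuming the cinder-volumes volume group created above and the targetcli package installed in step 1:

[DEFAULT]
enabled_backends = lvm
glance_api_servers = http://controller:9292

[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm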
