The hosts inventory file is indispensable when using Ansible; every run depends on it. Yet this hosts file often becomes the source of problems that make a project hard to manage, because it forces you to define hosts at too low a level, and real projects are far more complex than the examples. If we manage the hosts file with object-oriented principles instead, life becomes much simpler and much more controllable.
After more than two years of testing and validation in real project deployments, I have finally worked out a workable approach, which I share here for you to try.
The basic idea is:
1. One global input.yml configuration file serves as the single entry point. In this file, whatever stands to the right of a colon may be changed; whatever stands to the left of a colon must not be.
2. Different deployment projects are based on the same input.yml format, so with the git version-control tool you can conveniently switch between projects, or between development and test deployment environments, as illustrated by the sketch below.
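A minimal sketch of the convention (a hypothetical fragment using keys from the input.yml shown later, not the complete file):

project_root:
  common:
    vm_platform: "vmware-vsphere"   # right of the colon: editable per project or environment
    is_internet_up: false           # right of the colon: editable per project or environment
# Left of the colons, the key names (project_root, common, vm_platform, ...) are the fixed
# contract that the playbooks depend on; each project or environment edits only the values
# on its own git branch.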
The content of main.yml is as follows:
- name: hadoop-cluster playbook, includes many sub-playbooks.
  hosts: localhost
  gather_facts: False
  # become: yes
  # become_method: sudo
  vars:
    projectinfo: "{{ lookup('file', 'input.yml') | from_yaml }}"
    vm_host_list: []
    domain_group_dict: {}
  pre_tasks:
    - name: "Record the timestamp at which the run starts."
      set_fact:
        task_startup_timestamp: "{{ lookup('pipe', 'date \"+%Y-%m-%d %H:%M:%S\"') }}"
    - name: "Runs before everything else: check that the required files exist."
      shell: echo "Run starting... checking that the required files exist."; ./before-run.sh;
    # - name: "Check that the files in the local project folder exist"
    #   shell: ./check-file-exist-status.sh
    #   register: files_status
    #
    # - name: "If the stdout check failed, interrupt execution"
    #   fail: msg="Error: a file link is stale and the file does not exist"
    #   when: '"does not exist" in files_status.stdout'
    - name: "Check that the role dependencies are intact and the versions are correct" # TODO
      shell: ./check-role-dependency.sh
      register: role_dependency_status
    - name: "Abort when a role dependency is missing"
      fail: msg="There is a problem with the role dependencies"
      when: '"role does not exist" in role_dependency_status.stdout'
    - name: "Set shorthand var projectroot"
      set_fact: projectroot="{{ projectinfo['project_root'] }}"
    - name: "Set shorthand var commonsetting"
      set_fact: commonsetting="{{ projectroot['common'] }}"
    - name: "Set shorthand var hostdict"
      set_fact: hostdict="{{ projectroot['all_hosts'] }}"
    - name: "Set shorthand var hostconfig"
      set_fact: hostconfig="{{ projectroot['host_config'] }}"
    - name: "Set shorthand var hadoopconfig"
      set_fact: hadoopconfig="{{ projectroot['host_config']['hadoop_config'] }}"
    - name: "Set shorthand var dnsconfig"
      set_fact: dnsconfig="{{ projectroot['host_config']['dns_config'] }}"
    - name: "Set shorthand var vcenterconfig"
      set_fact: vcenterconfig="{{ projectroot['vsphere_platform']['vmware_esxi'] }}"
    - name: "Set shorthand var virtualbox_template_name"
      set_fact: virtualbox_template_name="{{ projectroot['host_config']['vagrant_config']['virtualbox_template_name'] }}"
    - name: "Set shorthand var vm_bridge_nic_name"
      set_fact: vm_bridge_nic_name="eth1"
    - name: "Merge the host lists of all groups into one flat list"
      set_fact: vm_host_list="{{ vm_host_list + hostdict[item] }}"
      with_items: "{{ hostdict.keys() | list }}"
      when: hostdict[item] is defined and hostdict[item][0].ismaster == true
    - name: "Generate the temporary group-domain-ip mapping file /tmp/group_domain_ip_user_password.txt"
      template: src=templates/group_domain_ip_user_password.txt.j2 dest=/tmp/group_domain_ip_user_password.txt
    - name: "Load the content of /tmp/group_domain_ip_user_password.txt into a registered variable"
      shell: cat /tmp/group_domain_ip_user_password.txt
      register: group_domain_ip_user_password
    # Note: usernames and passwords must not contain ':' or ',', because those are the
    # separators; otherwise the split below cannot work correctly.
    # hadoop-namenode-hosts:hadoop-namenode1.ascs.tech:10.20.2.1:centos:YOUR_PASSWORD,hadoop-namenode-hosts:hadoop-namenode2.ascs.tech:10.20.2.2:centos:YOUR_PASSWORD,hadoop-....
    - set_fact: group_domain_ip_user_password_list="{{ group_domain_ip_user_password.stdout.split(',') }}"
    - add_host:
        hostname: "{{ item.split(':')[1] }}"
        groups: "{{ item.split(':')[0] }}"
        ansible_host: "{{ item.split(':')[2] }}"
        # ansible_port: 22
        ansible_user: "{{ item.split(':')[3] }}"
        ansible_ssh_pass: "{{ item.split(':')[4] }}"
      with_items: "{{ group_domain_ip_user_password_list }}"
    ## Note: every host is still accessed as the root user here; the hadoop user has not been created yet.
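After the add_host loop, the groups declared in input.yml (zookeeper-hosts, hadoop-namenode-hosts, hadoop-datanode-hosts, and so on) can be targeted like any static inventory group by the plays that follow. A minimal sketch of such a follow-up play (a hypothetical example, not part of the original main.yml):

- name: ping every dynamically registered zookeeper host
  hosts: zookeeper-hosts
  gather_facts: False
  tasks:
    - ping: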
The core key file, group_domain_ip_user_password.txt.j2, reads as follows:
happy:templates happy$ more group_domain_ip_user_password.txt.j2
{% set domain_group_dict={} %}
{%- for key,dict_item in hostdict.items() -%}
{%- for temphost in hostdict[key] -%}
{{key}}:{{temphost.name}}:{{temphost.ip}}:{{temphost.username}}:{{temphost.password}}{%- if not loop.last -%},{%- endif -%}
{%- endfor -%}{%- if not loop.last -%},{%- endif -%}
{%- endfor -%}
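Rendered against the input.yml below, /tmp/group_domain_ip_user_password.txt comes out as one long line of colon-separated records joined by commas; the group order follows the dict iteration order of all_hosts and may vary. Abbreviated, it looks like:

zookeeper-hosts:zka1.example.com:10.20.2.51:root:YOUR_PASSWORD,zookeeper-hosts:zka2.example.com:10.20.2.52:root:YOUR_PASSWORD,...,hadoop-datanode-hosts:hadoop-datanode6.example.com:10.20.2.16:root:YOUR_PASSWORD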
The format and content of the configuration file input.yml are as follows:
--- #config file version-1.1.0 2018-08-22
project_root: # children of a dict are indented 2 spaces; list items under a key are indented 2 spaces as well.
  project_info:
    project_description: "Automated offline deployment of a Hadoop cluster"
    version: "1.0"
    source_code: ""
    created_date: "2017-02-01"
    author_list:
      - name: ""
        phone: ""
        email: ""
        weixin: ""
        QQ: ""
  vsphere_platform:
    virtualbox:
      vagrant_offline_install_file: "vagrant_2.0.2_x86_64.rpm"
      virtualbox_offline_install_file: "VirtualBox-5.2-5.2.6_120293_el7-1.x86_64.rpm"
      vagrant_box_name: "centos1708-kernel4.4.116-docker-17.12.0-jre9-ce-go1.9"
    vmware_esxi:
      vcenterhostname: "192.168.11.10" # vcenter.example.com; if the name does not resolve, a hosts entry on the control machine also works
      vcenterusername: "administrator@vsphere.local"
      vcenterpassword: ""
      datacenter: "hewutong"
      default_datastore: "cw_m4_sas_datastore" #"cw_m4_pcie_datastore2 cw_m4_sas_datastore"
      template: "centos1611_docker_jdk8_template"
      virtual_machine_template_disk_size_in_gb: 30
      resource_pool: "hadoopcluster"
      folder: "/vm"
      dnsserver1: "10.20.1.1" # the IP that create-dns-record.yml talks to; it is also dns-host[0].ip
      dnsserver2: "114.114.114.114"
      state: "poweredon"
      esxi_nic_network:
        vlan: "VM Network" #"192.100.x.x"
        gateway: "10.20.0.1" # sudo route add -net 11.23.3.0 -netmask 255.255.255.128 11.23.3.1
        netmask: "255.255.0.0"
        dnsserver1: "10.20.1.1"
        dnsserver2: "114.114.114.114"
      datastore:
        rabbitmq_datastore: "cw_m4_sas_datastore"
    vmware_workstation:
    openstack:
    huawei_fusion_vsphere:
  common:
    vm_platform: "vmware-vsphere" #vagrant, vmware-vsphere, huawei-vsphere
    period_force_time_sync: "yes"
    nic_name: "ens160" #ens160 enp0s3
    is_internet_up: false
    rabbitmq_datastore: "cw_m4_sas_datastore"
    software_root_dir: "/var/server" # tied to the settings below: if you change it, the related directories below must change with it.
    citybox_work_network:
      vlan: "10.20.0.0_10G-port" #"10.20.x.x"
      gateway: "10.20.0.1" #10.20.1.1 to do
      netmask: "255.255.0.0"
      dnsserver1: "10.20.1.1"
      dnsserver2: "114.114.114.114"
      network: "10.20.0.0/16"
  host_config:
    java_config:
      #app_home: "/var/server/jre" #jre-8u181-linux-x64.tar.gz
      jre_targz: "jre-8u181-linux-x64.tar.gz" #jre-10.0.1_linux-x64_bin.tar.gz #tar -zxvf jre-9.0.4_linux-x64_bin.tar.gz -C jre9 --strip-components=1
      jre_foldername: "jre"
      jre_version: "1.8"
      jdk_targz: "jdk-8u131-linux-x64.tar.gz"
      jdk_foldername: "jdk"
      jdk_version: "1.8"
    go_config:
      app_home: "/var/server/go"
      app_foldername: "go"
      install_filename: "go1.10.linux-amd64.tar.gz"
      version: "1.10"
  all_hosts:
    zookeeper-hosts:
      - name: "zka1.example.com"
        uuid: "zka1.example.com"
        ip: "10.20.2.51"
        cpu: "1"
        memory: "4096" # at least 600MB
        disk: 30
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore1"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "zka2.example.com"
        uuid: "zka2.example.com"
        ip: "10.20.2.52"
        cpu: "1"
        memory: "4096"
        disk: 30
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore2"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "zka3.example.com"
        uuid: "zka3.example.com"
        ip: "10.20.2.53"
        cpu: "1"
        memory: "4096"
        disk: 30
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore1"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "zka4.example.com"
        uuid: "zka4.example.com"
        ip: "10.20.2.54"
        cpu: "1"
        memory: "4096"
        disk: 30
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore2"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "zka5.example.com"
        uuid: "zka5.example.com"
        ip: "10.20.2.55"
        cpu: "1"
        memory: "4096"
        disk: 30
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore1"
        host_machine: "192.168.3.11"
        ismaster: true
    hadoop-namenode-hosts:
      - name: "hadoop-namenode1.example.com"
        uuid: "hadoop-namenode1.example.com"
        ip: "10.20.2.1"
        cpu: "6"
        memory: "20480"
        disk: "100"
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore1"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "hadoop-namenode2.example.com"
        uuid: "hadoop-namenode2.example.com"
        ip: "10.20.2.2"
        cpu: "6"
        memory: "20480"
        disk: "100"
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore1"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "hadoop-namenode3.example.com"
        uuid: "hadoop-namenode3.example.com"
        ip: "10.20.2.3"
        cpu: "6"
        memory: "20480"
        disk: "100"
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore2"
        host_machine: "192.168.3.11"
        ismaster: true
    hadoop-datanode-hosts:
      - name: "hadoop-datanode1.example.com"
        uuid: "hadoop-datanode1.example.com"
        ip: "10.20.2.11"
        cpu: "6"
        memory: "20480"
        disk: "200"
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore1"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "hadoop-datanode2.example.com"
        uuid: "hadoop-datanode2.example.com"
        ip: "10.20.2.12"
        cpu: "6"
        memory: "20480"
        disk: "200"
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore2"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "hadoop-datanode3.example.com"
        uuid: "hadoop-datanode3.example.com"
        ip: "10.20.2.13"
        cpu: "6"
        memory: "20480"
        disk: "200"
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_pcie_datastore2"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "hadoop-datanode4.example.com"
        uuid: "hadoop-datanode4.example.com"
        ip: "10.20.2.14"
        cpu: "6"
        memory: "20480"
        disk: "800"
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_sas_datastore"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "hadoop-datanode5.example.com"
        uuid: "hadoop-datanode5.example.com"
        ip: "10.20.2.15"
        cpu: "6"
        memory: "20480"
        disk: "800"
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_sas_datastore"
        host_machine: "192.168.3.11"
        ismaster: true
      - name: "hadoop-datanode6.example.com"
        uuid: "hadoop-datanode6.example.com"
        ip: "10.20.2.16"
        cpu: "6"
        memory: "20480"
        disk: "800"
        username: "root"
        password: "YOUR_PASSWORD"
        datastore: "cw_m4_sas_datastore"
        host_machine: "192.168.3.11"
        ismaster: true
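To sanity-check an edited input.yml before a full run, here is a minimal sketch of a standalone helper playbook (a hypothetical check-input.yml, not part of the original project) that parses the file exactly as main.yml does and prints every group under all_hosts with its host count:

- hosts: localhost
  gather_facts: False
  vars:
    projectinfo: "{{ lookup('file', 'input.yml') | from_yaml }}"
  tasks:
    - name: print each group under all_hosts and its host count
      debug:
        msg: "{{ item.key }}: {{ item.value | length }} host(s)"
      with_dict: "{{ projectinfo['project_root']['all_hosts'] }}"

Run it with ansible-playbook check-input.yml; any YAML indentation mistake in input.yml fails immediately at the from_yaml step instead of halfway through a deployment.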