How to avoid the common pitfalls of manually deploying OpenStack

Environment: KVM host 192.168.10.253 provides the local yum repository and NTP service for this lab
controller
enp1s0 192.168.10.150 gateway 192.168.10.254 dns1 192.168.0.1
enp2s0 10.10.10.150 gateway 10.10.10.254

compute1
enp1s0 192.168.10.151 gateway 192.168.10.254 dns1 192.168.0.1
enp2s0 10.10.10.151 gateway 10.10.10.254

compute2
enp1s0 192.168.10.152 gateway 192.168.10.254 dns1 192.168.0.1
enp2s0 10.10.10.152 gateway 10.10.10.254

Hostname mappings:
10.10.10.150 controller
10.10.10.151 compute1
10.10.10.152 compute2

1: Prepare the yum repositories on the controller and all compute nodes
mkdir -p /etc/yum.repos.d/repo.bk
mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/repo.bk

cat > /etc/yum.repos.d/local.repo <<EOF
[AppStream]
name=AppStream
baseurl=http://192.168.10.253/CentosStream/AppStream/
gpgcheck=0
enabled=1
[Base]
name=Base
baseurl=http://192.168.10.253/CentosStream/BaseOS/
gpgcheck=0
enabled=1
EOF

cat > /etc/yum.repos.d/PowerTools.repo <<EOF
[PowerTools]
name=PowerTools
baseurl=https://mirrors.aliyun.com/centos/8-stream/PowerTools/x86_64/os/
gpgcheck=0
enabled=1
EOF

cat >/etc/yum.repos.d/Extras.repo <<EOF
[Extras]
name=Extras
baseurl=https://mirrors.aliyun.com/centos/8-stream/extras/x86_64/os/
gpgcheck=0
enabled=1
EOF

yum clean all && yum makecache
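
Before moving on, confirm that all four repos resolve (dnf is the default package manager on CentOS Stream 8; the AppStream, Base, PowerTools, and Extras repo IDs should all be listed):
dnf repolist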

2: Configure NTP (point chrony at the host, and comment out the leapsectz line)
sed -i 's#pool.*iburst#pool 192.168.10.253 iburst#' /etc/chrony.conf
sed -i 's|^leapsectz right/UTC|#&|' /etc/chrony.conf
systemctl enable chronyd --now

Verify:
chronyc sources

Configure hostname resolution on all nodes:
cat >>/etc/hosts <<EOF
10.10.10.150 controller
10.10.10.151 compute1
10.10.10.152 compute2
EOF

Disable the firewall and SELinux on all nodes:
systemctl disable firewalld.service --now
sed -i 's#SELINUX=.*#SELINUX=disabled#' /etc/selinux/config
setenforce 0

3: Set up SSH mutual trust
Run on the controller node:
ssh-keygen -t rsa -q -P "" -f /root/.ssh/id_rsa
ssh-copy-id -i /root/.ssh/id_rsa.pub controller
ssh-copy-id -i /root/.ssh/id_rsa.pub compute1
ssh-copy-id -i /root/.ssh/id_rsa.pub compute2
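
A quick loop confirms passwordless login works to every node (the hostnames are the ones added to /etc/hosts above):
for h in controller compute1 compute2; do ssh -o BatchMode=yes root@$h hostname; done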

4: Configure the OpenStack yum repository
yum install centos-release-openstack-ussuri

yum upgrade

Reboot the machine
init 6

======================================

yum install python3-openstackclient -y

yum install openstack-selinux

5: Install and configure the database
Controller node

yum install mariadb mariadb-server python3-PyMySQL

vim /etc/my.cnf.d/openstack.cnf

[mysqld]
bind-address = 10.10.10.150

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

Start the database and enable it at boot
systemctl enable mariadb.service --now

mysql_secure_installation
Enter  (no current root password)
n      (skip changing the root password)
y      (remove anonymous users)
y      (disallow remote root login)
y      (remove the test database)
y      (reload privilege tables)
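
Sanity-check that the server is up and bound to the management address:
mysql -u root -p -e "SELECT VERSION();"
ss -tnlp | grep 3306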

6: Install the message queue
Controller node
yum install rabbitmq-server

Start the service and enable it at boot
systemctl enable rabbitmq-server.service --now

Add the openstack user
rabbitmqctl add_user openstack RABBIT_PASS

Grant permissions (configure, write, and read on all resources):
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
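
Verify the user and its permissions were registered:
rabbitmqctl list_users
rabbitmqctl list_permissions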

7: Install Memcached
Controller node
yum install memcached python3-memcached

Edit the configuration file
vim /etc/sysconfig/memcached

PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 127.0.0.1,::1,controller"

Start the service and enable it at boot
systemctl enable memcached.service --now
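
Confirm memcached answers on the controller address (a quick check assuming nc from the nmap-ncat package, installed by default on CentOS 8; "quit" closes the connection so nc exits):
printf 'stats\r\nquit\r\n' | nc controller 11211 | head -n 5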

8: Install and configure etcd
yum install etcd

Back up and edit the configuration file
mv /etc/etcd/etcd.conf /etc/etcd/etcd.conf.bk

vim /etc/etcd/etcd.conf

#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://10.10.10.150:2380"
ETCD_LISTEN_CLIENT_URLS="http://10.10.10.150:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.10.10.150:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.10.10.150:2379"
ETCD_INITIAL_CLUSTER="controller=http://10.10.10.150:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"

Start the service and enable it at boot
systemctl enable etcd --now
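
Verify etcd answers on its client port (etcdctl ships with the etcd package; ETCDCTL_API=3 selects the v3 API explicitly):
ETCDCTL_API=3 etcdctl --endpoints=http://10.10.10.150:2379 member list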

It is a good idea to take a snapshot here.

Install and configure keystone

Log in to the database
mysql -u root -p

Create the keystone database
CREATE DATABASE keystone;

Grant privileges on the keystone database:
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost'
IDENTIFIED BY 'KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%'
IDENTIFIED BY 'KEYSTONE_DBPASS';

exit

Install the openstack-keystone, httpd, and python3-mod_wsgi packages
yum install openstack-keystone httpd python3-mod_wsgi

Back up and edit the configuration file
mv /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bk

vim /etc/keystone/keystone.conf

[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone

[token]
provider = fernet

Populate the keystone database:
su -s /bin/sh -c "keystone-manage db_sync" keystone

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
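
The two setup commands above should have created key repositories; confirm before bootstrapping:
ls /etc/keystone/fernet-keys/ /etc/keystone/credential-keys/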

Bootstrap the Identity service:
keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
  --bootstrap-admin-url http://controller:5000/v3/ \
  --bootstrap-internal-url http://controller:5000/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne

Configure the Apache HTTP server

vim /etc/httpd/conf/httpd.conf

ServerName controller

Create a symlink
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

Start the httpd service and enable it at boot
systemctl enable httpd.service --now
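
Keystone should now answer on port 5000 with its version document:
curl http://controller:5000/v3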

Set the admin account environment variables

export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3

Create domains, projects, users, and roles
openstack domain create --description "An Example Domain" example

openstack project create --domain default \
  --description "Service Project" service

openstack project create --domain default \
  --description "Demo Project" myproject

openstack user create --domain default \
  --password-prompt myuser
Enter the password for myuser:
123456
123456

openstack role create myrole

Add the myrole role to user myuser in the myproject project
openstack role add --project myproject --user myuser myrole

Verify
unset OS_AUTH_URL OS_PASSWORD

openstack --os-auth-url http://controller:5000/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name admin --os-username admin token issue

Enter the admin password:
ADMIN_PASS

openstack --os-auth-url http://controller:5000/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name myproject --os-username myuser token issue

Enter the password for myuser:
123456

Create OpenStack client environment scripts

For the admin user:

vim admin-openrc

export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

For the myuser user:

vim demo-openrc

export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

Source the script:
. admin-openrc

openstack token issue

It is best to take a snapshot here.

===================================

Controller node

Install and configure glance (the Image service)

Log in to the database
mysql -u root -p

Create the glance database
CREATE DATABASE glance;

Grant privileges on the glance database:
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost'
IDENTIFIED BY 'GLANCE_DBPASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%'
IDENTIFIED BY 'GLANCE_DBPASS';

exit

. admin-openrc

Create the glance user and set its password to GLANCE_PASS

openstack user create --domain default --password-prompt glance

Enter the password: GLANCE_PASS
GLANCE_PASS

Grant the glance user the admin role so it can manage the Image service
openstack role add --project service --user glance admin

Create the glance service entity:
openstack service create --name glance \
  --description "OpenStack Image" image

Create the Image service API endpoints:
openstack endpoint create --region RegionOne \
  image public http://controller:9292

openstack endpoint create --region RegionOne \
  image internal http://controller:9292

openstack endpoint create --region RegionOne \
  image admin http://controller:9292

Install and configure the Image service components

yum install openstack-glance

Back up and edit the configuration file

mv /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bk

vim /etc/glance/glance-api.conf

[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = GLANCE_PASS

[paste_deploy]
flavor = keystone

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

Populate the glance database:

su -s /bin/sh -c "glance-manage db_sync" glance
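
If db_sync succeeded, the schema now exists; a quick check using the grant created above:
mysql -u glance -pGLANCE_DBPASS -h controller -e "SHOW TABLES;" glance | head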

Start the glance service and enable it at boot
systemctl enable openstack-glance-api.service --now

Verify
. admin-openrc

wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

Upload the image:
glance image-create --name "cirros" \
  --file cirros-0.4.0-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --visibility=public

Confirm the upload succeeded
glance image-list

It is best to take a snapshot here.

Controller node

Install and configure placement

Log in to the database
mysql -u root -p

Create the placement database
CREATE DATABASE placement;

Grant privileges on the placement database:
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost'
IDENTIFIED BY 'PLACEMENT_DBPASS';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%'
IDENTIFIED BY 'PLACEMENT_DBPASS';

exit

Source the script:
. admin-openrc

Create a placement user with password PLACEMENT_PASS
openstack user create --domain default --password-prompt placement

Enter the password: PLACEMENT_PASS
PLACEMENT_PASS

openstack role add --project service --user placement admin

openstack service create --name placement \
  --description "Placement API" placement

openstack endpoint create --region RegionOne \
  placement public http://controller:8778

openstack endpoint create --region RegionOne \
  placement internal http://controller:8778

openstack endpoint create --region RegionOne \
  placement admin http://controller:8778

Install and configure the components
yum install openstack-placement-api

Back up and edit the configuration file
mv /etc/placement/placement.conf /etc/placement/placement.conf.bk

vim /etc/placement/placement.conf

[placement_database]
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = PLACEMENT_PASS

Populate the placement database:
su -s /bin/sh -c "placement-manage db sync" placement

vim /etc/httpd/conf.d/00-placement-api.conf

Inside the <VirtualHost *:8778> block, add the following section to grant access to the API scripts:

<Directory /usr/bin>
  <IfVersion >= 2.4>
    Require all granted
  </IfVersion>
  <IfVersion < 2.4>
    Order allow,deny
    Allow from all
  </IfVersion>
</Directory>

Restart the httpd service
systemctl restart httpd
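
The Placement API should now respond with its version document:
curl http://controller:8778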

Verify

Source the script:
. admin-openrc

placement-status upgrade check

pip3 install osc-placement

openstack --os-placement-api-version 1.2 resource class list --sort-column name

openstack --os-placement-api-version 1.6 trait list --sort-column name


Take a snapshot here.

Install and configure Compute (nova)
Controller node

Log in to the database
mysql -u root -p

Create the nova_api, nova, and nova_cell0 databases

CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;

Grant privileges on the databases:
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost'
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%'
IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost'
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%'
IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost'
IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%'
IDENTIFIED BY 'NOVA_DBPASS';

exit

Source the script:
. admin-openrc

Create the nova user and set its password to NOVA_PASS
openstack user create --domain default --password-prompt nova

Enter the password: NOVA_PASS
NOVA_PASS

Add the admin role to the nova user
openstack role add --project service --user nova admin

Create the nova service entity:
openstack service create --name nova \
  --description "OpenStack Compute" compute

Create the Compute API endpoints:
openstack endpoint create --region RegionOne \
  compute public http://controller:8774/v2.1

openstack endpoint create --region RegionOne \
  compute internal http://controller:8774/v2.1

openstack endpoint create --region RegionOne \
  compute admin http://controller:8774/v2.1

Install and configure the Compute service components:
yum install openstack-nova-api openstack-nova-conductor \
  openstack-nova-novncproxy openstack-nova-scheduler

Back up and edit the configuration file
mv /etc/nova/nova.conf /etc/nova/nova.conf.bk

vim /etc/nova/nova.conf

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/
my_ip = 10.10.10.150

[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS

[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip

[glance]
api_servers = http://controller:9292

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = PLACEMENT_PASS

[scheduler]
discover_hosts_in_cells_interval = 300

Populate the nova_api database:
su -s /bin/sh -c "nova-manage api_db sync" nova

Register the cell0 database:
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

Create the cell1 cell:
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

Populate the nova database:
su -s /bin/sh -c "nova-manage db sync" nova

Verify that cell0 and cell1 are registered correctly:
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova

Start the nova services and enable them at boot:

systemctl enable \
  openstack-nova-api.service \
  openstack-nova-scheduler.service \
  openstack-nova-conductor.service \
  openstack-nova-novncproxy.service

systemctl start \
  openstack-nova-api.service \
  openstack-nova-scheduler.service \
  openstack-nova-conductor.service \
  openstack-nova-novncproxy.service

===========
Compute nodes (this environment has two: 10.10.10.151 and 10.10.10.152)

Install and configure the nova service components
yum install openstack-nova-compute

Back up and edit the configuration file (only one node's file is shown; on the other node just change the IP address)
mv /etc/nova/nova.conf /etc/nova/nova.conf.bk

vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller
my_ip = 10.10.10.151

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS

[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = PLACEMENT_PASS

[libvirt]
virt_type = qemu
disk_cachemodes = "network=writeback"

Start the libvirtd and nova-compute services and enable them at boot
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
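
Confirm both services came up before moving on:
systemctl is-active libvirtd openstack-nova-compute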

If the service fails to start, check whether the firewall is blocking the AMQP port (5672) on the controller, or simply disable firewalld and SELinux:

systemctl disable firewalld.service --now

sed -i 's#SELINUX=.*#SELINUX=disabled#' /etc/selinux/config

====
Back on the controller node

Source the script:
. admin-openrc

openstack compute service list --service nova-compute

su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

Verify
. admin-openrc

openstack compute service list

openstack catalog list

openstack image list

#Change the owner of /etc/nova/policy.json:

#chown nova:nova /etc/nova/policy.json

At this point, it is best to snapshot all nodes again.

Install and configure neutron
Controller node

Log in to the database:
mysql -u root -p

Create the neutron database
CREATE DATABASE neutron;

Grant privileges on the neutron database:
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost'
IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%'
IDENTIFIED BY 'NEUTRON_DBPASS';

exit

Source the script:
. admin-openrc

Create the neutron user and set its password to NEUTRON_PASS
openstack user create --domain default --password-prompt neutron

Enter the password: NEUTRON_PASS
NEUTRON_PASS

Grant the neutron user the admin role
openstack role add --project service --user neutron admin

Create the neutron service entity:
openstack service create --name neutron \
  --description "OpenStack Networking" network

Create the Networking service API endpoints:
openstack endpoint create --region RegionOne \
  network public http://controller:9696

openstack endpoint create --region RegionOne \
  network internal http://controller:9696

openstack endpoint create --region RegionOne \
  network admin http://controller:9696

This lab uses Networking Option 2: Self-service networks.
Install and configure:

yum install openstack-neutron openstack-neutron-ml2 \
  openstack-neutron-linuxbridge ebtables

Back up and edit the component's configuration file
mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bk

vim /etc/neutron/neutron.conf

[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron

[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS

[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = NOVA_PASS

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

Back up and edit the ML2 plug-in configuration file
mv /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bk

vim /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security

[ml2_type_flat]
flat_networks = provider

[ml2_type_vxlan]
vni_ranges = 1:1000

[securitygroup]
enable_ipset = true

Back up and edit the Linux bridge agent configuration file
mv /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bk

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[linux_bridge]
physical_interface_mappings = provider:enp1s0

[vxlan]
enable_vxlan = true
local_ip = 192.168.10.150
l2_population = true

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

modprobe br_netfilter

vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1

Save and exit, then apply:
sysctl -p
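
Both flags should now read 1. Also make the module load persist across reboots, otherwise sysctl -p fails at boot:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf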

Edit the L3 agent configuration file:
vim /etc/neutron/l3_agent.ini

Add the following under [DEFAULT]:
[DEFAULT]
interface_driver = linuxbridge

Configure the DHCP agent
vim /etc/neutron/dhcp_agent.ini

Add the following under [DEFAULT]:
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

Configure the metadata agent
vim /etc/neutron/metadata_agent.ini

[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET

Configure nova to use neutron
vim /etc/nova/nova.conf
Add the following:
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET

===========
Back on the two compute nodes (compute1, compute2)
Install and configure the network components

yum install openstack-neutron-linuxbridge ebtables ipset

Back up and edit the configuration file (only one node's file is shown; copy it to the other node and change the IP address)

mv /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bk

vim /etc/neutron/neutron.conf

[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

Configure the Linux bridge agent
mv /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bk

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[linux_bridge]
physical_interface_mappings = provider:enp1s0

[vxlan]
enable_vxlan = true
local_ip = 192.168.10.151
l2_population = true

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

modprobe br_netfilter

vim /etc/sysctl.conf

net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1

Save and exit, then apply:
sysctl -p

Edit the nova configuration file

vim /etc/nova/nova.conf

Add the following:
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS

Restart the nova service
systemctl restart openstack-nova-compute.service

Start the Linux bridge agent and enable it at boot
systemctl enable neutron-linuxbridge-agent.service --now

======
Finish up on the controller node
Grant sudoer privileges
Edit /etc/neutron/neutron.conf and set the following:
[privsep]
user = neutron
helper_command = sudo privsep-helper

Edit /etc/sudoers.d/neutron, add the following line, then force-save and exit:
neutron ALL = (root) NOPASSWD: ALL

The network service initialization scripts expect a symlink /etc/neutron/plugin.ini pointing to /etc/neutron/plugins/ml2/ml2_conf.ini; create it:
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

systemctl restart openstack-nova-api

systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
systemctl enable neutron-l3-agent.service --now

Source the script:
. admin-openrc
openstack network agent list

Take a snapshot here.

Install the dashboard (horizon)
yum install openstack-dashboard

Edit the /etc/openstack-dashboard/local_settings configuration file:
OPENSTACK_HOST = "127.0.0.1"   change to:
OPENSTACK_HOST = "controller"

#ALLOWED_HOSTS = ['horizon.example.com', 'localhost']   change to:
ALLOWED_HOSTS = ['*']

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    }
}

OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
TIME_ZONE = "Asia/Shanghai"
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}

Edit the /etc/httpd/conf.d/openstack-dashboard.conf configuration file and add:
WSGIApplicationGroup %{GLOBAL}

Edit the following files, find WEBROOT = '/' and change it to WEBROOT = '/dashboard':
vim /usr/share/openstack-dashboard/openstack_dashboard/defaults.py
vim /usr/share/openstack-dashboard/openstack_dashboard/test/settings.py

Restart the memcached and httpd services

systemctl restart httpd.service memcached.service
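
A quick HTTP check confirms the dashboard answers before trying a browser (expect a 200 or a redirect to the login page):
curl -sI http://controller/dashboard | head -n 1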

Log in at:
http://10.10.10.150/dashboard
and test.
