Offline Deployment of OpenStack 2024.1 Nova


Compute Service on the Controller Nodes

Offline download

add-apt-repository cloud-archive:caracal
apt install --download-only nova-api nova-conductor nova-scheduler nova-novncproxy python3-nova

mkdir /controller/nova
mv /var/cache/apt/archives/*.deb /controller/nova/
dpkg -i /controller/nova/*.deb
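
A quick optional sanity check confirms that the control-plane packages were actually installed from the local .deb files:

# Optional: verify the Nova control-plane packages are installed
dpkg -l | grep -E 'nova-(api|conductor|scheduler|novncproxy)'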

Run on one controller node only

CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';

FLUSH PRIVILEGES;
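
The statements above are entered from a root MariaDB session (mysql -u root -p). As a quick check, assuming the <VIP> and NOVA_DBPASS placeholders used throughout this guide, access from the nova account can then be verified:

# Verify that the nova account can reach its databases through the VIP
mysql -u nova -pNOVA_DBPASS -h <VIP> -e "SHOW DATABASES;"
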
source ~/admin-openrc
# Create the nova user and grant it the admin role
openstack user create --domain default --password NOVA_PASS nova
openstack role add --project service --user nova admin
# Create the nova service entity and the Compute API endpoints
openstack service create --name nova --description "OpenStack Compute" compute

openstack endpoint create --region RegionOne compute public http://<VIP>:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://<VIP>:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://<VIP>:8774/v2.1
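
Both registrations can be verified immediately with the standard client commands:

# Confirm the compute service entity and its three endpoints exist
openstack service list
openstack endpoint list --service compute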

Run on all three controller nodes

  • Main configuration
vim /etc/nova/nova.conf

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://nova:NOVA_PASS@ip1,ip2,ip3
# IP of this node; use the corresponding IP on each of the other nodes
my_ip = <ip1>
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
block_device_allocate_retries = 600
resume_guests_state_on_host_boot = true

[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@<VIP>/nova_api

[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@<VIP>/nova

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://<VIP>:5000/v3
username = placement
password = PLACEMENT_PASS

[keystone_authtoken]
www_authenticate_uri = http://<VIP>:5000/v3
auth_url = http://<VIP>:5000/v3
memcached_servers = ip1:11211,ip2:11211,ip3:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS
service_token_roles_required = true
service_token_roles = admin
region_name = RegionOne

[vnc]
enabled = true
server_listen = <management_network>
server_proxyclient_address = $my_ip
novncproxy_base_url = http://<VIP>:6080/vnc_auto.html

[glance]
api_servers = http://<VIP>:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = nova
# Used by libvirt to access the Ceph RBD pool; how to obtain it is described below
rbd_secret_uuid = <Ceph RBD Secret UUID>
virt_type = kvm
cpu_mode = host-model

# Nova integration with Neutron
[neutron]
auth_url = http://<VIP>:5000
auth_type = password
project_domain_name = Default
user_domain_name = Default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
service_metadata_proxy = true
# An error occurs if this is unset or does not match the Neutron metadata agent's value;
# generate one with openssl rand -hex 16 (see the example after this config listing)
metadata_proxy_shared_secret = Os#123

# Nova integration with Cinder
[cinder]
os_region_name = RegionOne

Make libvirt aware of the nova Ceph key

  • Make sure the following files are available on the node:
  • /etc/ceph/ceph.conf
  • /etc/ceph/ceph.client.nova.keyring
chown nova:nova /etc/ceph/ceph.client.nova.keyring
chmod 640 /etc/ceph/ceph.client.nova.keyring
# Generate a UUID
uuidgen

# Create the libvirt secret definition file
vim secret.xml
<secret ephemeral='no' private='no'>
  <uuid>UUID generated above</uuid>
  <usage type='ceph'>
    <name>client.nova secret</name>
  </usage>
</secret>
# Define the secret in libvirt from the file
virsh secret-define --file secret.xml
# Show the nova user's Ceph key
ceph auth get-key client.nova
# Set the secret value (inject the key into libvirt)
virsh secret-set-value --secret <uuid> --base64 <nova.key>
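
The last two steps are often combined, since virsh accepts the key value inline; a sketch using the same UUID and the client.nova key:

# Equivalent one-liner: read the key from Ceph and set it on the libvirt secret directly
virsh secret-set-value --secret <uuid> --base64 $(ceph auth get-key client.nova)
# Confirm the secret is registered
virsh secret-list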

Run on one controller node only

# Populate the nova_api database
su -s /bin/sh -c "nova-manage api_db sync" nova
# Register the cell0 and cell1 cells
# cell0: holds only records of instances that failed scheduling
# cell1: holds the instances running in the normal compute resource pool
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# Populate the main nova database
su -s /bin/sh -c "nova-manage db sync" nova
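
Before starting the services, the cell registration can be verified:

# Confirm that cell0 and cell1 are registered and mapped correctly
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova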

Run on all three controller nodes

systemctl start nova-api nova-scheduler nova-conductor nova-novncproxy
systemctl enable nova-api nova-scheduler nova-conductor nova-novncproxy
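
On Ubuntu the Nova packages normally start their services during installation, so if they were already running, restart them so the edited nova.conf takes effect, and check their state:

# Restart (if already running) to pick up the new configuration, then check status
systemctl restart nova-api nova-scheduler nova-conductor nova-novncproxy
systemctl --no-pager status nova-api nova-scheduler nova-conductor nova-novncproxy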

source ~/admin-openrc
openstack compute service list
openstack catalog list
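
The upstream verification steps also include nova-status, which checks the cells v2 setup and connectivity to the placement API from the same node:

# Additional check: verifies cells v2 setup and that the placement API is reachable
nova-status upgrade check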

Compute Service on the Compute Nodes

Offline download

apt-get --download-only install nova-compute libvirt-daemon-system libvirt-clients qemu-kvm qemu-utils

mkdir /compute/nova
mv /var/cache/apt/archives/*.deb /compute/nova/
dpkg -i /compute/nova/*.deb
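
Since the [libvirt] section below sets virt_type = kvm, it is worth confirming that the compute node actually supports hardware virtualization; per the upstream install guide, a result of 0 means the node only supports software emulation and virt_type = qemu should be used instead:

# Count hardware-virtualization CPU flags; 0 means no VT-x/AMD-V, so use virt_type = qemu
egrep -c '(vmx|svm)' /proc/cpuinfo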

Run on every compute node

vim /etc/nova/nova.conf

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://nova:NOVA_PASS@ip1,ip2,ip3
# IP of this node; use the corresponding IP on each of the other nodes
my_ip = ip1
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
resume_guests_state_on_host_boot = true

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://<VIP>:5000/v3
memcached_servers = ip1:11211,ip2:11211,ip3:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS
service_token_roles_required = true
service_token_roles = admin
region_name = RegionOne

[vnc]
enabled = true
server_listen = <management_network>
server_proxyclient_address = $my_ip
novncproxy_base_url = http://<VIP>:6080/vnc_auto.html

[glance]
api_servers = http://<VIP>:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://<VIP>:5000/v3
username = placement
password = PLACEMENT_PASS

[libvirt]
virt_type = kvm
cpu_mode = host-passthrough
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = nova
# Used by libvirt to access the Ceph RBD pool; must match the secret UUID used on the controller nodes
rbd_secret_uuid = <Ceph RBD Secret UUID>
# Use writeback caching when libvirt/QEMU accesses the network-backed (RBD) storage
disk_cachemodes="network=writeback"
# Live-migration options (note: the legacy live_migration_flag option was removed from Nova
# long before 2024.1 and is ignored here; the newer [libvirt] live_migration_* options
# control this behaviour instead)
# live_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_TUNNELLED
# Space reclamation: libvirt passes TRIM/UNMAP requests from the guest through to the
# backing storage so unused space is freed promptly
hw_disk_discard = unmap

[cinder]
os_region_name = RegionOne

[neutron]
auth_url = http://<VIP>:5000
auth_type = password
project_domain_name = Default
user_domain_name = Default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
service_metadata_proxy = true
# An error occurs if this is unset or does not match the Neutron metadata agent's value;
# generate one with openssl rand -hex 16 and use the same value as on the controllers
metadata_proxy_shared_secret = Os#123

Make libvirt aware of the nova Ceph key

  • Make sure the following files are available on every compute node:
  • /etc/ceph/ceph.conf
  • /etc/ceph/ceph.client.nova.keyring
chown nova:nova /etc/ceph/ceph.client.nova.keyring
chmod 640 /etc/ceph/ceph.client.nova.keyring
# Create the libvirt secret definition file
vim secret.xml
<secret ephemeral='no' private='no'>
  <uuid>same UUID as used on the controller nodes</uuid>
  <usage type='ceph'>
    <name>client.nova secret</name>
  </usage>
</secret>
# Define the secret in libvirt from the file
virsh secret-define --file secret.xml
# Show the nova user's Ceph key
ceph auth get-key client.nova
# Set the secret value (inject the key into libvirt)
virsh secret-set-value --secret <uuid> --base64 <nova.key>
systemctl start nova-compute && systemctl enable nova-compute

Run on a controller node

source ~/admin-openrc
openstack compute service list
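
Newly added compute hosts also have to be discovered and mapped into cell1; per the upstream install guide this is done on a controller node (or automated with the [scheduler] discover_hosts_in_cells_interval option in nova.conf):

# Map newly added compute hosts into cell1
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova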

# On the compute node, check the nova-compute log for errors
cat /var/log/nova/nova-compute.log