This guide provides step-by-step instructions for setting up an OpenStack 2024.1 (Caracal) cluster on four Ubuntu 22.04 LTS VMs:
- Controller Node
- Compute Node
- Object Storage Node
- Block Storage Node
Contents:
- Prerequisites
- Network Configuration
- Controller Node Setup
- Compute Node Setup
- Block Storage Node Setup
- Object Storage Node Setup
- Validation
- Troubleshooting
Recommended resources for each node:
- Controller Node: 4 CPU cores, 8GB RAM, 100GB storage
- Compute Node: 8 CPU cores, 16GB RAM, 100GB storage
- Block Storage Node: 2 CPU cores, 4GB RAM, 1TB storage
- Object Storage Node: 2 CPU cores, 4GB RAM, 1TB storage
Before you begin:
- Install Ubuntu 22.04 LTS on all four VMs
- Ensure each VM has at least two network interfaces:
- Management network (for internal communication)
- External network (for external access)
# Update all packages
sudo apt update && sudo apt upgrade -y
# Set hostname on each node
# On Controller Node:
sudo hostnamectl set-hostname controller
# On Compute Node:
sudo hostnamectl set-hostname compute1
# On Block Storage Node:
sudo hostnamectl set-hostname block1
# On Object Storage Node:
sudo hostnamectl set-hostname object1
# Edit /etc/hosts on all nodes to include all nodes
sudo nano /etc/hosts
Add the following entries to /etc/hosts (replace with your actual IP addresses):
# Management Network
10.0.0.11 controller
10.0.0.31 compute1
10.0.0.41 block1
10.0.0.51 object1
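With /etc/hosts populated on every node, a short loop confirms that each hostname resolves and responds — a minimal sketch:
# Verify name resolution and reachability (run from each node)
for host in controller compute1 block1 object1; do
    ping -c 1 -W 2 "$host" > /dev/null && echo "$host reachable" || echo "$host UNREACHABLE"
done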
- Configure a private network (e.g., 10.0.0.0/24) for internal communication
- Ensure all nodes can ping each other
- Set up a self-service network for tenant virtual machines
- Configure network namespaces and bridges
Example network setup on the Controller node:
# Install network utilities
sudo apt install -y bridge-utils
# Configure the interfaces
sudo nano /etc/netplan/01-netcfg.yaml
Example configuration:
network:
  version: 2
  renderer: networkd
  ethernets:
    ens3:
      dhcp4: no
      addresses: [10.0.0.11/24]
      routes:
        - to: default
          via: 10.0.0.1
      nameservers:
        addresses: [8.8.8.8, 8.8.4.4]
    ens4:
      dhcp4: no
  bridges:
    br-ex:
      interfaces: [ens4]
      addresses: [203.0.113.11/24]
      routes:
        - to: default
          via: 203.0.113.1
      nameservers:
        addresses: [8.8.8.8, 8.8.4.4]
      parameters:
        stp: false
Apply the configuration:
sudo netplan apply
On every node, enable the OpenStack Caracal package archive:
# Add OpenStack Caracal repository
sudo add-apt-repository cloud-archive:caracal -y
# Update package lists
sudo apt update
# Install OpenStack client
sudo apt install -y python3-openstackclient
On the Controller node, install and configure the SQL database:
# Install MariaDB packages
sudo apt install -y mariadb-server python3-pymysql
# Configure MariaDB
sudo nano /etc/mysql/mariadb.conf.d/99-openstack.cnf
Add the following content:
[mysqld]
bind-address = 10.0.0.11
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
Restart the service:
sudo systemctl restart mariadb
# Secure the database installation
sudo mysql_secure_installation
# Install RabbitMQ package
sudo apt install -y rabbitmq-server
# Add OpenStack user
sudo rabbitmqctl add_user openstack RABBIT_PASS
sudo rabbitmqctl set_permissions openstack ".*" ".*" ".*"
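Before continuing, it is worth confirming that the user and permissions registered correctly:
# Verify the openstack user and its permissions on the default vhost
sudo rabbitmqctl list_users
sudo rabbitmqctl list_permissions
# Install Memcached packages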
sudo apt install -y memcached python3-memcache
# Configure Memcached
sudo nano /etc/memcached.conf
Change the line -l 127.0.0.1 to -l 10.0.0.11.
Restart the service:
sudo systemctl restart memcached
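A quick check that Memcached is now bound to the management address:
# Memcached should be listening on 10.0.0.11:11211
sudo ss -tlnp | grep 11211
# Install etcd package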
sudo apt install -y etcd
# Configure etcd
sudo nano /etc/default/etcd
Add the following content:
ETCD_NAME="controller"
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER="controller=http://10.0.0.11:2380"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.11:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.11:2379"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://10.0.0.11:2379"
Enable and restart etcd service:
sudo systemctl enable etcd
sudo systemctl restart etcd
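Assuming the etcd client tools are installed, you can confirm the member is healthy:
# The endpoint should report "is healthy"
ETCDCTL_API=3 etcdctl --endpoints=http://10.0.0.11:2379 endpoint health
# Create keystone database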
sudo mysql -u root -p
Run the following SQL commands:
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';
EXIT;
Install and configure Keystone:
# Install keystone packages
sudo apt install -y keystone
# Configure keystone
sudo nano /etc/keystone/keystone.conf
Update the following sections:
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[token]
provider = fernet
Populate the Identity service database:
sudo su -s /bin/sh -c "keystone-manage db_sync" keystone
# Initialize Fernet key repositories
sudo keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
sudo keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# Bootstrap the Identity service
sudo keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
--bootstrap-admin-url http://controller:5000/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
Configure Apache:
sudo nano /etc/apache2/apache2.conf
Add the following line:
ServerName controller
Restart Apache:
sudo systemctl restart apache2
Create OpenStack client environment scripts:
# Create admin-openrc file
cat > ~/admin-openrc << EOF
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
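Before continuing, run a quick smoke test to confirm the Identity service issues tokens:
# Request a token; this should print a token ID and expiry without errors
source ~/admin-openrc
openstack token issue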
# Source the admin credentials
source ~/admin-openrc
# Create glance database
sudo mysql -u root -p
Run the following SQL commands:
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';
EXIT;
Create service credentials:
# Source the admin credentials
source ~/admin-openrc
# Create the glance user
openstack user create --domain default --password GLANCE_PASS glance
# Add the admin role to the glance user and service project
openstack role add --project service --user glance admin
# Create the glance service entity
openstack service create --name glance --description "OpenStack Image" image
# Create the Image service API endpoints
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
Install and configure Glance:
# Install glance packages
sudo apt install -y glance
# Configure glance-api.conf
sudo nano /etc/glance/glance-api.conf
Update the following sections:
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = GLANCE_PASS
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
Populate the Image service database:
sudo su -s /bin/sh -c "glance-manage db_sync" glance
# Restart the Image service
sudo systemctl restart glance-api
# Create placement database
sudo mysql -u root -p
Run the following SQL commands:
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'PLACEMENT_DBPASS';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'PLACEMENT_DBPASS';
EXIT;
Create service credentials:
# Source the admin credentials
source ~/admin-openrc
# Create the placement user
openstack user create --domain default --password PLACEMENT_PASS placement
# Add the admin role to the placement user
openstack role add --project service --user placement admin
# Create the placement service entity
openstack service create --name placement --description "Placement API" placement
# Create the Placement API endpoints
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
Install and configure placement components:
# Install placement packages
sudo apt install -y placement-api
# Configure placement.conf
sudo nano /etc/placement/placement.conf
Update the following sections:
[placement_database]
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = PLACEMENT_PASS
Populate the placement database:
sudo su -s /bin/sh -c "placement-manage db sync" placementRestart the Apache service:
sudo systemctl restart apache2
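Verify the Placement installation:
# All checks should report Success
sudo placement-status upgrade check
# Create nova databases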
sudo mysql -u root -p
Run the following SQL commands:
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
EXIT;
Create service credentials:
# Source the admin credentials
source ~/admin-openrc
# Create the nova user
openstack user create --domain default --password NOVA_PASS nova
# Add the admin role to the nova user
openstack role add --project service --user nova admin
# Create the nova service entity
openstack service create --name nova --description "OpenStack Compute" compute
# Create the Compute service API endpoints
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
Install and configure Nova components:
# Install nova packages
sudo apt install -y nova-api nova-conductor nova-novncproxy nova-scheduler
# Configure nova.conf
sudo nano /etc/nova/nova.conf
Update the following sections:
[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/
my_ip = 10.0.0.11
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = PLACEMENT_PASS
Populate the Compute databases:
# Populate the nova-api database
sudo su -s /bin/sh -c "nova-manage api_db sync" nova
# Register the cell0 database
sudo su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
# Create the cell1 cell
sudo su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# Populate the nova database
sudo su -s /bin/sh -c "nova-manage db sync" nova
# Verify nova cell setup
sudo su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
# Restart the Compute services
sudo systemctl restart nova-api nova-scheduler nova-conductor nova-novncproxy
# Create neutron database
sudo mysql -u root -p
Run the following SQL commands:
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS';
EXIT;
Create service credentials:
# Source the admin credentials
source ~/admin-openrc
# Create the neutron user
openstack user create --domain default --password NEUTRON_PASS neutron
# Add the admin role to the neutron user
openstack role add --project service --user neutron admin
# Create the neutron service entity
openstack service create --name neutron --description "OpenStack Networking" network
# Create the Networking service API endpoints
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
Configure Networking options:
# Install neutron packages
sudo apt install -y neutron-server neutron-plugin-ml2 neutron-linuxbridge-agent neutron-l3-agent neutron-dhcp-agent neutron-metadata-agent
# Configure neutron.conf
sudo nano /etc/neutron/neutron.conf
Update the following sections:
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = NOVA_PASS
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
Configure the ML2 plugin:
sudo nano /etc/neutron/plugins/ml2/ml2_conf.ini
Update the following sections:
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true
Configure the Linux bridge agent:
sudo nano /etc/neutron/plugins/ml2/linuxbridge_agent.ini
Update the following sections (replace PROVIDER_INTERFACE_NAME with your actual provider network interface):
[linux_bridge]
physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
[vxlan]
enable_vxlan = true
local_ip = 10.0.0.11
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
Configure the L3 agent:
sudo nano /etc/neutron/l3_agent.ini
Update the following section:
[DEFAULT]
interface_driver = linuxbridge
Configure the DHCP agent:
sudo nano /etc/neutron/dhcp_agent.ini
Update the following section:
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
Configure the metadata agent:
sudo nano /etc/neutron/metadata_agent.ini
Update the following section:
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET
Configure Compute to use Networking:
sudo nano /etc/nova/nova.conf
Add the following section:
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET
Populate the database:
sudo su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutronRestart Nova API service:
sudo systemctl restart nova-api
Restart the Networking services:
sudo systemctl restart neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent neutron-l3-agent
# Install dashboard package
sudo apt install -y openstack-dashboard
# Configure dashboard
sudo nano /etc/openstack-dashboard/local_settings.py
Update the following settings:
OPENSTACK_HOST = "controller"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
},
}
ALLOWED_HOSTS = ['*']
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 3,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "member"
TIME_ZONE = "UTC"Restart the web server:
sudo systemctl restart apache2
Now on the Compute Node:
# Install Nova Compute packages
sudo apt install -y nova-compute nova-compute-kvm
# Configure nova.conf
sudo nano /etc/nova/nova.conf
Update the following sections:
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
my_ip = 10.0.0.31
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = PLACEMENT_PASS
Finalize compute installation:
# Determine if your compute node supports hardware acceleration
egrep -c '(vmx|svm)' /proc/cpuinfo
# If the command returns a value of one or greater, your compute node supports hardware acceleration
# If the command returns a value of zero, configure libvirt to use QEMU instead of KVM
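# Alternative check: the cpu-checker package ships a kvm-ok helper that reports
# KVM capability directly (an optional extra, not part of the original steps)
sudo apt install -y cpu-checker
sudo kvm-ok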
sudo nano /etc/nova/nova-compute.conf
If needed, update the following:
[libvirt]
virt_type = qemu
Restart Nova compute:
sudo systemctl restart nova-compute
# Install neutron packages
sudo apt install -y neutron-linuxbridge-agent
# Configure neutron.conf
sudo nano /etc/neutron/neutron.conf
Update the following sections:
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
Configure the Linux bridge agent:
sudo nano /etc/neutron/plugins/ml2/linuxbridge_agent.ini
Update the following sections (replace PROVIDER_INTERFACE_NAME with your actual provider network interface):
[linux_bridge]
physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
[vxlan]
enable_vxlan = true
local_ip = 10.0.0.31 # Compute node IP
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
Configure Compute to use Networking:
sudo nano /etc/nova/nova.conf
Add the following section:
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
Restart the Nova Compute service:
sudo systemctl restart nova-compute
Restart the Linux Bridge agent:
sudo systemctl restart neutron-linuxbridge-agent
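Before moving on, return to the Controller node and register the new compute node in the cell database — Nova does not discover new compute hosts automatically unless configured to:
# On the Controller node: confirm the compute service registered, then map it into cell1
source ~/admin-openrc
openstack compute service list --service nova-compute
sudo su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

First, on the Controller node, you need to create the Cinder database and service credentials: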
# On the Controller node
# Create the cinder database
sudo mysql -u root -p
Run the following SQL commands:
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'CINDER_DBPASS';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'CINDER_DBPASS';
EXIT;
Create the service credentials:
# Source the admin credentials
source ~/admin-openrc
# Create the cinder user
openstack user create --domain default --password CINDER_PASS cinder
# Add the admin role to the cinder user
openstack role add --project service --user cinder admin
# Create the cinderv3 service entity (the Block Storage v2 API was removed in earlier releases, so only v3 is registered)
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
# Create the Block Storage service API endpoints
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
Install and configure Cinder API components on the Controller node:
# Install cinder packages
sudo apt install -y cinder-api cinder-scheduler
# Configure cinder.conf
sudo nano /etc/cinder/cinder.conf
Update the following sections:
[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
my_ip = 10.0.0.11
glance_api_servers = http://controller:9292
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
Populate the Block Storage database:
sudo su -s /bin/sh -c "cinder-manage db sync" cinderConfigure Compute to use Block Storage:
sudo nano /etc/nova/nova.conf
Add the following [cinder] section:
[cinder]
os_region_name = RegionOne
Restart the Compute API service:
sudo systemctl restart nova-api
Restart the Block Storage services:
sudo systemctl restart cinder-scheduler
sudo systemctl restart apache2
Now on the Block Storage Node:
# Install LVM packages
sudo apt install -y lvm2 thin-provisioning-tools
# Create the LVM physical volume
sudo pvcreate /dev/sdb
# Create the LVM volume group
sudo vgcreate cinder-volumes /dev/sdb
# Configure LVM to only scan devices with the cinder-volumes volume group
sudo nano /etc/lvm/lvm.confUpdate the following section:
devices {
filter = [ "a/sdb/", "r/.*/"]
}
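Confirm that LVM still sees the volume group with the filter in place:
# The cinder-volumes volume group should be listed
sudo vgs cinder-volumes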
Install and configure Cinder components:
# Install cinder packages
sudo apt install -y cinder-volume tgt
# Configure cinder.conf
sudo nano /etc/cinder/cinder.conf
Update the following sections:
[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
my_ip = 10.0.0.41
enabled_backends = lvm
glance_api_servers = http://controller:9292
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = tgtadm
Restart the iSCSI target and Block Storage volume services:
sudo systemctl restart tgt
sudo systemctl restart cinder-volume
First, on the Controller node, you need to create service credentials:
# On the Controller node
# Source the admin credentials
source ~/admin-openrc
# Create the swift user
openstack user create --domain default --password SWIFT_PASS swift
# Add the admin role to the swift user
openstack role add --project service --user swift admin
# Create the swift service entity
openstack service create --name swift --description "OpenStack Object Storage" object-store
# Create the Object Storage service API endpoints
openstack endpoint create --region RegionOne object-store public http://controller:8080/v1/AUTH_%\(project_id\)s
openstack endpoint create --region RegionOne object-store internal http://controller:8080/v1/AUTH_%\(project_id\)s
openstack endpoint create --region RegionOne object-store admin http://controller:8080/v1
Install and configure the Swift proxy on the Controller node:
# Install swift packages
sudo apt install -y swift swift-proxy python3-swiftclient python3-keystoneclient python3-keystonemiddleware
# Create the /etc/swift directory
sudo mkdir -p /etc/swift
# Create the proxy-server.conf file
sudo nano /etc/swift/proxy-server.conf
Add the following content:
[DEFAULT]
bind_port = 8080
user = swift
swift_dir = /etc/swift
[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
[app:proxy-server]
use = egg:swift#proxy
account_autocreate = True
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin,member
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = swift
password = SWIFT_PASS
delay_auth_decision = True
[filter:cache]
use = egg:swift#memcache
memcache_servers = controller:11211
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:gatekeeper]
use = egg:swift#gatekeeper
[filter:container_sync]
use = egg:swift#container_sync
[filter:bulk]
use = egg:swift#bulk
[filter:ratelimit]
use = egg:swift#ratelimit
[filter:container-quotas]
use = egg:swift#container_quotas
[filter:account-quotas]
use = egg:swift#account_quotas
[filter:slo]
use = egg:swift#slo
[filter:dlo]
use = egg:swift#dlo
[filter:versioned_writes]
use = egg:swift#versioned_writes
[filter:tempurl]
use = egg:swift#tempurl
Now on the Object Storage Node:
# Install Swift packages
sudo apt install -y swift swift-account swift-container swift-object xfsprogs
# Format the disk
sudo mkfs.xfs /dev/sdc
# Create the mount point directory
sudo mkdir -p /srv/node/sdc
# Add the following line to /etc/fstab
echo "/dev/sdc /srv/node/sdc xfs noatime,nodiratime,nobarrier,logbufs=8 0 2" | sudo tee -a /etc/fstab
# Mount the devices
sudo mount /srv/node/sdc
# Change ownership
sudo chown -R swift:swift /srv/node
# Create the recon directory
sudo mkdir -p /var/cache/swift
sudo chown -R root:swift /var/cache/swift
sudo chmod -R 775 /var/cache/swift
Configure Swift:
# Edit the account-server.conf file
sudo nano /etc/swift/account-server.conf
Update the following sections:
[DEFAULT]
bind_ip = 10.0.0.51
bind_port = 6202
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = true
[pipeline:main]
pipeline = healthcheck recon account-server
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
Configure the container server:
# Edit the container-server.conf file
sudo nano /etc/swift/container-server.conf
Update the following sections:
[DEFAULT]
bind_ip = 10.0.0.51
bind_port = 6201
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = true
[pipeline:main]
pipeline = healthcheck recon container-server
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
Configure the object server:
# Edit the object-server.conf file
sudo nano /etc/swift/object-server.conf
Update the following sections:
[DEFAULT]
bind_ip = 10.0.0.51
bind_port = 6200
user = swift
swift_dir = /etc/swift
devices = /srv/node
mount_check = true
[pipeline:main]
pipeline = healthcheck recon object-server
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
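One step to add here: the proxy cannot serve requests until the account, container, and object rings are built and distributed, together with /etc/swift/swift.conf (set swift_hash_path_suffix and swift_hash_path_prefix in it first), to every node running Swift services. A minimal single-replica sketch for this one-disk layout (device sdc on 10.0.0.51, matching the configuration above; run on the Controller node):
# Build the rings in /etc/swift (2^10 partitions, 1 replica since there is one storage node)
cd /etc/swift
sudo swift-ring-builder account.builder create 10 1 1
sudo swift-ring-builder account.builder add --region 1 --zone 1 --ip 10.0.0.51 --port 6202 --device sdc --weight 100
sudo swift-ring-builder account.builder rebalance
sudo swift-ring-builder container.builder create 10 1 1
sudo swift-ring-builder container.builder add --region 1 --zone 1 --ip 10.0.0.51 --port 6201 --device sdc --weight 100
sudo swift-ring-builder container.builder rebalance
sudo swift-ring-builder object.builder create 10 1 1
sudo swift-ring-builder object.builder add --region 1 --zone 1 --ip 10.0.0.51 --port 6200 --device sdc --weight 100
sudo swift-ring-builder object.builder rebalance
# Copy the ring files and swift.conf to the Object Storage node, then move them into /etc/swift there
scp /etc/swift/*.ring.gz /etc/swift/swift.conf object1:/tmp/
# Restart the proxy on the controller, and the storage services on object1:
sudo systemctl restart swift-proxy
# (on object1) sudo systemctl restart swift-account swift-container swift-object

With all services configured, validate the installation:
# Source the admin credentials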
source ~/admin-openrc
# List users
openstack user list
# List services
openstack service list
# List endpoints
openstack endpoint list
# Source the admin credentials
source ~/admin-openrc
# Download the cirros image
wget http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-x86_64-disk.img
# Upload the image to the Image service
openstack image create "cirros" \
--file cirros-0.5.2-x86_64-disk.img \
--disk-format qcow2 --container-format bare \
--public
# Verify the image was created
openstack image list
# Source the admin credentials
source ~/admin-openrc
# List compute services
openstack compute service list
# List the service catalog
openstack catalog list
# List images
openstack image list
# List flavors
openstack flavor list
# Create a default flavor if none exists
openstack flavor create --id 0 --vcpus 1 --ram 512 --disk 1 m1.nano
# Verify connectivity to your compute node
openstack compute service list --service nova-compute
# Source the admin credentials
source ~/admin-openrc
# List loaded extensions
openstack extension list --network
# List network components
openstack network agent list
# Create a self-service network (as specified in your setup)
# Create a network
openstack network create selfservice
# Create a subnet
openstack subnet create --network selfservice --subnet-range 192.168.1.0/24 --gateway 192.168.1.1 selfservice-subnet
# Create a router
openstack router create router1
# Add the subnet as an interface to the router
openstack router add subnet router1 selfservice-subnet
# Set the router gateway to the external network
# (assuming you've created an external network named 'external')
# openstack router set --external-gateway external router1
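If you have not created that external network yet, a sketch using the provider physical network mapped earlier (flat, on the 203.0.113.0/24 range from the netplan example — adjust names and addresses to your environment):
# Create a flat external (provider) network
openstack network create --external --provider-network-type flat --provider-physical-network provider external
# Add a subnet with a floating IP pool; DHCP stays off on provider subnets
openstack subnet create --network external --no-dhcp \
  --allocation-pool start=203.0.113.101,end=203.0.113.200 \
  --gateway 203.0.113.1 --subnet-range 203.0.113.0/24 external-subnet
# Now the router gateway command above can be run
openstack router set --external-gateway external router1
# Source the admin credentials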
source ~/admin-openrc
# List volume services
openstack volume service list
# Create a volume
openstack volume create --size 1 test-volume
# List volumes
openstack volume list
# Source the admin credentials
source ~/admin-openrc
# Create a container
openstack container create test-container
# Upload an object to the container
echo "test file" > test-file.txt
openstack object create test-container test-file.txt
# List containers
openstack container list
# List objects in the container
openstack object list test-container
- Open a web browser and navigate to http://controller/horizon
- Log in using admin credentials
- Explore the dashboard to verify all components are visible
# Check status of a specific service (e.g., nova-api)
sudo systemctl status nova-api
# View logs for a specific service
sudo journalctl -u nova-api
OpenStack service logs are typically located in /var/log/<service>:
# Check Nova API logs
sudo tail -f /var/log/nova/nova-api.log
# Check Keystone logs
sudo tail -f /var/log/keystone/keystone.log
# Check Glance API logs
sudo tail -f /var/log/glance/glance-api.log
Check network configuration:
# Verify interfaces are up
ip a
# Check routing table
ip route
# Test connectivity between nodes
ping <node_ip>Reset admin password:
# On the controller node
sudo keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
--bootstrap-admin-url http://controller:5000/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
Check database connectivity:
# Connect to MariaDB
sudo mysql -u root -p
# Verify database exists
SHOW DATABASES;
# Verify user permissions
SELECT User, Host FROM mysql.user;
# Source the admin credentials
source ~/admin-openrc
# Verify service registration
openstack service list
# Re-register service if missing
openstack service create --name <service_name> --description "<description>" <service_type>Note: Replace placeholder values (e.g., KEYSTONE_DBPASS, ADMIN_PASS) with secure passwords before running the commands.
Remember to adjust IP addresses according to your network configuration.