From 6da32e13ddd97512a0bef1018854f75f177fc2a5 Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Thu, 6 Jun 2024 11:27:28 +0100 Subject: [PATCH 01/13] Baremetal instance deployment --- .../ansible/deploy-baremetal-instance.yml | 110 ++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 etc/kayobe/ansible/deploy-baremetal-instance.yml diff --git a/etc/kayobe/ansible/deploy-baremetal-instance.yml b/etc/kayobe/ansible/deploy-baremetal-instance.yml new file mode 100644 index 000000000..acc53627b --- /dev/null +++ b/etc/kayobe/ansible/deploy-baremetal-instance.yml @@ -0,0 +1,110 @@ +--- + +- name: Update network allocations for new hypervisors + hosts: compute + gather_facts: false + connection: local + serial: 1 + vars: + network_allocation_path: "{{ kayobe_env_config_path }}/network-allocation.yml" + tasks: + - name: Show baremetal node + ansible.builtin.shell: + cmd: "openstack baremetal node show {{ inventory_hostname }} -f json" + register: node_show + failed_when: false + changed_when: false + delegate_to: localhost + + - name: Set baremetal node JSON variable + ansible.builtin.set_fact: + node_show_json: "{{ node_show.stdout | to_json | from_json }}" + failed_when: false + changed_when: false + + - block: + - name: Slurp network allocations + ansible.builtin.slurp: + path: "{{ network_allocation_path }}" + register: net_alc + + - name: Read network allocations + ansible.builtin.set_fact: + net_alc_yaml: "{{ net_alc['content'] | b64decode | from_yaml }}" + + - name: Write node IP address to allocations + ansible.builtin.set_fact: + new_net_alc: "{{ net_alc_yaml | combine(new_ips, recursive=True) }}" + vars: + new_ips: "{ '{{ admin_oc_net_name }}_ips': { '{{ inventory_hostname }}': '{{ ansible_host }}' } }" + + - name: Write new network allocations + ansible.builtin.copy: + content: "{{ new_net_alc | to_nice_yaml(indent=2) }}" + dest: "{{ network_allocation_path }}" + when: + - '"HTTP 404" not in node_show.stderr' + +- name: Deploy baremetal compute nodes as hypervisors + hosts: compute + gather_facts: false + connection: local + vars: + hypervisor_image: "37825714-27da-48e0-8887-d609349e703b" + key_name: "testing" + availability_zone: "nova" + baremetal_flavor: "baremetal-A" + baremetal_network: "rack-net" + auth: + auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" + username: "{{ lookup('env', 'OS_USERNAME') }}" + password: "{{ lookup('env', 'OS_PASSWORD') }}" + project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}" + tasks: + - name: Show baremetal node + ansible.builtin.shell: + cmd: "openstack baremetal node show {{ inventory_hostname }} -f json" + register: node_show + failed_when: false + changed_when: false + delegate_to: localhost + + - name: Set baremetal node JSON variable + ansible.builtin.set_fact: + node_show_json: "{{ node_show.stdout | to_json | from_json }}" + failed_when: false + changed_when: false + + - block: + - name: Create port + openstack.cloud.port: + state: present + name: "{{ inventory_hostname }}" + network: "{{ baremetal_network }}" + auth: "{{ auth }}" + fixed_ips: + - ip_address: "{{ ansible_host }}" + vnic_type: baremetal + delegate_to: localhost + register: port + + - name: Deploy hypervisor image + openstack.cloud.server: + state: present + name: "{{ inventory_hostname }}" + nics: + - port-id: "{{ port.port.id }}" + auth: "{{ auth }}" + availability_zone: "{{ availability_zone }}::{{ node_show_json.uuid }}" + image: "{{ hypervisor_image }}" + flavor: "{{ baremetal_flavor }}" + key_name: "{{ key_name }}" + timeout: 1800 + config_drive: yes 
+ meta: + ironic_node: "{{ inventory_hostname }}" + delegate_to: localhost + register: server + when: + - '"HTTP 404" not in node_show.stderr' + - '"available" in node_show_json.provision_state' From d7e920a13ac1c2c995bdb350c353970b20626f28 Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Thu, 21 Mar 2024 16:37:19 +0000 Subject: [PATCH 02/13] Ironic deployment guide documentation --- doc/source/configuration/index.rst | 1 + doc/source/configuration/ironic.rst | 322 ++++++++++++++++++ .../ansible/deploy-baremetal-instance.yml | 11 - 3 files changed, 323 insertions(+), 11 deletions(-) create mode 100644 doc/source/configuration/ironic.rst diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst index f8be7891a..d9a49382b 100644 --- a/doc/source/configuration/index.rst +++ b/doc/source/configuration/index.rst @@ -11,6 +11,7 @@ the various features provided. walled-garden release-train host-images + ironic lvm swap cephadm diff --git a/doc/source/configuration/ironic.rst b/doc/source/configuration/ironic.rst new file mode 100644 index 000000000..03b5747b3 --- /dev/null +++ b/doc/source/configuration/ironic.rst @@ -0,0 +1,322 @@ +====== +Ironic +====== + +Ironic networking +================= + +Ironic will require the workload provisioning and cleaning networks to be +configured in ``networks.yml`` + +The workload provisioning network will require an allocation pool for +Ironic Inspection and for Neutron. The Inspector allocation pool will be +used to define static addresses for baremetal nodes during inspection and +the Neutron allocation pool is used to assign addresses dynamically during +baremetal provisioning. + +.. code-block:: yaml + + # Workload provisioning network IP information. + provision_wl_net_cidr: "172.0.0.0/16" + provision_wl_net_allocation_pool_start: "172.0.0.4" + provision_wl_net_allocation_pool_end: "172.0.0.6" + provision_wl_net_inspection_allocation_pool_start: "172.0.1.4" + provision_wl_net_inspection_allocation_pool_end: "172.0.1.250" + provision_wl_net_neutron_allocation_pool_start: "172.0.2.4" + provision_wl_net_neutron_allocation_pool_end: "172.0.2.250" + provision_wl_net_neutron_gateway: "172.0.1.1" + +The cleaning network will also require a Neutron allocation pool. + +.. code-block:: yaml + + # Cleaning network IP information. + cleaning_net_cidr: "172.1.0.0/16" + cleaning_net_allocation_pool_start: "172.1.0.4" + cleaning_net_allocation_pool_end: "172.1.0.6" + cleaning_net_neutron_allocation_pool_start: "172.1.2.4" + cleaning_net_neutron_allocation_pool_end: "172.1.2.250" + cleaning_net_neutron_gateway: "172.1.0.1" + +OpenStack Config +================ + +Overcloud Ironic will be deployed with a listening TFTP server on the +control plane which will provide baremetal nodes that PXE boot with the +Ironic Python Agent (IPA) kernel and ramdisk. Since the TFTP server is +listening exclusively on the internal API network it's neccessary for a +route to exist between the provisoning/cleaning networks and the internal +API network, we can achieve this is by defining a Neutron router using +`OpenStack Config `. + +It not necessary to define the provision and cleaning networks in this +configuration as they will be generated during + +.. code-block:: console + + kayobe overcloud post configure + +The openstack config file could resemble the network, subnet and router +configuration shown below: + +.. 
code-block:: yaml + + networks: + - "{{ openstack_network_internal }}" + + openstack_network_internal: + name: "internal-net" + project: "admin" + provider_network_type: "vlan" + provider_physical_network: "physnet1" + provider_segmentation_id: 458 + shared: false + external: true + + subnets: + - "{{ openstack_subnet_internal }}" + + openstack_subnet_internal: + name: "internal-net" + project: "admin" + cidr: "10.10.3.0/24" + enable_dhcp: true + allocation_pool_start: "10.10.3.3" + allocation_pool_end: "10.10.3.3" + + openstack_routers: + - "{{ openstack_router_ironic }}" + + openstack_router_ironic: + - name: ironic + project: admin + interfaces: + - net: "provision-net" + subnet: "provision-net" + portip: "172.0.1.1" + - net: "cleaning-net" + subnet: "cleaning-net" + portip: "172.1.0.1" + network: internal-net + +To provision baremetal nodes in Nova you will also require setting a flavour +specific to that type of baremetal host. You will need to replace the custom +resource ``resources:CUSTOM_`` placeholder with +the resource class of your baremetal hosts, you will also need this later when +configuring the baremetal-compute inventory. + +.. code-block:: yaml + + openstack_flavors: + - "{{ openstack_flavor_baremetal_A }}" + # Bare metal compute node. + openstack_flavor_baremetal_A: + name: "baremetal-A" + ram: 1048576 + disk: 480 + vcpus: 256 + extra_specs: + "resources:CUSTOM_": 1 + "resources:VCPU": 0 + "resources:MEMORY_MB": 0 + "resources:DISK_GB": 0 + +Enabling conntrack +================== + +Conntrack_helper will be required when UEFI booting on a cloud with ML2/OVS +and using the iptables firewall_driver, otherwise TFTP traffic is dropped due +to it being UDP. You will need to define some extension drivers in ``neutron.yml`` +to ensure conntrack is enabled in neutron server. + +.. code-block:: yaml + + kolla_neutron_ml2_extension_drivers: + port_security + conntrack_helper + dns_domain_ports + +The neutron l3 agent also requires conntrack to be set as an extension in +``kolla/config/neutron/l3_agent.ini`` + +.. code-block:: ini + + [agent] + extensions = conntrack_helper + +It is also required to load the conntrack kernel module ``nf_nat_tftp``, +``nf_conntrack`` and ``nf_conntrack_tftp`` on network nodes. You can load these +modules using modprobe or define these in /etc/module-load. + +The Ironic neutron router will also need to be configured to use +conntrack_helper. + +.. code-block:: json + + "conntrack_helpers": { + "protocol": "udp", + "port": 69, + "helper": "tftp" + } + +To add the conntrack_helper to the neutron router, you can use the openstack +CLI + +.. code-block:: console + + openstack network l3 conntrack helper create \ + --helper tftp \ + --protocol udp \ + --port 69 \ + + +Baremetal inventory +=================== + +To begin enrolling nodes you will need to define them in the hosts file. + +.. code-block:: ini + + [r1] + hv1 ipmi_address=10.1.28.16 + hv2 ipmi_address=10.1.28.17 + … + + [baremetal-compute:children] + r1 + +The baremetal nodes will also require some extra variables to be defined +in the group_vars for your rack, these should include the BMC credentials +and the Ironic driver you wish to use. + +.. 
code-block:: yaml + + ironic_driver: redfish + + ironic_driver_info: + redfish_system_id: "{{ ironic_redfish_system_id }}" + redfish_address: "{{ ironic_redfish_address }}" + redfish_username: "{{ ironic_redfish_username }}" + redfish_password: "{{ ironic_redfish_password }}" + redfish_verify_ca: "{{ ironic_redfish_verify_ca }}" + ipmi_address: "{{ ipmi_address }}" + + ironic_properties: + capabilities: "{{ ironic_capabilities }}" + + ironic_resource_class: "example_resouce_class" + ironic_redfish_system_id: "/redfish/v1/Systems/System.Embedded.1" + ironic_redfish_verify_ca: "{{ inspector_rule_var_redfish_verify_ca }}" + ironic_redfish_address: "{{ ipmi_address }}" + ironic_redfish_username: "{{ inspector_redfish_username }}" + ironic_redfish_password: "{{ inspector_redfish_password }}" + ironic_capabilities: "boot_option:local,boot_mode:uefi" + +The typical layout for baremetal nodes are separated by racks, for instance +in rack 1 we have the following configuration set up where the BMC addresses +are defined for all nodes, and Redfish information such as username, passwords +and the system ID are defined for the rack as a whole. + +You can add more racks to the deployment by replicating the rack 1 example and +adding that as an entry to the baremetal-compute group. + +Node enrollment +=============== + +When nodes are defined in the inventory you can begin enrolling them by +invoking the Kayobe commmand + +.. code-block:: console + + (kayobe) $ kayobe baremetal compute register + +Following registration, the baremetal nodes can be inspected and made +available for provisioning by Nova via the Kayobe commands + +.. code-block:: console + + (kayobe) $ kayobe baremetal compute inspect + (kayobe) $ kayobe baremetal compute provide + +Baremetal hypervisors +===================== + +To deploy baremetal hypervisor nodes it will be neccessary to split out +the nodes you wish to use as hypervisors and add it to the Kayobe compute +group to ensure the hypervisor is configured as a compute node during +host configure. + +.. code-block:: ini + + [r1] + hv1 ipmi_address=10.1.28.16 + + [r1-hyp] + hv2 ipmi_address=10.1.28.17 + + [r1:children] + r1-hyp + + [compute:children] + r1-hyp + + [baremetal-compute:children] + r1 + +The hypervisor nodes will also need to define hypervisor specific variables +such as the image to be used, network to provision on and the availability zone. +These can be defined under group_vars. + +.. code-block:: yaml + + hypervisor_image: "37825714-27da-48e0-8887-d609349e703b" + key_name: "testing" + availability_zone: "nova" + baremetal_flavor: "baremetal-A" + baremetal_network: "rack-net" + auth: + auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" + username: "{{ lookup('env', 'OS_USERNAME') }}" + password: "{{ lookup('env', 'OS_PASSWORD') }}" + project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}" + +To begin deploying these nodes as instances you will need to run the Ansible +playbook deploy-baremetal-instance.yml. + +.. code-block:: console + + (kayobe) $ kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/deploy-baremetal-instance.yml + +This playbook will update network allocations with the new baremetal hypervisor +IP addresses, create a Neutron port corresponding to the address and deploy +an image on the baremetal instance. + +When the playbook has finished and the rack is successfully imaged, they can be +configured with ``kayobe overcloud host configure`` and kolla compute services +can be deployed with ``kayobe overcloud service deploy``. 
+ +Un-enrolling hypervisors +======================== + +To convert baremetal hypervisors into regular baremetal compute instances you will need +to drain the hypervisor of all running compute instances, you should first invoke the +nova-compute-disable playbook to ensure all Nova services on the baremetal node are disabled +and compute instances will not be allocated to this node. + +.. code-block:: console + + (kayobe) $ kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/nova-compute-disable.yml + +Now the Nova services are disabled you should also ensure any existing compute instances +are moved elsewhere by invoking the nova-compute-drain playbook + +.. code-block:: console + + (kayobe) $ kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/nova-compute-drain.yml + +Now the node has no instances allocated to it you can delete the instance using +the OpenStack CLI and the node will be moved back to ``available`` state. + +.. code-block:: console + + (os-venv) $ openstack server delete ... diff --git a/etc/kayobe/ansible/deploy-baremetal-instance.yml b/etc/kayobe/ansible/deploy-baremetal-instance.yml index acc53627b..145f26dd0 100644 --- a/etc/kayobe/ansible/deploy-baremetal-instance.yml +++ b/etc/kayobe/ansible/deploy-baremetal-instance.yml @@ -49,17 +49,6 @@ hosts: compute gather_facts: false connection: local - vars: - hypervisor_image: "37825714-27da-48e0-8887-d609349e703b" - key_name: "testing" - availability_zone: "nova" - baremetal_flavor: "baremetal-A" - baremetal_network: "rack-net" - auth: - auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" - username: "{{ lookup('env', 'OS_USERNAME') }}" - password: "{{ lookup('env', 'OS_PASSWORD') }}" - project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}" tasks: - name: Show baremetal node ansible.builtin.shell: From 8176522cf785a99006e34d813249a82af62c76ab Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Mon, 9 Sep 2024 11:36:13 +0100 Subject: [PATCH 03/13] Sync with client documentation --- doc/source/configuration/ironic.rst | 300 ++++++++++++++++++++-------- 1 file changed, 219 insertions(+), 81 deletions(-) diff --git a/doc/source/configuration/ironic.rst b/doc/source/configuration/ironic.rst index 03b5747b3..ec502cf30 100644 --- a/doc/source/configuration/ironic.rst +++ b/doc/source/configuration/ironic.rst @@ -116,13 +116,13 @@ configuring the baremetal-compute inventory. disk: 480 vcpus: 256 extra_specs: - "resources:CUSTOM_": 1 - "resources:VCPU": 0 - "resources:MEMORY_MB": 0 - "resources:DISK_GB": 0 + "resources:CUSTOM_": 1 + "resources:VCPU": 0 + "resources:MEMORY_MB": 0 + "resources:DISK_GB": 0 -Enabling conntrack -================== +Enabling conntrack (ML2/OVS only) +================================= Conntrack_helper will be required when UEFI booting on a cloud with ML2/OVS and using the iptables firewall_driver, otherwise TFTP traffic is dropped due @@ -173,52 +173,81 @@ CLI Baremetal inventory =================== -To begin enrolling nodes you will need to define them in the hosts file. - -.. code-block:: ini - - [r1] - hv1 ipmi_address=10.1.28.16 - hv2 ipmi_address=10.1.28.17 - … - - [baremetal-compute:children] - r1 - -The baremetal nodes will also require some extra variables to be defined -in the group_vars for your rack, these should include the BMC credentials -and the Ironic driver you wish to use. +The baremetal inventory is constructed with three different group types. 
+The first group is the default baremetal compute group for Kayobe called +[baremetal-compute] and will contain all baremetal nodes including tenant +and hypervisor nodes. This group acts as a parent for all baremetal nodes +and config that can be shared between all baremetal nodes will be defined +here. + +We will need to create a Kayobe group_vars file for the baremetal-compute +group that contains all the variables we want to define for the group. We +can put all these variables in the inventory in +‘inventory/group_vars/baremetal-compute/ironic-vars’ The ironic_driver_info +template dict contains all variables to be templated into the driver_info +property in Ironic. This includes the BMC address, username, password, +IPA configuration etc. We also currently define the ironic_driver here as +all nodes currently use the Redfish driver. .. code-block:: yaml ironic_driver: redfish ironic_driver_info: - redfish_system_id: "{{ ironic_redfish_system_id }}" - redfish_address: "{{ ironic_redfish_address }}" - redfish_username: "{{ ironic_redfish_username }}" - redfish_password: "{{ ironic_redfish_password }}" - redfish_verify_ca: "{{ ironic_redfish_verify_ca }}" - ipmi_address: "{{ ipmi_address }}" + redfish_system_id: "{{ ironic_redfish_system_id }}" + redfish_address: "{{ ironic_redfish_address }}" + redfish_username: "{{ ironic_redfish_username }}" + redfish_password: "{{ ironic_redfish_password }}" + redfish_verify_ca: "{{ ironic_redfish_verify_ca }}" + ipmi_address: "{{ ipmi_address }}" ironic_properties: - capabilities: "{{ ironic_capabilities }}" + capabilities: "{{ ironic_capabilities }}" - ironic_resource_class: "example_resouce_class" - ironic_redfish_system_id: "/redfish/v1/Systems/System.Embedded.1" - ironic_redfish_verify_ca: "{{ inspector_rule_var_redfish_verify_ca }}" ironic_redfish_address: "{{ ipmi_address }}" ironic_redfish_username: "{{ inspector_redfish_username }}" ironic_redfish_password: "{{ inspector_redfish_password }}" ironic_capabilities: "boot_option:local,boot_mode:uefi" -The typical layout for baremetal nodes are separated by racks, for instance -in rack 1 we have the following configuration set up where the BMC addresses -are defined for all nodes, and Redfish information such as username, passwords -and the system ID are defined for the rack as a whole. +The second group type will be the hardware type that a baremetal node belongs +to, These variables will be in the inventory too in ‘inventory/group_vars/ +baremetal-’ + +Specific variables to the hardware type include the resource_class which is +used to associate the hardware type to the flavour in Nova we defined earlier +in Openstack Config. + +.. code-block:: yaml + + ironic_resource_class: "example_resource_class" + ironic_redfish_system_id: "example_system_id" + ironic_redfish_verify_ca: "{{ inspector_rule_var_redfish_verify_ca }}" + +The third group type will be the rack where the node is installed. This is the +group in which the rack specific networking configuration is defined here and +where the BMC address is entered as a host variable for each baremetal node. +Nodes can now be entered directly into the hosts file as part of this group. + +.. code-block:: ini + + [rack1] + hv001 ipmi_address=10.1.28.16 + hv002 ipmi_address=10.1.28.17 + … + +This rack group contains the baremetal hosts but will also need to be +associated with the baremetal-compute and baremetal-sr645 groups in order for +those variables to be associated with the rack group. + +.. 
code-block:: ini -You can add more racks to the deployment by replicating the rack 1 example and -adding that as an entry to the baremetal-compute group. + [baremetal-:children] + rack1 + … + + [baremetal-compute:children] + rack1 + … Node enrollment =============== @@ -230,85 +259,194 @@ invoking the Kayobe commmand (kayobe) $ kayobe baremetal compute register -Following registration, the baremetal nodes can be inspected and made -available for provisioning by Nova via the Kayobe commands +All nodes that were not defined in Ironic previously should’ve been enrolled +following this playbook and should now be in ‘manageable’ state if Ironic was +able to reach the BMC of the node. We will need to inspect the baremetal nodes +to gather information about their hardware to prepare for deployment. Kayobe +provides an inspection workflow and can be run using: .. code-block:: console (kayobe) $ kayobe baremetal compute inspect + +Inspection would require PXE booting the nodes into IPA. If the nodes were able +to PXE boot properly they would now be in ‘manageable’ state again. If an error +developed during PXE booting, the nodes will now be in ‘inspect failed’ state +and issues preventing the node from booting or returning introspection data +will need to be resolved before continuing. If the nodes did inspect properly, +they can be cleaned and made available to Nova by running the provide workflow. + +.. code-block:: console + (kayobe) $ kayobe baremetal compute provide Baremetal hypervisors ===================== -To deploy baremetal hypervisor nodes it will be neccessary to split out -the nodes you wish to use as hypervisors and add it to the Kayobe compute -group to ensure the hypervisor is configured as a compute node during -host configure. +Nodes that will not be dedicated as baremetal tenant nodes can be converted +into hypervisors as required. StackHPC Kayobe configuration provides a workflow +to provision baremetal tenants with the purpose of converted these nodes to +hypervisors. To begin the process of converting nodes we will need to define a +child group of the rack which will contain baremetal nodes dedicated to compute +hosts. .. code-block:: ini - [r1] - hv1 ipmi_address=10.1.28.16 + [rack1] + hv001 ipmi_address=10.1.28.16 + hv002 ipmi_address=10.1.28.17 + … - [r1-hyp] - hv2 ipmi_address=10.1.28.17 + [rack1-compute] + hv003 ipmi_address=10.1.28.18 + hv004 ipmi_address=10.1.28.19 + … - [r1:children] - r1-hyp + [rack1:children] + rack1-compute - [compute:children] - r1-hyp + [compute:children] + rack1-compute - [baremetal-compute:children] - r1 +The rack1-compute group as shown above is also associated with the Kayobe +compute group in order for Kayobe to run the compute Kolla workflows on these +nodes during service deployment. -The hypervisor nodes will also need to define hypervisor specific variables -such as the image to be used, network to provision on and the availability zone. -These can be defined under group_vars. +You will also need to setup the Kayobe network configuration for the rack1 +group. In networks.yml you should create an admin network for the rack1 group, +this should consist of the correct CIDR for the rack being deployed. +The configuration should resemble below in networks.yml: .. 
code-block:: yaml - hypervisor_image: "37825714-27da-48e0-8887-d609349e703b" - key_name: "testing" - availability_zone: "nova" - baremetal_flavor: "baremetal-A" - baremetal_network: "rack-net" - auth: - auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" - username: "{{ lookup('env', 'OS_USERNAME') }}" - password: "{{ lookup('env', 'OS_PASSWORD') }}" - project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}" + physical_rack1_admin_oc_net_cidr: “172.16.208.128/27” + physical_rack1_admin_oc_net_gateway: “172.16.208.129” + physical_rack1_admin_net_defroute: true -To begin deploying these nodes as instances you will need to run the Ansible -playbook deploy-baremetal-instance.yml. +You will also need to configure a neutron network for racks to deploy instances +on, we can configure this in openstack-config as before. We will need to define +this network and associate a subnet for it for each rack we want to enroll in +Ironic. + +.. code-block:: yaml + + openstack_network_rack: + name: "rack-net" + project: "admin" + provider_network_type: "vlan" + provider_physical_network: "provider" + provider_segmentation_id: 450 + shared: false + external: false + subnets: + - "{{ openstack_subnet_rack1 }}" + + openstack_subnet_rack1: + name: "rack1-subnet" + project: "admin" + cidr: "172.16.208.128/27" + enable_dhcp: false + gateway_ip: "172.16.208.129" + allocation_pool_start: "172.16.208.130" + allocation_pool_end: "172.16.208.130" + +The subnet configuration largely resembles the Kayobe network configuration, +however we do not need to define an allocation pool or enable dhcp as we will +be associating neutron ports with our hypervisor instances per IP address to +ensure they match up properly. + +Now we should ensure the network interfaces are properly configured for the +rack1-compute group, the interfaces should include the kayobe admin network +for rack1 and the kayobe internal API network and be defined in the group_vars. + +.. code-block:: yaml + + network_interfaces: + - "internal_net" + - "physical_rack1_admin_oc_net" + + admin_oc_net_name: "physical_rack1_admin_oc_net" + + physical_rack1_admin_oc_net_bridge_ports: + - eth0 + physical_rack1_admin_oc_net_interface: br0 + + internal_net_interface: "br0.{{ internal_net_vlan }}" + +We should also ensure some variables are configured properly for our group, +such as the hypervisor image. These variables can be defined anywhere in +group_vars, we can place them in the ironic-vars file we used before for +baremetal node registration. + +.. code-block:: yaml + + hypervisor_image: "" + key_name: "" + availability_zone: "nova" + baremetal_flavor: "" + baremetal_network: "rack-net" + auth: + auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" + username: "{{ lookup('env', 'OS_USERNAME') }}" + password: "{{ lookup('env', 'OS_PASSWORD') }}" + project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}" + +With these variables defined we can now begin deploying the baremetal nodes as +instances, to begin we invoke the deploy-baremetal-hypervisor ansible playbook. .. code-block:: console - (kayobe) $ kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/deploy-baremetal-instance.yml + kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/deploy-baremetal-hypervisor.yml + +This playbook will update the Kayobe network allocations with the the admin +network addresses associated with that rack for each baremetal server, e.g. +in the case of rack 1 this will appear in network-allocations.yml as + +.. 
code-block:: yaml + + physical_rack1_admin_oc_net_ips: + hv003: 172.16.208.133 + hv004: 172.16.208.134 + +Once the network allocations have been updated, the playbook will then create a +Neutron port configured with the address of the baremetal node admin network. +The baremetal hypervisors will then be imaged and deployed associated with that +Neutron port. You should ensure that all nodes are correctly associated with +the right baremetal instance, you can do this by running a baremetal node show +on any given hypervisor node and comparing the server uuid to the metadata on +the Nova instance. -This playbook will update network allocations with the new baremetal hypervisor -IP addresses, create a Neutron port corresponding to the address and deploy -an image on the baremetal instance. +Once the nodes are deployed, we can use Kayobe to configure them as compute +hosts, running kayobe overcloud host configure on these nodes will ensure that +all networking, package and various other host configurations are setup + +.. code-block:: console + + kayobe overcloud host configure --limit baremetal- + +Following host configuration we can begin deploying OpenStack services to the +baremetal hypervisors by invoking kayobe overcloud service deploy. Nova +services will be deployed to the baremetal hosts. + +.. code-block:: console -When the playbook has finished and the rack is successfully imaged, they can be -configured with ``kayobe overcloud host configure`` and kolla compute services -can be deployed with ``kayobe overcloud service deploy``. + kayobe overcloud service deploy --kolla-limit baremetal- Un-enrolling hypervisors ======================== -To convert baremetal hypervisors into regular baremetal compute instances you will need -to drain the hypervisor of all running compute instances, you should first invoke the -nova-compute-disable playbook to ensure all Nova services on the baremetal node are disabled -and compute instances will not be allocated to this node. +To convert baremetal hypervisors into regular baremetal compute instances you +will need to drain the hypervisor of all running compute instances, you should +first invoke the nova-compute-disable playbook to ensure all Nova services on +the baremetal node are disabled and compute instances will not be allocated to +this node. .. code-block:: console (kayobe) $ kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/nova-compute-disable.yml -Now the Nova services are disabled you should also ensure any existing compute instances -are moved elsewhere by invoking the nova-compute-drain playbook +Now the Nova services are disabled you should also ensure any existing compute +instances are moved elsewhere by invoking the nova-compute-drain playbook .. code-block:: console From 828b74fa987760c1559c6ad2305f6e0253567180 Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Mon, 9 Sep 2024 11:42:54 +0100 Subject: [PATCH 04/13] Add warning --- doc/source/configuration/ironic.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/configuration/ironic.rst b/doc/source/configuration/ironic.rst index ec502cf30..6e8249600 100644 --- a/doc/source/configuration/ironic.rst +++ b/doc/source/configuration/ironic.rst @@ -2,6 +2,11 @@ Ironic ====== +Experimental documentation +========================== + +This documentation is still WIP and is subject to change. 
+ Ironic networking ================= From eb82723e45b007e9e893acbbd00926dda9f78049 Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Mon, 9 Sep 2024 11:50:28 +0100 Subject: [PATCH 05/13] Fix tox --- doc/source/configuration/ironic.rst | 76 ++++++++++++++--------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/doc/source/configuration/ironic.rst b/doc/source/configuration/ironic.rst index 6e8249600..bb68effc2 100644 --- a/doc/source/configuration/ironic.rst +++ b/doc/source/configuration/ironic.rst @@ -235,24 +235,24 @@ Nodes can now be entered directly into the hosts file as part of this group. .. code-block:: ini - [rack1] - hv001 ipmi_address=10.1.28.16 - hv002 ipmi_address=10.1.28.17 - … + [rack1] + hv001 ipmi_address=10.1.28.16 + hv002 ipmi_address=10.1.28.17 + … This rack group contains the baremetal hosts but will also need to be associated with the baremetal-compute and baremetal-sr645 groups in order for those variables to be associated with the rack group. - + .. code-block:: ini - [baremetal-:children] - rack1 - … + [baremetal-:children] + rack1 + … - [baremetal-compute:children] - rack1 - … + [baremetal-compute:children] + rack1 + … Node enrollment =============== @@ -297,21 +297,21 @@ hosts. .. code-block:: ini - [rack1] + [rack1] hv001 ipmi_address=10.1.28.16 hv002 ipmi_address=10.1.28.17 … - [rack1-compute] + [rack1-compute] hv003 ipmi_address=10.1.28.18 hv004 ipmi_address=10.1.28.19 … - [rack1:children] - rack1-compute + [rack1:children] + rack1-compute - [compute:children] - rack1-compute + [compute:children] + rack1-compute The rack1-compute group as shown above is also associated with the Kayobe compute group in order for Kayobe to run the compute Kolla workflows on these @@ -324,9 +324,9 @@ The configuration should resemble below in networks.yml: .. code-block:: yaml - physical_rack1_admin_oc_net_cidr: “172.16.208.128/27” - physical_rack1_admin_oc_net_gateway: “172.16.208.129” - physical_rack1_admin_net_defroute: true + physical_rack1_admin_oc_net_cidr: “172.16.208.128/27” + physical_rack1_admin_oc_net_gateway: “172.16.208.129” + physical_rack1_admin_net_defroute: true You will also need to configure a neutron network for racks to deploy instances on, we can configure this in openstack-config as before. We will need to define @@ -335,16 +335,16 @@ Ironic. .. code-block:: yaml - openstack_network_rack: - name: "rack-net" - project: "admin" - provider_network_type: "vlan" - provider_physical_network: "provider" - provider_segmentation_id: 450 - shared: false - external: false - subnets: - - "{{ openstack_subnet_rack1 }}" + openstack_network_rack: + name: "rack-net" + project: "admin" + provider_network_type: "vlan" + provider_physical_network: "provider" + provider_segmentation_id: 450 + shared: false + external: false + subnets: + - "{{ openstack_subnet_rack1 }}" openstack_subnet_rack1: name: "rack1-subnet" @@ -385,12 +385,12 @@ baremetal node registration. .. code-block:: yaml - hypervisor_image: "" - key_name: "" - availability_zone: "nova" - baremetal_flavor: "" - baremetal_network: "rack-net" - auth: + hypervisor_image: "" + key_name: "" + availability_zone: "nova" + baremetal_flavor: "" + baremetal_network: "rack-net" + auth: auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" username: "{{ lookup('env', 'OS_USERNAME') }}" password: "{{ lookup('env', 'OS_PASSWORD') }}" @@ -401,11 +401,11 @@ instances, to begin we invoke the deploy-baremetal-hypervisor ansible playbook. .. 
code-block:: console - kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/deploy-baremetal-hypervisor.yml + kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/deploy-baremetal-hypervisor.yml This playbook will update the Kayobe network allocations with the the admin network addresses associated with that rack for each baremetal server, e.g. -in the case of rack 1 this will appear in network-allocations.yml as +in the case of rack 1 this will appear in network-allocations.yml as .. code-block:: yaml From 7f42deee8239e250e8c89a5ff3966dd1eab7f485 Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Mon, 9 Sep 2024 11:57:24 +0100 Subject: [PATCH 06/13] releasenote --- doc/source/configuration/ironic.rst | 4 ++-- .../notes/baremetal-hypervisors-fc1091bd507e809b.yaml | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/baremetal-hypervisors-fc1091bd507e809b.yaml diff --git a/doc/source/configuration/ironic.rst b/doc/source/configuration/ironic.rst index bb68effc2..b16ba9d14 100644 --- a/doc/source/configuration/ironic.rst +++ b/doc/source/configuration/ironic.rst @@ -397,11 +397,11 @@ baremetal node registration. project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}" With these variables defined we can now begin deploying the baremetal nodes as -instances, to begin we invoke the deploy-baremetal-hypervisor ansible playbook. +instances, to begin we invoke the deploy-baremetal-instance ansible playbook. .. code-block:: console - kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/deploy-baremetal-hypervisor.yml + kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/deploy-baremetal-instance.yml This playbook will update the Kayobe network allocations with the the admin network addresses associated with that rack for each baremetal server, e.g. diff --git a/releasenotes/notes/baremetal-hypervisors-fc1091bd507e809b.yaml b/releasenotes/notes/baremetal-hypervisors-fc1091bd507e809b.yaml new file mode 100644 index 000000000..2da3fc0a6 --- /dev/null +++ b/releasenotes/notes/baremetal-hypervisors-fc1091bd507e809b.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds Ironic documentation to SKC and a playbook to provision + instances on baremetal nodes. \ No newline at end of file From 683f547ba3e75dc7037abf75ac136f07e544bed4 Mon Sep 17 00:00:00 2001 From: Jake Hutchinson <39007539+assumptionsandg@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:06:28 +0000 Subject: [PATCH 07/13] Update doc/source/configuration/ironic.rst Co-authored-by: Alex-Welsh <112560678+Alex-Welsh@users.noreply.github.com> --- doc/source/configuration/ironic.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/doc/source/configuration/ironic.rst b/doc/source/configuration/ironic.rst index b16ba9d14..4ece292e2 100644 --- a/doc/source/configuration/ironic.rst +++ b/doc/source/configuration/ironic.rst @@ -2,10 +2,8 @@ Ironic ====== -Experimental documentation -========================== - -This documentation is still WIP and is subject to change. +.. warning:: + This page is a work in progress, and is subject to change. 
Ironic networking ================= From f3097df643503cfa5e7e095a82ee634b02febf29 Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Mon, 3 Feb 2025 14:25:47 +0000 Subject: [PATCH 08/13] Address review comments --- doc/source/configuration/ironic.rst | 101 +++++++++--------- .../ansible/deploy-baremetal-instance.yml | 6 -- 2 files changed, 48 insertions(+), 59 deletions(-) diff --git a/doc/source/configuration/ironic.rst b/doc/source/configuration/ironic.rst index b16ba9d14..e55a9359b 100644 --- a/doc/source/configuration/ironic.rst +++ b/doc/source/configuration/ironic.rst @@ -46,7 +46,7 @@ The cleaning network will also require a Neutron allocation pool. OpenStack Config ================ -Overcloud Ironic will be deployed with a listening TFTP server on the +Overcloud Ironic is deployed with a listening TFTP server on the control plane which will provide baremetal nodes that PXE boot with the Ironic Python Agent (IPA) kernel and ramdisk. Since the TFTP server is listening exclusively on the internal API network it's neccessary for a @@ -55,13 +55,13 @@ API network, we can achieve this is by defining a Neutron router using `OpenStack Config `. It not necessary to define the provision and cleaning networks in this -configuration as they will be generated during +configuration as this is generated during .. code-block:: console kayobe overcloud post configure -The openstack config file could resemble the network, subnet and router +The OpenStack config file could resemble the network, subnet and router configuration shown below: .. code-block:: yaml @@ -129,10 +129,10 @@ configuring the baremetal-compute inventory. Enabling conntrack (ML2/OVS only) ================================= -Conntrack_helper will be required when UEFI booting on a cloud with ML2/OVS +Conntrack_helper is required when UEFI booting on a cloud with ML2/OVS and using the iptables firewall_driver, otherwise TFTP traffic is dropped due to it being UDP. You will need to define some extension drivers in ``neutron.yml`` -to ensure conntrack is enabled in neutron server. +to ensure conntrack is enabled in Neutron server. .. code-block:: yaml @@ -141,7 +141,7 @@ to ensure conntrack is enabled in neutron server. conntrack_helper dns_domain_ports -The neutron l3 agent also requires conntrack to be set as an extension in +The Neutron l3 agent also requires conntrack to be set as an extension in ``kolla/config/neutron/l3_agent.ini`` .. code-block:: ini @@ -149,12 +149,12 @@ The neutron l3 agent also requires conntrack to be set as an extension in [agent] extensions = conntrack_helper -It is also required to load the conntrack kernel module ``nf_nat_tftp``, -``nf_conntrack`` and ``nf_conntrack_tftp`` on network nodes. You can load these -modules using modprobe or define these in /etc/module-load. +The conntrack kernel modules ``nf_nat_tftp``, ``nf_conntrack``, +and ``nf_conntrack_tftp`` are also required on network nodes. You +can load these modules using modprobe or define these in /etc/module-load. -The Ironic neutron router will also need to be configured to use -conntrack_helper. +The Ironic Neutron router will also need to be configured to use +``conntrack_helper``. .. code-block:: json @@ -164,7 +164,7 @@ conntrack_helper. "helper": "tftp" } -To add the conntrack_helper to the neutron router, you can use the openstack +To add the conntrack_helper to the Neutron router, you can use the OpenStack CLI .. 
code-block:: console @@ -180,15 +180,15 @@ Baremetal inventory The baremetal inventory is constructed with three different group types. The first group is the default baremetal compute group for Kayobe called -[baremetal-compute] and will contain all baremetal nodes including tenant -and hypervisor nodes. This group acts as a parent for all baremetal nodes -and config that can be shared between all baremetal nodes will be defined -here. +``[baremetal-compute]`` and will contain all baremetal nodes including +baremetal-compute (tenant) nodes and hypervisor nodes. This group acts as +a parent for all baremetal nodes and config that is shared between all +baremetal nodes is defined here. We will need to create a Kayobe group_vars file for the baremetal-compute group that contains all the variables we want to define for the group. We can put all these variables in the inventory in -‘inventory/group_vars/baremetal-compute/ironic-vars’ The ironic_driver_info +``‘inventory/group_vars/baremetal-compute/ironic-vars’`` The ironic_driver_info template dict contains all variables to be templated into the driver_info property in Ironic. This includes the BMC address, username, password, IPA configuration etc. We also currently define the ironic_driver here as @@ -214,13 +214,13 @@ all nodes currently use the Redfish driver. ironic_redfish_password: "{{ inspector_redfish_password }}" ironic_capabilities: "boot_option:local,boot_mode:uefi" -The second group type will be the hardware type that a baremetal node belongs -to, These variables will be in the inventory too in ‘inventory/group_vars/ +The second group type is the hardware type that a baremetal node belongs +to, These variables are in the inventory in ‘inventory/group_vars/ baremetal-’ Specific variables to the hardware type include the resource_class which is used to associate the hardware type to the flavour in Nova we defined earlier -in Openstack Config. +in OpenStack Config. .. code-block:: yaml @@ -228,7 +228,7 @@ in Openstack Config. ironic_redfish_system_id: "example_system_id" ironic_redfish_verify_ca: "{{ inspector_rule_var_redfish_verify_ca }}" -The third group type will be the rack where the node is installed. This is the +The third group type is the rack where the node is installed. This is the group in which the rack specific networking configuration is defined here and where the BMC address is entered as a host variable for each baremetal node. Nodes can now be entered directly into the hosts file as part of this group. @@ -262,34 +262,34 @@ invoking the Kayobe commmand .. code-block:: console - (kayobe) $ kayobe baremetal compute register + kayobe baremetal compute register All nodes that were not defined in Ironic previously should’ve been enrolled following this playbook and should now be in ‘manageable’ state if Ironic was able to reach the BMC of the node. We will need to inspect the baremetal nodes to gather information about their hardware to prepare for deployment. Kayobe -provides an inspection workflow and can be run using: +provides an inspection command and can be run using: .. code-block:: console - (kayobe) $ kayobe baremetal compute inspect + kayobe baremetal compute inspect Inspection would require PXE booting the nodes into IPA. If the nodes were able to PXE boot properly they would now be in ‘manageable’ state again. 
If an error developed during PXE booting, the nodes will now be in ‘inspect failed’ state and issues preventing the node from booting or returning introspection data will need to be resolved before continuing. If the nodes did inspect properly, -they can be cleaned and made available to Nova by running the provide workflow. +they can be cleaned and made available to Nova by running the provide command. .. code-block:: console - (kayobe) $ kayobe baremetal compute provide + kayobe baremetal compute provide Baremetal hypervisors ===================== Nodes that will not be dedicated as baremetal tenant nodes can be converted -into hypervisors as required. StackHPC Kayobe configuration provides a workflow +into hypervisors as required. StackHPC Kayobe configuration provides a command to provision baremetal tenants with the purpose of converted these nodes to hypervisors. To begin the process of converting nodes we will need to define a child group of the rack which will contain baremetal nodes dedicated to compute @@ -314,10 +314,10 @@ hosts. rack1-compute The rack1-compute group as shown above is also associated with the Kayobe -compute group in order for Kayobe to run the compute Kolla workflows on these -nodes during service deployment. +compute group in order for Kayobe to deploy compute services during Kolla +service deployment. -You will also need to setup the Kayobe network configuration for the rack1 +You will also need to set up the Kayobe network configuration for the rack1 group. In networks.yml you should create an admin network for the rack1 group, this should consist of the correct CIDR for the rack being deployed. The configuration should resemble below in networks.yml: @@ -328,7 +328,7 @@ The configuration should resemble below in networks.yml: physical_rack1_admin_oc_net_gateway: “172.16.208.129” physical_rack1_admin_net_defroute: true -You will also need to configure a neutron network for racks to deploy instances +You will also need to configure a Neutron network for racks to deploy instances on, we can configure this in openstack-config as before. We will need to define this network and associate a subnet for it for each rack we want to enroll in Ironic. @@ -356,8 +356,8 @@ Ironic. allocation_pool_end: "172.16.208.130" The subnet configuration largely resembles the Kayobe network configuration, -however we do not need to define an allocation pool or enable dhcp as we will -be associating neutron ports with our hypervisor instances per IP address to +however we do not need to define an allocation pool or enable DHCP as we will +be associating Neutron ports with our hypervisor instances per IP address to ensure they match up properly. Now we should ensure the network interfaces are properly configured for the @@ -379,9 +379,9 @@ for rack1 and the kayobe internal API network and be defined in the group_vars. internal_net_interface: "br0.{{ internal_net_vlan }}" We should also ensure some variables are configured properly for our group, -such as the hypervisor image. These variables can be defined anywhere in -group_vars, we can place them in the ironic-vars file we used before for -baremetal node registration. +such as the hypervisor image. These variables can be defined in group_vars, +we can place them in the ironic-vars file we used before for baremetal node +registration. .. code-block:: yaml @@ -397,7 +397,7 @@ baremetal node registration. 
project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}" With these variables defined we can now begin deploying the baremetal nodes as -instances, to begin we invoke the deploy-baremetal-instance ansible playbook. +instances, to begin we invoke the deploy-baremetal-instance Ansible playbook. .. code-block:: console @@ -418,48 +418,43 @@ Neutron port configured with the address of the baremetal node admin network. The baremetal hypervisors will then be imaged and deployed associated with that Neutron port. You should ensure that all nodes are correctly associated with the right baremetal instance, you can do this by running a baremetal node show -on any given hypervisor node and comparing the server uuid to the metadata on +on any given hypervisor node and comparing the server UUID to the metadata on the Nova instance. Once the nodes are deployed, we can use Kayobe to configure them as compute -hosts, running kayobe overcloud host configure on these nodes will ensure that -all networking, package and various other host configurations are setup +hosts. More information about Kayobe host configuration is available in the +:kayobe-doc: `upstream Kayobe documentation `. .. code-block:: console kayobe overcloud host configure --limit baremetal- Following host configuration we can begin deploying OpenStack services to the -baremetal hypervisors by invoking kayobe overcloud service deploy. Nova -services will be deployed to the baremetal hosts. - -.. code-block:: console - - kayobe overcloud service deploy --kolla-limit baremetal- +baremetal hypervisors by invoking `kayobe overcloud service deploy`. Un-enrolling hypervisors ======================== To convert baremetal hypervisors into regular baremetal compute instances you -will need to drain the hypervisor of all running compute instances, you should -first invoke the nova-compute-disable playbook to ensure all Nova services on +will need to drain the hypervisor of all running compute instances, First invoke +the ``nova-compute-disable.yml`` Ansible playbook to ensure all Nova services on the baremetal node are disabled and compute instances will not be allocated to this node. .. code-block:: console - (kayobe) $ kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/nova-compute-disable.yml + kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/nova-compute-disable.yml Now the Nova services are disabled you should also ensure any existing compute instances are moved elsewhere by invoking the nova-compute-drain playbook .. code-block:: console - (kayobe) $ kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/nova-compute-drain.yml + kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/nova-compute-drain.yml -Now the node has no instances allocated to it you can delete the instance using -the OpenStack CLI and the node will be moved back to ``available`` state. +Now the node has no instances allocated to it you can delete the baremetal instance +using the OpenStack CLI and the node is moved back to ``available`` state. .. code-block:: console - (os-venv) $ openstack server delete ... + openstack server delete ... 
diff --git a/etc/kayobe/ansible/deploy-baremetal-instance.yml b/etc/kayobe/ansible/deploy-baremetal-instance.yml index 145f26dd0..129cc35ea 100644 --- a/etc/kayobe/ansible/deploy-baremetal-instance.yml +++ b/etc/kayobe/ansible/deploy-baremetal-instance.yml @@ -16,12 +16,6 @@ changed_when: false delegate_to: localhost - - name: Set baremetal node JSON variable - ansible.builtin.set_fact: - node_show_json: "{{ node_show.stdout | to_json | from_json }}" - failed_when: false - changed_when: false - - block: - name: Slurp network allocations ansible.builtin.slurp: From 57fe544b67e1e1a0d9d69deb216241290f37d94f Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Wed, 5 Feb 2025 14:16:58 +0000 Subject: [PATCH 09/13] Fix deploy playbook --- etc/kayobe/ansible/deploy-baremetal-instance.yml | 7 +------ .../notes/baremetal-hypervisors-fc1091bd507e809b.yaml | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/etc/kayobe/ansible/deploy-baremetal-instance.yml b/etc/kayobe/ansible/deploy-baremetal-instance.yml index 129cc35ea..5aed449c3 100644 --- a/etc/kayobe/ansible/deploy-baremetal-instance.yml +++ b/etc/kayobe/ansible/deploy-baremetal-instance.yml @@ -17,14 +17,9 @@ delegate_to: localhost - block: - - name: Slurp network allocations - ansible.builtin.slurp: - path: "{{ network_allocation_path }}" - register: net_alc - - name: Read network allocations ansible.builtin.set_fact: - net_alc_yaml: "{{ net_alc['content'] | b64decode | from_yaml }}" + net_alc_yaml: "{{ admin_oc_net_ips }}" - name: Write node IP address to allocations ansible.builtin.set_fact: diff --git a/releasenotes/notes/baremetal-hypervisors-fc1091bd507e809b.yaml b/releasenotes/notes/baremetal-hypervisors-fc1091bd507e809b.yaml index 2da3fc0a6..d30b014a4 100644 --- a/releasenotes/notes/baremetal-hypervisors-fc1091bd507e809b.yaml +++ b/releasenotes/notes/baremetal-hypervisors-fc1091bd507e809b.yaml @@ -2,4 +2,4 @@ features: - | Adds Ironic documentation to SKC and a playbook to provision - instances on baremetal nodes. \ No newline at end of file + instances on baremetal nodes. 
From 8f2fac38c912221f2769d588d0787fa6adab6884 Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Wed, 5 Feb 2025 14:23:44 +0000 Subject: [PATCH 10/13] Fix linter --- .../ansible/deploy-baremetal-instance.yml | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/etc/kayobe/ansible/deploy-baremetal-instance.yml b/etc/kayobe/ansible/deploy-baremetal-instance.yml index 5aed449c3..c77704a04 100644 --- a/etc/kayobe/ansible/deploy-baremetal-instance.yml +++ b/etc/kayobe/ansible/deploy-baremetal-instance.yml @@ -9,14 +9,16 @@ network_allocation_path: "{{ kayobe_env_config_path }}/network-allocation.yml" tasks: - name: Show baremetal node - ansible.builtin.shell: + ansible.builtin.command: cmd: "openstack baremetal node show {{ inventory_hostname }} -f json" register: node_show failed_when: false changed_when: false delegate_to: localhost - - block: + - name: Update network allocations + when: '"HTTP 404" not in node_show.stderr' + block: - name: Read network allocations ansible.builtin.set_fact: net_alc_yaml: "{{ admin_oc_net_ips }}" @@ -31,8 +33,6 @@ ansible.builtin.copy: content: "{{ new_net_alc | to_nice_yaml(indent=2) }}" dest: "{{ network_allocation_path }}" - when: - - '"HTTP 404" not in node_show.stderr' - name: Deploy baremetal compute nodes as hypervisors hosts: compute @@ -40,7 +40,7 @@ connection: local tasks: - name: Show baremetal node - ansible.builtin.shell: + ansible.builtin.command: cmd: "openstack baremetal node show {{ inventory_hostname }} -f json" register: node_show failed_when: false @@ -53,8 +53,12 @@ failed_when: false changed_when: false - - block: - - name: Create port + - name: Deploy baremetal instance + when: + - '"HTTP 404" not in node_show.stderr' + - '"available" in node_show_json.provision_state' + block: + - name: Configure network for baremetal instance openstack.cloud.port: state: present name: "{{ inventory_hostname }}" @@ -83,6 +87,3 @@ ironic_node: "{{ inventory_hostname }}" delegate_to: localhost register: server - when: - - '"HTTP 404" not in node_show.stderr' - - '"available" in node_show_json.provision_state' From 40075e8ece1e031bdc65dda5d1c867e30229008b Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Wed, 5 Feb 2025 14:43:07 +0000 Subject: [PATCH 11/13] port --- etc/kayobe/ansible/deploy-baremetal-instance.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/etc/kayobe/ansible/deploy-baremetal-instance.yml b/etc/kayobe/ansible/deploy-baremetal-instance.yml index c77704a04..fdf2843bf 100644 --- a/etc/kayobe/ansible/deploy-baremetal-instance.yml +++ b/etc/kayobe/ansible/deploy-baremetal-instance.yml @@ -58,7 +58,7 @@ - '"HTTP 404" not in node_show.stderr' - '"available" in node_show_json.provision_state' block: - - name: Configure network for baremetal instance + - name: Create port openstack.cloud.port: state: present name: "{{ inventory_hostname }}" @@ -68,14 +68,14 @@ - ip_address: "{{ ansible_host }}" vnic_type: baremetal delegate_to: localhost - register: port + register: osport - name: Deploy hypervisor image openstack.cloud.server: state: present name: "{{ inventory_hostname }}" nics: - - port-id: "{{ port.port.id }}" + - port-id: "{{ osport.port.id }}" auth: "{{ auth }}" availability_zone: "{{ availability_zone }}::{{ node_show_json.uuid }}" image: "{{ hypervisor_image }}" From 8f41ec2dcc0796d048b2c0f7c3c294b20a1414cd Mon Sep 17 00:00:00 2001 From: Jake Hutchinson <39007539+assumptionsandg@users.noreply.github.com> Date: Wed, 9 Apr 2025 14:21:25 +0100 Subject: [PATCH 12/13] Update 
etc/kayobe/ansible/deploy-baremetal-instance.yml Co-authored-by: Alex-Welsh <112560678+Alex-Welsh@users.noreply.github.com> --- etc/kayobe/ansible/deploy-baremetal-instance.yml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/etc/kayobe/ansible/deploy-baremetal-instance.yml b/etc/kayobe/ansible/deploy-baremetal-instance.yml index fdf2843bf..db612e72f 100644 --- a/etc/kayobe/ansible/deploy-baremetal-instance.yml +++ b/etc/kayobe/ansible/deploy-baremetal-instance.yml @@ -19,17 +19,10 @@ - name: Update network allocations when: '"HTTP 404" not in node_show.stderr' block: - - name: Read network allocations - ansible.builtin.set_fact: - net_alc_yaml: "{{ admin_oc_net_ips }}" - - - name: Write node IP address to allocations - ansible.builtin.set_fact: - new_net_alc: "{{ net_alc_yaml | combine(new_ips, recursive=True) }}" + - name: Write new network allocations vars: new_ips: "{ '{{ admin_oc_net_name }}_ips': { '{{ inventory_hostname }}': '{{ ansible_host }}' } }" - - - name: Write new network allocations + new_net_alc: "{{ admin_oc_net_ips | combine(new_ips, recursive=True) }}" ansible.builtin.copy: content: "{{ new_net_alc | to_nice_yaml(indent=2) }}" dest: "{{ network_allocation_path }}" From 42d1bbbc102df7b05af1dc2f81ab2046dcee99a5 Mon Sep 17 00:00:00 2001 From: Jake Hutchinson Date: Wed, 9 Apr 2025 14:32:15 +0100 Subject: [PATCH 13/13] Add variable to skip network allocation population --- .../ansible/deploy-baremetal-instance.yml | 19 ++++++++++--------- etc/kayobe/stackhpc.yml | 4 ++++ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/etc/kayobe/ansible/deploy-baremetal-instance.yml b/etc/kayobe/ansible/deploy-baremetal-instance.yml index db612e72f..e925318b3 100644 --- a/etc/kayobe/ansible/deploy-baremetal-instance.yml +++ b/etc/kayobe/ansible/deploy-baremetal-instance.yml @@ -8,17 +8,17 @@ vars: network_allocation_path: "{{ kayobe_env_config_path }}/network-allocation.yml" tasks: - - name: Show baremetal node - ansible.builtin.command: - cmd: "openstack baremetal node show {{ inventory_hostname }} -f json" - register: node_show - failed_when: false - changed_when: false - delegate_to: localhost - - name: Update network allocations - when: '"HTTP 404" not in node_show.stderr' + when: stackhpc_populate_baremetal_network_allocations block: + - name: Show baremetal node + ansible.builtin.command: + cmd: "openstack baremetal node show {{ inventory_hostname }} -f json" + register: node_show + failed_when: false + changed_when: false + delegate_to: localhost + - name: Write new network allocations vars: new_ips: "{ '{{ admin_oc_net_name }}_ips': { '{{ inventory_hostname }}': '{{ ansible_host }}' } }" @@ -26,6 +26,7 @@ ansible.builtin.copy: content: "{{ new_net_alc | to_nice_yaml(indent=2) }}" dest: "{{ network_allocation_path }}" + when: '"HTTP 404" not in node_show.stderr' - name: Deploy baremetal compute nodes as hypervisors hosts: compute diff --git a/etc/kayobe/stackhpc.yml b/etc/kayobe/stackhpc.yml index 422c6bba9..3f51f5bd8 100644 --- a/etc/kayobe/stackhpc.yml +++ b/etc/kayobe/stackhpc.yml @@ -175,6 +175,10 @@ stackhpc_docker_registry_password: "{{ pulp_password }}" # Whether or not to run CIS benchmark hardening playbooks. Default is false. #stackhpc_enable_cis_benchmark_hardening_hook: +# Whether to auto populate the admin network allocations for baremetal hypervisors. +# Default is true. +stackhpc_populate_baremetal_network_allocations: true + ############################################################################### # Octavia Amphora image