From c5983af4ad63fcc3eb1b1a9a9e26f28ac5aa9ae9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harald=20Jens=C3=A5s?= Date: Thu, 9 Oct 2025 23:11:30 +0200 Subject: [PATCH] WIP - Secret rotate scenario MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Harald Jensås --- scenarios/secret-rotate/README.md | 302 ++++ scenarios/secret-rotate/automation-vars.yml | 156 ++ scenarios/secret-rotate/bootstrap_vars.yml | 60 + scenarios/secret-rotate/heat_template.yaml | 1276 +++++++++++++++++ .../control-plane/control-plane.yaml | 922 ++++++++++++ .../manifests/control-plane/nncp/nncp.yaml | 441 ++++++ .../secret-rotate/manifests/dataplane.yaml | 9 + .../secret-rotate/manifests/edpm/edpm.yaml | 134 ++ .../manifests/networker/networker.yaml | 9 + .../manifests/networker/nodeset/nodeset.yaml | 141 ++ .../manifests/update/update-ovn.yaml | 12 + .../manifests/update/update-reboot.yaml | 15 + .../manifests/update/update-services.yaml | 15 + .../test-operator/automation-vars.yml | 393 +++++ .../test-operator/scripts/create-users.sh | 110 ++ .../test-operator/tempest-tests.yml | 226 +++ zuul.d/jobs.yaml | 63 + zuul.d/projects.yaml | 1 + 18 files changed, 4285 insertions(+) create mode 100644 scenarios/secret-rotate/README.md create mode 100644 scenarios/secret-rotate/automation-vars.yml create mode 100644 scenarios/secret-rotate/bootstrap_vars.yml create mode 100644 scenarios/secret-rotate/heat_template.yaml create mode 100644 scenarios/secret-rotate/manifests/control-plane/control-plane.yaml create mode 100644 scenarios/secret-rotate/manifests/control-plane/nncp/nncp.yaml create mode 100644 scenarios/secret-rotate/manifests/dataplane.yaml create mode 100644 scenarios/secret-rotate/manifests/edpm/edpm.yaml create mode 100644 scenarios/secret-rotate/manifests/networker/networker.yaml create mode 100644 scenarios/secret-rotate/manifests/networker/nodeset/nodeset.yaml create mode 100644 
scenarios/secret-rotate/manifests/update/update-ovn.yaml create mode 100644 scenarios/secret-rotate/manifests/update/update-reboot.yaml create mode 100644 scenarios/secret-rotate/manifests/update/update-services.yaml create mode 100644 scenarios/secret-rotate/test-operator/automation-vars.yml create mode 100755 scenarios/secret-rotate/test-operator/scripts/create-users.sh create mode 100644 scenarios/secret-rotate/test-operator/tempest-tests.yml diff --git a/scenarios/secret-rotate/README.md b/scenarios/secret-rotate/README.md new file mode 100644 index 00000000..5bd1b57f --- /dev/null +++ b/scenarios/secret-rotate/README.md @@ -0,0 +1,302 @@ +# secret-rotate Scenario + +## Overview + +A comprehensive secret-rotate scenario that demonstrates a full-featured OpenStack +deployment with advanced services. This scenario deploys a high-availability +3-master OpenShift cluster with a complete OpenStack service stack including +bare metal provisioning (Ironic), load balancing (Octavia), orchestration +(Heat), and telemetry services, representing a production-like OpenStack +environment. 
+ +## Architecture + +- **Controller**: Hotstack controller providing DNS, load balancing, and + orchestration services +- **OpenShift Masters**: 3-node OpenShift cluster for high availability control plane +- **Compute Nodes**: 2 pre-provisioned compute nodes running OpenStack services +- **Networker Nodes**: Dedicated network service nodes +- **Ironic Nodes**: 2 bare metal nodes for Ironic bare metal provisioning testing + +## Features + +- High-availability OpenShift cluster (3 masters) +- Complete OpenStack service stack with all major services +- Ironic bare metal provisioning service +- Octavia load balancing service +- Heat orchestration service +- Telemetry and monitoring services +- Pre-provisioned dataplane nodes +- Dedicated Ironic network for bare metal provisioning +- Multi-network setup with Octavia network +- Advanced service configuration and high availability + +## Networks + +- **machine-net**: 192.168.32.0/20 (OpenShift cluster network) +- **ctlplane-net**: 192.168.122.0/24 (OpenStack control plane) +- **internal-api-net**: 172.17.0.0/24 (OpenStack internal services) +- **storage-net**: 172.18.0.0/24 (Storage backend communication) +- **tenant-net**: 172.19.0.0/24 (Tenant network traffic) +- **octavia-net**: 172.23.0.0/24 (Load balancing service network) +- **ironic-net**: 172.20.1.0/24 (Bare metal provisioning network) + +## OpenStack Services + +This scenario deploys the most comprehensive OpenStack environment: + +### Core Services + +- **Keystone**: Identity service with high availability (3 replicas) +- **Nova**: Compute service with Ironic driver for bare metal +- **Neutron**: Networking service with OVN backend and advanced features +- **Glance**: Image service with Swift backend +- **Cinder**: Block storage service with LVM-iSCSI backend +- **Swift**: Object storage service + +### Advanced Services + +- **Ironic**: Bare metal provisioning service +- **Octavia**: Load balancing as a service +- **Heat**: Orchestration service for 
infrastructure automation +- **Barbican**: Key management service (available but disabled by default) +- **Manila**: Shared file systems service (available but disabled by default) + +### Supporting Services + +- **Galera**: MySQL database clusters (3 replicas each) +- **RabbitMQ**: Message queuing with cell architecture (3 replicas each) +- **Memcached**: Caching service (3 replicas) +- **OVN**: Open Virtual Network for SDN with high availability + +### Telemetry and Monitoring + +- **Ceilometer**: Data collection service +- **Aodh**: Alarming service +- **Prometheus**: Metrics storage and monitoring stack +- **Autoscaling**: Integration between Heat and Aodh + +## Node Configuration + +### OpenShift Masters + +Three masters provide high availability for the control plane: + +#### Master 0 + +- **Machine IP**: 192.168.34.10 +- **Ctlplane IP**: 192.168.122.10 +- **Storage**: LVMS + 3x Cinder volumes (20GB each) + +#### Master 1 + +- **Machine IP**: 192.168.34.11 +- **Ctlplane IP**: 192.168.122.11 +- **Storage**: LVMS + 3x Cinder volumes (20GB each) + +#### Master 2 + +- **Machine IP**: 192.168.34.12 +- **Ctlplane IP**: 192.168.122.12 +- **Storage**: LVMS + 3x Cinder volumes (20GB each) + +### Compute Nodes + +Pre-provisioned compute nodes running OpenStack services: + +#### Compute Node 0 + +- **Hostname**: edpm-compute-0 +- **IP Address**: 192.168.122.100 +- **Services**: Nova, Neutron, Libvirt, Telemetry + +#### Compute Node 1 + +- **Hostname**: edpm-compute-1 +- **IP Address**: 192.168.122.101 +- **Services**: Nova, Neutron, Libvirt, Telemetry + +### Networker Nodes + +Dedicated nodes for network services (deployment configuration available) + +### Ironic Infrastructure + +Dedicated bare metal provisioning infrastructure: + +#### Ironic Node 0 + +- **Network**: Ironic network (172.20.1.0/24) +- **Purpose**: Bare metal provisioning testing +- **Configuration**: Virtual media boot capable + +#### Ironic Node 1 + +- **Network**: Ironic network (172.20.1.0/24) +- 
**Purpose**: Bare metal provisioning testing +- **Configuration**: Virtual media boot capable + +## Service Highlights + +### Ironic Bare Metal Service + +- **Driver**: redfish for virtual BMC +- **Networks**: Dedicated Ironic network for provisioning +- **Conductor**: Custom configuration with power state timeout +- **Inspector**: Introspection capabilities (configurable) +- **Integration**: Nova compute-ironic driver for bare metal instances + +### Octavia Load Balancing + +- **Management Network**: Dedicated load balancer management network +- **Availability Zones**: Multi-zone support (zone-1) +- **Amphora Images**: Custom amphora container images +- **HA Configuration**: Health manager, housekeeping, and worker services + +### Heat Orchestration + +- **Template Support**: Full Heat template orchestration +- **Autoscaling**: Integration with Aodh for autoscaling policies +- **Public Endpoints**: Accessible Heat API endpoints +- **Client Configuration**: Configured for public endpoint access + +### Telemetry Stack + +- **Metrics Collection**: Ceilometer data collection +- **Alarming**: Aodh alarm evaluation +- **Storage**: Prometheus-based metric storage +- **Monitoring**: Complete monitoring stack with alerting +- **Persistence**: 10GB persistent storage with 24-hour retention + +## Network Configuration + +### Advanced VLAN Setup + +- **VLAN 20**: Internal API (172.17.0.0/24) +- **VLAN 21**: Storage (172.18.0.0/24) +- **VLAN 22**: Tenant (172.19.0.0/24) +- **VLAN 23**: Octavia (172.23.0.0/24) + +### Load Balancing Architecture + +- **MetalLB**: Layer 2 load balancing for OpenStack services +- **Multiple Pools**: Separate IP pools for different service networks +- **Service HA**: All major services have load balancer configurations +- **NIC Mappings**: Multiple bridge mappings (ocpbr, ironic, octbr) + +## Storage Configuration + +### Cinder Storage + +- **Backend**: LVM-iSCSI with dedicated target IPs +- **Volume Group**: cinder-volumes on dedicated nodes +- 
**Target Protocol**: iSCSI with lioadm helper +- **Multi-path**: Secondary IP addresses for redundancy + +### Local Storage + +- **TopoLVM**: Local volume management on masters +- **LVMS**: Logical Volume Manager Storage +- **Database Storage**: Persistent storage for Galera clusters (5GB each) +- **Glance Storage**: Local storage for image service (10GB) + +## Usage + +```bash +# Deploy the scenario +ansible-playbook -i inventory.yml bootstrap.yml \ + -e @scenarios/secret-rotate/bootstrap_vars.yml \ + -e @~/cloud-secrets.yaml + +# Run tests +ansible-playbook -i inventory.yml 06-test-operator.yml \ + -e @scenarios/secret-rotate/bootstrap_vars.yml \ + -e @~/cloud-secrets.yaml +``` + +## Deployment Process + +### Multi-Phase Deployment + +The scenario uses a sophisticated deployment process: + +1. **Infrastructure Phase**: Deploy Heat stack with all nodes +2. **Control Plane Phase**: Deploy OpenStack control plane services +3. **Dataplane Phase**: Configure and deploy compute and networker nodes +4. 
**Integration Phase**: Enable Ironic, Octavia, and telemetry services + +### Service Dependencies + +- **Database Services**: Galera clusters deployed first +- **Core Services**: Keystone, Nova, Neutron deployed in sequence +- **Advanced Services**: Ironic, Octavia, Heat deployed after core services +- **Telemetry**: Monitoring stack deployed last + +## Testing Capabilities + +### Comprehensive Service Testing + +- **Bare Metal Provisioning**: Ironic node enrollment and provisioning +- **Load Balancing**: Octavia load balancer creation and management +- **Orchestration**: Heat stack deployment and autoscaling +- **Telemetry**: Metrics collection and alarming +- **Multi-tenant**: Complete tenant isolation and networking + +### Advanced Features + +- **High Availability**: Service redundancy testing +- **Storage Multipath**: iSCSI multipath configuration testing +- **Network Segmentation**: VLAN and bridge mapping validation +- **Service Integration**: Cross-service functionality testing + +## Update Support + +The scenario includes comprehensive update workflows: + +- **Service Updates**: All OpenStack services support updates +- **OVN Updates**: Network service updates across compute and networker nodes +- **Telemetry Updates**: Monitoring stack updates +- **Ironic Updates**: Bare metal service updates + +## Requirements + +- OpenStack cloud with substantial resources (8+ instances) +- Flavors: hotstack.small (controller), hotstack.xxlarge (masters), + hotstack.large (compute), hotstack.medium (Ironic nodes) +- Images: hotstack-controller, ipxe-boot-usb, CentOS-Stream-GenericCloud-9, + sushy-tools-blank-image +- Support for trunk ports, VLANs, and multiple networks +- Additional storage volumes for Cinder services +- Pull secret for OpenShift installation +- Network connectivity for all defined subnets +- **Resource Intensive**: Requires significant compute and storage resources + +## Notable Features + +- **Production-Like**: Comprehensive service stack suitable 
for production evaluation +- **High Availability**: 3-master OpenShift with service redundancy +- **Bare Metal Integration**: Full Ironic service stack for bare metal provisioning +- **Load Balancing**: Octavia service for advanced load balancing scenarios +- **Orchestration**: Heat service for infrastructure automation +- **Telemetry**: Complete monitoring and alarming infrastructure +- **Multi-Network**: Advanced network architecture with service-specific networks +- **Educational**: Ideal for learning complete OpenStack service integration + +## Configuration Files + +- `bootstrap_vars.yml`: Infrastructure and OpenShift configuration +- `automation-vars.yml`: Hotloop deployment stages +- `heat_template.yaml`: Comprehensive OpenStack infrastructure template +- `manifests/control-plane/control-plane.yaml`: Complete OpenStack service configuration +- `manifests/control-plane/nncp/nncp.yaml`: Network configuration for all three masters +- `manifests/dataplane.yaml`: Compute node deployment +- `manifests/edpm/edpm.yaml`: EDPM nodeset configuration +- `manifests/networker/networker.yaml`: Network node deployment configuration +- `manifests/update/`: Update workflow definitions +- `test-operator/automation-vars.yml`: Test automation configuration +- `test-operator/scripts/create-users.sh`: Test user creation script +- `test-operator/tempest-tests.yml`: Tempest test specifications + +This scenario provides the most comprehensive OpenStack environment available in +Hotstack, suitable for advanced testing, development, and educational purposes +where a complete understanding of OpenStack service integration is required. 
diff --git a/scenarios/secret-rotate/automation-vars.yml b/scenarios/secret-rotate/automation-vars.yml new file mode 100644 index 00000000..1d3d14b5 --- /dev/null +++ b/scenarios/secret-rotate/automation-vars.yml @@ -0,0 +1,156 @@ +--- +stages: + - name: TopoLVM Dependencies + stages: >- + {{ + lookup("ansible.builtin.template", + "common/stages/topolvm-deps-stages.yaml.j2") + }} + + - name: Dependencies + stages: >- + {{ + lookup("ansible.builtin.template", + "common/stages/deps-stages.yaml.j2") + }} + + - name: Cinder LVM + stages: >- + {{ + lookup("ansible.builtin.file", + "common/stages/cinder-lvm-label-stages.yaml") + }} + + - name: TopoLVM + stages: >- + {{ + lookup("ansible.builtin.template", + "common/stages/topolvm-stages.yaml.j2") + }} + + - name: OLM Openstack + stages: >- + {{ + lookup("ansible.builtin.template", + "common/stages/olm-openstack-stages.yaml.j2") + }} + + - name: NodeNetworkConfigurationPolicy (nncp) + manifest: manifests/control-plane/nncp/nncp.yaml + wait_conditions: + - >- + oc wait -n openstack nncp -l osp/nncm-config-type=standard + --for jsonpath='{.status.conditions[0].reason}'=SuccessfullyConfigured + --timeout=180s + + - name: OpenstackControlPlane + manifest: manifests/control-plane/control-plane.yaml + wait_conditions: + - >- + oc wait -n openstack openstackcontrolplane controlplane + --for condition=Ready --timeout=30m + + - name: Dataplane SSH key secret + shell: >- + oc create -n openstack secret generic dataplane-ansible-ssh-private-key-secret + --save-config --dry-run=client + --from-file=ssh-privatekey=/home/zuul/.ssh/id_rsa + --from-file=ssh-publickey=/home/zuul/.ssh/id_rsa.pub + --type=Opaque -o yaml | oc apply -f - + wait_conditions: + - >- + oc wait -n openstack secret dataplane-ansible-ssh-private-key-secret + --for jsonpath='{.metadata.name}'=dataplane-ansible-ssh-private-key-secret + --timeout=30s + + - name: Nova migration SSH key secret + shell: >- + oc create -n openstack secret generic nova-migration-ssh-key + 
--save-config --dry-run=client + --from-file=ssh-privatekey=/home/zuul/.ssh/id_nova_migrate + --from-file=ssh-publickey=/home/zuul/.ssh/id_nova_migrate.pub + --type=Opaque -o yaml | oc apply -f - + wait_conditions: + - >- + oc wait -n openstack secret nova-migration-ssh-key + --for jsonpath='{.metadata.name}'=nova-migration-ssh-key + --timeout=30s + + - name: Networker nodeset + manifest: manifests/networker/nodeset/nodeset.yaml + patches: "{{ hotstack_default_nodeset_patches }}" + + - name: EDPM nodeset + manifest: manifests/edpm/edpm.yaml + patches: "{{ hotstack_default_nodeset_patches }}" + + - name: Wait for nodesets (Networker and EDPM) + wait_conditions: + - >- + oc wait -n openstack openstackdataplanenodeset networkers + --for condition=SetupReady --timeout=10m + - >- + oc -n openstack wait openstackdataplanenodeset edpm + --for condition=SetupReady --timeout=10m + + - name: Networker deployment + manifest: manifests/networker/networker.yaml + + - name: EDPM deployment + manifest: manifests/dataplane.yaml + + - name: Wait for deployments (Networker and EDPM) + wait_conditions: + - oc wait -n openstack jobs.batch bootstrap-networker-deploy-networkers --for condition=Complete --timeout=10m + - oc wait -n openstack jobs.batch bootstrap-dataplane-edpm --for condition=Complete --timeout=10m + - oc wait -n openstack jobs.batch configure-network-networker-deploy-networkers --for condition=Complete --timeout=5m + - oc wait -n openstack jobs.batch configure-network-dataplane-edpm --for condition=Complete --timeout=5m + - oc wait -n openstack jobs.batch validate-network-networker-deploy-networkers --for condition=Complete --timeout=1m + - oc wait -n openstack jobs.batch validate-network-dataplane-edpm --for condition=Complete --timeout=1m + - oc wait -n openstack jobs.batch install-os-networker-deploy-networkers --for condition=Complete --timeout=5m + - oc wait -n openstack jobs.batch install-os-dataplane-edpm --for condition=Complete --timeout=5m + - oc wait -n 
openstack jobs.batch configure-os-networker-deploy-networkers --for condition=Complete --timeout=3m + - oc wait -n openstack jobs.batch configure-os-dataplane-edpm --for condition=Complete --timeout=3m + - oc wait -n openstack jobs.batch ssh-known-hosts-dataplane --for condition=Complete --timeout=1m + - oc wait -n openstack jobs.batch run-os-networker-deploy-networkers --for condition=Complete --timeout=3m + - oc wait -n openstack jobs.batch run-os-dataplane-edpm --for condition=Complete --timeout=3m + - oc wait -n openstack jobs.batch reboot-os-networker-deploy-networkers --for condition=Complete --timeout=5m + - oc wait -n openstack jobs.batch reboot-os-dataplane-edpm --for condition=Complete --timeout=5m + - oc wait -n openstack jobs.batch install-certs-networker-deploy-networkers --for condition=Complete --timeout=5m + - oc wait -n openstack jobs.batch install-certs-dataplane-edpm --for condition=Complete --timeout=5m + - oc wait -n openstack jobs.batch ovn-networker-deploy-networkers --for condition=Complete --timeout=5m + - oc wait -n openstack jobs.batch ovn-dataplane-edpm --for condition=Complete --timeout=5m + - oc wait -n openstack jobs.batch neutron-metadata-networker-deploy-networkers --for condition=Complete --timeout=10m + - oc wait -n openstack openstackdataplanedeployment networker-deploy --for condition=Ready --timeout=10m + - oc wait -n openstack jobs.batch neutron-metadata-dataplane-edpm --for condition=Complete --timeout=10m + - oc wait -n openstack jobs.batch libvirt-dataplane-edpm --for condition=Complete --timeout=20m + - oc wait -n openstack jobs.batch nova-dataplane-edpm --for condition=Complete --timeout=20m + - oc wait -n openstack jobs.batch telemetry-dataplane-edpm --for condition=Complete --timeout=10m + - oc -n openstack wait openstackdataplanedeployment dataplane --for condition=Ready --timeout=10m + - timeout --foreground 15m hotstack-nova-discover-hosts --namespace openstack --num-computes 2 + + - name: "Minor update :: 
openstack-operators OLM" + stages: >- + {{ + lookup('ansible.builtin.template', + 'common/stages/openstack-olm-update.yaml.j2') + }} + run_conditions: + - >- + {{ + openstack_operators_update is defined and + openstack_operators_update | bool + }} + + - name: "Minor update :: controlplane and dataplane" + stages: >- + {{ + lookup('ansible.builtin.template', + 'common/stages/openstack-update.yaml.j2') + }} + run_conditions: + - >- + {{ + openstack_update is defined and + openstack_update | bool + }} diff --git a/scenarios/secret-rotate/bootstrap_vars.yml b/scenarios/secret-rotate/bootstrap_vars.yml new file mode 100644 index 00000000..ec4e8210 --- /dev/null +++ b/scenarios/secret-rotate/bootstrap_vars.yml @@ -0,0 +1,60 @@ +--- +os_cloud: default +os_floating_network: public +os_router_external_network: public + +scenario: secret-rotate +scenario_dir: scenarios +stack_template_path: "{{ scenario_dir }}/{{ scenario }}/heat_template.yaml" +automation_vars_file: "{{ scenario_dir }}/{{ scenario }}/automation-vars.yml" +test_operator_automation_vars_file: "{{ scenario_dir }}/{{ scenario }}/test-operator/automation-vars.yml" + +openstack_operators_image: quay.io/openstack-k8s-operators/openstack-operator-index:18.0-fr4-latest +openstack_operator_channel: alpha +openstack_operator_starting_csv: null + +openshift_version: stable-4.18 + +ntp_servers: [] +dns_servers: + - 8.8.8.8 + - 8.8.4.4 + +pull_secret_file: ~/pull-secret.txt + +ovn_k8s_gateway_config_host_routing: true +enable_iscsi: true +enable_multipath: true + +cinder_volume_pvs: + - /dev/vdc + - /dev/vdd + - /dev/vde + +stack_name: "hs-{{ scenario }}-{{ zuul.build[:8] | default('no-zuul') }}" +stack_parameters: + # On misconfigured clouds, uncomment these to avoid issues. 
+ # Ref: https://access.redhat.com/solutions/7059376 + # net_value_specs: + # mtu: 1442 + dns_servers: "{{ dns_servers }}" + ntp_servers: "{{ ntp_servers }}" + controller_ssh_pub_key: "{{ controller_ssh_pub_key | default('') }}" + router_external_network: "{{ os_router_external_network | default('public') }}" + floating_ip_network: "{{ os_floating_network | default('public') }}" + controller_params: + image: hotstack-controller + flavor: hotstack.small + ocp_master_params: + image: ipxe-boot-usb + flavor: hotstack.xxlarge + compute_params: + image: CentOS-Stream-GenericCloud-9 + flavor: hotstack.large + networker_params: + image: CentOS-Stream-GenericCloud-9 + flavor: hotstack.small + ironic_params: + image: CentOS-Stream-GenericCloud-9 + cd_image: sushy-tools-blank-image + flavor: hotstack.medium diff --git a/scenarios/secret-rotate/heat_template.yaml b/scenarios/secret-rotate/heat_template.yaml new file mode 100644 index 00000000..f713d940 --- /dev/null +++ b/scenarios/secret-rotate/heat_template.yaml @@ -0,0 +1,1276 @@ +--- +heat_template_version: rocky + +description: > + Heat template to set up infrastructure for openstack-k8s-operators secret-rotate example architecture + +parameters: + dns_servers: + type: comma_delimited_list + default: + - 8.8.8.8 + - 8.8.4.4 + ntp_servers: + type: comma_delimited_list + default: [] + controller_ssh_pub_key: + type: string + dataplane_ssh_pub_key: + type: string + router_external_network: + type: string + default: public + floating_ip_network: + type: string + default: public + net_value_specs: + type: json + default: {} + + controller_params: + type: json + default: + image: hotstack-controller + flavor: hotstack.small + ocp_master_params: + type: json + default: + image: ipxe-boot-usb + flavor: hotstack.xxlarge + ocp_worker_params: + type: json + default: + image: ipxe-boot-usb + flavor: hotstack.xxlarge + compute_params: + type: json + default: + image: CentOS-Stream-GenericCloud-9 + flavor: hotstack.large + 
networker_params: + type: json + default: + image: CentOS-Stream-GenericCloud-9 + flavor: hotstack.small + bmh_params: + type: json + default: + image: CentOS-Stream-GenericCloud-9 + cd_image: sushy-tools-blank-image + flavor: hotstack.medium + ironic_params: + type: json + default: + image: CentOS-Stream-GenericCloud-9 + cd_image: sushy-tools-blank-image + flavor: hotstack.medium + + +resources: + # + # Networks + # + machine-net: + type: OS::Neutron::Net + properties: + port_security_enabled: false + + ctlplane-net: + type: OS::Neutron::Net + properties: + port_security_enabled: false + + internal-api-net: + type: OS::Neutron::Net + properties: + port_security_enabled: false + + storage-net: + type: OS::Neutron::Net + properties: + port_security_enabled: false + + tenant-net: + type: OS::Neutron::Net + properties: + port_security_enabled: false + + octavia-net: + type: OS::Neutron::Net + properties: + port_security_enabled: false + + ironic-net: + type: OS::Neutron::Net + properties: + port_security_enabled: false + + # + # Subnets + # + machine-subnet: + type: OS::Neutron::Subnet + properties: + network: {get_resource: machine-net} + ip_version: 4 + cidr: 192.168.32.0/20 + enable_dhcp: true + dns_nameservers: + - 192.168.32.3 + + ctlplane-subnet: + type: OS::Neutron::Subnet + properties: + network: {get_resource: ctlplane-net} + ip_version: 4 + cidr: 192.168.122.0/24 + enable_dhcp: false + allocation_pools: [{start: 192.168.122.100, end: 192.168.122.150}] + dns_nameservers: + - 192.168.122.80 + + internal-api-subnet: + type: OS::Neutron::Subnet + properties: + network: {get_resource: internal-api-net} + ip_version: 4 + cidr: 172.17.0.0/24 + enable_dhcp: false + allocation_pools: [{start: 172.17.0.100, end: 172.17.0.150}] + + storage-subnet: + type: OS::Neutron::Subnet + properties: + network: {get_resource: storage-net} + ip_version: 4 + cidr: 172.18.0.0/24 + enable_dhcp: false + allocation_pools: [{start: 172.18.0.100, end: 172.18.0.150}] + + tenant-subnet: + 
type: OS::Neutron::Subnet + properties: + network: {get_resource: tenant-net} + ip_version: 4 + cidr: 172.19.0.0/24 + enable_dhcp: false + allocation_pools: [{start: 172.19.0.100, end: 172.19.0.150}] + + octavia-subnet: + type: OS::Neutron::Subnet + properties: + network: {get_resource: octavia-net} + ip_version: 4 + cidr: 172.23.0.0/24 + enable_dhcp: false + allocation_pools: [{start: 172.23.0.100, end: 172.23.0.150}] + + ironic-subnet: + type: OS::Neutron::Subnet + properties: + network: {get_resource: ironic-net} + ip_version: 4 + cidr: 172.20.1.0/24 + enable_dhcp: false + allocation_pools: [{start: 172.20.1.100, end: 172.20.1.150}] + + # + # Routers + # + router: + type: OS::Neutron::Router + properties: + admin_state_up: true + external_gateway_info: + network: {get_param: router_external_network} + + machine-net-router-interface: + type: OS::Neutron::RouterInterface + properties: + router: {get_resource: router} + subnet: {get_resource: machine-subnet} + + ctlplane-net-router-interface: + type: OS::Neutron::RouterInterface + properties: + router: {get_resource: router} + subnet: {get_resource: ctlplane-subnet} + + ironic-net-router-interface: + type: OS::Neutron::RouterInterface + properties: + router: {get_resource: router} + subnet: {get_resource: ironic-subnet} + + # + # Instances + # + controller_users: + type: OS::Heat::CloudConfig + properties: + cloud_config: + users: + - default + - name: zuul + gecos: "Zuul user" + sudo: ALL=(ALL) NOPASSWD:ALL + ssh_authorized_keys: + - {get_param: controller_ssh_pub_key} + + # Controller / installer + controller-write-files: + type: OS::Heat::CloudConfig + properties: + cloud_config: + write_files: + - path: /etc/dnsmasq.conf + content: | + # dnsmasq service config + # Include all files in /etc/dnsmasq.d except RPM backup files + conf-dir=/etc/dnsmasq.d,.rpmnew,.rpmsave,.rpmorig + no-resolv + owner: root:dnsmasq + - path: /etc/dnsmasq.d/forwarders.conf + content: + str_replace: + template: | + # DNS forwarders 
records + server=$dns1 + server=$dns2 + params: + $dns1: {get_param: [dns_servers, 0]} + $dns2: {get_param: [dns_servers, 1]} + owner: root:dnsmasq + - path: /etc/dnsmasq.d/host_records.conf + content: + str_replace: + template: | + # Host records + host-record=controller-0.openstack.lab,$controller0 + host-record=api.ocp.openstack.lab,$api + host-record=api-int.ocp.openstack.lab,$api_int + host-record=master-0.ocp.openstack.lab,$master0 + host-record=master-1.ocp.openstack.lab,$master1 + host-record=master-2.ocp.openstack.lab,$master2 + params: + $controller0: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]} + $api: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]} + $api_int: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]} + $master0: {get_attr: [master0-machine-port, fixed_ips, 0, ip_address]} + $master1: {get_attr: [master1-machine-port, fixed_ips, 0, ip_address]} + $master2: {get_attr: [master2-machine-port, fixed_ips, 0, ip_address]} + owner: root:dnsmasq + - path: /etc/dnsmasq.d/wildcard_records.conf + content: + str_replace: + template: | + # Wildcard records + address=/apps.ocp.openstack.lab/$addr + params: + $addr: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]} + owner: root:dnsmasq + - path: /etc/resolv.conf + content: | + nameserver 127.0.0.1 + owner: root:root + - path: /etc/NetworkManager/conf.d/98-rc-manager.conf + content: | + [main] + rc-manager=unmanaged + owner: root:root + - path: /etc/haproxy/haproxy.cfg + content: | + global + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon + defaults + mode http + log global + option dontlognull + option http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 + listen api-server-6443 + bind *:6443 + mode tcp + server master-0 
master-0.ocp.openstack.lab:6443 check inter 1s + server master-1 master-1.ocp.openstack.lab:6443 check inter 1s + server master-2 master-2.ocp.openstack.lab:6443 check inter 1s + listen machine-config-server-22623 + bind *:22623 + mode tcp + server master-0 master-0.ocp.openstack.lab:22623 check inter 1s + server master-1 master-1.ocp.openstack.lab:22623 check inter 1s + server master-2 master-2.ocp.openstack.lab:22623 check inter 1s + listen ingress-router-443 + bind *:443 + mode tcp + balance source + server master-0 master-0.ocp.openstack.lab:443 check inter 1s + server master-1 master-1.ocp.openstack.lab:443 check inter 1s + server master-2 master-2.ocp.openstack.lab:443 check inter 1s + listen ingress-router-80 + bind *:80 + mode tcp + balance source + server master-0 master-0.ocp.openstack.lab:80 check inter 1s + server master-1 master-1.ocp.openstack.lab:80 check inter 1s + server master-2 master-2.ocp.openstack.lab:80 check inter 1s + owner: root:root + + controller-runcmd: + type: OS::Heat::CloudConfig + properties: + cloud_config: + runcmd: + - ['systemctl', 'enable', 'dnsmasq.service'] + - ['systemctl', 'start', 'dnsmasq.service'] + - ['setenforce', 'permissive'] + - ['systemctl', 'enable', 'haproxy.service'] + - ['systemctl', 'start', 'haproxy.service'] + - ['sed', '-i', 's/Listen 80/Listen 8081/g', '/etc/httpd/conf/httpd.conf'] + - ['systemctl', 'enable', 'httpd.service'] + - ['systemctl', 'start', 'httpd.service'] + + controller-init: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: controller_users} + - config: {get_resource: controller-write-files} + - config: {get_resource: controller-runcmd} + + controller-machine-port: + type: OS::Neutron::Port + properties: + network: {get_resource: machine-net} + fixed_ips: [{ip_address: 192.168.32.3}] + + controller-floating-ip: + depends_on: machine-net-router-interface + type: OS::Neutron::FloatingIP + properties: + floating_network: {get_param: floating_ip_network} + 
port_id: {get_resource: controller-machine-port} + + controller: + type: OS::Nova::Server + properties: + image: {get_param: [controller_params, image]} + flavor: {get_param: [controller_params, flavor]} + networks: + - port: {get_resource: controller-machine-port} + user_data_format: RAW + user_data: {get_resource: controller-init} + + # + # OCP Masters + # + + # DHCP Opts value + extra-dhcp-opts-value: + type: OS::Heat::Value + properties: + type: json + value: + extra_dhcp_opts: + - opt_name: "60" + opt_value: "HTTPClient" + ip_version: 4 + - opt_name: "67" + opt_value: + str_replace: + template: http://$server_address:8081/boot-artifacts/agent.x86_64.ipxe + params: + $server_address: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]} + + master0-machine-port: + type: OS::Neutron::Port + properties: + network: {get_resource: machine-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.34.10}] + value_specs: {get_attr: [extra-dhcp-opts-value, value]} + + master0-ctlplane-trunk-parent-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ctlplane-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.122.10}] + + master0-internal-api-port: + type: OS::Neutron::Port + properties: + network: {get_resource: internal-api-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.17.0.10}] + + master0-storage-port: + type: OS::Neutron::Port + properties: + network: {get_resource: storage-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.18.0.10}] + + master0-tenant-port: + type: OS::Neutron::Port + properties: + network: {get_resource: tenant-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.19.0.10}] + + master0-octavia-port: + type: OS::Neutron::Port + properties: + network: {get_resource: octavia-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.23.0.10}] + + master0-trunk0: + type: OS::Neutron::Trunk + properties: + port: {get_resource: 
master0-ctlplane-trunk-parent-port} + sub_ports: + - port: {get_resource: master0-internal-api-port} + segmentation_id: 20 + segmentation_type: vlan + - port: {get_resource: master0-storage-port} + segmentation_id: 21 + segmentation_type: vlan + - port: {get_resource: master0-tenant-port} + segmentation_id: 22 + segmentation_type: vlan + - port: {get_resource: master0-octavia-port} + segmentation_id: 23 + segmentation_type: vlan + + master0-ironic-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ironic-net} + port_security_enabled: false + + master0-lvms-vol0: + type: OS::Cinder::Volume + properties: + size: 20 + + master0-cinder-vol0: + type: OS::Cinder::Volume + properties: + size: 20 + + master0-cinder-vol1: + type: OS::Cinder::Volume + properties: + size: 20 + + master0-cinder-vol2: + type: OS::Cinder::Volume + properties: + size: 20 + + master0: + depends_on: + - master0-lvms-vol0 + - master0-cinder-vol0 + - master0-cinder-vol1 + - master0-cinder-vol2 + type: OS::Nova::Server + properties: + image: {get_param: [ocp_master_params, image]} + flavor: {get_param: [ocp_master_params, flavor]} + block_device_mapping_v2: + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master0-lvms-vol0} + delete_on_termination: true + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master0-cinder-vol0} + delete_on_termination: true + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master0-cinder-vol1} + delete_on_termination: true + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master0-cinder-vol2} + delete_on_termination: true + networks: + - port: {get_resource: master0-machine-port} + - port: {get_attr: [master0-trunk0, port_id]} + - port: {get_resource: master0-ironic-port} + + master1-machine-port: + type: OS::Neutron::Port + properties: + network: {get_resource: machine-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.34.11}] + value_specs: {get_attr: 
[extra-dhcp-opts-value, value]} + + master1-ctlplane-trunk-parent-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ctlplane-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.122.11}] + + master1-internal-api-port: + type: OS::Neutron::Port + properties: + network: {get_resource: internal-api-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.17.0.11}] + + master1-storage-port: + type: OS::Neutron::Port + properties: + network: {get_resource: storage-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.18.0.11}] + + master1-tenant-port: + type: OS::Neutron::Port + properties: + network: {get_resource: tenant-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.19.0.11}] + + master1-octavia-port: + type: OS::Neutron::Port + properties: + network: {get_resource: octavia-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.23.0.11}] + + master1-trunk0: + type: OS::Neutron::Trunk + properties: + port: {get_resource: master1-ctlplane-trunk-parent-port} + sub_ports: + - port: {get_resource: master1-internal-api-port} + segmentation_id: 20 + segmentation_type: vlan + - port: {get_resource: master1-storage-port} + segmentation_id: 21 + segmentation_type: vlan + - port: {get_resource: master1-tenant-port} + segmentation_id: 22 + segmentation_type: vlan + - port: {get_resource: master1-octavia-port} + segmentation_id: 23 + segmentation_type: vlan + + master1-ironic-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ironic-net} + port_security_enabled: false + + master1-lvms-vol0: + type: OS::Cinder::Volume + properties: + size: 20 + + master1-cinder-vol0: + type: OS::Cinder::Volume + properties: + size: 20 + + master1-cinder-vol1: + type: OS::Cinder::Volume + properties: + size: 20 + + master1-cinder-vol2: + type: OS::Cinder::Volume + properties: + size: 20 + + master1: + depends_on: + - master1-lvms-vol0 + - master1-cinder-vol0 + - master1-cinder-vol1 + 
- master1-cinder-vol2 + type: OS::Nova::Server + properties: + image: {get_param: [ocp_master_params, image]} + flavor: {get_param: [ocp_master_params, flavor]} + block_device_mapping_v2: + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master1-lvms-vol0} + delete_on_termination: true + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master1-cinder-vol0} + delete_on_termination: true + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master1-cinder-vol1} + delete_on_termination: true + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master1-cinder-vol2} + delete_on_termination: true + networks: + - port: {get_resource: master1-machine-port} + - port: {get_attr: [master1-trunk0, port_id]} + - port: {get_resource: master1-ironic-port} + + master2-machine-port: + type: OS::Neutron::Port + properties: + network: {get_resource: machine-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.34.12}] + value_specs: {get_attr: [extra-dhcp-opts-value, value]} + + master2-ctlplane-trunk-parent-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ctlplane-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.122.12}] + + master2-internal-api-port: + type: OS::Neutron::Port + properties: + network: {get_resource: internal-api-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.17.0.12}] + + master2-storage-port: + type: OS::Neutron::Port + properties: + network: {get_resource: storage-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.18.0.12}] + + master2-tenant-port: + type: OS::Neutron::Port + properties: + network: {get_resource: tenant-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.19.0.12}] + + master2-octavia-port: + type: OS::Neutron::Port + properties: + network: {get_resource: octavia-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.23.0.12}] + + master2-trunk0: + type: 
OS::Neutron::Trunk + properties: + port: {get_resource: master2-ctlplane-trunk-parent-port} + sub_ports: + - port: {get_resource: master2-internal-api-port} + segmentation_id: 20 + segmentation_type: vlan + - port: {get_resource: master2-storage-port} + segmentation_id: 21 + segmentation_type: vlan + - port: {get_resource: master2-tenant-port} + segmentation_id: 22 + segmentation_type: vlan + - port: {get_resource: master2-octavia-port} + segmentation_id: 23 + segmentation_type: vlan + + master2-ironic-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ironic-net} + port_security_enabled: false + + master2-lvms-vol0: + type: OS::Cinder::Volume + properties: + size: 20 + + master2-cinder-vol0: + type: OS::Cinder::Volume + properties: + size: 20 + + master2-cinder-vol1: + type: OS::Cinder::Volume + properties: + size: 20 + + master2-cinder-vol2: + type: OS::Cinder::Volume + properties: + size: 20 + + master2: + depends_on: + - master2-lvms-vol0 + - master2-cinder-vol0 + - master2-cinder-vol1 + - master2-cinder-vol2 + type: OS::Nova::Server + properties: + image: {get_param: [ocp_master_params, image]} + flavor: {get_param: [ocp_master_params, flavor]} + block_device_mapping_v2: + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master2-lvms-vol0} + delete_on_termination: true + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master2-cinder-vol0} + delete_on_termination: true + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master2-cinder-vol1} + delete_on_termination: true + - boot_index: -1 + device_type: disk + volume_id: {get_resource: master2-cinder-vol2} + delete_on_termination: true + networks: + - port: {get_resource: master2-machine-port} + - port: {get_attr: [master2-trunk0, port_id]} + - port: {get_resource: master2-ironic-port} + + # + # Dataplane Nodes + # + dataplane_users: + type: OS::Heat::CloudConfig + properties: + cloud_config: + users: + - default + - name: cloud-admin + gecos: 
"Cloud Admin User" + sudo: ALL=(ALL) NOPASSWD:ALL + ssh_authorized_keys: + - {get_param: dataplane_ssh_pub_key} + + # compute0 + compute0_hostname: + type: OS::Heat::CloudConfig + properties: + cloud_config: + hostname: edpm-compute-0 + fqdn: edpm-compute-0.ctlplane.openstack.lab + + compute0_init: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: dataplane_users} + - config: {get_resource: compute0_hostname} + + compute0-ctlplane-trunk-parent-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ctlplane-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.122.100}] + + compute0-internal-api-port: + type: OS::Neutron::Port + properties: + network: {get_resource: internal-api-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.17.0.100}] + + compute0-storage-port: + type: OS::Neutron::Port + properties: + network: {get_resource: storage-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.18.0.100}] + + compute0-tenant-port: + type: OS::Neutron::Port + properties: + network: {get_resource: tenant-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.19.0.100}] + + compute0-trunk0: + type: OS::Neutron::Trunk + properties: + port: {get_resource: compute0-ctlplane-trunk-parent-port} + sub_ports: + - port: {get_resource: compute0-internal-api-port} + segmentation_id: 20 + segmentation_type: vlan + - port: {get_resource: compute0-storage-port} + segmentation_id: 21 + segmentation_type: vlan + - port: {get_resource: compute0-tenant-port} + segmentation_id: 22 + segmentation_type: vlan + + compute0: + type: OS::Nova::Server + properties: + image: {get_param: [compute_params, image]} + flavor: {get_param: [compute_params, flavor]} + networks: + - port: {get_attr: [compute0-trunk0, port_id]} + user_data_format: RAW + user_data: {get_resource: compute0_init} + config_drive: true + + # compute1 + compute1_hostname: + type: OS::Heat::CloudConfig + properties: + 
cloud_config: + hostname: edpm-compute-1 + fqdn: edpm-compute-1.ctlplane.openstack.lab + + compute1_init: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: dataplane_users} + - config: {get_resource: compute1_hostname} + + compute1-ctlplane-trunk-parent-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ctlplane-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.122.101}] + + compute1-internal-api-port: + type: OS::Neutron::Port + properties: + network: {get_resource: internal-api-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.17.0.101}] + + compute1-storage-port: + type: OS::Neutron::Port + properties: + network: {get_resource: storage-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.18.0.101}] + + compute1-tenant-port: + type: OS::Neutron::Port + properties: + network: {get_resource: tenant-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.19.0.101}] + + compute1-trunk0: + type: OS::Neutron::Trunk + properties: + port: {get_resource: compute1-ctlplane-trunk-parent-port} + sub_ports: + - port: {get_resource: compute1-internal-api-port} + segmentation_id: 20 + segmentation_type: vlan + - port: {get_resource: compute1-storage-port} + segmentation_id: 21 + segmentation_type: vlan + - port: {get_resource: compute1-tenant-port} + segmentation_id: 22 + segmentation_type: vlan + + compute1: + type: OS::Nova::Server + properties: + image: {get_param: [compute_params, image]} + flavor: {get_param: [compute_params, flavor]} + networks: + - port: {get_attr: [compute1-trunk0, port_id]} + user_data_format: RAW + user_data: {get_resource: compute1_init} + config_drive: true + + # + # Networkers + # + networker_init: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: dataplane_users} + + # networker0 + networker0-ctlplane-trunk-parent-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ctlplane-net} + 
port_security_enabled: false + fixed_ips: [{ip_address: 192.168.122.105}] + + networker0-internal-api-port: + type: OS::Neutron::Port + properties: + network: {get_resource: internal-api-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.17.0.105}] + + networker0-tenant-port: + type: OS::Neutron::Port + properties: + network: {get_resource: tenant-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.19.0.105}] + + networker0-trunk0: + type: OS::Neutron::Trunk + properties: + port: {get_resource: networker0-ctlplane-trunk-parent-port} + sub_ports: + - port: {get_resource: networker0-internal-api-port} + segmentation_id: 20 + segmentation_type: vlan + - port: {get_resource: networker0-tenant-port} + segmentation_id: 22 + segmentation_type: vlan + + networker0: + type: OS::Nova::Server + properties: + image: {get_param: [networker_params, image]} + flavor: {get_param: [networker_params, flavor]} + networks: + - port: {get_attr: [networker0-trunk0, port_id]} + user_data_format: RAW + user_data: {get_resource: networker_init} + config_drive: true + + # networker1 + networker1-ctlplane-trunk-parent-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ctlplane-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.122.106}] + + networker1-internal-api-port: + type: OS::Neutron::Port + properties: + network: {get_resource: internal-api-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.17.0.106}] + + networker1-tenant-port: + type: OS::Neutron::Port + properties: + network: {get_resource: tenant-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.19.0.106}] + + networker1-trunk0: + type: OS::Neutron::Trunk + properties: + port: {get_resource: networker1-ctlplane-trunk-parent-port} + sub_ports: + - port: {get_resource: networker1-internal-api-port} + segmentation_id: 20 + segmentation_type: vlan + - port: {get_resource: networker1-tenant-port} + segmentation_id: 22 + 
segmentation_type: vlan + + networker1: + type: OS::Nova::Server + properties: + image: {get_param: [networker_params, image]} + flavor: {get_param: [networker_params, flavor]} + networks: + - port: {get_attr: [networker1-trunk0, port_id]} + user_data_format: RAW + user_data: {get_resource: networker_init} + config_drive: true + + # networker2 + networker2-ctlplane-trunk-parent-port: + type: OS::Neutron::Port + properties: + network: {get_resource: ctlplane-net} + port_security_enabled: false + fixed_ips: [{ip_address: 192.168.122.107}] + + networker2-internal-api-port: + type: OS::Neutron::Port + properties: + network: {get_resource: internal-api-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.17.0.107}] + + networker2-tenant-port: + type: OS::Neutron::Port + properties: + network: {get_resource: tenant-net} + port_security_enabled: false + fixed_ips: [{ip_address: 172.19.0.107}] + + networker2-trunk0: + type: OS::Neutron::Trunk + properties: + port: {get_resource: networker2-ctlplane-trunk-parent-port} + sub_ports: + - port: {get_resource: networker2-internal-api-port} + segmentation_id: 20 + segmentation_type: vlan + - port: {get_resource: networker2-tenant-port} + segmentation_id: 22 + segmentation_type: vlan + + networker2: + type: OS::Nova::Server + properties: + image: {get_param: [networker_params, image]} + flavor: {get_param: [networker_params, flavor]} + networks: + - port: {get_attr: [networker2-trunk0, port_id]} + user_data_format: RAW + user_data: {get_resource: networker_init} + config_drive: true + +outputs: + controller_floating_ip: + description: Controller Floating IP + value: {get_attr: [controller-floating-ip, floating_ip_address]} + + sushy_emulator_uuids: + description: UUIDs of instances to manage with sushy-tools - RedFish virtual BMC + value: {} + + ocp_install_config: + description: OCP install-config.yaml + value: + apiVersion: v1 + baseDomain: openstack.lab + controlPlane: + architecture: amd64 + hyperthreading: 
Disabled + name: master + replicas: 3 + compute: + - architecture: amd64 + hyperthreading: Disabled + name: worker + replicas: 0 + metadata: + name: ocp + networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + machineNetwork: + - cidr: {get_attr: [machine-subnet, cidr]} + serviceNetwork: + - 172.30.0.0/16 + networkType: OVNKubernetes + platform: + none: {} + pullSecret: _replaced_ + sshKey: {get_param: dataplane_ssh_pub_key} + + ocp_agent_config: + description: OCP agent-config.yaml + value: + apiVersion: v1beta1 + kind: AgentConfig + metadata: + name: ocp + rendezvousIP: {get_attr: [master0-machine-port, fixed_ips, 0, ip_address]} + bootArtifactsBaseURL: + str_replace: + template: http://$server_address:8081/boot-artifacts + params: + $server_address: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]} + additionalNTPSources: {get_param: ntp_servers} + hosts: + - hostname: master-0 + role: master + interfaces: + - name: eth0 + macAddress: {get_attr: [master0-machine-port, mac_address]} + - name: eth1 + macAddress: {get_attr: [master0-ctlplane-trunk-parent-port, mac_address]} + - name: eth2 + macAddress: {get_attr: [master0-ironic-port, mac_address]} + rootDeviceHints: + deviceName: /dev/vda + networkConfig: + interfaces: + - name: eth0 + type: ethernet + state: up + mac-address: {get_attr: [master0-machine-port, mac_address]} + ipv4: + enabled: true + dhcp: true + ipv6: + enabled: false + - name: eth1 + type: ethernet + state: down + mac-address: {get_attr: [master0-ctlplane-trunk-parent-port, mac_address]} + - name: eth2 + type: ethernet + state: down + mac-address: {get_attr: [master0-ironic-port, mac_address]} + - hostname: master-1 + role: master + interfaces: + - name: eth0 + macAddress: {get_attr: [master1-machine-port, mac_address]} + - name: eth1 + macAddress: {get_attr: [master1-ctlplane-trunk-parent-port, mac_address]} + - name: eth2 + macAddress: {get_attr: [master1-ironic-port, mac_address]} + rootDeviceHints: + 
deviceName: /dev/vda + networkConfig: + interfaces: + - name: eth0 + type: ethernet + state: up + mac-address: {get_attr: [master1-machine-port, mac_address]} + ipv4: + enabled: true + dhcp: true + ipv6: + enabled: false + - name: eth1 + type: ethernet + state: down + mac-address: {get_attr: [master1-ctlplane-trunk-parent-port, mac_address]} + - name: eth2 + type: ethernet + state: down + mac-address: {get_attr: [master1-ironic-port, mac_address]} + - hostname: master-2 + role: master + interfaces: + - name: eth0 + macAddress: {get_attr: [master2-machine-port, mac_address]} + - name: eth1 + macAddress: {get_attr: [master2-ctlplane-trunk-parent-port, mac_address]} + - name: eth2 + macAddress: {get_attr: [master2-ironic-port, mac_address]} + rootDeviceHints: + deviceName: /dev/vda + networkConfig: + interfaces: + - name: eth0 + type: ethernet + state: up + mac-address: {get_attr: [master2-machine-port, mac_address]} + ipv4: + enabled: true + dhcp: true + ipv6: + enabled: false + - name: eth1 + type: ethernet + state: down + mac-address: {get_attr: [master2-ctlplane-trunk-parent-port, mac_address]} + - name: eth2 + type: ethernet + state: down + mac-address: {get_attr: [master2-ironic-port, mac_address]} + + controller_ansible_host: + description: > + Controller ansible host, this struct can be passed to the ansible.builtin.add_host module + value: + name: controller-0 + ansible_ssh_user: zuul + ansible_host: {get_attr: [controller-floating-ip, floating_ip_address]} + ansible_ssh_common_args: '-o StrictHostKeyChecking=no' + groups: controllers + + ansible_inventory: + description: Ansible inventory + value: + all: + children: + controllers: + vars: + ocps: + vars: + computes: + vars: + networkers: + vars: + localhosts: + hosts: + localhost: + ansible_connection: local + controllers: + hosts: + controller0: + ansible_host: {get_attr: [controller-machine-port, fixed_ips, 0, ip_address]} + ansible_user: zuul + ansible_ssh_common_args: '-o StrictHostKeyChecking=no' + 
ansible_ssh_private_key_file: '~/.ssh/id_rsa'
+ ocps:
+ hosts:
+ master0:
+ ansible_host: {get_attr: [master0-machine-port, fixed_ips, 0, ip_address]}
+ ansible_user: core
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa'
+ master1:
+ ansible_host: {get_attr: [master1-machine-port, fixed_ips, 0, ip_address]}
+ ansible_user: core
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa'
+ master2:
+ ansible_host: {get_attr: [master2-machine-port, fixed_ips, 0, ip_address]}  # fixed: was master1-machine-port (copy/paste), which gave master2 master-1's IP
+ ansible_user: core
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa'
+ computes:
+ hosts:
+ compute0:
+ ansible_host: {get_attr: [compute0-ctlplane-trunk-parent-port, fixed_ips, 0, ip_address]}
+ ansible_user: cloud-admin
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa'
+ compute1:
+ ansible_host: {get_attr: [compute1-ctlplane-trunk-parent-port, fixed_ips, 0, ip_address]}
+ ansible_user: cloud-admin
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa'
+ networkers:
+ hosts:
+ networker0:
+ ansible_host: {get_attr: [networker0-ctlplane-trunk-parent-port, fixed_ips, 0, ip_address]}
+ ansible_user: cloud-admin
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa'
+ networker1:
+ ansible_host: {get_attr: [networker1-ctlplane-trunk-parent-port, fixed_ips, 0, ip_address]}
+ ansible_user: cloud-admin
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa'
+ networker2:
+ ansible_host: {get_attr: [networker2-ctlplane-trunk-parent-port, fixed_ips, 0, ip_address]}
+ ansible_user: cloud-admin
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+ ansible_ssh_private_key_file: '~/.ssh/id_rsa' diff --git
a/scenarios/secret-rotate/manifests/control-plane/control-plane.yaml b/scenarios/secret-rotate/manifests/control-plane/control-plane.yaml new file mode 100644 index 00000000..2f3987b6 --- /dev/null +++ b/scenarios/secret-rotate/manifests/control-plane/control-plane.yaml @@ -0,0 +1,922 @@ +--- +apiVersion: v1 +data: + server-ca-passphrase: MTIzNDU2Nzg= +kind: Secret +metadata: + name: octavia-ca-passphrase + namespace: openstack +type: Opaque +--- +apiVersion: v1 +data: + AdminPassword: MTIzNDU2Nzg= + AodhDatabasePassword: MTIzNDU2Nzg= + AodhPassword: MTIzNDU2Nzg= + BarbicanDatabasePassword: MTIzNDU2Nzg= + BarbicanPassword: MTIzNDU2Nzg= + BarbicanSimpleCryptoKEK: r0wDZ1zrD5upafX9RDfYqvDkW2LENBWH7Gz9+Tr3NdM= + CeilometerPassword: MTIzNDU2Nzg= + CinderDatabasePassword: MTIzNDU2Nzg= + CinderPassword: MTIzNDU2Nzg= + DatabasePassword: MTIzNDU2Nzg= + DbRootPassword: MTIzNDU2Nzg= + DesignateDatabasePassword: MTIzNDU2Nzg= + DesignatePassword: MTIzNDU2Nzg= + GlanceDatabasePassword: MTIzNDU2Nzg= + GlancePassword: MTIzNDU2Nzg= + HeatAuthEncryptionKey: NzY3YzNlZDA1NmNiYWEzYjlkZmVkYjhjNmY4MjViZjA= + HeatDatabasePassword: MTIzNDU2Nzg= + HeatPassword: MTIzNDU2Nzg= + IronicDatabasePassword: MTIzNDU2Nzg= + IronicInspectorDatabasePassword: MTIzNDU2Nzg= + IronicInspectorPassword: MTIzNDU2Nzg= + IronicPassword: MTIzNDU2Nzg= + KeystoneDatabasePassword: MTIzNDU2Nzg= + ManilaDatabasePassword: MTIzNDU2Nzg= + ManilaPassword: MTIzNDU2Nzg= + MetadataSecret: MTIzNDU2Nzg0Mg== + NeutronDatabasePassword: MTIzNDU2Nzg= + NeutronPassword: MTIzNDU2Nzg= + NovaAPIDatabasePassword: MTIzNDU2Nzg= + NovaCell0DatabasePassword: MTIzNDU2Nzg= + NovaCell1DatabasePassword: MTIzNDU2Nzg= + NovaPassword: MTIzNDU2Nzg= + OctaviaDatabasePassword: MTIzNDU2Nzg= + OctaviaHeartbeatKey: MTIzNDU2Nzg= + OctaviaPassword: MTIzNDU2Nzg= + PlacementDatabasePassword: MTIzNDU2Nzg= + PlacementPassword: MTIzNDU2Nzg= + SwiftPassword: MTIzNDU2Nzg= +kind: Secret +metadata: + name: osp-secret + namespace: openstack +type: Opaque +--- 
+apiVersion: core.openstack.org/v1beta1 +kind: OpenStackControlPlane +metadata: + name: controlplane + namespace: openstack +spec: + barbican: + enabled: true + apiOverride: + route: {} + template: + barbicanAPI: + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + replicas: 3 + barbicanKeystoneListener: + replicas: 1 + barbicanWorker: + replicas: 3 + databaseInstance: openstack + preserveJobs: false + secret: osp-secret + ceilometer: + template: + passwordSelector: + service: CeilometerPassword + secret: osp-secret + serviceUser: ceilometer + cinder: + apiOverride: + route: + haproxy.router.openshift.io/timeout: 60s + template: + cinderAPI: + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + replicas: 3 + cinderBackup: + customServiceConfig: | + [DEFAULT] + backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver + networkAttachments: + - storage + replicas: 1 + cinderScheduler: + replicas: 1 + cinderVolumes: + lvm-iscsi: + customServiceConfig: | + [lvm] + image_volume_cache_enabled = false + volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver + volume_group = cinder-volumes + target_protocol = iscsi + target_helper = lioadm + volume_backend_name = lvm_iscsi + target_ip_address=172.18.0.10 + target_secondary_ip_addresses = 172.19.0.10 + nodeSelector: + openstack.org/cinder-lvm: "" + replicas: 1 + customServiceConfig: | + # Debug logs by default, jobs can override as needed. 
+ [DEFAULT] + debug = true + databaseInstance: openstack + preserveJobs: false + secret: osp-secret + uniquePodNames: true + designate: + template: + preserveJobs: false + dns: + template: + options: + - key: server + values: + - 192.168.32.3 + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: ctlplane + metallb.universe.tf/allow-shared-ip: ctlplane + metallb.universe.tf/loadBalancerIPs: 192.168.122.80 + spec: + type: LoadBalancer + replicas: 2 + galera: + enabled: true + templates: + openstack: + replicas: 3 + secret: osp-secret + storageRequest: 5G + openstack-cell1: + replicas: 3 + secret: osp-secret + storageRequest: 5G + glance: + apiOverrides: + default: + route: + haproxy.router.openshift.io/timeout: 60s + template: + customServiceConfig: | + [DEFAULT] + debug = True + enabled_backends = default_backend:swift + + [glance_store] + default_backend = default_backend + + [default_backend] + swift_store_create_container_on_put = True + swift_store_auth_version = 3 + swift_store_auth_address = {{ .KeystoneInternalURL }} + swift_store_endpoint_type = internalURL + swift_store_user = service:glance + swift_store_key = {{ .ServicePassword }} + databaseInstance: openstack + glanceAPIs: + default: + networkAttachments: + - storage + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + replicas: 3 + preserveJobs: false + storage: + storageClass: lvms-local-storage + storageRequest: 10G + uniquePodNames: true + heat: + apiOverride: + route: {} + cnfAPIOverride: + route: {} + enabled: true + template: + customServiceConfig: | + [clients_heat] + endpoint_type: public + insecure: true + databaseInstance: openstack + heatAPI: + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + 
metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + replicas: 1 + heatEngine: + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + replicas: 1 + preserveJobs: false + secret: osp-secret + horizon: + apiOverride: + route: {} + enabled: false + template: + preserveJobs: false + replicas: 1 + secret: osp-secret + ironic: + enabled: true + template: + databaseInstance: openstack + ironicAPI: + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: ctlplane + metallb.universe.tf/allow-shared-ip: ctlplane + metallb.universe.tf/loadBalancerIPs: 192.168.122.80 + spec: + type: LoadBalancer + replicas: 1 + ironicConductors: + - customServiceConfig: | + [conductor] + power_state_change_timeout = 120 + + [pxe] + kernel_append_params = console=ttyS0 + + [neutron] + cleaning_network = provisioning + provisioning_network = provisioning + rescuing_network = provisioning + inspection_network = provisioning + networkAttachments: + - ironic + provisionNetwork: ironic + replicas: 1 + storageRequest: 10G + ironicInspector: + customServiceConfig: | + [capabilities] + boot_mode = true + + [processing] + update_pxe_enabled = false + inspectionNetwork: ironic + networkAttachments: + - ironic + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: ctlplane + metallb.universe.tf/allow-shared-ip: ctlplane + metallb.universe.tf/loadBalancerIPs: 192.168.122.80 + spec: + type: LoadBalancer + preserveJobs: false + replicas: 0 + ironicNeutronAgent: + replicas: 1 + preserveJobs: false + rpcTransport: oslo + secret: osp-secret + keystone: + apiOverride: + route: {} + template: + databaseInstance: openstack + override: + service: + internal: + metadata: + 
annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + preserveJobs: false + replicas: 3 + secret: osp-secret + manila: + apiOverride: + route: + haproxy.router.openshift.io/timeout: 60s + enabled: false + template: + manilaAPI: + networkAttachments: + - internalapi + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + replicas: 1 + manilaScheduler: + replicas: 1 + manilaShares: + share1: + networkAttachments: + - storage + replicas: 1 + preserveJobs: false + memcached: + templates: + memcached: + replicas: 3 + neutron: + apiOverride: + route: {} + template: + customServiceConfig: | + [DEFAULT] + vlan_transparent = true + agent_down_time = 600 + router_distributed = true + router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler + allow_automatic_l3agent_failover = true + debug = true + default_availability_zones = zone-1,zone-2 + + [agent] + report_interval = 300 + + [database] + max_retries = -1 + db_max_retries = -1 + + [keystone_authtoken] + region_name = regionOne + memcache_use_advanced_pool = True + + [oslo_messaging_notifications] + driver = noop + + [oslo_middleware] + enable_proxy_headers_parsing = true + + [oslo_policy] + policy_file = /etc/neutron/policy.yaml + + [ovs] + igmp_snooping_enable = true + + [ovn] + ovsdb_probe_interval = 60000 + ovn_emit_need_to_frag = true + + [ml2] + type_drivers = geneve,vxlan,vlan,flat,local + tenant_network_types = geneve,flat + databaseInstance: openstack + networkAttachments: + - internalapi + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + 
metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + preserveJobs: false + replicas: 3 + secret: osp-secret + nova: + apiOverride: + route: {} + template: + apiServiceTemplate: + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + replicas: 3 + cellTemplates: + cell0: + cellDatabaseAccount: nova-cell0 + cellDatabaseInstance: openstack + cellMessageBusInstance: rabbitmq + hasAPIAccess: true + cell1: + cellDatabaseAccount: nova-cell1 + cellDatabaseInstance: openstack-cell1 + cellMessageBusInstance: rabbitmq-cell1 + hasAPIAccess: true + novaComputeTemplates: + compute-ironic: + computeDriver: ironic.IronicDriver + metadataServiceTemplate: + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + replicas: 3 + preserveJobs: false + schedulerServiceTemplate: + replicas: 3 + secret: osp-secret + octavia: + enabled: true + template: + amphoraImageContainerImage: quay.io/gthiemonge/octavia-amphora-image + apacheContainerImage: registry.redhat.io/ubi9/httpd-24:latest + databaseInstance: openstack + lbMgmtNetwork: + availabilityZones: + - zone-1 + octaviaAPI: + networkAttachments: + - internalapi + preserveJobs: false + replicas: 1 + octaviaHealthManager: + networkAttachments: + - octavia + octaviaHousekeeping: + networkAttachments: + - octavia + octaviaWorker: + networkAttachments: + - octavia + preserveJobs: false + secret: osp-secret + ovn: + template: + ovnController: + external-ids: + availability-zones: + - zone-1 + networkAttachment: tenant + nicMappings: + datacentre: ocpbr + ironic: ironic + octavia: octbr + ovnDBCluster: + ovndbcluster-nb: + dbType: NB + networkAttachment: 
internalapi + replicas: 3 + storageRequest: 10G + ovndbcluster-sb: + dbType: SB + networkAttachment: internalapi + replicas: 3 + storageRequest: 10G + ovnNorthd: + logLevel: info + nThreads: 1 + replicas: 1 + resources: {} + tls: {} + placement: + apiOverride: + route: {} + template: + databaseInstance: openstack + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + preserveJobs: false + replicas: 3 + secret: osp-secret + rabbitmq: + templates: + rabbitmq: + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.85 + spec: + type: LoadBalancer + replicas: 3 + rabbitmq-cell1: + override: + service: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.86 + spec: + type: LoadBalancer + replicas: 3 + secret: osp-secret + storageClass: lvms-local-storage + swift: + enabled: true + proxyOverride: + route: {} + template: + swiftProxy: + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.80 + spec: + type: LoadBalancer + replicas: 1 + swiftRing: + ringReplicas: 1 + swiftStorage: + replicas: 1 + telemetry: + enabled: true + template: + autoscaling: + aodh: + databaseInstance: openstack + memcachedInstance: memcached + passwordSelectors: null + preserveJobs: false + secret: osp-secret + enabled: true + heatInstance: heat + ceilometer: + enabled: true + secret: osp-secret + logging: + enabled: false + port: 10514 + metricStorage: + enabled: true + monitoringStack: + alertingEnabled: true + scrapeInterval: 30s + storage: + persistent: + pvcStorageClass: lvms-local-storage + 
pvcStorageRequest: 10Gi + retention: 24h + strategy: persistent +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + labels: + osp/net: ctlplane + osp/net-attach-def-type: standard + name: ctlplane + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "ctlplane", + "type": "macvlan", + "master": "ospbr", + "ipam": { + "type": "whereabouts", + "range": "192.168.122.0/24", + "range_start": "192.168.122.30", + "range_end": "192.168.122.70" + } + } +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + labels: + osp/net: datacentre + osp/net-attach-def-type: standard + name: datacentre + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "datacentre", + "type": "bridge", + "bridge": "ospbr", + "ipam": {} + } +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + labels: + osp/net: internalapi + osp/net-attach-def-type: standard + name: internalapi + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "internalapi", + "type": "macvlan", + "master": "internalapi", + "ipam": { + "type": "whereabouts", + "range": "172.17.0.0/24", + "range_start": "172.17.0.30", + "range_end": "172.17.0.70" + } + } +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + labels: + osp/net: ironic + osp/net-attach-def-type: standard + name: ironic + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "ironic", + "type": "bridge", + "bridge": "ironic", + "mtu": 1442, + "ipam": { + "type": "whereabouts", + "range": "172.20.1.0/24", + "range_start": "172.20.1.30", + "range_end": "172.20.1.70" + } + } +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + labels: + osp/net: octavia + osp/net-attach-def-type: standard + name: octavia + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "octavia", + "type": 
"bridge", + "bridge": "octbr", + "mtu": 1442, + "ipam": { + "type": "whereabouts", + "range": "172.23.0.0/24", + "range_start": "172.23.0.30", + "range_end": "172.23.0.70", + "routes": [ + { + "dst": "172.24.0.0/16", + "gw": "172.23.0.150" + } + ] + } + } +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + labels: + osp/net: storage + osp/net-attach-def-type: standard + name: storage + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "storage", + "type": "macvlan", + "master": "storage", + "ipam": { + "type": "whereabouts", + "range": "172.18.0.0/24", + "range_start": "172.18.0.30", + "range_end": "172.18.0.70" + } + } +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + labels: + osp/net: tenant + osp/net-attach-def-type: standard + name: tenant + namespace: openstack +spec: + config: | + { + "cniVersion": "0.3.1", + "name": "tenant", + "type": "macvlan", + "master": "tenant", + "ipam": { + "type": "whereabouts", + "range": "172.19.0.0/24", + "range_start": "172.19.0.30", + "range_end": "172.19.0.70" + } + } +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + labels: + osp/lb-addresses-type: standard + name: ctlplane + namespace: metallb-system +spec: + addresses: + - 192.168.122.80-192.168.122.90 +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + labels: + osp/lb-addresses-type: standard + name: internalapi + namespace: metallb-system +spec: + addresses: + - 172.17.0.80-172.17.0.90 +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + labels: + osp/lb-addresses-type: standard + name: storage + namespace: metallb-system +spec: + addresses: + - 172.18.0.80-172.18.0.90 +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + labels: + osp/lb-addresses-type: standard + name: tenant + namespace: metallb-system +spec: + addresses: + - 172.19.0.80-172.19.0.90 +--- +apiVersion: metallb.io/v1beta1 +kind: 
L2Advertisement +metadata: + name: ctlplane + namespace: metallb-system +spec: + interfaces: + - ospbr + ipAddressPools: + - ctlplane +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: internalapi + namespace: metallb-system +spec: + interfaces: + - internalapi + ipAddressPools: + - internalapi +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: storage + namespace: metallb-system +spec: + interfaces: + - storage + ipAddressPools: + - storage +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: tenant + namespace: metallb-system +spec: + interfaces: + - tenant + ipAddressPools: + - tenant +--- +apiVersion: network.openstack.org/v1beta1 +kind: NetConfig +metadata: + name: netconfig + namespace: openstack +spec: + networks: + - dnsDomain: ctlplane.openstack.lab + mtu: 1442 + name: ctlplane + subnets: + - allocationRanges: + - end: 192.168.122.120 + start: 192.168.122.100 + - end: 192.168.122.200 + start: 192.168.122.150 + cidr: 192.168.122.0/24 + gateway: 192.168.122.1 + name: subnet1 + - dnsDomain: internalapi.openstack.lab + mtu: 1442 + name: internalapi + subnets: + - allocationRanges: + - end: 172.17.0.250 + start: 172.17.0.100 + cidr: 172.17.0.0/24 + name: subnet1 + vlan: 20 + - dnsDomain: storage.openstack.lab + mtu: 1442 + name: storage + subnets: + - allocationRanges: + - end: 172.18.0.250 + start: 172.18.0.100 + cidr: 172.18.0.0/24 + name: subnet1 + vlan: 21 + - dnsDomain: tenant.openstack.lab + mtu: 1442 + name: tenant + subnets: + - allocationRanges: + - end: 172.19.0.250 + start: 172.19.0.100 + cidr: 172.19.0.0/24 + name: subnet1 + vlan: 22 diff --git a/scenarios/secret-rotate/manifests/control-plane/nncp/nncp.yaml b/scenarios/secret-rotate/manifests/control-plane/nncp/nncp.yaml new file mode 100644 index 00000000..5890238a --- /dev/null +++ b/scenarios/secret-rotate/manifests/control-plane/nncp/nncp.yaml @@ -0,0 +1,441 @@ +--- +apiVersion: nmstate.io/v1 +kind: 
NodeNetworkConfigurationPolicy +metadata: + labels: + osp/nncm-config-type: standard + name: master-0 + namespace: openstack +spec: + desiredState: + dns-resolver: + config: + search: [] + server: + - 192.168.32.3 + interfaces: + - description: internalapi vlan interface + ipv4: + address: + - ip: 172.17.0.10 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: internalapi + state: up + type: vlan + vlan: + base-iface: eth1 + id: "20" + - description: storage vlan interface + ipv4: + address: + - ip: 172.18.0.10 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: storage + state: up + type: vlan + vlan: + base-iface: eth1 + id: "21" + - description: tenant vlan interface + ipv4: + address: + - ip: 172.19.0.10 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: tenant + state: up + type: vlan + vlan: + base-iface: eth1 + id: "22" + - description: ctlplane interface + mtu: 1442 + name: eth1 + state: up + type: ethernet + - bridge: + options: + stp: + enabled: false + port: + - name: eth1 + vlan: {} + description: linux-bridge over ctlplane interface + ipv4: + address: + - ip: 192.168.122.10 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: ospbr + state: up + type: linux-bridge + - description: Octavia vlan host interface + name: octavia + state: up + type: vlan + vlan: + base-iface: eth1 + id: "23" + - bridge: + options: + stp: + enabled: false + port: + - name: octavia + description: Octavia bridge + mtu: 1442 + name: octbr + type: linux-bridge + - bridge: + options: + stp: + enabled: false + port: + - name: eth2 + description: Ironic bridge + ipv4: + address: + - ip: 172.20.1.10 + prefix-length: "24" + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: ironic + type: linux-bridge + - description: Ironic VRF + name: ironicvrf + state: up + type: vrf + vrf: + port: + - ironic + 
route-table-id: 10 + route-rules: + config: [] + routes: + config: + - destination: 0.0.0.0/0 + metric: 150 + next-hop-address: 172.20.1.1 + next-hop-interface: ironic + table-id: 10 + - destination: 172.20.1.0/24 + metric: 150 + next-hop-address: 192.168.122.1 + next-hop-interface: ospbr + nodeSelector: + kubernetes.io/hostname: master-0 + node-role.kubernetes.io/worker: "" +--- +apiVersion: nmstate.io/v1 +kind: NodeNetworkConfigurationPolicy +metadata: + labels: + osp/nncm-config-type: standard + name: master-1 + namespace: openstack +spec: + desiredState: + dns-resolver: + config: + search: [] + server: + - 192.168.32.3 + interfaces: + - description: internalapi vlan interface + ipv4: + address: + - ip: 172.17.0.11 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: internalapi + state: up + type: vlan + vlan: + base-iface: eth1 + id: "20" + - description: storage vlan interface + ipv4: + address: + - ip: 172.18.0.11 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: storage + state: up + type: vlan + vlan: + base-iface: eth1 + id: "21" + - description: tenant vlan interface + ipv4: + address: + - ip: 172.19.0.11 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: tenant + state: up + type: vlan + vlan: + base-iface: eth1 + id: "22" + - description: ctlplane interface + mtu: 1442 + name: eth1 + state: up + type: ethernet + - bridge: + options: + stp: + enabled: false + port: + - name: eth1 + vlan: {} + description: linux-bridge over ctlplane interface + ipv4: + address: + - ip: 192.168.122.11 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: ospbr + state: up + type: linux-bridge + - description: Octavia vlan host interface + name: octavia + state: up + type: vlan + vlan: + base-iface: eth1 + id: "23" + - bridge: + options: + stp: + enabled: false + port: + - name: octavia + 
description: Octavia bridge + mtu: 1442 + name: octbr + type: linux-bridge + - bridge: + options: + stp: + enabled: false + port: + - name: eth2 + description: Ironic bridge + ipv4: + address: + - ip: 172.20.1.11 + prefix-length: "24" + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: ironic + type: linux-bridge + - description: Ironic VRF + name: ironicvrf + state: up + type: vrf + vrf: + port: + - ironic + route-table-id: 10 + route-rules: + config: [] + routes: + config: + - destination: 0.0.0.0/0 + metric: 150 + next-hop-address: 172.20.1.1 + next-hop-interface: ironic + table-id: 10 + - destination: 172.20.1.0/24 + metric: 150 + next-hop-address: 192.168.122.1 + next-hop-interface: ospbr + nodeSelector: + kubernetes.io/hostname: master-1 + node-role.kubernetes.io/worker: "" +--- +apiVersion: nmstate.io/v1 +kind: NodeNetworkConfigurationPolicy +metadata: + labels: + osp/nncm-config-type: standard + name: master-2 + namespace: openstack +spec: + desiredState: + dns-resolver: + config: + search: [] + server: + - 192.168.32.3 + interfaces: + - description: internalapi vlan interface + ipv4: + address: + - ip: 172.17.0.12 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: internalapi + state: up + type: vlan + vlan: + base-iface: eth1 + id: "20" + - description: storage vlan interface + ipv4: + address: + - ip: 172.18.0.12 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: storage + state: up + type: vlan + vlan: + base-iface: eth1 + id: "21" + - description: tenant vlan interface + ipv4: + address: + - ip: 172.19.0.12 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: tenant + state: up + type: vlan + vlan: + base-iface: eth1 + id: "22" + - description: ctlplane interface + mtu: 1442 + name: eth1 + state: up + type: ethernet + - bridge: + options: + stp: + enabled: false + port: + - name: eth1 + vlan: {} + description: 
linux-bridge over ctlplane interface + ipv4: + address: + - ip: 192.168.122.12 + prefix-length: "24" + dhcp: false + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: ospbr + state: up + type: linux-bridge + - description: Octavia vlan host interface + name: octavia + state: up + type: vlan + vlan: + base-iface: eth1 + id: "23" + - bridge: + options: + stp: + enabled: false + port: + - name: octavia + description: Octavia bridge + mtu: 1442 + name: octbr + type: linux-bridge + - bridge: + options: + stp: + enabled: false + port: + - name: eth2 + description: Ironic bridge + ipv4: + address: + - ip: 172.20.1.12 + prefix-length: "24" + enabled: true + ipv6: + enabled: false + mtu: 1442 + name: ironic + type: linux-bridge + - description: Ironic VRF + name: ironicvrf + state: up + type: vrf + vrf: + port: + - ironic + route-table-id: 10 + route-rules: + config: [] + routes: + config: + - destination: 0.0.0.0/0 + metric: 150 + next-hop-address: 172.20.1.1 + next-hop-interface: ironic + table-id: 10 + - destination: 172.20.1.0/24 + metric: 150 + next-hop-address: 192.168.122.1 + next-hop-interface: ospbr + nodeSelector: + kubernetes.io/hostname: master-2 + node-role.kubernetes.io/worker: "" diff --git a/scenarios/secret-rotate/manifests/dataplane.yaml b/scenarios/secret-rotate/manifests/dataplane.yaml new file mode 100644 index 00000000..68b12e70 --- /dev/null +++ b/scenarios/secret-rotate/manifests/dataplane.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: dataplane + namespace: openstack +spec: + nodeSets: + - edpm diff --git a/scenarios/secret-rotate/manifests/edpm/edpm.yaml b/scenarios/secret-rotate/manifests/edpm/edpm.yaml new file mode 100644 index 00000000..94b4319d --- /dev/null +++ b/scenarios/secret-rotate/manifests/edpm/edpm.yaml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +data: + LibvirtPassword: MTIzNDU2Nzg= +kind: Secret +metadata: + name: libvirt-secret + namespace: 
openstack +type: Opaque +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: edpm + namespace: openstack +spec: + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + networkAttachments: + - ctlplane + nodeTemplate: + ansible: + ansiblePort: 22 + ansibleUser: cloud-admin + ansibleVars: + edpm_bootstrap_command: | + # REPLACED + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic1 + mtu: {{ min_viable_mtu }} + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: >- + {{ + lookup('vars', networks_lower[network] ~ '_ip') + }}/{{ + lookup('vars', networks_lower[network] ~ '_cidr') + }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + edpm_sshd_allowed_ranges: + - 192.168.122.0/24 + - 192.168.32.3/32 + edpm_sshd_configure_firewall: true + gather_facts: false + neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + timesync_ntp_servers: + - hostname: pool.ntp.org + ansibleSSHPrivateKeySecret: dataplane-ansible-ssh-private-key-secret + managementNetwork: ctlplane + networks: + - 
defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + nodes: + edpm-compute-0: + ansible: + ansibleHost: 192.168.122.100 + hostName: edpm-compute-0 + networks: + - defaultRoute: true + fixedIP: 192.168.122.100 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-compute-1: + ansible: + ansibleHost: 192.168.122.101 + hostName: edpm-compute-1 + networks: + - defaultRoute: true + fixedIP: 192.168.122.101 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: storage + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + preProvisioned: true + services: + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-metadata + - libvirt + - nova + - telemetry diff --git a/scenarios/secret-rotate/manifests/networker/networker.yaml b/scenarios/secret-rotate/manifests/networker/networker.yaml new file mode 100644 index 00000000..ab3599b0 --- /dev/null +++ b/scenarios/secret-rotate/manifests/networker/networker.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: networker-deploy + namespace: openstack +spec: + nodeSets: + - networkers diff --git a/scenarios/secret-rotate/manifests/networker/nodeset/nodeset.yaml b/scenarios/secret-rotate/manifests/networker/nodeset/nodeset.yaml new file mode 100644 index 00000000..70699bff --- /dev/null +++ b/scenarios/secret-rotate/manifests/networker/nodeset/nodeset.yaml @@ -0,0 +1,141 @@ +--- +apiVersion: v1 +data: + LibvirtPassword: MTIzNDU2Nzg= +kind: Secret +metadata: + name: libvirt-secret + namespace: openstack +type: Opaque +--- +apiVersion: 
dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneNodeSet +metadata: + name: networkers + namespace: openstack +spec: + env: + - name: ANSIBLE_FORCE_COLOR + value: "True" + networkAttachments: + - ctlplane + nodeTemplate: + ansible: + ansiblePort: 22 + ansibleUser: cloud-admin + ansibleVars: + edpm_bootstrap_command: | + # REPLACED + edpm_enable_chassis_gw: true + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic1 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: >- + {{ + lookup('vars', networks_lower[network] ~ '_ip') + }}/{{ + lookup('vars', networks_lower[network] ~ '_cidr') + }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + edpm_ovn_availability_zones: + - zone-2 + edpm_sshd_allowed_ranges: + - 192.168.122.0/24 + edpm_sshd_configure_firewall: true + gather_facts: false + neutron_physical_bridge_name: br-ex + neutron_public_interface_name: eth0 + timesync_ntp_servers: + - hostname: pool.ntp.org + ansibleSSHPrivateKeySecret: 
dataplane-ansible-ssh-private-key-secret + managementNetwork: ctlplane + networks: + - defaultRoute: true + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + nodes: + edpm-networker-0: + ansible: + ansibleHost: 192.168.122.105 + hostName: edpm-networker-0 + networks: + - defaultRoute: true + fixedIP: 192.168.122.105 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-networker-1: + ansible: + ansibleHost: 192.168.122.106 + hostName: edpm-networker-1 + networks: + - defaultRoute: true + fixedIP: 192.168.122.106 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + edpm-networker-2: + ansible: + ansibleHost: 192.168.122.107 + hostName: edpm-networker-2 + networks: + - defaultRoute: true + fixedIP: 192.168.122.107 + name: ctlplane + subnetName: subnet1 + - name: internalapi + subnetName: subnet1 + - name: tenant + subnetName: subnet1 + preProvisioned: true + services: + - bootstrap + - configure-network + - validate-network + - install-os + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ovn + - neutron-metadata diff --git a/scenarios/secret-rotate/manifests/update/update-ovn.yaml b/scenarios/secret-rotate/manifests/update/update-ovn.yaml new file mode 100644 index 00000000..f63d0463 --- /dev/null +++ b/scenarios/secret-rotate/manifests/update/update-ovn.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: dataplane-update-ovn + namespace: openstack +spec: + nodeSets: + - edpm + - networkers + servicesOverride: + - ovn diff --git a/scenarios/secret-rotate/manifests/update/update-reboot.yaml b/scenarios/secret-rotate/manifests/update/update-reboot.yaml new file mode 100644 index 00000000..679318ee --- /dev/null +++ 
b/scenarios/secret-rotate/manifests/update/update-reboot.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: dataplane-update-reboot + namespace: openstack +spec: + nodeSets: + - edpm + - networkers + servicesOverride: + - reboot-os + ansibleExtraVars: + edpm_reboot_strategy: force +# ansibleLimit: ,..., diff --git a/scenarios/secret-rotate/manifests/update/update-services.yaml b/scenarios/secret-rotate/manifests/update/update-services.yaml new file mode 100644 index 00000000..cfa47be2 --- /dev/null +++ b/scenarios/secret-rotate/manifests/update/update-services.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: dataplane.openstack.org/v1beta1 +kind: OpenStackDataPlaneDeployment +metadata: + name: dataplane-update-services + namespace: openstack +spec: + nodeSets: + - edpm + - networkers + servicesOverride: + - update + - reboot-os + ansibleExtraVars: + edpm_reboot_strategy: never diff --git a/scenarios/secret-rotate/test-operator/automation-vars.yml b/scenarios/secret-rotate/test-operator/automation-vars.yml new file mode 100644 index 00000000..2123261f --- /dev/null +++ b/scenarios/secret-rotate/test-operator/automation-vars.yml @@ -0,0 +1,393 @@ +--- +stages: + - name: Set a multiattach volume type and create it if needed + shell: | + set -xe -o pipefail + oc project openstack + + oc rsh openstackclient openstack volume type show multiattach &>/dev/null || \ + oc rsh openstackclient openstack volume type create multiattach + + oc rsh openstackclient openstack volume type set --property multiattach=" True" multiattach + + - name: Create public network if needed + shell: | + set -xe -o pipefail + oc project openstack + + oc rsh openstackclient openstack network show public &>/dev/null || \ + oc rsh openstackclient openstack network create public \ + --external \ + --no-share \ + --default \ + --provider-network-type flat \ + --provider-physical-network datacentre + + - name: Create subnet on public 
network if needed + shell: | + set -xe -o pipefail + oc project openstack + + oc rsh openstackclient openstack subnet show public_subnet &>/dev/null || \ + oc rsh openstackclient openstack subnet create public_subnet \ + --network public \ + --subnet-range 192.168.122.0/24 \ + --allocation-pool start=192.168.122.171,end=192.168.122.250 \ + --gateway 192.168.122.1 \ + --dhcp + + - name: Create private network if needed + shell: | + set -xe -o pipefail + oc project openstack + + oc rsh openstackclient openstack network show private &>/dev/null || \ + oc rsh openstackclient openstack network create private --share + + - name: Create subnet on private network if needed + shell: | + set -xe -o pipefail + oc project openstack + + oc rsh openstackclient openstack subnet show private_subnet &>/dev/null || \ + oc rsh openstackclient openstack subnet create private_subnet \ + --network private \ + --subnet-range 10.2.0.0/24 \ + --allocation-pool start=10.2.0.10,end=10.2.0.250 \ + --gateway 10.2.0.1 \ + --dhcp + + - name: Create network for ironic provisioning if needed + shell: | + set -xe -o pipefail + oc project openstack + + oc rsh openstackclient openstack network show provisioning &>/dev/null || \ + oc rsh openstackclient \ + openstack network create provisioning \ + --share \ + --provider-physical-network ironic \ + --availability-zone-hint zone-1 \ + --provider-network-type flat + + - name: Create subnet for ironic provisioning if needed + shell: | + set -xe -o pipefail + oc project openstack + + oc rsh openstackclient openstack subnet show provisioning-subnet &>/dev/null || \ + oc rsh openstackclient \ + openstack subnet create provisioning-subnet \ + --network provisioning \ + --subnet-range 172.20.1.0/24 \ + --gateway 172.20.1.1 \ + --dns-nameserver 192.168.122.80 \ + --allocation-pool start=172.20.1.100,end=172.20.1.200 + + - name: Create baremetal flavor if needed + shell: | + set -xe -o pipefail + oc project openstack + + oc rsh openstackclient openstack flavor 
show baremetal &>/dev/null || \ + oc rsh openstackclient \ + openstack flavor create baremetal \ + --id 123456789-1234-1234-1234-000000000001 \ + --ram 1024 \ + --vcpus 1 \ + --disk 15 \ + --property resources:VCPU=0 \ + --property resources:MEMORY_MB=0 \ + --property resources:DISK_GB=0 \ + --property resources:CUSTOM_BAREMETAL=1 \ + --property capabilities:boot_mode=uefi + + - name: Get CA cert and add to chain + shell: | + set -xe -o pipefail + oc project openstack + + oc get secret rootca-public -n openstack -o json | jq -r '.data."ca.crt"' | base64 -d > rhoso-ca.crt + sudo cp rhoso-ca.crt /etc/pki/ca-trust/source/anchors/ + sudo update-ca-trust + + - name: Set up clouds.yaml on hotstack controller + shell: | + set -xe -o pipefail + oc project openstack + + mkdir -p /home/zuul/.config/openstack + oc cp openstack/openstackclient:.config/openstack/clouds.yaml /home/zuul/.config/openstack/clouds.yaml + oc cp openstack/openstackclient:.config/openstack/secure.yaml .config/openstack/secure.yaml + + - name: Install openstack client on hotstack controller + shell: | + set -xe -o pipefail + sudo dnf config-manager --enable crb + sudo dnf install -y centos-release-openstack-antelope.noarch + sudo dnf install -y python-openstackclient + + - name: Stop octavia pods + shell: | + set -xe -o pipefail + NAMESPACE="openstack" + OSCP="controlplane" + + oc patch oscp ${OSCP} -n ${NAMESPACE} \ + --type=merge \ + --patch ' + spec: + octavia: + template: + octaviaHealthManager: + nodeSelector: + amphoracontroller: "false" + octaviaHousekeeping: + nodeSelector: + amphoracontroller: "false" + octaviaWorker: + nodeSelector: + amphoracontroller: "false" + ' + + - name: Galera Database Passwords + shell: | + {% raw %} + set -xe -o pipefail + NAMESPACE="openstack" + OSCP="controlplane" + OLD_PASSWORD="12345678" + NEW_PASSWORD="DBPass123" + + # Dynamically get Galera instance names from oscp + GALERA_INSTANCES=($(oc get oscp ${OSCP} -n ${NAMESPACE} -o json | jq -r '.spec.galera.templates 
| keys[]')) + + # Build CURRENT_REPLICAS array by iterating over instances + CURRENT_REPLICAS=() + for GALERA_NAME in "${GALERA_INSTANCES[@]}"; do + REPLICAS=$(oc get oscp ${OSCP} -n ${NAMESPACE} -o jsonpath="{.spec.galera.templates.${GALERA_NAME}.replicas}") + CURRENT_REPLICAS+=("${REPLICAS}") + done + + for GALERA_NAME in "${GALERA_INSTANCES[@]}"; do + POD_NAME="${GALERA_NAME}-galera-0" + echo "Scaling down ${POD_NAME}..." + oc patch oscp ${OSCP} -n ${NAMESPACE} --type=json \ + -p "[{\"op\": \"replace\", \"path\": \"/spec/galera/templates/${GALERA_NAME}/replicas\", \"value\": 1}]" + done + + # Wait for each Galera instance to scale down to 1 replica + for GALERA_NAME in "${GALERA_INSTANCES[@]}"; do + echo "Waiting for ${GALERA_NAME}-galera to have 1 ready replica..." + oc -n ${NAMESPACE} wait statefulsets.apps ${GALERA_NAME}-galera --for=jsonpath='{.status.readyReplicas}'=1 --timeout=5m + done + + # Change the password in the database for each Galera instance + for GALERA_NAME in "${GALERA_INSTANCES[@]}"; do + POD_NAME="${GALERA_NAME}-galera-0" + oc exec -it ${POD_NAME} -n ${NAMESPACE} -c galera -- bash -c " + mysql -uroot -p'${OLD_PASSWORD}' <<'EOSQL' + ALTER USER 'root'@'localhost' IDENTIFIED BY '${NEW_PASSWORD}'; + ALTER USER 'root'@'%' IDENTIFIED BY '${NEW_PASSWORD}'; + FLUSH PRIVILEGES; + SELECT 'Password changed for ${POD_NAME}' AS Status; + EOSQL + " + + if [ $? -ne 0 ]; then + echo "ERROR: Failed to change password in ${POD_NAME}" + exit 1 + fi + echo "Password changed in MySQL for ${POD_NAME}" + + echo "" + echo "Verify new password works for ${POD_NAME}" + oc exec ${POD_NAME} -n ${NAMESPACE} -c galera -- \ + mysql -uroot -p"${NEW_PASSWORD}" -e "SELECT 'Verified ${POD_NAME}' AS Status;" + + if [ $? 
-ne 0 ]; then + echo "ERROR: New password verification failed for ${POD_NAME}" + exit 1 + fi + echo "New password verified for ${POD_NAME}" + done + + # Update osp-secret with the new password + NEW_PASSWORD_B64=$(echo -n "${NEW_PASSWORD}" | base64 -w 0) + oc patch secret osp-secret -n openstack --type='json' \ + -p="[{\"op\": \"replace\", \"path\": \"/data/DbRootPassword\", \"value\": \"${NEW_PASSWORD_B64}\"}]" + + # Restart the Galera Pods + for GALERA_NAME in "${GALERA_INSTANCES[@]}"; do + POD_NAME="${GALERA_NAME}-galera-0" + oc delete pod ${POD_NAME} -n ${NAMESPACE} + + echo "Waiting for ${POD_NAME} to be ready..." + oc wait --for=condition=ready pod/${POD_NAME} -n ${NAMESPACE} --timeout=300s + + if [ $? -ne 0 ]; then + echo "ERROR: ${POD_NAME} failed to become ready" + exit 1 + fi + echo "${POD_NAME} restarted successfully" + echo "" + done + + # Verify the new password with the new pods + for GALERA_NAME in "${GALERA_INSTANCES[@]}"; do + POD_NAME="${GALERA_NAME}-galera-0" + echo "Verifying ${POD_NAME}..." + + # Test connection with new password + oc exec ${POD_NAME} -n ${NAMESPACE} -c galera -- \ + mysql -uroot -p"${NEW_PASSWORD}" -e "SELECT 'Connection OK' AS Status;" >/dev/null 2>&1 + + if [ $? 
-eq 0 ]; then + echo "${POD_NAME} - Connection OK with new password" + else + echo "${POD_NAME} - Connection FAILED with new password" + fi + + # Check cluster health + CLUSTER_SIZE=$(oc exec ${POD_NAME} -n ${NAMESPACE} -c galera -- \ + mysql -uroot -p"${NEW_PASSWORD}" -sNe "SHOW STATUS LIKE 'wsrep_cluster_size';" 2>/dev/null | awk '{print $2}') + echo " Cluster size: ${CLUSTER_SIZE}" + + CLUSTER_STATUS=$(oc exec ${POD_NAME} -n ${NAMESPACE} -c galera -- \ + mysql -uroot -p"${NEW_PASSWORD}" -sNe "SHOW STATUS LIKE 'wsrep_local_state_comment';" 2>/dev/null | awk '{print $2}') + echo " Cluster status: ${CLUSTER_STATUS}" + + if [ "${CLUSTER_STATUS}" != "Synced" ]; then + echo " WARNING: Cluster is not synced" + fi + echo "" + done + + # Rescale the pods back to their original sizes + for ((i=0; i<${#GALERA_INSTANCES[@]}; i++)); do + POD_NAME="${GALERA_INSTANCES[$i]}-galera-0" + echo "Scaling up ${POD_NAME} to ${CURRENT_REPLICAS[$i]} replicas..." + oc patch oscp ${OSCP} -n ${NAMESPACE} --type=json \ + -p "[{\"op\": \"replace\", \"path\": \"/spec/galera/templates/${GALERA_INSTANCES[$i]}/replicas\", \"value\": ${CURRENT_REPLICAS[$i]}}]" + done + + # Wait for each Galera instance to scale back up to original replica count + for ((i=0; i<${#GALERA_INSTANCES[@]}; i++)); do + echo "Waiting for ${GALERA_INSTANCES[$i]}-galera to have ${CURRENT_REPLICAS[$i]} ready replicas..." 
+ oc -n ${NAMESPACE} wait statefulsets.apps ${GALERA_INSTANCES[$i]}-galera \ + --for=jsonpath='{.status.readyReplicas}'=${CURRENT_REPLICAS[$i]} --timeout=5m + done + {% endraw %} + + - name: RabbitMQ Password Rotation + shell: | + set -xe -o pipefail + NAMESPACE="openstack" + RABBITMQ_CLUSTERS=$(oc get rabbitmq.rabbitmq.openstack.org -n ${NAMESPACE} -o jsonpath='{.items[*].metadata.name}') + + # Delete RabbitMQ clusters + for CLUSTER in ${RABBITMQ_CLUSTERS}; do + echo "Deleting RabbitMQ cluster: ${CLUSTER}" + oc delete rabbitmq.rabbitmq.openstack.org/${CLUSTER} -n ${NAMESPACE} + done + + # Wait for clusters to be deleted and recreated + echo "Waiting 30 seconds for RabbitMQ clusters to be recreated..." + sleep 30 + + # Wait for each RabbitMQ cluster to come back and be ready + for CLUSTER in ${RABBITMQ_CLUSTERS}; do + echo "Waiting for RabbitMQ cluster ${CLUSTER} to be ready..." + oc wait rabbitmq.rabbitmq.openstack.org/${CLUSTER} -n ${NAMESPACE} \ + --for=condition=Ready --timeout=10m + done + + # - name: Run script to create new users + # script: scripts/create-users.sh + + # - name: Create new admin user + # shell: | + # set -xe -o pipefail + # oc project openstack + # oc rsh openstackclient openstack user create admin-new \ + # --domain default \ + # --password admin_Pass123 + # oc rsh openstackclient openstack role add admin\ + # --user admin-new \ + # --project default + + # - name: Create new heat_stack_domain_admin user + # shell: | + # set -xe -o pipefail + # oc project openstack + # oc rsh openstackclient openstack user create heat_stack_domain_admin-new \ + # --domain heat_stack \ + # --password heat_stack_domain_admin_Pass123 + + # oc rsh openstackclient openstack role add admin \ + # --user heat_stack_domain_admin-new \ + # --domain heat_stack + + # - name: Create osp-secret-new with updated passwords + # shell: | + # set -xe -o pipefail + # oc project openstack + + # # Create the new secret with updated passwords + # oc create secret generic 
osp-secret-new \ + # --from-literal=AdminPassword=admin_Pass123 \ + # --from-literal=AodhPassword=aodh_Pass123 \ + # --from-literal=BarbicanPassword=barbican_Pass123 \ + # --from-literal=CeilometerPassword=ceilometer_Pass123 \ + # --from-literal=CinderPassword=cinder_Pass123 \ + # --from-literal=GlancePassword=glance_Pass123 \ + # --from-literal=HeatPassword=heat_Pass123 \ + # --from-literal=IronicPassword=ironic_Pass123 \ + # --from-literal=ManilaPassword=manila_Pass123 \ + # --from-literal=NeutronPassword=neutron_Pass123 \ + # --from-literal=NovaPassword=nova_Pass123 \ + # --from-literal=OctaviaPassword=octavia_Pass123 \ + # --from-literal=PlacementPassword=placement_Pass123 \ + # --from-literal=SwiftPassword=swift_Pass123 \ + # --from-literal=AodhDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=DbRootPassword=MTIzNDU2Nzg5 \ + # --from-literal=BarbicanDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=CinderDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=DesignateDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=DesignatePassword=MTIzNDU2Nzg5 \ + # --from-literal=GlanceDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=HeatDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=IronicDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=IronicInspectorDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=IronicInspectorPassword=MTIzNDU2Nzg5 \ + # --from-literal=KeystoneDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=ManilaDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=MetadataSecret=MTIzNDU2Nzg0Mw== \ + # --from-literal=NeutronDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=NovaAPIDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=NovaCell0DatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=NovaCell1DatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=OctaviaDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=PlacementDatabasePassword=MTIzNDU2Nzg5 \ + # --from-literal=BarbicanSimpleCryptoKEK=r0wDZ1zrD5upafX9RDfYqvDkW2LENBWH7Gz9+Tr3NdM= \ + # 
--from-literal=HeatAuthEncryptionKey=NzY3YzNlZDA1NmNiYWEzYjlkZmVkYjhjNmY4MjViZjA= \ + # --from-literal=OctaviaHeartbeatKey=MTIzNDU2Nzg5 \ + # --type=Opaque \ + # --dry-run=client -o yaml | oc apply -f - + + - name: Run tempest + documentation: >- + Executes comprehensive OpenStack validation tests using the Tempest framework. + manifest: tempest-tests.yml + wait_conditions: + - >- + oc wait -n openstack tempests.test.openstack.org tempest-tests + --for condition=ServiceConfigReady --timeout=120s + wait_pod_completion: + - namespace: openstack + labels: + operator: test-operator + service: tempest + workflowStep: "0" + timeout: 3600 + poll_interval: 15 diff --git a/scenarios/secret-rotate/test-operator/scripts/create-users.sh b/scenarios/secret-rotate/test-operator/scripts/create-users.sh new file mode 100755 index 00000000..ba9d4e08 --- /dev/null +++ b/scenarios/secret-rotate/test-operator/scripts/create-users.sh @@ -0,0 +1,110 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Usage: +# DOMAIN=Default PROJECT=service ROLE=service ./create_new_users_auto.sh +# Defaults: +export OS_CLOUD="${OS_CLOUD:-default}" +DOMAIN="${DOMAIN:-Default}" +PROJECT="${PROJECT:-service}" +ROLE="${ROLE:-service}" + +OUTCSV="${OUTCSV:-created_users.csv}" + +# Do not allow admin role to be used by this script +if [ "$ROLE" = "admin" ]; then + echo "ERROR: Using ROLE=admin is disallowed by policy for this script. Use ROLE=service (default) or another non-admin role." + exit 2 +fi + +# header for CSV (safe permissions) +: > "$OUTCSV" +chmod 600 "$OUTCSV" +echo "original_user,new_user,password" > "$OUTCSV" + +# quick auth check +if ! openstack token issue >/dev/null 2>&1; then + echo "ERROR: openstack CLI cannot get a token. Source your RC and retry." + exit 1 +fi + +# check role exists +if ! openstack role show "$ROLE" >/dev/null 2>&1; then + echo "ERROR: role '$ROLE' not found. Create the role or choose a different ROLE." 
+ exit 2 +fi + +echo "Domain: $DOMAIN Project: $PROJECT Role: $ROLE" +echo "Will write created users to: $OUTCSV" +echo + +# Fetch all users in domain, one per line +mapfile -t EXISTING_USERS < <(openstack user list --domain "$DOMAIN" -f value -c Name) + +if [ "${#EXISTING_USERS[@]}" -eq 0 ]; then + echo "No users found in domain '$DOMAIN'. Exiting." + exit 0 +fi + +echo "Found ${#EXISTING_USERS[@]} users. Creating new test users for each..." + +# skip list: admin will be skipped (you wanted to create that manually) +EXCLUDE_REGEX="${EXCLUDE_REGEX:-^admin$}" + +for orig in "${EXISTING_USERS[@]}"; do + # skip users that look like they're already '-new' variants to avoid infinite loops + if [[ "$orig" =~ -new($|-[0-9]+$) ]]; then + echo "SKIP: '$orig' looks like a test user (suffix -new)." + continue + fi + + # skip explicitly excluded users (admin) + if [[ "$orig" =~ $EXCLUDE_REGEX ]]; then + echo "SKIP (excluded): '$orig'" + continue + fi + + # build base new username and find a free candidate + base="${orig}-new" + candidate="$base" + suffix=1 + while openstack user show "$candidate" --domain "$DOMAIN" -f value -c id >/dev/null 2>&1; do + suffix=$((suffix + 1)) + candidate="${base}-${suffix}" + done + NEW_USER="$candidate" + + # derive a simple test password (safe-ish) + safe_orig=$(echo "$orig" | tr -c '[:alnum:]' '_') + PASSWORD="${safe_orig}Pass123" + + echo -n "Creating user '$NEW_USER' for original '$orig' ... 
" + + # create user + openstack user create \ + --domain "$DOMAIN" \ + --password "$PASSWORD" \ + --description "Auto-created test user for ${orig}" \ + "$NEW_USER" + + # set default project for the user (so openstack user show shows it) + openstack user set --project "$PROJECT" --project-domain "$DOMAIN" "$NEW_USER" + + # assign role on the project (explicit domain flags) + openstack role add \ + --user "$NEW_USER" \ + --user-domain "$DOMAIN" \ + --project "$PROJECT" \ + --project-domain "$DOMAIN" \ + "$ROLE" + + # record in CSV + printf '%s,%s,%s\n' "$orig" "$NEW_USER" "$PASSWORD" >> "$OUTCSV" + + echo "OK" +done + +echo +echo "Done. Created users recorded in: $OUTCSV" +echo "Preview:" +column -t -s, "$OUTCSV" | sed -n '1,200p' diff --git a/scenarios/secret-rotate/test-operator/tempest-tests.yml b/scenarios/secret-rotate/test-operator/tempest-tests.yml new file mode 100644 index 00000000..8f8d18bf --- /dev/null +++ b/scenarios/secret-rotate/test-operator/tempest-tests.yml @@ -0,0 +1,226 @@ +--- +apiVersion: test.openstack.org/v1beta1 +kind: Tempest +metadata: + name: tempest-tests + namespace: openstack +spec: + resources: + requests: + cpu: 2000m + memory: 2Gi + limits: + cpu: 8000m + memory: 6Gi + networkAttachments: + - ctlplane + privileged: true + tempestRun: + + workflow: + - stepName: multi-thread-testing + storageClass: lvms-local-storage + resources: + requests: + cpu: 2000m + memory: 2Gi + limits: + cpu: 8000m + memory: 6Gi + tempestconfRun: + create: true + overrides: | + auth.tempest_roles swiftoperator + barbican_tempest.enable_multiple_secret_stores true + compute-feature-enabled.allow_port_security_disabled true + compute-feature-enabled.attach_encrypted_volume true + compute-feature-enabled.block_migrate_cinder_iscsi false + compute-feature-enabled.block_migration_for_live_migration true + compute-feature-enabled.can_migrate_between_any_hosts false + compute-feature-enabled.cold_migration true + compute-feature-enabled.console_output false + 
compute-feature-enabled.dhcp_domain '' + compute-feature-enabled.ide_bus False + compute-feature-enabled.live_migration true + compute-feature-enabled.vnc_console true + compute-feature-enabled.volume_multiattach true + compute.fixed_network_name private + compute.hypervisor_type QEMU + compute.migration_dest_host edpm-compute-1.ctlplane.example.com + compute.migration_source_host edpm-compute-0.ctlplane.example.com + compute.target_hosts_to_avoid compute-ironic + heat_plugin.vm_to_heat_api_insecure true + identity-feature-enabled.application_credentials true + identity.v2_admin_endpoint_type public + identity.v3_endpoint_type public + image_signature_verification.certificate_validation False + image_signature_verification.enforced False + load_balancer.test_server_path /usr/libexec/octavia-tempest-plugin-tests-httpd + network-feature-enabled.port_security true + neutron_plugin_options.advanced_image_ssh_user 'cloud-user' + neutron_plugin_options.available_type_drivers 'geneve' + neutron_plugin_options.create_shared_resources true + neutron_plugin_options.firewall_driver ovn + neutron_plugin_options.ipv6_metadata false + neutron_plugin_options.is_igmp_snooping_enabled true + service_available.ceilometer true + service_available.sg_core true + telemetry.ceilometer_polling_interval 120 + telemetry.prometheus_scrape_interval 30 + telemetry.prometheus_service_url "https://metric-storage-prometheus.openstack.svc.cluster.local:9090" + telemetry.sg_core_service_url "https://ceilometer-internal.openstack.svc.cluster.local:3000" + validation.allowed_network_downtime 15 + validation.image_alt_ssh_password cubswin:) + validation.image_alt_ssh_user cirros + validation.image_ssh_password cubswin:) + validation.image_ssh_user cirros + validation.run_validation true + volume-feature-enabled.extend_attached_volume true + volume-feature-enabled.manage_snapshot true + volume-feature-enabled.manage_volume true + volume-feature-enabled.volume_revert true + volume.storage_protocol 
'iSCSI' + volume.volume_type_multiattach 'multiattach' + tempestRun: + concurrency: 4 + includeList: | + tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops + # ^barbican_tempest_plugin + # ^tempest.api.compute + # ^heat_integrationtests + # ^heat_tempest_plugin..* + # ^tempest.api.identity..* + # ^keystone_tempest_plugin..* + # ^tempest.api.image.* + # tempest.scenario.test_stamp_pattern.* + # ^tempest.scenario.test_snapshot_pattern.* + # ^glance_tempest_plugin.* + # ^tempest.api.network.* + # ^tempest.scenario.test_network_advanced_server_ops + # ^tempest.scenario.test_network_basic_ops + # ^tempest.scenario.test_network_v6 + # ^tempest.scenario.test_security_groups_basic_ops + # ^neutron_tempest_plugin.* + # ^tempest.api.object_storage.* + # ^tempest.scenario.test_object_storage_basic_ops.* + # ^tempest.api.volume.* + # ^tempest.scenario.test_encrypted_cinder_volumes.* + # ^tempest.scenario.test_minimum_basic.* + # ^tempest.scenario.test_stamp_pattern.* + # ^tempest.scenario.test_volume_.* + # ^cinder_tempest_plugin.* + # ^tempest.api.telemetry.* + # ^telemetry_tempest_plugin.* + # These tests are have hard dependencies on ci-framework or devstack. + # Changes needed in https://opendev.org/x/whitebox-neutron-tempest-plugin is + # required. 
+ # whitebox_neutron_tempest_plugin.* + excludeList: | + test_encrypted_cinder_volumes_cryptsetup + octavia_tempest_plugin.tests.act_stdby_scenario + octavia_tempest_plugin.tests.spare_pool_scenario + est_established_tcp_session_after_re_attachinging_sg + heat_tempest_plugin.tests.scenario.test_octavia_lbaas.LoadBalancerTest + heat_tempest_plugin.tests.scenario.test_aodh_alarm.AodhAlarmTest.test_alarm + # Unskip this after https://issues.redhat.com/browse/OSPRH-7820 + heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_raw + heat_tempest_plugin.tests.scenario.test_server_signal.ServerSignalIntegrationTest.test_server_signal_userdata_format_software_config + ^tempest.api.compute.admin.test_auto_allocate_network.AutoAllocateNetworkTest.test_server_multi_create_auto_allocate + ^tempest.api.compute.admin.test_live_migration.LiveMigrationTest.test_live_block_migration_paused + ^tempest.api.compute.admin.test_live_migration.LiveAutoBlockMigrationV225Test.test_live_block_migration_paused + ^tempest.api.compute.admin.test_live_migration.LiveMigrationRemoteConsolesV26Test.test_live_block_migration_paused + telemetry_tempest_plugin.scenario.test_telemetry_integration_prometheus.PrometheusGabbiTest.test_autoscaling + heat_integrationtests.functional.test_aws_stack + heat_integrationtests.functional.test_cancel_update.CancelUpdateTest.test_cancel_update_server_with_port + heat_integrationtests.functional.test_reload_on_sighup + heat_integrationtests.functional.test_resource_group.ResourceGroupAdoptTest.test_adopt + heat_integrationtests.functional.test_software_config.ZaqarSignalTransportTest.test_signal_queues + heat_integrationtests.functional.test_waitcondition.ZaqarWaitConditionTest + heat_integrationtests.functional.test_event_sinks.ZaqarEventSinkTest.test_events + heat_integrationtests.functional.test_stack_tags.StackTagTest.test_hidden_stack + 
heat_integrationtests.functional.test_template_resource.TemplateResourceAdoptTest + heat_integrationtests.functional.test_purge.PurgeTest.test_purge + heat_integrationtests.functional.test_notifications.NotificationTest + heat_integrationtests.functional.test_os_wait_condition.OSWaitCondition + heat_integrationtests.scenario.test_base_resources.BasicResourcesTest.test_base_resources_integration + heat_integrationtests.scenario.test_server_software_config + heat_integrationtests.scenario.test_volumes + heat_integrationtests.scenario.test_server_cfn_init + heat_tempest_plugin.tests.functional.test_aws_stack + heat_tempest_plugin.tests.functional.test_software_config.ZaqarSignalTransportTest.test_signal_queues + heat_tempest_plugin.tests.functional.test_waitcondition.ZaqarWaitConditionTest + heat_tempest_plugin.tests.functional.test_event_sinks.ZaqarEventSinkTest.test_events + heat_tempest_plugin.tests.functional.test_os_wait_condition.OSWaitCondition + heat_tempest_plugin.tests.scenario.test_base_resources.BasicResourcesTest.test_base_resources_integration + heat_tempest_plugin.tests.scenario.test_server_software_config + heat_tempest_plugin.tests.scenario.test_volumes + heat_tempest_plugin.tests.scenario.test_server_cfn_init + ^neutron_.*plugin..*scenario.test_.*macvtap + test_multicast.*restart + test_multicast.*ext* + ^neutron_tempest_plugin.fwaas.* + test_port_security_macspoofing_port + DHCPAgentSchedulers + test_agent_management.AgentManagementTestJSON + test_router_interface_status + test_connectivity_min_max_mtu + + - stepName: ironic-api-testing + storageClass: lvms-local-storage + resources: + requests: + cpu: 2000m + memory: 2Gi + limits: + cpu: 8000m + memory: 6Gi + tempestconfRun: + overrides: | + baremetal.driver fake-hardware + baremetal.max_microversion 1.82 + service_available.ironic_inspector true + service_available.ironic true + tempestRun: + concurrency: 8 + includeList: | + ^ironic_tempest_plugin.tests.api.* + excludeList: | + 
^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_candidate_node + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_node_mismatch + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_resource_class_mismatch + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_traits_mismatch + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_allocation_with_traits + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_create_show_allocation + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_delete_allocation + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_delete_allocation_by_name + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_list_allocations + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_list_allocations_by_state + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestAllocations.test_show_by_name + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestBackfill.test_backfill_allocation + ^ironic_tempest_plugin.tests.api.admin.test_allocations.TestBackfill.test_backfill_without_resource_class + ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_11.test_set_node_provision_state + ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_1.test_set_node_provision_state + ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_2.test_set_node_provision_state + ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_4.test_set_node_provision_state + ^ironic_tempest_plugin.tests.api.admin.test_nodestates.TestNodeStatesV1_6.test_set_node_provision_state + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestHardwareInterfaces.test_reset_interfaces + 
^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodeProtected.test_node_protected + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodeProtected.test_node_protected_from_deletion + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodeProtected.test_node_protected_negative + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodeProtected.test_node_protected_set_unset + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesProtectedOldApi.test_node_protected_old_api + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_already_attached_on_internal_info + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_already_attached_with_portgroups + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_already_set + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_no_args + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_no_free_port + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_no_port + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_port_not_in_portgroup + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_attach_with_empty_portgroup + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_detach_not_existing + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_on_port + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestNodesVif.test_vif_on_portgroup + ^ironic_tempest_plugin.tests.api.admin.test_nodes.TestResetInterfaces.test_reset_interfaces + ^ironic_tempest_plugin.tests.api.rbac_defaults.test_nodes.TestNodeProjectReader.test_reader_cannot_update_owner_provisioned + ^ironic_tempest_plugin.tests.api.rbac_defaults.test_nodes.TestNodeSystemReader.test_reader_cannot_update_owner_provisioned + ^ironic_tempest_plugin.tests.api.rbac_defaults.test_nodes.TestNodeSystemReader.* diff --git a/zuul.d/jobs.yaml 
b/zuul.d/jobs.yaml index fa024208..3692f606 100644 --- a/zuul.d/jobs.yaml +++ b/zuul.d/jobs.yaml @@ -131,3 +131,66 @@ files: - ^scenarios/3-nodes/.* - ^roles/.* + +- job: + name: vexxhost-hotstack-secret-rotate + parent: base-hotstack + nodeset: hotstack-image-vexxhost + description: | + Hotstack scenario: secret-rotate + timeout: 10800 + attempts: 1 + vars: + cloud_name: vexxhost + scenario: secret-rotate + scenario_dir: >- + {{ + [ + ansible_user_dir, + zuul.projects['github.com/openstack-k8s-operators/hotstack'].src_dir, + 'scenarios' + ] | ansible.builtin.path_join + }} + hotstack_overrides: + stack_parameters: + dns_servers: + - 199.204.44.24 + - 199.204.47.54 + ntp_servers: [] + router_external_network: public + floating_ip_network: public + controller_params: + image: hotstack-controller + # vcpu: 1 ram 2GB disk: 20GB + flavor: ci.m1.small + ocp_master_params: + image: ipxe-boot-usb + # vcpu: 16 ram 48GB disk: 200GB + flavor: 16vcpu_48GB + ocp_worker_params: + image: ipxe-boot-usb + # vcpu: 12 ram 32GB disk: 200GB + flavor: 12vcpu_32GB + compute_params: + image: cs9stream-genericcloud + # vcpu: 2 ram 4GB disk: 40GB + flavor: ci.m1.medium + networker_params: + image: cs9stream-genericcloud + # vcpu: 2 ram 4GB disk: 40GB + flavor: ci.m1.medium + bmh_params: + image: sushy-tools-blank-image + cd_image: sushy-tools-blank-image + # vcpu: 2 ram 4GB disk: 40GB + flavor: ci.m1.medium + ironic_params: + image: sushy-tools-blank-image + cd_image: sushy-tools-blank-image + # vcpu: 1 ram 2GB disk: 20GB + flavor: ci.m1.small + run: + - ci/playbooks/run-deploy.yml + - ci/playbooks/run-tests.yml + files: + - ^scenarios/secret-rotate/.* diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 4073ff99..343fc804 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -11,3 +11,4 @@ jobs: - vexxhost-hotstack-sno-2-bm - vexxhost-hotstack-3-nodes + - vexxhost-hotstack-secret-rotate