diff --git a/internal/operator-controller/controllers/clusterextensionrevision_controller.go b/internal/operator-controller/controllers/clusterextensionrevision_controller.go
index 138bcd8eba..5150262215 100644
--- a/internal/operator-controller/controllers/clusterextensionrevision_controller.go
+++ b/internal/operator-controller/controllers/clusterextensionrevision_controller.go
@@ -131,46 +131,50 @@ func checkForUnexpectedClusterExtensionRevisionFieldChange(a, b ocv1.ClusterExte
 	return !equality.Semantic.DeepEqual(a.Spec, b.Spec)
 }
 
-func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev *ocv1.ClusterExtensionRevision) (ctrl.Result, error) {
+func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, cer *ocv1.ClusterExtensionRevision) (ctrl.Result, error) {
 	l := log.FromContext(ctx)
 
-	revision, opts, err := c.toBoxcutterRevision(ctx, rev)
+	if !cer.DeletionTimestamp.IsZero() {
+		return c.delete(ctx, cer)
+	}
+
+	revision, opts, err := c.toBoxcutterRevision(ctx, cer)
 	if err != nil {
-		setRetryingConditions(rev, err.Error())
+		setRetryingConditions(cer, err.Error())
 		return ctrl.Result{}, fmt.Errorf("converting to boxcutter revision: %v", err)
 	}
 
-	if !rev.DeletionTimestamp.IsZero() || rev.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStateArchived {
-		return c.teardown(ctx, rev)
+	revisionEngine, err := c.RevisionEngineFactory.CreateRevisionEngine(ctx, cer)
+	if err != nil {
+		setRetryingConditions(cer, err.Error())
+		return ctrl.Result{}, fmt.Errorf("failed to create revision engine: %v", err)
+	}
+
+	if cer.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStateArchived {
+		if err := c.TrackingCache.Free(ctx, cer); err != nil {
+			markAsAvailableUnknown(cer, ocv1.ClusterExtensionRevisionReasonReconciling, err.Error())
+			return ctrl.Result{}, fmt.Errorf("error stopping informers: %v", err)
+		}
+
+		return c.archive(ctx, revisionEngine, cer, revision)
 	}
 
-	revVersion := rev.GetAnnotations()[labels.BundleVersionKey]
-
-	//
-	// Reconcile
-	//
-	if err := c.ensureFinalizer(ctx, rev, clusterExtensionRevisionTeardownFinalizer); err != nil {
+	if err := c.ensureFinalizer(ctx, cer, clusterExtensionRevisionTeardownFinalizer); err != nil {
 		return ctrl.Result{}, fmt.Errorf("error ensuring teardown finalizer: %v", err)
 	}
 
-	if err := c.establishWatch(ctx, rev, revision); err != nil {
+	if err := c.establishWatch(ctx, cer, revision); err != nil {
 		werr := fmt.Errorf("establish watch: %v", err)
-		setRetryingConditions(rev, werr.Error())
+		setRetryingConditions(cer, werr.Error())
 		return ctrl.Result{}, werr
 	}
 
-	revisionEngine, err := c.RevisionEngineFactory.CreateRevisionEngine(ctx, rev)
-	if err != nil {
-		setRetryingConditions(rev, err.Error())
-		return ctrl.Result{}, fmt.Errorf("failed to create revision engine: %v", err)
-	}
-
 	rres, err := revisionEngine.Reconcile(ctx, *revision, opts...)
 	if err != nil {
 		if rres != nil {
 			// Log detailed reconcile reports only in debug mode (V(1)) to reduce verbosity.
 			l.V(1).Info("reconcile report", "report", rres.String())
		}
-		setRetryingConditions(rev, err.Error())
+		setRetryingConditions(cer, err.Error())
 		return ctrl.Result{}, fmt.Errorf("revision reconcile: %v", err)
 	}
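The engine-creation call moves ahead of the lifecycle-state branch in this hunk because the archived path now needs a live RevisionEngine to run a real teardown (see the new archive helper further down), while deletion is short-circuited at the top of reconcile via the new delete helper. All of the error paths above funnel through setRetryingConditions; its definition is not part of this diff, but the tests added below pin down what it must record. A minimal sketch inferred from those assertions, using the meta/metav1 packages this file already imports (the real helper lives elsewhere in the package and may do more than this):

	// Sketch only: reconstructed from the test assertions, not the actual helper.
	func setRetryingConditions(cer *ocv1.ClusterExtensionRevision, message string) {
		meta.SetStatusCondition(&cer.Status.Conditions, metav1.Condition{
			Type:               ocv1.ClusterExtensionRevisionTypeProgressing,
			Status:             metav1.ConditionTrue,
			Reason:             ocv1.ClusterExtensionRevisionReasonRetrying,
			Message:            message,
			ObservedGeneration: cer.Generation,
		})
	}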
@@ -178,14 +182,14 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
 	// TODO: report status, backoff?
 	if verr := rres.GetValidationError(); verr != nil {
 		l.Error(fmt.Errorf("%w", verr), "preflight validation failed, retrying after 10s")
-		setRetryingConditions(rev, fmt.Sprintf("revision validation error: %s", verr))
+		setRetryingConditions(cer, fmt.Sprintf("revision validation error: %s", verr))
 		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
 	}
 
 	for i, pres := range rres.GetPhases() {
 		if verr := pres.GetValidationError(); verr != nil {
 			l.Error(fmt.Errorf("%w", verr), "phase preflight validation failed, retrying after 10s", "phase", i)
-			setRetryingConditions(rev, fmt.Sprintf("phase %d validation error: %s", i, verr))
+			setRetryingConditions(cer, fmt.Sprintf("phase %d validation error: %s", i, verr))
 			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
 		}
 
@@ -198,21 +202,22 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
 		if len(collidingObjs) > 0 {
 			l.Error(fmt.Errorf("object collision detected"), "object collision, retrying after 10s", "phase", i, "collisions", collidingObjs)
-			setRetryingConditions(rev, fmt.Sprintf("revision object collisions in phase %d\n%s", i, strings.Join(collidingObjs, "\n\n")))
+			setRetryingConditions(cer, fmt.Sprintf("revision object collisions in phase %d\n%s", i, strings.Join(collidingObjs, "\n\n")))
 			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
 		}
 	}
 
+	revVersion := cer.GetAnnotations()[labels.BundleVersionKey]
 	if !rres.InTransition() {
-		markAsProgressing(rev, ocv1.ReasonSucceeded, fmt.Sprintf("Revision %s has rolled out.", revVersion))
+		markAsProgressing(cer, ocv1.ReasonSucceeded, fmt.Sprintf("Revision %s has rolled out.", revVersion))
 	} else {
-		markAsProgressing(rev, ocv1.ReasonRollingOut, fmt.Sprintf("Revision %s is rolling out.", revVersion))
+		markAsProgressing(cer, ocv1.ReasonRollingOut, fmt.Sprintf("Revision %s is rolling out.", revVersion))
 	}
 
 	//nolint:nestif
 	if rres.IsComplete() {
 		// Archive previous revisions
-		previous, err := c.listPreviousRevisions(ctx, rev)
+		previous, err := c.listPreviousRevisions(ctx, cer)
 		if err != nil {
 			return ctrl.Result{}, fmt.Errorf("listing previous revisions: %v", err)
 		}
@@ -226,17 +231,17 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
 			}
 		}
 
-		markAsAvailable(rev, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, "Objects are available and pass all probes.")
+		markAsAvailable(cer, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, "Objects are available and pass all probes.")
 
 		// We'll probably only want to remove this once we are done updating the ClusterExtension conditions
 		// as its one of the interfaces between the revision and the extension. If we still have the Succeeded for now
 		// that's fine.
-		meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
+		meta.SetStatusCondition(&cer.Status.Conditions, metav1.Condition{
 			Type:               ocv1.ClusterExtensionRevisionTypeSucceeded,
 			Status:             metav1.ConditionTrue,
 			Reason:             ocv1.ReasonSucceeded,
 			Message:            "Revision succeeded rolling out.",
-			ObservedGeneration: rev.Generation,
+			ObservedGeneration: cer.Generation,
 		})
 	} else {
 		var probeFailureMsgs []string
@@ -266,27 +271,40 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
 		}
 
 		if len(probeFailureMsgs) > 0 {
-			markAsUnavailable(rev, ocv1.ClusterExtensionRevisionReasonProbeFailure, strings.Join(probeFailureMsgs, "\n"))
+			markAsUnavailable(cer, ocv1.ClusterExtensionRevisionReasonProbeFailure, strings.Join(probeFailureMsgs, "\n"))
 		} else {
-			markAsUnavailable(rev, ocv1.ReasonRollingOut, fmt.Sprintf("Revision %s is rolling out.", revVersion))
+			markAsUnavailable(cer, ocv1.ReasonRollingOut, fmt.Sprintf("Revision %s is rolling out.", revVersion))
 		}
 	}
 
 	return ctrl.Result{}, nil
 }
 
-func (c *ClusterExtensionRevisionReconciler) teardown(ctx context.Context, rev *ocv1.ClusterExtensionRevision) (ctrl.Result, error) {
-	if err := c.TrackingCache.Free(ctx, rev); err != nil {
-		markAsAvailableUnknown(rev, ocv1.ClusterExtensionRevisionReasonReconciling, err.Error())
+func (c *ClusterExtensionRevisionReconciler) delete(ctx context.Context, cer *ocv1.ClusterExtensionRevision) (ctrl.Result, error) {
+	if err := c.TrackingCache.Free(ctx, cer); err != nil {
 		return ctrl.Result{}, fmt.Errorf("error stopping informers: %v", err)
 	}
+	if err := c.removeFinalizer(ctx, cer, clusterExtensionRevisionTeardownFinalizer); err != nil {
+		return ctrl.Result{}, fmt.Errorf("error removing teardown finalizer: %v", err)
+	}
+	return ctrl.Result{}, nil
+}
+func (c *ClusterExtensionRevisionReconciler) archive(ctx context.Context, revisionEngine RevisionEngine, cer *ocv1.ClusterExtensionRevision, revision *boxcutter.Revision) (ctrl.Result, error) {
+	tdres, err := revisionEngine.Teardown(ctx, *revision)
+	if err != nil {
+		setRetryingConditions(cer, fmt.Sprintf("error archiving: %v", err))
+		return ctrl.Result{}, fmt.Errorf("error tearing down revision: %v", err)
+	}
+	if tdres != nil && !tdres.IsComplete() {
+		setRetryingConditions(cer, "tearing down revision")
+		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+	}
 
 	// Ensure conditions are set before removing the finalizer when archiving
-	if rev.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStateArchived && markAsArchived(rev) {
+	if markAsArchived(cer) {
 		return ctrl.Result{}, nil
 	}
-
-	if err := c.removeFinalizer(ctx, rev, clusterExtensionRevisionTeardownFinalizer); err != nil {
+	if err := c.removeFinalizer(ctx, cer, clusterExtensionRevisionTeardownFinalizer); err != nil {
 		return ctrl.Result{}, fmt.Errorf("error removing teardown finalizer: %v", err)
 	}
 	return ctrl.Result{}, nil
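The archive helper above is deliberately two-pass: once teardown reports completion, markAsArchived updates the conditions and returns true, so the function exits early and the resulting status update requeues the object; the finalizer is only removed on a later pass, once the conditions are already persisted. delete, by contrast, runs no teardown at all: it frees the tracking-cache informers and drops the finalizer, presumably leaving cleanup of the managed objects to ownerReference-based garbage collection. markAsArchived itself is not shown in this diff, but the condition values it must set are pinned down by the tests below; a plausible sketch, assuming it follows meta.SetStatusCondition's changed/unchanged contract:

	// Sketch only: reconstructed from the test expectations below. Returns
	// true if either condition actually changed.
	func markAsArchived(cer *ocv1.ClusterExtensionRevision) bool {
		availableChanged := meta.SetStatusCondition(&cer.Status.Conditions, metav1.Condition{
			Type:               ocv1.ClusterExtensionRevisionTypeAvailable,
			Status:             metav1.ConditionUnknown,
			Reason:             ocv1.ClusterExtensionRevisionReasonArchived,
			Message:            "revision is archived",
			ObservedGeneration: cer.Generation,
		})
		progressingChanged := meta.SetStatusCondition(&cer.Status.Conditions, metav1.Condition{
			Type:               ocv1.ClusterExtensionRevisionTypeProgressing,
			Status:             metav1.ConditionFalse,
			Reason:             ocv1.ClusterExtensionRevisionReasonArchived,
			Message:            "revision is archived",
			ObservedGeneration: cer.Generation,
		})
		return availableChanged || progressingChanged
	}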
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -628,7 +627,7 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_ValidationError_Retries(t } } -func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) { +func Test_ClusterExtensionRevisionReconciler_Reconcile_ArchivalAndDeletion(t *testing.T) { const ( clusterExtensionRevisionName = "test-ext-1" ) @@ -641,9 +640,11 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) { existingObjs func() []client.Object revisionResult machinery.RevisionResult revisionEngineTeardownFn func(*testing.T) func(context.Context, machinerytypes.Revision, ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) + revisionEngineFactoryErr error validate func(*testing.T, client.Client) trackingCacheFreeFn func(context.Context, client.Object) error expectedErr string + expectedResult ctrl.Result }{ { name: "teardown finalizer is removed", @@ -669,7 +670,7 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) { }, }, { - name: "revision is torn down and deleted when deleted", + name: "set Available:Archived:Unknown and Progressing:False:Archived conditions when a revision is archived", revisionResult: mockRevisionResult{}, existingObjs: func() []client.Object { ext := newTestClusterExtension() @@ -677,7 +678,7 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) { rev1.Finalizers = []string{ "olm.operatorframework.io/teardown", } - rev1.DeletionTimestamp = &metav1.Time{Time: time.Now()} + rev1.Spec.LifecycleState = ocv1.ClusterExtensionRevisionLifecycleStateArchived return []client.Object{rev1, ext} }, revisionEngineTeardownFn: func(t *testing.T) func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) { @@ -688,17 +689,28 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) { } }, validate: func(t *testing.T, c client.Client) { - t.Log("cluster revision is deleted") rev := &ocv1.ClusterExtensionRevision{} err := c.Get(t.Context(), client.ObjectKey{ Name: clusterExtensionRevisionName, }, rev) - require.Error(t, err) - require.True(t, apierrors.IsNotFound(err)) + require.NoError(t, err) + cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) + require.NotNil(t, cond) + require.Equal(t, metav1.ConditionUnknown, cond.Status) + require.Equal(t, ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason) + require.Equal(t, "revision is archived", cond.Message) + require.Equal(t, int64(1), cond.ObservedGeneration) + + cond = meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) + require.NotNil(t, cond) + require.Equal(t, metav1.ConditionFalse, cond.Status) + require.Equal(t, ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason) + require.Equal(t, "revision is archived", cond.Message) + require.Equal(t, int64(1), cond.ObservedGeneration) }, }, { - name: "set Available:Unknown:Reconciling and surface tracking cache cleanup errors when deleted", + name: "set Progressing:True:Retrying and requeue when archived revision archival is incomplete", revisionResult: mockRevisionResult{}, existingObjs: func() []client.Object { ext := newTestClusterExtension() @@ -706,37 +718,35 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) { rev1.Finalizers = []string{ "olm.operatorframework.io/teardown", } - 
+				rev1.Spec.LifecycleState = ocv1.ClusterExtensionRevisionLifecycleStateArchived
 				return []client.Object{rev1, ext}
 			},
 			revisionEngineTeardownFn: func(t *testing.T) func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
 				return func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
 					return &mockRevisionTeardownResult{
-						isComplete: true,
+						isComplete: false,
 					}, nil
 				}
 			},
-			trackingCacheFreeFn: func(ctx context.Context, object client.Object) error {
-				return fmt.Errorf("some tracking cache cleanup error")
-			},
-			expectedErr: "some tracking cache cleanup error",
+			expectedResult: ctrl.Result{RequeueAfter: 5 * time.Second},
 			validate: func(t *testing.T, c client.Client) {
-				t.Log("cluster revision is not deleted and still contains finalizer")
 				rev := &ocv1.ClusterExtensionRevision{}
 				err := c.Get(t.Context(), client.ObjectKey{
 					Name: clusterExtensionRevisionName,
 				}, rev)
 				require.NoError(t, err)
-				cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
+				cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
 				require.NotNil(t, cond)
-				require.Equal(t, metav1.ConditionUnknown, cond.Status)
-				require.Equal(t, ocv1.ClusterExtensionRevisionReasonReconciling, cond.Reason)
-				require.Equal(t, "some tracking cache cleanup error", cond.Message)
-				require.Equal(t, int64(1), cond.ObservedGeneration)
+				require.Equal(t, metav1.ConditionTrue, cond.Status)
+				require.Equal(t, ocv1.ClusterExtensionRevisionReasonRetrying, cond.Reason)
+				require.Equal(t, "tearing down revision", cond.Message)
+
+				// Finalizer should still be present
+				require.Contains(t, rev.Finalizers, "olm.operatorframework.io/teardown")
 			},
 		},
 		{
-			name:           "set Available:Archived:Unknown and Progressing:False:Archived conditions when a revision is archived",
+			name:           "return error and set retrying conditions when archived revision teardown fails",
 			revisionResult: mockRevisionResult{},
 			existingObjs: func() []client.Object {
 				ext := newTestClusterExtension()
@@ -749,30 +759,57 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) {
 			},
 			revisionEngineTeardownFn: func(t *testing.T) func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
 				return func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
-					return &mockRevisionTeardownResult{
-						isComplete: true,
-					}, nil
+					return nil, fmt.Errorf("teardown failed: connection refused")
 				}
 			},
+			expectedErr: "error tearing down revision",
 			validate: func(t *testing.T, c client.Client) {
 				rev := &ocv1.ClusterExtensionRevision{}
 				err := c.Get(t.Context(), client.ObjectKey{
 					Name: clusterExtensionRevisionName,
 				}, rev)
 				require.NoError(t, err)
-				cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable)
+				cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
 				require.NotNil(t, cond)
-				require.Equal(t, metav1.ConditionUnknown, cond.Status)
-				require.Equal(t, ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason)
-				require.Equal(t, "revision is archived", cond.Message)
-				require.Equal(t, int64(1), cond.ObservedGeneration)
+				require.Equal(t, metav1.ConditionTrue, cond.Status)
+				require.Equal(t, ocv1.ClusterExtensionRevisionReasonRetrying, cond.Reason)
+				require.Contains(t, cond.Message, "teardown failed: connection refused")
 
-				cond = meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
+				// Finalizer should still be present
+				require.Contains(t, rev.Finalizers, "olm.operatorframework.io/teardown")
+			},
+		},
+		{
+			name:           "return error and set retrying conditions when factory fails to create engine during archived teardown",
+			revisionResult: mockRevisionResult{},
+			existingObjs: func() []client.Object {
+				ext := newTestClusterExtension()
+				rev1 := newTestClusterExtensionRevision(t, clusterExtensionRevisionName, ext, testScheme)
+				rev1.Finalizers = []string{
+					"olm.operatorframework.io/teardown",
+				}
+				rev1.Spec.LifecycleState = ocv1.ClusterExtensionRevisionLifecycleStateArchived
+				return []client.Object{rev1, ext}
+			},
+			revisionEngineTeardownFn: func(t *testing.T) func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
+				return nil
+			},
+			revisionEngineFactoryErr: fmt.Errorf("token getter failed"),
+			expectedErr:              "failed to create revision engine",
+			validate: func(t *testing.T, c client.Client) {
+				rev := &ocv1.ClusterExtensionRevision{}
+				err := c.Get(t.Context(), client.ObjectKey{
+					Name: clusterExtensionRevisionName,
+				}, rev)
+				require.NoError(t, err)
+				cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
 				require.NotNil(t, cond)
-				require.Equal(t, metav1.ConditionFalse, cond.Status)
-				require.Equal(t, ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason)
-				require.Equal(t, "revision is archived", cond.Message)
-				require.Equal(t, int64(1), cond.ObservedGeneration)
+				require.Equal(t, metav1.ConditionTrue, cond.Status)
+				require.Equal(t, ocv1.ClusterExtensionRevisionReasonRetrying, cond.Reason)
+				require.Contains(t, cond.Message, "token getter failed")
+
+				// Finalizer should still be present
+				require.Contains(t, rev.Finalizers, "olm.operatorframework.io/teardown")
 			},
 		},
 		{
@@ -833,9 +870,10 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) {
 				},
 				teardown: tc.revisionEngineTeardownFn(t),
 			}
+			factory := &mockRevisionEngineFactory{engine: mockEngine, createErr: tc.revisionEngineFactoryErr}
 			result, err := (&controllers.ClusterExtensionRevisionReconciler{
 				Client:                testClient,
-				RevisionEngineFactory: &mockRevisionEngineFactory{engine: mockEngine},
+				RevisionEngineFactory: factory,
 				TrackingCache: &mockTrackingCache{
 					client: testClient,
 					freeFn: tc.trackingCacheFreeFn,
@@ -847,7 +885,7 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) {
 			})
 
 			// reconcile cluster extension revision
-			require.Equal(t, ctrl.Result{}, result)
+			require.Equal(t, tc.expectedResult, result)
 			if tc.expectedErr != "" {
 				require.Contains(t, err.Error(), tc.expectedErr)
 			} else {
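The new revisionEngineFactoryErr field implies that mockRevisionEngineFactory gained a createErr knob. The mock's definition is not part of this diff, but given how it is constructed above, a hypothetical shape would be:

	// Hypothetical shape of the test double; the real definition lives in
	// the package's test helpers and is not shown in this diff.
	type mockRevisionEngineFactory struct {
		engine    controllers.RevisionEngine
		createErr error
	}

	func (f *mockRevisionEngineFactory) CreateRevisionEngine(ctx context.Context, cer *ocv1.ClusterExtensionRevision) (controllers.RevisionEngine, error) {
		if f.createErr != nil {
			return nil, f.createErr
		}
		return f.engine, nil
	}

Because the factory error is injected before any teardown call, the factory-failure case can safely return nil from revisionEngineTeardownFn: the engine must never be reached.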
diff --git a/test/e2e/features/update.feature b/test/e2e/features/update.feature
index dee45e32a9..c48de1019e 100644
--- a/test/e2e/features/update.feature
+++ b/test/e2e/features/update.feature
@@ -181,7 +181,7 @@ Feature: Update ClusterExtension
     Then bundle "test-operator.1.3.0" is installed in version "1.3.0"
 
   @BoxcutterRuntime
-  Scenario: Each update creates a new revision
+  Scenario: Each update creates a new revision and resources not present in the new revision are removed from the cluster
     Given ClusterExtension is applied
     """
     apiVersion: olm.operatorframework.io/v1
@@ -212,6 +212,7 @@ Feature: Update ClusterExtension
     And ClusterExtensionRevision "${NAME}-2" reports Progressing as True with Reason Succeeded
     And ClusterExtensionRevision "${NAME}-2" reports Available as True with Reason ProbesSucceeded
     And ClusterExtensionRevision "${NAME}-1" is archived
+    And ClusterExtensionRevision "${NAME}-1" phase objects are not found or not owned by the revision
 
   @BoxcutterRuntime
   Scenario: Report all active revisions on ClusterExtension
diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go
index ee3ce30802..8d7ef90f3f 100644
--- a/test/e2e/steps/steps.go
+++ b/test/e2e/steps/steps.go
@@ -75,6 +75,7 @@ func RegisterSteps(sc *godog.ScenarioContext) {
 	sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" is archived$`, ClusterExtensionRevisionIsArchived)
 	sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" contains annotation "([^"]+)" with value$`, ClusterExtensionRevisionHasAnnotationWithValue)
 	sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" has label "([^"]+)" with value "([^"]+)"$`, ClusterExtensionRevisionHasLabelWithValue)
+	sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" phase objects are not found or not owned by the revision$`, ClusterExtensionRevisionObjectsNotFoundOrNotOwned)
 
 	sc.Step(`^(?i)resource "([^"]+)" is installed$`, ResourceAvailable)
 	sc.Step(`^(?i)resource "([^"]+)" is available$`, ResourceAvailable)
@@ -319,7 +320,7 @@ func ClusterExtensionResourcesCreatedAndAreLabeled(ctx context.Context) error {
 		return fmt.Errorf("extension objects not found in context")
 	}
 
-	for _, obj := range sc.extensionObjects {
+	for _, obj := range sc.GetClusterExtensionObjects() {
 		waitFor(ctx, func() bool {
 			kind := obj.GetObjectKind().GroupVersionKind().Kind
 			clusterObj, err := getResource(kind, obj.GetName(), obj.GetNamespace())
@@ -361,7 +362,7 @@ func ClusterExtensionResourcesRemoved(ctx context.Context) error {
 	if len(sc.GetClusterExtensionObjects()) == 0 {
 		return fmt.Errorf("extension objects not found in context")
 	}
-	for _, obj := range sc.extensionObjects {
+	for _, obj := range sc.GetClusterExtensionObjects() {
 		if err := ResourceEventuallyNotFound(ctx, fmt.Sprintf("%s/%s", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName())); err != nil {
 			return err
 		}
@@ -560,6 +561,66 @@ func ClusterExtensionRevisionHasLabelWithValue(ctx context.Context, revisionName
 	return nil
 }
 
+// ClusterExtensionRevisionObjectsNotFoundOrNotOwned waits for all objects described in the named
+// ClusterExtensionRevision's phases to either not exist on the cluster or not contain the revision
+// in their ownerReferences. Polls with timeout.
+func ClusterExtensionRevisionObjectsNotFoundOrNotOwned(ctx context.Context, revisionName string) error {
+	sc := scenarioCtx(ctx)
+	revisionName = substituteScenarioVars(strings.TrimSpace(revisionName), sc)
+
+	// Get the CER to extract its phase objects
+	var rev ocv1.ClusterExtensionRevision
+	waitFor(ctx, func() bool {
+		out, err := k8sClient("get", "clusterextensionrevision", revisionName, "-o", "json")
+		if err != nil {
+			return false
+		}
+		return json.Unmarshal([]byte(out), &rev) == nil
+	})
+
+	// For each object in each phase, verify it either doesn't exist or
+	// doesn't have the CER in its ownerReferences
+	for _, phase := range rev.Spec.Phases {
+		for _, phaseObj := range phase.Objects {
+			obj := phaseObj.Object
+			kind := obj.GetKind()
+			name := obj.GetName()
+			namespace := obj.GetNamespace()
+
+			waitFor(ctx, func() bool {
+				args := []string{"get", kind, name, "--ignore-not-found", "-o", "json"}
+				if namespace != "" {
+					args = append(args, "-n", namespace)
+				}
+				out, err := k8sClient(args...)
+				if err != nil {
+					return false
+				}
+				// If output is empty, the resource does not exist; condition satisfied
+				if strings.TrimSpace(out) == "" {
+					return true
+				}
+				clusterObj, err := toUnstructured(out)
+				if err != nil {
+					return false
+				}
+
+				// Check that no ownerReference points to this CER
+				for _, ref := range clusterObj.GetOwnerReferences() {
+					if ref.Kind == ocv1.ClusterExtensionRevisionKind && ref.Name == revisionName {
+						logger.V(1).Info("object still owned by revision",
+							"kind", kind, "name", name, "namespace", namespace,
+							"revision", revisionName)
+						return false
+					}
+				}
+				return true
+			})
+		}
+	}
+	return nil
+}
+
 // ResourceAvailable waits for the specified resource (kind/name format) to exist in the test namespace. Polls with timeout.
 func ResourceAvailable(ctx context.Context, resource string) error {
 	sc := scenarioCtx(ctx)
diff --git a/testdata/images/bundles/test-operator/v1.0.0/manifests/dummy.configmap.yaml b/testdata/images/bundles/test-operator/v1.0.0/manifests/dummy.configmap.yaml
new file mode 100644
index 0000000000..8135b6f178
--- /dev/null
+++ b/testdata/images/bundles/test-operator/v1.0.0/manifests/dummy.configmap.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: dummy-configmap
+data:
+  why: "this config map does not exist in higher versions of the bundle - it is useful to test whether resources removed between versions are removed from the cluster as well"
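Two closing remarks on the e2e additions. First, the ownership check in ClusterExtensionRevisionObjectsNotFoundOrNotOwned matches owners by Kind and name only, which should be unambiguous for this CRD in practice; if it ever needed tightening, the owner's API group could be compared as well, along these lines (sketch only, assuming ocv1.GroupVersion is the usual kubebuilder GroupVersion variable and schema is k8s.io/apimachinery/pkg/runtime/schema):

	// Possible tightening, sketch only: also match the owner's group so a
	// same-named Kind from another API group cannot produce a false positive.
	ownerGV, gvErr := schema.ParseGroupVersion(ref.APIVersion)
	if gvErr == nil && ownerGV.Group == ocv1.GroupVersion.Group &&
		ref.Kind == ocv1.ClusterExtensionRevisionKind && ref.Name == revisionName {
		return false // still owned by the revision
	}

Second, the dummy ConfigMap is intentionally present only in the v1.0.0 bundle, so any upgrade away from v1.0.0 exercises exactly the removal path the new step verifies.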