35 changes: 32 additions & 3 deletions openshift/tests-extension/test/olmv1-catalog.go
@@ -11,6 +11,7 @@ import (

batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -76,12 +77,39 @@ func verifyCatalogEndpoint(ctx SpecContext, catalog, endpoint, query string) {
strings.ReplaceAll(endpoint, "?", ""),
strings.ReplaceAll(catalog, "-", ""))

job := buildCurlJob(jobNamePrefix, "default", serviceURL)
// create service account object
serviceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: jobNamePrefix,
Namespace: "default",
Contributor:
Wasn't the reversion due to the fact that we were still creating an SA in the default namespace?

Contributor Author:
As far as I am aware, the revert happened because of a race condition that caused the curl job to fail intermittently: the service account was not cleaned up, or not yet applied, before the curl job was applied.

There was some discussion in the Slack thread referenced in the revert #609 about the verify pod running in the default namespace being "suspicious". If you prefer, I can move this pod to a different namespace. :)
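For illustration only, a minimal sketch of what that alternative could look like (nothing below is part of this diff; the namespace name is made up):

// Hypothetical: run the verify ServiceAccount and Job in a dedicated namespace
// instead of "default". The name "olmv1-catalog-verify" is invented for this sketch.
testNs := &corev1.Namespace{
	ObjectMeta: metav1.ObjectMeta{Name: "olmv1-catalog-verify"},
}
err = k8sClient.Create(ctx, testNs)
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
DeferCleanup(func(ctx SpecContext) {
	_ = k8sClient.Delete(ctx, testNs)
})
// The ServiceAccount and Job below would then use testNs.Name instead of "default".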

},
}

err = k8sClient.Create(ctx, serviceAccount)

if err != nil && !apierrors.IsAlreadyExists(err) {
Contributor:
We expect err to be nil on line 92. Maybe relax the Expect to tolerate AlreadyExists?
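A rough sketch of that relaxation (assuming the dot-imported Gomega matchers and the variables already in scope in this test; not part of the diff):

// Tolerate a ServiceAccount left over from a previous run, but still fail on
// any other creation error.
err = k8sClient.Create(ctx, serviceAccount)
Expect(err).To(Or(
	Not(HaveOccurred()),
	WithTransform(apierrors.IsAlreadyExists, BeTrue()),
), "failed to ensure ServiceAccount %s", jobNamePrefix)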

Fail(fmt.Sprintf("Failed to ensure ServiceAccount %s: %v", jobNamePrefix, err))
}

DeferCleanup(func(ctx SpecContext) {
_ = k8sClient.Delete(ctx, serviceAccount)
})

job := buildCurlJob(jobNamePrefix, "default", serviceURL, serviceAccount)

err = k8sClient.Create(ctx, job)
Expect(err).NotTo(HaveOccurred(), "failed to create Job")

DeferCleanup(func(ctx SpecContext) {
_ = k8sClient.Delete(ctx, job)
// We poll until the Job is actually gone so that this cleanup only returns
// once deletion has completed. Because DeferCleanup callbacks run in LIFO
// order, the ServiceAccount cleanup registered above then runs afterwards
// without issue.
Eventually(func(g Gomega) {
recheck := &batchv1.Job{}
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(job), recheck)
g.Expect(apierrors.IsNotFound(err)).To(BeTrue(), fmt.Sprintf("Job %v should be deleted", job.Name))
}).WithTimeout(helpers.DefaultTimeout).WithPolling(helpers.DefaultPolling).Should(Succeed())
})

By("Waiting for Job to succeed")
@@ -203,7 +231,7 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM][Skipped:Disconnected] OLMv1
})
})

func buildCurlJob(prefix, namespace, url string) *batchv1.Job {
func buildCurlJob(prefix, namespace, url string, serviceAccount *corev1.ServiceAccount) *batchv1.Job {
backoff := int32(1)
// This means the k8s garbage collector will automatically delete the job 5 minutes
// after it has completed or failed.
@@ -232,7 +260,8 @@ func buildCurlJob(prefix, namespace, url string) *batchv1.Job {
BackoffLimit: &backoff,
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
ServiceAccountName: serviceAccount.Name,
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{{
Name: "api-tester",
Image: "registry.redhat.io/rhel8/httpd-24:latest",