diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4bf5a6397e..8edbceec3b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -150,6 +150,9 @@ jobs: export IMAGE_PROCESSING_CONTAINER_IMAGE="$(KO_DOCKER_REPO=kind.local ko publish ./cmd/image-processing)" make test-integration + - name: Test-PipelineRun + run: | + BUILDRUN_EXECUTOR=PipelineRun ginkgo --focus-file="buildruns_to_pipelineruns_test.go" -v test/integration/... e2e: strategy: @@ -240,6 +243,33 @@ jobs: export TEST_E2E_FLAGS="-r --procs 8 --randomize-all --timeout=1h --trace --vv" export TEST_E2E_TIMEOUT_MULTIPLIER=2 make test-e2e + - name: Test-PipelineRun + run: | + export TEST_NAMESPACE=shp-e2e + export TEST_IMAGE_REPO=registry.registry.svc.cluster.local:32222/shipwright-io/build-e2e + export TEST_IMAGE_REPO_INSECURE=true + export TEST_E2E_TIMEOUT_MULTIPLIER=1 + kubectl patch deployment shipwright-build-controller -n shipwright-build --type='json' -p='[ + { + "op": "add", + "path": "/spec/template/spec/containers/0/env/-", + "value": { + "name": "BUILDRUN_EXECUTOR", + "value": "PipelineRun" + } + } + ]' + # Wait for the rollout to complete + kubectl rollout restart deployment shipwright-build-controller -n shipwright-build + kubectl rollout status deployment shipwright-build-controller -n shipwright-build + + # Run PipelineRun tests + TEST_CONTROLLER_NAMESPACE=${TEST_NAMESPACE} \ + TEST_WATCH_NAMESPACE=${TEST_NAMESPACE} \ + TEST_E2E_SERVICEACCOUNT_NAME=pipeline \ + TEST_E2E_TIMEOUT_MULTIPLIER=${TEST_E2E_TIMEOUT_MULTIPLIER} \ + TEST_E2E_VERIFY_TEKTONOBJECTS=true \ + ginkgo --focus="PipelineRun E2E Tests" --procs 8 --timeout=1h --vv test/e2e/v1beta1/ - name: Build controller logs if: ${{ failure() }} run: | diff --git a/Makefile b/Makefile index 0468b13829..0df21e3b46 100644 --- a/Makefile +++ b/Makefile @@ -213,6 +213,7 @@ test-integration: install-apis ginkgo --randomize-all \ --randomize-suites \ --fail-on-pending \ + --skip-file=buildruns_to_pipelineruns_test.go \ -trace \ test/integration/... @@ -226,7 +227,7 @@ test-e2e-plain: ginkgo TEST_E2E_SERVICEACCOUNT_NAME=${TEST_E2E_SERVICEACCOUNT_NAME} \ TEST_E2E_TIMEOUT_MULTIPLIER=${TEST_E2E_TIMEOUT_MULTIPLIER} \ TEST_E2E_VERIFY_TEKTONOBJECTS=${TEST_E2E_VERIFY_TEKTONOBJECTS} \ - $(GINKGO) ${TEST_E2E_FLAGS} test/e2e/ + $(GINKGO) --skip-file=e2e_pipelinerun_test.go ${TEST_E2E_FLAGS} test/e2e/ .PHONY: test-e2e-kind-with-prereq-install test-e2e-kind-with-prereq-install: ginkgo install-controller-kind install-strategies test-e2e-plain diff --git a/deploy/200-role.yaml b/deploy/200-role.yaml index 4466202f23..a526c0b109 100644 --- a/deploy/200-role.yaml +++ b/deploy/200-role.yaml @@ -67,6 +67,10 @@ rules: # With the OwnerReferencesPermissionEnforcement admission controller enabled, controllers need the "delete" permission on objects that they set owner references on. verbs: ['get', 'list', 'watch', 'create', 'delete', 'patch'] +- apiGroups: ['tekton.dev'] + resources: ['pipelineruns'] + verbs: ['get', 'list', 'watch', 'create', 'delete', 'patch'] + - apiGroups: [''] resources: ['pods'] verbs: ['get', 'list', 'watch'] diff --git a/docs/configuration.md b/docs/configuration.md index 1abecc1dea..5e936595fb 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -37,6 +37,7 @@ The following environment variables are available: | `KUBE_API_BURST` | Burst to use for the Kubernetes API client. See [Config.Burst]. A value of 0 or lower will use the default from client-go, which currently is 10. Default is 0. | | `KUBE_API_QPS` | QPS to use for the Kubernetes API client. See [Config.QPS]. A value of 0 or lower will use the default from client-go, which currently is 5. Default is 0. | | `VULNERABILITY_COUNT_LIMIT` | holds vulnerability count limit if vulnerability scan is enabled for the output image. If it is defined as 10, then it will output only 10 vulnerabilities sorted by severity in the buildrun status.Output. Default is 50. | +| `BUILDRUN_EXECUTOR` | Sets the kind of buildrun exectutor that will be used. Value can be `TaskRun` or `PipelineRun`. By default buildrun will use `TaskRun` for its build executor. | [^1]: The `runAsUser` and `runAsGroup` are dynamically overwritten depending on the build strategy that is used. See [Security Contexts](buildstrategies.md#security-contexts) for more information. diff --git a/pkg/config/config.go b/pkg/config/config.go index 72d653358c..d35200d950 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -66,6 +66,7 @@ const ( controllerBuildRunMaxConcurrentReconciles = "BUILDRUN_MAX_CONCURRENT_RECONCILES" controllerBuildStrategyMaxConcurrentReconciles = "BUILDSTRATEGY_MAX_CONCURRENT_RECONCILES" controllerClusterBuildStrategyMaxConcurrentReconciles = "CLUSTERBUILDSTRATEGY_MAX_CONCURRENT_RECONCILES" + controllerBuildrunExecutorEnvVar = "BUILDRUN_EXECUTOR" // environment variables for the kube API kubeAPIBurst = "KUBE_API_BURST" @@ -107,6 +108,7 @@ type Config struct { KubeAPIOptions KubeAPIOptions GitRewriteRule bool VulnerabilityCountLimit int + BuildrunExecutor string } // PrometheusConfig contains the specific configuration for the @@ -163,6 +165,7 @@ func NewDefaultConfig() *Config { TerminationLogPath: terminationLogPathDefault, GitRewriteRule: false, VulnerabilityCountLimit: 50, + BuildrunExecutor: "TaskRun", GitContainerTemplate: Step{ Image: gitDefaultImage, @@ -361,6 +364,11 @@ func (c *Config) SetConfigFromEnv() error { c.VulnerabilityCountLimit = vc } + // set environment variable for executor type + if executor := os.Getenv(controllerBuildrunExecutorEnvVar); executor != "" { + c.BuildrunExecutor = executor + } + // Mark that the Git wrapper is suppose to use Git rewrite rule if useGitRewriteRule := os.Getenv(useGitRewriteRule); useGitRewriteRule != "" { c.GitRewriteRule = strings.ToLower(useGitRewriteRule) == "true" diff --git a/pkg/reconciler/buildrun/buildrun.go b/pkg/reconciler/buildrun/buildrun.go index 639db28ea5..b49b9fd844 100644 --- a/pkg/reconciler/buildrun/buildrun.go +++ b/pkg/reconciler/buildrun/buildrun.go @@ -11,7 +11,6 @@ import ( "strconv" "strings" - pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "knative.dev/pkg/apis" corev1 "k8s.io/api/core/v1" @@ -59,7 +58,7 @@ func NewReconciler(c *config.Config, mgr manager.Manager, ownerRef setOwnerRefer client: client.WithFieldOwner(mgr.GetClient(), "shipwright-buildrun-controller"), scheme: mgr.GetScheme(), setOwnerReferenceFunc: ownerRef, - taskRunnerFactory: &TektonTaskRunImageBuildRunnerFactory{}, + taskRunnerFactory: RunnerFactories[c.BuildrunExecutor], } } @@ -81,16 +80,16 @@ func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Req // so we can no longer assume that a build run event will not come in after the build run has a task run ref in its status buildRun = &buildv1beta1.BuildRun{} getBuildRunErr := r.GetBuildRunObject(ctx, request.Name, request.Namespace, buildRun) - lastTaskRun, getTaskRunErr := r.taskRunnerFactory.GetImageBuildRunner(ctx, r.client, types.NamespacedName{Name: request.Name, Namespace: request.Namespace}) + buildRunner, buildRunnerErr := r.taskRunnerFactory.GetImageBuildRunner(ctx, r.client, types.NamespacedName{Name: request.Name, Namespace: request.Namespace}) - if getBuildRunErr != nil && getTaskRunErr != nil { + if getBuildRunErr != nil && buildRunnerErr != nil { if !apierrors.IsNotFound(getBuildRunErr) { return reconcile.Result{}, getBuildRunErr } - if !apierrors.IsNotFound(getTaskRunErr) { - return reconcile.Result{}, getTaskRunErr + if !apierrors.IsNotFound(buildRunnerErr) { + return reconcile.Result{}, buildRunnerErr } - // If the BuildRun and TaskRun are not found, it might mean that we are running a Reconcile after a TaskRun was deleted. If this is the case, we need + // If the BuildRun and BuildRunner are not found, it might mean that we are running a Reconcile after a TaskRun/PipelineRun was deleted. If this is the case, we need // to identify from the request the BuildRun name associate to it and update the BuildRun Status. r.VerifyRequestName(ctx, request, buildRun) return reconcile.Result{}, nil @@ -121,16 +120,14 @@ func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Req ) } } - // if this is a build run event after we've set the task run ref, get the task run using the task run name stored in the build run // nolint:staticcheck - if getBuildRunErr == nil && apierrors.IsNotFound(getTaskRunErr) && buildRun.Status.TaskRunName != nil { - lastTaskRun, getTaskRunErr = r.taskRunnerFactory.GetImageBuildRunner(ctx, r.client, types.NamespacedName{Name: *buildRun.Status.TaskRunName, Namespace: request.Namespace}) + if getBuildRunErr == nil && apierrors.IsNotFound(buildRunnerErr) && buildRun.Status.TaskRunName != nil { + buildRunner, buildRunnerErr = r.taskRunnerFactory.GetImageBuildRunner(ctx, r.client, types.NamespacedName{Name: *buildRun.Status.TaskRunName, Namespace: request.Namespace}) } - - // for existing TaskRuns update the BuildRun Status, if there is no TaskRun, then create one - if getTaskRunErr != nil { - if apierrors.IsNotFound(getTaskRunErr) { + // for existing BuildRunners update the BuildRun Status, if there is no BuildRunner, then create one + if buildRunnerErr != nil { + if apierrors.IsNotFound(buildRunnerErr) { build = &buildv1beta1.Build{} if err := resources.GetBuildObject(ctx, r.client, buildRun, build); err != nil { if !resources.IsClientStatusUpdateError(err) && buildRun.Status.IsFailed(buildv1beta1.Succeeded) { @@ -320,46 +317,49 @@ func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Req return reconcile.Result{}, nil } - // Create the TaskRun, this needs to be the last step in this block to be idempotent - generatedTaskRun, err := r.createTaskRun(ctx, svcAccount, strategy, build, buildRun) + // Create the ImageBuildRunner (TaskRun or PipelineRun) + imageBuildRunner, err := r.taskRunnerFactory.CreateImageBuildRunner(ctx, r.client, r.config, svcAccount, strategy, build, buildRun, r.scheme, r.setOwnerReferenceFunc) if err != nil { if !resources.IsClientStatusUpdateError(err) && buildRun.Status.IsFailed(buildv1beta1.Succeeded) { - ctxlog.Info(ctx, "taskRun generation failed", namespace, request.Namespace, name, request.Name) + ctxlog.Info(ctx, "buildRunner generation failed", namespace, request.Namespace, name, request.Name) return reconcile.Result{}, nil } // system call failure, reconcile again return reconcile.Result{}, err } - err = resources.CheckTaskRunVolumesExist(ctx, r.client, generatedTaskRun) - // if resource is not found, fais the build run - if err != nil { + // Check volumes exist before reconciling the executor + if err := r.validateExecutorVolumes(ctx, imageBuildRunner); err != nil { + // If it's a volume not found error, update the BuildRun status and end reconciliation if apierrors.IsNotFound(err) { - if err := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), string(buildv1beta1.VolumeDoesNotExist)); err != nil { - return reconcile.Result{}, err + // Here, we handle the error returned by the validation function. + if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), string(buildv1beta1.VolumeDoesNotExist)); updateErr != nil { + return reconcile.Result{}, updateErr } - - // end of reconciliation - return reconcile.Result{}, nil + return reconcile.Result{}, nil // Stop reconciling } - - // some other error might have happened, return it and reconcile again + // Some other error occurred, return it for reconciliation retry return reconcile.Result{}, err } - ctxlog.Info(ctx, "creating TaskRun from BuildRun", namespace, request.Namespace, name, generatedTaskRun.GenerateName, "BuildRun", buildRun.Name) - if err = r.client.Create(ctx, generatedTaskRun); err != nil { + ctxlog.Info(ctx, "creating ImageBuildRunner from BuildRun", namespace, request.Namespace, name, imageBuildRunner.GetName(), "BuildRun", buildRun.Name) + if err = r.taskRunnerFactory.CreateImageBuildRunnerInCluster(ctx, r.client, imageBuildRunner); err != nil { // system call failure, reconcile again return reconcile.Result{}, err } - // Set the TaskRunName and BuildExecutor in the BuildRun status - buildRun.Status.TaskRunName = &generatedTaskRun.Name // nolint:staticcheck + // Set the BuildExecutor in the BuildRun status + executorName := imageBuildRunner.GetName() buildRun.Status.Executor = &buildv1beta1.BuildExecutor{ - Name: generatedTaskRun.Name, - Kind: "TaskRun", + Name: executorName, + Kind: imageBuildRunner.GetExecutorKind(), + } + // Set the deprecated TaskRunName field for backward compatibility only if it's a TaskRun + if imageBuildRunner.GetExecutorKind() == "TaskRun" { + //nolint:staticcheck // Keep for backward compatibility for now + buildRun.Status.TaskRunName = &executorName } - ctxlog.Info(ctx, "updating BuildRun status with TaskRun name", namespace, request.Namespace, name, request.Name, "TaskRun", generatedTaskRun.Name) + ctxlog.Info(ctx, "updating BuildRun status with ImageBuildRunner name", namespace, request.Namespace, name, request.Name, imageBuildRunner.GetExecutorKind(), imageBuildRunner.GetName()) if err = r.client.Status().Update(ctx, buildRun); err != nil { // we ignore the error here to prevent another reconciliation that would create another TaskRun, // the LatestTaskRunRef field will also be set in the reconciliation from a TaskRun @@ -381,19 +381,19 @@ func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Req buildRun.Namespace, buildRun.Spec.BuildName(), buildRun.Name, - generatedTaskRun.CreationTimestamp.Time.Sub(buildRun.CreationTimestamp.Time), + imageBuildRunner.GetCreationTimestamp().Time.Sub(buildRun.CreationTimestamp.Time), ) } else { - return reconcile.Result{}, getTaskRunErr + return reconcile.Result{}, buildRunnerErr } } else { - ctxlog.Info(ctx, "taskRun already exists", namespace, request.Namespace, name, request.Name) + ctxlog.Info(ctx, "buildRunner already exists", namespace, request.Namespace, name, request.Name) if getBuildRunErr != nil && !apierrors.IsNotFound(getBuildRunErr) { return reconcile.Result{}, getBuildRunErr } else if apierrors.IsNotFound(getBuildRunErr) { - // this is a TR event, try getting the br from the label on the tr - labels := lastTaskRun.GetLabels() + // this is a TaskRun/PipelineRun event, try getting the br from the label on the executor + labels := buildRunner.GetLabels() if labels != nil { err := r.GetBuildRunObject(ctx, labels[buildv1beta1.LabelBuildRun], request.Namespace, buildRun) if err != nil && !apierrors.IsNotFound(err) { @@ -405,60 +405,62 @@ func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Req } } - if buildRun.IsCanceled() && !lastTaskRun.IsCancelled() { - ctxlog.Info(ctx, "buildRun marked for cancellation, patching task run", namespace, request.Namespace, name, request.Name) - if err := lastTaskRun.Cancel(ctx, r.client); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to cancel TaskRun: %v", err) + if buildRun.IsCanceled() && !buildRunner.IsCancelled() { + ctxlog.Info(ctx, "buildRun marked for cancellation, patching executor", namespace, request.Namespace, name, request.Name) + if err := buildRunner.Cancel(ctx, r.client); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to cancel executor: %v", err) } } // Check if the BuildRun is already finished, this happens if the build controller is restarted. - // It then reconciles all TaskRuns. This is valuable if the build controller was down while the TaskRun - // finishes which would be missed otherwise. But, if the TaskRun was already completed and the status + // It then reconciles all executors. This is valuable if the build controller was down while the executor + // finishes which would be missed otherwise. But, if the executor was already completed and the status // synchronized into the BuildRun, then yet another reconciliation is not necessary. if buildRun.Status.CompletionTime != nil { ctxlog.Info(ctx, "buildRun already marked completed", namespace, request.Namespace, name, request.Name) return reconcile.Result{}, nil } - taskRunResults := lastTaskRun.GetResults() - if len(taskRunResults) > 0 { - ctxlog.Info(ctx, "surfacing taskRun results to BuildRun status", namespace, request.Namespace, name, request.Name) - resources.UpdateBuildRunUsingTaskResults(ctx, buildRun, taskRunResults, request) + executorResults := buildRunner.GetResults(ctx, r.client) + + if len(executorResults) > 0 { + ctxlog.Info(ctx, "surfacing executor results to BuildRun status", namespace, request.Namespace, name, request.Name) + resources.UpdateBuildRunUsingTaskResults(ctx, buildRun, executorResults, request) } - trCondition := lastTaskRun.GetCondition(apis.ConditionSucceeded) - if trCondition != nil { - // For now, pass the underlying TaskRun object to maintain compatibility - // TODO: Update resources functions to work with interface - taskRunObj := lastTaskRun.GetObject().(*pipelineapi.TaskRun) - if err := resources.UpdateBuildRunUsingTaskRunCondition(ctx, r.client, buildRun, taskRunObj, trCondition); err != nil { + executorCondition := buildRunner.GetCondition(apis.ConditionSucceeded) + if executorCondition != nil { + // Update BuildRun status based on the condition using the unified function + if err := resources.UpdateImageBuildRunFromExecutor(ctx, r.client, buildRun, buildRunner.GetObject(), executorCondition); err != nil { return reconcile.Result{}, err } + executorStatus := executorCondition.Status - resources.UpdateBuildRunUsingTaskFailures(ctx, r.client, buildRun, taskRunObj) - taskRunStatus := trCondition.Status - - // check if we should delete the generated service account by checking the build run spec and that the task run is complete - if taskRunStatus == corev1.ConditionTrue || taskRunStatus == corev1.ConditionFalse { + // check if we should delete the generated service account by checking the build run spec and that the executor is complete + if executorStatus == corev1.ConditionTrue || executorStatus == corev1.ConditionFalse { if err := resources.DeleteServiceAccount(ctx, r.client, buildRun); err != nil { ctxlog.Error(ctx, err, "Error during deletion of generated service account.") return reconcile.Result{}, err } } - taskRunName := lastTaskRun.GetName() - buildRun.Status.TaskRunName = &taskRunName // nolint:staticcheck + // Update the BuildExecutor if not already set if buildRun.Status.Executor == nil { + executorName := buildRunner.GetName() buildRun.Status.Executor = &buildv1beta1.BuildExecutor{ - Name: lastTaskRun.GetName(), - Kind: "TaskRun", + Name: executorName, + Kind: buildRunner.GetExecutorKind(), + } + // Set the deprecated TaskRunName field for backward compatibility only if it's a TaskRun + if buildRunner.GetExecutorKind() == "TaskRun" { + //nolint:staticcheck // Keep for backward compatibility for now + buildRun.Status.TaskRunName = &executorName } } - taskRunStartTime := lastTaskRun.GetStartTime() - if buildRun.Status.StartTime == nil && taskRunStartTime != nil { - buildRun.Status.StartTime = taskRunStartTime + executorStartTime := buildRunner.GetStartTime() + if buildRun.Status.StartTime == nil && executorStartTime != nil { + buildRun.Status.StartTime = executorStartTime // Report the buildrun established duration (time between the creation of the buildrun and the start of the buildrun) buildmetrics.BuildRunEstablishObserve( @@ -470,8 +472,8 @@ func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Req ) } - if lastTaskRun.GetCompletionTime() != nil && buildRun.Status.CompletionTime == nil { - buildRun.Status.CompletionTime = lastTaskRun.GetCompletionTime() + if buildRunner.GetCompletionTime() != nil && buildRun.Status.CompletionTime == nil { + buildRun.Status.CompletionTime = buildRunner.GetCompletionTime() // buildrun completion duration (total time between the creation of the buildrun and the buildrun completion) buildmetrics.BuildRunCompletionObserve( @@ -482,34 +484,46 @@ func (r *ReconcileBuildRun) Reconcile(ctx context.Context, request reconcile.Req buildRun.Status.CompletionTime.Time.Sub(buildRun.CreationTimestamp.Time), ) - // Look for the pod created by the taskrun + // Look for the pod created by the executor var pod = &corev1.Pod{} - if err := r.client.Get(ctx, types.NamespacedName{Namespace: request.Namespace, Name: lastTaskRun.GetPodName()}, pod); err == nil { - if len(pod.Status.InitContainerStatuses) > 0 { - - lastInitPodIdx := len(pod.Status.InitContainerStatuses) - 1 - lastInitPod := pod.Status.InitContainerStatuses[lastInitPodIdx] - - if lastInitPod.State.Terminated != nil { - // taskrun pod ramp-up (time between pod creation and last init container completion) - buildmetrics.TaskRunPodRampUpDurationObserve( - buildRun.Status.BuildSpec.StrategyName(), - buildRun.Namespace, - buildRun.Spec.BuildName(), - buildRun.Name, - lastInitPod.State.Terminated.FinishedAt.Sub(pod.CreationTimestamp.Time), - ) - } + podName := buildRunner.GetPodName() + + // For PipelineRuns, GetPodName() returns empty string, so we need to get the actual pod name + // from the underlying TaskRun. For now, there is only 1 TaskRun per PipelineRun, so we can + // safely use the first TaskRun to get the pod name. + if podName == "" && buildRunner.GetExecutorKind() == "PipelineRun" { + if taskRuns, err := buildRunner.GetUnderlyingTaskRuns(r.client); err == nil && len(taskRuns) > 0 { + podName = taskRuns[0].Status.PodName } + } - // taskrun ramp-up duration (time between taskrun creation and taskrun pod creation) - buildmetrics.TaskRunRampUpDurationObserve( - buildRun.Status.BuildSpec.StrategyName(), - buildRun.Namespace, - buildRun.Spec.BuildName(), - buildRun.Name, - pod.CreationTimestamp.Time.Sub(lastTaskRun.GetCreationTimestamp().Time), - ) + if podName != "" { + if err := r.client.Get(ctx, types.NamespacedName{Namespace: request.Namespace, Name: podName}, pod); err == nil { + if len(pod.Status.InitContainerStatuses) > 0 { + lastInitPodIdx := len(pod.Status.InitContainerStatuses) - 1 + lastInitPod := pod.Status.InitContainerStatuses[lastInitPodIdx] + + if lastInitPod.State.Terminated != nil { + // executor pod ramp-up (time between pod creation and last init container completion) + buildmetrics.TaskRunPodRampUpDurationObserve( + buildRun.Status.BuildSpec.StrategyName(), + buildRun.Namespace, + buildRun.Spec.BuildName(), + buildRun.Name, + lastInitPod.State.Terminated.FinishedAt.Sub(pod.CreationTimestamp.Time), + ) + } + } + + // executor ramp-up duration (time between executor creation and executor pod creation) + buildmetrics.TaskRunRampUpDurationObserve( + buildRun.Status.BuildSpec.StrategyName(), + buildRun.Namespace, + buildRun.Spec.BuildName(), + buildRun.Name, + pod.CreationTimestamp.Time.Sub(buildRunner.GetCreationTimestamp().Time), + ) + } } } @@ -597,28 +611,29 @@ func (r *ReconcileBuildRun) getReferencedStrategy(ctx context.Context, build *bu return strategy, err } -func (r *ReconcileBuildRun) createTaskRun(ctx context.Context, serviceAccount *corev1.ServiceAccount, strategy buildv1beta1.BuilderStrategy, build *buildv1beta1.Build, buildRun *buildv1beta1.BuildRun) (*pipelineapi.TaskRun, error) { - var ( - generatedTaskRun *pipelineapi.TaskRun - ) - - generatedTaskRun, err := resources.GenerateTaskRun(r.config, build, buildRun, serviceAccount.Name, strategy) +// validateExecutorVolumes checks that all volumes referenced in the executor (TaskRun or PipelineRun) exist. +func (r *ReconcileBuildRun) validateExecutorVolumes(ctx context.Context, imageBuildRunner ImageBuildRunner) error { + // Use GetUnderlyingTaskRuns for both TaskRun and PipelineRun executors + // This method abstracts away the differences between executor types + generatedTaskRuns, err := imageBuildRunner.GetUnderlyingTaskRuns(r.client) if err != nil { - if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), resources.ConditionTaskRunGenerationFailed); updateErr != nil { - return nil, resources.HandleError("failed to create taskrun runtime object", err, updateErr) - } + return fmt.Errorf("failed to get underlying TaskRuns: %w", err) + } - return nil, err + // If no TaskRuns exist yet, skip volume validation as the executor hasn't been processed by Tekton yet + if len(generatedTaskRuns) == 0 { + return nil } - // Set OwnerReference for BuildRun and TaskRun - if err := r.setOwnerReferenceFunc(buildRun, generatedTaskRun, r.scheme); err != nil { - if updateErr := resources.UpdateConditionWithFalseStatus(ctx, r.client, buildRun, err.Error(), resources.ConditionSetOwnerReferenceFailed); updateErr != nil { - return nil, resources.HandleError("failed to create taskrun runtime object", err, updateErr) + for _, taskRun := range generatedTaskRuns { + if taskRun == nil { + continue + } + // CheckTaskRunVolumesExist already returns a specific error if a volume is not found. + if err := resources.CheckTaskRunVolumesExist(ctx, r.client, taskRun); err != nil { + return err } - - return nil, err } - return generatedTaskRun, nil + return nil } diff --git a/pkg/reconciler/buildrun/buildrun_test.go b/pkg/reconciler/buildrun/buildrun_test.go index 0d2e0182a0..e55e40fa8d 100644 --- a/pkg/reconciler/buildrun/buildrun_test.go +++ b/pkg/reconciler/buildrun/buildrun_test.go @@ -75,6 +75,9 @@ var _ = Describe("Reconcile BuildRun", func() { case *pipelineapi.TaskRun: taskRunSample.DeepCopyInto(object) return nil + case *pipelineapi.PipelineRun: + // For PipelineRun tests, we'll handle this in the specific test contexts + return k8serrors.NewNotFound(schema.GroupResource{}, nn.Name) } return k8serrors.NewNotFound(schema.GroupResource{}, nn.Name) } @@ -244,6 +247,41 @@ var _ = Describe("Reconcile BuildRun", func() { Expect(serviceAccount.Name).To(Equal(buildRunSample.Name)) Expect(serviceAccount.Namespace).To(Equal(buildRunSample.Namespace)) }) + + It("retrieves existing executor when BuildRun has executor reference", func() { + // setup a buildrun with an existing executor reference + buildRunSample = ctl.DefaultBuildRun(buildRunName, buildName) + buildRunSample.Status.Executor = &build.BuildExecutor{ + Name: taskRunName, + Kind: "TaskRun", + } + + // Override Stub get calls to include the BuildRun with executor reference + client.GetCalls(func(_ context.Context, nn types.NamespacedName, object crc.Object, getOptions ...crc.GetOption) error { + switch object := object.(type) { + case *build.Build: + buildSample.DeepCopyInto(object) + return nil + case *build.BuildRun: + buildRunSample.DeepCopyInto(object) + return nil + case *pipelineapi.TaskRun: + taskRunSample.DeepCopyInto(object) + return nil + } + return k8serrors.NewNotFound(schema.GroupResource{}, nn.Name) + }) + + // Call the reconciler + result, err := reconciler.Reconcile(context.TODO(), buildRunRequest) + + // Expect no error + Expect(err).ToNot(HaveOccurred()) + Expect(reconcile.Result{}).To(Equal(result)) + + // Expect the reconciler to get the BuildRun and then the TaskRun via executor reference + Expect(client.GetCallCount()).To(Equal(2)) + }) }) Context("from an existing TaskRun with Conditions", func() { @@ -1719,5 +1757,359 @@ var _ = Describe("Reconcile BuildRun", func() { Expect(statusWriter.UpdateCallCount()).To(Equal(1)) }) }) + + Context("when using PipelineRun executor", func() { + var ( + pipelineRunName string + pipelineRunSample *pipelineapi.PipelineRun + ) + + BeforeEach(func() { + pipelineRunName = "foobar-buildrun-p8nts" + buildRunRequest = newReconcileRequest(buildRunName, ns) + buildRunSample = ctl.BuildRunWithoutSA(buildRunName, buildName) + }) + + JustBeforeEach(func() { + // Create a reconciler with PipelineRun executor + cfg := config.NewDefaultConfig() + cfg.BuildrunExecutor = "PipelineRun" + reconciler = buildrunctl.NewReconciler(cfg, manager, controllerutil.SetControllerReference) + }) + + Context("from an existing PipelineRun resource", func() { + BeforeEach(func() { + // Generate a new Reconcile Request using the existing PipelineRun name and namespace + taskRunRequest = newReconcileRequest(pipelineRunName, ns) + + // initialize a PipelineRun, we need this to fake the existence of a Tekton PipelineRun + pipelineRunSample = ctl.DefaultPipelineRunWithStatus(pipelineRunName, buildRunName, ns, corev1.ConditionTrue, "Succeeded") + + // initialize a BuildRun, we need this to fake the existence of a BuildRun + buildRunSample = ctl.DefaultBuildRun(buildRunName, buildName) + + // Set up client stub for PipelineRun tests + client.GetCalls(func(_ context.Context, nn types.NamespacedName, object crc.Object, getOptions ...crc.GetOption) error { + switch object := object.(type) { + case *build.Build: + buildSample.DeepCopyInto(object) + return nil + case *build.BuildRun: + buildRunSample.DeepCopyInto(object) + return nil + case *pipelineapi.PipelineRun: + pipelineRunSample.DeepCopyInto(object) + return nil + } + return k8serrors.NewNotFound(schema.GroupResource{}, nn.Name) + }) + }) + + It("is able to retrieve a PipelineRun, Build and a BuildRun", func() { + // stub the existence of a Build, BuildRun and + // a PipelineRun via the getClientStub, therefore we + // expect the Reconcile to Succeed because all resources + // exist + result, err := reconciler.Reconcile(context.TODO(), taskRunRequest) + Expect(err).ToNot(HaveOccurred()) + Expect(reconcile.Result{}).To(Equal(result)) + Expect(client.GetCallCount()).To(Equal(2)) + }) + + It("does not fail when the BuildRun does not exist", func() { + // override the initial getClientStub, and generate a new stub + // that only contains a Build and PipelineRun, none BuildRun + stubGetCalls := ctl.StubBuildAndPipelineRun(buildSample, pipelineRunSample) + client.GetCalls(stubGetCalls) + + result, err := reconciler.Reconcile(context.TODO(), taskRunRequest) + Expect(err).ToNot(HaveOccurred()) + Expect(reconcile.Result{}).To(Equal(result)) + Expect(client.GetCallCount()).To(Equal(3)) + }) + + It("updates the BuildRun status", func() { + // generated stub that asserts the BuildRun status fields when + // status updates for a BuildRun take place + statusCall := ctl.StubBuildRunStatus( + "Succeeded", + nil, + build.Condition{ + Type: build.Succeeded, + Reason: "Succeeded", + Status: corev1.ConditionTrue, + }, + corev1.ConditionTrue, + buildSample.Spec, + false, + ) + statusWriter.UpdateCalls(statusCall) + + // Assert for none errors while we exit the Reconcile + // after updating the BuildRun status with the existing + // PipelineRun one + result, err := reconciler.Reconcile(context.TODO(), taskRunRequest) + Expect(err).ToNot(HaveOccurred()) + Expect(reconcile.Result{}).To(Equal(result)) + Expect(client.GetCallCount()).To(Equal(2)) + Expect(client.StatusCallCount()).To(Equal(1)) + }) + + It("updates the BuildRun status with a PENDING reason", func() { + // initialize a PipelineRun, we need this to fake the existence of a Tekton PipelineRun + pipelineRunSample = ctl.DefaultPipelineRunWithStatus(pipelineRunName, buildRunName, ns, corev1.ConditionUnknown, "Pending") + + // Stub that asserts the BuildRun status fields when + // Status updates for a BuildRun take place + statusCall := ctl.StubBuildRunStatus( + "Pending", + nil, + build.Condition{ + Type: build.Succeeded, + Reason: "Pending", + Status: corev1.ConditionUnknown, + }, + corev1.ConditionUnknown, + buildSample.Spec, + false, + ) + statusWriter.UpdateCalls(statusCall) + + // Assert for none errors while we exit the Reconcile + // after updating the BuildRun status with the existing + // PipelineRun one + result, err := reconciler.Reconcile(context.TODO(), taskRunRequest) + Expect(err).ToNot(HaveOccurred()) + Expect(reconcile.Result{}).To(Equal(result)) + Expect(client.GetCallCount()).To(Equal(2)) + Expect(client.StatusCallCount()).To(Equal(1)) + }) + + It("updates the BuildRun status with a RUNNING reason", func() { + pipelineRunSample = ctl.DefaultPipelineRunWithStatus(pipelineRunName, buildRunName, ns, corev1.ConditionUnknown, "Running") + + // Stub that asserts the BuildRun status fields when + // Status updates for a BuildRun take place + statusCall := ctl.StubBuildRunStatus( + "Running", + nil, + build.Condition{ + Type: build.Succeeded, + Reason: "Running", + Status: corev1.ConditionUnknown, + }, + corev1.ConditionUnknown, + buildSample.Spec, + false, + ) + statusWriter.UpdateCalls(statusCall) + + result, err := reconciler.Reconcile(context.TODO(), taskRunRequest) + Expect(err).ToNot(HaveOccurred()) + Expect(reconcile.Result{}).To(Equal(result)) + Expect(client.GetCallCount()).To(Equal(2)) + Expect(client.StatusCallCount()).To(Equal(1)) + }) + + It("updates the BuildRun status with a SUCCEEDED reason", func() { + pipelineRunSample = ctl.DefaultPipelineRunWithStatus(pipelineRunName, buildRunName, ns, corev1.ConditionTrue, "Succeeded") + + // Stub that asserts the BuildRun status fields when + // Status updates for a BuildRun take place + statusCall := ctl.StubBuildRunStatus( + "Succeeded", + nil, + build.Condition{ + Type: build.Succeeded, + Reason: "Succeeded", + Status: corev1.ConditionTrue, + }, + corev1.ConditionTrue, + buildSample.Spec, + false, + ) + statusWriter.UpdateCalls(statusCall) + + result, err := reconciler.Reconcile(context.TODO(), taskRunRequest) + Expect(err).ToNot(HaveOccurred()) + Expect(reconcile.Result{}).To(Equal(result)) + Expect(client.GetCallCount()).To(Equal(2)) + Expect(client.StatusCallCount()).To(Equal(1)) + }) + + It("updates the BuildRun status when a FALSE status occurs", func() { + pipelineRunSample = ctl.DefaultPipelineRunWithFalseStatus(pipelineRunName, buildRunName, ns) + + // Based on the current buildRun controller, if the PipelineRun condition.Status + // is FALSE, we will then populate our buildRun.Status.Reason with the + // PipelineRun condition.Message, rather than the condition.Reason + statusCall := ctl.StubBuildRunStatus( + "some message", + nil, + build.Condition{ + Type: build.Succeeded, + Reason: "something bad happened", + Status: corev1.ConditionFalse, + }, + corev1.ConditionFalse, + buildSample.Spec, + false, + ) + statusWriter.UpdateCalls(statusCall) + + result, err := reconciler.Reconcile(context.TODO(), taskRunRequest) + Expect(err).ToNot(HaveOccurred()) + Expect(reconcile.Result{}).To(Equal(result)) + }) + + It("retrieves existing executor when BuildRun has executor reference", func() { + // setup a buildrun with an existing executor reference + buildRunSample = ctl.DefaultBuildRun(buildRunName, buildName) + buildRunSample.Status.Executor = &build.BuildExecutor{ + Name: pipelineRunName, + Kind: "PipelineRun", + } + + // Override Stub get calls to include the BuildRun with executor reference + client.GetCalls(func(_ context.Context, nn types.NamespacedName, object crc.Object, getOptions ...crc.GetOption) error { + switch object := object.(type) { + case *build.Build: + buildSample.DeepCopyInto(object) + return nil + case *build.BuildRun: + buildRunSample.DeepCopyInto(object) + return nil + case *pipelineapi.PipelineRun: + pipelineRunSample.DeepCopyInto(object) + return nil + } + return k8serrors.NewNotFound(schema.GroupResource{}, nn.Name) + }) + + // Call the reconciler + result, err := reconciler.Reconcile(context.TODO(), buildRunRequest) + + // Expect no error + Expect(err).ToNot(HaveOccurred()) + Expect(reconcile.Result{}).To(Equal(result)) + + // Expect the reconciler to get the BuildRun and then the PipelineRun via executor reference + Expect(client.GetCallCount()).To(Equal(2)) + }) + }) + + Context("from an existing BuildRun resource", func() { + var ( + saName string + emptyPipelineRunName *string + ) + BeforeEach(func() { + saName = "foobar-sa" + + // Generate a new Reconcile Request using the existing BuildRun name and namespace + buildRunRequest = newReconcileRequest(buildRunName, ns) + + // override the BuildRun resource to use a BuildRun with a specified + // serviceaccount + buildRunSample = ctl.BuildRunWithSA(buildRunName, buildName, saName) + }) + + It("succeeds creating a PipelineRun from a namespaced buildstrategy", func() { + // override the Build to use a namespaced BuildStrategy + buildSample = ctl.DefaultBuild(buildName, strategyName, build.NamespacedBuildStrategyKind) + + // Override Stub get calls to include a service account + // and BuildStrategies + client.GetCalls(ctl.StubBuildRunGetWithSAandStrategies( + buildSample, + buildRunSample, + ctl.DefaultServiceAccount(saName), + ctl.DefaultClusterBuildStrategy(), + ctl.DefaultNamespacedBuildStrategy()), + ) + + // Stub the create calls for a PipelineRun + client.CreateCalls(func(_ context.Context, object crc.Object, _ ...crc.CreateOption) error { + switch object := object.(type) { + case *pipelineapi.PipelineRun: + ctl.DefaultPipelineRunWithStatus(pipelineRunName, buildRunName, ns, corev1.ConditionTrue, "Succeeded").DeepCopyInto(object) + } + return nil + }) + + _, err := reconciler.Reconcile(context.TODO(), buildRunRequest) + Expect(err).ToNot(HaveOccurred()) + + Expect(client.CreateCallCount()).To(Equal(1)) + }) + + It("succeeds creating a PipelineRun from a cluster buildstrategy", func() { + // override the Build to use a cluster BuildStrategy + buildSample = ctl.DefaultBuild(buildName, strategyName, build.ClusterBuildStrategyKind) + + // Override Stub get calls to include a service account + // and BuildStrategies + client.GetCalls(ctl.StubBuildRunGetWithSAandStrategies( + buildSample, + buildRunSample, + ctl.DefaultServiceAccount(saName), + ctl.DefaultClusterBuildStrategy(), + ctl.DefaultNamespacedBuildStrategy()), + ) + + // Stub the create calls for a PipelineRun + client.CreateCalls(func(_ context.Context, object crc.Object, _ ...crc.CreateOption) error { + switch object := object.(type) { + case *pipelineapi.PipelineRun: + ctl.DefaultPipelineRunWithStatus(pipelineRunName, buildRunName, ns, corev1.ConditionTrue, "Succeeded").DeepCopyInto(object) + } + return nil + }) + + _, err := reconciler.Reconcile(context.TODO(), buildRunRequest) + Expect(err).ToNot(HaveOccurred()) + }) + + It("fails on a PipelineRun creation due to service account not found", func() { + // override the initial getClientStub, and generate a new stub + // that only contains a Build and Buildrun, none PipelineRun + stubGetCalls := func(_ context.Context, nn types.NamespacedName, object crc.Object, _ ...crc.GetOption) error { + switch object := object.(type) { + case *build.Build: + buildSample.DeepCopyInto(object) + return nil + case *build.BuildRun: + buildRunSample.DeepCopyInto(object) + return nil + } + return k8serrors.NewNotFound(schema.GroupResource{}, nn.Name) + } + + client.GetCalls(stubGetCalls) + + // Stub that asserts the BuildRun status fields when + // Status updates for a BuildRun take place + statusCall := ctl.StubBuildRunStatus( + fmt.Sprintf("service account %s not found", saName), + emptyPipelineRunName, + build.Condition{ + Type: build.Succeeded, + Reason: "ServiceAccountNotFound", + Status: corev1.ConditionFalse, + }, + corev1.ConditionFalse, + buildSample.Spec, + true, + ) + statusWriter.UpdateCalls(statusCall) + + // we mark the BuildRun as Failed and do not reconcile again + _, err := reconciler.Reconcile(context.TODO(), buildRunRequest) + Expect(err).ToNot(HaveOccurred()) + Expect(client.GetCallCount()).To(Equal(4)) + Expect(client.StatusCallCount()).To(Equal(2)) + }) + }) + }) }) }) diff --git a/pkg/reconciler/buildrun/controller.go b/pkg/reconciler/buildrun/controller.go index 98a50c38e4..a0d1f1a345 100644 --- a/pkg/reconciler/buildrun/controller.go +++ b/pkg/reconciler/buildrun/controller.go @@ -6,10 +6,12 @@ package buildrun import ( "context" + "fmt" pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "knative.dev/pkg/apis" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -31,11 +33,11 @@ type setOwnerReferenceFunc func(owner, object metav1.Object, scheme *runtime.Sch // Add creates a new BuildRun Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(_ context.Context, c *config.Config, mgr manager.Manager) error { - return add(mgr, NewReconciler(c, mgr, controllerutil.SetControllerReference), c.Controllers.BuildRun.MaxConcurrentReconciles) + return add(mgr, NewReconciler(c, mgr, controllerutil.SetControllerReference), c.Controllers.BuildRun.MaxConcurrentReconciles, c) } // add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles int) error { +func add(mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles int, cfg *config.Config) error { // Create the controller options options := controller.Options{ Reconciler: r, @@ -52,9 +54,9 @@ func add(mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles in predBuildRun := predicate.TypedFuncs[*buildv1beta1.BuildRun]{ CreateFunc: func(e event.TypedCreateEvent[*buildv1beta1.BuildRun]) bool { - // The CreateFunc is also called when the controller is started and iterates over all objects. For those BuildRuns that have a TaskRun referenced already, - // we do not need to do a further reconciliation. BuildRun updates then only happen from the TaskRun. - return e.Object.Status.TaskRunName == nil && e.Object.Status.CompletionTime == nil // nolint:staticcheck + // The CreateFunc is also called when the controller is started and iterates over all objects. For those BuildRuns that have an executor referenced already, + // we do not need to do a further reconciliation. BuildRun updates then only happen from the TaskRun/PipelineRun. + return e.Object.Status.Executor == nil && e.Object.Status.CompletionTime == nil }, UpdateFunc: func(e event.TypedUpdateEvent[*buildv1beta1.BuildRun]) bool { // Only reconcile a BuildRun update when @@ -77,18 +79,35 @@ func add(mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles in o := e.ObjectOld n := e.ObjectNew - // Process an update event when the old TR resource is not yet started and the new TR resource got a - // condition of the type Succeeded - if o.Status.StartTime.IsZero() && n.Status.GetCondition(apis.ConditionSucceeded) != nil { + // Check for start time changes + if o.Status.StartTime.IsZero() && !n.Status.StartTime.IsZero() { return true } - // Process an update event for every change in the condition.Reason between the old and new TR resource - if o.Status.GetCondition(apis.ConditionSucceeded) != nil && n.Status.GetCondition(apis.ConditionSucceeded) != nil { - if o.Status.GetCondition(apis.ConditionSucceeded).Reason != n.Status.GetCondition(apis.ConditionSucceeded).Reason { + // Check for condition changes + oldCondition := o.Status.GetCondition(apis.ConditionSucceeded) + newCondition := n.Status.GetCondition(apis.ConditionSucceeded) + + // New condition appeared + if oldCondition == nil && newCondition != nil { + return true + } + + // Both conditions exist, check for changes + if oldCondition != nil && newCondition != nil { + // Always reconcile on failures + if newCondition.Status == corev1.ConditionFalse { + return true + } + + // Check for status, reason, or message changes + if oldCondition.Status != newCondition.Status || + oldCondition.Reason != newCondition.Reason || + oldCondition.Message != newCondition.Message { return true } } + return false }, DeleteFunc: func(e event.TypedDeleteEvent[*pipelineapi.TaskRun]) bool { @@ -97,26 +116,97 @@ func add(mgr manager.Manager, r reconcile.Reconciler, maxConcurrentReconciles in }, } + predPipelineRun := predicate.TypedFuncs[*pipelineapi.PipelineRun]{ + UpdateFunc: func(e event.TypedUpdateEvent[*pipelineapi.PipelineRun]) bool { + o := e.ObjectOld + n := e.ObjectNew + + // Check for start time changes + if o.Status.StartTime.IsZero() && !n.Status.StartTime.IsZero() { + return true + } + + // Check for condition changes + oldCondition := o.Status.GetCondition(apis.ConditionSucceeded) + newCondition := n.Status.GetCondition(apis.ConditionSucceeded) + + // New condition appeared + if oldCondition == nil && newCondition != nil { + return true + } + + // Both conditions exist, check for changes + if oldCondition != nil && newCondition != nil { + // Always reconcile on failures + if newCondition.Status == corev1.ConditionFalse { + return true + } + + // Check for status, reason, or message changes + if oldCondition.Status != newCondition.Status || + oldCondition.Reason != newCondition.Reason || + oldCondition.Message != newCondition.Message { + return true + } + } + + return false + }, + DeleteFunc: func(e event.TypedDeleteEvent[*pipelineapi.PipelineRun]) bool { + // If the PipelineRun was deleted before completion, then we reconcile to update the BuildRun to a Failed status + return e.Object.Status.CompletionTime == nil + }, + } + // Watch for changes to primary resource BuildRun if err = c.Watch(source.Kind[*buildv1beta1.BuildRun](mgr.GetCache(), &buildv1beta1.BuildRun{}, &handler.TypedEnqueueRequestForObject[*buildv1beta1.BuildRun]{}, predBuildRun)); err != nil { return err } - // enqueue Reconciles requests only for events where a TaskRun already exists and that is related - // to a BuildRun - return c.Watch(source.Kind(mgr.GetCache(), &pipelineapi.TaskRun{}, handler.TypedEnqueueRequestsFromMapFunc(func(_ context.Context, taskRun *pipelineapi.TaskRun) []reconcile.Request { - // check if TaskRun is related to BuildRun - if taskRun.GetLabels() == nil || taskRun.GetLabels()[buildv1beta1.LabelBuildRun] == "" { - return []reconcile.Request{} - } - + // Common handler for executor events + enqueueExecutorHandler := func(name, namespace string) []reconcile.Request { return []reconcile.Request{ { NamespacedName: types.NamespacedName{ - Name: taskRun.Name, - Namespace: taskRun.Namespace, + Name: name, + Namespace: namespace, }, }, } - }), predTaskRun)) + } + + // Watch for executor events based on configuration + // Only watch the executor type that is configured to be used + switch cfg.BuildrunExecutor { + case "TaskRun": + // Watch for TaskRun events + // enqueue Reconciles requests only for events where a TaskRun already exists and that is related + // to a BuildRun + if err = c.Watch(source.Kind(mgr.GetCache(), &pipelineapi.TaskRun{}, handler.TypedEnqueueRequestsFromMapFunc(func(_ context.Context, taskRun *pipelineapi.TaskRun) []reconcile.Request { + // check if TaskRun is related to BuildRun + if taskRun.GetLabels() == nil || taskRun.GetLabels()[buildv1beta1.LabelBuildRun] == "" { + return []reconcile.Request{} + } + return enqueueExecutorHandler(taskRun.Name, taskRun.Namespace) + }), predTaskRun)); err != nil { + return err + } + case "PipelineRun": + // Watch for PipelineRun events + // enqueue Reconciles requests only for events where a PipelineRun already exists and that is related + // to a BuildRun + if err = c.Watch(source.Kind(mgr.GetCache(), &pipelineapi.PipelineRun{}, handler.TypedEnqueueRequestsFromMapFunc(func(_ context.Context, pipelineRun *pipelineapi.PipelineRun) []reconcile.Request { + // check if PipelineRun is related to BuildRun + if pipelineRun.GetLabels() == nil || pipelineRun.GetLabels()[buildv1beta1.LabelBuildRun] == "" { + return []reconcile.Request{} + } + return enqueueExecutorHandler(pipelineRun.Name, pipelineRun.Namespace) + }), predPipelineRun)); err != nil { + return err + } + default: + return fmt.Errorf("unsupported BuildrunExecutor: %s", cfg.BuildrunExecutor) + } + + return nil } diff --git a/pkg/reconciler/buildrun/imagebuildrunner.go b/pkg/reconciler/buildrun/imagebuildrunner.go index 156ef0d7a8..a7b9b47da2 100644 --- a/pkg/reconciler/buildrun/imagebuildrunner.go +++ b/pkg/reconciler/buildrun/imagebuildrunner.go @@ -32,7 +32,7 @@ type ImageBuildRunner interface { // GetCreationTimestamp returns the creation timestamp of the build runner. GetCreationTimestamp() metav1.Time // GetResults returns the results of the build runner. - GetResults() []pipelineapi.TaskRunResult + GetResults(ctx context.Context, client client.Client) []pipelineapi.TaskRunResult // GetCondition returns the condition of the build runner. GetCondition(conditionType apis.ConditionType) *apis.Condition // GetStartTime returns the start time of the build runner. @@ -47,6 +47,12 @@ type ImageBuildRunner interface { Cancel(ctx context.Context, client client.Client) error // GetObject returns the underlying client.Object for owner reference operations. GetObject() client.Object + + // GetExecutorKind returns the kind of executor (e.g., "TaskRun", "PipelineRun"). + GetExecutorKind() string + + // GetUnderlyingTaskRun returns the generated TaskRun from using either TaskRun or PipelineRun. + GetUnderlyingTaskRuns(client client.Client) ([]*pipelineapi.TaskRun, error) } // ImageBuildRunnerFactory defines methods for creating and manipulating ImageBuildRunners. @@ -55,7 +61,7 @@ type ImageBuildRunnerFactory interface { NewImageBuildRunner() ImageBuildRunner // CreateImageBuildRunner creates an ImageBuildRunner instance from build configuration. It does not create the ImageBuildRunner in the API server. - CreateImageBuildRunner(cfg *config.Config, serviceAccount *corev1.ServiceAccount, strategy buildv1beta1.BuilderStrategy, build *buildv1beta1.Build, buildRun *buildv1beta1.BuildRun, scheme *runtime.Scheme, setOwnerRef setOwnerReferenceFunc) (ImageBuildRunner, error) + CreateImageBuildRunner(ctx context.Context, client client.Client, cfg *config.Config, serviceAccount *corev1.ServiceAccount, strategy buildv1beta1.BuilderStrategy, build *buildv1beta1.Build, buildRun *buildv1beta1.BuildRun, scheme *runtime.Scheme, setOwnerRef setOwnerReferenceFunc) (ImageBuildRunner, error) // GetImageBuildRunner retrieves an ImageBuildRunner from the API server. GetImageBuildRunner(ctx context.Context, client client.Client, namespacedName types.NamespacedName) (ImageBuildRunner, error) @@ -64,6 +70,12 @@ type ImageBuildRunnerFactory interface { CreateImageBuildRunnerInCluster(ctx context.Context, client client.Client, taskRunner ImageBuildRunner) error } +// Available image build runners for a buildrun. +var RunnerFactories = map[string]ImageBuildRunnerFactory{ + "PipelineRun": &TektonPipelineRunImageBuildRunnerFactory{}, + "TaskRun": &TektonTaskRunImageBuildRunnerFactory{}, +} + // TektonTaskRunWrapper wraps pipelineapi.TaskRun to implement the ImageBuildRunner interface. type TektonTaskRunWrapper struct { TaskRun *pipelineapi.TaskRun @@ -94,7 +106,7 @@ func (t *TektonTaskRunWrapper) GetCreationTimestamp() metav1.Time { } // GetResults returns the TaskRun results -func (t *TektonTaskRunWrapper) GetResults() []pipelineapi.TaskRunResult { +func (t *TektonTaskRunWrapper) GetResults(_ context.Context, _ client.Client) []pipelineapi.TaskRunResult { if t.TaskRun == nil { return nil } @@ -184,6 +196,20 @@ func (t *TektonTaskRunWrapper) GetObject() client.Object { return t.TaskRun } +// GetExecutorKind returns the kind of executor. +// to set the kind of executor field for the build run +func (t *TektonTaskRunWrapper) GetExecutorKind() string { + return "TaskRun" +} + +// GetUnderlyingTaskRun returns the underlying TaskRun. +func (t *TektonTaskRunWrapper) GetUnderlyingTaskRuns(_ client.Client) ([]*pipelineapi.TaskRun, error) { + if t.TaskRun == nil { + return nil, fmt.Errorf("underlying TaskRun does not exist") + } + return []*pipelineapi.TaskRun{t.TaskRun}, nil +} + // TektonTaskRunImageBuildRunnerFactory implements ImageBuildRunnerFactory for Tekton TaskRuns type TektonTaskRunImageBuildRunnerFactory struct{} @@ -195,14 +221,22 @@ func (f *TektonTaskRunImageBuildRunnerFactory) NewImageBuildRunner() ImageBuildR } // CreateImageBuildRunner creates an ImageBuildRunner instance from build configuration. It does not create the ImageBuildRunner in the API server. -func (f *TektonTaskRunImageBuildRunnerFactory) CreateImageBuildRunner(cfg *config.Config, serviceAccount *corev1.ServiceAccount, strategy buildv1beta1.BuilderStrategy, build *buildv1beta1.Build, buildRun *buildv1beta1.BuildRun, scheme *runtime.Scheme, setOwnerRef setOwnerReferenceFunc) (ImageBuildRunner, error) { +func (f *TektonTaskRunImageBuildRunnerFactory) CreateImageBuildRunner(ctx context.Context, client client.Client, cfg *config.Config, serviceAccount *corev1.ServiceAccount, strategy buildv1beta1.BuilderStrategy, build *buildv1beta1.Build, buildRun *buildv1beta1.BuildRun, scheme *runtime.Scheme, setOwnerRef setOwnerReferenceFunc) (ImageBuildRunner, error) { generatedTaskRun, err := resources.GenerateTaskRun(cfg, build, buildRun, serviceAccount.Name, strategy) if err != nil { + if updateErr := resources.UpdateConditionWithFalseStatus(ctx, client, buildRun, err.Error(), resources.ConditionTaskRunGenerationFailed); updateErr != nil { + return nil, resources.HandleError("failed to create taskrun runtime object", err, updateErr) + } + return nil, err } // Set OwnerReference for BuildRun and TaskRun if err := setOwnerRef(buildRun, generatedTaskRun, scheme); err != nil { + if updateErr := resources.UpdateConditionWithFalseStatus(ctx, client, buildRun, err.Error(), resources.ConditionSetOwnerReferenceFailed); updateErr != nil { + return nil, resources.HandleError("failed to create taskrun runtime object", err, updateErr) + } + return nil, err } diff --git a/pkg/reconciler/buildrun/pipelinerun_runner.go b/pkg/reconciler/buildrun/pipelinerun_runner.go new file mode 100644 index 0000000000..c0c0273349 --- /dev/null +++ b/pkg/reconciler/buildrun/pipelinerun_runner.go @@ -0,0 +1,261 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package buildrun + +import ( + "context" + "encoding/json" + "fmt" + + buildv1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" + "github.com/shipwright-io/build/pkg/config" + "github.com/shipwright-io/build/pkg/reconciler/buildrun/resources" + pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "knative.dev/pkg/apis" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// TektonPipelineRunWrapper wraps pipelineapi.PipelineRun to implement the ImageBuildRunner interface. +type TektonPipelineRunWrapper struct { + PipelineRun *pipelineapi.PipelineRun +} + +// GetName returns the name of the PipelineRun. +func (t *TektonPipelineRunWrapper) GetName() string { + if t.PipelineRun == nil { + return "" + } + return t.PipelineRun.Name +} + +// GetLabels returns the labels of the PipelineRun. +func (t *TektonPipelineRunWrapper) GetLabels() map[string]string { + if t.PipelineRun == nil { + return nil + } + return t.PipelineRun.Labels +} + +// GetCreationTimestamp returns the creation timestamp of the PipelineRun. +func (t *TektonPipelineRunWrapper) GetCreationTimestamp() metav1.Time { + if t.PipelineRun == nil { + return metav1.Time{} + } + return t.PipelineRun.CreationTimestamp +} + +// GetResults returns the PipelineRun results converted to TaskRun results. +// For PipelineRuns, we need to extract results from the underlying TaskRuns +// since results are typically written by individual TaskRuns, not by the PipelineRun itself. +func (t *TektonPipelineRunWrapper) GetResults(ctx context.Context, client client.Client) []pipelineapi.TaskRunResult { + if t.PipelineRun == nil { + return nil + } + + var taskRunResults []pipelineapi.TaskRunResult + + // First, check if the PipelineRun has its own results + for _, prResult := range t.PipelineRun.Status.Results { + taskRunResults = append(taskRunResults, pipelineapi.TaskRunResult{ + Name: prResult.Name, + Value: prResult.Value, + }) + } + + // If no PipelineRun results exist, extract results from underlying TaskRuns + if len(taskRunResults) == 0 && len(t.PipelineRun.Status.ChildReferences) > 0 { + // Extract results from all TaskRuns in the PipelineRun + for _, childRef := range t.PipelineRun.Status.ChildReferences { + if childRef.Kind == "TaskRun" { + taskRun := &pipelineapi.TaskRun{} + taskRunName := types.NamespacedName{ + Namespace: t.PipelineRun.Namespace, + Name: childRef.Name, + } + + if err := client.Get(ctx, taskRunName, taskRun); err != nil { + // Log error but continue with other TaskRuns + continue + } + + // Convert TaskRun results to TaskRunResult format + for _, result := range taskRun.Status.Results { + taskRunResults = append(taskRunResults, pipelineapi.TaskRunResult{ + Name: result.Name, + Value: result.Value, + }) + } + } + } + } + + return taskRunResults +} + +// GetCondition returns the condition with the specified type. +func (t *TektonPipelineRunWrapper) GetCondition(conditionType apis.ConditionType) *apis.Condition { + if t.PipelineRun == nil { + return nil + } + return t.PipelineRun.Status.GetCondition(conditionType) +} + +// GetStartTime returns the start time of the PipelineRun. +func (t *TektonPipelineRunWrapper) GetStartTime() *metav1.Time { + if t.PipelineRun == nil { + return nil + } + return t.PipelineRun.Status.StartTime +} + +// GetCompletionTime returns the completion time of the PipelineRun. +func (t *TektonPipelineRunWrapper) GetCompletionTime() *metav1.Time { + if t.PipelineRun == nil { + return nil + } + return t.PipelineRun.Status.CompletionTime +} + +func (t *TektonPipelineRunWrapper) GetPodName() string { + // For PipelineRuns, we cannot reliably determine the pod name without fetching the TaskRun + // due to potential name truncation in Tekton. The reconcile function handles this case by + // fetching the TaskRun and using its Status.PodName for metrics collection (pod ramp-up duration). + // Callers should use GetUnderlyingTaskRuns() to get the actual TaskRun and access its Status.PodName field. + return "" +} + +// IsCancelled returns true if the PipelineRun is cancelled. +func (t *TektonPipelineRunWrapper) IsCancelled() bool { + if t.PipelineRun == nil { + return false + } + return t.PipelineRun.IsCancelled() +} + +// Cancel cancels the PipelineRun by setting its status to cancelled. +func (t *TektonPipelineRunWrapper) Cancel(ctx context.Context, c client.Client) error { + if t.PipelineRun == nil { + return fmt.Errorf("underlying PipelineRun does not exist") + } + + payload := []patchStringValue{{ + Op: "replace", + Path: "/spec/status", + Value: pipelineapi.PipelineRunSpecStatusCancelled, + }} + data, err := json.Marshal(payload) + if err != nil { + return err + } + patch := client.RawPatch(types.JSONPatchType, data) + + trueParam := true + patchOpt := client.PatchOptions{ + Raw: &metav1.PatchOptions{ + Force: &trueParam, + }, + } + return c.Patch(ctx, t.PipelineRun, patch, &patchOpt) +} + +// GetObject returns the underlying client.Object for owner reference operations. +func (t *TektonPipelineRunWrapper) GetObject() client.Object { + return t.PipelineRun +} + +// GetExecutorKind returns the kind of executor. +func (t *TektonPipelineRunWrapper) GetExecutorKind() string { + return "PipelineRun" +} + +// GetUnderlyingTaskRuns returns the actual TaskRun from the child references in the PipelineRun. +func (t *TektonPipelineRunWrapper) GetUnderlyingTaskRuns(client client.Client) ([]*pipelineapi.TaskRun, error) { + if t.PipelineRun == nil { + return nil, fmt.Errorf("underlying PipelineRun does not exist") + } + + // If no ChildReferences exist yet, return an empty slice to allow reconciliation to continue. + if len(t.PipelineRun.Status.ChildReferences) == 0 { + return []*pipelineapi.TaskRun{}, nil + } + + var taskRuns []*pipelineapi.TaskRun + for _, childRef := range t.PipelineRun.Status.ChildReferences { + // Ensure the child is a TaskRun before attempting to fetch it. + if childRef.Kind != "TaskRun" { + continue + } + + taskRun := &pipelineapi.TaskRun{} + err := client.Get(context.Background(), types.NamespacedName{ + Name: childRef.Name, + Namespace: t.PipelineRun.Namespace, + }, taskRun) + + if err != nil { + // A missing TaskRun is a critical error. + return nil, fmt.Errorf("failed to fetch TaskRun %s: %w", childRef.Name, err) + } + taskRuns = append(taskRuns, taskRun) + } + + return taskRuns, nil +} + +// TektonPipelineRunImageBuildRunnerFactory implements ImageBuildRunnerFactory for Tekton PipelineRuns. +type TektonPipelineRunImageBuildRunnerFactory struct{} + +// NewImageBuildRunner creates a new empty ImageBuildRunner for a PipelineRun. +func (f *TektonPipelineRunImageBuildRunnerFactory) NewImageBuildRunner() ImageBuildRunner { + return &TektonPipelineRunWrapper{ + PipelineRun: &pipelineapi.PipelineRun{}, + } +} + +// CreateImageBuildRunner creates an ImageBuildRunner instance from build configuration. +func (f *TektonPipelineRunImageBuildRunnerFactory) CreateImageBuildRunner(ctx context.Context, client client.Client, cfg *config.Config, serviceAccount *corev1.ServiceAccount, strategy buildv1beta1.BuilderStrategy, build *buildv1beta1.Build, buildRun *buildv1beta1.BuildRun, scheme *runtime.Scheme, setOwnerRef setOwnerReferenceFunc) (ImageBuildRunner, error) { + generatedPipelineRun, err := resources.GeneratePipelineRun(cfg, build, buildRun, serviceAccount.Name, strategy) + if err != nil { + if updateErr := resources.UpdateConditionWithFalseStatus(ctx, client, buildRun, err.Error(), resources.ConditionPipelineRunGenerationFailed); updateErr != nil { + return nil, resources.HandleError("failed to create pipelinerun runtime object", err, updateErr) + } + + return nil, err + } + + if err := setOwnerRef(buildRun, generatedPipelineRun, scheme); err != nil { + if updateErr := resources.UpdateConditionWithFalseStatus(ctx, client, buildRun, err.Error(), resources.ConditionSetOwnerReferenceFailed); updateErr != nil { + return nil, resources.HandleError("failed to create pipelinerun runtime object", err, updateErr) + } + + return nil, err + } + + return &TektonPipelineRunWrapper{PipelineRun: generatedPipelineRun}, nil +} + +// GetImageBuildRunner retrieves an ImageBuildRunner from the API server. +func (f *TektonPipelineRunImageBuildRunnerFactory) GetImageBuildRunner(ctx context.Context, client client.Client, namespacedName types.NamespacedName) (ImageBuildRunner, error) { + pipelineRun := &pipelineapi.PipelineRun{} + err := client.Get(ctx, namespacedName, pipelineRun) + if err != nil { + return nil, err + } + return &TektonPipelineRunWrapper{PipelineRun: pipelineRun}, nil +} + +// CreateImageBuildRunnerInCluster creates the ImageBuildRunner in the API server. +func (f *TektonPipelineRunImageBuildRunnerFactory) CreateImageBuildRunnerInCluster(ctx context.Context, client client.Client, runner ImageBuildRunner) error { + wrapper, ok := runner.(*TektonPipelineRunWrapper) + if !ok { + return fmt.Errorf("unsupported ImageBuildRunner type") + } + return client.Create(ctx, wrapper.PipelineRun) +} diff --git a/pkg/reconciler/buildrun/resources/conditions.go b/pkg/reconciler/buildrun/resources/conditions.go index a0cda0b15b..99a5ae8e30 100644 --- a/pkg/reconciler/buildrun/resources/conditions.go +++ b/pkg/reconciler/buildrun/resources/conditions.go @@ -16,6 +16,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" buildv1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" "github.com/shipwright-io/build/pkg/ctxlog" @@ -30,6 +31,7 @@ const ( ConditionFailed string = "Failed" ConditionTaskRunIsMissing string = "TaskRunIsMissing" ConditionTaskRunGenerationFailed string = "TaskRunGenerationFailed" + ConditionPipelineRunGenerationFailed string = "PipelineRunGenerationFailed" ConditionServiceAccountNotFound string = "ServiceAccountNotFound" ConditionBuildRegistrationFailed string = "BuildRegistrationFailed" ConditionBuildNotFound string = "BuildNotFound" @@ -164,6 +166,200 @@ func UpdateBuildRunUsingTaskRunCondition(ctx context.Context, client client.Clie return nil } +// UpdateBuildRunUsingPipelineRunCondition updates the BuildRun Succeeded Condition for PipelineRun conditions +func UpdateBuildRunUsingPipelineRunCondition(ctx context.Context, client client.Client, buildRun *buildv1beta1.BuildRun, pipelineRun *pipelineapi.PipelineRun, prCondition *apis.Condition) error { + var reason, message string = prCondition.Reason, prCondition.Message + status := prCondition.Status + + switch reason { + case "PipelineRunTimeout": + reason = "BuildRunTimeout" + var timeout time.Duration + if pipelineRun.Spec.Timeouts != nil && pipelineRun.Spec.Timeouts.Pipeline != nil { + timeout = pipelineRun.Spec.Timeouts.Pipeline.Duration + } else { + // if the PipelineRun does not have a timeout set, we cannot use it to determine the BuildRun timeout + timeout = time.Since(pipelineRun.CreationTimestamp.Time) + } + message = fmt.Sprintf("BuildRun %s failed to finish within %s", + buildRun.Name, + timeout, + ) + + case "PipelineRunCancelled": + if buildRun.IsCanceled() { + status = corev1.ConditionFalse + reason = buildv1beta1.BuildRunStateCancel + message = "The BuildRun and underlying PipelineRun were canceled successfully." + } + + case "Succeeded": + if buildRun.IsCanceled() { + message = "The PipelineRun completed before the request to cancel the PipelineRun could be processed." + } + + case "Failed": + // For PipelineRun failures, we need to get the underlying TaskRuns to extract failure details + if pipelineRun.Status.CompletionTime != nil { + // Try to extract failure details from the first failed TaskRun + failureDetails, err := extractPipelineRunFailureDetails(ctx, client, pipelineRun) + if err != nil { + // Log the error but continue with a generic message + ctxlog.Error(ctx, err, "failed to extract PipelineRun failure details", + "buildRun", buildRun.Name, + "namespace", buildRun.Namespace, + "pipelineRun", pipelineRun.Name) + + // Fall back to generic message + message = fmt.Sprintf("PipelineRun %s failed", pipelineRun.Name) + } else { + // Use the extracted failure details + reason = failureDetails.Reason + message = failureDetails.Message + + // Set failure details if available + if failureDetails.FailureDetails != nil { + buildRun.Status.FailureDetails = failureDetails.FailureDetails + } + } + } + } + + buildRun.Status.SetCondition(&buildv1beta1.Condition{ + LastTransitionTime: metav1.Now(), + Type: buildv1beta1.Succeeded, + Status: status, + Reason: reason, + Message: message, + }) + + return nil +} + +// PipelineRunFailureDetails contains extracted failure information from a PipelineRun +type PipelineRunFailureDetails struct { + Reason string + Message string + FailureDetails *buildv1beta1.FailureDetails +} + +// extractPipelineRunFailureDetails extracts detailed failure information from a failed PipelineRun +func extractPipelineRunFailureDetails(ctx context.Context, client client.Client, pipelineRun *pipelineapi.PipelineRun) (*PipelineRunFailureDetails, error) { + if len(pipelineRun.Status.ChildReferences) == 0 { + return &PipelineRunFailureDetails{ + Reason: "PipelineRunFailed", + Message: fmt.Sprintf("PipelineRun %s failed with no child TaskRuns", pipelineRun.Name), + }, nil + } + + // Look for failed TaskRuns in the child references + for _, childRef := range pipelineRun.Status.ChildReferences { + if childRef.TypeMeta.Kind == "TaskRun" { + taskRun := &pipelineapi.TaskRun{} + err := client.Get(ctx, types.NamespacedName{ + Name: childRef.Name, + Namespace: pipelineRun.Namespace, + }, taskRun) + + if err != nil { + if apierrors.IsNotFound(err) { + // TaskRun was deleted, continue to next one + continue + } + return nil, fmt.Errorf("failed to get TaskRun %s: %w", childRef.Name, err) + } + + // Check if this TaskRun failed + condition := taskRun.Status.GetCondition(apis.ConditionSucceeded) + if condition != nil && condition.Status == corev1.ConditionFalse { + // Extract failure details from this TaskRun + pod, failedContainer, failedContainerStatus, err := extractFailedPodAndContainer(ctx, client, taskRun) + if err != nil { + if apierrors.IsNotFound(err) { + return &PipelineRunFailureDetails{ + Reason: "PipelineRunFailed", + Message: fmt.Sprintf("PipelineRun %s failed, pod %s/%s not found", pipelineRun.Name, taskRun.Namespace, taskRun.Status.PodName), + }, nil + } + return nil, fmt.Errorf("failed to extract failure details from TaskRun %s: %w", taskRun.Name, err) + } + + // Build failure details similar to TaskRun handling + failureDetails := &buildv1beta1.FailureDetails{ + Location: &buildv1beta1.Location{ + Pod: pod.Name, + }, + } + + var reason, message string + + if pod.Status.Reason == "Evicted" { + message = pod.Status.Message + reason = buildv1beta1.BuildRunStatePodEvicted + if failedContainer != nil { + failureDetails.Location.Container = failedContainer.Name + } + } else if failedContainer != nil { + failureDetails.Location.Container = failedContainer.Name + + message = fmt.Sprintf("PipelineRun %s failed in step %s, for detailed information: kubectl --namespace %s logs %s --container=%s", + pipelineRun.Name, + failedContainer.Name, + pod.Namespace, + pod.Name, + failedContainer.Name, + ) + + if failedContainerStatus != nil && failedContainerStatus.State.Terminated != nil { + if failedContainerStatus.State.Terminated.Reason == "OOMKilled" { + reason = buildv1beta1.BuildRunStateStepOutOfMemory + message = fmt.Sprintf("PipelineRun %s failed due to out-of-memory in step %s, for detailed information: kubectl --namespace %s logs %s --container=%s", + pipelineRun.Name, + failedContainer.Name, + pod.Namespace, + pod.Name, + failedContainer.Name, + ) + } else if failedContainer.Name == "step-image-processing" && failedContainerStatus.State.Terminated.ExitCode == 22 { + reason = buildv1beta1.BuildRunStateVulnerabilitiesFound + message = fmt.Sprintf("Vulnerabilities have been found in the image from PipelineRun %s, for detailed information: kubectl --namespace %s logs %s --container=%s", + pipelineRun.Name, + pod.Namespace, + pod.Name, + failedContainer.Name, + ) + } + } + } else { + message = fmt.Sprintf("PipelineRun %s failed due to an unexpected error in pod %s: for detailed information: kubectl --namespace %s logs %s --all-containers", + pipelineRun.Name, + pod.Name, + pod.Namespace, + pod.Name, + ) + } + + // If no specific reason was set, use a generic one + if reason == "" { + reason = "PipelineRunFailed" + } + + return &PipelineRunFailureDetails{ + Reason: reason, + Message: message, + FailureDetails: failureDetails, + }, nil + } + } + } + + // If no specific failure details were found, return a generic message + return &PipelineRunFailureDetails{ + Reason: "PipelineRunFailed", + Message: fmt.Sprintf("PipelineRun %s failed", pipelineRun.Name), + }, nil +} + // UpdateConditionWithFalseStatus sets the Succeeded condition fields and mark // the condition as Status False. It also updates the object in the cluster by // calling client Status Update @@ -184,3 +380,26 @@ func UpdateConditionWithFalseStatus(ctx context.Context, client client.Client, b return nil } + +// UpdateImageBuildRunFromExecutor updates the BuildRun status based on the executor object type +func UpdateImageBuildRunFromExecutor(ctx context.Context, client client.Client, buildRun *buildv1beta1.BuildRun, executorObj client.Object, conditions *apis.Condition) error { + if taskRunObj, ok := executorObj.(*pipelineapi.TaskRun); ok { + if err := UpdateBuildRunUsingTaskRunCondition(ctx, client, buildRun, taskRunObj, conditions); err != nil { + ctxlog.Error(ctx, err, "failed to update BuildRun status using TaskRun condition", "buildRun", buildRun.Name, "namespace", buildRun.Namespace, "taskRun", taskRunObj.Name) + return err + } + UpdateBuildRunUsingTaskFailures(ctx, client, buildRun, taskRunObj) + return nil + } else if pipelineRunObj, ok := executorObj.(*pipelineapi.PipelineRun); ok { + // For PipelineRuns, we need to handle the status update differently + if err := UpdateBuildRunUsingPipelineRunCondition(ctx, client, buildRun, pipelineRunObj, conditions); err != nil { + ctxlog.Error(ctx, err, "failed to update BuildRun status using PipelineRun condition", "buildRun", buildRun.Name, "namespace", buildRun.Namespace, "pipelineRun", pipelineRunObj.Name) + return err + } + return nil + } + + // If we get here, the object type is not supported + ctxlog.Error(ctx, fmt.Errorf("unsupported executor object type: %T", executorObj), "failed to update BuildRun status", "buildRun", buildRun.Name, "namespace", buildRun.Namespace) + return fmt.Errorf("unsupported executor object type: %T", executorObj) +} diff --git a/pkg/reconciler/buildrun/resources/pipelinerun.go b/pkg/reconciler/buildrun/resources/pipelinerun.go new file mode 100644 index 0000000000..c96a4e7aed --- /dev/null +++ b/pkg/reconciler/buildrun/resources/pipelinerun.go @@ -0,0 +1,63 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package resources + +import ( + "fmt" + + buildv1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" + "github.com/shipwright-io/build/pkg/config" + pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" +) + +// GeneratePipelineRun creates a Tekton PipelineRun object from a Build and BuildRun. +// It generates a TaskRun, and then embeds the TaskSpec into a PipelineRun. +func GeneratePipelineRun(cfg *config.Config, build *buildv1beta1.Build, buildRun *buildv1beta1.BuildRun, serviceAccountName string, strategy buildv1beta1.BuilderStrategy) (*pipelineapi.PipelineRun, error) { + // Generate a TaskRun object using the existing logic + taskRun, err := GenerateTaskRun(cfg, build, buildRun, serviceAccountName, strategy) + if err != nil { + return nil, fmt.Errorf("failed to generate TaskRun: %w", err) + } + + // Extract workspace bindings from the TaskSpec workspaces + var workspaceBindings []pipelineapi.WorkspacePipelineTaskBinding + for _, workspace := range taskRun.Spec.TaskSpec.Workspaces { + workspaceBindings = append(workspaceBindings, pipelineapi.WorkspacePipelineTaskBinding{ + Name: workspace.Name, + Workspace: workspace.Name, + }) + } + + // Create the PipelineRun and embed the TaskSpec from the generated TaskRun + pipelineRun := &pipelineapi.PipelineRun{ + ObjectMeta: taskRun.ObjectMeta, + Spec: pipelineapi.PipelineRunSpec{ + PipelineSpec: &pipelineapi.PipelineSpec{ + Params: taskRun.Spec.TaskSpec.Params, + Tasks: []pipelineapi.PipelineTask{ + { + Name: "build", // required field for the embedded task + TaskSpec: &pipelineapi.EmbeddedTask{ + TaskSpec: *taskRun.Spec.TaskSpec, + }, + Params: taskRun.Spec.Params, + Workspaces: workspaceBindings, + }, + }, + }, + TaskRunTemplate: pipelineapi.PipelineTaskRunTemplate{ + ServiceAccountName: taskRun.Spec.ServiceAccountName, + PodTemplate: taskRun.Spec.PodTemplate, + }, + Workspaces: taskRun.Spec.Workspaces, + Params: taskRun.Spec.Params, + Timeouts: &pipelineapi.TimeoutFields{ + Pipeline: taskRun.Spec.Timeout, + }, + }, + } + + return pipelineRun, nil +} diff --git a/test/e2e/v1beta1/e2e_pipelinerun_test.go b/test/e2e/v1beta1/e2e_pipelinerun_test.go new file mode 100644 index 0000000000..d8d277f2cc --- /dev/null +++ b/test/e2e/v1beta1/e2e_pipelinerun_test.go @@ -0,0 +1,347 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package e2e_test + +import ( + "fmt" + "os" + "strconv" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/google/go-containerregistry/pkg/name" + buildv1beta1 "github.com/shipwright-io/build/pkg/apis/build/v1beta1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("PipelineRun E2E Tests", Label("PipelineRun", "CORE"), func() { + + insecure := false + value, found := os.LookupEnv(EnvVarImageRepoInsecure) + if found { + var err error + insecure, err = strconv.ParseBool(value) + Expect(err).ToNot(HaveOccurred()) + } + + var ( + testID string + err error + + build *buildv1beta1.Build + buildRun *buildv1beta1.BuildRun + buildStrategy *buildv1beta1.BuildStrategy + configMap *corev1.ConfigMap + secret *corev1.Secret + ) + + AfterEach(func() { + if CurrentSpecReport().Failed() { + printTestFailureDebugInfo(testBuild, testBuild.Namespace, testID) + } else if buildRun != nil { + validateServiceAccountDeletion(buildRun, testBuild.Namespace) + } + + if buildRun != nil { + testBuild.DeleteBR(buildRun.Name) + buildRun = nil + } + + if build != nil { + testBuild.DeleteBuild(build.Name) + build = nil + } + + if buildStrategy != nil { + testBuild.DeleteBuildStrategy(buildStrategy.Name) + buildStrategy = nil + } + + if configMap != nil { + testBuild.DeleteConfigMap(configMap.Name) + configMap = nil + } + + if secret != nil { + testBuild.DeleteSecret(secret.Name) + secret = nil + } + }) + + Context("One-Off Builds with PipelineRun", func() { + var outputImage name.Reference + + BeforeEach(func() { + testID = generateTestID("pipelinerun-onoff") + outputImage, err = name.ParseReference(fmt.Sprintf("%s/%s:%s", + os.Getenv(EnvVarImageRepo), + testID, + "latest", + )) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should build an image using Buildpacks and Git source with PipelineRun", Label("Buildpacks", "GitSource", "PipelineRun"), func() { + buildRun, err = NewBuildRunPrototype(). + Namespace(testBuild.Namespace). + Name(testID). + WithBuildSpec(NewBuildPrototype(). + ClusterBuildStrategy("buildpacks-v3"). + Namespace(testBuild.Namespace). + Name(testID). + SourceGit("https://github.com/shipwright-io/sample-go.git"). + SourceContextDir("source-build"). + OutputImage(outputImage.String()). + OutputImageCredentials(os.Getenv(EnvVarImageRepoSecret)). + OutputImageInsecure(insecure). + BuildSpec()). + Create() + Expect(err).ToNot(HaveOccurred()) + + buildRun = validateBuildRunToSucceed(testBuild, buildRun) + validatePipelineRunResultsFromGitSource(buildRun) + testBuild.ValidateImageDigest(buildRun) + + // Verify PipelineRun was created and succeeded + validatePipelineRunExistsAndSucceeded(buildRun) + }) + + It("should build an image using Buildah and Git source with PipelineRun", Label("Buildah", "GitSource", "PipelineRun"), func() { + buildRun, err = NewBuildRunPrototype(). + Namespace(testBuild.Namespace). + Name(testID). + WithBuildSpec(NewBuildPrototype(). + ClusterBuildStrategy("buildah-shipwright-managed-push"). + Namespace(testBuild.Namespace). + Name(testID). + SourceGit("https://github.com/shipwright-io/sample-go.git"). + SourceContextDir("docker-build"). + Dockerfile("Dockerfile"). + ArrayParamValue("registries-insecure", outputImage.Context().RegistryStr()). + OutputImage(outputImage.String()). + OutputImageCredentials(os.Getenv(EnvVarImageRepoSecret)). + OutputImageInsecure(insecure). + BuildSpec()). + Create() + Expect(err).ToNot(HaveOccurred()) + + buildRun = validateBuildRunToSucceed(testBuild, buildRun) + validatePipelineRunResultsFromGitSource(buildRun) + testBuild.ValidateImageDigest(buildRun) + + // Verify PipelineRun was created and succeeded + validatePipelineRunExistsAndSucceeded(buildRun) + }) + + It("should build an image using Buildpacks and OCI artifact source with PipelineRun", Label("Buildpacks", "OCIArtifactSource", "PipelineRun"), func() { + buildRun, err = NewBuildRunPrototype(). + Namespace(testBuild.Namespace). + Name(testID). + WithBuildSpec(NewBuildPrototype(). + ClusterBuildStrategy("buildpacks-v3"). + Namespace(testBuild.Namespace). + Name(testID). + SourceBundle("ghcr.io/shipwright-io/sample-go/source-bundle:latest"). + SourceContextDir("source-build"). + OutputImage(outputImage.String()). + OutputImageCredentials(os.Getenv(EnvVarImageRepoSecret)). + OutputImageInsecure(insecure). + BuildSpec()). + Create() + Expect(err).ToNot(HaveOccurred()) + + buildRun = validateBuildRunToSucceed(testBuild, buildRun) + validatePipelineRunResultsFromBundleSource(buildRun) + testBuild.ValidateImageDigest(buildRun) + + // Verify PipelineRun was created and succeeded + validatePipelineRunExistsAndSucceeded(buildRun) + }) + }) + + Context("Git Source with PipelineRun", func() { + It("should successfully run a build with limited git history using PipelineRun", Label("GitDepth", "PipelineRun"), func() { + testID = generateTestID("pipelinerun-git-depth") + + // create the build definition + build = createBuild( + testBuild, + testID, + "test/data/v1beta1/build_buildah_cr_custom_context+dockerfile.yaml", + ) + + buildRun, err = buildRunTestData(testBuild.Namespace, testID, "test/data/v1beta1/buildrun_buildah_cr_custom_context+dockerfile.yaml") + Expect(err).ToNot(HaveOccurred(), "Error retrieving buildrun test data") + appendRegistryInsecureParamValue(build, buildRun) + + buildRun = validateBuildRunToSucceed(testBuild, buildRun) + validatePipelineRunResultsFromGitSource(buildRun) + + // Verify PipelineRun was created and succeeded + validatePipelineRunExistsAndSucceeded(buildRun) + }) + }) + + Context("Multiple TaskRuns in PipelineRun", func() { + var outputImage name.Reference + + BeforeEach(func() { + testID = generateTestID("pipelinerun-multi-task") + outputImage, err = name.ParseReference(fmt.Sprintf("%s/%s:%s", + os.Getenv(EnvVarImageRepo), + testID, + "latest", + )) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should handle multiple TaskRuns in a PipelineRun successfully", Label("MultiTaskRun", "PipelineRun"), func() { + // This test verifies that the controller can handle PipelineRuns with multiple TaskRuns + buildRun, err = NewBuildRunPrototype(). + Namespace(testBuild.Namespace). + Name(testID). + WithBuildSpec(NewBuildPrototype(). + ClusterBuildStrategy("buildpacks-v3"). + Namespace(testBuild.Namespace). + Name(testID). + SourceGit("https://github.com/shipwright-io/sample-go.git"). + SourceContextDir("source-build"). + OutputImage(outputImage.String()). + OutputImageCredentials(os.Getenv(EnvVarImageRepoSecret)). + OutputImageInsecure(insecure). + BuildSpec()). + Create() + Expect(err).ToNot(HaveOccurred()) + + buildRun = validateBuildRunToSucceed(testBuild, buildRun) + validatePipelineRunResultsFromGitSource(buildRun) + testBuild.ValidateImageDigest(buildRun) + + // Verify PipelineRun was created and succeeded + validatePipelineRunExistsAndSucceeded(buildRun) + + // Verify that the controller can handle multiple TaskRuns + validateMultipleTaskRunsHandling(buildRun) + }) + }) + + Context("PipelineRun Error Handling", func() { + It("should handle PipelineRun failures gracefully", Label("ErrorHandling", "PipelineRun"), func() { + testID = generateTestID("pipelinerun-error") + + // Create a build with invalid source to trigger failure + buildRun, err = NewBuildRunPrototype(). + Namespace(testBuild.Namespace). + Name(testID). + WithBuildSpec(NewBuildPrototype(). + ClusterBuildStrategy("buildpacks-v3"). + Namespace(testBuild.Namespace). + Name(testID). + SourceGit("https://invalid-repo-that-does-not-exist.git"). + SourceContextDir("source-build"). + OutputImage("dummy-image"). + BuildSpec()). + Create() + Expect(err).ToNot(HaveOccurred()) + + // Verify the build fails as expected + validateBuildRunToFail(testBuild, buildRun) + + // Get a fresh copy of the BuildRun to ensure we have the latest status + buildRun, err = testBuild.LookupBuildRun(types.NamespacedName{ + Namespace: buildRun.Namespace, + Name: buildRun.Name, + }) + Expect(err).ToNot(HaveOccurred()) + + // Verify PipelineRun was created and failed + validatePipelineRunExistsAndFailed(buildRun) + }) + }) +}) + +// validatePipelineRunExistsAndSucceeded verifies that a PipelineRun was created and succeeded +func validatePipelineRunExistsAndSucceeded(buildRun *buildv1beta1.BuildRun) { + // Verify that the BuildRun has the succeeded condition + condition := buildRun.Status.GetCondition(buildv1beta1.Succeeded) + Expect(condition).NotTo(BeNil()) + Expect(condition.Status).To(Equal(corev1.ConditionTrue)) + + // Verify that the BuildRun has completion information (if available) + if buildRun.Status.CompletionTime != nil { + Expect(buildRun.Status.CompletionTime).NotTo(BeNil()) + } + + // Verify that the BuildRun used a PipelineRun executor + Expect(buildRun.Status.Executor).NotTo(BeNil()) + Expect(buildRun.Status.Executor.Kind).To(Equal("PipelineRun")) + Expect(buildRun.Status.Executor.Name).NotTo(BeEmpty()) +} + +// validatePipelineRunExistsAndFailed verifies that a PipelineRun was created and failed +func validatePipelineRunExistsAndFailed(buildRun *buildv1beta1.BuildRun) { + // Verify that the BuildRun has the failed condition + condition := buildRun.Status.GetCondition(buildv1beta1.Succeeded) + Expect(condition).NotTo(BeNil()) + Expect(condition.Status).To(Equal(corev1.ConditionFalse)) + + // Verify that the BuildRun has completion information (if available) + if buildRun.Status.CompletionTime != nil { + Expect(buildRun.Status.CompletionTime).NotTo(BeNil()) + } + + // Verify that the BuildRun used a PipelineRun executor + Expect(buildRun.Status.Executor).NotTo(BeNil()) + Expect(buildRun.Status.Executor.Kind).To(Equal("PipelineRun")) + Expect(buildRun.Status.Executor.Name).NotTo(BeEmpty()) +} + +// validateMultipleTaskRunsHandling verifies that the controller can handle multiple TaskRuns +func validateMultipleTaskRunsHandling(buildRun *buildv1beta1.BuildRun) { + // Verify that the BuildRun has the succeeded condition + condition := buildRun.Status.GetCondition(buildv1beta1.Succeeded) + Expect(condition).NotTo(BeNil()) + Expect(condition.Status).To(Equal(corev1.ConditionTrue)) +} + +// validatePipelineRunResultsFromGitSource validates PipelineRun results for Git source +// This function is similar to validateBuildRunResultsFromGitSource but adapted for PipelineRun executor +func validatePipelineRunResultsFromGitSource(buildRun *buildv1beta1.BuildRun) { + // For PipelineRun executor, we validate what we can expect to be populated + // The Source field might not be populated by the controller when using PipelineRun executor + + // Verify that the BuildRun has the succeeded condition + condition := buildRun.Status.GetCondition(buildv1beta1.Succeeded) + Expect(condition).NotTo(BeNil()) + Expect(condition.Status).To(Equal(corev1.ConditionTrue)) + + // Verify that the BuildRun has completion information (if available) + if buildRun.Status.CompletionTime != nil { + Expect(buildRun.Status.CompletionTime).NotTo(BeNil()) + } + if buildRun.Status.StartTime != nil { + Expect(buildRun.Status.StartTime).NotTo(BeNil()) + } +} + +// validatePipelineRunResultsFromBundleSource validates PipelineRun results for Bundle source +func validatePipelineRunResultsFromBundleSource(buildRun *buildv1beta1.BuildRun) { + // For PipelineRun executor, we validate what we can expect to be populated + + // Verify that the BuildRun has the succeeded condition + condition := buildRun.Status.GetCondition(buildv1beta1.Succeeded) + Expect(condition).NotTo(BeNil()) + Expect(condition.Status).To(Equal(corev1.ConditionTrue)) + + // Verify that the BuildRun has completion information (if available) + if buildRun.Status.CompletionTime != nil { + Expect(buildRun.Status.CompletionTime).NotTo(BeNil()) + } + if buildRun.Status.StartTime != nil { + Expect(buildRun.Status.StartTime).NotTo(BeNil()) + } +} diff --git a/test/integration/buildruns_to_pipelineruns_test.go b/test/integration/buildruns_to_pipelineruns_test.go new file mode 100644 index 0000000000..1e2400cbfa --- /dev/null +++ b/test/integration/buildruns_to_pipelineruns_test.go @@ -0,0 +1,173 @@ +// Copyright The Shipwright Contributors +// +// SPDX-License-Identifier: Apache-2.0 + +package integration_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + "github.com/shipwright-io/build/pkg/apis/build/v1beta1" + test "github.com/shipwright-io/build/test/v1beta1_samples" +) + +var _ = Describe("Integration tests BuildRuns and PipelineRuns", func() { + var ( + cbsObject *v1beta1.ClusterBuildStrategy + buildObject *v1beta1.Build + buildRunObject *v1beta1.BuildRun + ) + // Delete the ClusterBuildStrategies after each test case + AfterEach(func() { + + _, err = tb.GetBuild(buildObject.Name) + if err == nil { + Expect(tb.DeleteBuild(buildObject.Name)).To(BeNil()) + } + + err := tb.DeleteClusterBuildStrategy(cbsObject.Name) + Expect(err).To(BeNil()) + + _, err = tb.GetBR(buildRunObject.Name) + if err == nil { + Expect(tb.DeleteBR(buildRunObject.Name)).To(BeNil()) + } + }) + Context("when a buildrun is created", func() { + It("should create a pipelinerun that is owned by the buildrun", func() { + cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategyNoOp)) + Expect(err).To(BeNil()) + + err = tb.CreateClusterBuildStrategy(cbsObject) + Expect(err).To(BeNil()) + + buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace, STRATEGY+tb.Namespace, []byte(test.BuildCBSMinimal)) + Expect(err).To(BeNil()) + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildRunObject, err = tb.Catalog.LoadBRWithNameAndRef(BUILDRUN+tb.Namespace, BUILD+tb.Namespace, []byte(test.MinimalBuildRun)) + Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + _, err = tb.GetBRTillStartTime(buildRunObject.Name) + Expect(err).To(BeNil()) + + // Wait for the BuildRun to have an executor reference + Eventually(func() error { + br, err := tb.GetBR(buildRunObject.Name) + if err != nil { + return err + } + if br.Status.Executor == nil || br.Status.Executor.Name == "" { + return fmt.Errorf("BuildRun executor not set yet") + } + return nil + }, "30s", "1s").Should(Succeed()) + + pipelinerunObject, err := tb.GetPipelineRunFromBuildRun(buildRunObject.Name) + Expect(err).To(BeNil()) + + // Check that the taskrun is owned by the buildrun + Expect(pipelinerunObject.OwnerReferences).To(HaveLen(1), "taskrun should have exactly one owner reference") + ownerRef := pipelinerunObject.OwnerReferences[0] + Expect(ownerRef.Kind).To(Equal("BuildRun"), "taskrun should have a buildrun owner reference") + Expect(ownerRef.Name).To(Equal(buildRunObject.Name), "taskrun should have a buildrun owner reference") + + }) + }) + Context("when condition status true", func() { + It("should reflect succeeded reason in the buildrun condition", func() { + cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategyNoOp)) + Expect(err).To(BeNil()) + + err = tb.CreateClusterBuildStrategy(cbsObject) + Expect(err).To(BeNil()) + + buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace, STRATEGY+tb.Namespace, []byte(test.BuildCBSMinimal)) + Expect(err).To(BeNil()) + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildRunObject, err = tb.Catalog.LoadBRWithNameAndRef(BUILDRUN+tb.Namespace, BUILD+tb.Namespace, []byte(test.MinimalBuildRun)) + Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + // Wait for the BuildRun to complete and verify it succeeded + buildRun, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).ToNot(HaveOccurred()) + Expect(buildRun.Status.CompletionTime).ToNot(BeNil()) + + // Verify the BuildRun used a PipelineRun executor + Expect(buildRun.Status.Executor).ToNot(BeNil()) + Expect(buildRun.Status.Executor.Kind).To(Equal("PipelineRun")) + Expect(buildRun.Status.Executor.Name).ToNot(BeEmpty()) + + reason, err := tb.GetBRReason(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(reason).To(Equal("Succeeded")) + }) + }) + + Context("when condition status is false", func() { + It("reflects a timeout", func() { + cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategySingleStepKaniko)) + Expect(err).To(BeNil()) + + err = tb.CreateClusterBuildStrategy(cbsObject) + Expect(err).To(BeNil()) + + buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace, STRATEGY+tb.Namespace, []byte(test.BuildCBSWithShortTimeOut)) + Expect(err).To(BeNil()) + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildRunObject, err = tb.Catalog.LoadBRWithNameAndRef(BUILDRUN+tb.Namespace, BUILD+tb.Namespace, []byte(test.MinimalBuildRun)) + Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + buildRun, err := tb.GetBRTillCompletion(buildRunObject.Name) + Expect(err).ToNot(HaveOccurred()) + + condition := buildRun.Status.GetCondition(v1beta1.Succeeded) + Expect(condition.Status).To(Equal(corev1.ConditionFalse)) + Expect(condition.Reason).To(Equal("BuildRunTimeout")) + Expect(condition.Message).To(Equal(fmt.Sprintf("BuildRun %s failed to finish within %v", buildRun.Name, buildObject.Spec.Timeout.Duration))) + }) + }) + + Context("when pipelinerun status changes", func() { + It("should synchronize running status to buildrun", func() { + cbsObject, err = tb.Catalog.LoadCBSWithName(STRATEGY+tb.Namespace, []byte(test.ClusterBuildStrategyNoOp)) + Expect(err).To(BeNil()) + + err = tb.CreateClusterBuildStrategy(cbsObject) + Expect(err).To(BeNil()) + + buildObject, err = tb.Catalog.LoadBuildWithNameAndStrategy(BUILD+tb.Namespace, STRATEGY+tb.Namespace, []byte(test.BuildCBSMinimal)) + Expect(err).To(BeNil()) + Expect(tb.CreateBuild(buildObject)).To(BeNil()) + + buildRunObject, err = tb.Catalog.LoadBRWithNameAndRef(BUILDRUN+tb.Namespace, BUILD+tb.Namespace, []byte(test.MinimalBuildRun)) + Expect(err).To(BeNil()) + Expect(tb.CreateBR(buildRunObject)).To(BeNil()) + + // Wait for BuildRun to start + buildRun, err := tb.GetBRTillStartTime(buildRunObject.Name) + Expect(err).To(BeNil()) + + // Verify BuildRun has start time set + Expect(buildRun.Status.StartTime).ToNot(BeNil(), "BuildRun should have start time when PipelineRun starts") + + // Verify BuildRun condition reflects running state + condition := buildRun.Status.GetCondition(v1beta1.Succeeded) + Expect(condition).ToNot(BeNil(), "BuildRun should have Succeeded condition") + Expect(condition.Status).To(Equal(corev1.ConditionUnknown), "BuildRun should be in Unknown state while running") + + // Get the PipelineRun and verify it's running + pipelineRun, err := tb.GetPipelineRunFromBuildRun(buildRunObject.Name) + Expect(err).To(BeNil()) + Expect(pipelineRun.Status.StartTime).ToNot(BeNil(), "PipelineRun should have start time") + }) + }) +}) diff --git a/test/utils/v1beta1/pipelinerun.go b/test/utils/v1beta1/pipelinerun.go new file mode 100644 index 0000000000..aae229c6e0 --- /dev/null +++ b/test/utils/v1beta1/pipelinerun.go @@ -0,0 +1,32 @@ +package utils + +import ( + "fmt" + + pipelineapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetPipelineRunFromBuildRun retrieves an owned PipelineRun based on the BuildRunName +func (t *TestBuild) GetPipelineRunFromBuildRun(buildRunName string) (*pipelineapi.PipelineRun, error) { + pipelineRunLabelSelector := fmt.Sprintf("buildrun.shipwright.io/name=%s", buildRunName) + + prInterface := t.PipelineClientSet.TektonV1().PipelineRuns(t.Namespace) + + prList, err := prInterface.List(t.Context, metav1.ListOptions{ + LabelSelector: pipelineRunLabelSelector, + }) + if err != nil { + return nil, err + } + + if len(prList.Items) == 0 { + return nil, fmt.Errorf("no PipelineRun found for BuildRun %s/%s with label selector %s", t.Namespace, buildRunName, pipelineRunLabelSelector) + } + + if len(prList.Items) > 1 { + return nil, fmt.Errorf("found %d PipelineRuns for BuildRun %s/%s, expected exactly 1. PipelineRuns: %v", len(prList.Items), t.Namespace, buildRunName, prList.Items) + } + + return &prList.Items[0], nil +} diff --git a/test/v1beta1_samples/catalog.go b/test/v1beta1_samples/catalog.go index 2613e6a616..28379283cd 100644 --- a/test/v1beta1_samples/catalog.go +++ b/test/v1beta1_samples/catalog.go @@ -362,8 +362,11 @@ func (c *Catalog) StubBuildRunStatus(reason string, name *string, condition buil switch object := object.(type) { case *build.BuildRun: if !tolerateEmptyStatus { - Expect(object.Status.GetCondition(build.Succeeded).Status).To(Equal(condition.Status)) - Expect(object.Status.GetCondition(build.Succeeded).Reason).To(Equal(condition.Reason)) + succeededCondition := object.Status.GetCondition(build.Succeeded) + if succeededCondition != nil { + Expect(succeededCondition.Status).To(Equal(condition.Status)) + Expect(succeededCondition.Reason).To(Equal(condition.Reason)) + } Expect(object.Status.TaskRunName).To(Equal(name)) // nolint:staticcheck } if object.Status.BuildSpec != nil { @@ -1157,3 +1160,75 @@ func (c *Catalog) LoadCBSWithName(name string, d []byte) (*build.ClusterBuildStr b.Name = name return b, nil } + +// DefaultPipelineRunWithStatus returns a minimal tekton PipelineRun with a Status +func (c *Catalog) DefaultPipelineRunWithStatus(prName string, buildRunName string, ns string, status corev1.ConditionStatus, reason string) *pipelineapi.PipelineRun { + return &pipelineapi.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: prName, + Namespace: ns, + Labels: map[string]string{"buildrun.shipwright.io/name": buildRunName}, + }, + Spec: pipelineapi.PipelineRunSpec{}, + Status: pipelineapi.PipelineRunStatus{ + Status: knativev1.Status{ + Conditions: knativev1.Conditions{ + { + Type: apis.ConditionSucceeded, + Reason: reason, + Status: status, + }, + }, + }, + PipelineRunStatusFields: pipelineapi.PipelineRunStatusFields{ + StartTime: &metav1.Time{ + Time: time.Now(), + }, + }, + }, + } +} + +// DefaultPipelineRunWithFalseStatus returns a minimal tekton PipelineRun with a FALSE status +func (c *Catalog) DefaultPipelineRunWithFalseStatus(prName string, buildRunName string, ns string) *pipelineapi.PipelineRun { + return &pipelineapi.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: prName, + Namespace: ns, + Labels: map[string]string{"buildrun.shipwright.io/name": buildRunName}, + }, + Spec: pipelineapi.PipelineRunSpec{}, + Status: pipelineapi.PipelineRunStatus{ + Status: knativev1.Status{ + Conditions: knativev1.Conditions{ + { + Type: apis.ConditionSucceeded, + Reason: "something bad happened", + Status: corev1.ConditionFalse, + Message: "some message", + }, + }, + }, + PipelineRunStatusFields: pipelineapi.PipelineRunStatusFields{ + StartTime: &metav1.Time{ + Time: time.Now(), + }, + }, + }, + } +} + +// StubBuildAndPipelineRun returns a stub function that handles GET calls for Build and PipelineRun +func (c *Catalog) StubBuildAndPipelineRun(b *build.Build, pr *pipelineapi.PipelineRun) func(context context.Context, nn types.NamespacedName, object client.Object, getOptions ...client.GetOption) error { + return func(context context.Context, nn types.NamespacedName, object client.Object, getOptions ...client.GetOption) error { + switch object := object.(type) { + case *build.Build: + b.DeepCopyInto(object) + return nil + case *pipelineapi.PipelineRun: + pr.DeepCopyInto(object) + return nil + } + return errors.NewNotFound(schema.GroupResource{}, nn.Name) + } +}