diff --git a/AGENTS.md b/AGENTS.md index 6c7e50fea4..cf8b153945 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -2,6 +2,9 @@ - Run `make fmt` to format `.go` files, and run `make lint-go` to lint them - Run `make lint-js` to lint `.ts` files - Run `make tidy` after any `go.mod` changes +- Run single go unit tests with `go test -tags 'sqlite sqlite_unlock_notify' -run '^TestName$' ./modulepath/` +- Run single go integration tests with `make 'test-sqlite#TestName/Subtest'` +- Run single playwright e2e test files with `GITEA_TEST_E2E_FLAGS='' make test-e2e` - Add the current year into the copyright header of new `.go` files - Ensure no trailing whitespace in edited files - Never force-push, amend, or squash unless asked. Use new commits and normal push for pull request updates diff --git a/custom/conf/app.example.ini b/custom/conf/app.example.ini index 2c789dd103..97af5fa5fb 100644 --- a/custom/conf/app.example.ini +++ b/custom/conf/app.example.ini @@ -525,8 +525,11 @@ INTERNAL_TOKEN = ;; Set to "enforced", to force users to enroll into Two-Factor Authentication, users without 2FA have no access to repositories via API or web. ;TWO_FACTOR_AUTH = ;; -;; The value of the X-Frame-Options HTTP header for HTML responses. Use "unset" to remove the header. +;; The value of the X-Frame-Options HTTP header for all responses. Use "unset" to remove the header. ;X_FRAME_OPTIONS = SAMEORIGIN +;; +;; The value of the X-Content-Type-Options HTTP header for all responses. Use "unset" to remove the header. +;X_CONTENT_TYPE_OPTIONS = nosniff ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; @@ -2973,6 +2976,8 @@ LEVEL = Info ;; Comma-separated list of workflow directories, the first one to exist ;; in a repo is used to find Actions workflow files ;WORKFLOW_DIRS = .gitea/workflows,.github/workflows +;; Maximum number of attempts a single workflow run can have. Default value is 50.
+;MAX_RERUN_ATTEMPTS = 50 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; diff --git a/models/actions/artifact.go b/models/actions/artifact.go index ffadc79661..f0effdeeca 100644 --- a/models/actions/artifact.go +++ b/models/actions/artifact.go @@ -12,6 +12,7 @@ import ( "time" "code.gitea.io/gitea/models/db" + "code.gitea.io/gitea/modules/optional" "code.gitea.io/gitea/modules/timeutil" "code.gitea.io/gitea/modules/util" @@ -61,7 +62,8 @@ const ( // ActionArtifact is a file that is stored in the artifact storage. type ActionArtifact struct { ID int64 `xorm:"pk autoincr"` - RunID int64 `xorm:"index unique(runid_name_path)"` // The run id of the artifact + RunID int64 `xorm:"index unique(runid_attempt_name_path)"` // The run id of the artifact + RunAttemptID int64 `xorm:"index unique(runid_attempt_name_path) NOT NULL DEFAULT 0"` RunnerID int64 RepoID int64 `xorm:"index"` OwnerID int64 @@ -80,9 +82,9 @@ type ActionArtifact struct { // * "application/pdf", "text/html", etc.: real content type of the artifact ContentEncodingOrType string `xorm:"content_encoding"` - ArtifactPath string `xorm:"index unique(runid_name_path)"` // The path to the artifact when runner uploads it - ArtifactName string `xorm:"index unique(runid_name_path)"` // The name of the artifact when runner uploads it - Status ArtifactStatus `xorm:"index"` // The status of the artifact, uploading, expired or need-delete + ArtifactPath string `xorm:"index unique(runid_attempt_name_path)"` // The path to the artifact when runner uploads it + ArtifactName string `xorm:"index unique(runid_attempt_name_path)"` // The name of the artifact when runner uploads it + Status ArtifactStatus `xorm:"index"` // The status of the artifact, uploading, expired or need-delete CreatedUnix timeutil.TimeStamp `xorm:"created"` UpdatedUnix timeutil.TimeStamp `xorm:"updated index"` ExpiredUnix timeutil.TimeStamp `xorm:"index"` // The time when the artifact will be expired 
@@ -92,12 +94,13 @@ func CreateArtifact(ctx context.Context, t *ActionTask, artifactName, artifactPa if err := t.LoadJob(ctx); err != nil { return nil, err } - artifact, err := getArtifactByNameAndPath(ctx, t.Job.RunID, artifactName, artifactPath) + artifact, err := getArtifactByNameAndPath(ctx, t.Job.RunID, t.Job.RunAttemptID, artifactName, artifactPath) if errors.Is(err, util.ErrNotExist) { artifact := &ActionArtifact{ ArtifactName: artifactName, ArtifactPath: artifactPath, RunID: t.Job.RunID, + RunAttemptID: t.Job.RunAttemptID, RunnerID: t.RunnerID, RepoID: t.RepoID, OwnerID: t.OwnerID, @@ -122,9 +125,9 @@ func CreateArtifact(ctx context.Context, t *ActionTask, artifactName, artifactPa return artifact, nil } -func getArtifactByNameAndPath(ctx context.Context, runID int64, name, fpath string) (*ActionArtifact, error) { +func getArtifactByNameAndPath(ctx context.Context, runID, runAttemptID int64, name, fpath string) (*ActionArtifact, error) { var art ActionArtifact - has, err := db.GetEngine(ctx).Where("run_id = ? AND artifact_name = ? AND artifact_path = ?", runID, name, fpath).Get(&art) + has, err := db.GetEngine(ctx).Where("run_id = ? AND run_attempt_id = ? AND artifact_name = ? 
AND artifact_path = ?", runID, runAttemptID, name, fpath).Get(&art) if err != nil { return nil, err } else if !has { @@ -144,6 +147,7 @@ type FindArtifactsOptions struct { db.ListOptions RepoID int64 RunID int64 + RunAttemptID optional.Option[int64] // use optional to allow filtering by zero (legacy artifacts have run_attempt_id=0) ArtifactName string Status int FinalizedArtifactsV4 bool @@ -163,6 +167,9 @@ func (opts FindArtifactsOptions) ToConds() builder.Cond { if opts.RunID > 0 { cond = cond.And(builder.Eq{"run_id": opts.RunID}) } + if opts.RunAttemptID.Has() { + cond = cond.And(builder.Eq{"run_attempt_id": opts.RunAttemptID.Value()}) + } if opts.ArtifactName != "" { cond = cond.And(builder.Eq{"artifact_name": opts.ArtifactName}) } @@ -186,11 +193,12 @@ type ActionArtifactMeta struct { ExpiredUnix timeutil.TimeStamp } -// ListUploadedArtifactsMeta returns all uploaded artifacts meta of a run -func ListUploadedArtifactsMeta(ctx context.Context, repoID, runID int64) ([]*ActionArtifactMeta, error) { +// ListUploadedArtifactsMetaByRunAttempt returns uploaded artifacts meta scoped to a specific run and attempt. +// Pass runAttemptID=0 to target legacy artifacts (pre-v331) belonging to the run. +func ListUploadedArtifactsMetaByRunAttempt(ctx context.Context, repoID, runID, runAttemptID int64) ([]*ActionArtifactMeta, error) { arts := make([]*ActionArtifactMeta, 0, 10) return arts, db.GetEngine(ctx).Table("action_artifact"). - Where("repo_id=? AND run_id=? AND (status=? OR status=?)", repoID, runID, ArtifactStatusUploadConfirmed, ArtifactStatusExpired). + Where("repo_id=? AND run_id=? AND run_attempt_id=? AND (status=? OR status=?)", repoID, runID, runAttemptID, ArtifactStatusUploadConfirmed, ArtifactStatusExpired). GroupBy("artifact_name"). Select("artifact_name, sum(file_size) as file_size, max(status) as status, max(expired_unix) as expired_unix"). 
Find(&arts) @@ -217,12 +225,29 @@ func SetArtifactExpired(ctx context.Context, artifactID int64) error { return err } -// SetArtifactNeedDelete sets an artifact to need-delete, cron job will delete it -func SetArtifactNeedDelete(ctx context.Context, runID int64, name string) error { - _, err := db.GetEngine(ctx).Where("run_id=? AND artifact_name=? AND status = ?", runID, name, ArtifactStatusUploadConfirmed).Cols("status").Update(&ActionArtifact{Status: ArtifactStatusPendingDeletion}) +// SetArtifactNeedDeleteByID sets an artifact to need-delete by ID, cron job will delete it. +func SetArtifactNeedDeleteByID(ctx context.Context, artifactID int64) error { + _, err := db.GetEngine(ctx).Where("id=? AND status = ?", artifactID, ArtifactStatusUploadConfirmed).Cols("status").Update(&ActionArtifact{Status: ArtifactStatusPendingDeletion}) return err } +// SetArtifactNeedDeleteByRunAttempt sets an artifact to need-delete in a run attempt, cron job will delete it. +// runAttemptID may be 0 for legacy artifacts created before ActionRunAttempt existed. +func SetArtifactNeedDeleteByRunAttempt(ctx context.Context, runID, runAttemptID int64, name string) error { + _, err := db.GetEngine(ctx).Where("run_id=? AND run_attempt_id=? AND artifact_name=? AND status = ?", runID, runAttemptID, name, ArtifactStatusUploadConfirmed).Cols("status").Update(&ActionArtifact{Status: ArtifactStatusPendingDeletion}) + return err +} + +// GetArtifactsByRunAttemptAndName returns all artifacts with the given name in the specified run attempt. +// This supports both attempt-scoped data and legacy artifacts with run_attempt_id=0. +func GetArtifactsByRunAttemptAndName(ctx context.Context, runID, runAttemptID int64, artifactName string) ([]*ActionArtifact, error) { + arts := make([]*ActionArtifact, 0) + return arts, db.GetEngine(ctx). + Where("run_id = ? AND run_attempt_id = ? AND artifact_name = ?", runID, runAttemptID, artifactName). + OrderBy("id"). 
+ Find(&arts) +} + // SetArtifactDeleted sets an artifact to deleted func SetArtifactDeleted(ctx context.Context, artifactID int64) error { _, err := db.GetEngine(ctx).ID(artifactID).Cols("status").Update(&ActionArtifact{Status: ArtifactStatusDeleted}) diff --git a/models/actions/run.go b/models/actions/run.go index bce356c0e2..b8c9a59fb4 100644 --- a/models/actions/run.go +++ b/models/actions/run.go @@ -30,7 +30,7 @@ import ( type ActionRun struct { ID int64 Title string - RepoID int64 `xorm:"unique(repo_index) index(repo_concurrency)"` + RepoID int64 `xorm:"unique(repo_index)"` Repo *repo_model.Repository `xorm:"-"` OwnerID int64 `xorm:"index"` WorkflowID string `xorm:"index"` // the name of workflow file @@ -50,15 +50,20 @@ type ActionRun struct { Status Status `xorm:"index"` Version int `xorm:"version default 0"` // Status could be updated concomitantly, so an optimistic lock is needed RawConcurrency string // raw concurrency - ConcurrencyGroup string `xorm:"index(repo_concurrency) NOT NULL DEFAULT ''"` - ConcurrencyCancel bool `xorm:"NOT NULL DEFAULT FALSE"` - // Started and Stopped is used for recording last run time, if rerun happened, they will be reset to 0 + + // Started and Stopped are identical to the latest attempt after ActionRunAttempt was introduced. + // When a rerun creates a new latest attempt, they are reset until the new attempt starts and stops. Started timeutil.TimeStamp Stopped timeutil.TimeStamp - // PreviousDuration is used for recording previous duration + + // PreviousDuration is kept only for legacy runs created before ActionRunAttempt existed. + // New runs and reruns no longer update this field and use attempt-scoped durations instead. 
PreviousDuration time.Duration - Created timeutil.TimeStamp `xorm:"created"` - Updated timeutil.TimeStamp `xorm:"updated"` + + LatestAttemptID int64 `xorm:"index NOT NULL DEFAULT 0"` + + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated"` } func init() { @@ -160,6 +165,31 @@ func (run *ActionRun) Duration() time.Duration { return d } +// GetLatestAttempt returns +// - the latest attempt of the run +// - (nil, false, nil) for legacy runs that have no attempt records +func (run *ActionRun) GetLatestAttempt(ctx context.Context) (*ActionRunAttempt, bool, error) { + if run.LatestAttemptID == 0 { + return nil, false, nil + } + attempt, err := GetRunAttemptByRepoAndID(ctx, run.RepoID, run.LatestAttemptID) + if err != nil { + return nil, false, err + } + return attempt, true, nil +} + +func (run *ActionRun) GetEffectiveConcurrency(ctx context.Context) (string, bool, error) { + attempt, has, err := run.GetLatestAttempt(ctx) + if err != nil { + return "", false, err + } + if has { + return attempt.ConcurrencyGroup, attempt.ConcurrencyCancel, nil + } + return "", false, nil +} + func (run *ActionRun) GetPushEventPayload() (*api.PushPayload, error) { if run.Event == webhook_module.HookEventPush { var payload api.PushPayload @@ -406,14 +436,11 @@ func UpdateRun(ctx context.Context, run *ActionRun, cols ...string) error { type ActionRunIndex db.ResourceIndex -func GetConcurrentRunsAndJobs(ctx context.Context, repoID int64, concurrencyGroup string, status []Status) ([]*ActionRun, []*ActionRunJob, error) { - runs, err := db.Find[ActionRun](ctx, &FindRunOptions{ - RepoID: repoID, - ConcurrencyGroup: concurrencyGroup, - Status: status, - }) +// GetConcurrentRunAttemptsAndJobs returns run attempts and jobs in the same concurrency group by statuses. 
+func GetConcurrentRunAttemptsAndJobs(ctx context.Context, repoID int64, concurrencyGroup string, status []Status) ([]*ActionRunAttempt, []*ActionRunJob, error) { + attempts, err := FindConcurrentRunAttempts(ctx, repoID, concurrencyGroup, status) if err != nil { - return nil, nil, fmt.Errorf("find runs: %w", err) + return nil, nil, fmt.Errorf("find run attempts: %w", err) } jobs, err := db.Find[ActionRunJob](ctx, &FindRunJobOptions{ @@ -425,36 +452,34 @@ func GetConcurrentRunsAndJobs(ctx context.Context, repoID int64, concurrencyGrou return nil, nil, fmt.Errorf("find jobs: %w", err) } - return runs, jobs, nil + return attempts, jobs, nil } -func CancelPreviousJobsByRunConcurrency(ctx context.Context, actionRun *ActionRun) ([]*ActionRunJob, error) { - if actionRun.ConcurrencyGroup == "" { +func CancelPreviousJobsByRunConcurrency(ctx context.Context, attempt *ActionRunAttempt) ([]*ActionRunJob, error) { + if attempt.ConcurrencyGroup == "" { return nil, nil } var jobsToCancel []*ActionRunJob statusFindOption := []Status{StatusWaiting, StatusBlocked} - if actionRun.ConcurrencyCancel { + if attempt.ConcurrencyCancel { statusFindOption = append(statusFindOption, StatusRunning) } - runs, jobs, err := GetConcurrentRunsAndJobs(ctx, actionRun.RepoID, actionRun.ConcurrencyGroup, statusFindOption) + attempts, jobs, err := GetConcurrentRunAttemptsAndJobs(ctx, attempt.RepoID, attempt.ConcurrencyGroup, statusFindOption) if err != nil { return nil, fmt.Errorf("find concurrent runs and jobs: %w", err) } jobsToCancel = append(jobsToCancel, jobs...) 
// cancel runs in the same concurrency group - for _, run := range runs { - if run.ID == actionRun.ID { + for _, concurrentAttempt := range attempts { + if concurrentAttempt.RunID == attempt.RunID { continue } - jobs, err := db.Find[ActionRunJob](ctx, FindRunJobOptions{ - RunID: run.ID, - }) + jobs, err := GetRunJobsByRunAndAttemptID(ctx, concurrentAttempt.RunID, concurrentAttempt.ID) if err != nil { - return nil, fmt.Errorf("find run %d jobs: %w", run.ID, err) + return nil, fmt.Errorf("find run %d attempt %d jobs: %w", concurrentAttempt.RunID, concurrentAttempt.ID, err) } jobsToCancel = append(jobsToCancel, jobs...) } diff --git a/models/actions/run_attempt.go b/models/actions/run_attempt.go new file mode 100644 index 0000000000..7fd0212522 --- /dev/null +++ b/models/actions/run_attempt.go @@ -0,0 +1,145 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package actions + +import ( + "context" + "fmt" + "slices" + "time" + + "code.gitea.io/gitea/models/db" + user_model "code.gitea.io/gitea/models/user" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/timeutil" + "code.gitea.io/gitea/modules/util" +) + +// ActionRunAttempt represents a single execution attempt of an ActionRun. 
+type ActionRunAttempt struct { + ID int64 + RepoID int64 `xorm:"index(repo_concurrency_status)"` + RunID int64 `xorm:"UNIQUE(run_attempt)"` + Run *ActionRun `xorm:"-"` + Attempt int64 `xorm:"UNIQUE(run_attempt)"` + + TriggerUserID int64 + TriggerUser *user_model.User `xorm:"-"` + + ConcurrencyGroup string `xorm:"index(repo_concurrency_status) NOT NULL DEFAULT ''"` + ConcurrencyCancel bool `xorm:"NOT NULL DEFAULT FALSE"` + + Status Status `xorm:"index(repo_concurrency_status)"` + Started timeutil.TimeStamp + Stopped timeutil.TimeStamp + + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated"` +} + +func (*ActionRunAttempt) TableName() string { + return "action_run_attempt" +} + +func init() { + db.RegisterModel(new(ActionRunAttempt)) +} + +func (attempt *ActionRunAttempt) Duration() time.Duration { + return calculateDuration(attempt.Started, attempt.Stopped, attempt.Status, attempt.Updated) +} + +func (attempt *ActionRunAttempt) LoadAttributes(ctx context.Context) error { + if attempt == nil { + return nil + } + + if attempt.Run == nil { + run, err := GetRunByRepoAndID(ctx, attempt.RepoID, attempt.RunID) + if err != nil { + return err + } + if err := run.LoadAttributes(ctx); err != nil { + return err + } + attempt.Run = run + } + + if attempt.TriggerUser == nil { + u, err := user_model.GetPossibleUserByID(ctx, attempt.TriggerUserID) + if err != nil { + return err + } + attempt.TriggerUser = u + } + + return nil +} + +func GetRunAttemptByRepoAndID(ctx context.Context, repoID, attemptID int64) (*ActionRunAttempt, error) { + var attempt ActionRunAttempt + has, err := db.GetEngine(ctx).Where("repo_id=? 
AND id=?", repoID, attemptID).Get(&attempt) + if err != nil { + return nil, err + } else if !has { + return nil, fmt.Errorf("run attempt %d in repo %d: %w", attemptID, repoID, util.ErrNotExist) + } + return &attempt, nil +} + +func GetRunAttemptByRunIDAndAttemptNum(ctx context.Context, runID, attemptNum int64) (*ActionRunAttempt, error) { + var attempt ActionRunAttempt + has, err := db.GetEngine(ctx).Where("run_id=? AND attempt=?", runID, attemptNum).Get(&attempt) + if err != nil { + return nil, err + } else if !has { + return nil, fmt.Errorf("run attempt %d for run %d: %w", attemptNum, runID, util.ErrNotExist) + } + return &attempt, nil +} + +// FindConcurrentRunAttempts returns attempts in the given concurrency group and status set. +// Results are unordered; callers must not depend on any particular row order. +func FindConcurrentRunAttempts(ctx context.Context, repoID int64, concurrencyGroup string, statuses []Status) ([]*ActionRunAttempt, error) { + attempts := make([]*ActionRunAttempt, 0) + sess := db.GetEngine(ctx).Where("repo_id=? AND concurrency_group=?", repoID, concurrencyGroup) + if len(statuses) > 0 { + sess = sess.In("status", statuses) + } + return attempts, sess.Find(&attempts) +} + +func UpdateRunAttempt(ctx context.Context, attempt *ActionRunAttempt, cols ...string) error { + if slices.Contains(cols, "status") && attempt.Started.IsZero() && attempt.Status.IsRunning() { + attempt.Started = timeutil.TimeStampNow() + cols = append(cols, "started") + } + + sess := db.GetEngine(ctx).ID(attempt.ID) + if len(cols) > 0 { + sess.Cols(cols...) + } + if _, err := sess.Update(attempt); err != nil { + return err + } + + // Only status/timing changes on an attempt need to update the latest run. 
+ if len(cols) > 0 && !slices.Contains(cols, "status") && !slices.Contains(cols, "started") && !slices.Contains(cols, "stopped") { + return nil + } + + run, err := GetRunByRepoAndID(ctx, attempt.RepoID, attempt.RunID) + if err != nil { + return err + } + if run.LatestAttemptID != attempt.ID { + log.Warn("run %d cannot be updated by an old attempt %d (latest attempt is %d)", run.ID, attempt.ID, run.LatestAttemptID) + return nil + } + + run.Status = attempt.Status + run.Started = attempt.Started + run.Stopped = attempt.Stopped + return UpdateRun(ctx, run, "status", "started", "stopped") +} diff --git a/models/actions/run_attempt_list.go b/models/actions/run_attempt_list.go new file mode 100644 index 0000000000..77a5b8f15c --- /dev/null +++ b/models/actions/run_attempt_list.go @@ -0,0 +1,46 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package actions + +import ( + "context" + + "code.gitea.io/gitea/models/db" + user_model "code.gitea.io/gitea/models/user" + "code.gitea.io/gitea/modules/container" +) + +type ActionRunAttemptList []*ActionRunAttempt + +// GetUserIDs returns a slice of user's id +func (attempts ActionRunAttemptList) GetUserIDs() []int64 { + return container.FilterSlice(attempts, func(attempt *ActionRunAttempt) (int64, bool) { + return attempt.TriggerUserID, true + }) +} + +func (attempts ActionRunAttemptList) LoadTriggerUser(ctx context.Context) error { + userIDs := attempts.GetUserIDs() + users := make(map[int64]*user_model.User, len(userIDs)) + if err := db.GetEngine(ctx).In("id", userIDs).Find(&users); err != nil { + return err + } + for _, attempt := range attempts { + if attempt.TriggerUserID == user_model.ActionsUserID { + attempt.TriggerUser = user_model.NewActionsUser() + } else { + attempt.TriggerUser = users[attempt.TriggerUserID] + if attempt.TriggerUser == nil { + attempt.TriggerUser = user_model.NewGhostUser() + } + } + } + return nil +} + +// ListRunAttemptsByRunID returns all attempts of a run, ordered by attempt 
number DESC (newest first). +func ListRunAttemptsByRunID(ctx context.Context, runID int64) (ActionRunAttemptList, error) { + var attempts ActionRunAttemptList + return attempts, db.GetEngine(ctx).Where("run_id=?", runID).OrderBy("attempt DESC").Find(&attempts) +} diff --git a/models/actions/run_job.go b/models/actions/run_job.go index d1e5d1e938..0921329997 100644 --- a/models/actions/run_job.go +++ b/models/actions/run_job.go @@ -34,7 +34,10 @@ type ActionRunJob struct { CommitSHA string `xorm:"index"` IsForkPullRequest bool Name string `xorm:"VARCHAR(255)"` - Attempt int64 + + // for legacy jobs, this counts how many times the job has run; + // otherwise it matches the Attempt of the ActionRunAttempt identified by job.RunAttemptID + Attempt int64 // WorkflowPayload is act/jobparser.SingleWorkflow for act/jobparser.Parse // it should contain exactly one job with global workflow fields for this model @@ -43,8 +46,11 @@ type ActionRunJob struct { JobID string `xorm:"VARCHAR(255)"` // job id in workflow, not job's id Needs []string `xorm:"JSON TEXT"` RunsOn []string `xorm:"JSON TEXT"` - TaskID int64 // the latest task of the job - Status Status `xorm:"index"` + + TaskID int64 // the task created by this job in its own attempt + SourceTaskID int64 `xorm:"NOT NULL DEFAULT 0"` // SourceTaskID points to a historical task when this job reuses an earlier attempt's result. + + Status Status `xorm:"index"` RawConcurrency string // raw concurrency from job YAML's "concurrency" section @@ -61,6 +67,14 @@ type ActionRunJob struct { // It is JSON-encoded repo_model.ActionsTokenPermissions and may be empty if not specified. TokenPermissions *repo_model.ActionsTokenPermissions `xorm:"JSON TEXT"` + // RunAttemptID identifies the ActionRunAttempt this job belongs to. + // A value of 0 indicates a legacy job created before ActionRunAttempt existed. + RunAttemptID int64 `xorm:"index NOT NULL DEFAULT 0"` + // AttemptJobID is unique within a single attempt. 
+ // For jobs created after ActionRunAttempt was introduced, the same logical job is expected to keep the same AttemptJobID across attempts. + // A value of 0 indicates a legacy job created before ActionRunAttempt existed. + AttemptJobID int64 `xorm:"index NOT NULL DEFAULT 0"` + Started timeutil.TimeStamp Stopped timeutil.TimeStamp Created timeutil.TimeStamp `xorm:"created"` @@ -75,6 +89,13 @@ func (job *ActionRunJob) Duration() time.Duration { return calculateDuration(job.Started, job.Stopped, job.Status, job.Updated) } +func (job *ActionRunJob) EffectiveTaskID() int64 { + if job.TaskID > 0 { + return job.TaskID + } + return job.SourceTaskID +} + func (job *ActionRunJob) LoadRun(ctx context.Context) error { if job.Run == nil { run, err := GetRunByRepoAndID(ctx, job.RepoID, job.RunID) @@ -152,9 +173,50 @@ func GetRunJobByRunAndID(ctx context.Context, runID, jobID int64) (*ActionRunJob return &job, nil } -func GetRunJobsByRunID(ctx context.Context, runID int64) (ActionJobList, error) { +func GetRunJobByAttemptJobID(ctx context.Context, runID, attemptID, attemptJobID int64) (*ActionRunJob, error) { + var job ActionRunJob + has, err := db.GetEngine(ctx).Where("run_id=? AND run_attempt_id=? AND attempt_job_id=?", runID, attemptID, attemptJobID).Get(&job) + if err != nil { + return nil, err + } else if !has { + return nil, fmt.Errorf("run job with attempt_job_id %d in run %d attempt %d: %w", attemptJobID, runID, attemptID, util.ErrNotExist) + } + + return &job, nil +} + +// GetLatestAttemptJobsByRepoAndRunID returns the jobs of the latest attempt for a run. +// It prefers the latest attempt when one exists, and falls back to legacy jobs with run_attempt_id=0 for runs created before ActionRunAttempt existed. 
+func GetLatestAttemptJobsByRepoAndRunID(ctx context.Context, repoID, runID int64) (ActionJobList, error) { + run, err := GetRunByRepoAndID(ctx, repoID, runID) + if err != nil { + return nil, err + } + if run.LatestAttemptID > 0 { + return GetRunJobsByRunAndAttemptID(ctx, runID, run.LatestAttemptID) + } + var jobs []*ActionRunJob - if err := db.GetEngine(ctx).Where("run_id=?", runID).OrderBy("id").Find(&jobs); err != nil { + if err := db.GetEngine(ctx).Where("repo_id=? AND run_id=? AND run_attempt_id=0", repoID, runID).OrderBy("id").Find(&jobs); err != nil { + return nil, err + } + return jobs, nil +} + +// GetAllRunJobsByRepoAndRunID returns all jobs for a run across all attempts. +func GetAllRunJobsByRepoAndRunID(ctx context.Context, repoID, runID int64) (ActionJobList, error) { + var jobs []*ActionRunJob + if err := db.GetEngine(ctx).Where("repo_id=? AND run_id=?", repoID, runID).OrderBy("id").Find(&jobs); err != nil { + return nil, err + } + return jobs, nil +} + +// GetRunJobsByRunAndAttemptID returns jobs for a run within a specific attempt. +// runAttemptID may be 0 to address legacy jobs that were created before ActionRunAttempt existed and therefore have no attempt association. +func GetRunJobsByRunAndAttemptID(ctx context.Context, runID, runAttemptID int64) (ActionJobList, error) { + var jobs []*ActionRunJob + if err := db.GetEngine(ctx).Where("run_id=? AND run_attempt_id=?", runID, runAttemptID).OrderBy("id").Find(&jobs); err != nil { return nil, err } return jobs, nil @@ -196,25 +258,51 @@ func UpdateRunJob(ctx context.Context, job *ActionRunJob, cond builder.Cond, col } { - // Other goroutines may aggregate the status of the run and update it too. - // So we need load the run and its jobs before updating the run. 
- run, err := GetRunByRepoAndID(ctx, job.RepoID, job.RunID) - if err != nil { - return 0, err - } - jobs, err := GetRunJobsByRunID(ctx, job.RunID) - if err != nil { - return 0, err - } - run.Status = AggregateJobStatus(jobs) - if run.Started.IsZero() && run.Status.IsRunning() { - run.Started = timeutil.TimeStampNow() - } - if run.Stopped.IsZero() && run.Status.IsDone() { - run.Stopped = timeutil.TimeStampNow() - } - if err := UpdateRun(ctx, run, "status", "started", "stopped"); err != nil { - return 0, fmt.Errorf("update run %d: %w", run.ID, err) + // Other goroutines may aggregate the status of the attempt/run and update it too. + // So we need to load the current jobs before updating the aggregate state. + if job.RunAttemptID > 0 { + attempt, err := GetRunAttemptByRepoAndID(ctx, job.RepoID, job.RunAttemptID) + if err != nil { + return 0, err + } + jobs, err := GetRunJobsByRunAndAttemptID(ctx, job.RunID, job.RunAttemptID) + if err != nil { + return 0, err + } + attempt.Status = AggregateJobStatus(jobs) + if attempt.Started.IsZero() && attempt.Status.IsRunning() { + attempt.Started = timeutil.TimeStampNow() + } + if attempt.Stopped.IsZero() && attempt.Status.IsDone() { + attempt.Stopped = timeutil.TimeStampNow() + } + if err := UpdateRunAttempt(ctx, attempt, "status", "started", "stopped"); err != nil { + return 0, fmt.Errorf("update run attempt %d: %w", attempt.ID, err) + } + } else { + // TODO: Remove this fallback in the future. + // Legacy fallback: jobs created before migration v331 have RunAttemptID=0 and are NOT backfilled. 
+ // This path keeps those runs' status consistent when their jobs finish, including: + // - jobs created before migration v331 and complete on the new version starts + // - zombie/abandoned cleanup cron tasks that call UpdateRunJob on legacy jobs + run, err := GetRunByRepoAndID(ctx, job.RepoID, job.RunID) + if err != nil { + return 0, err + } + jobs, err := GetLatestAttemptJobsByRepoAndRunID(ctx, job.RepoID, job.RunID) + if err != nil { + return 0, err + } + run.Status = AggregateJobStatus(jobs) + if run.Started.IsZero() && run.Status.IsRunning() { + run.Started = timeutil.TimeStampNow() + } + if run.Stopped.IsZero() && run.Status.IsDone() { + run.Stopped = timeutil.TimeStampNow() + } + if err := UpdateRun(ctx, run, "status", "started", "stopped"); err != nil { + return 0, fmt.Errorf("update run %d: %w", run.ID, err) + } } } @@ -269,7 +357,7 @@ func CancelPreviousJobsByJobConcurrency(ctx context.Context, job *ActionRunJob) if job.ConcurrencyCancel { statusFindOption = append(statusFindOption, StatusRunning) } - runs, jobs, err := GetConcurrentRunsAndJobs(ctx, job.RepoID, job.ConcurrencyGroup, statusFindOption) + attempts, jobs, err := GetConcurrentRunAttemptsAndJobs(ctx, job.RepoID, job.ConcurrencyGroup, statusFindOption) if err != nil { return nil, fmt.Errorf("find concurrent runs and jobs: %w", err) } @@ -277,12 +365,13 @@ func CancelPreviousJobsByJobConcurrency(ctx context.Context, job *ActionRunJob) jobsToCancel = append(jobsToCancel, jobs...) 
// cancel runs in the same concurrency group - for _, run := range runs { - jobs, err := db.Find[ActionRunJob](ctx, FindRunJobOptions{ - RunID: run.ID, - }) + for _, attempt := range attempts { + if attempt.ID == job.RunAttemptID { + continue + } + jobs, err := GetRunJobsByRunAndAttemptID(ctx, attempt.RunID, attempt.ID) if err != nil { - return nil, fmt.Errorf("find run %d jobs: %w", run.ID, err) + return nil, fmt.Errorf("find run %d attempt %d jobs: %w", attempt.RunID, attempt.ID, err) } jobsToCancel = append(jobsToCancel, jobs...) } diff --git a/models/actions/run_job_list.go b/models/actions/run_job_list.go index 10f76d3641..e06b6beb9e 100644 --- a/models/actions/run_job_list.go +++ b/models/actions/run_job_list.go @@ -9,6 +9,7 @@ import ( "code.gitea.io/gitea/models/db" repo_model "code.gitea.io/gitea/models/repo" "code.gitea.io/gitea/modules/container" + "code.gitea.io/gitea/modules/optional" "code.gitea.io/gitea/modules/timeutil" "xorm.io/builder" @@ -70,6 +71,7 @@ func (jobs ActionJobList) LoadAttributes(ctx context.Context, withRepo bool) err type FindRunJobOptions struct { db.ListOptions RunID int64 + RunAttemptID optional.Option[int64] // use optional to allow filtering by zero (legacy jobs have run_attempt_id=0) RepoID int64 OwnerID int64 CommitSHA string @@ -83,6 +85,9 @@ func (opts FindRunJobOptions) ToConds() builder.Cond { if opts.RunID > 0 { cond = cond.And(builder.Eq{"`action_run_job`.run_id": opts.RunID}) } + if opts.RunAttemptID.Has() { + cond = cond.And(builder.Eq{"`action_run_job`.run_attempt_id": opts.RunAttemptID.Value()}) + } if opts.RepoID > 0 { cond = cond.And(builder.Eq{"`action_run_job`.repo_id": opts.RepoID}) } diff --git a/models/actions/run_list.go b/models/actions/run_list.go index 8b8c132a48..82dc97f3e5 100644 --- a/models/actions/run_list.go +++ b/models/actions/run_list.go @@ -83,12 +83,6 @@ func (opts FindRunOptions) ToConds() builder.Cond { if opts.CommitSHA != "" { cond = cond.And(builder.Eq{"`action_run`.commit_sha": 
opts.CommitSHA}) } - if len(opts.ConcurrencyGroup) > 0 { - if opts.RepoID == 0 { - panic("Invalid FindRunOptions: repo_id is required") - } - cond = cond.And(builder.Eq{"`action_run`.concurrency_group": opts.ConcurrencyGroup}) - } return cond } diff --git a/models/actions/task.go b/models/actions/task.go index 28928c2bc6..016f91a7bb 100644 --- a/models/actions/task.go +++ b/models/actions/task.go @@ -272,7 +272,6 @@ func CreateTaskForRunner(ctx context.Context, runner *ActionRunner) (*ActionTask } now := timeutil.TimeStampNow() - job.Attempt++ job.Started = now job.Status = StatusRunning diff --git a/models/migrations/migrations.go b/models/migrations/migrations.go index db74ff78d5..c3a8f08b5d 100644 --- a/models/migrations/migrations.go +++ b/models/migrations/migrations.go @@ -26,6 +26,7 @@ import ( "code.gitea.io/gitea/models/migrations/v1_24" "code.gitea.io/gitea/models/migrations/v1_25" "code.gitea.io/gitea/models/migrations/v1_26" + "code.gitea.io/gitea/models/migrations/v1_27" "code.gitea.io/gitea/models/migrations/v1_6" "code.gitea.io/gitea/models/migrations/v1_7" "code.gitea.io/gitea/models/migrations/v1_8" @@ -405,6 +406,9 @@ func prepareMigrationTasks() []*migration { newMigration(328, "Add TokenPermissions column to ActionRunJob", v1_26.AddTokenPermissionsToActionRunJob), newMigration(329, "Add unique constraint for user badge", v1_26.AddUniqueIndexForUserBadge), newMigration(330, "Add name column to webhook", v1_26.AddNameToWebhook), + // Gitea 1.26.0 ends at migration ID number 330 (database version 331) + + newMigration(331, "Add ActionRunAttempt model and related action fields", v1_27.AddActionRunAttemptModel), } return preparedMigrations } diff --git a/models/migrations/v1_27/main_test.go b/models/migrations/v1_27/main_test.go new file mode 100644 index 0000000000..e269e3df9a --- /dev/null +++ b/models/migrations/v1_27/main_test.go @@ -0,0 +1,14 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package v1_27 + +import ( + "testing" + + "code.gitea.io/gitea/models/migrations/base" +) + +func TestMain(m *testing.M) { + base.MainTest(m) +} diff --git a/models/migrations/v1_27/v331.go b/models/migrations/v1_27/v331.go new file mode 100644 index 0000000000..204b7b661e --- /dev/null +++ b/models/migrations/v1_27/v331.go @@ -0,0 +1,158 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package v1_27 + +import ( + "context" + "time" + + "code.gitea.io/gitea/models/migrations/base" + "code.gitea.io/gitea/modules/timeutil" + + "xorm.io/xorm" +) + +type actionRunAttempt struct { + ID int64 + RepoID int64 `xorm:"index(repo_concurrency_status)"` + RunID int64 `xorm:"UNIQUE(run_attempt)"` + Attempt int64 `xorm:"UNIQUE(run_attempt)"` + TriggerUserID int64 + ConcurrencyGroup string `xorm:"index(repo_concurrency_status) NOT NULL DEFAULT ''"` + ConcurrencyCancel bool `xorm:"NOT NULL DEFAULT FALSE"` + Status int `xorm:"index(repo_concurrency_status)"` + Started timeutil.TimeStamp + Stopped timeutil.TimeStamp + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated"` +} + +func (actionRunAttempt) TableName() string { + return "action_run_attempt" +} + +type actionArtifact struct { + ID int64 `xorm:"pk autoincr"` + RunID int64 `xorm:"index unique(runid_attempt_name_path)"` + RunAttemptID int64 `xorm:"index unique(runid_attempt_name_path) NOT NULL DEFAULT 0"` + RunnerID int64 + RepoID int64 `xorm:"index"` + OwnerID int64 + CommitSHA string + StoragePath string + FileSize int64 + FileCompressedSize int64 + ContentEncoding string `xorm:"content_encoding"` + ArtifactPath string `xorm:"index unique(runid_attempt_name_path)"` + ArtifactName string `xorm:"index unique(runid_attempt_name_path)"` + Status int `xorm:"index"` + CreatedUnix timeutil.TimeStamp `xorm:"created"` + UpdatedUnix timeutil.TimeStamp `xorm:"updated index"` + ExpiredUnix timeutil.TimeStamp 
`xorm:"index"` +} + +func (actionArtifact) TableName() string { + return "action_artifact" +} + +// actionRun mirrors the post-migration action_run schema. +type actionRun struct { + ID int64 + Title string + RepoID int64 `xorm:"unique(repo_index)"` + OwnerID int64 `xorm:"index"` + WorkflowID string `xorm:"index"` + Index int64 `xorm:"index unique(repo_index)"` + TriggerUserID int64 `xorm:"index"` + ScheduleID int64 + Ref string `xorm:"index"` + CommitSHA string + IsForkPullRequest bool + NeedApproval bool + ApprovedBy int64 `xorm:"index"` + Event string + EventPayload string `xorm:"LONGTEXT"` + TriggerEvent string + Status int `xorm:"index"` + Version int `xorm:"version default 0"` + RawConcurrency string + Started timeutil.TimeStamp + Stopped timeutil.TimeStamp + PreviousDuration time.Duration + LatestAttemptID int64 `xorm:"index NOT NULL DEFAULT 0"` + Created timeutil.TimeStamp `xorm:"created"` + Updated timeutil.TimeStamp `xorm:"updated"` +} + +func (actionRun) TableName() string { + return "action_run" +} + +// AddActionRunAttemptModel adds the ActionRunAttempt table and the supporting ActionRun/ActionRunJob fields. 
+func AddActionRunAttemptModel(x *xorm.Engine) error { + // add "action_run_attempt" + if _, err := x.SyncWithOptions(xorm.SyncOptions{ + IgnoreDropIndices: true, + }, new(actionRunAttempt)); err != nil { + return err + } + + // update "action_run_job" + type ActionRunJob struct { + RunAttemptID int64 `xorm:"index NOT NULL DEFAULT 0"` + AttemptJobID int64 `xorm:"index NOT NULL DEFAULT 0"` + SourceTaskID int64 `xorm:"NOT NULL DEFAULT 0"` + } + if _, err := x.SyncWithOptions(xorm.SyncOptions{ + IgnoreDropIndices: true, + }, new(ActionRunJob)); err != nil { + return err + } + + // update "action_artifact": let xorm sync add the new 4-column unique index (runid_attempt_name_path) and drop the old 3-column unique (runid_name_path) + if err := x.Sync(new(actionArtifact)); err != nil { + return err + } + + // update "action_run" + // + // This migration intentionally removes the legacy run-level concurrency columns after + // introducing attempt-level concurrency on action_run_attempt. + // + // Existing values from action_run.concurrency_group / action_run.concurrency_cancel are + // not backfilled into action_run_attempt: + // - the old fields are only meaningful while a run is actively participating in + // concurrency scheduling + // - for completed legacy runs, keeping or backfilling those values has no practical + // effect on future scheduling behavior + // - scanning and backfilling old runs would add significant migration cost for little value + // + // This means the schema change is destructive for those two legacy columns by design. + // + // Let xorm sync add the latest_attempt_id column and drop the now-orphan (repo_id, concurrency_group) index. 
+ if err := x.Sync(new(actionRun)); err != nil { + return err + } + concurrencyColumns := make([]string, 0, 2) + for _, col := range []string{"concurrency_group", "concurrency_cancel"} { + exist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "action_run", col) + if err != nil { + return err + } + if exist { + concurrencyColumns = append(concurrencyColumns, col) + } + } + if len(concurrencyColumns) == 0 { + return nil + } + sess := x.NewSession() + defer sess.Close() + if err := base.DropTableColumns(sess, "action_run", concurrencyColumns...); err != nil { + return err + } + // DropTableColumns rebuilds the table on SQLite, which drops all existing indexes. + // Re-sync to restore the indexes defined on actionRun. + return x.Sync(new(actionRun)) +} diff --git a/models/migrations/v1_27/v331_test.go b/models/migrations/v1_27/v331_test.go new file mode 100644 index 0000000000..45f467cf9b --- /dev/null +++ b/models/migrations/v1_27/v331_test.go @@ -0,0 +1,156 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package v1_27 + +import ( + "context" + "slices" + "testing" + + "code.gitea.io/gitea/models/migrations/base" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "xorm.io/xorm/schemas" +) + +type actionRunBeforeV331 struct { + ID int64 `xorm:"pk autoincr"` + ConcurrencyGroup string + ConcurrencyCancel bool + LatestAttemptID int64 `xorm:"-"` +} + +func (actionRunBeforeV331) TableName() string { + return "action_run" +} + +type actionRunJobBeforeV331 struct { + ID int64 `xorm:"pk autoincr"` + RunID int64 `xorm:"index"` + RepoID int64 `xorm:"index"` +} + +func (actionRunJobBeforeV331) TableName() string { + return "action_run_job" +} + +type actionArtifactBeforeV331 struct { + ID int64 `xorm:"pk autoincr"` + RunID int64 `xorm:"index unique(runid_name_path)"` + RepoID int64 `xorm:"index"` + ArtifactPath string `xorm:"index unique(runid_name_path)"` + ArtifactName string `xorm:"index unique(runid_name_path)"` +} + +func (actionArtifactBeforeV331) TableName() string { + return "action_artifact" +} + +func Test_AddActionRunAttemptModel(t *testing.T) { + x, deferable := base.PrepareTestEnv(t, 0, + new(actionRunBeforeV331), + new(actionRunJobBeforeV331), + new(actionArtifactBeforeV331), + ) + defer deferable() + if x == nil || t.Failed() { + return + } + + _, err := x.Insert(&actionArtifactBeforeV331{ + RunID: 1, + RepoID: 1, + ArtifactPath: "artifact/path", + ArtifactName: "artifact-name", + }) + require.NoError(t, err) + + require.NoError(t, AddActionRunAttemptModel(x)) + + tableMap := base.LoadTableSchemasMap(t, x) + + attemptTable := tableMap["action_run_attempt"] + require.NotNil(t, attemptTable) + attemptTablCols := []string{"id", "repo_id", "run_id", "attempt", "trigger_user_id", "status", "started", "stopped", "concurrency_group", "concurrency_cancel", "created", "updated"} + require.ElementsMatch(t, attemptTable.ColumnsSeq(), attemptTablCols) + + runTable := tableMap["action_run"] + 
require.NotNil(t, runTable) + require.Contains(t, runTable.ColumnsSeq(), "latest_attempt_id") + require.NotContains(t, runTable.ColumnsSeq(), "concurrency_group") + require.NotContains(t, runTable.ColumnsSeq(), "concurrency_cancel") + + jobTable := tableMap["action_run_job"] + require.NotNil(t, jobTable) + require.Contains(t, jobTable.ColumnsSeq(), "run_attempt_id") + require.Contains(t, jobTable.ColumnsSeq(), "attempt_job_id") + require.Contains(t, jobTable.ColumnsSeq(), "source_task_id") + + attemptIndexes, err := x.Dialect().GetIndexes(x.DB(), context.Background(), "action_run_attempt") + require.NoError(t, err) + assert.True(t, hasIndexWithColumns(attemptIndexes, []string{"run_id", "attempt"}, true)) + assert.True(t, hasIndexWithColumns(attemptIndexes, []string{"repo_id", "concurrency_group", "status"}, false)) + + runIndexes, err := x.Dialect().GetIndexes(x.DB(), context.Background(), "action_run") + require.NoError(t, err) + assert.True(t, hasIndexWithColumns(runIndexes, []string{"latest_attempt_id"}, false)) + assert.False(t, hasIndexWithColumns(runIndexes, []string{"repo_id", "concurrency_group"}, false)) + + jobIndexes, err := x.Dialect().GetIndexes(x.DB(), context.Background(), "action_run_job") + require.NoError(t, err) + assert.True(t, hasIndexWithColumns(jobIndexes, []string{"run_attempt_id"}, false)) + assert.True(t, hasIndexWithColumns(jobIndexes, []string{"attempt_job_id"}, false)) + + indexes, err := x.Dialect().GetIndexes(x.DB(), context.Background(), "action_artifact") + require.NoError(t, err) + assert.False(t, hasIndexWithColumns(indexes, []string{"run_id", "artifact_path", "artifact_name"}, true)) + assert.True(t, hasIndexWithColumns(indexes, []string{"run_id", "run_attempt_id", "artifact_path", "artifact_name"}, true)) + + _, err = x.Insert(&actionArtifact{ + RunID: 1, + RunAttemptID: 2, + RepoID: 1, + ArtifactPath: "artifact/path", + ArtifactName: "artifact-name", + }) + require.NoError(t, err) + _, err = x.Insert(&actionArtifact{ + RunID: 
1, + RunAttemptID: 2, + RepoID: 1, + ArtifactPath: "artifact/path", + ArtifactName: "artifact-name", + }) + require.Error(t, err) + + _, err = x.Insert(&actionRunAttempt{ + RepoID: 1, + RunID: 1, + Attempt: 2, + TriggerUserID: 1, + Status: 1, + }) + require.NoError(t, err) + _, err = x.Insert(&actionRunAttempt{ + RepoID: 1, + RunID: 1, + Attempt: 2, + TriggerUserID: 2, + Status: 1, + }) + require.Error(t, err) +} + +func hasIndexWithColumns(indexes map[string]*schemas.Index, cols []string, isUnique bool) bool { + for _, index := range indexes { + if isUnique && index.Type != schemas.UniqueType { + continue + } + if slices.Equal(index.Cols, cols) { + return true + } + } + return false +} diff --git a/modules/dump/dumper.go b/modules/dump/dumper.go index 02829d6a1e..2f16070704 100644 --- a/modules/dump/dumper.go +++ b/modules/dump/dumper.go @@ -4,6 +4,7 @@ package dump import ( + "archive/zip" "context" "errors" "fmt" @@ -85,7 +86,7 @@ func NewDumper(ctx context.Context, format string, output io.Writer) (*Dumper, e var comp archives.ArchiverAsync switch format { case "zip": - comp = archives.Zip{} + comp = archives.Zip{Compression: zip.Deflate} case "tar": comp = archives.Tar{} case "tar.sz": diff --git a/modules/git/gitcmd/error.go b/modules/git/gitcmd/error.go index b674068c40..436f4e18ae 100644 --- a/modules/git/gitcmd/error.go +++ b/modules/git/gitcmd/error.go @@ -56,6 +56,14 @@ func StderrHasPrefix(err error, prefix string) bool { return strings.HasPrefix(stderr, prefix) } +func StderrContains(err error, sub string) bool { + stderr, ok := ErrorAsStderr(err) + if !ok { + return false + } + return strings.Contains(stderr, sub) +} + func IsErrorExitCode(err error, code int) bool { var exitError *exec.ExitError if errors.As(err, &exitError) { diff --git a/modules/markup/html_test.go b/modules/markup/html_test.go index e62747c724..4fa9466d19 100644 --- a/modules/markup/html_test.go +++ b/modules/markup/html_test.go @@ -317,7 +317,7 @@ func TestRender_email(t 
*testing.T) { func TestRender_emoji(t *testing.T) { setting.AppURL = markup.TestAppURL - setting.StaticURLPrefix = markup.TestAppURL + setting.StaticURLPrefix = strings.TrimSuffix(markup.TestAppURL, "/") test := func(input, expected string) { expected = strings.ReplaceAll(expected, "&", "&") @@ -500,7 +500,7 @@ func Test_ParseClusterFuzz(t *testing.T) { } func TestPostProcess(t *testing.T) { - setting.StaticURLPrefix = markup.TestAppURL // can't run standalone + setting.StaticURLPrefix = strings.TrimSuffix(markup.TestAppURL, "/") // can't run standalone defer testModule.MockVariableValue(&markup.RenderBehaviorForTesting.DisableAdditionalAttributes, true)() test := func(input, expected string) { diff --git a/modules/setting/actions.go b/modules/setting/actions.go index 7a91ecb593..0d1bdadc8e 100644 --- a/modules/setting/actions.go +++ b/modules/setting/actions.go @@ -12,6 +12,8 @@ import ( "code.gitea.io/gitea/modules/log" ) +const defaultMaxRerunAttempts = 50 + // Actions settings var ( Actions = struct { @@ -27,11 +29,13 @@ var ( AbandonedJobTimeout time.Duration `ini:"ABANDONED_JOB_TIMEOUT"` SkipWorkflowStrings []string `ini:"SKIP_WORKFLOW_STRINGS"` WorkflowDirs []string `ini:"WORKFLOW_DIRS"` + MaxRerunAttempts int64 `ini:"MAX_RERUN_ATTEMPTS"` }{ Enabled: true, DefaultActionsURL: defaultActionsURLGitHub, SkipWorkflowStrings: []string{"[skip ci]", "[ci skip]", "[no ci]", "[skip actions]", "[actions skip]"}, WorkflowDirs: []string{".gitea/workflows", ".github/workflows"}, + MaxRerunAttempts: defaultMaxRerunAttempts, } ) @@ -118,6 +122,10 @@ func loadActionsFrom(rootCfg ConfigProvider) error { Actions.EndlessTaskTimeout = sec.Key("ENDLESS_TASK_TIMEOUT").MustDuration(3 * time.Hour) Actions.AbandonedJobTimeout = sec.Key("ABANDONED_JOB_TIMEOUT").MustDuration(24 * time.Hour) + if Actions.MaxRerunAttempts <= 0 { + Actions.MaxRerunAttempts = defaultMaxRerunAttempts + } + if !Actions.LogCompression.IsValid() { return fmt.Errorf("invalid [actions] LOG_COMPRESSION: %q", 
Actions.LogCompression) } diff --git a/modules/setting/security.go b/modules/setting/security.go index 152bcffd9f..8b7664baba 100644 --- a/modules/setting/security.go +++ b/modules/setting/security.go @@ -16,9 +16,11 @@ import ( // Security settings var Security = struct { // TODO: move more settings to this struct in future - XFrameOptions string + XFrameOptions string + XContentTypeOptions string }{ - XFrameOptions: "SAMEORIGIN", + XFrameOptions: "SAMEORIGIN", + XContentTypeOptions: "nosniff", } var ( @@ -154,6 +156,8 @@ func loadSecurityFrom(rootCfg ConfigProvider) { Security.XFrameOptions = rootCfg.Section("cors").Key("X_FRAME_OPTIONS").MustString(Security.XFrameOptions) } + Security.XContentTypeOptions = sec.Key("X_CONTENT_TYPE_OPTIONS").MustString(Security.XContentTypeOptions) + twoFactorAuth := sec.Key("TWO_FACTOR_AUTH").String() switch twoFactorAuth { case "": diff --git a/modules/setting/server.go b/modules/setting/server.go index 1085e052a3..dc58e43c43 100644 --- a/modules/setting/server.go +++ b/modules/setting/server.go @@ -4,7 +4,6 @@ package setting import ( - "encoding/base64" "net" "net/url" "os" @@ -13,7 +12,6 @@ import ( "strings" "time" - "code.gitea.io/gitea/modules/json" "code.gitea.io/gitea/modules/log" ) @@ -112,72 +110,9 @@ var ( StartupTimeout time.Duration PerWriteTimeout = 30 * time.Second PerWritePerKbTimeout = 10 * time.Second - StaticURLPrefix string - AbsoluteAssetURL string - - ManifestData string + StaticURLPrefix string // no trailing slash, defaults to AppSubURL, the URL can be relative or absolute ) -// MakeManifestData generates web app manifest JSON -func MakeManifestData(appName, appURL, absoluteAssetURL string) []byte { - type manifestIcon struct { - Src string `json:"src"` - Type string `json:"type"` - Sizes string `json:"sizes"` - } - - type manifestJSON struct { - Name string `json:"name"` - ShortName string `json:"short_name"` - StartURL string `json:"start_url"` - Icons []manifestIcon `json:"icons"` - } - - bytes, err := 
json.Marshal(&manifestJSON{ - Name: appName, - ShortName: appName, - StartURL: appURL, - Icons: []manifestIcon{ - { - Src: absoluteAssetURL + "/assets/img/logo.png", - Type: "image/png", - Sizes: "512x512", - }, - { - Src: absoluteAssetURL + "/assets/img/logo.svg", - Type: "image/svg+xml", - Sizes: "512x512", - }, - }, - }) - if err != nil { - log.Error("unable to marshal manifest JSON. Error: %v", err) - return make([]byte, 0) - } - - return bytes -} - -// MakeAbsoluteAssetURL returns the absolute asset url prefix without a trailing slash -func MakeAbsoluteAssetURL(appURL *url.URL, staticURLPrefix string) string { - parsedPrefix, err := url.Parse(strings.TrimSuffix(staticURLPrefix, "/")) - if err != nil { - log.Fatal("Unable to parse STATIC_URL_PREFIX: %v", err) - } - - if err == nil && parsedPrefix.Hostname() == "" { - if staticURLPrefix == "" { - return strings.TrimSuffix(appURL.String(), "/") - } - - // StaticURLPrefix is just a path - appHostURL := &url.URL{Scheme: appURL.Scheme, Host: appURL.Host} - return appHostURL.String() + "/" + strings.Trim(staticURLPrefix, "/") - } - - return strings.TrimSuffix(staticURLPrefix, "/") -} - func loadServerFrom(rootCfg ConfigProvider) { sec := rootCfg.Section("server") AppName = rootCfg.Section("").Key("APP_NAME").MustString("Gitea: Git with a cup of tea") @@ -313,10 +248,6 @@ func loadServerFrom(rootCfg ConfigProvider) { Domain = urlHostname } - AbsoluteAssetURL = MakeAbsoluteAssetURL(appURL, StaticURLPrefix) - manifestBytes := MakeManifestData(AppName, AppURL, AbsoluteAssetURL) - ManifestData = `application/json;base64,` + base64.StdEncoding.EncodeToString(manifestBytes) - var defaultLocalURL string switch Protocol { case HTTPUnix: diff --git a/modules/setting/setting_test.go b/modules/setting/setting_test.go deleted file mode 100644 index 13575f52a6..0000000000 --- a/modules/setting/setting_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2020 The Gitea Authors. All rights reserved. 
-// SPDX-License-Identifier: MIT - -package setting - -import ( - "net/url" - "testing" - - "code.gitea.io/gitea/modules/json" - - "github.com/stretchr/testify/assert" -) - -func TestMakeAbsoluteAssetURL(t *testing.T) { - appURL1, _ := url.Parse("https://localhost:1234") - appURL2, _ := url.Parse("https://localhost:1234/") - appURLSub1, _ := url.Parse("https://localhost:1234/foo") - appURLSub2, _ := url.Parse("https://localhost:1234/foo/") - - // static URL is an absolute URL, so should be used - assert.Equal(t, "https://localhost:2345", MakeAbsoluteAssetURL(appURL1, "https://localhost:2345")) - assert.Equal(t, "https://localhost:2345", MakeAbsoluteAssetURL(appURL1, "https://localhost:2345/")) - - assert.Equal(t, "https://localhost:1234/foo", MakeAbsoluteAssetURL(appURL1, "/foo")) - assert.Equal(t, "https://localhost:1234/foo", MakeAbsoluteAssetURL(appURL2, "/foo")) - assert.Equal(t, "https://localhost:1234/foo", MakeAbsoluteAssetURL(appURL1, "/foo/")) - - assert.Equal(t, "https://localhost:1234/foo", MakeAbsoluteAssetURL(appURLSub1, "/foo")) - assert.Equal(t, "https://localhost:1234/foo", MakeAbsoluteAssetURL(appURLSub2, "/foo")) - assert.Equal(t, "https://localhost:1234/foo", MakeAbsoluteAssetURL(appURLSub1, "/foo/")) - - assert.Equal(t, "https://localhost:1234/bar", MakeAbsoluteAssetURL(appURLSub1, "/bar")) - assert.Equal(t, "https://localhost:1234/bar", MakeAbsoluteAssetURL(appURLSub2, "/bar")) - assert.Equal(t, "https://localhost:1234/bar", MakeAbsoluteAssetURL(appURLSub1, "/bar/")) -} - -func TestMakeManifestData(t *testing.T) { - jsonBytes := MakeManifestData(`Example App '\"`, "https://example.com", "https://example.com/foo/bar") - assert.True(t, json.Valid(jsonBytes)) -} diff --git a/modules/structs/repo_actions.go b/modules/structs/repo_actions.go index 92ca9bccce..4592c18ed6 100644 --- a/modules/structs/repo_actions.go +++ b/modules/structs/repo_actions.go @@ -105,12 +105,18 @@ type ActionArtifact struct { // ActionWorkflowRun represents a WorkflowRun 
type ActionWorkflowRun struct { - ID int64 `json:"id"` - URL string `json:"url"` - HTMLURL string `json:"html_url"` - DisplayTitle string `json:"display_title"` - Path string `json:"path"` - Event string `json:"event"` + ID int64 `json:"id"` + URL string `json:"url"` + // PreviousAttemptURL is the API URL of the previous attempt of this run, e.g. ".../actions/runs/{run_id}/attempts/{attempt-1}". + // It is set only when the current attempt is > 1 (i.e. a rerun). For the first attempt, or for legacy runs that pre-date ActionRunAttempt, it is null. + PreviousAttemptURL *string `json:"previous_attempt_url"` + HTMLURL string `json:"html_url"` + DisplayTitle string `json:"display_title"` + Path string `json:"path"` + Event string `json:"event"` + // RunAttempt is 1-based for runs created after ActionRunAttempt was introduced. + // A value of 0 is a legacy-only sentinel for runs created before attempts existed + // and indicates no corresponding /attempts/{n} resource is available. RunAttempt int64 `json:"run_attempt"` RunNumber int64 `json:"run_number"` RepositoryID int64 `json:"repository_id,omitempty"` diff --git a/options/locale/locale_en-US.json b/options/locale/locale_en-US.json index f8828f95c6..6281ff8f54 100644 --- a/options/locale/locale_en-US.json +++ b/options/locale/locale_en-US.json @@ -1850,6 +1850,7 @@ "repo.pulls.merge_manually": "Manually merged", "repo.pulls.merge_commit_id": "The merge commit ID", "repo.pulls.require_signed_wont_sign": "The branch requires signed commits but this merge will not be signed", + "repo.pulls.require_signed_head_commits_unverified": "The branch requires signed commits but one or more commits on this pull request are not verified", "repo.pulls.invalid_merge_option": "You cannot use this merge option for this pull request.", "repo.pulls.merge_conflict": "Merge Failed: There was a conflict while merging. 
Hint: Try a different strategy.", "repo.pulls.merge_conflict_summary": "Error Message", @@ -3771,9 +3772,11 @@ "actions.runs.delete.description": "Are you sure you want to permanently delete this workflow run? This action cannot be undone.", "actions.runs.not_done": "This workflow run is not done.", "actions.runs.view_workflow_file": "View workflow file", - "actions.runs.workflow_graph": "Workflow Graph", "actions.runs.summary": "Summary", "actions.runs.all_jobs": "All jobs", + "actions.runs.attempt": "Attempt", + "actions.runs.latest": "Latest", + "actions.runs.latest_attempt": "Latest attempt", "actions.runs.triggered_via": "Triggered via %s", "actions.runs.total_duration": "Total duration:", "actions.workflow.disable": "Disable Workflow", diff --git a/routers/api/actions/artifacts.go b/routers/api/actions/artifacts.go index 13cbecb5cd..838ddb7f91 100644 --- a/routers/api/actions/artifacts.go +++ b/routers/api/actions/artifacts.go @@ -74,6 +74,7 @@ import ( "code.gitea.io/gitea/modules/httplib" "code.gitea.io/gitea/modules/json" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/optional" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/storage" "code.gitea.io/gitea/modules/util" @@ -310,7 +311,7 @@ func (ar artifactRoutes) confirmUploadArtifact(ctx *ArtifactContext) { ctx.HTTPError(http.StatusBadRequest, "Error artifact name is empty") return } - if err := mergeChunksForRun(ctx, ar.fs, runID, artifactName); err != nil { + if err := mergeChunksForRun(ctx, ar.fs, runID, ctx.ActionTask.Job.RunAttemptID, artifactName); err != nil { log.Error("Error merge chunks: %v", err) ctx.HTTPError(http.StatusInternalServerError, "Error merge chunks") return @@ -338,8 +339,9 @@ func (ar artifactRoutes) listArtifacts(ctx *ArtifactContext) { } artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{ - RunID: runID, - Status: int(actions.ArtifactStatusUploadConfirmed), + RunID: runID, + RunAttemptID: 
optional.Some(ctx.ActionTask.Job.RunAttemptID), + Status: int(actions.ArtifactStatusUploadConfirmed), }) if err != nil { log.Error("Error getting artifacts: %v", err) @@ -404,6 +406,7 @@ func (ar artifactRoutes) getDownloadArtifactURL(ctx *ArtifactContext) { artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{ RunID: runID, + RunAttemptID: optional.Some(ctx.ActionTask.Job.RunAttemptID), ArtifactName: itemPath, Status: int(actions.ArtifactStatusUploadConfirmed), }) @@ -477,6 +480,11 @@ func (ar artifactRoutes) downloadArtifact(ctx *ArtifactContext) { ctx.HTTPError(http.StatusBadRequest) return } + if ctx.ActionTask.Job.RunAttemptID > 0 && artifact.RunAttemptID != ctx.ActionTask.Job.RunAttemptID { + log.Error("Error mismatch runAttemptID and artifactID, task: %v, artifact: %v", ctx.ActionTask.Job.RunAttemptID, artifactID) + ctx.HTTPError(http.StatusBadRequest) + return + } if artifact.Status != actions.ArtifactStatusUploadConfirmed { log.Error("Error artifact not found: %s", artifact.Status.ToString()) ctx.HTTPError(http.StatusNotFound, "Error artifact not found") diff --git a/routers/api/actions/artifacts_chunks.go b/routers/api/actions/artifacts_chunks.go index 8d04c68922..6f84f7a5cf 100644 --- a/routers/api/actions/artifacts_chunks.go +++ b/routers/api/actions/artifacts_chunks.go @@ -20,6 +20,7 @@ import ( "code.gitea.io/gitea/models/actions" "code.gitea.io/gitea/models/db" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/optional" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/storage" ) @@ -257,10 +258,11 @@ func listOrderedChunksForArtifact(st storage.ObjectStorage, runID, artifactID in return emptyListAsError(chunks) } -func mergeChunksForRun(ctx *ArtifactContext, st storage.ObjectStorage, runID int64, artifactName string) error { +func mergeChunksForRun(ctx *ArtifactContext, st storage.ObjectStorage, runID, runAttemptID int64, artifactName string) error { // read all db artifacts by name 
artifacts, err := db.Find[actions.ActionArtifact](ctx, actions.FindArtifactsOptions{ RunID: runID, + RunAttemptID: optional.Some(runAttemptID), ArtifactName: artifactName, }) if err != nil { diff --git a/routers/api/actions/artifactsv4.go b/routers/api/actions/artifactsv4.go index e86645cb0c..8bd3fb7e2b 100644 --- a/routers/api/actions/artifactsv4.go +++ b/routers/api/actions/artifactsv4.go @@ -107,6 +107,7 @@ import ( "code.gitea.io/gitea/modules/actions" "code.gitea.io/gitea/modules/httplib" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/optional" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/storage" "code.gitea.io/gitea/modules/util" @@ -266,9 +267,9 @@ func (r *artifactV4Routes) verifySignature(ctx *ArtifactContext, endp string) (* return task, artifactName, true } -func (r *artifactV4Routes) getArtifactByName(ctx *ArtifactContext, runID int64, name string) (*actions_model.ActionArtifact, error) { +func (r *artifactV4Routes) getArtifactByName(ctx *ArtifactContext, runID, runAttemptID int64, name string) (*actions_model.ActionArtifact, error) { var art actions_model.ActionArtifact - has, err := db.GetEngine(ctx).Where(builder.Eq{"run_id": runID, "artifact_name": name}, builder.Like{"content_encoding", "%/%"}).Get(&art) + has, err := db.GetEngine(ctx).Where(builder.Eq{"run_id": runID, "run_attempt_id": runAttemptID, "artifact_name": name}, builder.Like{"content_encoding", "%/%"}).Get(&art) if err != nil { return nil, err } else if !has { @@ -388,7 +389,7 @@ func (r *artifactV4Routes) uploadArtifact(ctx *ArtifactContext) { switch comp { case "block", "appendBlock": // get artifact by name - artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName) + artifact, err := r.getArtifactByName(ctx, task.Job.RunID, task.Job.RunAttemptID, artifactName) if err != nil { log.Error("Error artifact not found: %v", err) ctx.HTTPError(http.StatusNotFound, "Error artifact not found") @@ -475,7 +476,7 @@ func (r 
*artifactV4Routes) finalizeArtifact(ctx *ArtifactContext) { } // get artifact by name - artifact, err := r.getArtifactByName(ctx, runID, req.Name) + artifact, err := r.getArtifactByName(ctx, runID, ctx.ActionTask.Job.RunAttemptID, req.Name) if err != nil { log.Error("Error artifact not found: %v", err) ctx.HTTPError(http.StatusNotFound, "Error artifact not found") @@ -589,6 +590,7 @@ func (r *artifactV4Routes) listArtifacts(ctx *ArtifactContext) { artifacts, err := db.Find[actions_model.ActionArtifact](ctx, actions_model.FindArtifactsOptions{ RunID: runID, + RunAttemptID: optional.Some(ctx.ActionTask.Job.RunAttemptID), Status: int(actions_model.ArtifactStatusUploadConfirmed), FinalizedArtifactsV4: true, }) @@ -642,7 +644,7 @@ func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) { artifactName := req.Name // get artifact by name - artifact, err := r.getArtifactByName(ctx, runID, artifactName) + artifact, err := r.getArtifactByName(ctx, runID, ctx.ActionTask.Job.RunAttemptID, artifactName) if err != nil { log.Error("Error artifact not found: %v", err) ctx.HTTPError(http.StatusNotFound, "Error artifact not found") @@ -676,7 +678,7 @@ func (r *artifactV4Routes) downloadArtifact(ctx *ArtifactContext) { } // get artifact by name - artifact, err := r.getArtifactByName(ctx, task.Job.RunID, artifactName) + artifact, err := r.getArtifactByName(ctx, task.Job.RunID, task.Job.RunAttemptID, artifactName) if err != nil { log.Error("Error artifact not found: %v", err) ctx.HTTPError(http.StatusNotFound, "Error artifact not found") @@ -707,14 +709,14 @@ func (r *artifactV4Routes) deleteArtifact(ctx *ArtifactContext) { } // get artifact by name - artifact, err := r.getArtifactByName(ctx, runID, req.Name) + artifact, err := r.getArtifactByName(ctx, runID, ctx.ActionTask.Job.RunAttemptID, req.Name) if err != nil { log.Error("Error artifact not found: %v", err) ctx.HTTPError(http.StatusNotFound, "Error artifact not found") return } - err = 
actions_model.SetArtifactNeedDelete(ctx, runID, req.Name) + err = actions_model.SetArtifactNeedDeleteByRunAttempt(ctx, runID, ctx.ActionTask.Job.RunAttemptID, req.Name) if err != nil { log.Error("Error deleting artifacts: %v", err) ctx.HTTPError(http.StatusInternalServerError, err.Error()) diff --git a/routers/api/actions/runner/runner.go b/routers/api/actions/runner/runner.go index 886595be71..eee39760ed 100644 --- a/routers/api/actions/runner/runner.go +++ b/routers/api/actions/runner/runner.go @@ -15,7 +15,6 @@ import ( "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/util" actions_service "code.gitea.io/gitea/services/actions" - notify_service "code.gitea.io/gitea/services/notify" runnerv1 "code.gitea.io/actions-proto-go/runner/v1" "code.gitea.io/actions-proto-go/runner/v1/runnerv1connect" @@ -224,7 +223,7 @@ func (s *Service) UpdateTask( actions_service.CreateCommitStatusForRunJobs(ctx, task.Job.Run, task.Job) if task.Status.IsDone() { - notify_service.WorkflowJobStatusUpdate(ctx, task.Job.Run.Repo, task.Job.Run.TriggerUser, task.Job, task) + actions_service.NotifyWorkflowJobStatusUpdateWithTask(ctx, task.Job, task) } if req.Msg.State.Result != runnerv1.Result_RESULT_UNSPECIFIED { @@ -232,7 +231,7 @@ func (s *Service) UpdateTask( log.Error("Emit ready jobs of run %d: %v", task.Job.RunID, err) } if task.Job.Run.Status.IsDone() { - actions_service.NotifyWorkflowRunStatusUpdateWithReload(ctx, task.Job) + actions_service.NotifyWorkflowRunStatusUpdateWithReload(ctx, task.Job.RepoID, task.Job.RunID) } } diff --git a/routers/api/v1/admin/action.go b/routers/api/v1/admin/action.go index 2fbb8e1a95..62e0c6addc 100644 --- a/routers/api/v1/admin/action.go +++ b/routers/api/v1/admin/action.go @@ -37,7 +37,7 @@ func ListWorkflowJobs(ctx *context.APIContext) { // "404": // "$ref": "#/responses/notFound" - shared.ListJobs(ctx, 0, 0, 0) + shared.ListJobs(ctx, 0, 0, 0, nil) } // ListWorkflowRuns Lists all runs diff --git a/routers/api/v1/api.go 
b/routers/api/v1/api.go index 2d80692fef..e13bbedd29 100644 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -865,7 +865,6 @@ func checkDeprecatedAuthMethods(ctx *context.APIContext) { func Routes() *web.Router { m := web.NewRouter() - m.BeforeRouting(securityHeaders()) if setting.CORSConfig.Enabled { m.BeforeRouting(cors.Handler(cors.Options{ AllowedOrigins: setting.CORSConfig.AllowDomain, @@ -1255,6 +1254,10 @@ func Routes() *web.Router { m.Group("/runs", func() { m.Group("/{run}", func() { m.Get("", repo.GetWorkflowRun) + m.Group("/attempts/{attempt}", func() { + m.Get("", repo.GetWorkflowRunAttempt) + m.Get("/jobs", repo.ListWorkflowRunAttemptJobs) + }) m.Delete("", reqToken(), reqRepoWriter(unit.TypeActions), repo.DeleteActionRun) m.Post("/rerun", reqToken(), reqRepoWriter(unit.TypeActions), repo.RerunWorkflowRun) m.Post("/rerun-failed-jobs", reqToken(), reqRepoWriter(unit.TypeActions), repo.RerunFailedWorkflowRun) @@ -1745,14 +1748,3 @@ func Routes() *web.Router { return m } - -func securityHeaders() func(http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { - // CORB: https://www.chromium.org/Home/chromium-security/corb-for-developers - // http://stackoverflow.com/a/3146618/244009 - resp.Header().Set("x-content-type-options", "nosniff") - next.ServeHTTP(resp, req) - }) - } -} diff --git a/routers/api/v1/org/action.go b/routers/api/v1/org/action.go index 01b57b3fac..d218c19fd4 100644 --- a/routers/api/v1/org/action.go +++ b/routers/api/v1/org/action.go @@ -624,7 +624,7 @@ func (Action) ListWorkflowJobs(ctx *context.APIContext) { // "$ref": "#/responses/error" // "404": // "$ref": "#/responses/notFound" - shared.ListJobs(ctx, ctx.Org.Organization.ID, 0, 0) + shared.ListJobs(ctx, ctx.Org.Organization.ID, 0, 0, nil) } func (Action) ListWorkflowRuns(ctx *context.APIContext) { diff --git a/routers/api/v1/repo/action.go b/routers/api/v1/repo/action.go 
index 7ac8a10575..8a0be250da 100644 --- a/routers/api/v1/repo/action.go +++ b/routers/api/v1/repo/action.go @@ -23,6 +23,7 @@ import ( secret_model "code.gitea.io/gitea/models/secret" "code.gitea.io/gitea/modules/actions" "code.gitea.io/gitea/modules/httplib" + "code.gitea.io/gitea/modules/optional" "code.gitea.io/gitea/modules/setting" api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/util" @@ -676,7 +677,7 @@ func (Action) UpdateRunner(ctx *context.APIContext) { shared.UpdateRunner(ctx, 0, ctx.Repo.Repository.ID, ctx.PathParamInt64("runner_id")) } -// GetWorkflowRunJobs Lists all jobs for a workflow run. +// ListWorkflowJobs Lists all jobs for a repository. func (Action) ListWorkflowJobs(ctx *context.APIContext) { // swagger:operation GET /repos/{owner}/{repo}/actions/jobs repository listWorkflowJobs // --- @@ -717,7 +718,7 @@ func (Action) ListWorkflowJobs(ctx *context.APIContext) { repoID := ctx.Repo.Repository.ID - shared.ListJobs(ctx, 0, repoID, 0) + shared.ListJobs(ctx, 0, repoID, 0, nil) } // ListWorkflowRuns Lists all runs for a repository run. 
@@ -1163,7 +1164,7 @@ func getCurrentRepoActionRunJobsByID(ctx *context.APIContext) (*actions_model.Ac return nil, nil } - jobs, err := actions_model.GetRunJobsByRunID(ctx, run.ID) + jobs, err := actions_model.GetLatestAttemptJobsByRepoAndRunID(ctx, run.RepoID, run.ID) if err != nil { ctx.APIErrorInternal(err) return nil, nil @@ -1171,6 +1172,24 @@ func getCurrentRepoActionRunJobsByID(ctx *context.APIContext) (*actions_model.Ac return run, jobs } +func getCurrentRepoActionRunAttemptByNumber(ctx *context.APIContext) (*actions_model.ActionRun, *actions_model.ActionRunAttempt) { + run := getCurrentRepoActionRunByID(ctx) + if ctx.Written() { + return nil, nil + } + + attemptNum := ctx.PathParamInt64("attempt") + attempt, err := actions_model.GetRunAttemptByRunIDAndAttemptNum(ctx, run.ID, attemptNum) + if errors.Is(err, util.ErrNotExist) { + ctx.APIErrorNotFound(err) + return nil, nil + } else if err != nil { + ctx.APIErrorInternal(err) + return nil, nil + } + return run, attempt +} + // GetWorkflowRun Gets a specific workflow run. func GetWorkflowRun(ctx *context.APIContext) { // swagger:operation GET /repos/{owner}/{repo}/actions/runs/{run} repository GetWorkflowRun @@ -1207,7 +1226,56 @@ func GetWorkflowRun(ctx *context.APIContext) { return } - convertedRun, err := convert.ToActionWorkflowRun(ctx, ctx.Repo.Repository, run) + convertedRun, err := convert.ToActionWorkflowRun(ctx, ctx.Repo.Repository, run, nil) + if err != nil { + ctx.APIErrorInternal(err) + return + } + ctx.JSON(http.StatusOK, convertedRun) +} + +// GetWorkflowRunAttempt Gets a specific workflow run attempt. 
+func GetWorkflowRunAttempt(ctx *context.APIContext) { + // swagger:operation GET /repos/{owner}/{repo}/actions/runs/{run}/attempts/{attempt} repository getWorkflowRunAttempt + // --- + // summary: Gets a specific workflow run attempt + // produces: + // - application/json + // parameters: + // - name: owner + // in: path + // description: owner of the repo + // type: string + // required: true + // - name: repo + // in: path + // description: name of the repository + // type: string + // required: true + // - name: run + // in: path + // description: id of the run + // type: integer + // required: true + // - name: attempt + // in: path + // description: logical attempt number of the run + // type: integer + // required: true + // responses: + // "200": + // "$ref": "#/responses/WorkflowRun" + // "400": + // "$ref": "#/responses/error" + // "404": + // "$ref": "#/responses/notFound" + + run, attempt := getCurrentRepoActionRunAttemptByNumber(ctx) + if ctx.Written() { + return + } + + convertedRun, err := convert.ToActionWorkflowRun(ctx, ctx.Repo.Repository, run, attempt) if err != nil { ctx.APIErrorInternal(err) return @@ -1247,6 +1315,8 @@ func RerunWorkflowRun(ctx *context.APIContext) { // "$ref": "#/responses/forbidden" // "404": // "$ref": "#/responses/notFound" + // "409": + // "$ref": "#/responses/error" // "422": // "$ref": "#/responses/validationError" @@ -1255,12 +1325,12 @@ func RerunWorkflowRun(ctx *context.APIContext) { return } - if err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, jobs); err != nil { + if _, err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, ctx.Doer, jobs); err != nil { handleWorkflowRerunError(ctx, err) return } - convertedRun, err := convert.ToActionWorkflowRun(ctx, ctx.Repo.Repository, run) + convertedRun, err := convert.ToActionWorkflowRun(ctx, ctx.Repo.Repository, run, nil) if err != nil { ctx.APIErrorInternal(err) return @@ -1298,6 +1368,8 @@ func RerunFailedWorkflowRun(ctx 
*context.APIContext) { // "$ref": "#/responses/forbidden" // "404": // "$ref": "#/responses/notFound" + // "409": + // "$ref": "#/responses/error" // "422": // "$ref": "#/responses/validationError" @@ -1306,7 +1378,7 @@ func RerunFailedWorkflowRun(ctx *context.APIContext) { return } - if err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, actions_service.GetFailedRerunJobs(jobs)); err != nil { + if _, err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, ctx.Doer, actions_service.GetFailedJobsForRerun(jobs)); err != nil { handleWorkflowRerunError(ctx, err) return } @@ -1351,6 +1423,8 @@ func RerunWorkflowJob(ctx *context.APIContext) { // "$ref": "#/responses/forbidden" // "404": // "$ref": "#/responses/notFound" + // "409": + // "$ref": "#/responses/error" // "422": // "$ref": "#/responses/validationError" @@ -1367,12 +1441,28 @@ func RerunWorkflowJob(ctx *context.APIContext) { } targetJob := jobs[jobIdx] - if err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, actions_service.GetAllRerunJobs(targetJob, jobs)); err != nil { + newAttempt, err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, ctx.Doer, []*actions_model.ActionRunJob{targetJob}) + if err != nil { handleWorkflowRerunError(ctx, err) return } - convertedJob, err := convert.ToActionWorkflowJob(ctx, ctx.Repo.Repository, nil, targetJob) + // Legacy jobs had AttemptJobID=0 before the rerun; createOriginalAttemptForLegacyRun inside + // RerunWorkflowRunJobs has since backfilled it in the DB, so reload only in that case. 
+ if targetJob.AttemptJobID == 0 { + targetJob, err = actions_model.GetRunJobByRepoAndID(ctx, run.RepoID, targetJob.ID) + if err != nil { + ctx.APIErrorInternal(err) + return + } + } + rerunJob, err := actions_model.GetRunJobByAttemptJobID(ctx, run.ID, newAttempt.ID, targetJob.AttemptJobID) + if err != nil { + handleWorkflowRerunError(ctx, err) + return + } + + convertedJob, err := convert.ToActionWorkflowJob(ctx, ctx.Repo.Repository, nil, rerunJob) if err != nil { ctx.APIErrorInternal(err) return @@ -1384,6 +1474,12 @@ func handleWorkflowRerunError(ctx *context.APIContext, err error) { if errors.Is(err, util.ErrInvalidArgument) { ctx.APIError(http.StatusBadRequest, err) return + } else if errors.Is(err, util.ErrAlreadyExist) { + ctx.APIError(http.StatusConflict, err) + return + } else if errors.Is(err, util.ErrNotExist) { + ctx.APIError(http.StatusNotFound, err) + return } ctx.APIErrorInternal(err) } @@ -1440,9 +1536,75 @@ func ListWorkflowRunJobs(ctx *context.APIContext) { return } + run, err := actions_model.GetRunByRepoAndID(ctx, repoID, runID) + if err != nil { + if errors.Is(err, util.ErrNotExist) { + ctx.APIErrorNotFound(err) + } else { + ctx.APIErrorInternal(err) + } + return + } // runID is used as an additional filter next to repoID to ensure that we only list jobs for the specified repoID and runID. // no additional checks for runID are needed here - shared.ListJobs(ctx, 0, repoID, runID) + shared.ListJobs(ctx, 0, repoID, runID, optional.Some(run.LatestAttemptID)) +} + +// ListWorkflowRunAttemptJobs Lists all jobs for a workflow run attempt. 
+func ListWorkflowRunAttemptJobs(ctx *context.APIContext) { + // swagger:operation GET /repos/{owner}/{repo}/actions/runs/{run}/attempts/{attempt}/jobs repository listWorkflowRunAttemptJobs + // --- + // summary: Lists all jobs for a workflow run attempt + // produces: + // - application/json + // parameters: + // - name: owner + // in: path + // description: owner of the repo + // type: string + // required: true + // - name: repo + // in: path + // description: name of the repository + // type: string + // required: true + // - name: run + // in: path + // description: id of the workflow run + // type: integer + // required: true + // - name: attempt + // in: path + // description: logical attempt number of the run + // type: integer + // required: true + // - name: status + // in: query + // description: workflow status (pending, queued, in_progress, failure, success, skipped) + // type: string + // required: false + // - name: page + // in: query + // description: page number of results to return (1-based) + // type: integer + // - name: limit + // in: query + // description: page size of results + // type: integer + // responses: + // "200": + // "$ref": "#/responses/WorkflowJobsList" + // "400": + // "$ref": "#/responses/error" + // "404": + // "$ref": "#/responses/notFound" + + run, attempt := getCurrentRepoActionRunAttemptByNumber(ctx) + if ctx.Written() { + return + } + + shared.ListJobs(ctx, 0, run.RepoID, run.ID, optional.Some(attempt.ID)) } // GetWorkflowJob Gets a specific workflow job for a workflow run. 
@@ -1758,7 +1920,7 @@ func DeleteArtifact(ctx *context.APIContext) { } if actions.IsArtifactV4(art) { - if err := actions_model.SetArtifactNeedDelete(ctx, art.RunID, art.ArtifactName); err != nil { + if err := actions_model.SetArtifactNeedDeleteByID(ctx, art.ID); err != nil { ctx.APIErrorInternal(err) return } diff --git a/routers/api/v1/repo/pull.go b/routers/api/v1/repo/pull.go index ef8cc6cd93..8b1fc8f5cb 100644 --- a/routers/api/v1/repo/pull.go +++ b/routers/api/v1/repo/pull.go @@ -965,7 +965,7 @@ func MergePullRequest(ctx *context.APIContext) { } // start with merging by checking - if err := pull_service.CheckPullMergeable(ctx, ctx.Doer, &ctx.Repo.Permission, pr, mergeCheckType, form.ForceMerge); err != nil { + if err := pull_service.CheckPullMergeable(ctx, ctx.Doer, &ctx.Repo.Permission, pr, mergeCheckType, repo_model.MergeStyle(form.Do), form.ForceMerge); err != nil { if errors.Is(err, pull_service.ErrIsClosed) { ctx.APIErrorNotFound() } else if errors.Is(err, pull_service.ErrNoPermissionToMerge) { @@ -980,6 +980,8 @@ func MergePullRequest(ctx *context.APIContext) { ctx.APIError(http.StatusMethodNotAllowed, err) } else if asymkey_service.IsErrWontSign(err) { ctx.APIError(http.StatusMethodNotAllowed, err) + } else if errors.Is(err, pull_service.ErrHeadCommitsNotAllVerified) { + ctx.APIError(http.StatusMethodNotAllowed, err) } else { ctx.APIErrorInternal(err) } @@ -1173,7 +1175,7 @@ func parseCompareInfo(ctx *context.APIContext, compareParam string) (result *git return nil, nil } - return compareInfo, closer + return &compareInfo, closer } // UpdatePullRequest merge PR's baseBranch into headBranch @@ -1417,7 +1419,6 @@ func GetPullRequestCommits(ctx *context.APIContext) { return } - var compareInfo *git_service.CompareInfo baseGitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, pr.BaseRepo) if err != nil { ctx.APIErrorInternal(err) @@ -1425,6 +1426,7 @@ func GetPullRequestCommits(ctx *context.APIContext) { } defer closer.Close() + var compareInfo 
git_service.CompareInfo if pr.HasMerged { compareInfo, err = git_service.GetCompareInfo(ctx, pr.BaseRepo, pr.BaseRepo, baseGitRepo, git.RefName(pr.MergeBase), git.RefName(pr.GetGitHeadRefName()), false, false) } else { @@ -1550,7 +1552,7 @@ func GetPullRequestFiles(ctx *context.APIContext) { baseGitRepo := ctx.Repo.GitRepo - var compareInfo *git_service.CompareInfo + var compareInfo git_service.CompareInfo if pr.HasMerged { compareInfo, err = git_service.GetCompareInfo(ctx, pr.BaseRepo, pr.BaseRepo, baseGitRepo, git.RefName(pr.MergeBase), git.RefName(pr.GetGitHeadRefName()), false, false) } else { diff --git a/routers/api/v1/shared/action.go b/routers/api/v1/shared/action.go index 715e76c355..1b12023d7a 100644 --- a/routers/api/v1/shared/action.go +++ b/routers/api/v1/shared/action.go @@ -12,6 +12,7 @@ import ( repo_model "code.gitea.io/gitea/models/repo" user_model "code.gitea.io/gitea/models/user" "code.gitea.io/gitea/modules/git" + "code.gitea.io/gitea/modules/optional" "code.gitea.io/gitea/modules/setting" api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/webhook" @@ -27,8 +28,9 @@ import ( // ownerID != 0 and repoID != 0 undefined behavior // runID == 0 means all jobs // runID is used as an additional filter together with ownerID and repoID to only return jobs for the given run +// runAttemptID, when set, additionally limits the result to jobs of the specified run attempt. Only takes effect when runID > 0. 
// Access rights are checked at the API route level -func ListJobs(ctx *context.APIContext, ownerID, repoID, runID int64) { +func ListJobs(ctx *context.APIContext, ownerID, repoID, runID int64, runAttemptID optional.Option[int64]) { if ownerID != 0 && repoID != 0 { setting.PanicInDevOrTesting("ownerID and repoID should not be both set") } @@ -39,6 +41,9 @@ func ListJobs(ctx *context.APIContext, ownerID, repoID, runID int64) { RunID: runID, ListOptions: listOptions, } + if runID > 0 { + opts.RunAttemptID = runAttemptID + } for _, status := range ctx.FormStrings("status") { values, err := convertToInternal(status) if err != nil { @@ -178,7 +183,7 @@ func ListRuns(ctx *context.APIContext, ownerID, repoID int64) { } } - convertedRun, err := convert.ToActionWorkflowRun(ctx, repository, runs[i]) + convertedRun, err := convert.ToActionWorkflowRun(ctx, repository, runs[i], nil) if err != nil { ctx.APIErrorInternal(err) return diff --git a/routers/api/v1/user/action.go b/routers/api/v1/user/action.go index 573e2e4dd0..4de0b30d98 100644 --- a/routers/api/v1/user/action.go +++ b/routers/api/v1/user/action.go @@ -439,5 +439,5 @@ func ListWorkflowJobs(ctx *context.APIContext) { // "404": // "$ref": "#/responses/notFound" - shared.ListJobs(ctx, ctx.Doer.ID, 0, 0) + shared.ListJobs(ctx, ctx.Doer.ID, 0, 0, nil) } diff --git a/routers/common/actions.go b/routers/common/actions.go index 4eb7078db6..2b83e5d842 100644 --- a/routers/common/actions.go +++ b/routers/common/actions.go @@ -31,7 +31,8 @@ func DownloadActionsRunJobLogs(ctx *context.Base, ctxRepo *repo_model.Repository return util.NewNotExistErrorf("job not found") } - if curJob.TaskID == 0 { + taskID := curJob.EffectiveTaskID() + if taskID == 0 { return util.NewNotExistErrorf("job not started") } @@ -39,7 +40,7 @@ func DownloadActionsRunJobLogs(ctx *context.Base, ctxRepo *repo_model.Repository return fmt.Errorf("LoadRun: %w", err) } - task, err := actions_model.GetTaskByID(ctx, curJob.TaskID) + task, err := 
actions_model.GetTaskByID(ctx, taskID) if err != nil { return fmt.Errorf("GetTaskByID: %w", err) } diff --git a/routers/common/errpage.go b/routers/common/errpage.go index 07760bcd18..9baf7915e1 100644 --- a/routers/common/errpage.go +++ b/routers/common/errpage.go @@ -33,10 +33,6 @@ func renderServerErrorPage(w http.ResponseWriter, req *http.Request, respCode in } httpcache.SetCacheControlInHeader(w.Header(), &httpcache.CacheControlOptions{NoTransform: true}) - if setting.Security.XFrameOptions != "unset" { - w.Header().Set(`X-Frame-Options`, setting.Security.XFrameOptions) - } - tmplCtx := context.NewTemplateContextForWeb(reqctx.FromContext(req.Context()), req, middleware.Locale(w, req)) w.WriteHeader(respCode) diff --git a/routers/common/middleware.go b/routers/common/middleware.go index 39911e2548..3932a84b6d 100644 --- a/routers/common/middleware.go +++ b/routers/common/middleware.go @@ -28,6 +28,7 @@ func ProtocolMiddlewares() (handlers []any) { // the order is important handlers = append(handlers, ChiRoutePathHandler()) // make sure chi has correct paths handlers = append(handlers, RequestContextHandler()) // prepare the context and panic recovery + handlers = append(handlers, SecurityHeadersHandler()) if setting.ReverseProxyLimit > 0 && len(setting.ReverseProxyTrustedProxies) > 0 { handlers = append(handlers, ForwardedHeadersHandler(setting.ReverseProxyLimit, setting.ReverseProxyTrustedProxies)) @@ -48,6 +49,21 @@ func ProtocolMiddlewares() (handlers []any) { return handlers } +// SecurityHeadersHandler sets headers globally for every response that leaves Gitea. 
+func SecurityHeadersHandler() func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + if setting.Security.XContentTypeOptions != "unset" { + resp.Header().Set("X-Content-Type-Options", setting.Security.XContentTypeOptions) + } + if setting.Security.XFrameOptions != "unset" { + resp.Header().Set("X-Frame-Options", setting.Security.XFrameOptions) + } + next.ServeHTTP(resp, req) + }) + } +} + func RequestContextHandler() func(h http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(respOrig http.ResponseWriter, req *http.Request) { diff --git a/routers/web/devtest/mock_actions.go b/routers/web/devtest/mock_actions.go index 51c13113e5..a74efaf54e 100644 --- a/routers/web/devtest/mock_actions.go +++ b/routers/web/devtest/mock_actions.go @@ -4,6 +4,7 @@ package devtest import ( + "fmt" mathRand "math/rand/v2" "net/http" "slices" @@ -12,7 +13,9 @@ import ( "time" actions_model "code.gitea.io/gitea/models/actions" + user_model "code.gitea.io/gitea/models/user" "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/timeutil" "code.gitea.io/gitea/modules/util" "code.gitea.io/gitea/modules/web" "code.gitea.io/gitea/routers/web/repo/actions" @@ -59,13 +62,18 @@ func generateMockStepsLog(logCur actions.LogCursor, opts generateMockStepsLogOpt } func MockActionsView(ctx *context.Context) { - ctx.Data["RunID"] = ctx.PathParamInt64("run") + if runID := ctx.PathParamInt64("run"); runID == 0 { + ctx.Redirect("/repo-action-view/runs/10") + return + } ctx.Data["JobID"] = ctx.PathParamInt64("job") + ctx.Data["ActionsViewURL"] = ctx.Req.URL.Path ctx.HTML(http.StatusOK, "devtest/repo-action-view") } func MockActionsRunsJobs(ctx *context.Context) { runID := ctx.PathParamInt64("run") + attemptID := ctx.PathParamInt64("attempt") alignTime := func(v, unit int64) int64 { return (v + unit) / unit * unit @@ -74,16 +82,9 @@ func 
MockActionsRunsJobs(ctx *context.Context) { resp.State.Run.RepoID = 12345 resp.State.Run.TitleHTML = `mock run title link` resp.State.Run.Link = setting.AppSubURL + "/devtest/repo-action-view/runs/" + strconv.FormatInt(runID, 10) - resp.State.Run.Status = actions_model.StatusRunning.String() - resp.State.Run.CanCancel = runID == 10 - resp.State.Run.CanApprove = runID == 20 - resp.State.Run.CanRerun = runID == 30 - resp.State.Run.CanRerunFailed = runID == 30 resp.State.Run.CanDeleteArtifact = true resp.State.Run.WorkflowID = "workflow-id" resp.State.Run.WorkflowLink = "./workflow-link" - resp.State.Run.Duration = "1h 23m 45s" - resp.State.Run.TriggeredAt = time.Now().Add(-time.Hour).Unix() resp.State.Run.TriggerEvent = "push" resp.State.Run.Commit = actions.ViewCommit{ ShortSha: "ccccdddd", @@ -98,6 +99,88 @@ func MockActionsRunsJobs(ctx *context.Context) { IsDeleted: false, }, } + now := time.Now() + currentAttemptNum := int64(1) + if attemptID > 0 { + currentAttemptNum = attemptID + } + user2 := &user_model.User{Name: "user2"} + user3 := &user_model.User{Name: "user3"} + attempts := []*actions_model.ActionRunAttempt{{ + Attempt: 1, + Status: actions_model.StatusSuccess, + Created: timeutil.TimeStamp(now.Add(-time.Hour).Unix()), + TriggerUserID: 2, + TriggerUser: user2, + }} + if runID == 10 { + attempts = []*actions_model.ActionRunAttempt{ + { + Attempt: 3, + Status: actions_model.StatusSuccess, + Created: timeutil.TimeStamp(alignTime(now.Add(-time.Hour).Unix(), 3600)), + TriggerUserID: 2, + TriggerUser: user2, + }, + { + Attempt: 2, + Status: actions_model.StatusFailure, + Created: timeutil.TimeStamp(alignTime(now.Add(-2*time.Hour).Unix(), 3600)), + TriggerUserID: 1, + TriggerUser: user3, + }, + { + Attempt: 1, + Status: actions_model.StatusSuccess, + Created: timeutil.TimeStamp(alignTime(now.Add(-3*time.Hour).Unix(), 3600)), + TriggerUserID: 2, + TriggerUser: user2, + }, + } + if attemptID == 0 { + currentAttemptNum = 3 + } + } + + latestAttempt := attempts[0] + 
resp.State.Run.RunAttempt = currentAttemptNum + resp.State.Run.Done = latestAttempt.Status.IsDone() + resp.State.Run.Status = latestAttempt.Status.String() + resp.State.Run.Duration = "1h 23m 45s" + resp.State.Run.TriggeredAt = latestAttempt.Created.AsTime().Unix() + resp.State.Run.ViewLink = resp.State.Run.Link + for _, attempt := range attempts { + link := resp.State.Run.Link + if attempt.Attempt != latestAttempt.Attempt { + link = fmt.Sprintf("%s/attempts/%d", resp.State.Run.Link, attempt.Attempt) + } + current := attempt.Attempt == currentAttemptNum + if current { + resp.State.Run.Status = attempt.Status.String() + resp.State.Run.Done = attempt.Status.IsDone() + resp.State.Run.TriggeredAt = attempt.Created.AsTime().Unix() + if attempt.Attempt != latestAttempt.Attempt { + resp.State.Run.ViewLink = link + } + } + resp.State.Run.Attempts = append(resp.State.Run.Attempts, &actions.ViewRunAttempt{ + Attempt: attempt.Attempt, + Status: attempt.Status.String(), + Done: attempt.Status.IsDone(), + Link: link, + Current: current, + Latest: attempt.Attempt == latestAttempt.Attempt, + TriggeredAt: attempt.Created.AsTime().Unix(), + TriggerUserName: attempt.TriggerUser.GetDisplayName(), + TriggerUserLink: attempt.TriggerUser.HomeLink(), + }) + } + isLatestAttempt := currentAttemptNum == latestAttempt.Attempt + resp.State.Run.CanCancel = runID == 10 && isLatestAttempt + resp.State.Run.CanApprove = runID == 20 && isLatestAttempt + resp.State.Run.CanRerun = runID == 30 && isLatestAttempt + resp.State.Run.CanRerunFailed = runID == 30 && isLatestAttempt + resp.Artifacts = append(resp.Artifacts, &actions.ArtifactsViewItem{ Name: "artifact-a", Size: 100 * 1024, @@ -123,8 +206,13 @@ func MockActionsRunsJobs(ctx *context.Context) { ExpiresUnix: 0, }) + jobLink := func(jobID int64) string { + return fmt.Sprintf("%s/jobs/%d", resp.State.Run.Link, jobID) + } + resp.State.Run.Jobs = append(resp.State.Run.Jobs, &actions.ViewJob{ ID: runID * 10, + Link: jobLink(runID * 10), JobID: 
"job-100", Name: "job 100 (testsubname)", Status: actions_model.StatusRunning.String(), @@ -133,6 +221,7 @@ func MockActionsRunsJobs(ctx *context.Context) { }) resp.State.Run.Jobs = append(resp.State.Run.Jobs, &actions.ViewJob{ ID: runID*10 + 1, + Link: jobLink(runID*10 + 1), JobID: "job-101", Name: "job 101", Status: actions_model.StatusWaiting.String(), @@ -142,6 +231,7 @@ func MockActionsRunsJobs(ctx *context.Context) { }) resp.State.Run.Jobs = append(resp.State.Run.Jobs, &actions.ViewJob{ ID: runID*10 + 2, + Link: jobLink(runID*10 + 2), JobID: "job-102", Name: "ULTRA LOOOOOOOOOOOONG job name 102 that exceeds the limit", Status: actions_model.StatusFailure.String(), @@ -151,6 +241,7 @@ func MockActionsRunsJobs(ctx *context.Context) { }) resp.State.Run.Jobs = append(resp.State.Run.Jobs, &actions.ViewJob{ ID: runID*10 + 3, + Link: jobLink(runID*10 + 3), JobID: "job-103", Name: "job 103", Status: actions_model.StatusCancelled.String(), @@ -162,8 +253,10 @@ func MockActionsRunsJobs(ctx *context.Context) { // add more jobs to a run for UI testing if resp.State.Run.CanCancel { for i := range 10 { + jobID := runID*1000 + int64(i) resp.State.Run.Jobs = append(resp.State.Run.Jobs, &actions.ViewJob{ - ID: runID*1000 + int64(i), + ID: jobID, + Link: jobLink(jobID), JobID: "job-dup-test-" + strconv.Itoa(i), Name: "job dup test " + strconv.Itoa(i), Status: actions_model.StatusSuccess.String(), @@ -184,6 +277,14 @@ func fillViewRunResponseCurrentJob(ctx *context.Context, resp *actions.ViewRespo return } + for _, job := range resp.State.Run.Jobs { + if job.ID == jobID { + resp.State.CurrentJob.Title = job.Name + resp.State.CurrentJob.Detail = job.Status + break + } + } + req := web.GetForm(ctx).(*actions.ViewRequest) var mockLogOptions []generateMockStepsLogOptions resp.State.CurrentJob.Steps = append(resp.State.CurrentJob.Steps, &actions.ViewJobStep{ diff --git a/routers/web/misc/misc.go b/routers/web/misc/misc.go index a50d9130ac..0b939ee435 100644 --- 
a/routers/web/misc/misc.go +++ b/routers/web/misc/misc.go @@ -7,9 +7,12 @@ import ( "net/http" "path" "strconv" + "strings" "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/httpcache" + "code.gitea.io/gitea/modules/httplib" + "code.gitea.io/gitea/modules/json" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" @@ -17,6 +20,29 @@ import ( "code.gitea.io/gitea/services/context" ) +func SiteManifest(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/manifest+json") + if httpcache.HandleGenericETagPublicCache(req, w, "", &setting.AppStartTime) { + return + } + if req.Method == http.MethodHead { + return + } + + ctx := req.Context() + absoluteAssetURL := strings.TrimSuffix(httplib.MakeAbsoluteURL(ctx, setting.StaticURLPrefix), "/") + manifest := map[string]any{ + "name": setting.AppName, + "short_name": setting.AppName, + "start_url": httplib.GuessCurrentAppURL(ctx), + "icons": []map[string]string{ + {"src": absoluteAssetURL + "/assets/img/logo.png", "type": "image/png", "sizes": "512x512"}, + {"src": absoluteAssetURL + "/assets/img/logo.svg", "type": "image/svg+xml", "sizes": "512x512"}, + }, + } + _ = json.NewEncoder(w).Encode(manifest) +} + func SSHInfo(rw http.ResponseWriter, req *http.Request) { if !git.DefaultFeatures().SupportProcReceive { rw.WriteHeader(http.StatusNotFound) diff --git a/routers/web/repo/actions/actions.go b/routers/web/repo/actions/actions.go index 644a53f28a..a6a6e539b9 100644 --- a/routers/web/repo/actions/actions.go +++ b/routers/web/repo/actions/actions.go @@ -311,7 +311,7 @@ func prepareWorkflowList(ctx *context.Context, workflows []WorkflowInfo) { if !run.Status.In(actions_model.StatusWaiting, actions_model.StatusRunning) { continue } - jobs, err := actions_model.GetRunJobsByRunID(ctx, run.ID) + jobs, err := actions_model.GetLatestAttemptJobsByRepoAndRunID(ctx, run.RepoID, run.ID) if err != nil { ctx.ServerError("GetRunJobsByRunID", 
err) return diff --git a/routers/web/repo/actions/view.go b/routers/web/repo/actions/view.go index fb4dfa9603..b5b72b4f12 100644 --- a/routers/web/repo/actions/view.go +++ b/routers/web/repo/actions/view.go @@ -34,7 +34,6 @@ import ( "code.gitea.io/gitea/routers/common" actions_service "code.gitea.io/gitea/services/actions" context_module "code.gitea.io/gitea/services/context" - notify_service "code.gitea.io/gitea/services/notify" "github.com/nektos/act/pkg/model" ) @@ -166,7 +165,7 @@ func resolveCurrentRunForView(ctx *context_module.Context) *actions_model.Action return nil } if run != nil { - jobs, err := actions_model.GetRunJobsByRunID(ctx, run.ID) + jobs, err := actions_model.GetLatestAttemptJobsByRepoAndRunID(ctx, run.RepoID, run.ID) if err != nil { ctx.ServerError("GetRunJobsByRunID", err) return nil @@ -203,9 +202,23 @@ func View(ctx *context_module.Context) { if ctx.Written() { return } - ctx.Data["RunID"] = run.ID - ctx.Data["JobID"] = ctx.PathParamInt64("job") // it can be 0 when no job (e.g.: run summary view) - ctx.Data["ActionsURL"] = ctx.Repo.RepoLink + "/actions" + run.Repo = ctx.Repo.Repository + + jobID := ctx.PathParamInt64("job") + ctx.Data["JobID"] = jobID // it can be 0 when no job (e.g.: run summary view) + + attemptNum := ctx.PathParamInt64("attempt") + + // ActionsViewURL is the endpoint for viewing a run (job summary), a job, or a job attempt. + // Its POST method handler can provide the state data for the frontend rendering.
+ switch { + case attemptNum > 0: + ctx.Data["ActionsViewURL"] = fmt.Sprintf("%s/attempts/%d", run.Link(), attemptNum) + case jobID > 0: + ctx.Data["ActionsViewURL"] = fmt.Sprintf("%s/jobs/%d", run.Link(), jobID) + default: + ctx.Data["ActionsViewURL"] = run.Link() + } ctx.HTML(http.StatusOK, tplViewActions) } @@ -259,22 +272,30 @@ type ViewResponse struct { State struct { Run struct { - RepoID int64 `json:"repoId"` - Link string `json:"link"` - Title string `json:"title"` - TitleHTML template.HTML `json:"titleHTML"` - Status string `json:"status"` - CanCancel bool `json:"canCancel"` - CanApprove bool `json:"canApprove"` // the run needs an approval and the doer has permission to approve - CanRerun bool `json:"canRerun"` - CanRerunFailed bool `json:"canRerunFailed"` - CanDeleteArtifact bool `json:"canDeleteArtifact"` - Done bool `json:"done"` - WorkflowID string `json:"workflowID"` - WorkflowLink string `json:"workflowLink"` - IsSchedule bool `json:"isSchedule"` - Jobs []*ViewJob `json:"jobs"` - Commit ViewCommit `json:"commit"` + RepoID int64 `json:"repoId"` + // Link is the canonical HTML URL of the run, e.g. "/owner/repo/actions/runs/123". + // Used as the base for composing sub-resource URLs (cancel, rerun, artifacts, jobs) that are not attempt-scoped. + Link string `json:"link"` + // ViewLink is the attempt-aware URL for navigation, e.g. "/owner/repo/actions/runs/123" for the latest attempt + // or "/owner/repo/actions/runs/123/attempts/2" for a historical attempt. + // Use this when the target should reflect the currently-viewed attempt. 
+ ViewLink string `json:"viewLink"` + Title string `json:"title"` + TitleHTML template.HTML `json:"titleHTML"` + Status string `json:"status"` + CanCancel bool `json:"canCancel"` + CanApprove bool `json:"canApprove"` // the run needs an approval and the doer has permission to approve + CanRerun bool `json:"canRerun"` + CanRerunFailed bool `json:"canRerunFailed"` + CanDeleteArtifact bool `json:"canDeleteArtifact"` + Done bool `json:"done"` + WorkflowID string `json:"workflowID"` + WorkflowLink string `json:"workflowLink"` + IsSchedule bool `json:"isSchedule"` + RunAttempt int64 `json:"runAttempt"` + Attempts []*ViewRunAttempt `json:"attempts"` + Jobs []*ViewJob `json:"jobs"` + Commit ViewCommit `json:"commit"` // Summary view: run duration and trigger time/event Duration string `json:"duration"` TriggeredAt int64 `json:"triggeredAt"` // unix seconds for relative time @@ -293,6 +314,7 @@ type ViewResponse struct { type ViewJob struct { ID int64 `json:"id"` + Link string `json:"link"` JobID string `json:"jobId,omitempty"` Name string `json:"name"` Status string `json:"status"` @@ -301,6 +323,18 @@ type ViewJob struct { Needs []string `json:"needs,omitempty"` } +type ViewRunAttempt struct { + Attempt int64 `json:"attempt"` + Status string `json:"status"` + Done bool `json:"done"` + Link string `json:"link"` + Current bool `json:"current"` + Latest bool `json:"latest"` + TriggeredAt int64 `json:"triggeredAt"` + TriggerUserName string `json:"triggerUserName"` + TriggerUserLink string `json:"triggerUserLink"` +} + type ViewCommit struct { ShortSha string `json:"shortSHA"` Link string `json:"link"` @@ -338,24 +372,8 @@ type ViewStepLogLine struct { Timestamp float64 `json:"timestamp"` } -func getActionsViewArtifacts(ctx context.Context, repoID, runID int64) (artifactsViewItems []*ArtifactsViewItem, err error) { - artifacts, err := actions_model.ListUploadedArtifactsMeta(ctx, repoID, runID) - if err != nil { - return nil, err - } - for _, art := range artifacts { - 
artifactsViewItems = append(artifactsViewItems, &ArtifactsViewItem{ - Name: art.ArtifactName, - Size: art.FileSize, - Status: util.Iif(art.Status == actions_model.ArtifactStatusExpired, "expired", "completed"), - ExpiresUnix: int64(art.ExpiredUnix), - }) - } - return artifactsViewItems, nil -} - func ViewPost(ctx *context_module.Context) { - run, jobs := getCurrentRunJobsByPathParam(ctx) + run, attempt, jobs := getCurrentRunJobsByPathParam(ctx) if ctx.Written() { return } @@ -365,7 +383,7 @@ func ViewPost(ctx *context_module.Context) { } resp := &ViewResponse{} - fillViewRunResponseSummary(ctx, resp, run, jobs) + fillViewRunResponseSummary(ctx, resp, run, attempt, jobs) if ctx.Written() { return } @@ -376,23 +394,33 @@ func ViewPost(ctx *context_module.Context) { ctx.JSON(http.StatusOK, resp) } -func fillViewRunResponseSummary(ctx *context_module.Context, resp *ViewResponse, run *actions_model.ActionRun, jobs []*actions_model.ActionRunJob) { - var err error - resp.Artifacts, err = getActionsViewArtifacts(ctx, ctx.Repo.Repository.ID, run.ID) - if err != nil { - ctx.ServerError("getActionsViewArtifacts", err) - return - } +func fillViewRunResponseSummary(ctx *context_module.Context, resp *ViewResponse, run *actions_model.ActionRun, attempt *actions_model.ActionRunAttempt, jobs []*actions_model.ActionRunJob) { + // Latest when the run has no attempts yet (legacy) or the viewed attempt is the run's latest. 
+ isLatestAttempt := run.LatestAttemptID == 0 || (attempt != nil && attempt.ID == run.LatestAttemptID) resp.State.Run.RepoID = ctx.Repo.Repository.ID // the title for the "run" is from the commit message resp.State.Run.Title = run.Title resp.State.Run.TitleHTML = templates.NewRenderUtils(ctx).RenderCommitMessage(run.Title, ctx.Repo.Repository) resp.State.Run.Link = run.Link() - resp.State.Run.CanCancel = !run.Status.IsDone() && ctx.Repo.CanWrite(unit.TypeActions) - resp.State.Run.CanApprove = run.NeedApproval && ctx.Repo.CanWrite(unit.TypeActions) - resp.State.Run.CanRerun = run.Status.IsDone() && ctx.Repo.CanWrite(unit.TypeActions) - resp.State.Run.CanDeleteArtifact = run.Status.IsDone() && ctx.Repo.CanWrite(unit.TypeActions) + resp.State.Run.ViewLink = getRunViewLink(run, attempt) + resp.State.Run.Attempts = make([]*ViewRunAttempt, 0) + if attempt != nil { + resp.State.Run.RunAttempt = attempt.Attempt + resp.State.Run.Status = attempt.Status.String() + resp.State.Run.Done = attempt.Status.IsDone() + resp.State.Run.Duration = attempt.Duration().String() + resp.State.Run.TriggeredAt = attempt.Created.AsTime().Unix() + } else { + resp.State.Run.Status = run.Status.String() + resp.State.Run.Done = run.Status.IsDone() + resp.State.Run.Duration = run.Duration().String() + resp.State.Run.TriggeredAt = run.Created.AsTime().Unix() + } + resp.State.Run.CanCancel = isLatestAttempt && !resp.State.Run.Done && ctx.Repo.CanWrite(unit.TypeActions) + resp.State.Run.CanApprove = isLatestAttempt && run.NeedApproval && ctx.Repo.CanWrite(unit.TypeActions) + resp.State.Run.CanRerun = isLatestAttempt && resp.State.Run.Done && ctx.Repo.CanWrite(unit.TypeActions) + resp.State.Run.CanDeleteArtifact = resp.State.Run.Done && ctx.Repo.CanWrite(unit.TypeActions) if resp.State.Run.CanRerun { for _, job := range jobs { if job.Status == actions_model.StatusFailure || job.Status == actions_model.StatusCancelled { @@ -401,15 +429,16 @@ func fillViewRunResponseSummary(ctx *context_module.Context, 
resp *ViewResponse, } } } - resp.State.Run.Done = run.Status.IsDone() resp.State.Run.WorkflowID = run.WorkflowID - resp.State.Run.WorkflowLink = run.WorkflowLink() + if isLatestAttempt { + resp.State.Run.WorkflowLink = run.WorkflowLink() + } resp.State.Run.IsSchedule = run.IsSchedule() resp.State.Run.Jobs = make([]*ViewJob, 0, len(jobs)) // marshal to '[]' instead fo 'null' in json - resp.State.Run.Status = run.Status.String() for _, v := range jobs { resp.State.Run.Jobs = append(resp.State.Run.Jobs, &ViewJob{ ID: v.ID, + Link: fmt.Sprintf("%s/jobs/%d", run.Link(), v.ID), JobID: v.JobID, Name: v.Name, Status: v.Status.String(), @@ -419,6 +448,29 @@ func fillViewRunResponseSummary(ctx *context_module.Context, resp *ViewResponse, }) } + attempts, err := actions_model.ListRunAttemptsByRunID(ctx, run.ID) + if err != nil { + ctx.ServerError("ListRunAttemptsByRunID", err) + return + } + if err := attempts.LoadTriggerUser(ctx); err != nil { + ctx.ServerError("LoadTriggerUser", err) + return + } + for _, runAttempt := range attempts { + resp.State.Run.Attempts = append(resp.State.Run.Attempts, &ViewRunAttempt{ + Attempt: runAttempt.Attempt, + Status: runAttempt.Status.String(), + Done: runAttempt.Status.IsDone(), + Link: getRunViewLink(run, runAttempt), + Current: runAttempt.ID == attempt.ID, + Latest: runAttempt.ID == run.LatestAttemptID, + TriggeredAt: runAttempt.Created.AsTime().Unix(), + TriggerUserName: runAttempt.TriggerUser.GetDisplayName(), + TriggerUserLink: runAttempt.TriggerUser.HomeLink(), + }) + } + pusher := ViewUser{ DisplayName: run.TriggerUser.GetDisplayName(), Link: run.TriggerUser.HomeLink(), @@ -443,9 +495,27 @@ func fillViewRunResponseSummary(ctx *context_module.Context, resp *ViewResponse, Pusher: pusher, Branch: branch, } - resp.State.Run.Duration = run.Duration().String() - resp.State.Run.TriggeredAt = run.Created.AsTime().Unix() resp.State.Run.TriggerEvent = run.TriggerEvent + + // Legacy runs (LatestAttemptID == 0) have no attempt; their artifacts 
all share run_attempt_id=0, + // so passing 0 here scopes to this run's legacy artifacts only. + var runAttemptID int64 + if attempt != nil { + runAttemptID = attempt.ID + } + arts, err := actions_model.ListUploadedArtifactsMetaByRunAttempt(ctx, ctx.Repo.Repository.ID, run.ID, runAttemptID) + if err != nil { + ctx.ServerError("ListUploadedArtifactsMetaByRunAttempt", err) + return + } + resp.Artifacts = make([]*ArtifactsViewItem, 0, len(arts)) + for _, art := range arts { + resp.Artifacts = append(resp.Artifacts, &ArtifactsViewItem{ + Name: art.ArtifactName, + Size: art.FileSize, + Status: util.Iif(art.Status == actions_model.ArtifactStatusExpired, "expired", "completed"), + }) + } } func fillViewRunResponseCurrentJob(ctx *context_module.Context, resp *ViewResponse, run *actions_model.ActionRun, jobs []*actions_model.ActionRunJob) { @@ -459,9 +529,9 @@ func fillViewRunResponseCurrentJob(ctx *context_module.Context, resp *ViewRespon } var task *actions_model.ActionTask - if current.TaskID > 0 { + if effectiveTaskID := current.EffectiveTaskID(); effectiveTaskID > 0 { var err error - task, err = actions_model.GetTaskByID(ctx, current.TaskID) + task, err = actions_model.GetTaskByID(ctx, effectiveTaskID) if err != nil { ctx.ServerError("actions_model.GetTaskByID", err) return @@ -589,13 +659,24 @@ func checkRunRerunAllowed(ctx *context_module.Context, run *actions_model.Action return true } +func checkLatestAttempt(ctx *context_module.Context, run *actions_model.ActionRun, attempt *actions_model.ActionRunAttempt) bool { + if attempt != nil && run.LatestAttemptID != attempt.ID { + ctx.NotFound(nil) + return false + } + return true +} + // Rerun will rerun jobs in the given run // If jobIDStr is a blank string, it means rerun all jobs func Rerun(ctx *context_module.Context) { - run, jobs := getCurrentRunJobsByPathParam(ctx) + run, attempt, jobs := getCurrentRunJobsByPathParam(ctx) if ctx.Written() { return } + if !checkLatestAttempt(ctx, run, attempt) { + return + } if 
!checkRunRerunAllowed(ctx, run) { return } @@ -608,35 +689,48 @@ func Rerun(ctx *context_module.Context) { var jobsToRerun []*actions_model.ActionRunJob if currentJob != nil { - jobsToRerun = actions_service.GetAllRerunJobs(currentJob, jobs) - } else { - jobsToRerun = jobs + jobsToRerun = []*actions_model.ActionRunJob{currentJob} } - if err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, jobsToRerun); err != nil { - ctx.ServerError("RerunWorkflowRunJobs", err) + if _, err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, ctx.Doer, jobsToRerun); err != nil { + handleWorkflowRerunError(ctx, err) return } - ctx.JSONOK() + ctx.JSONRedirect(run.Link()) } // RerunFailed reruns all failed jobs in the given run func RerunFailed(ctx *context_module.Context) { - run, jobs := getCurrentRunJobsByPathParam(ctx) + run, attempt, jobs := getCurrentRunJobsByPathParam(ctx) if ctx.Written() { return } + if !checkLatestAttempt(ctx, run, attempt) { + return + } if !checkRunRerunAllowed(ctx, run) { return } - if err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, actions_service.GetFailedRerunJobs(jobs)); err != nil { - ctx.ServerError("RerunWorkflowRunJobs", err) + if _, err := actions_service.RerunWorkflowRunJobs(ctx, ctx.Repo.Repository, run, ctx.Doer, actions_service.GetFailedJobsForRerun(jobs)); err != nil { + handleWorkflowRerunError(ctx, err) return } - ctx.JSONOK() + ctx.JSONRedirect(run.Link()) +} + +func handleWorkflowRerunError(ctx *context_module.Context, err error) { + if errors.Is(err, util.ErrAlreadyExist) { + ctx.JSON(http.StatusConflict, map[string]any{"message": err.Error()}) + return + } + if errors.Is(err, util.ErrInvalidArgument) { + ctx.JSON(http.StatusBadRequest, map[string]any{"message": err.Error()}) + return + } + ctx.ServerError("RerunWorkflowRunJobs", err) } func Logs(ctx *context_module.Context) { @@ -654,10 +748,13 @@ func Logs(ctx *context_module.Context) { } func Cancel(ctx 
*context_module.Context) { - run, jobs := getCurrentRunJobsByPathParam(ctx) + run, attempt, jobs := getCurrentRunJobsByPathParam(ctx) if ctx.Written() { return } + if !checkLatestAttempt(ctx, run, attempt) { + return + } var updatedJobs []*actions_model.ActionRunJob @@ -676,13 +773,9 @@ func Cancel(ctx *context_module.Context) { actions_service.CreateCommitStatusForRunJobs(ctx, run, jobs...) actions_service.EmitJobsIfReadyByJobs(updatedJobs) - for _, job := range updatedJobs { - _ = job.LoadAttributes(ctx) - notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil) - } + actions_service.NotifyWorkflowJobsStatusUpdate(ctx, updatedJobs...) if len(updatedJobs) > 0 { - job := updatedJobs[0] - actions_service.NotifyWorkflowRunStatusUpdateWithReload(ctx, job) + actions_service.NotifyWorkflowRunStatusUpdateWithReload(ctx, run.RepoID, run.ID) } ctx.JSONOK() } @@ -692,78 +785,14 @@ func Approve(ctx *context_module.Context) { if ctx.Written() { return } - approveRuns(ctx, []int64{run.ID}) - if ctx.Written() { - return - } - - ctx.JSONOK() -} - -func approveRuns(ctx *context_module.Context, runIDs []int64) { - doer := ctx.Doer - repo := ctx.Repo.Repository - - updatedJobs := make([]*actions_model.ActionRunJob, 0) - runMap := make(map[int64]*actions_model.ActionRun, len(runIDs)) - runJobs := make(map[int64][]*actions_model.ActionRunJob, len(runIDs)) - - err := db.WithTx(ctx, func(ctx context.Context) (err error) { - for _, runID := range runIDs { - run, err := actions_model.GetRunByRepoAndID(ctx, repo.ID, runID) - if err != nil { - return err - } - runMap[run.ID] = run - run.Repo = repo - run.NeedApproval = false - run.ApprovedBy = doer.ID - if err := actions_model.UpdateRun(ctx, run, "need_approval", "approved_by"); err != nil { - return err - } - jobs, err := actions_model.GetRunJobsByRunID(ctx, run.ID) - if err != nil { - return err - } - runJobs[run.ID] = jobs - for _, job := range jobs { - job.Status, err = 
actions_service.PrepareToStartJobWithConcurrency(ctx, job) - if err != nil { - return err - } - if job.Status == actions_model.StatusWaiting { - n, err := actions_model.UpdateRunJob(ctx, job, nil, "status") - if err != nil { - return err - } - if n > 0 { - updatedJobs = append(updatedJobs, job) - } - } - } - } - return nil - }) - if err != nil { - ctx.NotFoundOrServerError("approveRuns", func(err error) bool { + if err := actions_service.ApproveRuns(ctx, ctx.Repo.Repository, ctx.Doer, []int64{run.ID}); err != nil { + ctx.NotFoundOrServerError("ApproveRuns", func(err error) bool { return errors.Is(err, util.ErrNotExist) }, err) return } - for runID, run := range runMap { - actions_service.CreateCommitStatusForRunJobs(ctx, run, runJobs[runID]...) - } - - if len(updatedJobs) > 0 { - job := updatedJobs[0] - actions_service.NotifyWorkflowRunStatusUpdateWithReload(ctx, job) - } - - for _, job := range updatedJobs { - _ = job.LoadAttributes(ctx) - notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil) - } + ctx.JSONOK() } func Delete(ctx *context_module.Context) { @@ -785,28 +814,108 @@ func Delete(ctx *context_module.Context) { ctx.JSONOK() } -// getRunJobs loads the run and its jobs for runID +func getRunViewLink(run *actions_model.ActionRun, attempt *actions_model.ActionRunAttempt) string { + if attempt == nil || run.LatestAttemptID == attempt.ID { + return run.Link() + } + return fmt.Sprintf("%s/attempts/%d", run.Link(), attempt.Attempt) +} + +// getCurrentRunJobsByPathParam resolves the current run view context from path parameters, including the run, optional attempt, and jobs to render. // Any error will be written to the ctx, empty jobs will also result in 404 error, then the return values are all nil. 
-func getCurrentRunJobsByPathParam(ctx *context_module.Context) (*actions_model.ActionRun, []*actions_model.ActionRunJob) { +func getCurrentRunJobsByPathParam(ctx *context_module.Context) (*actions_model.ActionRun, *actions_model.ActionRunAttempt, []*actions_model.ActionRunJob) { run := getCurrentRunByPathParam(ctx) if ctx.Written() { - return nil, nil + return nil, nil, nil } run.Repo = ctx.Repo.Repository - jobs, err := actions_model.GetRunJobsByRunID(ctx, run.ID) + + var err error + var selectedJob *actions_model.ActionRunJob + if ctx.PathParam("job") != "" { + jobID := ctx.PathParamInt64("job") + selectedJob, err = actions_model.GetRunJobByRunAndID(ctx, run.ID, jobID) + if err != nil { + ctx.NotFoundOrServerError("GetRunJobByRepoAndID", func(err error) bool { + return errors.Is(err, util.ErrNotExist) + }, err) + return nil, nil, nil + } + } + + // Resolve the attempt to display. + // Priority: explicit path param (/attempts/:num) > job's attempt (when navigating to a specific job) > latest attempt. + // attempt may be nil for legacy runs that pre-date ActionRunAttempt; callers must handle that case. + attemptNum := ctx.PathParamInt64("attempt") + var attempt *actions_model.ActionRunAttempt + switch { + case attemptNum > 0: + // Explicit attempt number in the URL — user is viewing a historical attempt. + attempt, err = actions_model.GetRunAttemptByRunIDAndAttemptNum(ctx, run.ID, attemptNum) + if err != nil { + ctx.NotFoundOrServerError("GetRunAttemptByRunIDAndAttempt", func(err error) bool { + return errors.Is(err, util.ErrNotExist) + }, err) + return nil, nil, nil + } + case selectedJob != nil && selectedJob.RunAttemptID > 0: + // No explicit attempt in the URL, but the requested job belongs to a known attempt — resolve via the job. 
+ attempt, err = actions_model.GetRunAttemptByRepoAndID(ctx, selectedJob.RepoID, selectedJob.RunAttemptID) + if err != nil { + ctx.NotFoundOrServerError("GetRunAttemptByRepoAndID", func(err error) bool { + return errors.Is(err, util.ErrNotExist) + }, err) + return nil, nil, nil + } + default: + // No attempt context at all — show the latest attempt (nil for legacy runs). + attempt, _, err = run.GetLatestAttempt(ctx) + if err != nil { + ctx.NotFoundOrServerError("GetLatestAttempt", func(err error) bool { + return errors.Is(err, util.ErrNotExist) + }, err) + return nil, nil, nil + } + } + + // Resolve the jobs for the resolved attempt. + // When attempt is nil (legacy run or legacy job), jobs are stored with run_attempt_id=0. + var resolvedAttemptID int64 + if attempt != nil { + resolvedAttemptID = attempt.ID + } + jobs, err := actions_model.GetRunJobsByRunAndAttemptID(ctx, run.ID, resolvedAttemptID) if err != nil { - ctx.ServerError("GetRunJobsByRunID", err) - return nil, nil + ctx.ServerError("get current jobs", err) + return nil, nil, nil } if len(jobs) == 0 { ctx.NotFound(nil) - return nil, nil + return nil, nil, nil } for _, job := range jobs { job.Run = run } - return run, jobs + return run, attempt, jobs +} + +// resolveArtifactAttemptIDFromQuery resolves the run_attempt_id used to scope artifact lookups. +// If the `attempt` query parameter is present and valid, it returns the matching attempt's ID. +// Otherwise it falls back to run.LatestAttemptID, which is 0 only for legacy runs created before ActionRunAttempt existed. 
+func resolveArtifactAttemptIDFromQuery(ctx *context_module.Context, run *actions_model.ActionRun) (int64, error) { + if ctx.FormString("attempt") == "" { + return run.LatestAttemptID, nil + } + attemptNum := ctx.FormInt64("attempt") + if attemptNum <= 0 { + return 0, util.ErrNotExist + } + attempt, err := actions_model.GetRunAttemptByRunIDAndAttemptNum(ctx, run.ID, attemptNum) + if err != nil { + return 0, err + } + return attempt.ID, nil } func ArtifactsDeleteView(ctx *context_module.Context) { @@ -814,9 +923,16 @@ func ArtifactsDeleteView(ctx *context_module.Context) { if ctx.Written() { return } + resolvedAttemptID, err := resolveArtifactAttemptIDFromQuery(ctx, run) + if err != nil { + ctx.NotFoundOrServerError("resolveArtifactAttemptIDFromQuery", func(err error) bool { + return errors.Is(err, util.ErrNotExist) + }, err) + return + } artifactName := ctx.PathParam("artifact_name") - if err := actions_model.SetArtifactNeedDelete(ctx, run.ID, artifactName); err != nil { - ctx.ServerError("SetArtifactNeedDelete", err) + if err := actions_model.SetArtifactNeedDeleteByRunAttempt(ctx, run.ID, resolvedAttemptID, artifactName); err != nil { + ctx.ServerError("SetArtifactNeedDeleteByRunAttempt", err) return } ctx.JSON(http.StatusOK, struct{}{}) @@ -827,14 +943,17 @@ func ArtifactsDownloadView(ctx *context_module.Context) { if ctx.Written() { return } - - artifactName := ctx.PathParam("artifact_name") - artifacts, err := db.Find[actions_model.ActionArtifact](ctx, actions_model.FindArtifactsOptions{ - RunID: run.ID, - ArtifactName: artifactName, - }) + resolvedAttemptID, err := resolveArtifactAttemptIDFromQuery(ctx, run) if err != nil { - ctx.ServerError("FindArtifacts", err) + ctx.NotFoundOrServerError("resolveArtifactAttemptIDFromQuery", func(err error) bool { + return errors.Is(err, util.ErrNotExist) + }, err) + return + } + artifactName := ctx.PathParam("artifact_name") + artifacts, err := actions_model.GetArtifactsByRunAttemptAndName(ctx, run.ID, resolvedAttemptID, 
artifactName) + if err != nil { + ctx.ServerError("GetArtifactsByRunAttemptAndName", err) return } if len(artifacts) == 0 { @@ -931,8 +1050,10 @@ func ApproveAllChecks(ctx *context_module.Context) { return } - approveRuns(ctx, runIDs) - if ctx.Written() { + if err := actions_service.ApproveRuns(ctx, repo, ctx.Doer, runIDs); err != nil { + ctx.NotFoundOrServerError("ApproveRuns", func(err error) bool { + return errors.Is(err, util.ErrNotExist) + }, err) return } diff --git a/routers/web/repo/compare.go b/routers/web/repo/compare.go index 285f3968d4..c0833452ea 100644 --- a/routers/web/repo/compare.go +++ b/routers/web/repo/compare.go @@ -421,8 +421,7 @@ func ParseCompareInfo(ctx *context.Context) *git_service.CompareInfo { } else { ctx.Data["BeforeCommitID"] = compareInfo.MergeBase } - - return compareInfo + return &compareInfo } func prepareNewPullRequestTitleContent(ci *git_service.CompareInfo, commits []*git_model.SignCommitWithStatuses) (title, content string) { diff --git a/routers/web/repo/issue_view.go b/routers/web/repo/issue_view.go index f678f83878..778720ebda 100644 --- a/routers/web/repo/issue_view.go +++ b/routers/web/repo/issue_view.go @@ -386,10 +386,13 @@ func ViewIssue(ctx *context.Context) { prepareIssueViewSidebarTimeTracker, prepareIssueViewSidebarDependency, prepareIssueViewSidebarPin, - func(ctx *context.Context, issue *issues_model.Issue) { preparePullViewPullInfo(ctx, issue) }, - preparePullViewReviewAndMerge, } - + if issue.IsPull { + prepareFuncs = append(prepareFuncs, + func(ctx *context.Context, issue *issues_model.Issue) { preparePullViewPullInfo(ctx, issue) }, + preparePullViewReviewAndMerge, + ) + } for _, prepareFunc := range prepareFuncs { prepareFunc(ctx, issue) if ctx.Written() { @@ -443,7 +446,13 @@ func ViewPullMergeBox(ctx *context.Context) { return } preparePullViewPullInfo(ctx, issue) + if ctx.Written() { + return + } preparePullViewReviewAndMerge(ctx, issue) + if ctx.Written() { + return + } ctx.Data["PullMergeBoxReloading"] 
= issue.PullRequest.IsChecking() // TODO: it should use a dedicated struct to render the pull merge box, to make sure all data is prepared correctly diff --git a/routers/web/repo/pull.go b/routers/web/repo/pull.go index efcdaac674..c208bc1907 100644 --- a/routers/web/repo/pull.go +++ b/routers/web/repo/pull.go @@ -165,7 +165,7 @@ func setMergeTarget(ctx *context.Context, pull *issues_model.PullRequest) { if ctx.Repo.Owner.Name == pull.MustHeadUserName(ctx) { ctx.Data["HeadTarget"] = pull.HeadBranch } else if pull.HeadRepo == nil { - ctx.Data["HeadTarget"] = pull.MustHeadUserName(ctx) + ":" + pull.HeadBranch + ctx.Data["HeadTarget"] = ctx.Locale.Tr("repo.pull.deleted_branch", pull.HeadBranch) } else { ctx.Data["HeadTarget"] = pull.MustHeadUserName(ctx) + "/" + pull.HeadRepo.Name + ":" + pull.HeadBranch } @@ -260,60 +260,189 @@ func GetMergedBaseCommitID(ctx *context.Context, issue *issues_model.Issue) stri return baseCommit } -func preparePullViewPullInfo(ctx *context.Context, issue *issues_model.Issue) *git_service.CompareInfo { - if !issue.IsPull { - return nil +// PullRequestViewInfo is a structured type for viewing pull request +// Refactoring plan: +// * move dynamic template-data-based variable into this struct +// * let backend handle complex logic, prepare everything, avoid plenty of "if" blocks in tmpl +type PullRequestViewInfo struct { + IsPullRequestBroken bool + HeadBranchCommitID string + + CompareInfo git_service.CompareInfo + MergeBoxInfo struct { + // TODO: move "merge box" related template variables here in the future } - if issue.PullRequest.HasMerged { - return prepareMergedViewPullInfo(ctx, issue) - } - return prepareViewPullInfo(ctx, issue) + + StatusCheckData pullCommitStatusCheckData + CommitStatuses []*git_model.CommitStatus } -// prepareMergedViewPullInfo show meta information for a merged pull request view page -func prepareMergedViewPullInfo(ctx *context.Context, issue *issues_model.Issue) *git_service.CompareInfo { - pull := 
issue.PullRequest +func preparePullViewPullInfo(ctx *context.Context, issue *issues_model.Issue) *PullRequestViewInfo { + prInfo := &PullRequestViewInfo{} + ctx.Data["PullRequestViewInfo"] = prInfo // TODO: after the complete refactoring, decouple the variable from template data + ctx.Data["PullRequestWorkInProgressPrefixes"] = setting.Repository.PullRequest.WorkInProgressPrefixes - setMergeTarget(ctx, pull) - ctx.Data["HasMerged"] = true - - baseCommit := GetMergedBaseCommitID(ctx, issue) - - compareInfo, err := git_service.GetCompareInfo(ctx, ctx.Repo.Repository, ctx.Repo.Repository, ctx.Repo.GitRepo, - git.RefName(baseCommit), git.RefName(pull.GetGitHeadRefName()), false, false) - if err != nil { - if gitcmd.IsStdErrorNotValidObjectName(err) || strings.Contains(err.Error(), "unknown revision or path not in the working tree") { - ctx.Data["IsPullRequestBroken"] = true - ctx.Data["BaseTarget"] = pull.BaseBranch - ctx.Data["NumCommits"] = 0 - ctx.Data["NumFiles"] = 0 - return nil - } - - ctx.ServerError("GetCompareInfo", err) + if err := issue.PullRequest.LoadHeadRepo(ctx); err != nil { + ctx.ServerError("LoadHeadRepo", err) return nil } - ctx.Data["NumCommits"] = len(compareInfo.Commits) - ctx.Data["NumFiles"] = compareInfo.NumFiles - if len(compareInfo.Commits) != 0 { - sha := compareInfo.Commits[0].ID.String() - commitStatuses, err := git_model.GetLatestCommitStatus(ctx, ctx.Repo.Repository.ID, sha, db.ListOptionsAll) - if err != nil { - ctx.ServerError("GetLatestCommitStatus", err) - return nil - } - if !ctx.Repo.CanRead(unit.TypeActions) { - git_model.CommitStatusesHideActionsURL(ctx, commitStatuses) - } - - if len(commitStatuses) != 0 { - ctx.Data["LatestCommitStatuses"] = commitStatuses - ctx.Data["LatestCommitStatus"] = git_model.CalcCommitStatus(commitStatuses) - } + if err := issue.PullRequest.LoadBaseRepo(ctx); err != nil { + ctx.ServerError("LoadBaseRepo", err) + return nil } - return compareInfo + if issue.PullRequest.HasMerged { + 
prepareViewMergedPullInfo(ctx, issue) + } else { + prepareViewOpenPullInfo(ctx, issue) + } + return prInfo +} + +func preparePullViewFillInfo(ctx *context.Context, issue *issues_model.Issue, baseRef git.RefName) { + preparePullViewFillCompareInfo(ctx, issue, baseRef) + if ctx.Written() { + return + } + preparePullViewFillCommitStatusInfo(ctx, issue) +} + +func preparePullViewFillCompareInfo(ctx *context.Context, issue *issues_model.Issue, baseRef git.RefName) { + var err error + prInfo := ctx.Data["PullRequestViewInfo"].(*PullRequestViewInfo) + pull := issue.PullRequest + prInfo.CompareInfo, err = git_service.GetCompareInfo(ctx, ctx.Repo.Repository, ctx.Repo.Repository, ctx.Repo.GitRepo, baseRef, git.RefName(pull.GetGitHeadRefName()), false, false) + if err != nil { + isKnownErrorForBroken := gitcmd.IsStdErrorNotValidObjectName(err) || + // fatal: ambiguous argument 'origin': unknown revision or path not in the working tree. + gitcmd.StderrContains(err, "unknown revision or path not in the working tree") + if !isKnownErrorForBroken { + log.Error("GetCompareInfo: %v", err) + } + prInfo.IsPullRequestBroken = true + } + + prInfo.HeadBranchCommitID, err = getViewPullHeadBranchCommitID(ctx, pull) + if err != nil { + if !errors.Is(err, util.ErrNotExist) { + log.Error("GetViewPullHeadBranchCommitID: %v", err) + } + prInfo.IsPullRequestBroken = true + } + if !pull.Issue.IsClosed && (prInfo.HeadBranchCommitID != prInfo.CompareInfo.HeadCommitID) { + // if the PR is still open, but its "branch commit in head repo" + // doesn't match "the PR's internal git ref commit in base repo", then the PR is broken + prInfo.IsPullRequestBroken = true + } + + ctx.Data["IsPullRequestBroken"] = prInfo.IsPullRequestBroken + ctx.Data["NumCommits"] = len(prInfo.CompareInfo.Commits) + ctx.Data["NumFiles"] = prInfo.CompareInfo.NumFiles + setMergeTarget(ctx, issue.PullRequest) +} + +func preparePullViewFillCommitStatusInfo(ctx *context.Context, issue *issues_model.Issue) { + prInfo := 
ctx.Data["PullRequestViewInfo"].(*PullRequestViewInfo) + + headCommitID := prInfo.CompareInfo.HeadCommitID + if headCommitID == "" { + return + } + + repo := ctx.Repo.Repository + statusCheckData := &prInfo.StatusCheckData + + commitStatuses, err := git_model.GetLatestCommitStatus(ctx, ctx.Repo.Repository.ID, prInfo.CompareInfo.HeadCommitID, db.ListOptionsAll) + if err != nil { + ctx.ServerError("GetLatestCommitStatus", err) + return + } + if !ctx.Repo.CanRead(unit.TypeActions) { + git_model.CommitStatusesHideActionsURL(ctx, commitStatuses) + } + + prInfo.CommitStatuses = commitStatuses + statusCheckData.ApproveLink = fmt.Sprintf("%s/actions/approve-all-checks?commit_id=%s", repo.Link(), headCommitID) + statusCheckData.LatestCommitStatus = git_model.CalcCommitStatus(commitStatuses) + ctx.Data["LatestCommitStatuses"] = commitStatuses + ctx.Data["LatestCommitStatus"] = statusCheckData.LatestCommitStatus + ctx.Data["StatusCheckData"] = &prInfo.StatusCheckData + + if !issue.IsClosed { + preparePullViewFillCommitStatusInfoForOpen(ctx, issue) + } +} + +func preparePullViewFillCommitStatusInfoForOpen(ctx *context.Context, issue *issues_model.Issue) { + prInfo := ctx.Data["PullRequestViewInfo"].(*PullRequestViewInfo) + statusCheckData := &prInfo.StatusCheckData + commitStatuses := prInfo.CommitStatuses + runs, err := actions_service.GetRunsFromCommitStatuses(ctx, commitStatuses) + if err != nil { + ctx.ServerError("GetRunsFromCommitStatuses", err) + return + } + for _, run := range runs { + if run.NeedApproval { + statusCheckData.RequireApprovalRunCount++ + } + } + if statusCheckData.RequireApprovalRunCount > 0 { + statusCheckData.CanApprove = ctx.Repo.CanWrite(unit.TypeActions) + } + + pb, err := git_model.GetFirstMatchProtectedBranchRule(ctx, ctx.Repo.Repository.ID, issue.PullRequest.BaseBranch) + if err != nil { + ctx.ServerError("LoadProtectedBranch", err) + return + } + enableStatusCheck := pb != nil && pb.EnableStatusCheck + ctx.Data["EnableStatusCheck"] = 
enableStatusCheck + if !enableStatusCheck { + return + } + var missingRequiredChecks []string + for _, requiredContext := range pb.StatusCheckContexts { + contextFound := false + matchesRequiredContext := createRequiredContextMatcher(requiredContext) + for _, presentStatus := range commitStatuses { + if matchesRequiredContext(presentStatus.Context) { + contextFound = true + break + } + } + + if !contextFound { + missingRequiredChecks = append(missingRequiredChecks, requiredContext) + } + } + statusCheckData.MissingRequiredChecks = missingRequiredChecks + + statusCheckData.IsContextRequired = func(context string) bool { + for _, c := range pb.StatusCheckContexts { + if c == context { + return true + } + if gp, err := glob.Compile(c); err != nil { + // All newly created status_check_contexts are checked to ensure they are valid glob expressions before being stored in the database. + // But some old status_check_context created before glob was introduced may be invalid glob expressions. + // So log the error here for debugging. 
+ log.Error("compile glob %q: %v", c, err) + } else if gp.Match(context) { + return true + } + } + return false + } + statusCheckData.RequiredChecksState = pull_service.MergeRequiredContextsCommitStatus(commitStatuses, pb.StatusCheckContexts) +} + +// prepareViewMergedPullInfo show meta information for a merged pull request view page +func prepareViewMergedPullInfo(ctx *context.Context, issue *issues_model.Issue) { + ctx.Data["HasMerged"] = true + baseCommit := GetMergedBaseCommitID(ctx, issue) + preparePullViewFillInfo(ctx, issue, git.RefName(baseCommit)) } type pullCommitStatusCheckData struct { @@ -344,259 +473,59 @@ func (d *pullCommitStatusCheckData) CommitStatusCheckPrompt(locale translation.L return locale.TrString("repo.pulls.status_checking") } -func getViewPullHeadBranchInfo(ctx *context.Context, pull *issues_model.PullRequest, baseGitRepo *git.Repository) (headCommitID string, headCommitExists bool, err error) { - if pull.HeadRepo == nil { - return "", false, nil - } - headGitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, pull.HeadRepo) - if err != nil { - return "", false, util.Iif(errors.Is(err, util.ErrNotExist), nil, err) - } - defer closer.Close() - - if pull.Flow == issues_model.PullRequestFlowGithub { - headCommitExists, _ = git_model.IsBranchExist(ctx, pull.HeadRepo.ID, pull.HeadBranch) - } else { - headCommitExists = gitrepo.IsReferenceExist(ctx, pull.BaseRepo, pull.GetGitHeadRefName()) - } - - if headCommitExists { - if pull.Flow != issues_model.PullRequestFlowGithub { - headCommitID, err = baseGitRepo.GetRefCommitID(pull.GetGitHeadRefName()) - } else { - headCommitID, err = headGitRepo.GetBranchCommitID(pull.HeadBranch) +func getViewPullHeadBranchCommitID(ctx *context.Context, pull *issues_model.PullRequest) (string, error) { + switch pull.Flow { + case issues_model.PullRequestFlowGithub: + if pull.HeadRepo == nil { + return "", util.ErrNotExist } + headGitRepo, err := gitrepo.RepositoryFromRequestContextOrOpen(ctx, 
pull.HeadRepo) if err != nil { - return "", false, util.Iif(errors.Is(err, util.ErrNotExist), nil, err) + return "", err } + return headGitRepo.GetRefCommitID(git.RefNameFromBranch(pull.HeadBranch).String()) + case issues_model.PullRequestFlowAGit: + baseGitRepo, err := gitrepo.RepositoryFromRequestContextOrOpen(ctx, pull.BaseRepo) + if err != nil { + return "", err + } + return baseGitRepo.GetRefCommitID(pull.GetGitHeadRefName()) } - return headCommitID, headCommitExists, nil + setting.PanicInDevOrTesting("invalid pull request flow type: %v", pull.Flow) + return "", util.ErrNotExist } -// prepareViewPullInfo show meta information for a pull request preview page -func prepareViewPullInfo(ctx *context.Context, issue *issues_model.Issue) *git_service.CompareInfo { - ctx.Data["PullRequestWorkInProgressPrefixes"] = setting.Repository.PullRequest.WorkInProgressPrefixes - - repo := ctx.Repo.Repository +func prepareViewOpenPullInfo(ctx *context.Context, issue *issues_model.Issue) { + prInfo := ctx.Data["PullRequestViewInfo"].(*PullRequestViewInfo) pull := issue.PullRequest - - if err := pull.LoadHeadRepo(ctx); err != nil { - ctx.ServerError("LoadHeadRepo", err) - return nil - } - - if err := pull.LoadBaseRepo(ctx); err != nil { - ctx.ServerError("LoadBaseRepo", err) - return nil - } - - setMergeTarget(ctx, pull) - - pb, err := git_model.GetFirstMatchProtectedBranchRule(ctx, repo.ID, pull.BaseBranch) - if err != nil { - ctx.ServerError("LoadProtectedBranch", err) - return nil - } - ctx.Data["EnableStatusCheck"] = pb != nil && pb.EnableStatusCheck - - var baseGitRepo *git.Repository - if pull.BaseRepoID == ctx.Repo.Repository.ID && ctx.Repo.GitRepo != nil { - baseGitRepo = ctx.Repo.GitRepo - } else { - baseGitRepo, err := gitrepo.OpenRepository(ctx, pull.BaseRepo) - if err != nil { - ctx.ServerError("OpenRepository", err) - return nil - } - defer baseGitRepo.Close() - } - - statusCheckData := &pullCommitStatusCheckData{} - if exist, _ := git_model.IsBranchExist(ctx, 
pull.BaseRepo.ID, pull.BaseBranch); !exist { + // if base branch doesn't exist, prepare from the merge base ctx.Data["BaseBranchNotExist"] = true - ctx.Data["IsPullRequestBroken"] = true - ctx.Data["BaseTarget"] = pull.BaseBranch - ctx.Data["HeadTarget"] = pull.HeadBranch - - sha, err := baseGitRepo.GetRefCommitID(pull.GetGitHeadRefName()) - if err != nil { - ctx.ServerError(fmt.Sprintf("GetRefCommitID(%s)", pull.GetGitHeadRefName()), err) - return nil - } - commitStatuses, err := git_model.GetLatestCommitStatus(ctx, repo.ID, sha, db.ListOptionsAll) - if err != nil { - ctx.ServerError("GetLatestCommitStatus", err) - return nil - } - if !ctx.Repo.CanRead(unit.TypeActions) { - git_model.CommitStatusesHideActionsURL(ctx, commitStatuses) - } - - statusCheckData.LatestCommitStatus = git_model.CalcCommitStatus(commitStatuses) - if len(commitStatuses) > 0 { - ctx.Data["LatestCommitStatuses"] = commitStatuses - ctx.Data["LatestCommitStatus"] = statusCheckData.LatestCommitStatus - } - - compareInfo, err := git_service.GetCompareInfo(ctx, pull.BaseRepo, pull.BaseRepo, baseGitRepo, - git.RefName(pull.MergeBase), git.RefName(pull.GetGitHeadRefName()), false, false) - if err != nil { - if gitcmd.IsStdErrorNotValidObjectName(err) { - ctx.Data["IsPullRequestBroken"] = true - ctx.Data["BaseTarget"] = pull.BaseBranch - ctx.Data["NumCommits"] = 0 - ctx.Data["NumFiles"] = 0 - return nil - } - - ctx.ServerError("GetCompareInfo", err) - return nil - } - - ctx.Data["NumCommits"] = len(compareInfo.Commits) - ctx.Data["NumFiles"] = compareInfo.NumFiles - return compareInfo + preparePullViewFillInfo(ctx, issue, git.RefName(pull.MergeBase)) + return } - headBranchSha, headBranchExist, err := getViewPullHeadBranchInfo(ctx, pull, baseGitRepo) - if err != nil { - ctx.ServerError("getViewPullHeadBranchInfo", err) - return nil + preparePullViewFillInfo(ctx, issue, git.RefNameFromBranch(pull.BaseBranch)) + if ctx.Written() { + return } - if headBranchExist { + if !prInfo.IsPullRequestBroken { var 
err error ctx.Data["UpdateAllowed"], ctx.Data["UpdateByRebaseAllowed"], err = pull_service.IsUserAllowedToUpdate(ctx, pull, ctx.Doer) if err != nil { ctx.ServerError("IsUserAllowedToUpdate", err) - return nil + return } ctx.Data["GetCommitMessages"] = pull_service.GetSquashMergeCommitMessages(ctx, pull) } else { ctx.Data["GetCommitMessages"] = "" } - sha, err := baseGitRepo.GetRefCommitID(pull.GetGitHeadRefName()) - if err != nil { - if git.IsErrNotExist(err) { - ctx.Data["IsPullRequestBroken"] = true - if pull.IsSameRepo() { - ctx.Data["HeadTarget"] = pull.HeadBranch - } else if pull.HeadRepo == nil { - ctx.Data["HeadTarget"] = ctx.Locale.Tr("repo.pull.deleted_branch", pull.HeadBranch) - } else { - ctx.Data["HeadTarget"] = pull.HeadRepo.OwnerName + ":" + pull.HeadBranch - } - ctx.Data["BaseTarget"] = pull.BaseBranch - ctx.Data["NumCommits"] = 0 - ctx.Data["NumFiles"] = 0 - return nil - } - ctx.ServerError(fmt.Sprintf("GetRefCommitID(%s)", pull.GetGitHeadRefName()), err) - return nil - } + ctx.Data["HeadBranchCommitID"] = prInfo.HeadBranchCommitID + ctx.Data["PullHeadCommitID"] = prInfo.CompareInfo.HeadCommitID - ctx.Data["StatusCheckData"] = statusCheckData - statusCheckData.ApproveLink = fmt.Sprintf("%s/actions/approve-all-checks?commit_id=%s", repo.Link(), sha) - - commitStatuses, err := git_model.GetLatestCommitStatus(ctx, repo.ID, sha, db.ListOptionsAll) - if err != nil { - ctx.ServerError("GetLatestCommitStatus", err) - return nil - } - if !ctx.Repo.CanRead(unit.TypeActions) { - git_model.CommitStatusesHideActionsURL(ctx, commitStatuses) - } - - runs, err := actions_service.GetRunsFromCommitStatuses(ctx, commitStatuses) - if err != nil { - ctx.ServerError("GetRunsFromCommitStatuses", err) - return nil - } - for _, run := range runs { - if run.NeedApproval { - statusCheckData.RequireApprovalRunCount++ - } - } - if statusCheckData.RequireApprovalRunCount > 0 { - statusCheckData.CanApprove = ctx.Repo.CanWrite(unit.TypeActions) - } - - 
statusCheckData.LatestCommitStatus = git_model.CalcCommitStatus(commitStatuses) - if len(commitStatuses) > 0 { - ctx.Data["LatestCommitStatuses"] = commitStatuses - ctx.Data["LatestCommitStatus"] = statusCheckData.LatestCommitStatus - } - - if pb != nil && pb.EnableStatusCheck { - var missingRequiredChecks []string - for _, requiredContext := range pb.StatusCheckContexts { - contextFound := false - matchesRequiredContext := createRequiredContextMatcher(requiredContext) - for _, presentStatus := range commitStatuses { - if matchesRequiredContext(presentStatus.Context) { - contextFound = true - break - } - } - - if !contextFound { - missingRequiredChecks = append(missingRequiredChecks, requiredContext) - } - } - statusCheckData.MissingRequiredChecks = missingRequiredChecks - - statusCheckData.IsContextRequired = func(context string) bool { - for _, c := range pb.StatusCheckContexts { - if c == context { - return true - } - if gp, err := glob.Compile(c); err != nil { - // All newly created status_check_contexts are checked to ensure they are valid glob expressions before being stored in the database. - // But some old status_check_context created before glob was introduced may be invalid glob expressions. - // So log the error here for debugging. 
- log.Error("compile glob %q: %v", c, err) - } else if gp.Match(context) { - return true - } - } - return false - } - statusCheckData.RequiredChecksState = pull_service.MergeRequiredContextsCommitStatus(commitStatuses, pb.StatusCheckContexts) - } - - ctx.Data["HeadBranchMovedOn"] = headBranchSha != sha - ctx.Data["HeadBranchCommitID"] = headBranchSha - ctx.Data["PullHeadCommitID"] = sha - - if pull.HeadRepo == nil || !headBranchExist || (!pull.Issue.IsClosed && (headBranchSha != sha)) { - ctx.Data["IsPullRequestBroken"] = true - if pull.IsSameRepo() { - ctx.Data["HeadTarget"] = pull.HeadBranch - } else if pull.HeadRepo == nil { - ctx.Data["HeadTarget"] = ctx.Locale.Tr("repo.pull.deleted_branch", pull.HeadBranch) - } else { - ctx.Data["HeadTarget"] = pull.HeadRepo.OwnerName + ":" + pull.HeadBranch - } - } - - compareInfo, err := git_service.GetCompareInfo(ctx, pull.BaseRepo, pull.BaseRepo, baseGitRepo, - git.RefNameFromBranch(pull.BaseBranch), git.RefName(pull.GetGitHeadRefName()), false, false) - if err != nil { - if gitcmd.IsStdErrorNotValidObjectName(err) { - ctx.Data["IsPullRequestBroken"] = true - ctx.Data["BaseTarget"] = pull.BaseBranch - ctx.Data["NumCommits"] = 0 - ctx.Data["NumFiles"] = 0 - return nil - } - - ctx.ServerError("GetCompareInfo", err) - return nil - } - - if compareInfo.HeadCommitID == compareInfo.MergeBase { + if prInfo.CompareInfo.HeadCommitID == prInfo.CompareInfo.MergeBase { ctx.Data["IsNothingToCompare"] = true } @@ -609,10 +538,6 @@ func prepareViewPullInfo(ctx *context.Context, issue *issues_model.Issue) *git_s ctx.Data["IsPullFilesConflicted"] = true ctx.Data["ConflictedFiles"] = pull.ConflictedFiles } - - ctx.Data["NumCommits"] = len(compareInfo.Commits) - ctx.Data["NumFiles"] = compareInfo.NumFiles - return compareInfo } func createRequiredContextMatcher(requiredContext string) func(string) bool { @@ -672,10 +597,12 @@ func ViewPullCommits(ctx *context.Context) { return } - prInfo := preparePullViewPullInfo(ctx, issue) + prViewInfo := 
preparePullViewPullInfo(ctx, issue) if ctx.Written() { return - } else if prInfo == nil { + } + prCompareInfo := &prViewInfo.CompareInfo + if prCompareInfo.HeadCommitID == "" { ctx.NotFound(nil) return } @@ -683,7 +610,7 @@ func ViewPullCommits(ctx *context.Context) { ctx.Data["Username"] = ctx.Repo.Owner.Name ctx.Data["Reponame"] = ctx.Repo.Repository.Name - commits, err := processGitCommits(ctx, prInfo.Commits) + commits, err := processGitCommits(ctx, prCompareInfo.Commits) if err != nil { ctx.ServerError("processGitCommits", err) return @@ -725,46 +652,44 @@ func viewPullFiles(ctx *context.Context, beforeCommitID, afterCommitID string) { gitRepo := ctx.Repo.GitRepo - prInfo := preparePullViewPullInfo(ctx, issue) + prViewInfo := preparePullViewPullInfo(ctx, issue) if ctx.Written() { return - } else if prInfo == nil { + } + prCompareInfo := &prViewInfo.CompareInfo + if prCompareInfo.HeadCommitID == "" { ctx.NotFound(nil) return } - headCommitID, err := gitRepo.GetRefCommitID(pull.GetGitHeadRefName()) - if err != nil { - ctx.ServerError("GetRefCommitID", err) - return - } - + headCommitID := prCompareInfo.HeadCommitID isSingleCommit := beforeCommitID == "" && afterCommitID != "" ctx.Data["IsShowingOnlySingleCommit"] = isSingleCommit - isShowAllCommits := (beforeCommitID == "" || beforeCommitID == prInfo.MergeBase) && (afterCommitID == "" || afterCommitID == headCommitID) + isShowAllCommits := (beforeCommitID == "" || beforeCommitID == prCompareInfo.MergeBase) && (afterCommitID == "" || afterCommitID == headCommitID) ctx.Data["IsShowingAllCommits"] = isShowAllCommits if afterCommitID == "" || afterCommitID == headCommitID { afterCommitID = headCommitID } - afterCommit := indexCommit(prInfo.Commits, afterCommitID) + afterCommit := indexCommit(prCompareInfo.Commits, afterCommitID) if afterCommit == nil { ctx.HTTPError(http.StatusBadRequest, "after commit not found in PR commits") return } var beforeCommit *git.Commit + var err error if !isSingleCommit { - if 
beforeCommitID == "" || beforeCommitID == prInfo.MergeBase { - beforeCommitID = prInfo.MergeBase - // mergebase commit is not in the list of the pull request commits + if beforeCommitID == "" || beforeCommitID == prCompareInfo.MergeBase { + beforeCommitID = prCompareInfo.MergeBase + // merge base commit is not in the list of the pull request commits beforeCommit, err = gitRepo.GetCommit(beforeCommitID) if err != nil { ctx.ServerError("GetCommit", err) return } } else { - beforeCommit = indexCommit(prInfo.Commits, beforeCommitID) + beforeCommit = indexCommit(prCompareInfo.Commits, beforeCommitID) if beforeCommit == nil { ctx.HTTPError(http.StatusBadRequest, "before commit not found in PR commits") return @@ -781,7 +706,7 @@ func viewPullFiles(ctx *context.Context, beforeCommitID, afterCommitID string) { ctx.Data["Username"] = ctx.Repo.Owner.Name ctx.Data["Reponame"] = ctx.Repo.Repository.Name - ctx.Data["MergeBase"] = prInfo.MergeBase + ctx.Data["MergeBase"] = prCompareInfo.MergeBase ctx.Data["AfterCommitID"] = afterCommitID ctx.Data["BeforeCommitID"] = beforeCommitID @@ -1100,7 +1025,7 @@ func MergePullRequest(ctx *context.Context) { } // start with merging by checking - if err := pull_service.CheckPullMergeable(ctx, ctx.Doer, &ctx.Repo.Permission, pr, mergeCheckType, form.ForceMerge); err != nil { + if err := pull_service.CheckPullMergeable(ctx, ctx.Doer, &ctx.Repo.Permission, pr, mergeCheckType, repo_model.MergeStyle(form.Do), form.ForceMerge); err != nil { switch { case errors.Is(err, pull_service.ErrIsClosed): if issue.IsPull { @@ -1120,6 +1045,8 @@ func MergePullRequest(ctx *context.Context) { ctx.JSONError(ctx.Tr("repo.pulls.no_merge_not_ready")) case asymkey_service.IsErrWontSign(err): ctx.JSONError(err.Error()) // has no translation ... 
+ case errors.Is(err, pull_service.ErrHeadCommitsNotAllVerified): + ctx.JSONError(ctx.Tr("repo.pulls.require_signed_head_commits_unverified")) case errors.Is(err, pull_service.ErrDependenciesLeft): ctx.JSONError(ctx.Tr("repo.issues.dependency.pr_close_blocked")) default: diff --git a/routers/web/web.go b/routers/web/web.go index e0ff54fcff..15f8d4886b 100644 --- a/routers/web/web.go +++ b/routers/web/web.go @@ -260,6 +260,7 @@ func Routes() *web.Router { routes.BeforeRouting(chi_middleware.GetHead) routes.Head("/", misc.DummyOK) // for health check - doesn't need to be passed through gzip handler + routes.Methods("GET, HEAD", "/assets/site-manifest.json", misc.SiteManifest) routes.Methods("GET, HEAD, OPTIONS", "/assets/*", routing.MarkLogLevelTrace, public.AssetsCors(), public.FileHandlerFunc()) routes.Methods("GET, HEAD", "/avatars/*", avatarStorageHandler(setting.Avatar.Storage, "avatars", storage.Avatars)) routes.Methods("GET, HEAD", "/repo-avatars/*", avatarStorageHandler(setting.RepoAvatar.Storage, "repo-avatars", storage.RepoAvatars)) @@ -1539,6 +1540,11 @@ func registerWebRoutes(m *web.Router, webAuth *AuthMiddleware) { m.Combo(""). Get(actions.View). Post(web.Bind(actions.ViewRequest{}), actions.ViewPost) + m.Group("/attempts/{attempt}", func() { + m.Combo(""). + Get(actions.View). + Post(web.Bind(actions.ViewRequest{}), actions.ViewPost) + }) m.Group("/jobs/{job}", func() { m.Combo(""). Get(actions.View). 
@@ -1754,8 +1760,10 @@ func registerWebRoutes(m *web.Router, webAuth *AuthMiddleware) { m.Any("/mail-preview/*", devtest.MailPreviewRender) m.Any("/{sub}", devtest.TmplCommon) m.Get("/repo-action-view/runs/{run}", devtest.MockActionsView) + m.Get("/repo-action-view/runs/{run}/attempts/{attempt}", devtest.MockActionsView) m.Get("/repo-action-view/runs/{run}/jobs/{job}", devtest.MockActionsView) m.Post("/repo-action-view/runs/{run}", web.Bind(actions.ViewRequest{}), devtest.MockActionsRunsJobs) + m.Post("/repo-action-view/runs/{run}/attempts/{attempt}", web.Bind(actions.ViewRequest{}), devtest.MockActionsRunsJobs) m.Post("/repo-action-view/runs/{run}/jobs/{job}", web.Bind(actions.ViewRequest{}), devtest.MockActionsRunsJobs) }) } diff --git a/services/actions/approve.go b/services/actions/approve.go new file mode 100644 index 0000000000..552b055b70 --- /dev/null +++ b/services/actions/approve.go @@ -0,0 +1,69 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package actions + +import ( + "context" + + actions_model "code.gitea.io/gitea/models/actions" + "code.gitea.io/gitea/models/db" + repo_model "code.gitea.io/gitea/models/repo" + user_model "code.gitea.io/gitea/models/user" +) + +func ApproveRuns(ctx context.Context, repo *repo_model.Repository, doer *user_model.User, runIDs []int64) error { + updatedJobs := make([]*actions_model.ActionRunJob, 0) + cancelledConcurrencyJobs := make([]*actions_model.ActionRunJob, 0) + + err := db.WithTx(ctx, func(ctx context.Context) (err error) { + for _, runID := range runIDs { + run, err := actions_model.GetRunByRepoAndID(ctx, repo.ID, runID) + if err != nil { + return err + } + run.NeedApproval = false + run.ApprovedBy = doer.ID + if err := actions_model.UpdateRun(ctx, run, "need_approval", "approved_by"); err != nil { + return err + } + jobs, err := actions_model.GetLatestAttemptJobsByRepoAndRunID(ctx, repo.ID, run.ID) + if err != nil { + return err + } + for _, job := range jobs { 
+ // Skip jobs with `needs`: they stay blocked until their dependencies finish, + // at which point job_emitter will evaluate and start them. + if len(job.Needs) > 0 { + continue + } + var jobsToCancel []*actions_model.ActionRunJob + job.Status, jobsToCancel, err = PrepareToStartJobWithConcurrency(ctx, job) + if err != nil { + return err + } + cancelledConcurrencyJobs = append(cancelledConcurrencyJobs, jobsToCancel...) + if job.Status == actions_model.StatusWaiting { + n, err := actions_model.UpdateRunJob(ctx, job, nil, "status") + if err != nil { + return err + } + if n > 0 { + updatedJobs = append(updatedJobs, job) + } + } + } + } + return nil + }) + if err != nil { + return err + } + + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, updatedJobs) + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, cancelledConcurrencyJobs) + + EmitJobsIfReadyByJobs(cancelledConcurrencyJobs) + + return nil +} diff --git a/services/actions/cleanup.go b/services/actions/cleanup.go index d0cc63e538..f223c98125 100644 --- a/services/actions/cleanup.go +++ b/services/actions/cleanup.go @@ -179,7 +179,7 @@ func DeleteRun(ctx context.Context, run *actions_model.ActionRun) error { repoID := run.RepoID - jobs, err := actions_model.GetRunJobsByRunID(ctx, run.ID) + jobs, err := actions_model.GetAllRunJobsByRepoAndRunID(ctx, run.RepoID, run.ID) if err != nil { return err } @@ -207,6 +207,10 @@ func DeleteRun(ctx context.Context, run *actions_model.ActionRun) error { RepoID: repoID, ID: run.ID, }) + recordsToDelete = append(recordsToDelete, &actions_model.ActionRunAttempt{ + RepoID: repoID, + RunID: run.ID, + }) recordsToDelete = append(recordsToDelete, &actions_model.ActionRunJob{ RepoID: repoID, RunID: run.ID, diff --git a/services/actions/clear_tasks.go b/services/actions/clear_tasks.go index c71f63e7d1..940f1d8454 100644 --- a/services/actions/clear_tasks.go +++ b/services/actions/clear_tasks.go @@ -17,7 +17,6 @@ import ( "code.gitea.io/gitea/modules/timeutil" "code.gitea.io/gitea/modules/util" 
webhook_module "code.gitea.io/gitea/modules/webhook" - notify_service "code.gitea.io/gitea/services/notify" ) // StopZombieTasks stops the task which have running status, but haven't been updated for a long time @@ -36,39 +35,16 @@ func StopEndlessTasks(ctx context.Context) error { }) } -func notifyWorkflowJobStatusUpdate(ctx context.Context, jobs []*actions_model.ActionRunJob) { - if len(jobs) == 0 { - return - } - // The input jobs may belong to different runs, so track each affected run. - runs := make(map[int64]*actions_model.ActionRun, len(jobs)) - for _, job := range jobs { - if err := job.LoadAttributes(ctx); err != nil { - log.Error("Failed to load job attributes: %v", err) - continue - } - CreateCommitStatusForRunJobs(ctx, job.Run, job) - notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil) - if _, ok := runs[job.RunID]; !ok { - runs[job.RunID] = job.Run - } - } - - for _, run := range runs { - notify_service.WorkflowRunStatusUpdate(ctx, run.Repo, run.TriggerUser, run) - } -} - func CancelPreviousJobs(ctx context.Context, repoID int64, ref, workflowID string, event webhook_module.HookEventType) error { jobs, err := actions_model.CancelPreviousJobs(ctx, repoID, ref, workflowID, event) - notifyWorkflowJobStatusUpdate(ctx, jobs) + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, jobs) EmitJobsIfReadyByJobs(jobs) return err } func CleanRepoScheduleTasks(ctx context.Context, repo *repo_model.Repository) error { jobs, err := actions_model.CleanRepoScheduleTasks(ctx, repo) - notifyWorkflowJobStatusUpdate(ctx, jobs) + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, jobs) EmitJobsIfReadyByJobs(jobs) return err } @@ -83,61 +59,59 @@ func shouldBlockJobByConcurrency(ctx context.Context, job *actions_model.ActionR return false, nil } - runs, jobs, err := actions_model.GetConcurrentRunsAndJobs(ctx, job.RepoID, job.ConcurrencyGroup, []actions_model.Status{actions_model.StatusRunning}) + attempts, jobs, err := 
actions_model.GetConcurrentRunAttemptsAndJobs(ctx, job.RepoID, job.ConcurrencyGroup, []actions_model.Status{actions_model.StatusRunning}) if err != nil { - return false, fmt.Errorf("GetConcurrentRunsAndJobs: %w", err) + return false, fmt.Errorf("GetConcurrentRunAttemptsAndJobs: %w", err) } - return len(runs) > 0 || len(jobs) > 0, nil + return len(attempts) > 0 || len(jobs) > 0, nil } // PrepareToStartJobWithConcurrency prepares a job to start by its evaluated concurrency group and cancelling previous jobs if necessary. -// It returns the new status of the job (either StatusBlocked or StatusWaiting) and any error encountered during the process. -func PrepareToStartJobWithConcurrency(ctx context.Context, job *actions_model.ActionRunJob) (actions_model.Status, error) { +// It returns the new status of the job (either StatusBlocked or StatusWaiting), any cancelled jobs, and any error encountered during the process. +func PrepareToStartJobWithConcurrency(ctx context.Context, job *actions_model.ActionRunJob) (actions_model.Status, []*actions_model.ActionRunJob, error) { shouldBlock, err := shouldBlockJobByConcurrency(ctx, job) if err != nil { - return actions_model.StatusBlocked, err + return actions_model.StatusBlocked, nil, err } // even if the current job is blocked, we still need to cancel previous "waiting/blocked" jobs in the same concurrency group jobs, err := actions_model.CancelPreviousJobsByJobConcurrency(ctx, job) if err != nil { - return actions_model.StatusBlocked, fmt.Errorf("CancelPreviousJobsByJobConcurrency: %w", err) + return actions_model.StatusBlocked, nil, fmt.Errorf("CancelPreviousJobsByJobConcurrency: %w", err) } - notifyWorkflowJobStatusUpdate(ctx, jobs) - return util.Iif(shouldBlock, actions_model.StatusBlocked, actions_model.StatusWaiting), nil + return util.Iif(shouldBlock, actions_model.StatusBlocked, actions_model.StatusWaiting), jobs, nil } -func shouldBlockRunByConcurrency(ctx context.Context, actionRun *actions_model.ActionRun) (bool, 
error) { - if actionRun.ConcurrencyGroup == "" || actionRun.ConcurrencyCancel { +func shouldBlockRunByConcurrency(ctx context.Context, attempt *actions_model.ActionRunAttempt) (bool, error) { + if attempt.ConcurrencyGroup == "" || attempt.ConcurrencyCancel { return false, nil } - runs, jobs, err := actions_model.GetConcurrentRunsAndJobs(ctx, actionRun.RepoID, actionRun.ConcurrencyGroup, []actions_model.Status{actions_model.StatusRunning}) + attempts, jobs, err := actions_model.GetConcurrentRunAttemptsAndJobs(ctx, attempt.RepoID, attempt.ConcurrencyGroup, []actions_model.Status{actions_model.StatusRunning}) if err != nil { return false, fmt.Errorf("find concurrent runs and jobs: %w", err) } - return len(runs) > 0 || len(jobs) > 0, nil + return len(attempts) > 0 || len(jobs) > 0, nil } -// PrepareToStartRunWithConcurrency prepares a run to start by its evaluated concurrency group and cancelling previous jobs if necessary. -// It returns the new status of the run (either StatusBlocked or StatusWaiting) and any error encountered during the process. -func PrepareToStartRunWithConcurrency(ctx context.Context, run *actions_model.ActionRun) (actions_model.Status, error) { - shouldBlock, err := shouldBlockRunByConcurrency(ctx, run) +// PrepareToStartRunWithConcurrency prepares a run attempt to start by its evaluated concurrency group and cancelling previous jobs if necessary. +// It returns the new status of the run attempt (either StatusBlocked or StatusWaiting), any cancelled jobs, and any error encountered during the process. 
+func PrepareToStartRunWithConcurrency(ctx context.Context, attempt *actions_model.ActionRunAttempt) (actions_model.Status, []*actions_model.ActionRunJob, error) { + shouldBlock, err := shouldBlockRunByConcurrency(ctx, attempt) if err != nil { - return actions_model.StatusBlocked, err + return actions_model.StatusBlocked, nil, err } // even if the current run is blocked, we still need to cancel previous "waiting/blocked" jobs in the same concurrency group - jobs, err := actions_model.CancelPreviousJobsByRunConcurrency(ctx, run) + jobs, err := actions_model.CancelPreviousJobsByRunConcurrency(ctx, attempt) if err != nil { - return actions_model.StatusBlocked, fmt.Errorf("CancelPreviousJobsByRunConcurrency: %w", err) + return actions_model.StatusBlocked, nil, fmt.Errorf("CancelPreviousJobsByRunConcurrency: %w", err) } - notifyWorkflowJobStatusUpdate(ctx, jobs) - return util.Iif(shouldBlock, actions_model.StatusBlocked, actions_model.StatusWaiting), nil + return util.Iif(shouldBlock, actions_model.StatusBlocked, actions_model.StatusWaiting), jobs, nil } func stopTasks(ctx context.Context, opts actions_model.FindTaskOptions) error { @@ -175,7 +149,7 @@ func stopTasks(ctx context.Context, opts actions_model.FindTaskOptions) error { remove() } - notifyWorkflowJobStatusUpdate(ctx, jobs) + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, jobs) EmitJobsIfReadyByJobs(jobs) return nil @@ -194,8 +168,6 @@ func CancelAbandonedJobs(ctx context.Context) error { now := timeutil.TimeStampNow() - // Collect one job per run to send workflow run status update - updatedRuns := map[int64]*actions_model.ActionRunJob{} updatedJobs := []*actions_model.ActionRunJob{} for _, job := range jobs { @@ -211,9 +183,6 @@ func CancelAbandonedJobs(ctx context.Context) error { return err } updated = n > 0 - if updated && job.Run.Status.IsDone() { - updatedRuns[job.RunID] = job - } return nil }); err != nil { log.Warn("cancel abandoned job %v: %v", job.ID, err) @@ -222,16 +191,13 @@ func 
CancelAbandonedJobs(ctx context.Context) error { if job.Run == nil || job.Run.Repo == nil { continue // error occurs during loading attributes, the following code that depends on "Run.Repo" will fail, so ignore and skip } - CreateCommitStatusForRunJobs(ctx, job.Run, job) if updated { + CreateCommitStatusForRunJobs(ctx, job.Run, job) updatedJobs = append(updatedJobs, job) - notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil) } } - for _, job := range updatedRuns { - notify_service.WorkflowRunStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job.Run) - } + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, updatedJobs) EmitJobsIfReadyByJobs(updatedJobs) return nil diff --git a/services/actions/concurrency.go b/services/actions/concurrency.go index 878e5c483b..e1ec549930 100644 --- a/services/actions/concurrency.go +++ b/services/actions/concurrency.go @@ -17,15 +17,15 @@ import ( ) // EvaluateRunConcurrencyFillModel evaluates the expressions in a run-level (workflow) concurrency, -// and fills the run's model fields with `concurrency.group` and `concurrency.cancel-in-progress`. +// and fills the run attempt model with the evaluated `concurrency.group` and `concurrency.cancel-in-progress` values. // Workflow-level concurrency doesn't depend on the job outputs, so it can always be evaluated if there is no syntax error. 
// See https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#concurrency -func EvaluateRunConcurrencyFillModel(ctx context.Context, run *actions_model.ActionRun, wfRawConcurrency *act_model.RawConcurrency, vars map[string]string, inputs map[string]any) error { +func EvaluateRunConcurrencyFillModel(ctx context.Context, run *actions_model.ActionRun, attempt *actions_model.ActionRunAttempt, wfRawConcurrency *act_model.RawConcurrency, vars map[string]string, inputs map[string]any) error { if err := run.LoadAttributes(ctx); err != nil { return fmt.Errorf("run LoadAttributes: %w", err) } - actionsRunCtx := GenerateGiteaContext(run, nil) + actionsRunCtx := GenerateGiteaContext(ctx, run, attempt, nil) jobResults := map[string]*jobparser.JobResult{"": {}} if inputs == nil { var err error @@ -35,12 +35,8 @@ func EvaluateRunConcurrencyFillModel(ctx context.Context, run *actions_model.Act } } - rawConcurrency, err := yaml.Marshal(wfRawConcurrency) - if err != nil { - return fmt.Errorf("marshal raw concurrency: %w", err) - } - run.RawConcurrency = string(rawConcurrency) - run.ConcurrencyGroup, run.ConcurrencyCancel, err = jobparser.EvaluateConcurrency(wfRawConcurrency, "", nil, actionsRunCtx, jobResults, vars, inputs) + var err error + attempt.ConcurrencyGroup, attempt.ConcurrencyCancel, err = jobparser.EvaluateConcurrency(wfRawConcurrency, "", nil, actionsRunCtx, jobResults, vars, inputs) if err != nil { return fmt.Errorf("evaluate concurrency: %w", err) } @@ -71,7 +67,7 @@ func findJobNeedsAndFillJobResults(ctx context.Context, job *actions_model.Actio // Job-level concurrency may depend on other job's outputs (via `needs`): `concurrency.group: my-group-${{ needs.job1.outputs.out1 }}` // If the needed jobs haven't been executed yet, this evaluation will also fail. 
// See https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#jobsjob_idconcurrency -func EvaluateJobConcurrencyFillModel(ctx context.Context, run *actions_model.ActionRun, actionRunJob *actions_model.ActionRunJob, vars map[string]string, inputs map[string]any) error { +func EvaluateJobConcurrencyFillModel(ctx context.Context, run *actions_model.ActionRun, attempt *actions_model.ActionRunAttempt, actionRunJob *actions_model.ActionRunJob, vars map[string]string, inputs map[string]any) error { if err := actionRunJob.LoadAttributes(ctx); err != nil { return fmt.Errorf("job LoadAttributes: %w", err) } @@ -81,7 +77,7 @@ func EvaluateJobConcurrencyFillModel(ctx context.Context, run *actions_model.Act return fmt.Errorf("unmarshal raw concurrency: %w", err) } - actionsJobCtx := GenerateGiteaContext(run, actionRunJob) + actionsJobCtx := GenerateGiteaContext(ctx, run, attempt, actionRunJob) jobResults, err := findJobNeedsAndFillJobResults(ctx, actionRunJob) if err != nil { diff --git a/services/actions/context.go b/services/actions/context.go index 69d5937623..9250c40983 100644 --- a/services/actions/context.go +++ b/services/actions/context.go @@ -14,6 +14,7 @@ import ( "code.gitea.io/gitea/modules/container" "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/json" + "code.gitea.io/gitea/modules/optional" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" @@ -22,9 +23,14 @@ import ( type GiteaContext map[string]any -// GenerateGiteaContext generate the gitea context without token and gitea_runtime_token -// job can be nil when generating a context for parsing workflow-level expressions -func GenerateGiteaContext(run *actions_model.ActionRun, job *actions_model.ActionRunJob) GiteaContext { +// GenerateGiteaContext generate the gitea context without token and gitea_runtime_token. +// attempt and job can be nil when generating a context for parsing workflow-level expressions. 
+// +// The run_attempt value is resolved with the following precedence: +// 1. attempt.Attempt - the explicit attempt argument, or run.GetLatestAttempt() as a fallback +// 2. job.Attempt - only used when neither an explicit nor latest attempt is available +// 3. "1" - when none of the above apply (first-run parse time, before the first attempt exists) +func GenerateGiteaContext(ctx context.Context, run *actions_model.ActionRun, attempt *actions_model.ActionRunAttempt, job *actions_model.ActionRunJob) GiteaContext { event := map[string]any{} _ = json.Unmarshal([]byte(run.EventPayload), &event) @@ -89,10 +95,28 @@ func GenerateGiteaContext(run *actions_model.ActionRun, job *actions_model.Actio if job != nil { gitContext["job"] = job.JobID - gitContext["run_id"] = strconv.FormatInt(job.RunID, 10) gitContext["run_attempt"] = strconv.FormatInt(job.Attempt, 10) } + if attempt == nil { + if latestAttempt, has, err := run.GetLatestAttempt(ctx); err == nil && has { + attempt = latestAttempt + } + } + + if attempt != nil { + gitContext["run_attempt"] = strconv.FormatInt(attempt.Attempt, 10) + if err := attempt.LoadAttributes(ctx); err == nil { + gitContext["triggering_actor"] = attempt.TriggerUser.Name + } + } + + // Fallback for first-run parse time: no job, no attempt (LatestAttemptID==0). github.run_attempt + // is 1-based per the documented contract, so emit "1" rather than leaving it empty. + if gitContext["run_attempt"] == "" { + gitContext["run_attempt"] = "1" + } + return gitContext } @@ -108,7 +132,13 @@ func FindTaskNeeds(ctx context.Context, job *actions_model.ActionRunJob) (map[st } needs := container.SetOf(job.Needs...) - jobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: job.RunID}) + // Scope to the same attempt. For legacy jobs RunAttemptID==0, which matches all other legacy jobs in the same run. 
+ findOpts := actions_model.FindRunJobOptions{ + RunID: job.RunID, + RunAttemptID: optional.Some(job.RunAttemptID), + } + + jobs, err := db.Find[actions_model.ActionRunJob](ctx, findOpts) if err != nil { return nil, fmt.Errorf("FindRunJobs: %w", err) } @@ -125,11 +155,12 @@ func FindTaskNeeds(ctx context.Context, job *actions_model.ActionRunJob) (map[st } var jobOutputs map[string]string for _, job := range jobsWithSameID { - if job.TaskID == 0 || !job.Status.IsDone() { - // it shouldn't happen, or the job has been rerun + taskID := job.EffectiveTaskID() + if taskID == 0 || !job.Status.IsDone() { + // it shouldn't happen continue } - got, err := actions_model.FindTaskOutputByTaskID(ctx, job.TaskID) + got, err := actions_model.FindTaskOutputByTaskID(ctx, taskID) if err != nil { return nil, fmt.Errorf("FindTaskOutputByTaskID: %w", err) } diff --git a/services/actions/context_test.go b/services/actions/context_test.go index 4ade67111c..22f9abcce8 100644 --- a/services/actions/context_test.go +++ b/services/actions/context_test.go @@ -26,17 +26,20 @@ func TestEvaluateRunConcurrency_RunIDFallback(t *testing.T) { runA := unittest.AssertExistsAndLoadBean(t, &actions_model.ActionRun{ID: 791}) runB := unittest.AssertExistsAndLoadBean(t, &actions_model.ActionRun{ID: 792}) + attemptA := &actions_model.ActionRunAttempt{RepoID: runA.RepoID, RunID: runA.ID, Attempt: 1} + attemptB := &actions_model.ActionRunAttempt{RepoID: runB.RepoID, RunID: runB.ID, Attempt: 1} + expr := &act_model.RawConcurrency{ Group: "${{ github.workflow }}-${{ github.head_ref || github.run_id }}", CancelInProgress: "true", } - assert.NoError(t, EvaluateRunConcurrencyFillModel(ctx, runA, expr, nil, nil)) - assert.NoError(t, EvaluateRunConcurrencyFillModel(ctx, runB, expr, nil, nil)) + assert.NoError(t, EvaluateRunConcurrencyFillModel(ctx, runA, attemptA, expr, nil, nil)) + assert.NoError(t, EvaluateRunConcurrencyFillModel(ctx, runB, attemptB, expr, nil, nil)) - assert.Contains(t, runA.ConcurrencyGroup, 
"791") - assert.Contains(t, runB.ConcurrencyGroup, "792") - assert.NotEqual(t, runA.ConcurrencyGroup, runB.ConcurrencyGroup) + assert.Contains(t, attemptA.ConcurrencyGroup, "791") + assert.Contains(t, attemptB.ConcurrencyGroup, "792") + assert.NotEqual(t, attemptA.ConcurrencyGroup, attemptB.ConcurrencyGroup) } func TestPrepareRunAndInsert_ExpressionsSeeRunID(t *testing.T) { @@ -78,7 +81,10 @@ jobs: persisted := unittest.AssertExistsAndLoadBean(t, &actions_model.ActionRun{ID: run.ID}) runIDStr := strconv.FormatInt(run.ID, 10) assert.Equal(t, "Run "+runIDStr, persisted.Title) - assert.Equal(t, "group-"+runIDStr, persisted.ConcurrencyGroup) + // ConcurrencyGroup lives on the latest attempt after migration v331. + require.Positive(t, persisted.LatestAttemptID) + attempt := unittest.AssertExistsAndLoadBean(t, &actions_model.ActionRunAttempt{ID: persisted.LatestAttemptID}) + assert.Equal(t, "group-"+runIDStr, attempt.ConcurrencyGroup) // Rerun reads raw_concurrency from the DB to re-evaluate the group; // see services/actions/rerun.go. Must survive the insert. 
assert.NotEmpty(t, persisted.RawConcurrency) diff --git a/services/actions/job_emitter.go b/services/actions/job_emitter.go index c7813360ab..489b36a3a7 100644 --- a/services/actions/job_emitter.go +++ b/services/actions/job_emitter.go @@ -16,7 +16,6 @@ import ( "code.gitea.io/gitea/modules/queue" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" - notify_service "code.gitea.io/gitea/services/notify" "xorm.io/builder" ) @@ -70,30 +69,33 @@ func checkJobsByRunID(ctx context.Context, runID int64) error { if err != nil { return fmt.Errorf("get action run: %w", err) } - var jobs, updatedJobs []*actions_model.ActionRunJob + var jobs, updatedJobs, cancelledJobs []*actions_model.ActionRunJob if err := db.WithTx(ctx, func(ctx context.Context) error { // check jobs of the current run - if js, ujs, err := checkJobsOfRun(ctx, run); err != nil { + if js, ujs, cjs, err := checkJobsOfCurrentRunAttempt(ctx, run); err != nil { return err } else { jobs = append(jobs, js...) updatedJobs = append(updatedJobs, ujs...) + cancelledJobs = append(cancelledJobs, cjs...) } - if js, ujs, err := checkRunConcurrency(ctx, run); err != nil { + if js, ujs, cjs, err := checkRunConcurrency(ctx, run); err != nil { return err } else { jobs = append(jobs, js...) updatedJobs = append(updatedJobs, ujs...) + cancelledJobs = append(cancelledJobs, cjs...) } return nil }); err != nil { return err } - CreateCommitStatusForRunJobs(ctx, run, jobs...) - for _, job := range updatedJobs { - _ = job.LoadAttributes(ctx) - notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil) + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, cancelledJobs) + EmitJobsIfReadyByJobs(cancelledJobs) + if err := createCommitStatusesForJobsByRun(ctx, jobs); err != nil { + return err } + NotifyWorkflowJobsStatusUpdate(ctx, updatedJobs...) 
runJobs := make(map[int64][]*actions_model.ActionRunJob) for _, job := range jobs { runJobs[job.RunID] = append(runJobs[job.RunID], job) @@ -114,71 +116,97 @@ func checkJobsByRunID(ctx context.Context, runID int64) error { } } if runUpdated { - NotifyWorkflowRunStatusUpdateWithReload(ctx, js[0]) + NotifyWorkflowRunStatusUpdateWithReload(ctx, js[0].RepoID, js[0].RunID) } } return nil } -// findBlockedRunByConcurrency finds the blocked concurrent run in a repo and returns `nil, nil` when there is no blocked run. -func findBlockedRunByConcurrency(ctx context.Context, repoID int64, concurrencyGroup string) (*actions_model.ActionRun, error) { - if concurrencyGroup == "" { - return nil, nil //nolint:nilnil // return nil to indicate that no blocked run exists - } - cRuns, cJobs, err := actions_model.GetConcurrentRunsAndJobs(ctx, repoID, concurrencyGroup, []actions_model.Status{actions_model.StatusBlocked}) - if err != nil { - return nil, fmt.Errorf("find concurrent runs and jobs: %w", err) +func createCommitStatusesForJobsByRun(ctx context.Context, jobs []*actions_model.ActionRunJob) error { + runJobs := make(map[int64][]*actions_model.ActionRunJob) + for _, job := range jobs { + runJobs[job.RunID] = append(runJobs[job.RunID], job) } - // There can be at most one blocked run or job - var concurrentRun *actions_model.ActionRun - if len(cRuns) > 0 { - concurrentRun = cRuns[0] - } else if len(cJobs) > 0 { - jobRun, exist, err := db.GetByID[actions_model.ActionRun](ctx, cJobs[0].RunID) - if !exist { - return nil, fmt.Errorf("run %d does not exist", cJobs[0].RunID) - } + for jobRunID, jobList := range runJobs { + run, err := actions_model.GetRunByRepoAndID(ctx, jobList[0].RepoID, jobRunID) if err != nil { - return nil, fmt.Errorf("get run by job %d: %w", cJobs[0].ID, err) + return fmt.Errorf("get action run %d: %w", jobRunID, err) } - concurrentRun = jobRun + CreateCommitStatusForRunJobs(ctx, run, jobList...) 
} - - return concurrentRun, nil + return nil } -func checkRunConcurrency(ctx context.Context, run *actions_model.ActionRun) (jobs, updatedJobs []*actions_model.ActionRunJob, err error) { +// findBlockedRunIDByConcurrency finds a blocked concurrent run in a repo and returns 0 when there is no blocked run. +func findBlockedRunIDByConcurrency(ctx context.Context, repoID int64, concurrencyGroup string) (int64, error) { + if concurrencyGroup == "" { + return 0, nil + } + cAttempts, cJobs, err := actions_model.GetConcurrentRunAttemptsAndJobs(ctx, repoID, concurrencyGroup, []actions_model.Status{actions_model.StatusBlocked}) + if err != nil { + return 0, fmt.Errorf("find concurrent runs and jobs: %w", err) + } + + if len(cAttempts) > 0 { + return cAttempts[0].RunID, nil + } + if len(cJobs) > 0 { + return cJobs[0].RunID, nil + } + + return 0, nil +} + +func checkBlockedConcurrentRun(ctx context.Context, repoID, runID int64) (jobs, updatedJobs, cancelledJobs []*actions_model.ActionRunJob, err error) { + concurrentRun, err := actions_model.GetRunByRepoAndID(ctx, repoID, runID) + if err != nil { + return nil, nil, nil, fmt.Errorf("get run %d: %w", runID, err) + } + if concurrentRun.NeedApproval { + return nil, nil, nil, nil + } + + return checkJobsOfCurrentRunAttempt(ctx, concurrentRun) +} + +// checkRunConcurrency rechecks runs blocked by concurrency that may become unblocked after the current run releases a workflow-level or job-level concurrency group. 
+func checkRunConcurrency(ctx context.Context, run *actions_model.ActionRun) (jobs, updatedJobs, cancelledJobs []*actions_model.ActionRunJob, err error) { checkedConcurrencyGroup := make(container.Set[string]) collect := func(concurrencyGroup string) error { - concurrentRun, err := findBlockedRunByConcurrency(ctx, run.RepoID, concurrencyGroup) + concurrentRunID, err := findBlockedRunIDByConcurrency(ctx, run.RepoID, concurrencyGroup) if err != nil { return fmt.Errorf("find blocked run by concurrency: %w", err) } - if concurrentRun != nil && !concurrentRun.NeedApproval { - js, ujs, err := checkJobsOfRun(ctx, concurrentRun) + if concurrentRunID > 0 { + js, ujs, cjs, err := checkBlockedConcurrentRun(ctx, run.RepoID, concurrentRunID) if err != nil { return err } jobs = append(jobs, js...) updatedJobs = append(updatedJobs, ujs...) + cancelledJobs = append(cancelledJobs, cjs...) } checkedConcurrencyGroup.Add(concurrencyGroup) return nil } // check run (workflow-level) concurrency - if run.ConcurrencyGroup != "" { - if err := collect(run.ConcurrencyGroup); err != nil { - return nil, nil, err + runConcurrencyGroup, _, err := run.GetEffectiveConcurrency(ctx) + if err != nil { + return nil, nil, nil, fmt.Errorf("GetEffectiveConcurrency: %w", err) + } + if runConcurrencyGroup != "" { + if err := collect(runConcurrencyGroup); err != nil { + return nil, nil, nil, err } } // check job concurrency - runJobs, err := db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID}) + runJobs, err := actions_model.GetLatestAttemptJobsByRepoAndRunID(ctx, run.RepoID, run.ID) if err != nil { - return nil, nil, fmt.Errorf("find run %d jobs: %w", run.ID, err) + return nil, nil, nil, fmt.Errorf("find run %d jobs: %w", run.ID, err) } for _, job := range runJobs { if !job.Status.IsDone() { @@ -188,28 +216,30 @@ func checkRunConcurrency(ctx context.Context, run *actions_model.ActionRun) (job continue } if err := collect(job.ConcurrencyGroup); err != nil { - return nil, 
nil, err + return nil, nil, nil, err } } - return jobs, updatedJobs, nil + return jobs, updatedJobs, cancelledJobs, nil } -func checkJobsOfRun(ctx context.Context, run *actions_model.ActionRun) (jobs, updatedJobs []*actions_model.ActionRunJob, err error) { - jobs, err = db.Find[actions_model.ActionRunJob](ctx, actions_model.FindRunJobOptions{RunID: run.ID}) +// checkJobsOfCurrentRunAttempt resolves blocked jobs of the run's latest attempt. +func checkJobsOfCurrentRunAttempt(ctx context.Context, run *actions_model.ActionRun) (jobs, updatedJobs, cancelledJobs []*actions_model.ActionRunJob, err error) { + jobs, err = actions_model.GetRunJobsByRunAndAttemptID(ctx, run.ID, run.LatestAttemptID) if err != nil { - return nil, nil, err + return nil, nil, nil, err } vars, err := actions_model.GetVariablesOfRun(ctx, run) if err != nil { - return nil, nil, err + return nil, nil, nil, err } + resolver := newJobStatusResolver(jobs, vars) if err = db.WithTx(ctx, func(ctx context.Context) error { for _, job := range jobs { job.Run = run } - updates := newJobStatusResolver(jobs, vars).Resolve(ctx) + updates := resolver.Resolve(ctx) for _, job := range jobs { if status, ok := updates[job.ID]; ok { job.Status = status @@ -223,26 +253,18 @@ func checkJobsOfRun(ctx context.Context, run *actions_model.ActionRun) (jobs, up } return nil }); err != nil { - return nil, nil, err + return nil, nil, nil, err } - return jobs, updatedJobs, nil -} - -func NotifyWorkflowRunStatusUpdateWithReload(ctx context.Context, job *actions_model.ActionRunJob) { - job.Run = nil - if err := job.LoadAttributes(ctx); err != nil { - log.Error("LoadAttributes: %v", err) - return - } - notify_service.WorkflowRunStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job.Run) + return jobs, updatedJobs, resolver.cancelledJobs, nil } type jobStatusResolver struct { - statuses map[int64]actions_model.Status - needs map[int64][]int64 - jobMap map[int64]*actions_model.ActionRunJob - vars map[string]string + statuses 
map[int64]actions_model.Status + needs map[int64][]int64 + jobMap map[int64]*actions_model.ActionRunJob + vars map[string]string + cancelledJobs []*actions_model.ActionRunJob } func newJobStatusResolver(jobs actions_model.ActionJobList, vars map[string]string) *jobStatusResolver { @@ -341,9 +363,12 @@ func (r *jobStatusResolver) resolve(ctx context.Context) map[int64]actions_model newStatus := util.Iif(shouldStartJob, actions_model.StatusWaiting, actions_model.StatusSkipped) if newStatus == actions_model.StatusWaiting { - newStatus, err = PrepareToStartJobWithConcurrency(ctx, actionRunJob) + var cancelledJobs []*actions_model.ActionRunJob + newStatus, cancelledJobs, err = PrepareToStartJobWithConcurrency(ctx, actionRunJob) if err != nil { log.Error("ShouldBlockJobByConcurrency failed, this job will stay blocked: job: %d, err: %v", id, err) + } else { + r.cancelledJobs = append(r.cancelledJobs, cancelledJobs...) } } @@ -359,8 +384,16 @@ func updateConcurrencyEvaluationForJobWithNeeds(ctx context.Context, actionRunJo return nil // for testing purpose only, no repo, no evaluation } - err := EvaluateJobConcurrencyFillModel(ctx, actionRunJob.Run, actionRunJob, vars, nil) - if err != nil { + // Legacy jobs (created before migration v331) have RunAttemptID=0 and no attempt record. 
+ var attempt *actions_model.ActionRunAttempt + if actionRunJob.RunAttemptID > 0 { + var err error + attempt, err = actions_model.GetRunAttemptByRepoAndID(ctx, actionRunJob.RepoID, actionRunJob.RunAttemptID) + if err != nil { + return fmt.Errorf("GetRunAttemptByRepoAndID: %w", err) + } + } + if err := EvaluateJobConcurrencyFillModel(ctx, actionRunJob.Run, attempt, actionRunJob, vars, nil); err != nil { return fmt.Errorf("evaluate job concurrency: %w", err) } diff --git a/services/actions/job_emitter_test.go b/services/actions/job_emitter_test.go index 5ab1c0846d..11998e01b2 100644 --- a/services/actions/job_emitter_test.go +++ b/services/actions/job_emitter_test.go @@ -144,23 +144,36 @@ func Test_checkRunConcurrency_NoDuplicateConcurrencyGroupCheck(t *testing.T) { assert.NoError(t, unittest.PrepareTestDatabase()) ctx := t.Context() - // Run A: the triggering run with a concurrency group. + // Run A: the triggering run of attempt A runA := &actions_model.ActionRun{ + RepoID: 4, + OwnerID: 1, + TriggerUserID: 1, + WorkflowID: "test.yml", + Index: 9901, + Ref: "refs/heads/main", + Status: actions_model.StatusRunning, + } + assert.NoError(t, db.Insert(ctx, runA)) + + // Attempt A: an attempt of run A with concurrency group "test-cg" + runAAttempt := &actions_model.ActionRunAttempt{ RepoID: 4, - OwnerID: 1, - TriggerUserID: 1, - WorkflowID: "test.yml", - Index: 9901, - Ref: "refs/heads/main", + RunID: runA.ID, + Attempt: 1, Status: actions_model.StatusRunning, ConcurrencyGroup: "test-cg", } - assert.NoError(t, db.Insert(ctx, runA)) + assert.NoError(t, db.Insert(ctx, runAAttempt)) + _, err := db.Exec(t.Context(), "UPDATE `action_run` SET latest_attempt_id = ? WHERE id = ?", runAAttempt.ID, runA.ID) + assert.NoError(t, err) // A done job for run A with the same ConcurrencyGroup. // This triggers the job-level concurrency check in checkRunConcurrency. 
jobADone := &actions_model.ActionRunJob{ RunID: runA.ID, + RunAttemptID: runAAttempt.ID, + AttemptJobID: 1, RepoID: 4, OwnerID: 1, JobID: "job1", @@ -170,31 +183,45 @@ } assert.NoError(t, db.Insert(ctx, jobADone)) - // Blocked run B competing for the same concurrency group. + // Run B: a run blocked by concurrency runB := &actions_model.ActionRun{ - RepoID: 4, - OwnerID: 1, - TriggerUserID: 1, - WorkflowID: "test.yml", - Index: 9902, - Ref: "refs/heads/main", - Status: actions_model.StatusBlocked, - ConcurrencyGroup: "test-cg", + RepoID: 4, + OwnerID: 1, + TriggerUserID: 1, + WorkflowID: "test.yml", + Index: 9902, + Ref: "refs/heads/main", + Status: actions_model.StatusBlocked, } assert.NoError(t, db.Insert(ctx, runB)) + // Attempt B: a blocked attempt of run B + runBAttempt := &actions_model.ActionRunAttempt{ + RepoID: 4, + RunID: runB.ID, + Attempt: 1, + Status: actions_model.StatusBlocked, + ConcurrencyGroup: "test-cg", + } + assert.NoError(t, db.Insert(ctx, runBAttempt)) + _, err = db.Exec(t.Context(), "UPDATE `action_run` SET latest_attempt_id = ? WHERE id = ?", runBAttempt.ID, runB.ID) + assert.NoError(t, err) + // A blocked job belonging to run B (no job-level concurrency group). 
jobBBlocked := &actions_model.ActionRunJob{ - RunID: runB.ID, - RepoID: 4, - OwnerID: 1, - JobID: "job1", - Name: "job1", - Status: actions_model.StatusBlocked, + RunID: runB.ID, + RunAttemptID: runBAttempt.ID, + AttemptJobID: 1, + RepoID: 4, + OwnerID: 1, + JobID: "job1", + Name: "job1", + Status: actions_model.StatusBlocked, } assert.NoError(t, db.Insert(ctx, jobBBlocked)) - jobs, _, err := checkRunConcurrency(ctx, runA) + runA, _, _ = db.GetByID[actions_model.ActionRun](t.Context(), runA.ID) + jobs, _, _, err := checkRunConcurrency(ctx, runA) assert.NoError(t, err) if assert.Len(t, jobs, 1) { diff --git a/services/actions/notifier.go b/services/actions/notifier.go index 5f7ee6fcea..c3b2003b3c 100644 --- a/services/actions/notifier.go +++ b/services/actions/notifier.go @@ -815,7 +815,7 @@ func (n *actionsNotifier) WorkflowRunStatusUpdate(ctx context.Context, repo *rep log.Error("GetActionWorkflow: %v", err) return } - convertedRun, err := convert.ToActionWorkflowRun(ctx, repo, run) + convertedRun, err := convert.ToActionWorkflowRun(ctx, repo, run, nil) if err != nil { log.Error("ToActionWorkflowRun: %v", err) return diff --git a/services/actions/notify.go b/services/actions/notify.go new file mode 100644 index 0000000000..e8b05c9fec --- /dev/null +++ b/services/actions/notify.go @@ -0,0 +1,144 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. +// SPDX-License-Identifier: MIT + +package actions + +import ( + "context" + + actions_model "code.gitea.io/gitea/models/actions" + "code.gitea.io/gitea/modules/log" + notify_service "code.gitea.io/gitea/services/notify" +) + +// NotifyWorkflowJobsAndRunsStatusUpdate notifies status changes for a batch of jobs and the runs they affect. +// Use it when a workflow operation updates multiple jobs and runs. 
+func NotifyWorkflowJobsAndRunsStatusUpdate(ctx context.Context, jobs []*actions_model.ActionRunJob) { + if len(jobs) == 0 { + return + } + + // The input jobs may belong to different runs, so track each affected run. + runs := make(map[int64]*actions_model.ActionRun, len(jobs)) + jobsByRunID := make(map[int64][]*actions_model.ActionRunJob) + + for _, job := range jobs { + if err := job.LoadAttributes(ctx); err != nil { + log.Error("Failed to load job attributes: %v", err) + continue + } + CreateCommitStatusForRunJobs(ctx, job.Run, job) + + if _, ok := runs[job.RunID]; !ok { + runs[job.RunID] = job.Run + } + if _, ok := jobsByRunID[job.RunID]; !ok { + jobsByRunID[job.RunID] = make([]*actions_model.ActionRunJob, 0) + } + jobsByRunID[job.RunID] = append(jobsByRunID[job.RunID], job) + } + + for _, run := range runs { + NotifyWorkflowRunStatusUpdate(ctx, run) + } + + for _, jobs := range jobsByRunID { + NotifyWorkflowJobsStatusUpdate(ctx, jobs...) + } +} + +// NotifyWorkflowRunStatusUpdateWithReload reloads the run before notifying its status update. +// Use it when only repo/run IDs are available or when the in-memory run may be stale after job updates. +func NotifyWorkflowRunStatusUpdateWithReload(ctx context.Context, repoID, runID int64) { + run, err := actions_model.GetRunByRepoAndID(ctx, repoID, runID) + if err != nil { + log.Error("GetRunByRepoAndID: %v", err) + return + } + NotifyWorkflowRunStatusUpdate(ctx, run) +} + +// NotifyWorkflowRunStatusUpdate notifies a run status update using the latest attempt trigger user when available. +// Use it for run-level notifications when the caller already has the run model loaded. 
+func NotifyWorkflowRunStatusUpdate(ctx context.Context, run *actions_model.ActionRun) { + if err := run.LoadAttributes(ctx); err != nil { + log.Error("run.LoadAttributes: %v", err) + return + } + triggerUser := run.TriggerUser + if run.LatestAttemptID > 0 { + attempt, err := actions_model.GetRunAttemptByRepoAndID(ctx, run.RepoID, run.LatestAttemptID) + if err != nil { + log.Error("GetRunAttemptByRepoAndID: %v", err) + return + } + if err := attempt.LoadAttributes(ctx); err != nil { + log.Error("attempt.LoadAttributes: %v", err) + return + } + triggerUser = attempt.TriggerUser + } + notify_service.WorkflowRunStatusUpdate(ctx, run.Repo, triggerUser, run) +} + +// NotifyWorkflowJobsStatusUpdate notifies status updates for jobs without task. +// Use it for batch or single-job notifications after state changes. +func NotifyWorkflowJobsStatusUpdate(ctx context.Context, jobs ...*actions_model.ActionRunJob) { + jobsByAttempt := make(map[int64][]*actions_model.ActionRunJob) + for _, job := range jobs { + if _, ok := jobsByAttempt[job.RunAttemptID]; !ok { + jobsByAttempt[job.RunAttemptID] = make([]*actions_model.ActionRunJob, 0) + } + jobsByAttempt[job.RunAttemptID] = append(jobsByAttempt[job.RunAttemptID], job) + } + + for attemptID, js := range jobsByAttempt { + if attemptID == 0 { + for _, job := range js { + if err := job.LoadAttributes(ctx); err != nil { + log.Error("job.LoadAttributes: %v", err) + continue + } + notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil) + } + continue + } + + attempt, err := actions_model.GetRunAttemptByRepoAndID(ctx, js[0].RepoID, attemptID) + if err != nil { + log.Error("GetRunAttemptByRepoAndID: %v", err) + continue + } + if err := attempt.LoadAttributes(ctx); err != nil { + log.Error("attempt.LoadAttributes: %v", err) + continue + } + for _, job := range js { + notify_service.WorkflowJobStatusUpdate(ctx, attempt.Run.Repo, attempt.TriggerUser, job, nil) + } + } +} + +// 
NotifyWorkflowJobStatusUpdateWithTask notifies a single job status update when a concrete task is available. +// Use it for runner/task lifecycle callbacks so the notification includes the originating task context. +func NotifyWorkflowJobStatusUpdateWithTask(ctx context.Context, job *actions_model.ActionRunJob, task *actions_model.ActionTask) { + if job.RunAttemptID == 0 { + if err := job.LoadAttributes(ctx); err != nil { + log.Error("job.LoadAttributes: %v", err) + return + } + notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, task) + return + } + + attempt, err := actions_model.GetRunAttemptByRepoAndID(ctx, job.RepoID, job.RunAttemptID) + if err != nil { + log.Error("GetRunAttemptByRepoAndID: %v", err) + return + } + if err := attempt.LoadAttributes(ctx); err != nil { + log.Error("attempt.LoadAttributes: %v", err) + return + } + notify_service.WorkflowJobStatusUpdate(ctx, attempt.Run.Repo, attempt.TriggerUser, job, task) +} diff --git a/services/actions/rerun.go b/services/actions/rerun.go index 1596d9bfc5..f253181a8d 100644 --- a/services/actions/rerun.go +++ b/services/actions/rerun.go @@ -6,57 +6,312 @@ package actions import ( "context" "fmt" + "slices" actions_model "code.gitea.io/gitea/models/actions" "code.gitea.io/gitea/models/db" repo_model "code.gitea.io/gitea/models/repo" "code.gitea.io/gitea/models/unit" + user_model "code.gitea.io/gitea/models/user" "code.gitea.io/gitea/modules/container" + "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" - notify_service "code.gitea.io/gitea/services/notify" "github.com/nektos/act/pkg/model" "go.yaml.in/yaml/v4" - "xorm.io/builder" ) -// GetFailedRerunJobs returns all failed jobs and their downstream dependent jobs that need to be rerun -func GetFailedRerunJobs(allJobs []*actions_model.ActionRunJob) []*actions_model.ActionRunJob { - rerunJobIDSet := make(container.Set[int64]) +// GetFailedJobsForRerun returns the failed or cancelled jobs in a run. 
+func GetFailedJobsForRerun(allJobs []*actions_model.ActionRunJob) []*actions_model.ActionRunJob { var jobsToRerun []*actions_model.ActionRunJob for _, job := range allJobs { if job.Status == actions_model.StatusFailure || job.Status == actions_model.StatusCancelled { - for _, j := range GetAllRerunJobs(job, allJobs) { - if !rerunJobIDSet.Contains(j.ID) { - rerunJobIDSet.Add(j.ID) - jobsToRerun = append(jobsToRerun, j) - } - } + jobsToRerun = append(jobsToRerun, job) } } return jobsToRerun } -// GetAllRerunJobs returns the target job and all jobs that transitively depend on it. -// Downstream jobs are included regardless of their current status. -func GetAllRerunJobs(job *actions_model.ActionRunJob, allJobs []*actions_model.ActionRunJob) []*actions_model.ActionRunJob { - rerunJobs := []*actions_model.ActionRunJob{job} - rerunJobsIDSet := make(container.Set[string]) - rerunJobsIDSet.Add(job.JobID) +// RerunWorkflowRunJobs reruns the given jobs of a workflow run. +// An empty jobsToRerun means rerunning the whole run. Otherwise jobsToRerun contains only the user-requested target jobs; +// downstream dependent jobs are expanded internally while building the rerun plan. +// +// The three stages below (legacy backfill, plan build, plan exec) deliberately run in separate DB transactions +// rather than one big outer transaction: +// - execRerunPlan performs slow work (loading variables, YAML unmarshal, concurrency expression evaluation) +// before opening its own transaction, so the tx stays focused on inserts/updates. +// - The legacy backfill is idempotent-friendly: if it succeeds but a later stage fails, a subsequent rerun +// will observe run.LatestAttemptID != 0 and skip the backfill, continuing naturally. No data corruption +// or stuck state results from partial progress. +// +// Fast validations that can catch failures early (workflow disabled, run not done, etc.) 
are therefore +// pushed into validateRerun so we rarely enter createOriginalAttemptForLegacyRun only to fail afterwards. +func RerunWorkflowRunJobs(ctx context.Context, repo *repo_model.Repository, run *actions_model.ActionRun, triggerUser *user_model.User, jobsToRerun []*actions_model.ActionRunJob) (*actions_model.ActionRunAttempt, error) { + if err := validateRerun(ctx, run, repo, triggerUser, jobsToRerun); err != nil { + return nil, err + } + + if run.LatestAttemptID == 0 { + if err := createOriginalAttemptForLegacyRun(ctx, run); err != nil { + return nil, fmt.Errorf("create attempt for legacy run: %w", err) + } + } + + plan, err := buildRerunPlan(ctx, run, triggerUser, jobsToRerun) + if err != nil { + return nil, err + } + return execRerunPlan(ctx, plan) +} + +func validateRerun(ctx context.Context, run *actions_model.ActionRun, repo *repo_model.Repository, triggerUser *user_model.User, jobsToRerun []*actions_model.ActionRunJob) error { + if !run.Status.IsDone() { + return util.NewInvalidArgumentErrorf("this workflow run is not done") + } + if repo == nil { + return util.NewInvalidArgumentErrorf("repo is required") + } + if run.RepoID != repo.ID { + return util.NewInvalidArgumentErrorf("run %d does not belong to repo %d", run.ID, repo.ID) + } + for _, job := range jobsToRerun { + if job.RunID != run.ID { + return util.NewInvalidArgumentErrorf("job %d does not belong to workflow run %d", job.ID, run.ID) + } + } + if triggerUser == nil { + return util.NewInvalidArgumentErrorf("trigger user is required") + } + cfgUnit := repo.MustGetUnit(ctx, unit.TypeActions) + cfg := cfgUnit.ActionsConfig() + if cfg.IsWorkflowDisabled(run.WorkflowID) { + return util.NewInvalidArgumentErrorf("workflow %s is disabled", run.WorkflowID) + } + + // Legacy runs (LatestAttemptID == 0) conceptually have only attempt 1, so they can never be at the cap. + // For non-legacy runs, look up the latest attempt and reject when its number is already at the configured cap. 
+ if run.LatestAttemptID > 0 { + latestAttempt, has, err := run.GetLatestAttempt(ctx) + if err != nil { + return fmt.Errorf("GetLatestAttempt: %w", err) + } + if has && latestAttempt.Attempt >= setting.Actions.MaxRerunAttempts { + return util.NewInvalidArgumentErrorf("workflow run has reached the maximum of %d attempts", setting.Actions.MaxRerunAttempts) + } + } + + return nil +} + +// rerunPlan is a read-only snapshot of the inputs needed to execute a rerun. +// It holds no to-be-persisted entities and no intermediate evaluation results; +// execRerunPlan constructs and evaluates the new ActionRunAttempt itself. +type rerunPlan struct { + run *actions_model.ActionRun + templateAttempt *actions_model.ActionRunAttempt + templateJobs actions_model.ActionJobList + rerunJobIDs container.Set[string] + triggerUser *user_model.User +} + +// buildRerunPlan constructs a rerunPlan for the given workflow run without writing to the database. +// jobsToRerun contains only the user-requested target jobs. An empty jobsToRerun means the entire run should be rerun. +// It loads the latest attempt as a template and expands jobsToRerun to include all transitive downstream dependents. +// The construction of new-attempt and concurrency evaluation are deferred to execRerunPlan so that the plan remains a pure input snapshot. 
+func buildRerunPlan(ctx context.Context, run *actions_model.ActionRun, triggerUser *user_model.User, jobsToRerun []*actions_model.ActionRunJob) (*rerunPlan, error) { + if err := run.LoadAttributes(ctx); err != nil { + return nil, err + } + + templateAttempt, hasTemplateAttempt, err := run.GetLatestAttempt(ctx) + if err != nil { + return nil, err + } + if !hasTemplateAttempt { + return nil, util.NewNotExistErrorf("latest attempt not found") + } + + templateJobs, err := actions_model.GetRunJobsByRunAndAttemptID(ctx, run.ID, templateAttempt.ID) + if err != nil { + return nil, fmt.Errorf("load template jobs: %w", err) + } + if len(templateJobs) == 0 { + return nil, util.NewNotExistErrorf("no template jobs") + } + + plan := &rerunPlan{ + run: run, + templateAttempt: templateAttempt, + templateJobs: templateJobs, + triggerUser: triggerUser, + } + + if err := plan.expandRerunJobIDs(jobsToRerun); err != nil { + return nil, err + } + + return plan, nil +} + +// execRerunPlan executes the rerun plan built by buildRerunPlan. +// It loads run variables, constructs the new ActionRunAttempt and evaluates run-level concurrency (all outside the transaction to keep the tx short). +// Inside a single database transaction it then inserts the new attempt, clones all template jobs, evaluates job-level concurrency for rerun jobs, +// and updates the run's latest_attempt_id. +// Jobs not in the rerun set are cloned as pass-through: their status is preserved and SourceTaskID points to the original task so the UI can still display their results. +// The attempt's final status is derived only from the rerun jobs, not the pass-through jobs. +// Notifications and commit statuses are sent after the transaction commits. 
+func execRerunPlan(ctx context.Context, plan *rerunPlan) (*actions_model.ActionRunAttempt, error) { + vars, err := actions_model.GetVariablesOfRun(ctx, plan.run) + if err != nil { + return nil, fmt.Errorf("get run %d variables: %w", plan.run.ID, err) + } + + newAttempt := &actions_model.ActionRunAttempt{ + RepoID: plan.run.RepoID, + RunID: plan.run.ID, + Attempt: plan.templateAttempt.Attempt + 1, + TriggerUserID: plan.triggerUser.ID, + Status: actions_model.StatusWaiting, + } + + if plan.run.RawConcurrency != "" { + var rawConcurrency model.RawConcurrency + if err := yaml.Unmarshal([]byte(plan.run.RawConcurrency), &rawConcurrency); err != nil { + return nil, fmt.Errorf("unmarshal raw concurrency: %w", err) + } + if err := EvaluateRunConcurrencyFillModel(ctx, plan.run, newAttempt, &rawConcurrency, vars, nil); err != nil { + return nil, err + } + } + + var newJobs, newJobsToRerun actions_model.ActionJobList + var cancelledConcurrencyJobs []*actions_model.ActionRunJob + + err = db.WithTx(ctx, func(ctx context.Context) error { + newAttemptStatus, jobsToCancel, err := PrepareToStartRunWithConcurrency(ctx, newAttempt) + if err != nil { + return err + } + cancelledConcurrencyJobs = append(cancelledConcurrencyJobs, jobsToCancel...) 
+ newAttempt.Status = newAttemptStatus + shouldBlock := newAttemptStatus == actions_model.StatusBlocked + + if err := db.Insert(ctx, newAttempt); err != nil { + if _, getErr := actions_model.GetRunAttemptByRunIDAndAttemptNum(ctx, plan.run.ID, newAttempt.Attempt); getErr == nil { + return util.NewAlreadyExistErrorf("workflow run attempt %d for run %d already exists", newAttempt.Attempt, plan.run.ID) + } + return err + } + + plan.run.LatestAttemptID = newAttempt.ID + if err := actions_model.UpdateRun(ctx, plan.run, "latest_attempt_id"); err != nil { + return err + } + + hasWaitingJobs := false + newJobs = make(actions_model.ActionJobList, 0, len(plan.templateJobs)) + newJobsToRerun = make(actions_model.ActionJobList, 0, len(plan.rerunJobIDs)) + for _, templateJob := range plan.templateJobs { + newJob := cloneRunJobForAttempt(templateJob, newAttempt) + if plan.rerunJobIDs.Contains(templateJob.JobID) { + shouldBlockJob := shouldBlock || plan.hasRerunDependency(templateJob) + + newJob.Status = util.Iif(shouldBlockJob, actions_model.StatusBlocked, actions_model.StatusWaiting) + newJob.TaskID = 0 + newJob.SourceTaskID = 0 + newJob.Started = 0 + newJob.Stopped = 0 + newJob.ConcurrencyGroup = "" + newJob.ConcurrencyCancel = false + newJob.IsConcurrencyEvaluated = false + + if newJob.RawConcurrency != "" && !shouldBlockJob { + if err := EvaluateJobConcurrencyFillModel(ctx, plan.run, newAttempt, newJob, vars, nil); err != nil { + return fmt.Errorf("evaluate job concurrency: %w", err) + } + newJob.Status, jobsToCancel, err = PrepareToStartJobWithConcurrency(ctx, newJob) + if err != nil { + return fmt.Errorf("prepare to start job with concurrency: %w", err) + } + cancelledConcurrencyJobs = append(cancelledConcurrencyJobs, jobsToCancel...) 
+ } + + newJobsToRerun = append(newJobsToRerun, newJob) + } else { + newJob.TaskID = 0 + newJob.SourceTaskID = templateJob.EffectiveTaskID() + newJob.Started = templateJob.Started + newJob.Stopped = templateJob.Stopped + } + + if err := db.Insert(ctx, newJob); err != nil { + return err + } + hasWaitingJobs = hasWaitingJobs || newJob.Status == actions_model.StatusWaiting + newJobs = append(newJobs, newJob) + } + + newAttempt.Status = actions_model.AggregateJobStatus(newJobsToRerun) + if err := actions_model.UpdateRunAttempt(ctx, newAttempt, "status"); err != nil { + return err + } + + if hasWaitingJobs { + if err := actions_model.IncreaseTaskVersion(ctx, plan.run.OwnerID, plan.run.RepoID); err != nil { + return err + } + } + + return nil + }) + if err != nil { + return nil, err + } + + if err := plan.run.LoadAttributes(ctx); err != nil { + return nil, err + } + + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, cancelledConcurrencyJobs) + EmitJobsIfReadyByJobs(cancelledConcurrencyJobs) + + CreateCommitStatusForRunJobs(ctx, plan.run, newJobs...) 
+ NotifyWorkflowJobsAndRunsStatusUpdate(ctx, newJobsToRerun) + + return newAttempt, nil +} + +func (p *rerunPlan) expandRerunJobIDs(jobsToRerun []*actions_model.ActionRunJob) error { + templateJobIDs := make(container.Set[string]) + for _, job := range p.templateJobs { + templateJobIDs.Add(job.JobID) + } + + if len(jobsToRerun) == 0 { + p.rerunJobIDs = templateJobIDs + return nil + } + + rerunJobIDs := make(container.Set[string]) + for _, job := range jobsToRerun { + if !templateJobIDs.Contains(job.JobID) { + return util.NewInvalidArgumentErrorf("job %q does not exist in the latest attempt", job.JobID) + } + rerunJobIDs.Add(job.JobID) + } for { found := false - for _, j := range allJobs { - if rerunJobsIDSet.Contains(j.JobID) { + for _, job := range p.templateJobs { + if rerunJobIDs.Contains(job.JobID) { continue } - for _, need := range j.Needs { - if rerunJobsIDSet.Contains(need) { + for _, need := range job.Needs { + if rerunJobIDs.Contains(need) { found = true - rerunJobs = append(rerunJobs, j) - rerunJobsIDSet.Add(j.JobID) + rerunJobIDs.Add(job.JobID) break } } @@ -66,152 +321,100 @@ func GetAllRerunJobs(job *actions_model.ActionRunJob, allJobs []*actions_model.A } } - return rerunJobs + p.rerunJobIDs = rerunJobIDs + return nil } -// prepareRunRerun validates the run, resets its state, handles concurrency, persists the -// updated run, and fires a status-update notification. -// It returns isRunBlocked (true when the run itself is held by a concurrency group). -func prepareRunRerun(ctx context.Context, repo *repo_model.Repository, run *actions_model.ActionRun, jobs []*actions_model.ActionRunJob) (isRunBlocked bool, err error) { - if !run.Status.IsDone() { - return false, util.NewInvalidArgumentErrorf("this workflow run is not done") - } - - cfgUnit := repo.MustGetUnit(ctx, unit.TypeActions) - - // Rerun is not allowed when workflow is disabled. 
- cfg := cfgUnit.ActionsConfig() - if cfg.IsWorkflowDisabled(run.WorkflowID) { - return false, util.NewInvalidArgumentErrorf("workflow %s is disabled", run.WorkflowID) - } - - // Reset run's timestamps and status. - run.PreviousDuration = run.Duration() - run.Started = 0 - run.Stopped = 0 - run.Status = actions_model.StatusWaiting - - vars, err := actions_model.GetVariablesOfRun(ctx, run) - if err != nil { - return false, fmt.Errorf("get run %d variables: %w", run.ID, err) - } - - if run.RawConcurrency != "" { - var rawConcurrency model.RawConcurrency - if err := yaml.Unmarshal([]byte(run.RawConcurrency), &rawConcurrency); err != nil { - return false, fmt.Errorf("unmarshal raw concurrency: %w", err) +func (p *rerunPlan) hasRerunDependency(job *actions_model.ActionRunJob) bool { + for _, need := range job.Needs { + if p.rerunJobIDs.Contains(need) { + return true } + } + return false +} - if err := EvaluateRunConcurrencyFillModel(ctx, run, &rawConcurrency, vars, nil); err != nil { - return false, err - } +func cloneRunJobForAttempt(templateJob *actions_model.ActionRunJob, attempt *actions_model.ActionRunAttempt) *actions_model.ActionRunJob { + return &actions_model.ActionRunJob{ + RunID: templateJob.RunID, + RunAttemptID: attempt.ID, + RepoID: templateJob.RepoID, + OwnerID: templateJob.OwnerID, + CommitSHA: templateJob.CommitSHA, + IsForkPullRequest: templateJob.IsForkPullRequest, + Name: templateJob.Name, + Attempt: attempt.Attempt, + WorkflowPayload: slices.Clone(templateJob.WorkflowPayload), + JobID: templateJob.JobID, + AttemptJobID: templateJob.AttemptJobID, + Needs: slices.Clone(templateJob.Needs), + RunsOn: slices.Clone(templateJob.RunsOn), + Status: templateJob.Status, + RawConcurrency: templateJob.RawConcurrency, + IsConcurrencyEvaluated: templateJob.IsConcurrencyEvaluated, + ConcurrencyGroup: templateJob.ConcurrencyGroup, + ConcurrencyCancel: templateJob.ConcurrencyCancel, + TokenPermissions: templateJob.TokenPermissions, + } +} - run.Status, err = 
PrepareToStartRunWithConcurrency(ctx, run) +// createOriginalAttemptForLegacyRun creates a real attempt=1 for a legacy run and updates the existing legacy jobs and artifacts in place +// so the original execution becomes attempt-aware before the rerun plan is built and all subsequent logic can use real attempts. +// Tasks are not modified: they reference jobs by JobID, so updating jobs implicitly carries the new attempt linkage. +func createOriginalAttemptForLegacyRun(ctx context.Context, run *actions_model.ActionRun) error { + return db.WithTx(ctx, func(ctx context.Context) error { + jobs, err := actions_model.GetRunJobsByRunAndAttemptID(ctx, run.ID, 0) if err != nil { - return false, err + return fmt.Errorf("load legacy run jobs: %w", err) + } + if len(jobs) == 0 { + return fmt.Errorf("run %d has no jobs", run.ID) } - } - if err := actions_model.UpdateRun(ctx, run, "started", "stopped", "previous_duration", "status", "concurrency_group", "concurrency_cancel"); err != nil { - return false, err - } + originalAttempt := &actions_model.ActionRunAttempt{ + RepoID: run.RepoID, + RunID: run.ID, + Attempt: 1, + TriggerUserID: run.TriggerUserID, - if err := run.LoadAttributes(ctx); err != nil { - return false, err - } + // Legacy concurrency fields on ActionRun are intentionally NOT backfilled onto this original attempt. + // They only matter while a run is actively being scheduled, and backfilling them for completed legacy runs + // would add migration/runtime cost without changing any future concurrency behavior. - for _, job := range jobs { - job.Run = run - } + Status: run.Status, + Created: run.Created, + Started: run.Started, + Stopped: run.Stopped, + } - notify_service.WorkflowRunStatusUpdate(ctx, run.Repo, run.TriggerUser, run) + // Use NoAutoTime so xorm does not overwrite Created with the current time on insert. 
+ if _, err := db.GetEngine(ctx).NoAutoTime().Insert(originalAttempt); err != nil { + if _, getErr := actions_model.GetRunAttemptByRunIDAndAttemptNum(ctx, run.ID, originalAttempt.Attempt); getErr == nil { + return util.NewAlreadyExistErrorf("workflow run attempt %d for run %d already exists", originalAttempt.Attempt, run.ID) + } + return err + } - return run.Status == actions_model.StatusBlocked, nil -} - -// RerunWorkflowRunJobs reruns the given jobs of a workflow run. -// jobsToRerun must include all jobs to be rerun (the target job and its transitively dependent jobs). -// A job is blocked (waiting for dependencies) if the run itself is blocked or if any of its -// needs are also being rerun. -func RerunWorkflowRunJobs(ctx context.Context, repo *repo_model.Repository, run *actions_model.ActionRun, jobsToRerun []*actions_model.ActionRunJob) error { - if len(jobsToRerun) == 0 { - return nil - } - - isRunBlocked, err := prepareRunRerun(ctx, repo, run, jobsToRerun) - if err != nil { - return err - } - - rerunJobIDs := make(container.Set[string]) - for _, j := range jobsToRerun { - rerunJobIDs.Add(j.JobID) - } - - for _, job := range jobsToRerun { - shouldBlockJob := isRunBlocked - if !shouldBlockJob { - for _, need := range job.Needs { - if rerunJobIDs.Contains(need) { - shouldBlockJob = true - break - } + // backfill attempt related fields for jobs + for i, job := range jobs { + job.RunAttemptID = originalAttempt.ID + job.Attempt = originalAttempt.Attempt + job.AttemptJobID = int64(i + 1) + if _, err := db.GetEngine(ctx).ID(job.ID).Cols("run_attempt_id", "attempt", "attempt_job_id").Update(job); err != nil { + return fmt.Errorf("backfill legacy run jobs: %w", err) } } - if err := rerunWorkflowJob(ctx, job, shouldBlockJob); err != nil { - return err - } - } - return nil -} - -func rerunWorkflowJob(ctx context.Context, job *actions_model.ActionRunJob, shouldBlock bool) error { - status := job.Status - if !status.IsDone() { - return nil - } - - job.TaskID = 0 - 
job.Status = util.Iif(shouldBlock, actions_model.StatusBlocked, actions_model.StatusWaiting) - job.Started = 0 - job.Stopped = 0 - job.ConcurrencyGroup = "" - job.ConcurrencyCancel = false - job.IsConcurrencyEvaluated = false - - if err := job.LoadRun(ctx); err != nil { - return err - } - if err := job.Run.LoadAttributes(ctx); err != nil { - return err - } - - vars, err := actions_model.GetVariablesOfRun(ctx, job.Run) - if err != nil { - return fmt.Errorf("get run %d variables: %w", job.Run.ID, err) - } - - if job.RawConcurrency != "" && !shouldBlock { - if err := EvaluateJobConcurrencyFillModel(ctx, job.Run, job, vars, nil); err != nil { - return fmt.Errorf("evaluate job concurrency: %w", err) - } - - job.Status, err = PrepareToStartJobWithConcurrency(ctx, job) - if err != nil { - return err - } - } - - if err := db.WithTx(ctx, func(ctx context.Context) error { - updateCols := []string{"task_id", "status", "started", "stopped", "concurrency_group", "concurrency_cancel", "is_concurrency_evaluated"} - _, err := actions_model.UpdateRunJob(ctx, job, builder.Eq{"status": status}, updateCols...) - return err - }); err != nil { - return err - } - - CreateCommitStatusForRunJobs(ctx, job.Run, job) - notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, nil) - return nil + // backfill "run_attempt_id" field for artifacts + if _, err := db.GetEngine(ctx). + Where("run_id=? AND run_attempt_id=0", run.ID). + Cols("run_attempt_id"). 
+ Update(&actions_model.ActionArtifact{RunAttemptID: originalAttempt.ID}); err != nil { + return fmt.Errorf("backfill legacy artifacts: %w", err) + } + + // update "latest_attempt_id" for the run + run.LatestAttemptID = originalAttempt.ID + return actions_model.UpdateRun(ctx, run, "latest_attempt_id") + }) } diff --git a/services/actions/rerun_test.go b/services/actions/rerun_test.go index 3b4dc5483f..3077298061 100644 --- a/services/actions/rerun_test.go +++ b/services/actions/rerun_test.go @@ -4,54 +4,17 @@ package actions import ( - "context" "testing" actions_model "code.gitea.io/gitea/models/actions" + user_model "code.gitea.io/gitea/models/user" "code.gitea.io/gitea/modules/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestGetAllRerunJobs(t *testing.T) { - job1 := &actions_model.ActionRunJob{JobID: "job1"} - job2 := &actions_model.ActionRunJob{JobID: "job2", Needs: []string{"job1"}} - job3 := &actions_model.ActionRunJob{JobID: "job3", Needs: []string{"job2"}} - job4 := &actions_model.ActionRunJob{JobID: "job4", Needs: []string{"job2", "job3"}} - - jobs := []*actions_model.ActionRunJob{job1, job2, job3, job4} - - testCases := []struct { - job *actions_model.ActionRunJob - rerunJobs []*actions_model.ActionRunJob - }{ - { - job1, - []*actions_model.ActionRunJob{job1, job2, job3, job4}, - }, - { - job2, - []*actions_model.ActionRunJob{job2, job3, job4}, - }, - { - job3, - []*actions_model.ActionRunJob{job3, job4}, - }, - { - job4, - []*actions_model.ActionRunJob{job4}, - }, - } - - for _, tc := range testCases { - rerunJobs := GetAllRerunJobs(tc.job, jobs) - assert.ElementsMatch(t, tc.rerunJobs, rerunJobs) - } -} - -func TestGetFailedRerunJobs(t *testing.T) { - // IDs must be non-zero to distinguish jobs in the dedup set. 
+func TestGetFailedJobsForRerun(t *testing.T) { makeJob := func(id int64, jobID string, status actions_model.Status, needs ...string) *actions_model.ActionRunJob { return &actions_model.ActionRunJob{ID: id, JobID: jobID, Status: status, Needs: needs} } @@ -61,7 +24,7 @@ func TestGetFailedRerunJobs(t *testing.T) { makeJob(1, "job1", actions_model.StatusSuccess), makeJob(2, "job2", actions_model.StatusSkipped, "job1"), } - assert.Empty(t, GetFailedRerunJobs(jobs)) + assert.Empty(t, GetFailedJobsForRerun(jobs)) }) t.Run("single failed job with no dependents", func(t *testing.T) { @@ -69,56 +32,50 @@ func TestGetFailedRerunJobs(t *testing.T) { job2 := makeJob(2, "job2", actions_model.StatusSuccess) jobs := []*actions_model.ActionRunJob{job1, job2} - result := GetFailedRerunJobs(jobs) + result := GetFailedJobsForRerun(jobs) assert.ElementsMatch(t, []*actions_model.ActionRunJob{job1}, result) }) - t.Run("failed job pulls in downstream dependents", func(t *testing.T) { - // job1 failed; job2 depends on job1 (skipped); job3 depends on job2 (skipped) + t.Run("failed job does not pull in downstream dependents", func(t *testing.T) { job1 := makeJob(1, "job1", actions_model.StatusFailure) job2 := makeJob(2, "job2", actions_model.StatusSkipped, "job1") job3 := makeJob(3, "job3", actions_model.StatusSkipped, "job2") job4 := makeJob(4, "job4", actions_model.StatusSuccess) // unrelated, must not appear jobs := []*actions_model.ActionRunJob{job1, job2, job3, job4} - result := GetFailedRerunJobs(jobs) - assert.ElementsMatch(t, []*actions_model.ActionRunJob{job1, job2, job3}, result) + result := GetFailedJobsForRerun(jobs) + assert.ElementsMatch(t, []*actions_model.ActionRunJob{job1}, result) }) - t.Run("multiple independent failed jobs each pull in their own dependents", func(t *testing.T) { - // job1 failed -> job3 depends on job1 - // job2 failed -> job4 depends on job2 + t.Run("multiple failed jobs are returned directly", func(t *testing.T) { job1 := makeJob(1, "job1", 
actions_model.StatusFailure) job2 := makeJob(2, "job2", actions_model.StatusFailure) job3 := makeJob(3, "job3", actions_model.StatusSkipped, "job1") job4 := makeJob(4, "job4", actions_model.StatusSkipped, "job2") jobs := []*actions_model.ActionRunJob{job1, job2, job3, job4} - result := GetFailedRerunJobs(jobs) - assert.ElementsMatch(t, []*actions_model.ActionRunJob{job1, job2, job3, job4}, result) + result := GetFailedJobsForRerun(jobs) + assert.ElementsMatch(t, []*actions_model.ActionRunJob{job1, job2}, result) }) - t.Run("shared downstream dependent is not duplicated", func(t *testing.T) { - // job1 and job2 both failed; job3 depends on both + t.Run("shared downstream dependent is not included", func(t *testing.T) { job1 := makeJob(1, "job1", actions_model.StatusFailure) job2 := makeJob(2, "job2", actions_model.StatusFailure) job3 := makeJob(3, "job3", actions_model.StatusSkipped, "job1", "job2") jobs := []*actions_model.ActionRunJob{job1, job2, job3} - result := GetFailedRerunJobs(jobs) - assert.ElementsMatch(t, []*actions_model.ActionRunJob{job1, job2, job3}, result) - assert.Len(t, result, 3) // job3 must appear exactly once + result := GetFailedJobsForRerun(jobs) + assert.ElementsMatch(t, []*actions_model.ActionRunJob{job1, job2}, result) + assert.Len(t, result, 2) }) - t.Run("successful downstream job of a failed job is still included", func(t *testing.T) { - // job1 failed; job2 succeeded but depends on job1 — downstream is always rerun - // regardless of its own status (GetAllRerunJobs includes all transitive dependents) + t.Run("successful downstream job of a failed job is not included", func(t *testing.T) { job1 := makeJob(1, "job1", actions_model.StatusFailure) job2 := makeJob(2, "job2", actions_model.StatusSuccess, "job1") jobs := []*actions_model.ActionRunJob{job1, job2} - result := GetFailedRerunJobs(jobs) - assert.ElementsMatch(t, []*actions_model.ActionRunJob{job1, job2}, result) + result := GetFailedJobsForRerun(jobs) + assert.ElementsMatch(t, 
[]*actions_model.ActionRunJob{job1}, result) }) } @@ -129,7 +86,7 @@ func TestRerunValidation(t *testing.T) { jobs := []*actions_model.ActionRunJob{ {ID: 1, JobID: "job1"}, } - err := RerunWorkflowRunJobs(context.Background(), nil, runningRun, jobs) + _, err := RerunWorkflowRunJobs(t.Context(), nil, runningRun, &user_model.User{ID: 1}, jobs) require.Error(t, err) assert.ErrorIs(t, err, util.ErrInvalidArgument) }) @@ -138,7 +95,7 @@ func TestRerunValidation(t *testing.T) { jobs := []*actions_model.ActionRunJob{ {ID: 1, JobID: "job1", Status: actions_model.StatusFailure}, } - err := RerunWorkflowRunJobs(context.Background(), nil, runningRun, GetFailedRerunJobs(jobs)) + _, err := RerunWorkflowRunJobs(t.Context(), nil, runningRun, &user_model.User{ID: 1}, GetFailedJobsForRerun(jobs)) require.Error(t, err) assert.ErrorIs(t, err, util.ErrInvalidArgument) }) diff --git a/services/actions/run.go b/services/actions/run.go index 432bb19628..162e3678ae 100644 --- a/services/actions/run.go +++ b/services/actions/run.go @@ -11,7 +11,6 @@ import ( "code.gitea.io/gitea/models/db" "code.gitea.io/gitea/modules/actions/jobparser" "code.gitea.io/gitea/modules/util" - notify_service "code.gitea.io/gitea/services/notify" act_model "github.com/nektos/act/pkg/model" "go.yaml.in/yaml/v4" @@ -47,10 +46,7 @@ func PrepareRunAndInsert(ctx context.Context, content []byte, run *actions_model CreateCommitStatusForRunJobs(ctx, run, allJobs...) - notify_service.WorkflowRunStatusUpdate(ctx, run.Repo, run.TriggerUser, run) - for _, job := range allJobs { - notify_service.WorkflowJobStatusUpdate(ctx, run.Repo, run.TriggerUser, job, nil) - } + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, allJobs) return nil } @@ -58,7 +54,8 @@ func PrepareRunAndInsert(ctx context.Context, content []byte, run *actions_model // InsertRun inserts a run // The title will be cut off at 255 characters if it's longer than 255 characters. 
func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte, vars map[string]string, inputs map[string]any, wfRawConcurrency *act_model.RawConcurrency) error { - return db.WithTx(ctx, func(ctx context.Context) error { + var cancelledConcurrencyJobs []*actions_model.ActionRunJob + if err := db.WithTx(ctx, func(ctx context.Context) error { index, err := db.GetNextResourceIndex(ctx, "action_run_index", run.RepoID) if err != nil { return err @@ -67,6 +64,14 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte run.Title = util.EllipsisDisplayString(run.Title, 255) run.Status = actions_model.StatusWaiting + if wfRawConcurrency != nil { + rawConcurrency, err := yaml.Marshal(wfRawConcurrency) + if err != nil { + return fmt.Errorf("marshal raw concurrency: %w", err) + } + run.RawConcurrency = string(rawConcurrency) + } + // Insert before parsing jobs or evaluating workflow-level concurrency // so that run.ID is populated. Expressions referencing github.run_id — // in run-name, job names, runs-on, or a workflow-level concurrency @@ -76,31 +81,54 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte return err } - giteaCtx := GenerateGiteaContext(run, nil) + runAttempt := &actions_model.ActionRunAttempt{ + RepoID: run.RepoID, + RunID: run.ID, + Attempt: 1, + TriggerUserID: run.TriggerUserID, + Status: actions_model.StatusWaiting, + } + + if wfRawConcurrency != nil { + if err := EvaluateRunConcurrencyFillModel(ctx, run, runAttempt, wfRawConcurrency, vars, inputs); err != nil { + return fmt.Errorf("EvaluateRunConcurrencyFillModel: %w", err) + } + // check run (workflow-level) concurrency + var jobsToCancel []*actions_model.ActionRunJob + runAttempt.Status, jobsToCancel, err = PrepareToStartRunWithConcurrency(ctx, runAttempt) + if err != nil { + return err + } + cancelledConcurrencyJobs = append(cancelledConcurrencyJobs, jobsToCancel...) 
+ } + + if err := db.Insert(ctx, runAttempt); err != nil { + return err + } + run.LatestAttemptID = runAttempt.ID + + giteaCtx := GenerateGiteaContext(ctx, run, runAttempt, nil) jobs, err := jobparser.Parse(content, jobparser.WithVars(vars), jobparser.WithGitContext(giteaCtx.ToGitHubContext()), jobparser.WithInputs(inputs)) if err != nil { return fmt.Errorf("parse workflow: %w", err) } - titleChanged := len(jobs) > 0 && jobs[0].RunName != "" if titleChanged { run.Title = util.EllipsisDisplayString(jobs[0].RunName, 255) } - if wfRawConcurrency != nil { - if err := EvaluateRunConcurrencyFillModel(ctx, run, wfRawConcurrency, vars, inputs); err != nil { - return fmt.Errorf("EvaluateRunConcurrencyFillModel: %w", err) - } - run.Status, err = PrepareToStartRunWithConcurrency(ctx, run) - if err != nil { - return err - } + cols := []string{"latest_attempt_id"} + if titleChanged { + cols = append(cols, "title") + } + if err := actions_model.UpdateRun(ctx, run, cols...); err != nil { + return err } runJobs := make([]*actions_model.ActionRunJob, 0, len(jobs)) var hasWaitingJobs bool - for _, v := range jobs { + for i, v := range jobs { id, job := v.Job() needs := job.Needs() if err := v.SetJob(id, job.EraseNeeds()); err != nil { @@ -108,18 +136,21 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte } payload, _ := v.Marshal() - shouldBlockJob := len(needs) > 0 || run.NeedApproval || run.Status == actions_model.StatusBlocked + shouldBlockJob := runAttempt.Status == actions_model.StatusBlocked || len(needs) > 0 || run.NeedApproval job.Name = util.EllipsisDisplayString(job.Name, 255) runJob := &actions_model.ActionRunJob{ RunID: run.ID, + RunAttemptID: runAttempt.ID, RepoID: run.RepoID, OwnerID: run.OwnerID, CommitSHA: run.CommitSHA, IsForkPullRequest: run.IsForkPullRequest, Name: job.Name, + Attempt: runAttempt.Attempt, WorkflowPayload: payload, JobID: id, + AttemptJobID: int64(i + 1), Needs: needs, RunsOn: job.RunsOn(), Status: 
util.Iif(shouldBlockJob, actions_model.StatusBlocked, actions_model.StatusWaiting), @@ -139,7 +170,7 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte // do not evaluate job concurrency when it requires `needs`, the jobs with `needs` will be evaluated later by job emitter if len(needs) == 0 { - err = EvaluateJobConcurrencyFillModel(ctx, run, runJob, vars, inputs) + err = EvaluateJobConcurrencyFillModel(ctx, run, runAttempt, runJob, vars, inputs) if err != nil { return fmt.Errorf("evaluate job concurrency: %w", err) } @@ -148,10 +179,12 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte // If a job needs other jobs ("needs" is not empty), its status is set to StatusBlocked at the entry of the loop // No need to check job concurrency for a blocked job (it will be checked by job emitter later) if runJob.Status == actions_model.StatusWaiting { - runJob.Status, err = PrepareToStartJobWithConcurrency(ctx, runJob) + var jobsToCancel []*actions_model.ActionRunJob + runJob.Status, jobsToCancel, err = PrepareToStartJobWithConcurrency(ctx, runJob) if err != nil { return fmt.Errorf("prepare to start job with concurrency: %w", err) } + cancelledConcurrencyJobs = append(cancelledConcurrencyJobs, jobsToCancel...) 
} } @@ -163,15 +196,8 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte runJobs = append(runJobs, runJob) } - run.Status = actions_model.AggregateJobStatus(runJobs) - cols := []string{"status"} - if titleChanged { - cols = append(cols, "title") - } - if wfRawConcurrency != nil { - cols = append(cols, "raw_concurrency", "concurrency_group", "concurrency_cancel") - } - if err := actions_model.UpdateRun(ctx, run, cols...); err != nil { + runAttempt.Status = actions_model.AggregateJobStatus(runJobs) + if err := actions_model.UpdateRunAttempt(ctx, runAttempt, "status"); err != nil { return err } @@ -183,5 +209,12 @@ func InsertRun(ctx context.Context, run *actions_model.ActionRun, content []byte } return nil - }) + }); err != nil { + return err + } + + NotifyWorkflowJobsAndRunsStatusUpdate(ctx, cancelledConcurrencyJobs) + EmitJobsIfReadyByJobs(cancelledConcurrencyJobs) + + return nil } diff --git a/services/actions/task.go b/services/actions/task.go index 2cb10b6cd8..9dc3c9a34b 100644 --- a/services/actions/task.go +++ b/services/actions/task.go @@ -11,7 +11,6 @@ import ( actions_model "code.gitea.io/gitea/models/actions" "code.gitea.io/gitea/models/db" secret_model "code.gitea.io/gitea/models/secret" - notify_service "code.gitea.io/gitea/services/notify" runnerv1 "code.gitea.io/actions-proto-go/runner/v1" "google.golang.org/protobuf/types/known/structpb" @@ -78,7 +77,7 @@ func PickTask(ctx context.Context, runner *actions_model.ActionRunner) (*runnerv return fmt.Errorf("findTaskNeeds: %w", err) } - taskContext, err := generateTaskContext(t) + taskContext, err := generateTaskContext(ctx, t) if err != nil { return fmt.Errorf("generateTaskContext: %w", err) } @@ -102,23 +101,23 @@ func PickTask(ctx context.Context, runner *actions_model.ActionRunner) (*runnerv } CreateCommitStatusForRunJobs(ctx, job.Run, job) - notify_service.WorkflowJobStatusUpdate(ctx, job.Run.Repo, job.Run.TriggerUser, job, actionTask) + 
NotifyWorkflowJobStatusUpdateWithTask(ctx, job, actionTask) // job.Run is loaded inside the transaction before UpdateRunJob sets run.Started, // so Started is zero only on the very first pick-up of that run. if job.Run.Started.IsZero() { - NotifyWorkflowRunStatusUpdateWithReload(ctx, job) + NotifyWorkflowRunStatusUpdateWithReload(ctx, job.RepoID, job.RunID) } return task, true, nil } -func generateTaskContext(t *actions_model.ActionTask) (*structpb.Struct, error) { +func generateTaskContext(ctx context.Context, t *actions_model.ActionTask) (*structpb.Struct, error) { giteaRuntimeToken, err := CreateAuthorizationToken(t.ID, t.Job.RunID, t.JobID) if err != nil { return nil, err } - gitCtx := GenerateGiteaContext(t.Job.Run, t.Job) + gitCtx := GenerateGiteaContext(ctx, t.Job.Run, nil, t.Job) gitCtx["token"] = t.Token gitCtx["gitea_runtime_token"] = giteaRuntimeToken diff --git a/services/asymkey/sign.go b/services/asymkey/sign.go index cffefe08ae..8c28717e5d 100644 --- a/services/asymkey/sign.go +++ b/services/asymkey/sign.go @@ -338,26 +338,41 @@ Loop: return false, nil, nil, &ErrWontSign{headSigned} } case commitsSigned: - verification := ParseCommitWithSignature(ctx, headCommit) - if !verification.Verified { + verified, err := AllHeadCommitsVerified(ctx, pr, gitRepo) + if err != nil { + return false, nil, nil, err + } + if !verified { return false, nil, nil, &ErrWontSign{commitsSigned} } - // need to work out merge-base - mergeBaseCommit, err := gitrepo.MergeBase(ctx, pr.BaseRepo, baseCommit.ID.String(), headCommit.ID.String()) - if err != nil { - return false, nil, nil, err - } - commitList, err := headCommit.CommitsBeforeUntil(mergeBaseCommit) - if err != nil { - return false, nil, nil, err - } - for _, commit := range commitList { - verification := ParseCommitWithSignature(ctx, commit) - if !verification.Verified { - return false, nil, nil, &ErrWontSign{commitsSigned} - } - } } } return true, signingKey, signer, nil } + +// AllHeadCommitsVerified checks that 
every new commit in the PR head has a +// verified signature. +func AllHeadCommitsVerified(ctx context.Context, pr *issues_model.PullRequest, gitRepo *git.Repository) (bool, error) { + baseCommit, err := gitRepo.GetCommit(pr.BaseBranch) + if err != nil { + return false, err + } + headCommit, err := gitRepo.GetCommit(pr.GetGitHeadRefName()) + if err != nil { + return false, err + } + mergeBaseCommit, err := gitrepo.MergeBase(ctx, pr.BaseRepo, baseCommit.ID.String(), headCommit.ID.String()) + if err != nil { + return false, err + } + commitList, err := headCommit.CommitsBeforeUntil(mergeBaseCommit) + if err != nil { + return false, err + } + for _, commit := range commitList { + if !ParseCommitWithSignature(ctx, commit).Verified { + return false, nil + } + } + return true, nil +} diff --git a/services/automerge/automerge.go b/services/automerge/automerge.go index b3a988320b..b06fc723be 100644 --- a/services/automerge/automerge.go +++ b/services/automerge/automerge.go @@ -251,7 +251,7 @@ func handlePullRequestAutoMerge(pullID int64, sha string) { return } - if err := pull_service.CheckPullMergeable(ctx, doer, &perm, pr, pull_service.MergeCheckTypeGeneral, false); err != nil { + if err := pull_service.CheckPullMergeable(ctx, doer, &perm, pr, pull_service.MergeCheckTypeGeneral, scheduledPRM.MergeStyle, false); err != nil { if errors.Is(err, pull_service.ErrNotReadyToMerge) { log.Info("%-v was scheduled to automerge by an unauthorized user", pr) return diff --git a/services/context/context.go b/services/context/context.go index d6030808d8..8d286f6738 100644 --- a/services/context/context.go +++ b/services/context/context.go @@ -196,10 +196,6 @@ func Contexter() func(next http.Handler) http.Handler { httpcache.SetCacheControlInHeader(ctx.Resp.Header(), &httpcache.CacheControlOptions{NoTransform: true}) - if setting.Security.XFrameOptions != "unset" { - ctx.Resp.Header().Set(`X-Frame-Options`, setting.Security.XFrameOptions) - } - ctx.Data["SystemConfig"] = 
setting.Config() ctx.Data["ShowTwoFactorRequiredMessage"] = ctx.DoerNeedTwoFactorAuth() @@ -209,7 +205,6 @@ func Contexter() func(next http.Handler) http.Handler { ctx.Data["DisableStars"] = setting.Repository.DisableStars ctx.Data["EnableActions"] = setting.Actions.Enabled && !unit.TypeActions.UnitGlobalDisabled() - ctx.Data["ManifestData"] = setting.ManifestData ctx.Data["AllLangs"] = translation.AllLangs() next.ServeHTTP(ctx.Resp, ctx.Req) diff --git a/services/context/context_template.go b/services/context/context_template.go index 2c8fde6870..b63aaf4c3c 100644 --- a/services/context/context_template.go +++ b/services/context/context_template.go @@ -148,8 +148,7 @@ func (c TemplateContext) HeadMetaContentSecurityPolicy() template.HTML { // * Maybe this approach should be avoided, don't make the config system too complex, just let users use A return template.HTML(` 1 { + url := fmt.Sprintf("%s/actions/runs/%d/attempts/%d", repo.APIURL(), run.ID, attempt.Attempt-1) + previousAttemptURL = &url + } + } + return &api.ActionWorkflowRun{ - ID: run.ID, - URL: fmt.Sprintf("%s/actions/runs/%d", repo.APIURL(), run.ID), - HTMLURL: run.HTMLURL(), - RunNumber: run.Index, - StartedAt: run.Started.AsLocalTime(), - CompletedAt: run.Stopped.AsLocalTime(), - Event: run.TriggerEvent, - DisplayTitle: run.Title, - HeadBranch: git.RefName(run.Ref).BranchName(), - HeadSha: run.CommitSHA, - Status: status, - Conclusion: conclusion, - Path: fmt.Sprintf("%s@%s", run.WorkflowID, run.Ref), - Repository: ToRepo(ctx, repo, access_model.Permission{AccessMode: perm.AccessModeNone}), - TriggerActor: ToUser(ctx, run.TriggerUser, nil), - // We do not have a way to get a different User for the actor than the trigger user - Actor: ToUser(ctx, run.TriggerUser, nil), + ID: run.ID, + URL: fmt.Sprintf("%s/actions/runs/%d", repo.APIURL(), run.ID), + PreviousAttemptURL: previousAttemptURL, + HTMLURL: run.HTMLURL(), + RunNumber: run.Index, + RunAttempt: runAttempt, + StartedAt: startedAt, + CompletedAt: 
completedAt, + Event: run.TriggerEvent, + DisplayTitle: run.Title, + HeadBranch: git.RefName(run.Ref).BranchName(), + HeadSha: run.CommitSHA, + Status: status, + Conclusion: conclusion, + Path: fmt.Sprintf("%s@%s", run.WorkflowID, run.Ref), + Repository: ToRepo(ctx, repo, access_model.Permission{AccessMode: perm.AccessModeNone}), + TriggerActor: ToUser(ctx, triggerUser, nil), + Actor: ToUser(ctx, actor, nil), + }, nil } @@ -329,9 +363,9 @@ func ToActionWorkflowJob(ctx context.Context, repo *repo_model.Repository, task var runnerName string var steps []*api.ActionWorkflowStep - if job.TaskID != 0 { + if effectiveTaskID := job.EffectiveTaskID(); effectiveTaskID != 0 { if task == nil { - task, _, err = db.GetByID[actions_model.ActionTask](ctx, job.TaskID) + task, _, err = db.GetByID[actions_model.ActionTask](ctx, effectiveTaskID) if err != nil { return nil, err } diff --git a/services/git/compare.go b/services/git/compare.go index 251a035058..a8c2980112 100644 --- a/services/git/compare.go +++ b/services/git/compare.go @@ -5,6 +5,7 @@ package git import ( "context" + "errors" "fmt" repo_model "code.gitea.io/gitea/models/repo" @@ -43,23 +44,22 @@ func (ci *CompareInfo) DirectComparison() bool { } // GetCompareInfo generates and returns compare information between base and head branches of repositories. -func GetCompareInfo(ctx context.Context, baseRepo, headRepo *repo_model.Repository, headGitRepo *git.Repository, baseRef, headRef git.RefName, directComparison, fileOnly bool) (_ *CompareInfo, err error) { - compareInfo := &CompareInfo{ +// It does its best to fill in as many of the fields as it can.
+func GetCompareInfo(ctx context.Context, baseRepo, headRepo *repo_model.Repository, headGitRepo *git.Repository, baseRef, headRef git.RefName, directComparison, fileOnly bool) (compareInfo CompareInfo, err error) { + baseCommitID, err1 := gitrepo.GetFullCommitID(ctx, baseRepo, baseRef.String()) + headCommitID, err2 := gitrepo.GetFullCommitID(ctx, headRepo, headRef.String()) + compareInfo = CompareInfo{ BaseRepo: baseRepo, BaseRef: baseRef, + BaseCommitID: baseCommitID, HeadRepo: headRepo, HeadGitRepo: headGitRepo, HeadRef: headRef, + HeadCommitID: headCommitID, CompareSeparator: util.Iif(directComparison, "..", "..."), } - - compareInfo.BaseCommitID, err = gitrepo.GetFullCommitID(ctx, baseRepo, baseRef.String()) - if err != nil { - return nil, err - } - compareInfo.HeadCommitID, err = gitrepo.GetFullCommitID(ctx, headRepo, headRef.String()) - if err != nil { - return nil, err + if err1 != nil || err2 != nil { + return compareInfo, errors.Join(err1, err2) } // if they are not the same repository, then we need to fetch the base commit into the head repository @@ -68,7 +68,7 @@ func GetCompareInfo(ctx context.Context, baseRepo, headRepo *repo_model.Reposito exist := headGitRepo.IsReferenceExist(compareInfo.BaseCommitID) if !exist { if err := gitrepo.FetchRemoteCommit(ctx, headRepo, baseRepo, compareInfo.BaseCommitID); err != nil { - return nil, fmt.Errorf("FetchRemoteCommit: %w", err) + return compareInfo, fmt.Errorf("FetchRemoteCommit: %w", err) } } } @@ -76,7 +76,7 @@ func GetCompareInfo(ctx context.Context, baseRepo, headRepo *repo_model.Reposito if !directComparison { compareInfo.MergeBase, err = gitrepo.MergeBase(ctx, headRepo, compareInfo.BaseCommitID, compareInfo.HeadCommitID) if err != nil { - return nil, fmt.Errorf("MergeBase: %w", err) + return compareInfo, fmt.Errorf("MergeBase: %w", err) } } else { compareInfo.MergeBase = compareInfo.BaseCommitID @@ -90,7 +90,7 @@ func GetCompareInfo(ctx context.Context, baseRepo, headRepo *repo_model.Reposito // 
Otherwise, commits newly pushed to the base branch would also be included, which is incorrect. compareInfo.Commits, err = headGitRepo.ShowPrettyFormatLogToList(ctx, compareInfo.MergeBase+".."+compareInfo.HeadCommitID) if err != nil { - return nil, fmt.Errorf("ShowPrettyFormatLogToList: %w", err) + return compareInfo, fmt.Errorf("ShowPrettyFormatLogToList: %w", err) } } else { compareInfo.Commits = []*git.Commit{} @@ -100,8 +100,5 @@ func GetCompareInfo(ctx context.Context, baseRepo, headRepo *repo_model.Reposito // This probably should be removed as we need to use shortstat elsewhere // Now there is git diff --shortstat but this appears to be slower than simply iterating with --nameonly compareInfo.NumFiles, err = headGitRepo.GetDiffNumChangedFiles(compareInfo.BaseCommitID, compareInfo.HeadCommitID, directComparison) - if err != nil { - return nil, err - } - return compareInfo, nil + return compareInfo, err } diff --git a/services/issue/assignee.go b/services/issue/assignee.go index 5a64c722b3..44024389cf 100644 --- a/services/issue/assignee.go +++ b/services/issue/assignee.go @@ -7,13 +7,10 @@ import ( "context" issues_model "code.gitea.io/gitea/models/issues" - "code.gitea.io/gitea/models/organization" - "code.gitea.io/gitea/models/perm" access_model "code.gitea.io/gitea/models/perm/access" repo_model "code.gitea.io/gitea/models/repo" - "code.gitea.io/gitea/models/unit" user_model "code.gitea.io/gitea/models/user" - "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/container" notify_service "code.gitea.io/gitea/services/notify" ) @@ -62,267 +59,85 @@ func ToggleAssigneeWithNotify(ctx context.Context, issue *issues_model.Issue, do return removed, comment, err } -// ReviewRequest add or remove a review request from a user for this PR, and make comment for it. 
-func ReviewRequest(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, permDoer *access_model.Permission, reviewer *user_model.User, isAdd bool) (comment *issues_model.Comment, err error) { - err = isValidReviewRequest(ctx, reviewer, doer, isAdd, issue, permDoer) - if err != nil { - return nil, err +// UpdateAssignees is a helper function to add or delete one or multiple issue assignee(s) +// Deleting is done the GitHub way (quote from their api documentation): +// https://developer.github.com/v3/issues/#edit-an-issue +// "assignees" (array): Logins for Users to assign to this issue. +// Pass one or more user logins to replace the set of assignees on this Issue. +// Send an empty array ([]) to clear all assignees from the Issue. +func UpdateAssignees(ctx context.Context, issue *issues_model.Issue, oneAssignee string, multipleAssignees []string, doer *user_model.User) (err error) { + uniqueAssignees := container.SetOf(multipleAssignees...) + + // Keep the old assignee thingy for compatibility reasons + if oneAssignee != "" { + uniqueAssignees.Add(oneAssignee) } - if isAdd { - comment, err = issues_model.AddReviewRequest(ctx, issue, reviewer, doer, false) - } else { - comment, err = issues_model.RemoveReviewRequest(ctx, issue, reviewer, doer) - } - - if err != nil { - return nil, err - } - - if comment != nil { - notify_service.PullRequestReviewRequest(ctx, doer, issue, reviewer, isAdd, comment) - } - - return comment, err -} - -// isValidReviewRequest Check permission for ReviewRequest -func isValidReviewRequest(ctx context.Context, reviewer, doer *user_model.User, isAdd bool, issue *issues_model.Issue, permDoer *access_model.Permission) error { - if reviewer.IsOrganization() { - return issues_model.ErrNotValidReviewRequest{ - Reason: "Organization can't be added as reviewer", - UserID: doer.ID, - RepoID: issue.Repo.ID, + // Loop through all assignees to add them + allNewAssignees := make([]*user_model.User, 0, len(uniqueAssignees)) + for _, 
assigneeName := range uniqueAssignees.Values() { + assignee, err := user_model.GetUserByName(ctx, assigneeName) + if err != nil { + return err } - } - if doer.IsOrganization() { - return issues_model.ErrNotValidReviewRequest{ - Reason: "Organization can't be doer to add reviewer", - UserID: doer.ID, - RepoID: issue.Repo.ID, + + if user_model.IsUserBlockedBy(ctx, doer, assignee.ID) { + return user_model.ErrBlockedUser } + + allNewAssignees = append(allNewAssignees, assignee) } - permReviewer, err := access_model.GetIndividualUserRepoPermission(ctx, issue.Repo, reviewer) - if err != nil { + // Delete all old assignees not passed + if err = DeleteNotPassedAssignee(ctx, issue, doer, allNewAssignees); err != nil { return err } - if permDoer == nil { - permDoer = new(access_model.Permission) - *permDoer, err = access_model.GetDoerRepoPermission(ctx, issue.Repo, doer) + // Add all new assignees + // Update the assignee. The function will check if the user exists, is already + // assigned (which he shouldn't as we deleted all assignees before) and + // has access to the repo. 
+ for _, assignee := range allNewAssignees { + // Extra method to prevent double adding (which would result in removing) + _, err = AddAssigneeIfNotAssigned(ctx, issue, doer, assignee.ID, true) if err != nil { return err } } - lastReview, err := issues_model.GetReviewByIssueIDAndUserID(ctx, issue.ID, reviewer.ID) - if err != nil && !issues_model.IsErrReviewNotExist(err) { - return err - } - - canDoerChangeReviewRequests := CanDoerChangeReviewRequests(ctx, doer, issue.Repo, issue.PosterID) - - if isAdd { - if !permReviewer.CanAccessAny(perm.AccessModeRead, unit.TypePullRequests) { - return issues_model.ErrNotValidReviewRequest{ - Reason: "Reviewer can't read", - UserID: doer.ID, - RepoID: issue.Repo.ID, - } - } - - if reviewer.ID == issue.PosterID && issue.OriginalAuthorID == 0 { - return issues_model.ErrNotValidReviewRequest{ - Reason: "poster of pr can't be reviewer", - UserID: doer.ID, - RepoID: issue.Repo.ID, - } - } - - if canDoerChangeReviewRequests { - return nil - } - - if doer.ID == issue.PosterID && issue.OriginalAuthorID == 0 && lastReview != nil && lastReview.Type != issues_model.ReviewTypeRequest { - return nil - } - - return issues_model.ErrNotValidReviewRequest{ - Reason: "Doer can't choose reviewer", - UserID: doer.ID, - RepoID: issue.Repo.ID, - } - } - - if canDoerChangeReviewRequests { - return nil - } - - if lastReview != nil && lastReview.Type == issues_model.ReviewTypeRequest && lastReview.ReviewerID == doer.ID { - return nil - } - - return issues_model.ErrNotValidReviewRequest{ - Reason: "Doer can't remove reviewer", - UserID: doer.ID, - RepoID: issue.Repo.ID, - } -} - -// isValidTeamReviewRequest Check permission for ReviewRequest Team -func isValidTeamReviewRequest(ctx context.Context, reviewer *organization.Team, doer *user_model.User, isAdd bool, issue *issues_model.Issue) error { - if doer.IsOrganization() { - return issues_model.ErrNotValidReviewRequest{ - Reason: "Organization can't be doer to add reviewer", - UserID: doer.ID, - RepoID: 
issue.Repo.ID, - } - } - - canDoerChangeReviewRequests := CanDoerChangeReviewRequests(ctx, doer, issue.Repo, issue.PosterID) - - if isAdd { - if issue.Repo.IsPrivate { - hasTeam := organization.HasTeamRepo(ctx, reviewer.OrgID, reviewer.ID, issue.RepoID) - - if !hasTeam { - return issues_model.ErrNotValidReviewRequest{ - Reason: "Reviewing team can't read repo", - UserID: doer.ID, - RepoID: issue.Repo.ID, - } - } - } - - if canDoerChangeReviewRequests { - return nil - } - - return issues_model.ErrNotValidReviewRequest{ - Reason: "Doer can't choose reviewer", - UserID: doer.ID, - RepoID: issue.Repo.ID, - } - } - - if canDoerChangeReviewRequests { - return nil - } - - return issues_model.ErrNotValidReviewRequest{ - Reason: "Doer can't remove reviewer", - UserID: doer.ID, - RepoID: issue.Repo.ID, - } -} - -// TeamReviewRequest add or remove a review request from a team for this PR, and make comment for it. -func TeamReviewRequest(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, reviewer *organization.Team, isAdd bool) (comment *issues_model.Comment, err error) { - err = isValidTeamReviewRequest(ctx, reviewer, doer, isAdd, issue) - if err != nil { - return nil, err - } - if isAdd { - comment, err = issues_model.AddTeamReviewRequest(ctx, issue, reviewer, doer, false) - } else { - comment, err = issues_model.RemoveTeamReviewRequest(ctx, issue, reviewer, doer) - } - - if err != nil { - return nil, err - } - - if comment == nil || !isAdd { - return nil, nil //nolint:nilnil // return nil because no comment was created or it is a removal - } - - return comment, teamReviewRequestNotify(ctx, issue, doer, reviewer, isAdd, comment) -} - -func ReviewRequestNotify(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, reviewNotifiers []*ReviewRequestNotifier) { - for _, reviewNotifier := range reviewNotifiers { - if reviewNotifier.Reviewer != nil { - notify_service.PullRequestReviewRequest(ctx, issue.Poster, issue, reviewNotifier.Reviewer, 
reviewNotifier.IsAdd, reviewNotifier.Comment) - } else if reviewNotifier.ReviewTeam != nil { - if err := teamReviewRequestNotify(ctx, issue, issue.Poster, reviewNotifier.ReviewTeam, reviewNotifier.IsAdd, reviewNotifier.Comment); err != nil { - log.Error("teamReviewRequestNotify: %v", err) - } - } - } -} - -// teamReviewRequestNotify notify all user in this team -func teamReviewRequestNotify(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, reviewer *organization.Team, isAdd bool, comment *issues_model.Comment) error { - // notify all user in this team - if err := comment.LoadIssue(ctx); err != nil { - return err - } - - members, err := organization.GetTeamMembers(ctx, &organization.SearchMembersOptions{ - TeamID: reviewer.ID, - }) - if err != nil { - return err - } - - for _, member := range members { - if member.ID == comment.Issue.PosterID { - continue - } - comment.AssigneeID = member.ID - notify_service.PullRequestReviewRequest(ctx, doer, issue, member, isAdd, comment) - } - return err } -// CanDoerChangeReviewRequests returns if the doer can add/remove review requests of a PR -func CanDoerChangeReviewRequests(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, posterID int64) bool { - if repo.IsArchived { - return false - } - // The poster of the PR can change the reviewers - if doer.ID == posterID { - return true - } - - // The owner of the repo can change the reviewers - if doer.ID == repo.OwnerID { - return true - } - - // Collaborators of the repo can change the reviewers - isCollaborator, err := repo_model.IsCollaborator(ctx, repo.ID, doer.ID) +// AddAssigneeIfNotAssigned adds an assignee only if he isn't already assigned to the issue. 
+// Also checks for access of assigned user +func AddAssigneeIfNotAssigned(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, assigneeID int64, notify bool) (comment *issues_model.Comment, err error) { + assignee, err := user_model.GetUserByID(ctx, assigneeID) if err != nil { - log.Error("IsCollaborator: %v", err) - return false - } - if isCollaborator { - return true + return nil, err } - // If the repo's owner is an organization, members of teams with read permission on pull requests can change reviewers - if repo.Owner.IsOrganization() { - teams, err := organization.GetTeamsWithAccessToAnyRepoUnit(ctx, repo.OwnerID, repo.ID, perm.AccessModeRead, unit.TypePullRequests) - if err != nil { - log.Error("GetTeamsWithAccessToRepo: %v", err) - return false - } - for _, team := range teams { - if !team.UnitEnabled(ctx, unit.TypePullRequests) { - continue - } - isMember, err := organization.IsTeamMember(ctx, repo.OwnerID, team.ID, doer.ID) - if err != nil { - log.Error("IsTeamMember: %v", err) - continue - } - if isMember { - return true - } - } + // Check if the user is already assigned + isAssigned, err := issues_model.IsUserAssignedToIssue(ctx, issue, assignee) + if err != nil { + return nil, err + } + if isAssigned { + // nothing to do + return nil, nil //nolint:nilnil // return nil because the user is already assigned } - return false + valid, err := access_model.CanBeAssigned(ctx, assignee, issue.Repo, issue.IsPull) + if err != nil { + return nil, err + } + if !valid { + return nil, repo_model.ErrUserDoesNotHaveAccessToRepo{UserID: assigneeID, RepoName: issue.Repo.Name} + } + + if notify { + _, comment, err = ToggleAssigneeWithNotify(ctx, issue, doer, assigneeID) + return comment, err + } + _, comment, err = issues_model.ToggleIssueAssignee(ctx, issue, doer, assigneeID) + return comment, err } diff --git a/services/issue/issue.go b/services/issue/issue.go index 9beb4c46ec..5b57b2453e 100644 --- a/services/issue/issue.go +++ 
b/services/issue/issue.go @@ -15,7 +15,6 @@ import ( repo_model "code.gitea.io/gitea/models/repo" system_model "code.gitea.io/gitea/models/system" user_model "code.gitea.io/gitea/models/user" - "code.gitea.io/gitea/modules/container" "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/gitrepo" "code.gitea.io/gitea/modules/log" @@ -131,55 +130,6 @@ func ChangeIssueRef(ctx context.Context, issue *issues_model.Issue, doer *user_m return nil } -// UpdateAssignees is a helper function to add or delete one or multiple issue assignee(s) -// Deleting is done the GitHub way (quote from their api documentation): -// https://developer.github.com/v3/issues/#edit-an-issue -// "assignees" (array): Logins for Users to assign to this issue. -// Pass one or more user logins to replace the set of assignees on this Issue. -// Send an empty array ([]) to clear all assignees from the Issue. -func UpdateAssignees(ctx context.Context, issue *issues_model.Issue, oneAssignee string, multipleAssignees []string, doer *user_model.User) (err error) { - uniqueAssignees := container.SetOf(multipleAssignees...) - - // Keep the old assignee thingy for compatibility reasons - if oneAssignee != "" { - uniqueAssignees.Add(oneAssignee) - } - - // Loop through all assignees to add them - allNewAssignees := make([]*user_model.User, 0, len(uniqueAssignees)) - for _, assigneeName := range uniqueAssignees.Values() { - assignee, err := user_model.GetUserByName(ctx, assigneeName) - if err != nil { - return err - } - - if user_model.IsUserBlockedBy(ctx, doer, assignee.ID) { - return user_model.ErrBlockedUser - } - - allNewAssignees = append(allNewAssignees, assignee) - } - - // Delete all old assignees not passed - if err = DeleteNotPassedAssignee(ctx, issue, doer, allNewAssignees); err != nil { - return err - } - - // Add all new assignees - // Update the assignee. 
The function will check if the user exists, is already - // assigned (which he shouldn't as we deleted all assignees before) and - // has access to the repo. - for _, assignee := range allNewAssignees { - // Extra method to prevent double adding (which would result in removing) - _, err = AddAssigneeIfNotAssigned(ctx, issue, doer, assignee.ID, true) - if err != nil { - return err - } - } - - return err -} - // DeleteIssue deletes an issue func DeleteIssue(ctx context.Context, doer *user_model.User, issue *issues_model.Issue) error { // load issue before deleting it @@ -214,40 +164,6 @@ func DeleteIssue(ctx context.Context, doer *user_model.User, issue *issues_model return nil } -// AddAssigneeIfNotAssigned adds an assignee only if he isn't already assigned to the issue. -// Also checks for access of assigned user -func AddAssigneeIfNotAssigned(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, assigneeID int64, notify bool) (comment *issues_model.Comment, err error) { - assignee, err := user_model.GetUserByID(ctx, assigneeID) - if err != nil { - return nil, err - } - - // Check if the user is already assigned - isAssigned, err := issues_model.IsUserAssignedToIssue(ctx, issue, assignee) - if err != nil { - return nil, err - } - if isAssigned { - // nothing to do - return nil, nil //nolint:nilnil // return nil because the user is already assigned - } - - valid, err := access_model.CanBeAssigned(ctx, assignee, issue.Repo, issue.IsPull) - if err != nil { - return nil, err - } - if !valid { - return nil, repo_model.ErrUserDoesNotHaveAccessToRepo{UserID: assigneeID, RepoName: issue.Repo.Name} - } - - if notify { - _, comment, err = ToggleAssigneeWithNotify(ctx, issue, doer, assigneeID) - return comment, err - } - _, comment, err = issues_model.ToggleIssueAssignee(ctx, issue, doer, assigneeID) - return comment, err -} - // GetRefEndNamesAndURLs retrieves the ref end names (e.g. refs/heads/branch-name -> branch-name) // and their respective URLs. 
func GetRefEndNamesAndURLs(issues []*issues_model.Issue, repoLink string) (map[int64]string, map[int64]string) { diff --git a/services/issue/pull.go b/services/issue/pull.go index f415ebe759..3fc9c335f9 100644 --- a/services/issue/pull.go +++ b/services/issue/pull.go @@ -133,7 +133,7 @@ func PullRequestCodeOwnersReview(ctx context.Context, pr *issues_model.PullReque if u.ID != issue.Poster.ID && !contain(latestReviews, u) { comment, err := issues_model.AddReviewRequest(ctx, issue, u, issue.Poster, true) if err != nil { - log.Warn("Failed add assignee user: %s to PR review: %s#%d, error: %s", u.Name, pr.BaseRepo.Name, pr.ID, err) + log.Warn("Failed add review user: %s to PR review: %s#%d, error: %s", u.Name, pr.BaseRepo.Name, pr.ID, err) return nil, err } if comment == nil { // comment maybe nil if review type is ReviewTypeRequest @@ -150,7 +150,7 @@ func PullRequestCodeOwnersReview(ctx context.Context, pr *issues_model.PullReque for _, t := range uniqTeams { comment, err := issues_model.AddTeamReviewRequest(ctx, issue, t, issue.Poster, true) if err != nil { - log.Warn("Failed add assignee team: %s to PR review: %s#%d, error: %s", t.Name, pr.BaseRepo.Name, pr.ID, err) + log.Warn("Failed add reviewer team: %s to PR review: %s#%d, error: %s", t.Name, pr.BaseRepo.Name, pr.ID, err) return nil, err } if comment == nil { // comment maybe nil if review type is ReviewTypeRequest diff --git a/services/issue/review_request.go b/services/issue/review_request.go new file mode 100644 index 0000000000..23fe9d171e --- /dev/null +++ b/services/issue/review_request.go @@ -0,0 +1,283 @@ +// Copyright 2026 The Gitea Authors. All rights reserved. 
+// SPDX-License-Identifier: MIT + +package issue + +import ( + "context" + + issues_model "code.gitea.io/gitea/models/issues" + "code.gitea.io/gitea/models/organization" + "code.gitea.io/gitea/models/perm" + access_model "code.gitea.io/gitea/models/perm/access" + repo_model "code.gitea.io/gitea/models/repo" + "code.gitea.io/gitea/models/unit" + user_model "code.gitea.io/gitea/models/user" + "code.gitea.io/gitea/modules/log" + notify_service "code.gitea.io/gitea/services/notify" +) + +// ReviewRequest add or remove a review request from a user for this PR, and make comment for it. +func ReviewRequest(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, permDoer *access_model.Permission, reviewer *user_model.User, isAdd bool) (comment *issues_model.Comment, err error) { + err = isValidReviewRequest(ctx, reviewer, doer, isAdd, issue, permDoer) + if err != nil { + return nil, err + } + + if isAdd { + comment, err = issues_model.AddReviewRequest(ctx, issue, reviewer, doer, false) + } else { + comment, err = issues_model.RemoveReviewRequest(ctx, issue, reviewer, doer) + } + + if err != nil { + return nil, err + } + + if comment != nil { + notify_service.PullRequestReviewRequest(ctx, doer, issue, reviewer, isAdd, comment) + } + + return comment, err +} + +// isValidReviewRequest Check permission for ReviewRequest +func isValidReviewRequest(ctx context.Context, reviewer, doer *user_model.User, isAdd bool, issue *issues_model.Issue, permDoer *access_model.Permission) error { + if reviewer.IsOrganization() { + return issues_model.ErrNotValidReviewRequest{ + Reason: "Organization can't be added as reviewer", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } + } + if doer.IsOrganization() { + return issues_model.ErrNotValidReviewRequest{ + Reason: "Organization can't be doer to add reviewer", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } + } + + permReviewer, err := access_model.GetIndividualUserRepoPermission(ctx, issue.Repo, reviewer) + if err != nil { + 
return err + } + + if permDoer == nil { + permDoer = new(access_model.Permission) + *permDoer, err = access_model.GetDoerRepoPermission(ctx, issue.Repo, doer) + if err != nil { + return err + } + } + + lastReview, err := issues_model.GetReviewByIssueIDAndUserID(ctx, issue.ID, reviewer.ID) + if err != nil && !issues_model.IsErrReviewNotExist(err) { + return err + } + + canDoerChangeReviewRequests := CanDoerChangeReviewRequests(ctx, doer, issue.Repo, issue.PosterID) + + if isAdd { + if !permReviewer.CanAccessAny(perm.AccessModeRead, unit.TypePullRequests) { + return issues_model.ErrNotValidReviewRequest{ + Reason: "Reviewer can't read", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } + } + + if reviewer.ID == issue.PosterID && issue.OriginalAuthorID == 0 { + return issues_model.ErrNotValidReviewRequest{ + Reason: "poster of pr can't be reviewer", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } + } + + if canDoerChangeReviewRequests { + return nil + } + + if doer.ID == issue.PosterID && issue.OriginalAuthorID == 0 && lastReview != nil && lastReview.Type != issues_model.ReviewTypeRequest { + return nil + } + + return issues_model.ErrNotValidReviewRequest{ + Reason: "Doer can't choose reviewer", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } + } + + if canDoerChangeReviewRequests { + return nil + } + + if lastReview != nil && lastReview.Type == issues_model.ReviewTypeRequest && lastReview.ReviewerID == doer.ID { + return nil + } + + return issues_model.ErrNotValidReviewRequest{ + Reason: "Doer can't remove reviewer", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } +} + +// isValidTeamReviewRequest Check permission for ReviewRequest Team +func isValidTeamReviewRequest(ctx context.Context, reviewer *organization.Team, doer *user_model.User, isAdd bool, issue *issues_model.Issue) error { + if doer.IsOrganization() { + return issues_model.ErrNotValidReviewRequest{ + Reason: "Organization can't be doer to add reviewer", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } + } + 
+ canDoerChangeReviewRequests := CanDoerChangeReviewRequests(ctx, doer, issue.Repo, issue.PosterID) + + if isAdd { + if issue.Repo.IsPrivate { + hasTeam := organization.HasTeamRepo(ctx, reviewer.OrgID, reviewer.ID, issue.RepoID) + + if !hasTeam { + return issues_model.ErrNotValidReviewRequest{ + Reason: "Reviewing team can't read repo", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } + } + } + + if canDoerChangeReviewRequests { + return nil + } + + return issues_model.ErrNotValidReviewRequest{ + Reason: "Doer can't choose reviewer", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } + } + + if canDoerChangeReviewRequests { + return nil + } + + return issues_model.ErrNotValidReviewRequest{ + Reason: "Doer can't remove reviewer", + UserID: doer.ID, + RepoID: issue.Repo.ID, + } +} + +// TeamReviewRequest add or remove a review request from a team for this PR, and make comment for it. +func TeamReviewRequest(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, reviewer *organization.Team, isAdd bool) (comment *issues_model.Comment, err error) { + err = isValidTeamReviewRequest(ctx, reviewer, doer, isAdd, issue) + if err != nil { + return nil, err + } + if isAdd { + comment, err = issues_model.AddTeamReviewRequest(ctx, issue, reviewer, doer, false) + } else { + comment, err = issues_model.RemoveTeamReviewRequest(ctx, issue, reviewer, doer) + } + + if err != nil { + return nil, err + } + + if comment == nil || !isAdd { + return nil, nil //nolint:nilnil // return nil because no comment was created or it is a removal + } + + return comment, teamReviewRequestNotify(ctx, issue, doer, reviewer, isAdd, comment) +} + +func ReviewRequestNotify(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, reviewNotifiers []*ReviewRequestNotifier) { + for _, reviewNotifier := range reviewNotifiers { + if reviewNotifier.Reviewer != nil { + notify_service.PullRequestReviewRequest(ctx, issue.Poster, issue, reviewNotifier.Reviewer, reviewNotifier.IsAdd, 
reviewNotifier.Comment) + } else if reviewNotifier.ReviewTeam != nil { + if err := teamReviewRequestNotify(ctx, issue, issue.Poster, reviewNotifier.ReviewTeam, reviewNotifier.IsAdd, reviewNotifier.Comment); err != nil { + log.Error("teamReviewRequestNotify: %v", err) + } + } + } +} + +// teamReviewRequestNotify notify all user in this team +func teamReviewRequestNotify(ctx context.Context, issue *issues_model.Issue, doer *user_model.User, reviewer *organization.Team, isAdd bool, comment *issues_model.Comment) error { + // notify all user in this team + if err := comment.LoadIssue(ctx); err != nil { + return err + } + + members, err := organization.GetTeamMembers(ctx, &organization.SearchMembersOptions{ + TeamID: reviewer.ID, + }) + if err != nil { + return err + } + + for _, member := range members { + if member.ID == comment.Issue.PosterID { + continue + } + comment.AssigneeID = member.ID + notify_service.PullRequestReviewRequest(ctx, doer, issue, member, isAdd, comment) + } + + return err +} + +// CanDoerChangeReviewRequests returns if the doer can add/remove review requests of a PR +func CanDoerChangeReviewRequests(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, posterID int64) bool { + if repo.IsArchived { + return false + } + // The poster of the PR can change the reviewers + if doer.ID == posterID { + return true + } + + // The owner of the repo can change the reviewers + if doer.ID == repo.OwnerID { + return true + } + + // Collaborators of the repo can change the reviewers + isCollaborator, err := repo_model.IsCollaborator(ctx, repo.ID, doer.ID) + if err != nil { + log.Error("IsCollaborator: %v", err) + return false + } + if isCollaborator { + return true + } + + // If the repo's owner is an organization, members of teams with read permission on pull requests can change reviewers + if repo.Owner.IsOrganization() { + teams, err := organization.GetTeamsWithAccessToAnyRepoUnit(ctx, repo.OwnerID, repo.ID, perm.AccessModeRead, 
unit.TypePullRequests) + if err != nil { + log.Error("GetTeamsWithAccessToRepo: %v", err) + return false + } + for _, team := range teams { + if !team.UnitEnabled(ctx, unit.TypePullRequests) { + continue + } + isMember, err := organization.IsTeamMember(ctx, repo.OwnerID, team.ID, doer.ID) + if err != nil { + log.Error("IsTeamMember: %v", err) + continue + } + if isMember { + return true + } + } + } + + return false +} diff --git a/services/mailer/mail_workflow_run.go b/services/mailer/mail_workflow_run.go index 9efaa4182b..18c13bcc75 100644 --- a/services/mailer/mail_workflow_run.go +++ b/services/mailer/mail_workflow_run.go @@ -37,7 +37,7 @@ func generateMessageIDForActionsWorkflowRunStatusEmail(repo *repo_model.Reposito } func composeAndSendActionsWorkflowRunStatusEmail(ctx context.Context, repo *repo_model.Repository, run *actions_model.ActionRun, sender *user_model.User, recipients []*user_model.User) error { - jobs, err := actions_model.GetRunJobsByRunID(ctx, run.ID) + jobs, err := actions_model.GetLatestAttemptJobsByRepoAndRunID(ctx, repo.ID, run.ID) if err != nil { return err } diff --git a/services/notify/notify.go b/services/notify/notify.go index 2416cbd2e0..152d53b01c 100644 --- a/services/notify/notify.go +++ b/services/notify/notify.go @@ -399,12 +399,18 @@ func CreateCommitStatus(ctx context.Context, repo *repo_model.Repository, commit } } +// WorkflowRunStatusUpdate dispatches a workflow run status change to every registered notifier. +// Prefer the helpers in services/actions/notify.go over calling this directly; +// unless you are sure the caller has already resolved the correct sender and paired notifications. func WorkflowRunStatusUpdate(ctx context.Context, repo *repo_model.Repository, sender *user_model.User, run *actions_model.ActionRun) { for _, notifier := range notifiers { notifier.WorkflowRunStatusUpdate(ctx, repo, sender, run) } } +// WorkflowJobStatusUpdate dispatches a workflow job status change to every registered notifier. 
+// Prefer the helpers in services/actions/notify.go over calling this directly, +// unless you are sure the caller has already resolved the correct sender and paired notifications. func WorkflowJobStatusUpdate(ctx context.Context, repo *repo_model.Repository, sender *user_model.User, job *actions_model.ActionRunJob, task *actions_model.ActionTask) { for _, notifier := range notifiers { notifier.WorkflowJobStatusUpdate(ctx, repo, sender, job, task) diff --git a/services/pull/check.go b/services/pull/check.go index 6486ca79df..996994cea9 100644 --- a/services/pull/check.go +++ b/services/pull/check.go @@ -38,14 +38,15 @@ import ( var prPatchCheckerQueue *queue.WorkerPoolQueue[string] var ( - ErrIsClosed = errors.New("pull is closed") - ErrNoPermissionToMerge = errors.New("no permission to merge") - ErrNotReadyToMerge = errors.New("not ready to merge") - ErrHasMerged = errors.New("has already been merged") - ErrIsWorkInProgress = errors.New("work in progress PRs cannot be merged") - ErrIsChecking = errors.New("cannot merge while conflict checking is in progress") - ErrNotMergeableState = errors.New("not in mergeable state") - ErrDependenciesLeft = errors.New("is blocked by an open dependency") + ErrIsClosed = errors.New("pull is closed") + ErrNoPermissionToMerge = errors.New("no permission to merge") + ErrNotReadyToMerge = errors.New("not ready to merge") + ErrHasMerged = errors.New("has already been merged") + ErrIsWorkInProgress = errors.New("work in progress PRs cannot be merged") + ErrIsChecking = errors.New("cannot merge while conflict checking is in progress") + ErrNotMergeableState = errors.New("not in mergeable state") + ErrDependenciesLeft = errors.New("is blocked by an open dependency") + ErrHeadCommitsNotAllVerified = errors.New("the branch requires signed commits but not all head commits are verified") ) func markPullRequestStatusAsChecking(ctx context.Context, pr *issues_model.PullRequest) bool { @@ -132,7 +133,13 @@ const ( ) // CheckPullMergeable check
if the pull mergeable based on all conditions (branch protection, merge options, ...) -func CheckPullMergeable(stdCtx context.Context, doer *user_model.User, perm *access_model.Permission, pr *issues_model.PullRequest, mergeCheckType MergeCheckType, adminForceMerge bool) error { +// mergeStyle tailors the "require signed commits" prechecks: +// - fast-forward-only: no Gitea commit is produced, so Gitea's merge-signing check is skipped; +// only the user's head commits are verified. +// - merge: both the head commits must be verified and Gitea must sign the merge commit. +// - rebase, rebase-merge, squash: Gitea rewrites the commits and signs each, so only Gitea's +// signing ability is checked. +func CheckPullMergeable(stdCtx context.Context, doer *user_model.User, perm *access_model.Permission, pr *issues_model.PullRequest, mergeCheckType MergeCheckType, mergeStyle repo_model.MergeStyle, adminForceMerge bool) error { return db.WithTx(stdCtx, func(ctx context.Context) error { if pr.HasMerged { return ErrHasMerged @@ -207,7 +214,7 @@ func CheckPullMergeable(stdCtx context.Context, doer *user_model.User, perm *acc } } - if _, err := isSignedIfRequired(ctx, pr, doer); err != nil { + if err := checkSigningRequirements(ctx, pr, doer, mergeStyle); err != nil { return err } @@ -221,26 +228,45 @@ func CheckPullMergeable(stdCtx context.Context, doer *user_model.User, perm *acc }) } -// isSignedIfRequired check if merge will be signed if required -func isSignedIfRequired(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.User) (bool, error) { +// checkSigningRequirements enforces the target branch's RequireSignedCommits rule +// against the selected merge style: +// - fast-forward-only and merge keep the user's commits on the base branch, so +// those commits must all be verified, or the pre-receive hook will reject the +// push with a generic error. +// - fast-forward-only creates no Gitea commit, so Gitea's signing key is not used. 
+// - merge, rebase, rebase-merge and squash produce a Gitea-signed commit, so +// Gitea must be configured to sign it. +func checkSigningRequirements(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.User, mergeStyle repo_model.MergeStyle) error { pb, err := git_model.GetFirstMatchProtectedBranchRule(ctx, pr.BaseRepoID, pr.BaseBranch) if err != nil { - return false, err + return err } - if pb == nil || !pb.RequireSignedCommits { - return true, nil + return nil } gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, pr.BaseRepo) if err != nil { - return false, err + return err } defer closer.Close() - sign, _, _, err := asymkey_service.SignMerge(ctx, pr, doer, gitRepo) + if mergeStyle == repo_model.MergeStyleFastForwardOnly || mergeStyle == repo_model.MergeStyleMerge { + verified, err := asymkey_service.AllHeadCommitsVerified(ctx, pr, gitRepo) + if err != nil { + return err + } + if !verified { + return ErrHeadCommitsNotAllVerified + } + } - return sign, err + if mergeStyle != repo_model.MergeStyleFastForwardOnly { + if _, _, _, err := asymkey_service.SignMerge(ctx, pr, doer, gitRepo); err != nil { + return err + } + } + return nil } // markPullRequestAsMergeable checks if pull request is possible to leaving checking status, diff --git a/services/pull/check_test.go b/services/pull/check_test.go index 0f39237932..506cd42301 100644 --- a/services/pull/check_test.go +++ b/services/pull/check_test.go @@ -9,6 +9,7 @@ import ( "time" "code.gitea.io/gitea/models/db" + git_model "code.gitea.io/gitea/models/git" issues_model "code.gitea.io/gitea/models/issues" "code.gitea.io/gitea/models/pull" repo_model "code.gitea.io/gitea/models/repo" @@ -73,6 +74,39 @@ func TestPullRequest_AddToTaskQueue(t *testing.T) { prPatchCheckerQueue = nil } +func TestCheckSigningRequirementsHeadCommits(t *testing.T) { + require.NoError(t, unittest.PrepareTestDatabase()) + ctx := t.Context() + + pr := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 
2}) + require.NoError(t, pr.LoadBaseRepo(ctx)) + require.NoError(t, pr.LoadHeadRepo(ctx)) + + check := func() error { + return checkSigningRequirements(ctx, pr, nil, repo_model.MergeStyleFastForwardOnly) + } + + // No protected branch rule on the base branch: the check must pass. + require.NoError(t, check()) + + // Protected branch without RequireSignedCommits: the check must still pass. + require.NoError(t, git_model.UpdateProtectBranch(ctx, pr.BaseRepo, &git_model.ProtectedBranch{ + RepoID: pr.BaseRepoID, + RuleName: pr.BaseBranch, + RequireSignedCommits: false, + }, git_model.WhitelistOptions{})) + require.NoError(t, check()) + + // With RequireSignedCommits enabled: the test fixture commits have no signatures, + // so the check must report ErrHeadCommitsNotAllVerified. + pb, err := git_model.GetFirstMatchProtectedBranchRule(ctx, pr.BaseRepoID, pr.BaseBranch) + require.NoError(t, err) + require.NotNil(t, pb) + pb.RequireSignedCommits = true + require.NoError(t, git_model.UpdateProtectBranch(ctx, pr.BaseRepo, pb, git_model.WhitelistOptions{})) + require.ErrorIs(t, check(), ErrHeadCommitsNotAllVerified) +} + func TestMarkPullRequestAsMergeable(t *testing.T) { assert.NoError(t, unittest.PrepareTestDatabase()) diff --git a/services/webhook/notifier.go b/services/webhook/notifier.go index 2b301d4d58..d2575e9931 100644 --- a/services/webhook/notifier.go +++ b/services/webhook/notifier.go @@ -1043,7 +1043,7 @@ func (*webhookNotifier) WorkflowRunStatusUpdate(ctx context.Context, repo *repo_ return } - convertedRun, err := convert.ToActionWorkflowRun(ctx, repo, run) + convertedRun, err := convert.ToActionWorkflowRun(ctx, repo, run, nil) if err != nil { log.Error("ToActionWorkflowRun: %v", err) return diff --git a/templates/base/head.tmpl b/templates/base/head.tmpl index 475d350d0c..58728fd117 100644 --- a/templates/base/head.tmpl +++ b/templates/base/head.tmpl @@ -4,7 +4,7 @@ {{ctx.HeadMetaContentSecurityPolicy}} {{if .Title}}{{.Title}} - {{end}}{{.PageTitleCommon}} - 
{{if .ManifestData}}{{end}} + diff --git a/templates/devtest/repo-action-view.tmpl b/templates/devtest/repo-action-view.tmpl index 46f040d8a6..2971039fc9 100644 --- a/templates/devtest/repo-action-view.tmpl +++ b/templates/devtest/repo-action-view.tmpl @@ -3,12 +3,12 @@
Run:CanCancel Run:CanApprove - Run:CanRerun + Run:CanRerunLatest + Run:PreviousAttempt
{{template "repo/actions/view_component" (dict - "RunID" (or .RunID 10) "JobID" (or .JobID 0) - "ActionsURL" (print AppSubUrl "/devtest/repo-action-view") + "ActionsViewURL" $.ActionsViewURL )}} {{template "base/footer" .}} diff --git a/templates/repo/actions/view.tmpl b/templates/repo/actions/view.tmpl index 1eb84a9b93..3f879e0e5a 100644 --- a/templates/repo/actions/view.tmpl +++ b/templates/repo/actions/view.tmpl @@ -3,9 +3,8 @@
{{template "repo/header" .}} {{template "repo/actions/view_component" (dict - "RunID" .RunID "JobID" .JobID - "ActionsURL" .ActionsURL + "ActionsViewURL" .ActionsViewURL )}}
diff --git a/templates/repo/actions/view_component.tmpl b/templates/repo/actions/view_component.tmpl index 2cc70e499a..67926276c0 100644 --- a/templates/repo/actions/view_component.tmpl +++ b/templates/repo/actions/view_component.tmpl @@ -1,17 +1,18 @@ -
- -
+ +
{{ctx.Locale.Tr "repo.diff.image.side_by_side"}} {{if and .blobBase .blobHead}} {{ctx.Locale.Tr "repo.diff.image.swipe"}} diff --git a/templates/repo/editor/edit.tmpl b/templates/repo/editor/edit.tmpl index 74d6dcb07f..0acd7bfd71 100644 --- a/templates/repo/editor/edit.tmpl +++ b/templates/repo/editor/edit.tmpl @@ -19,7 +19,7 @@
-