0
0
mirror of https://github.com/go-gitea/gitea.git synced 2026-05-10 11:51:33 +02:00

Merge branch 'main' into fix-render-control-char

This commit is contained in:
Giteabot 2026-04-03 08:24:21 +08:00 committed by GitHub
commit b10aad6005
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
17 changed files with 772 additions and 294 deletions

View File

@ -322,16 +322,16 @@ func GetRunByRepoAndID(ctx context.Context, repoID, runID int64) (*ActionRun, er
return &run, nil
}
func GetRunByIndex(ctx context.Context, repoID, index int64) (*ActionRun, error) {
func GetRunByRepoAndIndex(ctx context.Context, repoID, runIndex int64) (*ActionRun, error) {
run := &ActionRun{
RepoID: repoID,
Index: index,
Index: runIndex,
}
has, err := db.GetEngine(ctx).Get(run)
if err != nil {
return nil, err
} else if !has {
return nil, fmt.Errorf("run with index %d %d: %w", repoID, index, util.ErrNotExist)
return nil, fmt.Errorf("run with repo_id %d and index %d: %w", repoID, runIndex, util.ErrNotExist)
}
return run, nil

View File

@ -18,6 +18,11 @@ import (
"xorm.io/builder"
)
// MaxJobNumPerRun is the maximum number of jobs in a single run.
// https://docs.github.com/en/actions/reference/limits#existing-system-limits
// TODO: check this limit when creating jobs
const MaxJobNumPerRun = 256
// ActionRunJob represents a job of a run
type ActionRunJob struct {
ID int64

View File

@ -1,9 +1,29 @@
# type ActionRun struct {
# ID int64 `xorm:"pk autoincr"`
# RepoID int64 `xorm:"index"`
# Index int64
# ID int64 `xorm:"pk autoincr"`
# RepoID int64 `xorm:"index"`
# Index int64
# CommitSHA string `xorm:"commit_sha"`
# Event string
# TriggerEvent string
# EventPayload string `xorm:"LONGTEXT"`
# }
-
id: 106
repo_id: 1
id: 990
repo_id: 100
index: 7
commit_sha: merge-sha
event: pull_request
event_payload: '{"pull_request":{"head":{"sha":"sha-shared"}}}'
-
id: 991
repo_id: 100
index: 8
commit_sha: sha-shared
event: push
event_payload: '{"head_commit":{"id":"sha-shared"}}'
-
id: 1991
repo_id: 100
index: 9
commit_sha: sha-other
event: release

View File

@ -3,8 +3,14 @@
# RunID int64 `xorm:"index"`
# }
-
id: 530
run_id: 106
id: 997
run_id: 990
-
id: 531
run_id: 106
id: 998
run_id: 990
-
id: 1997
run_id: 991
-
id: 1998
run_id: 1991

View File

@ -1,29 +1,51 @@
# type CommitStatus struct {
# ID int64 `xorm:"pk autoincr"`
# RepoID int64 `xorm:"index"`
# SHA string
# TargetURL string
# }
-
id: 10
repo_id: 1
id: 10010
repo_id: 100
sha: sha-shared
target_url: /testuser/repo1/actions/runs/7/jobs/0
-
id: 11
repo_id: 1
id: 10011
repo_id: 100
sha: sha-shared
target_url: /testuser/repo1/actions/runs/7/jobs/1
-
id: 12
repo_id: 1
id: 10012
repo_id: 100
sha: sha-shared
target_url: /testuser/repo1/actions/runs/8/jobs/0
-
id: 10013
repo_id: 100
sha: sha-other
target_url: /testuser/repo1/actions/runs/9/jobs/0
-
id: 10014
repo_id: 100
sha: sha-shared
target_url: /otheruser/badrepo/actions/runs/7/jobs/0
-
id: 13
repo_id: 1
id: 10015
repo_id: 100
sha: sha-shared
target_url: /testuser/repo1/actions/runs/10/jobs/0
-
id: 14
repo_id: 1
id: 10016
repo_id: 100
sha: sha-shared
target_url: /testuser/repo1/actions/runs/7/jobs/3
-
id: 15
repo_id: 1
id: 10017
repo_id: 100
sha: sha-shared
target_url: https://ci.example.com/build/123
-
id: 10018
repo_id: 100
sha: sha-shared
target_url: /testuser/repo1/actions/runs/990/jobs/997

View File

@ -6,14 +6,14 @@
# TargetURL string
# }
-
id: 20
repo_id: 1
sha: "012345"
state: success
id: 10020
repo_id: 100
sha: sha-shared
state: pending
target_url: /testuser/repo1/actions/runs/7/jobs/0
-
id: 21
repo_id: 1
sha: "678901"
state: success
target_url: https://ci.example.com/build/123
id: 10021
repo_id: 100
sha: sha-other
state: pending
target_url: /testuser/repo1/actions/runs/9/jobs/0

View File

@ -4,6 +4,6 @@
# Name string
# }
-
id: 1
id: 100
owner_name: testuser
name: repo1

View File

@ -400,7 +400,7 @@ func prepareMigrationTasks() []*migration {
newMigration(323, "Add support for actions concurrency", v1_26.AddActionsConcurrency),
newMigration(324, "Fix closed milestone completeness for milestones with no issues", v1_26.FixClosedMilestoneCompleteness),
newMigration(325, "Fix missed repo_id when migrate attachments", v1_26.FixMissedRepoIDWhenMigrateAttachments),
newMigration(326, "Migrate commit status target URL to use run ID and job ID", v1_26.FixCommitStatusTargetURLToUseRunAndJobID),
newMigration(326, "Partially migrate commit status target URL to use run ID and job ID", v1_26.FixCommitStatusTargetURLToUseRunAndJobID),
newMigration(327, "Add disabled state to action runners", v1_26.AddDisabledToActionRunner),
newMigration(328, "Add TokenPermissions column to ActionRunJob", v1_26.AddTokenPermissionsToActionRunJob),
newMigration(329, "Add unique constraint for user badge", v1_26.AddUniqueIndexForUserBadge),

View File

@ -4,18 +4,33 @@
package v1_26
import (
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
webhook_module "code.gitea.io/gitea/modules/webhook"
"xorm.io/xorm"
)
const actionsRunPath = "/actions/runs/"
const (
actionsRunPath = "/actions/runs/"
// Only commit status target URLs whose resolved run ID is smaller than this threshold are rewritten by this partial migration.
// The fixed value 1000 is a conservative cutoff chosen to cover the smaller legacy run indexes that are most likely to be confused with ID-based URLs at runtime.
// Larger legacy {run} or {job} numbers are usually easier to disambiguate. For example:
// * /actions/runs/1200/jobs/1420 is most likely an ID-based URL, because a run should not contain more than 256 jobs.
// * /actions/runs/1500/jobs/3 is most likely an index-based URL, because a job ID cannot be smaller than its run ID.
// But URLs with small numbers, such as /actions/runs/5/jobs/6, are much harder to distinguish reliably.
// This migration therefore prioritizes rewriting target URLs for runs in that lower range.
legacyURLIDThreshold int64 = 1000
)
type migrationRepository struct {
ID int64
@ -24,9 +39,13 @@ type migrationRepository struct {
}
type migrationActionRun struct {
ID int64
RepoID int64
Index int64
ID int64
RepoID int64
Index int64
CommitSHA string `xorm:"commit_sha"`
Event webhook_module.HookEventType
TriggerEvent string
EventPayload string
}
type migrationActionRunJob struct {
@ -40,93 +59,133 @@ type migrationCommitStatus struct {
TargetURL string
}
func FixCommitStatusTargetURLToUseRunAndJobID(x *xorm.Engine) error {
runByIndexCache := make(map[int64]map[int64]*migrationActionRun)
jobsByRunIDCache := make(map[int64][]int64)
repoLinkCache := make(map[int64]string)
if err := migrateCommitStatusTargetURL(x, "commit_status", runByIndexCache, jobsByRunIDCache, repoLinkCache); err != nil {
return err
}
return migrateCommitStatusTargetURL(x, "commit_status_summary", runByIndexCache, jobsByRunIDCache, repoLinkCache)
type commitSHAAndRuns struct {
commitSHA string
runs map[int64]*migrationActionRun
}
func migrateCommitStatusTargetURL(
// FixCommitStatusTargetURLToUseRunAndJobID partially migrates legacy Actions
// commit status target URLs to the new run/job ID-based form.
//
// Only rows whose resolved run ID is below legacyURLIDThreshold are rewritten.
// This is because smaller legacy run indexes are more likely to collide with run ID URLs during runtime resolution,
// so this migration prioritizes that lower range and leaves the remaining legacy target URLs to the web compatibility logic.
func FixCommitStatusTargetURLToUseRunAndJobID(x *xorm.Engine) error {
jobsByRunIDCache := make(map[int64][]int64)
repoLinkCache := make(map[int64]string)
groups, err := loadLegacyMigrationRunGroups(x)
if err != nil {
return err
}
for repoID, groupsBySHA := range groups {
for _, group := range groupsBySHA {
if err := migrateCommitStatusTargetURLForGroup(x, "commit_status", repoID, group.commitSHA, group.runs, jobsByRunIDCache, repoLinkCache); err != nil {
return err
}
if err := migrateCommitStatusTargetURLForGroup(x, "commit_status_summary", repoID, group.commitSHA, group.runs, jobsByRunIDCache, repoLinkCache); err != nil {
return err
}
}
}
return nil
}
// loadLegacyMigrationRunGroups loads every action_run row whose ID is below
// legacyURLIDThreshold and groups the runs first by repository ID and then by
// the commit SHA their commit statuses were attached to.
//
// Runs whose commit SHA cannot be resolved from the stored event payload are
// logged and skipped; runs that resolve to an empty SHA never produced commit
// status records and are skipped silently.
func loadLegacyMigrationRunGroups(x *xorm.Engine) (map[int64]map[string]*commitSHAAndRuns, error) {
	var legacyRuns []migrationActionRun
	err := x.Table("action_run").
		Where("id < ?", legacyURLIDThreshold).
		Cols("id", "repo_id", "`index`", "commit_sha", "event", "trigger_event", "event_payload").
		Find(&legacyRuns)
	if err != nil {
		return nil, fmt.Errorf("query action_run: %w", err)
	}

	grouped := make(map[int64]map[string]*commitSHAAndRuns)
	for idx := range legacyRuns {
		// Take the address of the slice element so each stored pointer refers
		// to a distinct run value.
		cur := &legacyRuns[idx]
		sha, err := getCommitStatusCommitID(cur)
		if err != nil {
			log.Warn("skip action_run id=%d when resolving commit status commit SHA: %v", cur.ID, err)
			continue
		}
		if sha == "" {
			// empty commitID means the run didn't create any commit status records, just skip
			continue
		}
		bySHA, ok := grouped[cur.RepoID]
		if !ok {
			bySHA = make(map[string]*commitSHAAndRuns)
			grouped[cur.RepoID] = bySHA
		}
		group, ok := bySHA[sha]
		if !ok {
			group = &commitSHAAndRuns{
				commitSHA: sha,
				runs:      make(map[int64]*migrationActionRun),
			}
			bySHA[sha] = group
		}
		group.runs[cur.Index] = cur
	}
	return grouped, nil
}
func migrateCommitStatusTargetURLForGroup(
x *xorm.Engine,
table string,
runByIndexCache map[int64]map[int64]*migrationActionRun,
repoID int64,
sha string,
runs map[int64]*migrationActionRun,
jobsByRunIDCache map[int64][]int64,
repoLinkCache map[int64]string,
) error {
const batchSize = 500
var lastID int64
var rows []migrationCommitStatus
if err := x.Table(table).
Where("repo_id = ?", repoID).
And("sha = ?", sha).
Cols("id", "repo_id", "target_url").
Find(&rows); err != nil {
return fmt.Errorf("query %s for repo_id=%d sha=%s: %w", table, repoID, sha, err)
}
for {
var rows []migrationCommitStatus
sess := x.Table(table).
Where("target_url LIKE ?", "%"+actionsRunPath+"%").
And("id > ?", lastID).
Asc("id").
Limit(batchSize)
if err := sess.Find(&rows); err != nil {
return fmt.Errorf("query %s: %w", table, err)
}
if len(rows) == 0 {
return nil
for _, row := range rows {
repoLink, err := getRepoLinkCached(x, repoLinkCache, row.RepoID)
if err != nil || repoLink == "" {
if err != nil {
log.Warn("convert %s id=%d getRepoLinkCached: %v", table, row.ID, err)
} else {
log.Warn("convert %s id=%d: repo=%d not found", table, row.ID, row.RepoID)
}
continue
}
for _, row := range rows {
lastID = row.ID
if row.TargetURL == "" {
continue
}
runNum, jobNum, ok := parseTargetURL(row.TargetURL, repoLink)
if !ok {
continue
}
repoLink, err := getRepoLinkCached(x, repoLinkCache, row.RepoID)
if err != nil || repoLink == "" {
if err != nil {
log.Warn("convert %s id=%d getRepoLinkCached: %v", table, row.ID, err)
} else {
log.Warn("convert %s id=%d: repo=%d not found", table, row.ID, row.RepoID)
}
continue
}
run, ok := runs[runNum]
if !ok {
continue
}
runNum, jobNum, ok := parseTargetURL(row.TargetURL, repoLink)
if !ok {
continue
jobID, ok, err := getJobIDByIndexCached(x, jobsByRunIDCache, run.ID, jobNum)
if err != nil || !ok {
if err != nil {
log.Warn("convert %s id=%d getJobIDByIndexCached: %v", table, row.ID, err)
} else {
log.Warn("convert %s id=%d: job not found for run_id=%d job_index=%d", table, row.ID, run.ID, jobNum)
}
continue
}
run, err := getRunByIndexCached(x, runByIndexCache, row.RepoID, runNum)
if err != nil || run == nil {
if err != nil {
log.Warn("convert %s id=%d getRunByIndexCached: %v", table, row.ID, err)
} else {
log.Warn("convert %s id=%d: run not found for repo_id=%d run_index=%d", table, row.ID, row.RepoID, runNum)
}
continue
}
oldURL := row.TargetURL
newURL := fmt.Sprintf("%s%s%d/jobs/%d", repoLink, actionsRunPath, run.ID, jobID)
if oldURL == newURL {
continue
}
jobID, ok, err := getJobIDByIndexCached(x, jobsByRunIDCache, run.ID, jobNum)
if err != nil || !ok {
if err != nil {
log.Warn("convert %s id=%d getJobIDByIndexCached: %v", table, row.ID, err)
} else {
log.Warn("convert %s id=%d: job not found for run_id=%d job_index=%d", table, row.ID, run.ID, jobNum)
}
continue
}
oldURL := row.TargetURL
newURL := fmt.Sprintf("%s%s%d/jobs/%d", repoLink, actionsRunPath, run.ID, jobID) // expect: {repo_link}/actions/runs/{run_id}/jobs/{job_id}
if oldURL == newURL {
continue
}
if _, err := x.Table(table).ID(row.ID).Cols("target_url").Update(&migrationCommitStatus{TargetURL: newURL}); err != nil {
return fmt.Errorf("update %s id=%d target_url from %s to %s: %w", table, row.ID, oldURL, newURL, err)
}
if _, err := x.Table(table).ID(row.ID).Cols("target_url").Update(&migrationCommitStatus{TargetURL: newURL}); err != nil {
return fmt.Errorf("update %s id=%d target_url from %s to %s: %w", table, row.ID, oldURL, newURL, err)
}
}
return nil
}
func getRepoLinkCached(x *xorm.Engine, cache map[int64]string, repoID int64) (string, error) {
@ -147,35 +206,6 @@ func getRepoLinkCached(x *xorm.Engine, cache map[int64]string, repoID int64) (st
return link, nil
}
func getRunByIndexCached(x *xorm.Engine, cache map[int64]map[int64]*migrationActionRun, repoID, runIndex int64) (*migrationActionRun, error) {
if repoCache, ok := cache[repoID]; ok {
if run, ok := repoCache[runIndex]; ok {
if run == nil {
return nil, fmt.Errorf("run repo_id=%d run_index=%d not found", repoID, runIndex)
}
return run, nil
}
}
var run migrationActionRun
has, err := x.Table("action_run").Where("repo_id=? AND `index`=?", repoID, runIndex).Get(&run)
if err != nil {
return nil, err
}
if !has {
if cache[repoID] == nil {
cache[repoID] = make(map[int64]*migrationActionRun)
}
cache[repoID][runIndex] = nil
return nil, fmt.Errorf("run repo_id=%d run_index=%d not found", repoID, runIndex)
}
if cache[repoID] == nil {
cache[repoID] = make(map[int64]*migrationActionRun)
}
cache[repoID][runIndex] = &run
return &run, nil
}
func getJobIDByIndexCached(x *xorm.Engine, cache map[int64][]int64, runID, jobIndex int64) (int64, bool, error) {
jobIDs, ok := cache[runID]
if !ok {
@ -202,7 +232,7 @@ func parseTargetURL(targetURL, repoLink string) (runNum, jobNum int64, ok bool)
}
rest := targetURL[len(prefix):]
parts := strings.Split(rest, "/") // expect: {run_num}/jobs/{job_num}
parts := strings.Split(rest, "/")
if len(parts) == 3 && parts[1] == "jobs" {
runNum, err1 := strconv.ParseInt(parts[0], 10, 64)
jobNum, err2 := strconv.ParseInt(parts[2], 10, 64)
@ -214,3 +244,72 @@ func parseTargetURL(targetURL, repoLink string) (runNum, jobNum int64, ok bool)
return 0, 0, false
}
// getCommitStatusCommitID resolves the commit SHA that the given run's commit
// statuses were attached to, based on the run's event type and stored payload.
//
// It returns an empty string with a nil error for event types that never
// create commit status records, so callers can skip such runs silently.
func getCommitStatusCommitID(run *migrationActionRun) (string, error) {
	switch run.Event {
	case webhook_module.HookEventPush:
		payload, err := getPushEventPayload(run)
		if err != nil {
			return "", fmt.Errorf("getPushEventPayload: %w", err)
		}
		if payload.HeadCommit == nil {
			return "", errors.New("head commit is missing in event payload")
		}
		return payload.HeadCommit.ID, nil
	case webhook_module.HookEventPullRequest,
		webhook_module.HookEventPullRequestSync,
		webhook_module.HookEventPullRequestAssign,
		webhook_module.HookEventPullRequestLabel,
		webhook_module.HookEventPullRequestReviewRequest,
		webhook_module.HookEventPullRequestMilestone,
		webhook_module.HookEventPullRequestReviewApproved,
		webhook_module.HookEventPullRequestReviewRejected,
		webhook_module.HookEventPullRequestReviewComment:
		// Pull-request events and pull-request-review events carry the same
		// payload shape, so one branch handles both groups (the original two
		// branches had byte-identical bodies).
		payload, err := getPullRequestEventPayload(run)
		if err != nil {
			return "", fmt.Errorf("getPullRequestEventPayload: %w", err)
		}
		if payload.PullRequest == nil {
			return "", errors.New("pull request is missing in event payload")
		}
		if payload.PullRequest.Head == nil {
			return "", errors.New("head of pull request is missing in event payload")
		}
		return payload.PullRequest.Head.Sha, nil
	case webhook_module.HookEventRelease:
		// Release runs attach statuses to the commit SHA recorded on the run itself.
		return run.CommitSHA, nil
	default:
		// Other event types create no commit statuses; signal "skip" with no error.
		return "", nil
	}
}
func getPushEventPayload(run *migrationActionRun) (*api.PushPayload, error) {
if run.Event != webhook_module.HookEventPush {
return nil, fmt.Errorf("event %s is not a push event", run.Event)
}
var payload api.PushPayload
if err := json.Unmarshal([]byte(run.EventPayload), &payload); err != nil {
return nil, err
}
return &payload, nil
}
// getPullRequestEventPayload decodes the run's stored event payload as a pull
// request (or pull request review) event. It returns an error when the run's
// event type is neither, or when the stored JSON cannot be unmarshalled.
func getPullRequestEventPayload(run *migrationActionRun) (*api.PullRequestPayload, error) {
	isPRLike := run.Event.IsPullRequest() || run.Event.IsPullRequestReview()
	if !isPRLike {
		return nil, fmt.Errorf("event %s is not a pull request event", run.Event)
	}
	payload := &api.PullRequestPayload{}
	if err := json.Unmarshal([]byte(run.EventPayload), payload); err != nil {
		return nil, err
	}
	return payload, nil
}

View File

@ -28,9 +28,13 @@ func Test_FixCommitStatusTargetURLToUseRunAndJobID(t *testing.T) {
}
type ActionRun struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"index"`
Index int64
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"index"`
Index int64
CommitSHA string `xorm:"commit_sha"`
Event string
TriggerEvent string
EventPayload string `xorm:"LONGTEXT"`
}
type ActionRunJob struct {
@ -41,6 +45,7 @@ func Test_FixCommitStatusTargetURLToUseRunAndJobID(t *testing.T) {
type CommitStatus struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64 `xorm:"index"`
SHA string
TargetURL string
}
@ -61,14 +66,6 @@ func Test_FixCommitStatusTargetURLToUseRunAndJobID(t *testing.T) {
)
defer deferable()
newURL1 := "/testuser/repo1/actions/runs/106/jobs/530"
newURL2 := "/testuser/repo1/actions/runs/106/jobs/531"
invalidWrongRepo := "/otheruser/badrepo/actions/runs/7/jobs/0"
invalidNonexistentRun := "/testuser/repo1/actions/runs/10/jobs/0"
invalidNonexistentJob := "/testuser/repo1/actions/runs/7/jobs/3"
externalTargetURL := "https://ci.example.com/build/123"
require.NoError(t, FixCommitStatusTargetURLToUseRunAndJobID(x))
cases := []struct {
@ -76,14 +73,26 @@ func Test_FixCommitStatusTargetURLToUseRunAndJobID(t *testing.T) {
id int64
want string
}{
{table: "commit_status", id: 10, want: newURL1},
{table: "commit_status", id: 11, want: newURL2},
{table: "commit_status", id: 12, want: invalidWrongRepo},
{table: "commit_status", id: 13, want: invalidNonexistentRun},
{table: "commit_status", id: 14, want: invalidNonexistentJob},
{table: "commit_status", id: 15, want: externalTargetURL},
{table: "commit_status_summary", id: 20, want: newURL1},
{table: "commit_status_summary", id: 21, want: externalTargetURL},
// Legacy URLs for runs whose resolved run IDs are below the threshold should be rewritten.
{table: "commit_status", id: 10010, want: "/testuser/repo1/actions/runs/990/jobs/997"},
{table: "commit_status", id: 10011, want: "/testuser/repo1/actions/runs/990/jobs/998"},
{table: "commit_status", id: 10012, want: "/testuser/repo1/actions/runs/991/jobs/1997"},
// Runs whose resolved IDs are above the threshold are intentionally left unchanged.
{table: "commit_status", id: 10013, want: "/testuser/repo1/actions/runs/9/jobs/0"},
// URLs that do not resolve cleanly as legacy Actions URLs should remain untouched.
{table: "commit_status", id: 10014, want: "/otheruser/badrepo/actions/runs/7/jobs/0"},
{table: "commit_status", id: 10015, want: "/testuser/repo1/actions/runs/10/jobs/0"},
{table: "commit_status", id: 10016, want: "/testuser/repo1/actions/runs/7/jobs/3"},
{table: "commit_status", id: 10017, want: "https://ci.example.com/build/123"},
// Already ID-based URLs are valid inputs and should not be rewritten again.
{table: "commit_status", id: 10018, want: "/testuser/repo1/actions/runs/990/jobs/997"},
// The same rewrite rules apply to commit_status_summary rows.
{table: "commit_status_summary", id: 10020, want: "/testuser/repo1/actions/runs/990/jobs/997"},
{table: "commit_status_summary", id: 10021, want: "/testuser/repo1/actions/runs/9/jobs/0"},
}
for _, tc := range cases {

View File

@ -6,7 +6,6 @@ package repo
import (
"errors"
"fmt"
"net/http"
"code.gitea.io/gitea/models/organization"
@ -14,6 +13,7 @@ import (
access_model "code.gitea.io/gitea/models/perm/access"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/optional"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/web"
@ -83,6 +83,35 @@ func ListForks(ctx *context.APIContext) {
ctx.JSON(http.StatusOK, apiForks)
}
// prepareDoerCreateRepoInOrg loads the organization by name and verifies that
// the current doer may create repositories in it. On any failure it writes the
// appropriate API error response to ctx and returns nil; callers must check
// ctx.Written() (or a nil result) before proceeding.
func prepareDoerCreateRepoInOrg(ctx *context.APIContext, orgName string) *organization.Organization {
	org, err := organization.GetOrgByName(ctx, orgName)
	switch {
	case errors.Is(err, util.ErrNotExist):
		ctx.APIErrorNotFound()
		return nil
	case err != nil:
		ctx.APIErrorInternal(err)
		return nil
	}

	// Hide the org entirely from doers who cannot see it.
	if !organization.HasOrgOrUserVisible(ctx, org.AsUser(), ctx.Doer) {
		ctx.APIErrorNotFound()
		return nil
	}

	// Admins bypass the per-org repository creation permission check.
	if ctx.Doer.IsAdmin {
		return org
	}
	canCreate, err := org.CanCreateOrgRepo(ctx, ctx.Doer.ID)
	if err != nil {
		ctx.APIErrorInternal(err)
		return nil
	}
	if !canCreate {
		ctx.APIError(http.StatusForbidden, "User is not allowed to create repositories in this organization.")
		return nil
	}
	return org
}
// CreateFork create a fork of a repo
func CreateFork(ctx *context.APIContext) {
// swagger:operation POST /repos/{owner}/{repo}/forks repository createFork
@ -118,41 +147,18 @@ func CreateFork(ctx *context.APIContext) {
// "$ref": "#/responses/validationError"
form := web.GetForm(ctx).(*api.CreateForkOption)
repo := ctx.Repo.Repository
var forker *user_model.User // user/org that will own the fork
if form.Organization == nil {
forker = ctx.Doer
} else {
org, err := organization.GetOrgByName(ctx, *form.Organization)
if err != nil {
if organization.IsErrOrgNotExist(err) {
ctx.APIError(http.StatusUnprocessableEntity, err)
} else {
ctx.APIErrorInternal(err)
}
forkOwner := ctx.Doer // user/org that will own the fork
if form.Organization != nil {
org := prepareDoerCreateRepoInOrg(ctx, *form.Organization)
if ctx.Written() {
return
}
if !ctx.Doer.IsAdmin {
isMember, err := org.IsOrgMember(ctx, ctx.Doer.ID)
if err != nil {
ctx.APIErrorInternal(err)
return
} else if !isMember {
ctx.APIError(http.StatusForbidden, fmt.Sprintf("User is no Member of Organisation '%s'", org.Name))
return
}
}
forker = org.AsUser()
forkOwner = org.AsUser()
}
var name string
if form.Name == nil {
name = repo.Name
} else {
name = *form.Name
}
fork, err := repo_service.ForkRepository(ctx, ctx.Doer, forker, repo_service.ForkRepoOptions{
repo := ctx.Repo.Repository
name := optional.FromPtr(form.Name).ValueOrDefault(repo.Name)
fork, err := repo_service.ForkRepository(ctx, ctx.Doer, forkOwner, repo_service.ForkRepoOptions{
BaseRepo: repo,
Name: name,
Description: repo.Description,

View File

@ -498,31 +498,11 @@ func CreateOrgRepo(ctx *context.APIContext) {
// "403":
// "$ref": "#/responses/forbidden"
opt := web.GetForm(ctx).(*api.CreateRepoOption)
org, err := organization.GetOrgByName(ctx, ctx.PathParam("org"))
if err != nil {
if organization.IsErrOrgNotExist(err) {
ctx.APIError(http.StatusUnprocessableEntity, err)
} else {
ctx.APIErrorInternal(err)
}
orgName := ctx.PathParam("org")
org := prepareDoerCreateRepoInOrg(ctx, orgName)
if ctx.Written() {
return
}
if !organization.HasOrgOrUserVisible(ctx, org.AsUser(), ctx.Doer) {
ctx.APIErrorNotFound("HasOrgOrUserVisible", nil)
return
}
if !ctx.Doer.IsAdmin {
canCreate, err := org.CanCreateOrgRepo(ctx, ctx.Doer.ID)
if err != nil {
ctx.APIErrorInternal(err)
return
} else if !canCreate {
ctx.APIError(http.StatusForbidden, "Given user is not allowed to create repository in organization.")
return
}
}
CreateUserRepo(ctx, org.AsUser(), *opt)
}

View File

@ -68,9 +68,138 @@ func getCurrentRunByPathParam(ctx *context_module.Context) (run *actions_model.A
return run
}
// resolveCurrentRunForView resolves GET Actions page URLs and supports both ID-based and legacy index-based forms.
//
// By default, run summary pages (/actions/runs/{run}) use a best-effort ID-first fallback,
// and job pages (/actions/runs/{run}/jobs/{job}) try to confirm an ID-based URL first and prefer the ID-based interpretation when both are valid.
//
// `by_id=1` param explicitly forces the ID-based path, and `by_index=1` explicitly forces the legacy index-based path.
// If both are present, `by_id` takes precedence.
//
// On a successful legacy-index resolution the function redirects to the
// canonical ID-based URL and returns nil; callers must check ctx.Written().
func resolveCurrentRunForView(ctx *context_module.Context) *actions_model.ActionRun {
	// `by_id` explicitly requests ID-based resolution, so the request skips the legacy index-based disambiguation logic and resolves the run by ID directly.
	// It takes precedence over `by_index` when both query parameters are present.
	if ctx.PathParam("run") == "latest" || ctx.FormBool("by_id") {
		return getCurrentRunByPathParam(ctx)
	}
	runNum := ctx.PathParamInt64("run")
	if runNum <= 0 {
		ctx.NotFound(nil)
		return nil
	}
	byIndex := ctx.FormBool("by_index")
	if ctx.PathParam("job") == "" {
		// The URL does not contain a {job} path parameter, so it cannot use the
		// job-specific rules to disambiguate ID-based URLs from legacy index-based URLs.
		// Because of that, this path is handled with a best-effort ID-first fallback by default.
		//
		// When the same repository contains:
		//   - a run whose ID matches runNum, and
		//   - a different run whose repo-scope index also matches runNum
		// this path prefers the ID match and may show a different run than the old legacy URL originally intended,
		// unless `by_index=1` explicitly forces the legacy index-based interpretation.
		if !byIndex {
			runByID, err := actions_model.GetRunByRepoAndID(ctx, ctx.Repo.Repository.ID, runNum)
			if err == nil {
				return runByID
			}
			if !errors.Is(err, util.ErrNotExist) {
				ctx.ServerError("GetRun:"+ctx.PathParam("run"), err)
				return nil
			}
		}
		// No run with this ID (or index mode was forced): fall back to the
		// legacy repo-scoped index and redirect to the canonical ID-based URL.
		runByIndex, err := actions_model.GetRunByRepoAndIndex(ctx, ctx.Repo.Repository.ID, runNum)
		if err == nil {
			ctx.Redirect(fmt.Sprintf("%s/actions/runs/%d", ctx.Repo.RepoLink, runByIndex.ID), http.StatusFound)
			return nil
		}
		if !errors.Is(err, util.ErrNotExist) {
			ctx.ServerError("GetRunByRepoAndIndex", err)
			return nil
		}
		ctx.NotFound(nil)
		return nil
	}
	jobNum := ctx.PathParamInt64("job")
	if jobNum < 0 {
		ctx.NotFound(nil)
		return nil
	}
	// A job index should not be larger than MaxJobNumPerRun, so larger values can skip the legacy index-based path and be treated as job IDs directly.
	if !byIndex && jobNum >= actions_model.MaxJobNumPerRun {
		return getCurrentRunByPathParam(ctx)
	}
	var runByID, runByIndex *actions_model.ActionRun
	var targetJobByIndex *actions_model.ActionRunJob
	// Each run must have at least one job, so a valid job ID in the same run cannot be smaller than the run ID.
	if !byIndex && jobNum >= runNum {
		// Probe the repo-scoped job ID first and only accept it when the job exists and belongs to the same runNum.
		job, err := actions_model.GetRunJobByRepoAndID(ctx, ctx.Repo.Repository.ID, jobNum)
		if err != nil && !errors.Is(err, util.ErrNotExist) {
			ctx.ServerError("GetRunJobByRepoAndID", err)
			return nil
		}
		if job != nil {
			if err := job.LoadRun(ctx); err != nil {
				ctx.ServerError("LoadRun", err)
				return nil
			}
			if job.Run.ID == runNum {
				runByID = job.Run
			}
		}
	}
	// Try to resolve the request as a legacy run-index/job-index URL.
	{
		run, err := actions_model.GetRunByRepoAndIndex(ctx, ctx.Repo.Repository.ID, runNum)
		if err != nil && !errors.Is(err, util.ErrNotExist) {
			ctx.ServerError("GetRunByRepoAndIndex", err)
			return nil
		}
		if run != nil {
			jobs, err := actions_model.GetRunJobsByRunID(ctx, run.ID)
			if err != nil {
				ctx.ServerError("GetRunJobsByRunID", err)
				return nil
			}
			// Legacy job URLs address jobs by position within the run.
			if jobNum < int64(len(jobs)) {
				runByIndex = run
				targetJobByIndex = jobs[jobNum]
			}
		}
	}
	if runByID == nil && runByIndex == nil {
		ctx.NotFound(nil)
		return nil
	}
	if runByID != nil && runByIndex == nil {
		return runByID
	}
	if runByID == nil && runByIndex != nil {
		// Only the legacy interpretation matched: redirect to the canonical ID-based URL.
		ctx.Redirect(fmt.Sprintf("%s/actions/runs/%d/jobs/%d", ctx.Repo.RepoLink, runByIndex.ID, targetJobByIndex.ID), http.StatusFound)
		return nil
	}
	// Reaching this point means both ID-based and legacy index-based interpretations are valid. Prefer the ID-based interpretation by default.
	// Use `by_index=1` query parameter to access the legacy index-based interpretation when necessary.
	return runByID
}
func View(ctx *context_module.Context) {
ctx.Data["PageIsActions"] = true
run := getCurrentRunByPathParam(ctx)
run := resolveCurrentRunForView(ctx)
if ctx.Written() {
return
}

View File

@ -132,10 +132,12 @@ func OrgAssignment(orgAssignmentOpts OrgAssignmentOptions) func(ctx *Context) {
ctx.ServerError("IsOrgMember", err)
return
}
ctx.Org.CanCreateOrgRepo, err = org.CanCreateOrgRepo(ctx, ctx.Doer.ID)
if err != nil {
ctx.ServerError("CanCreateOrgRepo", err)
return
if ctx.Org.IsMember {
ctx.Org.CanCreateOrgRepo, err = org.CanCreateOrgRepo(ctx, ctx.Doer.ID)
if err != nil {
ctx.ServerError("CanCreateOrgRepo", err)
return
}
}
}
} else {

View File

@ -9,30 +9,40 @@ import (
"net/url"
"testing"
actions_model "code.gitea.io/gitea/models/actions"
auth_model "code.gitea.io/gitea/models/auth"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/setting"
actions_web "code.gitea.io/gitea/routers/web/repo/actions"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestActionsRoute(t *testing.T) {
onGiteaRun(t, func(t *testing.T, u *url.URL) {
user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
user2Session := loginUser(t, user2.Name)
user2Token := getTokenForLoggedInUser(t, user2Session, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
t.Run("testActionsRouteForIDBasedURL", testActionsRouteForIDBasedURL)
t.Run("testActionsRouteForLegacyIndexBasedURL", testActionsRouteForLegacyIndexBasedURL)
})
}
repo1 := createActionsTestRepo(t, user2Token, "actions-route-test-1", false)
runner1 := newMockRunner()
runner1.registerAsRepoRunner(t, user2.Name, repo1.Name, "mock-runner", []string{"ubuntu-latest"}, false)
repo2 := createActionsTestRepo(t, user2Token, "actions-route-test-2", false)
runner2 := newMockRunner()
runner2.registerAsRepoRunner(t, user2.Name, repo2.Name, "mock-runner", []string{"ubuntu-latest"}, false)
// testActionsRouteForIDBasedURL exercises the ID-based Actions web routes
// (/{owner}/{repo}/actions/runs/{run_id} and .../jobs/{job_id}): a run/job pair
// belonging to the repo must succeed, while run/job IDs from a different repo
// (or mismatched run/job combinations) must return 404, including for the
// workflow/approve/cancel/delete/artifacts/rerun endpoints.
//
// NOTE(review): this block appears to contain merge/diff artifacts and is not
// valid Go as-is — e.g. `workflowTreePath`, `workflowContent`, `opts`, `task1`,
// `task2`, and `req` are declared twice; a diff hunk marker
// ("@ -44,57 +54,239 @@") sits inside the workflow YAML raw string; and the
// raw-string backticks are unbalanced. Reconstruct against the upstream commit
// before relying on this code.
func testActionsRouteForIDBasedURL(t *testing.T) {
	user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
	user2Session := loginUser(t, user2.Name)
	user2Token := getTokenForLoggedInUser(t, user2Session, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
	workflowTreePath := ".gitea/workflows/test.yml"
	workflowContent := `name: test
	repo1 := createActionsTestRepo(t, user2Token, "actions-route-id-url-1", false)
	runner1 := newMockRunner()
	runner1.registerAsRepoRunner(t, user2.Name, repo1.Name, "mock-runner", []string{"ubuntu-latest"}, false)
	repo2 := createActionsTestRepo(t, user2Token, "actions-route-id-url-2", false)
	runner2 := newMockRunner()
	runner2.registerAsRepoRunner(t, user2.Name, repo2.Name, "mock-runner", []string{"ubuntu-latest"}, false)
	workflowTreePath := ".gitea/workflows/test.yml"
	workflowContent := `name: test
on:
  push:
    paths:
@ -44,57 +54,239 @@jobs:
      - run: echo job1
`
	opts := getWorkflowCreateFileOptions(user2, repo1.DefaultBranch, "create "+workflowTreePath, workflowContent)
	createWorkflowFile(t, user2Token, user2.Name, repo1.Name, workflowTreePath, opts)
	createWorkflowFile(t, user2Token, user2.Name, repo2.Name, workflowTreePath, opts)
	opts := getWorkflowCreateFileOptions(user2, repo1.DefaultBranch, "create "+workflowTreePath, workflowContent)
	createWorkflowFile(t, user2Token, user2.Name, repo1.Name, workflowTreePath, opts)
	createWorkflowFile(t, user2Token, user2.Name, repo2.Name, workflowTreePath, opts)
	task1 := runner1.fetchTask(t)
	_, job1, run1 := getTaskAndJobAndRunByTaskID(t, task1.Id)
	task2 := runner2.fetchTask(t)
	_, job2, run2 := getTaskAndJobAndRunByTaskID(t, task2.Id)
	task1 := runner1.fetchTask(t)
	_, job1, run1 := getTaskAndJobAndRunByTaskID(t, task1.Id)
	task2 := runner2.fetchTask(t)
	_, job2, run2 := getTaskAndJobAndRunByTaskID(t, task2.Id)
	// run1 and job1 belong to repo1, success
	req := NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo1.Name, run1.ID, job1.ID))
	resp := user2Session.MakeRequest(t, req, http.StatusOK)
	var viewResp actions_web.ViewResponse
	DecodeJSON(t, resp, &viewResp)
	assert.Len(t, viewResp.State.Run.Jobs, 1)
	assert.Equal(t, job1.ID, viewResp.State.Run.Jobs[0].ID)
	req := NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d", user2.Name, repo1.Name, run1.ID))
	user2Session.MakeRequest(t, req, http.StatusOK)
	// run2 and job2 do not belong to repo1, failure
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo1.Name, run2.ID, job2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo1.Name, run1.ID, job2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo1.Name, run2.ID, job1.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/workflow", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/approve", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/cancel", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/delete", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/artifacts/test.txt", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "DELETE", fmt.Sprintf("/%s/%s/actions/runs/%d/artifacts/test.txt", user2.Name, repo1.Name, run2.ID))
	req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d", user2.Name, repo1.Name, 999999))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	// run1 and job1 belong to repo1, success
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo1.Name, run1.ID, job1.ID))
	resp := user2Session.MakeRequest(t, req, http.StatusOK)
	var viewResp actions_web.ViewResponse
	DecodeJSON(t, resp, &viewResp)
	assert.Len(t, viewResp.State.Run.Jobs, 1)
	assert.Equal(t, job1.ID, viewResp.State.Run.Jobs[0].ID)
	// run2 and job2 do not belong to repo1, failure
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo1.Name, run2.ID, job2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo1.Name, run1.ID, job2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo1.Name, run2.ID, job1.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/workflow", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/approve", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/cancel", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/delete", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/artifacts/test.txt", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "DELETE", fmt.Sprintf("/%s/%s/actions/runs/%d/artifacts/test.txt", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	// make the tasks complete, then test rerun
	runner1.execTask(t, task1, &mockTaskOutcome{
		result: runnerv1.Result_RESULT_SUCCESS,
	})
	runner2.execTask(t, task2, &mockTaskOutcome{
		result: runnerv1.Result_RESULT_SUCCESS,
	})
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/rerun", user2.Name, repo1.Name, run2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d/rerun", user2.Name, repo1.Name, run2.ID, job2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d/rerun", user2.Name, repo1.Name, run1.ID, job2.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
	req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d/rerun", user2.Name, repo1.Name, run2.ID, job1.ID))
	user2Session.MakeRequest(t, req, http.StatusNotFound)
}
// testActionsRouteForLegacyIndexBasedURL verifies how the Actions web routes
// resolve "/{owner}/{repo}/actions/runs/{num}" and ".../jobs/{num}" when {num}
// can be interpreted either as a run/job ID or as a legacy run/job index:
//   - pure ID URLs resolve directly (200);
//   - a legacy run index redirects (302) to the ID-based URL;
//   - when a number is valid under both interpretations, the ID wins by
//     default and "?by_index=1" forces the legacy-index interpretation.
//
// Fixture beans are inserted with explicit primary keys via
// insertBeansWithExplicitIDs so the ID/index collisions are deterministic.
func testActionsRouteForLegacyIndexBasedURL(t *testing.T) {
	user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
	user2Session := loginUser(t, user2.Name)
	user2Token := getTokenForLoggedInUser(t, user2Session, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
	repo := createActionsTestRepo(t, user2Token, "actions-route-legacy-url", false)

	// mkRun builds an ActionRun fixture with an explicit ID and legacy index.
	mkRun := func(id, index int64, title, sha string) *actions_model.ActionRun {
		return &actions_model.ActionRun{
			ID:            id,
			Index:         index,
			RepoID:        repo.ID,
			OwnerID:       user2.ID,
			Title:         title,
			WorkflowID:    "legacy-route.yml",
			TriggerUserID: user2.ID,
			Ref:           "refs/heads/master",
			CommitSHA:     sha,
			Status:        actions_model.StatusWaiting,
		}
	}
	// mkJob builds an ActionRunJob fixture attached to the given run ID.
	mkJob := func(id, runID int64, name, sha string) *actions_model.ActionRunJob {
		return &actions_model.ActionRunJob{
			ID:        id,
			RunID:     runID,
			RepoID:    repo.ID,
			OwnerID:   user2.ID,
			CommitSHA: sha,
			Name:      name,
			Status:    actions_model.StatusWaiting,
		}
	}
	// A small ID-based run/job pair that should always resolve directly.
	smallIDRun := mkRun(80, 20, "legacy route small id", "aaa001")
	smallIDJob := mkJob(170, smallIDRun.ID, "legacy-small-job", smallIDRun.CommitSHA)
	// Another small run used to provide a job ID that belongs to a different run.
	otherSmallRun := mkRun(90, 30, "legacy route other small", "aaa002")
	otherSmallJob := mkJob(180, otherSmallRun.ID, "legacy-other-small-job", otherSmallRun.CommitSHA)
	// A large-ID run whose legacy run index should redirect to its ID-based URL.
	normalRun := mkRun(1500, 900, "legacy route normal", "aaa003")
	normalRunJob := mkJob(1600, normalRun.ID, "legacy-normal-job", normalRun.CommitSHA)
	// A run whose index collides with normalRun.ID to exercise summary-page ID-first behavior.
	collisionRun := mkRun(2400, 1500, "legacy route collision", "aaa004")
	collisionJobIdx0 := mkJob(2600, collisionRun.ID, "legacy-collision-job-1", collisionRun.CommitSHA)
	collisionJobIdx1 := mkJob(2601, collisionRun.ID, "legacy-collision-job-2", collisionRun.CommitSHA)
	// A small ID-based run/job pair that collides with a different legacy run/job index pair.
	ambiguousIDRun := mkRun(3, 1, "legacy route ambiguous id", "aaa005")
	ambiguousIDJob := mkJob(4, ambiguousIDRun.ID, "legacy-ambiguous-id-job", ambiguousIDRun.CommitSHA)
	// The legacy run/job target for the ambiguous /runs/3/jobs/4 URL.
	ambiguousLegacyRun := mkRun(1501, ambiguousIDRun.ID, "legacy route ambiguous legacy", "aaa006")
	ambiguousLegacyJobIdx0 := mkJob(1601, ambiguousLegacyRun.ID, "legacy-ambiguous-legacy-job-0", ambiguousLegacyRun.CommitSHA)
	ambiguousLegacyJobIdx1 := mkJob(1602, ambiguousLegacyRun.ID, "legacy-ambiguous-legacy-job-1", ambiguousLegacyRun.CommitSHA)
	ambiguousLegacyJobIdx2 := mkJob(1603, ambiguousLegacyRun.ID, "legacy-ambiguous-legacy-job-2", ambiguousLegacyRun.CommitSHA)
	ambiguousLegacyJobIdx3 := mkJob(1604, ambiguousLegacyRun.ID, "legacy-ambiguous-legacy-job-3", ambiguousLegacyRun.CommitSHA)
	ambiguousLegacyJobIdx4 := mkJob(1605, ambiguousLegacyRun.ID, "legacy-ambiguous-legacy-job-4", ambiguousLegacyRun.CommitSHA) // job_index=4
	ambiguousLegacyJobIdx5 := mkJob(1606, ambiguousLegacyRun.ID, "legacy-ambiguous-legacy-job-5", ambiguousLegacyRun.CommitSHA)
	ambiguousLegacyJobs := []*actions_model.ActionRunJob{
		ambiguousLegacyJobIdx0,
		ambiguousLegacyJobIdx1,
		ambiguousLegacyJobIdx2,
		ambiguousLegacyJobIdx3,
		ambiguousLegacyJobIdx4,
		ambiguousLegacyJobIdx5,
	}
	// ambiguousIDJob.ID == 4, so this picks ambiguousLegacyJobIdx4 — the job the
	// legacy-index interpretation of /runs/3/jobs/4 should resolve to.
	targetAmbiguousLegacyJob := ambiguousLegacyJobs[int(ambiguousIDJob.ID)]
	insertBeansWithExplicitIDs(t, "action_run",
		smallIDRun, otherSmallRun, normalRun, ambiguousIDRun, ambiguousLegacyRun, collisionRun,
	)
	insertBeansWithExplicitIDs(t, "action_run_job",
		smallIDJob, otherSmallJob, normalRunJob, ambiguousIDJob, collisionJobIdx0, collisionJobIdx1,
		ambiguousLegacyJobIdx0, ambiguousLegacyJobIdx1, ambiguousLegacyJobIdx2, ambiguousLegacyJobIdx3, ambiguousLegacyJobIdx4, ambiguousLegacyJobIdx5,
	)
	t.Run("OnlyRunID", func(t *testing.T) {
		// ID-based URLs must be valid
		req := NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d", user2.Name, repo.Name, smallIDRun.ID))
		user2Session.MakeRequest(t, req, http.StatusOK)
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d", user2.Name, repo.Name, normalRun.ID))
		user2Session.MakeRequest(t, req, http.StatusOK)
	})
	t.Run("OnlyRunIndex", func(t *testing.T) {
		// legacy run index should redirect to the ID-based URL
		req := NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d", user2.Name, repo.Name, normalRun.Index))
		resp := user2Session.MakeRequest(t, req, http.StatusFound)
		assert.Equal(t, fmt.Sprintf("/%s/%s/actions/runs/%d", user2.Name, repo.Name, normalRun.ID), resp.Header().Get("Location"))
		// Best-effort compatibility prefers the run ID when the same number also exists as a legacy run index.
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d", user2.Name, repo.Name, collisionRun.Index))
		resp = user2Session.MakeRequest(t, req, http.StatusOK)
		assert.Contains(t, resp.Body.String(), fmt.Sprintf(`data-run-id="%d"`, normalRun.ID)) // because collisionRun.Index == normalRun.ID
		// by_index=1 should force the summary page to use the legacy run index interpretation.
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d?by_index=1", user2.Name, repo.Name, collisionRun.Index))
		resp = user2Session.MakeRequest(t, req, http.StatusFound)
		assert.Equal(t, fmt.Sprintf("/%s/%s/actions/runs/%d", user2.Name, repo.Name, collisionRun.ID), resp.Header().Get("Location"))
	})
	t.Run("RunIDAndJobID", func(t *testing.T) {
		// ID-based URLs must be valid
		req := NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo.Name, smallIDRun.ID, smallIDJob.ID))
		user2Session.MakeRequest(t, req, http.StatusOK)
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo.Name, normalRun.ID, normalRunJob.ID))
		user2Session.MakeRequest(t, req, http.StatusOK)
	})
	t.Run("RunIndexAndJobIndex", func(t *testing.T) {
		// /user2/repo2/actions/runs/3/jobs/4 is ambiguous:
		// - it may resolve as the ID-based URL for run_id=3/job_id=4,
		// - or as the legacy index-based URL for run_index=3/job_index=4 which should redirect to run_id=1501/job_id=1605.
		idBasedURL := fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo.Name, ambiguousIDRun.ID, ambiguousIDJob.ID)
		indexBasedURL := fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo.Name, ambiguousLegacyRun.Index, 4) // for ambiguousLegacyJobIdx4
		assert.Equal(t, idBasedURL, indexBasedURL)
		// When both interpretations are valid, prefer the ID-based target by default.
		req := NewRequest(t, "GET", indexBasedURL)
		user2Session.MakeRequest(t, req, http.StatusOK)
		redirectURL := fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo.Name, ambiguousLegacyRun.ID, targetAmbiguousLegacyJob.ID)
		// by_index=1 should explicitly force the legacy run/job index interpretation.
		req = NewRequest(t, "GET", indexBasedURL+"?by_index=1")
		resp := user2Session.MakeRequest(t, req, http.StatusFound)
		assert.Equal(t, redirectURL, resp.Header().Get("Location"))
		// legacy job index 0 should redirect to the first job's ID
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/0", user2.Name, repo.Name, collisionRun.Index))
		resp = user2Session.MakeRequest(t, req, http.StatusFound)
		assert.Equal(t, fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo.Name, collisionRun.ID, collisionJobIdx0.ID), resp.Header().Get("Location"))
		// legacy job index 1 should redirect to the second job's ID
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/1", user2.Name, repo.Name, collisionRun.Index))
		resp = user2Session.MakeRequest(t, req, http.StatusFound)
		assert.Equal(t, fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo.Name, collisionRun.ID, collisionJobIdx1.ID), resp.Header().Get("Location"))
	})
	t.Run("InvalidURLs", func(t *testing.T) {
		// the job ID from a different run should not match
		req := NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d", user2.Name, repo.Name, smallIDRun.ID, otherSmallJob.ID))
		user2Session.MakeRequest(t, req, http.StatusNotFound)
		// NOTE(review): the statements below referencing runner1/task1/runner2/task2/
		// repo1/run1/run2/job1/job2 use identifiers that are NOT defined in this
		// function — they look like leftover lines from a merge with
		// testActionsRouteForIDBasedURL; confirm against the upstream commit.
		// make the tasks complete, then test rerun
		runner1.execTask(t, task1, &mockTaskOutcome{
			result: runnerv1.Result_RESULT_SUCCESS,
		})
		runner2.execTask(t, task2, &mockTaskOutcome{
			result: runnerv1.Result_RESULT_SUCCESS,
		})
		req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/rerun", user2.Name, repo1.Name, run2.ID))
		// resolve the run by index first and then return not found because the job index is out-of-range
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/2", user2.Name, repo.Name, normalRun.ID))
		user2Session.MakeRequest(t, req, http.StatusNotFound)
		req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d/rerun", user2.Name, repo1.Name, run2.ID, job2.ID))
		// an out-of-range job index should return not found
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/2", user2.Name, repo.Name, collisionRun.Index))
		user2Session.MakeRequest(t, req, http.StatusNotFound)
		req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d/rerun", user2.Name, repo1.Name, run1.ID, job2.ID))
		// a missing run number should return not found
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d", user2.Name, repo.Name, 999999))
		user2Session.MakeRequest(t, req, http.StatusNotFound)
		req = NewRequest(t, "POST", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/%d/rerun", user2.Name, repo1.Name, run2.ID, job1.ID))
		// a missing legacy run index should return not found
		req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s/actions/runs/%d/jobs/0", user2.Name, repo.Name, 999999))
		user2Session.MakeRequest(t, req, http.StatusNotFound)
	})
}
// insertBeansWithExplicitIDs inserts fixture beans whose primary keys are
// pre-assigned, committing them in a single transaction. On MSSQL the
// auto-increment column must be explicitly unlocked first, because the
// database rejects user-supplied values for identity columns by default.
func insertBeansWithExplicitIDs(t *testing.T, table string, beans ...any) {
	t.Helper()

	ctx, committer, err := db.TxContext(t.Context())
	require.NoError(t, err)
	defer committer.Close()

	if setting.Database.Type.IsMSSQL() {
		// Enable IDENTITY_INSERT for the target table and make sure it is
		// switched back off when this helper returns.
		_, onErr := db.Exec(ctx, fmt.Sprintf("SET IDENTITY_INSERT [%s] ON", table))
		require.NoError(t, onErr)
		defer func() {
			_, offErr := db.Exec(ctx, fmt.Sprintf("SET IDENTITY_INSERT [%s] OFF", table))
			require.NoError(t, offErr)
		}()
	}

	require.NoError(t, db.Insert(ctx, beans...))
	require.NoError(t, committer.Commit())
}

View File

@ -19,15 +19,35 @@ import (
"github.com/stretchr/testify/assert"
)
func TestCreateForkNoLogin(t *testing.T) {
// TestAPIFork groups the fork-related API integration checks under one
// prepared test environment and runs them as named subtests.
func TestAPIFork(t *testing.T) {
	defer tests.PrepareTestEnv(t)()

	subtests := []struct {
		name string
		fn   func(t *testing.T)
	}{
		{"CreateForkNoLogin", testCreateForkNoLogin},
		{"CreateForkOrgNoCreatePermission", testCreateForkOrgNoCreatePermission},
		{"APIForkListLimitedAndPrivateRepos", testAPIForkListLimitedAndPrivateRepos},
		{"GetPrivateReposForks", testGetPrivateReposForks},
	}
	for _, st := range subtests {
		t.Run(st.name, st.fn)
	}
}
// testCreateForkNoLogin checks that forking a repository without any
// authentication is rejected with 401 Unauthorized.
func testCreateForkNoLogin(t *testing.T) {
	forkReq := NewRequestWithJSON(t, "POST", "/api/v1/repos/user2/repo1/forks", &api.CreateForkOption{})
	MakeRequest(t, forkReq, http.StatusUnauthorized)
}
func TestAPIForkListLimitedAndPrivateRepos(t *testing.T) {
defer tests.PrepareTestEnv(t)()
// testCreateForkOrgNoCreatePermission checks that a logged-in user who lacks
// repo-creation permission in an organization cannot fork a repository into
// that organization: the API must answer 403 Forbidden.
func testCreateForkOrgNoCreatePermission(t *testing.T) {
	sess := loginUser(t, "user4")
	token := getTokenForLoggedInUser(t, sess, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteOrganization)

	// Sanity-check the fixture first: user 4 must not be allowed to create
	// repositories inside organization 3.
	org := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
	canCreate, err := org_model.OrgFromUser(org).CanCreateOrgRepo(t.Context(), 4)
	assert.NoError(t, err)
	assert.False(t, canCreate)

	forkReq := NewRequestWithJSON(t, "POST", "/api/v1/repos/user2/repo1/forks", &api.CreateForkOption{
		Organization: &org.Name,
	}).AddTokenAuth(token)
	MakeRequest(t, forkReq, http.StatusForbidden)
}
func testAPIForkListLimitedAndPrivateRepos(t *testing.T) {
user1Sess := loginUser(t, "user1")
user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{Name: "user1"})
@ -64,10 +84,7 @@ func TestAPIForkListLimitedAndPrivateRepos(t *testing.T) {
req := NewRequest(t, "GET", "/api/v1/repos/user2/repo1/forks")
resp := MakeRequest(t, req, http.StatusOK)
var forks []*api.Repository
DecodeJSON(t, resp, &forks)
forks := DecodeJSON(t, resp, []*api.Repository{})
assert.Empty(t, forks)
assert.Equal(t, "0", resp.Header().Get("X-Total-Count"))
})
@ -78,9 +95,7 @@ func TestAPIForkListLimitedAndPrivateRepos(t *testing.T) {
req := NewRequest(t, "GET", "/api/v1/repos/user2/repo1/forks").AddTokenAuth(user1Token)
resp := MakeRequest(t, req, http.StatusOK)
var forks []*api.Repository
DecodeJSON(t, resp, &forks)
forks := DecodeJSON(t, resp, []*api.Repository{})
assert.Len(t, forks, 2)
assert.Equal(t, "2", resp.Header().Get("X-Total-Count"))
@ -88,28 +103,22 @@ func TestAPIForkListLimitedAndPrivateRepos(t *testing.T) {
req = NewRequest(t, "GET", "/api/v1/repos/user2/repo1/forks").AddTokenAuth(user1Token)
resp = MakeRequest(t, req, http.StatusOK)
forks = []*api.Repository{}
DecodeJSON(t, resp, &forks)
forks = DecodeJSON(t, resp, []*api.Repository{})
assert.Len(t, forks, 2)
assert.Equal(t, "2", resp.Header().Get("X-Total-Count"))
})
}
func TestGetPrivateReposForks(t *testing.T) {
defer tests.PrepareTestEnv(t)()
func testGetPrivateReposForks(t *testing.T) {
user1Sess := loginUser(t, "user1")
repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2}) // private repository
privateOrg := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 23})
user1Token := getTokenForLoggedInUser(t, user1Sess, auth_model.AccessTokenScopeWriteRepository)
forkedRepoName := "forked-repo"
// create fork from a private repository
req := NewRequestWithJSON(t, "POST", "/api/v1/repos/"+repo2.FullName()+"/forks", &api.CreateForkOption{
Organization: &privateOrg.Name,
Name: &forkedRepoName,
Name: new("forked-repo"),
}).AddTokenAuth(user1Token)
MakeRequest(t, req, http.StatusAccepted)
@ -117,8 +126,7 @@ func TestGetPrivateReposForks(t *testing.T) {
req = NewRequest(t, "GET", "/api/v1/repos/"+repo2.FullName()+"/forks").AddTokenAuth(user1Token)
resp := MakeRequest(t, req, http.StatusOK)
forks := []*api.Repository{}
DecodeJSON(t, resp, &forks)
forks := DecodeJSON(t, resp, []*api.Repository{})
assert.Len(t, forks, 1)
assert.Equal(t, "1", resp.Header().Get("X-Total-Count"))
assert.Equal(t, "forked-repo", forks[0].Name)

View File

@ -415,7 +415,7 @@ func DecodeJSON[T any](t testing.TB, resp *httptest.ResponseRecorder, v T) (ret
// FIXME: JSON-KEY-CASE: for testing purpose only, because many structs don't provide `json` tags, they just use capitalized field names
decoder := json.NewDecoderCaseInsensitive(resp.Body)
require.NoError(t, decoder.Decode(v))
require.NoError(t, decoder.Decode(&v))
return v
}