
Merge 0f2a872e1c6bc16166f102312835217f20648116 into a5d81d9ce230aaa6e1021b6236ca01cb6d2b56c3

This commit is contained in:
Dmitry Frolov 2026-05-09 08:37:06 +08:00 committed by GitHub
commit 7b9ea57540
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 1440 additions and 42 deletions

View File

@ -1094,6 +1094,18 @@ LEVEL = Info
;; Allow to fork repositories without maximum number limit
;ALLOW_FORK_WITHOUT_MAXIMUM_LIMIT = true
;
;; Specify a global repository Git size limit in bytes. -1 disables the limit, 0 limits repositories to zero bytes
;; Standard size units such as B, KB, KiB, ..., EB, EiB can be used; a plain number is treated as bytes
;; If the limit is already reached, operations that do not increase disk consumption are still allowed
;; This is experimental and subject to change
;GIT_SIZE_MAX = -1
;; Specify a global repository LFS size limit in bytes. -1 disables the limit, 0 limits repositories to zero bytes
;; Standard size units such as B, KB, KiB, ..., EB, EiB can be used; a plain number is treated as bytes
;; If the limit is already reached, operations that do not increase disk consumption are still allowed
;; This is experimental and subject to change
;LFS_SIZE_MAX = -1
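;; For example (illustrative values only), a 5 GiB Git limit and a 10 GiB LFS limit could be configured as:
;;   GIT_SIZE_MAX = 5 GiB
;;   LFS_SIZE_MAX = 10 GiB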
;; Allow to fork repositories into the same owner (user or organization)
;; This feature is experimental, not fully tested, and may be changed in the future

View File

@ -252,8 +252,8 @@ func (repo *Repository) SanitizedOriginalURL() string {
// text representations to be returned in SizeDetail.Name
const (
SizeDetailNameGit = "git"
SizeDetailNameLFS = "lfs"
SizeDetailNameGit = "Git"
SizeDetailNameLFS = "LFS"
)
type SizeDetail struct {
@ -613,6 +613,46 @@ func (repo *Repository) IsOwnedBy(userID int64) bool {
return repo.OwnerID == userID
}
// GetActualSizeLimit returns the repository Git size limit in bytes
func (repo *Repository) GetActualSizeLimit() int64 {
return setting.Repository.GitSizeMax
}
// IsRepoSizeOversized returns true if adding additionalSize would exceed the Git size limit
func (repo *Repository) IsRepoSizeOversized(additionalSize int64) bool {
limit := repo.GetActualSizeLimit()
if limit < 0 {
return false
}
newSize := repo.GitSize + additionalSize
return newSize > limit && newSize > repo.GitSize
}
// ShouldCheckRepoSize returns true if size limit checking is enabled
func (repo *Repository) ShouldCheckRepoSize() bool {
return setting.Repository.GitSizeMax > -1
}
// GetActualLFSSizeLimit returns repository LFS size limit in bytes
func (repo *Repository) GetActualLFSSizeLimit() int64 {
return setting.Repository.LFSSizeMax
}
// ShouldCheckLFSSize returns true if LFS size limit checking is enabled
func (repo *Repository) ShouldCheckLFSSize() bool {
return setting.Repository.LFSSizeMax > -1
}
// IsLFSSizeOversized returns true if adding additionalSize would exceed the LFS size limit
func (repo *Repository) IsLFSSizeOversized(additionalSize int64) bool {
limit := repo.GetActualLFSSizeLimit()
if limit < 0 {
return false
}
newSize := repo.LFSSize + additionalSize
return newSize > limit && newSize > repo.LFSSize
}
// CanCreateBranch returns true if repository meets the requirements for creating new branches.
func (repo *Repository) CanCreateBranch() bool {
return !repo.IsMirror

View File

@ -12,6 +12,7 @@ import (
"fmt"
"hash"
"strconv"
"strings"
"time"
"code.gitea.io/gitea/modules/setting"
@ -95,6 +96,17 @@ func FileSize(s int64) string {
return humanize.IBytes(uint64(s))
}
// GetFileSize parses a human-readable size string (e.g. "11 KiB") into a byte count; a plain number is treated as bytes.
func GetFileSize(s string) (int64, error) {
s = strings.TrimSpace(s)
// default to bytes if no unit is provided
if _, err := strconv.ParseInt(s, 10, 64); err == nil {
s += " B"
}
v, err := humanize.ParseBytes(s)
return int64(v), err
}
// StringsToInt64s converts a slice of string to a slice of int64.
func StringsToInt64s(strs []string) ([]int64, error) {
if strs == nil {

View File

@ -93,6 +93,27 @@ func TestFileSize(t *testing.T) {
assert.Equal(t, "2.0 EiB", FileSize(size))
}
func TestGetFileSizeParsesHumanSizes(t *testing.T) {
cases := []struct {
in string
want int64
}{
{"11", 11},
{"11 B", 11},
{"11 KiB", 11 * 1024},
{"11 MiB", 11 * 1024 * 1024},
{"11 GiB", 11 * 1024 * 1024 * 1024},
{"11 TiB", 11 * 1024 * 1024 * 1024 * 1024},
{"11 PiB", 11 * 1024 * 1024 * 1024 * 1024 * 1024},
{"2 EiB", 2 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024},
}
for _, tc := range cases {
got, err := GetFileSize(tc.in)
assert.NoError(t, err, tc.in)
assert.Equal(t, tc.want, got, tc.in)
}
}
func TestStringsToInt64s(t *testing.T) {
testSuccess := func(input []string, expected []int64) {
result, err := StringsToInt64s(input)

View File

@ -224,3 +224,57 @@ func Push(ctx context.Context, repoPath string, opts PushOptions) error {
return nil
}
// CountObject represents the output of `git count-objects -v` for a repository
type CountObject struct {
Count int64
Size int64
InPack int64
Packs int64
SizePack int64
PrunePack int64
Garbage int64
SizeGarbage int64
}
const (
statCount = "count: "
statSize = "size: "
statInpack = "in-pack: "
statPacks = "packs: "
statSizePack = "size-pack: "
statPrunePackage = "prune-package: "
statGarbage = "garbage: "
statSizeGarbage = "size-garbage: "
)
// ParseCountObjectsResult parses the output from git count-objects -v and returns a CountObject struct;
// size, size-pack and size-garbage are converted from KiB to bytes
func ParseCountObjectsResult(output string) *CountObject {
const bytesPerKilobyte = 1024
repoSize := new(CountObject)
for line := range strings.SplitSeq(output, "\n") {
switch {
case strings.HasPrefix(line, statCount):
repoSize.Count, _ = strconv.ParseInt(line[7:], 10, 64)
case strings.HasPrefix(line, statSize):
number, _ := strconv.ParseInt(line[6:], 10, 64)
repoSize.Size = number * bytesPerKilobyte
case strings.HasPrefix(line, statInpack):
repoSize.InPack, _ = strconv.ParseInt(line[9:], 10, 64)
case strings.HasPrefix(line, statPacks):
repoSize.Packs, _ = strconv.ParseInt(line[7:], 10, 64)
case strings.HasPrefix(line, statSizePack):
number, _ := strconv.ParseInt(line[11:], 10, 64)
repoSize.SizePack = number * bytesPerKilobyte
case strings.HasPrefix(line, statPrunePackage):
repoSize.PrunePack, _ = strconv.ParseInt(line[16:], 10, 64)
case strings.HasPrefix(line, statGarbage):
repoSize.Garbage, _ = strconv.ParseInt(line[9:], 10, 64)
case strings.HasPrefix(line, statSizeGarbage):
number, _ := strconv.ParseInt(line[14:], 10, 64)
repoSize.SizeGarbage = number * bytesPerKilobyte
}
}
return repoSize
}
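For reference, `git count-objects -v` emits a report like the following (numbers are illustrative); size, size-pack and size-garbage are reported in KiB, which is why the parser above multiplies them by 1024:
count: 12
size: 48
in-pack: 2045
packs: 1
size-pack: 7340
prune-packable: 0
garbage: 0
size-garbage: 0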

View File

@ -4,8 +4,12 @@
package gitrepo
import (
"context"
"os"
"path/filepath"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/git/gitcmd"
)
const notRegularFileMode = os.ModeSymlink | os.ModeNamedPipe | os.ModeSocket | os.ModeDevice | os.ModeCharDevice | os.ModeIrregular
@ -35,3 +39,20 @@ func CalcRepositorySize(repo Repository) (int64, error) {
})
return size, err
}
// CountObjects returns the results of git count-objects on the repository
func CountObjects(ctx context.Context, repo Repository) (*git.CountObject, error) {
return CountObjectsWithEnv(ctx, repo, nil)
}
// CountObjectsWithEnv returns the results of git count-objects on the repository
// with custom environment variables (e.g., GIT_QUARANTINE_PATH for pre-receive hooks)
func CountObjectsWithEnv(ctx context.Context, repo Repository, env []string) (*git.CountObject, error) {
cmd := gitcmd.NewCommand("count-objects", "-v")
stdout, _, err := cmd.WithDir(repoPath(repo)).WithEnv(env).RunStdString(ctx)
if err != nil {
return nil, err
}
return git.ParseCountObjectsResult(stdout), nil
}
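A minimal usage sketch (quarantinePath is a hypothetical name here; the pre-receive hook later in this change builds the real environment from the hook options):
env := append(os.Environ(), "GIT_QUARANTINE_PATH="+quarantinePath) // hypothetical quarantine dir
pushStats, err := gitrepo.CountObjectsWithEnv(ctx, repo, env)
if err == nil {
log.Trace("incoming loose objects: %d bytes, packed: %d bytes", pushStats.Size, pushStats.SizePack)
}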

View File

@ -6,9 +6,12 @@ package setting
import (
"os/exec"
"path/filepath"
"strconv"
"strings"
"code.gitea.io/gitea/modules/log"
"github.com/dustin/go-humanize"
)
// enumerates all the policy repository creating
@ -59,6 +62,8 @@ var (
DisableDownloadSourceArchives bool
AllowForkWithoutMaximumLimit bool
AllowForkIntoSameOwner bool
GitSizeMax int64 `ini:"GIT_SIZE_MAX"`
LFSSizeMax int64 `ini:"LFS_SIZE_MAX"`
// StreamArchives makes Gitea stream git archive files to the client directly instead of creating an archive first.
// Ideally all users should use this streaming method. However, at the moment we don't know whether there are
@ -290,10 +295,56 @@ var (
ScriptType = "bash"
)
func UpdateGlobalRepositoryLimit(gitSizeMax, lfsSizeMax int64) {
Repository.GitSizeMax = gitSizeMax
Repository.LFSSizeMax = lfsSizeMax
}
// FormatRepositorySizeLimit returns "-1" for disabled limits, otherwise returns human-readable size.
func FormatRepositorySizeLimit(sizeInBytes int64) string {
if sizeInBytes == -1 {
return "-1"
}
return humanize.IBytes(uint64(sizeInBytes))
}
// ParseRepositorySizeLimit accepts "-1" to disable the limit, otherwise parses as a byte size.
func ParseRepositorySizeLimit(s string) (int64, error) {
s = strings.TrimSpace(s)
if s == "-1" {
return -1, nil
}
// default to bytes if no unit is provided
if _, err := strconv.ParseInt(s, 10, 64); err == nil {
s += " B"
}
v, err := humanize.ParseBytes(s)
return int64(v), err
}
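// Illustrative round-trips (assuming go-humanize parsing/formatting semantics):
//   ParseRepositorySizeLimit("5 GiB")     -> 5368709120, nil
//   ParseRepositorySizeLimit("-1")        -> -1, nil
//   FormatRepositorySizeLimit(5368709120) -> "5.0 GiB"
//   FormatRepositorySizeLimit(-1)         -> "-1"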
func parseSize(sec ConfigSection, key string, def int64) int64 {
v := sec.Key(key).MustString("")
if v == "" {
return def
}
if v == "-1" {
return -1
}
size, err := humanize.ParseBytes(v)
if err != nil {
return def
}
return int64(size)
}
func loadRepositoryFrom(rootCfg ConfigProvider) {
var err error
// Determine and create root git repository path.
sec := rootCfg.Section("repository")
Repository.GitSizeMax = parseSize(sec, "GIT_SIZE_MAX", -1)
Repository.LFSSizeMax = parseSize(sec, "LFS_SIZE_MAX", -1)
Repository.DisableHTTPGit = sec.Key("DISABLE_HTTP_GIT").MustBool()
Repository.UseCompatSSHURI = sec.Key("USE_COMPAT_SSH_URI").MustBool()
Repository.GoGetCloneURLProtocol = sec.Key("GO_GET_CLONE_URL_PROTOCOL").MustString("https")

View File

@ -3083,8 +3083,15 @@
"admin.repos.name": "Name",
"admin.repos.private": "Private",
"admin.repos.issues": "Issues",
"admin.repos.size": "Size",
"admin.repos.size": "Git Size",
"admin.repos.lfs_size": "LFS Size",
"admin.repos.settings": "Global Repository Settings",
"admin.repos.git_size_max": "Max Git Size (Global), Bytes",
"admin.repos.git_size_max_helper": "Maximum Git size allowed for a single repository. Set to -1 for unlimited.",
"admin.repos.lfs_size_max": "Max LFS Size (Global), Bytes",
"admin.repos.lfs_size_max_helper": "Maximum LFS size allowed for a single repository. Set to -1 for unlimited.",
"admin.repos.update_settings": "Update Settings",
"admin.repos.update_success": "Global repository limits have been updated.",
"admin.packages.package_manage_panel": "Package Management",
"admin.packages.total_size": "Total Size: %s",
"admin.packages.unreferenced_size": "Unreferenced Size: %s",

View File

@ -4,10 +4,18 @@
package private
import (
"bytes"
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
asymkey_model "code.gitea.io/gitea/models/asymkey"
git_model "code.gitea.io/gitea/models/git"
@ -16,11 +24,13 @@ import (
access_model "code.gitea.io/gitea/models/perm/access"
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/git/gitcmd"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/private"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/web"
"code.gitea.io/gitea/services/agit"
@ -105,22 +115,654 @@ func (ctx *preReceiveContext) AssertCreatePullRequest() bool {
return true
}
// calculateSizeOfObject calculates the size of one git object via git cat-file -s command
func calculateSizeOfObject(ctx *gitea_context.PrivateContext, dir string, env []string, objectID string) (int64, error) {
objectSizeStr, _, err := gitcmd.NewCommand("cat-file", "-s").AddDynamicArguments(objectID).WithDir(dir).WithEnv(env).RunStdString(ctx)
if err != nil {
log.Trace("CalculateSizeOfRemovedObjects: Error during git cat-file -s on object: %s", objectID)
return 0, err
}
objectSize, errParse := strconv.ParseInt(strings.TrimSpace(objectSizeStr), 10, 64)
if errParse != nil {
log.Trace("CalculateSizeOfRemovedObjects: Error during ParseInt on string '%s'", objectID)
return 0, errParse
}
return objectSize, nil
}
// calculateSizeOfObjectsFromCache calculates the total size of objects added to and removed from the repository by the new push,
// using the object sizes that were cached for this run
func calculateSizeOfObjectsFromCache(newCommitObjects, oldCommitObjects, otherCommitObjects map[string]bool, commitObjectsSizes map[string]int64) (addedSize, removedSize int64) {
// Calculate size of objects that were added
for objectID := range newCommitObjects {
if _, exists := oldCommitObjects[objectID]; !exists {
addedSize += commitObjectsSizes[objectID]
}
}
// Calculate size of objects that were removed
for objectID := range oldCommitObjects {
if _, exists := newCommitObjects[objectID]; !exists {
if _, exists := otherCommitObjects[objectID]; !exists {
removedSize += commitObjectsSizes[objectID]
}
}
}
return addedSize, removedSize
}
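// Worked example (illustrative): with newCommitObjects={a,b,c}, oldCommitObjects={a,d}, otherCommitObjects={d}
// and commitObjectsSizes={a:10, b:20, c:5, d:7}, addedSize is 25 (b and c are new) and removedSize is 0,
// because d, although gone from the new commit, is still referenced elsewhere in the repository.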
// convertObjectsToMap takes a newline-separated string of git objects and
// converts it into a map for efficient lookup.
func convertObjectsToMap(objects string) map[string]bool {
objectsMap := make(map[string]bool)
for object := range strings.SplitSeq(objects, "\n") {
if len(object) == 0 {
continue
}
objectID := strings.Split(object, " ")[0]
objectsMap[objectID] = true
}
return objectsMap
}
// convertObjectsToSlice converts a list of hashes in a string from the git rev-list --objects command to a slice of string objects
func convertObjectsToSlice(objects string) (objectIDs []string) {
for object := range strings.SplitSeq(objects, "\n") {
if len(object) == 0 {
continue
}
objectID := strings.Split(object, " ")[0]
objectIDs = append(objectIDs, objectID)
}
return objectIDs
}
// loadObjectSizesFromPack access all packs that this push or repo has
// and load compressed object size in bytes into objectSizes map
// using `git verify-pack -v` output
func loadObjectSizesFromPack(ctx *gitea_context.PrivateContext, dir string, env []string, objectsSizes map[string]int64) error {
// Find the path from GIT_QUARANTINE_PATH environment variable (path to the pack file)
var packPath string
var errExec error
for _, envVar := range env {
split := strings.SplitN(envVar, "=", 2)
if split[0] == "GIT_QUARANTINE_PATH" {
packPath = split[1]
break
}
}
// if no quarantine path was determined, fall back to reading the pack files from the main repository
if packPath == "" {
log.Trace("GIT_QUARANTINE_PATH not found in the environment variables. Will read the pack files from main repo instead")
packPath = filepath.Join(ctx.Repo.Repository.RepoPath(), "./objects/")
}
log.Trace("packPath: %s", packPath)
// Find all pack files *.idx in the quarantine directory
packFiles, err := filepath.Glob(filepath.Join(packPath, "./pack/*.idx"))
// if globbing fails or no pack files are found, we only log and continue without pack sizes
if err != nil {
log.Trace("Error during finding pack files %s: %v", filepath.Join(packPath, "./pack/*.idx"), err)
}
// Loop over each pack file
i := 0
for _, packFile := range packFiles {
log.Trace("Processing packfile %s", packFile)
// Extract and store in cache objectsSizes the sizes of the object parsing output of the `git verify-pack` command
output, _, err := gitcmd.NewCommand("verify-pack", "-v").AddDynamicArguments(packFile).WithDir(dir).WithEnv(env).RunStdString(ctx)
if err != nil {
log.Trace("Error during git verify-pack on pack file: %s", packFile)
if errExec == nil {
errExec = err
} else {
errExec = fmt.Errorf("%w; %v", errExec, err)
}
continue
}
// Parsing the output of the git verify-pack command
lines := strings.SplitSeq(output, "\n")
for line := range lines {
fields := strings.Fields(line)
if len(fields) < 4 {
continue
}
// Second field has object type
objectType := fields[1]
if objectType != "commit" && objectType != "tree" && objectType != "blob" && objectType != "tag" {
continue
}
// First field is the object hash
objectID := fields[0]
// Fourth field is the object's compressed (in-pack) size
size, err := strconv.ParseInt(fields[3], 10, 64)
if err != nil {
log.Trace("Failed to parse size for object %s: %v", objectID, err)
continue
}
i++
objectsSizes[objectID] = size
}
}
log.Trace("Loaded %d items from packfiles", i)
return errExec
}
// loadObjectsSizesViaCatFile runs `git cat-file -s` across 10 workers to determine the size of each object in objectIDs.
// Objects whose size is already cached are skipped.
// `git cat-file --batch-check` can't be used here, as it only sees the git DB from before the push and knows nothing about the new commit objects.
func loadObjectsSizesViaCatFile(ctx *gitea_context.PrivateContext, dir string, env, objectIDs []string, objectsSizes map[string]int64) error {
// numWorkers is the number of workers that run calculateSizeOfObject concurrently.
const numWorkers = 10
var wg sync.WaitGroup
var mu sync.Mutex
// errExec will hold the first error.
var errOnce sync.Once
var errExec error
// errCount will count how many *additional* errors occurred after the first one.
var errCount int64
// Prepare numWorkers slices to store the work
reducedObjectIDs := make([][]string, numWorkers)
for i := range reducedObjectIDs {
reducedObjectIDs[i] = make([]string, 0, len(objectIDs)/numWorkers+1)
}
// Loop over all objectIDs and find which ones are missing size information
i := 0
for _, objectID := range objectIDs {
_, exists := objectsSizes[objectID]
if !exists {
reducedObjectIDs[i%numWorkers] = append(reducedObjectIDs[i%numWorkers], objectID)
i++
}
}
// Start workers and determine size using `git cat-file -s`, store in objectsSizes cache
for w := 1; w <= numWorkers; w++ {
wg.Add(1)
go func(reducedObjectIDs *[]string) {
defer wg.Done()
for _, objectID := range *reducedObjectIDs {
workerCtx := ctx
// Ensure that each worker has its own copy of the env environment to prevent races
env := append([]string(nil), env...)
objectSize, err := calculateSizeOfObject(workerCtx, dir, env, objectID)
// Upon error we store the first error and continue processing, as we can't stop the push
// if we were not able to calculate the size of the object, but we keep one error to
// return at the end, along with a count of subsequent similar errors.
if err != nil {
ran := false
errOnce.Do(func() {
errExec = err
ran = true
})
if !ran {
atomic.AddInt64(&errCount, 1)
}
}
mu.Lock() // Protecting shared resource
objectsSizes[objectID] = objectSize
mu.Unlock() // Releasing shared resource for other goroutines
}
}(&reducedObjectIDs[(w-1)%numWorkers])
}
wg.Wait()
if errExec == nil {
return nil
}
if n := atomic.LoadInt64(&errCount); n > 0 {
return fmt.Errorf("%w (and %d subsequent similar errors)", errExec, n)
}
return errExec
}
// loadObjectsSizesViaBatch retrieves the sizes of the objects in objectIDs via `git cat-file --batch`.
// It can't be used for new commit objects, since those are not yet in the repository's object database.
func loadObjectsSizesViaBatch(ctx *gitea_context.PrivateContext, repoPath string, objectIDs []string, objectsSizes map[string]int64) error {
reducedObjectIDs := make([]string, 0, len(objectIDs))
for _, objectID := range objectIDs {
_, exists := objectsSizes[objectID]
if !exists {
reducedObjectIDs = append(reducedObjectIDs, objectID)
}
}
batch, err := git.NewBatch(ctx, repoPath)
if err != nil {
log.Error("Unable to create CatFileBatch in %s Error: %v", repoPath, err)
return fmt.Errorf("Fail to create CatFileBatch: %v", err)
}
defer batch.Close()
for _, objectID := range reducedObjectIDs {
info, err := batch.QueryInfo(objectID)
if err != nil {
log.Trace("Failed to query info for object %s: %v", objectID, err)
continue
}
objectsSizes[objectID] = info.Size
}
return nil
}
/*
LFS pointer scanning (fast-ish, bounded)
We look for pointer blobs (small, <= 4KiB) and parse:
oid sha256:<64hex>
size <bytes>
This lets us compute:
- incomingNewToRepoLFS: pointers that are new vs old AND not referenced in "other" parts of repo
- removedLFSSize: pointers removed vs new AND not referenced in "other"
*/
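// A typical pointer blob looks like this (oid and size are illustrative):
//
//	version https://git-lfs.github.com/spec/v1
//	oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393
//	size 12345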
var (
lfsPointerMarker = []byte("version https://git-lfs.github.com/spec/v1")
lfsOIDRe = regexp.MustCompile(`(?m)^oid sha256:([0-9a-f]{64})$`)
lfsSizeRe = regexp.MustCompile(`(?m)^size ([0-9]+)$`)
)
func sumLFSSizes(m map[string]int64) int64 {
var s int64
for _, v := range m {
s += v
}
return s
}
// scanLFSPointersFromObjectIDs finds LFS pointer blobs among objectIDs and returns map[oid]size.
// It only reads small blobs via cat-file, so it stays bounded.
func scanLFSPointersFromObjectIDs(ctx *gitea_context.PrivateContext, repoPath string, env, objectIDs []string, maxBlobSize int64) (map[string]int64, error) {
out := make(map[string]int64)
if len(objectIDs) == 0 {
return out, nil
}
// 1) batch-check: filter small blobs only
var input bytes.Buffer
for _, oid := range objectIDs {
if oid == "" {
continue
}
input.WriteString(oid)
input.WriteByte('\n')
}
// Use batch-check to get object type and size for each object ID
checkCmd := gitcmd.NewCommand("cat-file", "--batch-check=%(objectname) %(objecttype) %(objectsize)").
WithDir(repoPath).
WithEnv(env).
WithStdinCopy(bytes.NewReader(input.Bytes()))
checkBytes, _, err := checkCmd.RunStdBytes(ctx)
if err != nil {
return out, err
}
smallBlobs := make([]string, 0, 1024)
for line := range bytes.SplitSeq(checkBytes, []byte{'\n'}) {
// "<sha> blob <size>"
fields := bytes.Fields(line)
if len(fields) != 3 {
continue
}
if !bytes.Equal(fields[1], []byte("blob")) {
continue
}
size, perr := strconv.ParseInt(string(fields[2]), 10, 64)
if perr != nil {
continue
}
if size <= maxBlobSize {
smallBlobs = append(smallBlobs, string(fields[0]))
}
}
if len(smallBlobs) == 0 {
return out, nil
}
// 2) batch: read contents of small blobs, parse LFS pointers
var input2 bytes.Buffer
for _, oid := range smallBlobs {
input2.WriteString(oid)
input2.WriteByte('\n')
}
catCmd := gitcmd.NewCommand("cat-file", "--batch").
WithDir(repoPath).
WithEnv(env).
WithStdinCopy(bytes.NewReader(input2.Bytes()))
catBytes, _, err := catCmd.RunStdBytes(ctx)
if err != nil {
return out, err
}
data := catBytes
i := 0
for i < len(data) {
j := bytes.IndexByte(data[i:], '\n')
if j < 0 {
break
}
j += i
header := data[i:j]
i = j + 1
hf := bytes.Fields(header)
if len(hf) < 3 {
break
}
blobSize, perr := strconv.ParseInt(string(hf[2]), 10, 64)
if perr != nil || blobSize < 0 {
break
}
if i+int(blobSize) > len(data) {
break
}
content := data[i : i+int(blobSize)]
i += int(blobSize)
if i < len(data) && data[i] == '\n' {
i++
}
if !bytes.Contains(content, lfsPointerMarker) {
continue
}
oidm := lfsOIDRe.FindSubmatch(content)
if len(oidm) != 2 {
continue
}
sizem := lfsSizeRe.FindSubmatch(content)
if len(sizem) != 2 {
continue
}
oid := string(oidm[1])
sz, perr := strconv.ParseInt(string(sizem[1]), 10, 64)
if perr != nil || sz < 0 {
continue
}
if prev, ok := out[oid]; !ok || sz > prev {
out[oid] = sz
}
}
return out, nil
}
// HookPreReceive checks whether an individual commit is acceptable
func HookPreReceive(ctx *gitea_context.PrivateContext) {
startTime := time.Now()
const maxLFSPointerBlobSize = int64(4096)
opts := web.GetForm(ctx).(*private.HookOptions)
ourCtx := &preReceiveContext{
PrivateContext: ctx,
env: generateGitEnv(opts), // Generate git environment for checking commits
env: generateGitEnv(opts),
opts: opts,
}
// Iterate across the provided old commit IDs
repo := ourCtx.Repo.Repository
var addedSize int64
var removedSize int64
// LFS sizes derived from pointers
var incomingNewToRepoLFS int64 // best proxy for “incoming LFS objects”
var removedLFSSize int64
var addedLFSSize int64 // new-vs-old pointers (can include already-known-to-repo)
var isRepoOversized bool
var pushSize *git.CountObject
var repoSize *git.CountObject
var err error
var duration time.Duration
needGitDelta := repo.ShouldCheckRepoSize()
needLFSDelta := repo.ShouldCheckLFSSize()
// Only do CountObjects (push/repo) when we're doing the repo-size limit at all
if needGitDelta {
repoSize, err = gitrepo.CountObjects(ctx, repo)
if err != nil {
log.Error("Unable to get repository size with env %v: %s Error: %v", repo.RepoPath(), ourCtx.env, err)
ctx.JSON(http.StatusInternalServerError, map[string]any{
"err": err.Error(),
})
return
}
pushSize, err = gitrepo.CountObjectsWithEnv(ctx, repo, ourCtx.env)
if err != nil {
log.Error("Unable to get push size with env %v: %s Error: %v", repo.RepoPath(), ourCtx.env, err)
ctx.JSON(http.StatusInternalServerError, map[string]any{
"err": err.Error(),
})
return
}
isRepoOversized = repo.IsRepoSizeOversized(pushSize.Size + pushSize.SizePack)
log.Trace("Push counts %+v", pushSize)
log.Trace("Repo counts %+v", repoSize)
}
for i := range opts.OldCommitIDs {
oldCommitID := opts.OldCommitIDs[i]
newCommitID := opts.NewCommitIDs[i]
refFullName := opts.RefFullNames[i]
log.Trace("Processing old commit: %s, new commit: %s, ref: %s", oldCommitID, newCommitID, refFullName)
// Deep work is only needed if:
// - repo is oversized (git deep path), OR
// - we need LFS delta (LFS limit enabled OR combined mode enabled)
if isRepoOversized || needLFSDelta {
var gitObjects string
var errLoop error
var errLFS error
// Keep pointer maps so we can compute delta at the end
var oldLFSPtrs, otherLFSPtrs, newLFSPtrs map[string]int64
// Only allocate object-size cache if we'll actually do git delta calc
var commitObjectsSizes map[string]int64
if isRepoOversized {
commitObjectsSizes = make(map[string]int64)
}
// OLD commit objects
if oldCommitID != "0000000000000000000000000000000000000000" {
gitObjects, _, err = gitcmd.NewCommand("rev-list", "--objects").
AddDynamicArguments(oldCommitID).
WithDir(repo.RepoPath()).WithEnv(ourCtx.env).RunStdString(ctx)
if err != nil {
log.Error("Unable to list objects in old commit: %s in %-v Error: %v", oldCommitID, repo, err)
ctx.JSON(http.StatusInternalServerError, private.Response{
Err: fmt.Sprintf("Fail to list objects in old commit: %v", err),
})
return
}
}
oldCommitObjects := convertObjectsToMap(gitObjects)
objectIDs := convertObjectsToSlice(gitObjects)
// LFS pointers for OLD (only if needed)
oldLFSPtrs = map[string]int64{}
if needLFSDelta {
oldLFSPtrs, errLFS = scanLFSPointersFromObjectIDs(ctx, repo.RepoPath(), ourCtx.env, objectIDs, maxLFSPointerBlobSize)
if errLFS != nil {
log.Error("Unable to scan old commit LFS pointers for %s in %-v: %v", oldCommitID, repo, errLFS)
oldLFSPtrs = map[string]int64{}
} else {
log.Trace("LFS(old): pointers=%d total=%s", len(oldLFSPtrs), base.FileSize(sumLFSSizes(oldLFSPtrs)))
}
}
// OTHER objects (repo excluding old+new)
if oldCommitID == "0000000000000000000000000000000000000000" {
gitObjects, _, err = gitcmd.NewCommand("rev-list", "--objects", "--all").
AddDynamicArguments("^" + newCommitID).
WithDir(repo.RepoPath()).WithEnv(ourCtx.env).RunStdString(ctx)
if err != nil {
log.Error("Unable to list objects in the repo that are missing from both old %s and new %s commits in %-v Error: %v", oldCommitID, newCommitID, repo, err)
ctx.JSON(http.StatusInternalServerError, private.Response{
Err: fmt.Sprintf("Fail to list objects missing from both old and new commits: %v", err),
})
return
}
} else {
gitObjects, _, err = gitcmd.NewCommand("rev-list", "--objects", "--all").
AddDynamicArguments("^"+oldCommitID, "^"+newCommitID).
WithDir(repo.RepoPath()).WithEnv(ourCtx.env).RunStdString(ctx)
if err != nil {
log.Error("Unable to list objects in the repo that are missing from both old %s and new %s commits in %-v Error: %v", oldCommitID, newCommitID, repo, err)
ctx.JSON(http.StatusInternalServerError, private.Response{
Err: fmt.Sprintf("Fail to list objects missing from both old and new commits: %v", err),
})
return
}
}
otherCommitObjects := convertObjectsToMap(gitObjects)
objectIDs = append(objectIDs, convertObjectsToSlice(gitObjects)...)
// LFS pointers for OTHER (only if needed)
otherLFSPtrs = map[string]int64{}
if needLFSDelta {
otherLFSPtrs, errLFS = scanLFSPointersFromObjectIDs(ctx, repo.RepoPath(), ourCtx.env, objectIDs, maxLFSPointerBlobSize)
if errLFS != nil {
log.Error("Unable to scan other-objects LFS pointers for repo %-v: %v", repo, errLFS)
otherLFSPtrs = map[string]int64{}
} else {
log.Trace("LFS(other): pointers=%d total=%s", len(otherLFSPtrs), base.FileSize(sumLFSSizes(otherLFSPtrs)))
}
}
// Load sizes of OLD+OTHER objects (existing in DB): pack + batch (git deep only)
if isRepoOversized {
if repoSize != nil && repoSize.InPack > 0 {
errLoop = loadObjectSizesFromPack(ctx, repo.RepoPath(), nil, commitObjectsSizes)
if errLoop != nil {
log.Error("Unable to get sizes of objects from the pack in %-v Error: %v", repo, errLoop)
}
}
errLoop = loadObjectsSizesViaBatch(ctx, repo.RepoPath(), objectIDs, commitObjectsSizes)
if errLoop != nil {
log.Error("Unable to get sizes of objects that are missing in both old %s and new commits %s in %-v Error: %v", oldCommitID, newCommitID, repo, errLoop)
ctx.JSON(http.StatusInternalServerError, private.Response{
Err: fmt.Sprintf("Fail to get sizes of objects missing in both old and new commit and those in old commit: %v", errLoop),
})
return
}
}
// NEW commit objects
gitObjects, _, err = gitcmd.NewCommand("rev-list", "--objects").
AddDynamicArguments(newCommitID).
WithDir(repo.RepoPath()).WithEnv(ourCtx.env).RunStdString(ctx)
if err != nil {
log.Error("Unable to list objects in new commit %s in %-v Error: %v", newCommitID, repo, err)
ctx.JSON(http.StatusInternalServerError, private.Response{
Err: fmt.Sprintf("Fail to list objects in new commit: %v", err),
})
return
}
newCommitObjects := convertObjectsToMap(gitObjects)
objectIDs = convertObjectsToSlice(gitObjects)
// LFS pointers for NEW (only if needed)
newLFSPtrs = map[string]int64{}
if needLFSDelta {
newLFSPtrs, errLFS = scanLFSPointersFromObjectIDs(ctx, repo.RepoPath(), ourCtx.env, objectIDs, maxLFSPointerBlobSize)
if errLFS != nil {
log.Error("Unable to scan new commit LFS pointers for %s in %-v: %v", newCommitID, repo, errLFS)
newLFSPtrs = map[string]int64{}
} else {
log.Trace("LFS(new): pointers=%d total=%s", len(newLFSPtrs), base.FileSize(sumLFSSizes(newLFSPtrs)))
}
}
// Load sizes of NEW objects (may be in quarantine packs, etc.) (git deep only)
if isRepoOversized {
if pushSize != nil && pushSize.InPack > 0 {
errLoop = loadObjectSizesFromPack(ctx, repo.RepoPath(), ourCtx.env, commitObjectsSizes)
if errLoop != nil {
log.Error("Unable to get sizes of objects from the pack in new commit %s in %-v Error: %v", newCommitID, repo, errLoop)
}
}
errLoop = loadObjectsSizesViaCatFile(ctx, repo.RepoPath(), ourCtx.env, objectIDs, commitObjectsSizes)
if errLoop != nil {
log.Error("Unable to get sizes of objects in new commit %s in %-v Error: %v", newCommitID, repo, errLoop)
}
// Git object delta (git deep only)
addedSize, removedSize = calculateSizeOfObjectsFromCache(newCommitObjects, oldCommitObjects, otherCommitObjects, commitObjectsSizes)
}
// LFS delta based on pointer presence (LFS deep only)
if needLFSDelta {
for oid, sz := range newLFSPtrs {
if _, inOld := oldLFSPtrs[oid]; !inOld {
addedLFSSize += sz
if _, inOther := otherLFSPtrs[oid]; !inOther {
// Check if the object is already in the database for this repository (e.g. orphan or referenced by hidden ref)
if _, err := git_model.GetLFSMetaObjectByOid(ctx, repo.ID, oid); err == nil {
continue
}
incomingNewToRepoLFS += sz
}
}
}
for oid, sz := range oldLFSPtrs {
if _, inNew := newLFSPtrs[oid]; inNew {
continue
}
if _, inOther := otherLFSPtrs[oid]; inOther {
continue
}
removedLFSSize += sz
}
log.Trace(
"LFS(delta): incoming-new-to-repo=%s added(vs old)=%s removed=%s current(repo.LFSSize)=%s predicted=%s",
base.FileSize(incomingNewToRepoLFS),
base.FileSize(addedLFSSize),
base.FileSize(removedLFSSize),
base.FileSize(repo.LFSSize),
base.FileSize(repo.LFSSize+incomingNewToRepoLFS-removedLFSSize),
)
}
}
switch {
case refFullName.IsBranch():
preReceiveBranch(ourCtx, oldCommitID, newCommitID, refFullName)
@ -136,6 +778,69 @@ func HookPreReceive(ctx *gitea_context.PrivateContext) {
}
}
// --------- Final accounting + enforcement (one timing) ---------
duration = time.Since(startTime)
currentGit := repo.GitSize
currentLFS := repo.LFSSize
gitDelta := addedSize - removedSize
predictedGitAfter := currentGit + gitDelta
lfsDelta := incomingNewToRepoLFS - removedLFSSize
predictedLFSAfter := currentLFS + lfsDelta
// One summary line (time included here only)
if repo.ShouldCheckRepoSize() || repo.ShouldCheckLFSSize() {
log.Warn(
"SizeCheck summary: took=%s repo=%s/%s git(pred=%s cur=%s delta=%s) lfs(pred=%s cur=%s delta=%s) limits(git=%s lfs=%s)",
duration,
repo.OwnerName, repo.Name,
base.FileSize(predictedGitAfter), base.FileSize(currentGit), base.FileSize(gitDelta),
base.FileSize(predictedLFSAfter), base.FileSize(currentLFS), base.FileSize(lfsDelta),
setting.FormatRepositorySizeLimit(setting.Repository.GitSizeMax),
setting.FormatRepositorySizeLimit(setting.Repository.LFSSizeMax),
)
}
// 1) LFS size limit: compare against predicted LFS after push
if repo.ShouldCheckLFSSize() {
lfsLimit := repo.GetActualLFSSizeLimit()
if lfsLimit >= 0 && predictedLFSAfter > lfsLimit && predictedLFSAfter > currentLFS {
log.Warn("Forbidden: LFS size limit exceeded: %s > %s for repo %-v",
base.FileSize(predictedLFSAfter),
base.FileSize(lfsLimit),
repo,
)
ctx.JSON(http.StatusForbidden, private.Response{
UserMsg: fmt.Sprintf("LFS size limit exceeded: %s > than limit of %s",
base.FileSize(predictedLFSAfter),
base.FileSize(lfsLimit),
),
})
return
}
}
// 2) Git size limit
if repo.ShouldCheckRepoSize() {
limit := repo.GetActualSizeLimit()
if limit >= 0 && predictedGitAfter > limit && predictedGitAfter > currentGit {
log.Warn("Forbidden: Repository git size limit exceeded: %s > %s for repo %-v",
base.FileSize(predictedGitAfter),
base.FileSize(limit),
repo,
)
ctx.JSON(http.StatusForbidden, private.Response{
UserMsg: fmt.Sprintf("Repository git size limit exceeded: %s > than limit of %s",
base.FileSize(predictedGitAfter),
base.FileSize(limit),
),
})
return
}
}
ctx.PlainText(http.StatusOK, "ok")
}
@ -172,17 +877,11 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
return
}
// Allow pushes to non-protected branches
if protectBranch == nil {
return
}
protectBranch.Repo = repo
// This ref is a protected branch.
//
// First of all we need to enforce absolutely:
//
// 1. Detect and prevent deletion of the branch
if newCommitID == objectFormat.EmptyObjectID().String() {
log.Warn("Forbidden: Branch: %s in %-v is protected from deletion", branchName, repo)
ctx.JSON(http.StatusForbidden, private.Response{
@ -193,7 +892,6 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
isForcePush := false
// 2. Disallow force pushes to protected branches
if oldCommitID != objectFormat.EmptyObjectID().String() {
output, _, err := gitrepo.RunCmdString(ctx,
repo,
@ -220,7 +918,6 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
}
}
// 3. Enforce require signed commits
if protectBranch.RequireSignedCommits {
err := verifyCommits(oldCommitID, newCommitID, gitRepo, ctx.env)
if err != nil {
@ -240,9 +937,6 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
}
}
// Now there are several tests which can be overridden:
//
// 4. Check protected file patterns - this is overridable from the UI
changedProtectedfiles := false
protectedFilePath := ""
@ -263,10 +957,8 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
}
}
// 5. Check if the doer is allowed to push (and force-push if the incoming push is a force-push)
var canPush bool
if ctx.opts.DeployKeyID != 0 {
// This flag is only ever true if protectBranch.CanForcePush is true
if isForcePush {
canPush = !changedProtectedfiles && protectBranch.CanPush && (!protectBranch.EnableForcePushAllowlist || protectBranch.ForcePushAllowlistDeployKeys)
} else {
@ -288,13 +980,8 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
}
}
// 6. If we're not allowed to push directly
if !canPush {
// Is this a merge from the UI/API?
if ctx.opts.PullRequestID == 0 {
// 6a. If we're not merging from the UI/API then there are two ways we got here:
//
// We are changing a protected file and we're not allowed to do that
if changedProtectedfiles {
log.Warn("Forbidden: Branch: %s in %-v is protected from changing file %s", branchName, repo, protectedFilePath)
ctx.JSON(http.StatusForbidden, private.Response{
@ -303,7 +990,6 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
return
}
// Allow commits that only touch unprotected files
globs := protectBranch.GetUnprotectedFilePatterns()
if len(globs) > 0 {
unprotectedFilesOnly, err := pull_service.CheckUnprotectedFiles(gitRepo, branchName, oldCommitID, newCommitID, globs, ctx.env)
@ -315,12 +1001,10 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
return
}
if unprotectedFilesOnly {
// Commit only touches unprotected files, this is allowed
return
}
}
// Or we're simply not able to push to this protected branch
if isForcePush {
log.Warn("Forbidden: User %d is not allowed to force-push to protected branch: %s in %-v", ctx.opts.UserID, branchName, repo)
ctx.JSON(http.StatusForbidden, private.Response{
@ -334,9 +1018,7 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
})
return
}
// 6b. Merge (from UI or API)
// Get the PR, user and permissions for the user in the repository
pr, err := issues_model.GetPullRequestByID(ctx, ctx.opts.PullRequestID)
if err != nil {
log.Error("Unable to get PullRequest %d Error: %v", ctx.opts.PullRequestID, err)
@ -346,14 +1028,10 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
return
}
// although we should have called `loadPusherAndPermission` before, here we call it explicitly again because we need to access ctx.user below
if !ctx.loadPusherAndPermission() {
// if error occurs, loadPusherAndPermission had written the error response
return
}
// Now check if the user is allowed to merge PRs for this repository
// Note: we can use ctx.perm and ctx.user directly as they will have been loaded above
allowedMerge, err := pull_service.IsUserAllowedToMerge(ctx, pr, ctx.userPerm, ctx.user)
if err != nil {
log.Error("Error calculating if allowed to merge: %v", err)
@ -371,12 +1049,10 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
return
}
// If we're an admin for the repository we can ignore status checks, reviews and override protected files
if ctx.userPerm.IsAdmin() {
return
}
// Now if we're not an admin - we can't overwrite protected files so fail now
if changedProtectedfiles {
log.Warn("Forbidden: Branch: %s in %-v is protected from changing file %s", branchName, repo, protectedFilePath)
ctx.JSON(http.StatusForbidden, private.Response{
@ -385,7 +1061,6 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r
return
}
// Check all status checks and reviews are ok
if err := pull_service.CheckPullBranchProtections(ctx, pr, true); err != nil {
if errors.Is(err, pull_service.ErrNotReadyToMerge) {
log.Warn("Forbidden: User %d is not allowed push to protected branch %s in %-v and pr #%d is not ready to be merged: %s", ctx.opts.UserID, branchName, repo, pr.Index, err.Error())
@ -475,16 +1150,13 @@ func preReceiveFor(ctx *preReceiveContext, refFullName git.RefName) {
func generateGitEnv(opts *private.HookOptions) (env []string) {
env = os.Environ()
if opts.GitAlternativeObjectDirectories != "" {
env = append(env,
private.GitAlternativeObjectDirectories+"="+opts.GitAlternativeObjectDirectories)
env = append(env, private.GitAlternativeObjectDirectories+"="+opts.GitAlternativeObjectDirectories)
}
if opts.GitObjectDirectory != "" {
env = append(env,
private.GitObjectDirectory+"="+opts.GitObjectDirectory)
env = append(env, private.GitObjectDirectory+"="+opts.GitObjectDirectory)
}
if opts.GitQuarantinePath != "" {
env = append(env,
private.GitQuarantinePath+"="+opts.GitQuarantinePath)
env = append(env, private.GitQuarantinePath+"="+opts.GitQuarantinePath)
}
return env
}

View File

@ -15,8 +15,10 @@ import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/templates"
"code.gitea.io/gitea/modules/web"
"code.gitea.io/gitea/routers/web/explore"
"code.gitea.io/gitea/services/context"
"code.gitea.io/gitea/services/forms"
repo_service "code.gitea.io/gitea/services/repository"
)
@ -30,6 +32,12 @@ func Repos(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("admin.repositories")
ctx.Data["PageIsAdminRepositories"] = true
gitSizeStr := setting.FormatRepositorySizeLimit(setting.Repository.GitSizeMax)
lfsSizeStr := setting.FormatRepositorySizeLimit(setting.Repository.LFSSizeMax)
log.Trace("Repos: GitSizeMax=%d -> %s, LFSSizeMax=%d -> %s", setting.Repository.GitSizeMax, gitSizeStr, setting.Repository.LFSSizeMax, lfsSizeStr)
ctx.Data["GitSizeMax"] = gitSizeStr
ctx.Data["LFSSizeMax"] = lfsSizeStr
explore.RenderRepoSearch(ctx, &explore.RepoSearchOptions{
Private: true,
PageSize: setting.UI.Admin.RepoPagingNum,
@ -38,6 +46,45 @@ func Repos(ctx *context.Context) {
})
}
func UpdateRepoPost(ctx *context.Context) {
form := web.GetForm(ctx).(*forms.UpdateGlobalRepoForm)
ctx.Data["Title"] = ctx.Tr("admin.repositories")
ctx.Data["PageIsAdminRepositories"] = true
ctx.Data["GitSizeMax"] = form.GitSizeMax
ctx.Data["LFSSizeMax"] = form.LFSSizeMax
gitSizeMax, err := setting.ParseRepositorySizeLimit(form.GitSizeMax)
if err != nil {
ctx.Data["Err_Git_Size_Max"] = form.GitSizeMax
explore.RenderRepoSearch(ctx, &explore.RepoSearchOptions{
Private: true,
PageSize: setting.UI.Admin.RepoPagingNum,
TplName: tplRepos,
OnlyShowRelevant: false,
})
return
}
lfsSizeMax, err := setting.ParseRepositorySizeLimit(form.LFSSizeMax)
if err != nil {
ctx.Data["Err_LFS_Size_Max"] = form.LFSSizeMax
explore.RenderRepoSearch(ctx, &explore.RepoSearchOptions{
Private: true,
PageSize: setting.UI.Admin.RepoPagingNum,
TplName: tplRepos,
OnlyShowRelevant: false,
})
return
}
setting.UpdateGlobalRepositoryLimit(gitSizeMax, lfsSizeMax)
log.Trace("UpdateRepoPost: After update, setting.Repository.GitSizeMax=%d, LFSSizeMax=%d", setting.Repository.GitSizeMax, setting.Repository.LFSSizeMax)
ctx.Flash.Success(ctx.Tr("admin.repos.update_success"))
ctx.Redirect(setting.AppSubURL + "/-/admin/repos")
}
// DeleteRepo delete one repository
func DeleteRepo(ctx *context.Context) {
repo, err := repo_model.GetRepositoryByID(ctx, ctx.FormInt64("id"))

View File

@ -141,6 +141,24 @@ func RenderRepoSearch(ctx *context.Context, opts *RepoSearchOptions) {
pager.AddParamFromRequest(ctx.Req)
ctx.Data["Page"] = pager
if ctx.Data["Err_Repo_Size_Limit"] != nil {
ctx.RenderWithErr(ctx.Tr("admin.config.invalid_repo_size", ctx.Data["Err_Repo_Size_Limit"]),
opts.TplName, nil)
return
}
if ctx.Data["Err_LFS_Size_Limit"] != nil {
ctx.RenderWithErr(ctx.Tr("admin.config.invalid_lfs_size", ctx.Data["Err_LFS_Size_Limit"]),
opts.TplName, nil)
return
}
if ctx.Data["Err_Repo_Size_Save"] != nil {
ctx.RenderWithErr(ctx.Tr("admin.config.save_repo_size_setting_failed", ctx.Data["Err_Repo_Size_Save"]),
opts.TplName, nil)
return
}
ctx.HTML(http.StatusOK, opts.TplName)
}

View File

@ -61,6 +61,9 @@ func SettingsCtxData(ctx *context.Context) {
ctx.Data["DefaultMirrorInterval"] = setting.Mirror.DefaultInterval
ctx.Data["MinimumMirrorInterval"] = setting.Mirror.MinInterval
ctx.Data["CanConvertFork"] = ctx.Repo.Repository.IsFork && ctx.Doer.CanCreateRepoIn(ctx.Repo.Repository.Owner)
ctx.Data["Err_RepoSize"] = ctx.Repo.Repository.IsRepoSizeOversized(ctx.Repo.Repository.GetActualSizeLimit() / 10) // less than 10% left
ctx.Data["GitSizeMax"] = ctx.Repo.Repository.GetActualSizeLimit()
ctx.Data["LFSSizeMax"] = ctx.Repo.Repository.GetActualLFSSizeLimit()
signing, _ := gitrepo.GetSigningKey(ctx)
ctx.Data["SigningKeyAvailable"] = signing != nil
@ -115,6 +118,9 @@ func SettingsPost(ctx *context.Context) {
ctx.Data["SigningSettings"] = setting.Repository.Signing
ctx.Data["IsRepoIndexerEnabled"] = setting.Indexer.RepoIndexerEnabled
repo := ctx.Repo.Repository
ctx.Data["Err_RepoSize"] = repo.IsRepoSizeOversized(repo.GetActualSizeLimit() / 10) // less than 10% left
switch ctx.FormString("action") {
case "update":
handleSettingsPostUpdate(ctx)

View File

@ -814,6 +814,7 @@ func registerWebRoutes(m *web.Router, webAuth *AuthMiddleware) {
m.Get("", admin.Repos)
m.Combo("/unadopted").Get(admin.UnadoptedRepos).Post(admin.AdoptOrDeleteRepository)
m.Post("/delete", admin.DeleteRepo)
m.Post("", web.Bind(forms.UpdateGlobalRepoForm{}), admin.UpdateRepoPost)
})
m.Group("/packages", func() {

View File

@ -47,6 +47,11 @@ type CreateRepoForm struct {
ObjectFormatName string
}
type UpdateGlobalRepoForm struct {
GitSizeMax string `form:"GitSizeMax"`
LFSSizeMax string `form:"LFSSizeMax"`
}
// Validate validates the fields
func (f *CreateRepoForm) Validate(req *http.Request, errs binding.Errors) binding.Errors {
ctx := context.GetValidateContext(req)

View File

@ -26,6 +26,7 @@ import (
"code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/auth/httpauth"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/httplib"
"code.gitea.io/gitea/modules/json"
lfs_module "code.gitea.io/gitea/modules/lfs"
@ -183,6 +184,12 @@ func DownloadHandler(ctx *context.Context) {
}
}
// traceBatchDecision outputs a trace message for a batch decision
func traceBatchDecision(rc *requestContext, op, msg string, args ...any) {
prefix := fmt.Sprintf("LFS[BATCH][%s/%s][op=%s] ", rc.User, rc.Repo, op)
log.Trace(prefix+msg, args...)
}
// BatchHandler provides the batch api
func BatchHandler(ctx *context.Context) {
var br lfs_module.BatchRequest
@ -205,6 +212,7 @@ func BatchHandler(ctx *context.Context) {
}
rc := getRequestContext(ctx)
log.Trace("LFS[BATCH][%s/%s] op=%s objects=%d", rc.User, rc.Repo, br.Operation, len(br.Objects))
repository := getAuthenticatedRepository(ctx, rc, isUpload)
if repository == nil {
@ -216,8 +224,61 @@ func BatchHandler(ctx *context.Context) {
return
}
// Create content store once, reuse for tracing + normal logic below.
contentStore := lfs_module.NewContentStore()
// Baseline repo stats and limits
traceBatchDecision(rc, br.Operation,
"auth=%t isUpload=%t repoID=%d sizes: git=%s lfs=%s limits: git=%s lfs=%s",
ctx.IsSigned || ctx.Doer != nil,
isUpload,
repository.ID,
base.FileSize(repository.GitSize),
base.FileSize(repository.LFSSize),
setting.FormatRepositorySizeLimit(repository.GetActualSizeLimit()),
setting.FormatRepositorySizeLimit(repository.GetActualLFSSizeLimit()),
)
// Check LFS size limits for upload operations
if isUpload && repository.ShouldCheckLFSSize() {
// Sum sizes of objects that are NEW TO THIS REPO (no meta row)
var incomingNewToRepoLFS int64
var invalid, newObjects, metaPresent int
for _, p := range br.Objects {
if !p.IsValid() {
invalid++
continue
}
meta, _ := git_model.GetLFSMetaObjectByOid(ctx, repository.ID, p.Oid)
if meta == nil {
incomingNewToRepoLFS += p.Size
newObjects++
} else {
metaPresent++
}
}
predictedLFS := repository.LFSSize + incomingNewToRepoLFS
// Enforce the LFS-only limit; if we are already over the limit but this push does not increase LFS size, allow it
if predictedLFS > repository.GetActualLFSSizeLimit() && predictedLFS > repository.LFSSize {
traceBatchDecision(rc, br.Operation,
"DECISION=FORBID reason=LFS_LIMIT predictedLFS=%s limit=%s (NewObjects=%d MetaPresent=%d Invalid=%d)",
base.FileSize(predictedLFS), setting.FormatRepositorySizeLimit(repository.GetActualLFSSizeLimit()),
newObjects, metaPresent, invalid,
)
writeStatusMessage(ctx, http.StatusForbidden,
fmt.Sprintf("LFS size %s would exceed limit %s",
base.FileSize(predictedLFS), setting.FormatRepositorySizeLimit(repository.GetActualLFSSizeLimit())))
return
}
traceBatchDecision(rc, br.Operation, "DECISION=ALLOW size-check passed")
}
var responseObjects []*lfs_module.ObjectResponse
for _, p := range br.Objects {

View File

@ -1,5 +1,22 @@
{{template "admin/layout_head" (dict "pageClass" "admin")}}
<div class="admin-setting-content">
<h4 class="ui top attached header">
{{ctx.Locale.Tr "admin.repos.settings"}}
</h4>
<div class="ui attached segment">
<form class="ui form" action="{{.Link}}" method="post">
{{.CsrfTokenHtml}}
<div class="field {{if .Err_Git_Size_Max}}error{{end}}">
<label>{{ctx.Locale.Tr "admin.repos.git_size_max"}}</label>
<input name="GitSizeMax" value="{{if .Err_Git_Size_Max}}{{.Err_Git_Size_Max}}{{else}}{{.GitSizeMax}}{{end}}" data-tooltip-content="{{ctx.Locale.Tr "admin.repos.git_size_max_helper"}}">
</div>
<div class="field {{if .Err_LFS_Size_Max}}error{{end}}">
<label>{{ctx.Locale.Tr "admin.repos.lfs_size_max"}}</label>
<input name="LFSSizeMax" value="{{if .Err_LFS_Size_Max}}{{.Err_LFS_Size_Max}}{{else}}{{.LFSSizeMax}}{{end}}" data-tooltip-content="{{ctx.Locale.Tr "admin.repos.lfs_size_max_helper"}}">
</div>
<button class="ui green button">{{ctx.Locale.Tr "admin.repos.update_settings"}}</button>
</form>
</div>
<h4 class="ui top attached header">
{{ctx.Locale.Tr "admin.repos.repo_manage_panel"}} ({{ctx.Locale.Tr "admin.total" .Total}})
<div class="ui right">

View File

@ -13,7 +13,11 @@
</div>
<div class="inline field">
<label>{{ctx.Locale.Tr "repo.repo_size"}}</label>
<span {{if not (eq .Repository.Size 0)}} data-tooltip-content="{{.Repository.SizeDetailsString}}"{{end}}>{{FileSize .Repository.Size}}</span>
<span {{if .Err_RepoSize}}class="ui text red"{{end}} {{if not (eq .Repository.Size 0)}} data-tooltip-content="{{.Repository.SizeDetailsString}}"{{end}}>{{FileSize .Repository.Size}}
{{if gt .GitSizeMax -1}}
/ {{FileSize .GitSizeMax}}
{{end}}
</span>
</div>
<div class="inline field">
<label>{{ctx.Locale.Tr "repo.template"}}</label>

View File

@ -0,0 +1,349 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package integration
import (
"crypto/rand"
"io"
"net/url"
"os"
"path"
"testing"
auth_model "code.gitea.io/gitea/models/auth"
"code.gitea.io/gitea/modules/git/gitcmd"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/tests"
"github.com/stretchr/testify/assert"
)
func TestSizeLimit(t *testing.T) {
onGiteaRun(t, func(t *testing.T, u *url.URL) {
t.Run("Git", func(t *testing.T) {
testGitSizeLimitInternal(t, u)
})
t.Run("LFS", func(t *testing.T) {
testLFSSizeLimitInternal(t, u)
})
})
}
func testGitSizeLimitInternal(t *testing.T, u *url.URL) {
username := "user2"
u.User = url.UserPassword(username, userPassword)
t.Run("Under", func(t *testing.T) {
defer tests.PrintCurrentTest(t)()
repoName := "repo-git-under"
ctx := NewAPITestContext(t, username, repoName, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
// Cleanup: reset limits and delete repository
defer func() {
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
}()
defer doAPIDeleteRepository(ctx)
doAPICreateRepository(ctx, false)(t)
dstPath := t.TempDir()
u.Path = ctx.GitPath()
doGitClone(dstPath, u)(t)
// Phase 1: Push with no limit
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
doCommitAndPush(t, 1024, dstPath, "under-phase1-")
// Phase 2: Push with limit enabled but not exceeded
setting.Repository.GitSizeMax = 50 * 1024 // 50 KiB
doCommitAndPush(t, 1024, dstPath, "under-phase2-")
})
t.Run("Over", func(t *testing.T) {
defer tests.PrintCurrentTest(t)()
repoName := "repo-git-over"
ctx := NewAPITestContext(t, username, repoName, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
// Cleanup: reset limits and delete repository
defer func() {
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
}()
defer doAPIDeleteRepository(ctx)
doAPICreateRepository(ctx, false)(t)
dstPath := t.TempDir()
u.Path = ctx.GitPath()
doGitClone(dstPath, u)(t)
// Set restrictive limit and attempt push
setting.Repository.GitSizeMax = 100
setting.Repository.LFSSizeMax = -1
doCommitAndPushWithExpectedError(t, 1024, dstPath, "over-")
})
t.Run("UnderAfterResize", func(t *testing.T) {
defer tests.PrintCurrentTest(t)()
repoName := "repo-git-resize"
ctx := NewAPITestContext(t, username, repoName, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
// Cleanup: reset limits and delete repository
defer func() {
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
}()
defer doAPIDeleteRepository(ctx)
doAPICreateRepository(ctx, false)(t)
dstPath := t.TempDir()
u.Path = ctx.GitPath()
doGitClone(dstPath, u)(t)
// Attempt push with restrictive limit - should fail
setting.Repository.GitSizeMax = 100
setting.Repository.LFSSizeMax = -1
doCommitAndPushWithExpectedError(t, 1024, dstPath, "resize-")
// Increase limit and retry same push - should succeed
setting.Repository.GitSizeMax = 30 * 1024 // 30 KiB
_, _, err := gitcmd.NewCommand("push", "origin", "master").WithDir(dstPath).RunStdString(t.Context())
assert.NoError(t, err, "Push should succeed after limit increase")
})
t.Run("DeletionAndSoftEnforcement", func(t *testing.T) {
defer tests.PrintCurrentTest(t)()
repoName := "repo-git-soft"
ctx := NewAPITestContext(t, username, repoName, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
// Cleanup: reset limits and delete repository
defer func() {
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
}()
defer doAPIDeleteRepository(ctx)
doAPICreateRepository(ctx, false)(t)
dstPath := t.TempDir()
u.Path = ctx.GitPath()
doGitClone(dstPath, u)(t)
// Step 1: Push 1KB file with no limit
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
doCommitAndPush(t, 1024, dstPath, "soft-base-")
// Step 2: Push 10KB file
doCommitAndPush(t, 10*1024, dstPath, "soft-big-")
// Step 3: Delete big file using reset
_, _, err := gitcmd.NewCommand("reset", "--hard", "HEAD~1").WithDir(dstPath).RunStdString(t.Context())
assert.NoError(t, err, "Reset should succeed")
// Step 4: Set very restrictive limit
setting.Repository.GitSizeMax = 10 // 10 bytes
// Step 5: Force push - should succeed (soft enforcement)
_, _, err = gitcmd.NewCommand("push", "--force-with-lease", "origin", "master").WithDir(dstPath).RunStdString(t.Context())
assert.NoError(t, err, "Force push should succeed with soft enforcement")
// Step 6: Try to push another 1KB file - should fail
doCommitAndPushWithExpectedError(t, 1024, dstPath, "soft-new-")
})
}
func testLFSSizeLimitInternal(t *testing.T, u *url.URL) {
if !setting.LFS.StartServer {
t.Skip("LFS server disabled")
}
username := "user2"
u.User = url.UserPassword(username, userPassword)
// Helper to track LFS
setupLFS := func(t *testing.T, dstPath string) {
// Initialize git-lfs in the repository
err := gitcmd.NewCommand("lfs", "install", "--local").WithDir(dstPath).Run(t.Context())
assert.NoError(t, err)
err = os.WriteFile(path.Join(dstPath, ".gitattributes"), []byte("*.dat filter=lfs diff=lfs merge=lfs -text\n"), 0o644)
assert.NoError(t, err)
err = gitcmd.NewCommand("add", ".gitattributes").WithDir(dstPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("commit", "-m", "Track LFS").WithDir(dstPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("push", "origin", "master").WithDir(dstPath).Run(t.Context())
assert.NoError(t, err)
}
t.Run("PushUnderLimit", func(t *testing.T) {
defer tests.PrintCurrentTest(t)()
repoName := "repo-lfs-under"
ctx := NewAPITestContext(t, username, repoName, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
// Cleanup: reset limits and delete repository
defer func() {
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
}()
defer doAPIDeleteRepository(ctx)
doAPICreateRepository(ctx, false)(t)
dstPath := t.TempDir()
u.Path = ctx.GitPath()
doGitClone(dstPath, u)(t)
setupLFS(t, dstPath)
// Push with limit enabled but not exceeded
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = 10000
doCommitAndPushWithData(t, dstPath, "data-under.dat", "some-content-under")
})
t.Run("PushOverLimit", func(t *testing.T) {
defer tests.PrintCurrentTest(t)()
repoName := "repo-lfs-over"
ctx := NewAPITestContext(t, username, repoName, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
// Cleanup: reset limits and delete repository
defer func() {
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
}()
defer doAPIDeleteRepository(ctx)
doAPICreateRepository(ctx, false)(t)
dstPath := t.TempDir()
u.Path = ctx.GitPath()
doGitClone(dstPath, u)(t)
setupLFS(t, dstPath)
// Push with restrictive limit - should fail
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = 5
doCommitAndPushWithDataWithExpectedError(t, dstPath, "data-over.dat", "some-content-over-limit")
})
t.Run("SoftEnforcement", func(t *testing.T) {
defer tests.PrintCurrentTest(t)()
repoName := "repo-lfs-soft-enforce"
ctx := NewAPITestContext(t, username, repoName, auth_model.AccessTokenScopeWriteRepository, auth_model.AccessTokenScopeWriteUser)
// Cleanup: reset limits and delete repository
defer func() {
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
}()
defer doAPIDeleteRepository(ctx)
doAPICreateRepository(ctx, false)(t)
dstPath := t.TempDir()
u.Path = ctx.GitPath()
doGitClone(dstPath, u)(t)
setupLFS(t, dstPath)
// Step 1 & 2: Init LFS and push 1024B file with random content (Commit 1)
setting.Repository.GitSizeMax = -1
setting.Repository.LFSSizeMax = -1
doCommitAndPushLFSWithRandomData(t, dstPath, "data-soft-1.dat", 1024)
// Step 3: Push 10240B LFS file with random content (Commit 2)
doCommitAndPushLFSWithRandomData(t, dstPath, "data-soft-2.dat", 10240)
// Step 4: Set limit to 10 KiB (1 KiB below current ~11 KiB)
setting.Repository.LFSSizeMax = 10 * 1024
// Step 5: Try to push 1KB LFS file - should fail (Commit 3, local only)
err := os.WriteFile(path.Join(dstPath, "data-soft-3.dat"), generateRandomData(1024), 0o644)
assert.NoError(t, err)
err = gitcmd.NewCommand("add", "data-soft-3.dat").WithDir(dstPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("commit", "-m", "Add data-soft-3.dat").WithDir(dstPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("push", "origin", "master").WithDir(dstPath).Run(t.Context())
assert.Error(t, err, "Push should fail when exceeding LFS limit")
// Step 6: Reset to Commit 1 (removes Commits 2 & 3)
_, _, err = gitcmd.NewCommand("reset", "--hard", "HEAD~2").WithDir(dstPath).RunStdString(t.Context())
assert.NoError(t, err, "Reset should succeed")
_, _, err = gitcmd.NewCommand("push", "--force-with-lease", "origin", "master").WithDir(dstPath).RunStdString(t.Context())
assert.NoError(t, err, "Force push should succeed with soft enforcement")
// Step 7: Try to push 1024B LFS file - should still fail
doCommitAndPushLFSWithRandomDataWithExpectedError(t, dstPath, "data-soft-new.dat", 1024)
})
}
// Helper functions
func doCommitAndPushWithData(t *testing.T, repoPath, filename, content string) {
err := os.WriteFile(path.Join(repoPath, filename), []byte(content), 0o644)
assert.NoError(t, err)
err = gitcmd.NewCommand("add").AddDynamicArguments(filename).WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("commit", "-m").AddDynamicArguments("Add " + filename).WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("push", "origin", "master").WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
}
func doCommitAndPushWithDataWithExpectedError(t *testing.T, repoPath, filename, content string) {
err := os.WriteFile(path.Join(repoPath, filename), []byte(content), 0o644)
assert.NoError(t, err)
err = gitcmd.NewCommand("add").AddDynamicArguments(filename).WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("commit", "-m").AddDynamicArguments("Add " + filename).WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("push", "origin", "master").WithDir(repoPath).Run(t.Context())
assert.Error(t, err)
}
func generateRandomData(size int) []byte {
data := make([]byte, size)
_, _ = io.ReadFull(rand.Reader, data)
return data
}
func doCommitAndPushLFSWithRandomData(t *testing.T, repoPath, filename string, size int) {
err := os.WriteFile(path.Join(repoPath, filename), generateRandomData(size), 0o644)
assert.NoError(t, err)
err = gitcmd.NewCommand("add").AddDynamicArguments(filename).WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
// Verify file is tracked by LFS
stdout, _, err := gitcmd.NewCommand("lfs", "ls-files").WithDir(repoPath).RunStdString(t.Context())
assert.NoError(t, err, "git lfs ls-files should succeed")
assert.Contains(t, stdout, filename, "File %s should be tracked by LFS", filename)
err = gitcmd.NewCommand("commit", "-m").AddDynamicArguments("Add " + filename).WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("push", "origin", "master").WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
}
func doCommitAndPushLFSWithRandomDataWithExpectedError(t *testing.T, repoPath, filename string, size int) {
err := os.WriteFile(path.Join(repoPath, filename), generateRandomData(size), 0o644)
assert.NoError(t, err)
err = gitcmd.NewCommand("add").AddDynamicArguments(filename).WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
// Verify file is tracked by LFS
stdout, _, err := gitcmd.NewCommand("lfs", "ls-files").WithDir(repoPath).RunStdString(t.Context())
assert.NoError(t, err, "git lfs ls-files should succeed")
assert.Contains(t, stdout, filename, "File %s should be tracked by LFS", filename)
err = gitcmd.NewCommand("commit", "-m").AddDynamicArguments("Add " + filename).WithDir(repoPath).Run(t.Context())
assert.NoError(t, err)
err = gitcmd.NewCommand("push", "origin", "master").WithDir(repoPath).Run(t.Context())
assert.Error(t, err)
}
// Reuse global helpers for Git: doCommitAndPush
func doCommitAndPushWithExpectedError(t *testing.T, size int, repoPath, prefix string) string {
name, err := generateCommitWithNewData(t.Context(), size, repoPath, "user2@example.com", "User Two", prefix)
assert.NoError(t, err)
_, _, err = gitcmd.NewCommand("push", "origin", "master").WithDir(repoPath).RunStdString(t.Context()) // Push
assert.Error(t, err)
return name
}