gitea/modules/repository/repo.go

// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package repository

import (
	"context"
	"errors"
	"fmt"
	"io"
	"strings"
	"time"

	"code.gitea.io/gitea/models/db"
	git_model "code.gitea.io/gitea/models/git"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/lfs"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
)
/*
GitHub, GitLab, Gogs: *.wiki.git
BitBucket: *.git/wiki
*/
var commonWikiURLSuffixes = []string{".wiki.git", ".git/wiki"}
// WikiRemoteURL returns accessible repository URL for wiki if exists.
// Otherwise, it returns an empty string.
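//
// Illustrative usage sketch (the remote URL below is hypothetical):
//
//	wikiURL := WikiRemoteURL(ctx, "https://example.com/org/repo.git")
//	if wikiURL != "" {
//		// clone or mirror the wiki from wikiURL
//	}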
func WikiRemoteURL(ctx context.Context, remote string) string {
	remote = strings.TrimSuffix(remote, ".git")
	for _, suffix := range commonWikiURLSuffixes {
		wikiURL := remote + suffix
		if git.IsRepoURLAccessible(ctx, wikiURL) {
			return wikiURL
		}
	}
	return ""
}

// SyncRepoTags synchronizes releases table with repository tags
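//
// A minimal usage sketch, assuming the caller already holds a repository ID:
//
//	if err := SyncRepoTags(ctx, repo.ID); err != nil {
//		log.Error("SyncRepoTags: %v", err)
//	}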
func SyncRepoTags(ctx context.Context, repoID int64) error {
	repo, err := repo_model.GetRepositoryByID(ctx, repoID)
	if err != nil {
		return err
	}

	gitRepo, err := gitrepo.OpenRepository(ctx, repo)
	if err != nil {
		return err
	}
	defer gitRepo.Close()

	return SyncReleasesWithTags(ctx, repo, gitRepo)
}

// SyncReleasesWithTags synchronizes release table with repository tags
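// For pull-mirrors the release table is rebuilt wholesale (see pullMirrorReleaseSync);
// otherwise existing releases are reconciled against the repository tags page by page.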
func SyncReleasesWithTags(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
log.Debug("SyncReleasesWithTags: in Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
// optimized procedure for pull-mirrors which saves a lot of time (in
// particular for repos with many tags).
if repo.IsMirror {
return pullMirrorReleaseSync(ctx, repo, gitRepo)
	}

	existingRelTags := make(container.Set[string])
	opts := repo_model.FindReleasesOptions{
		IncludeDrafts: true,
		IncludeTags:   true,
		ListOptions:   db.ListOptions{PageSize: 50},
		RepoID:        repo.ID,
	}
	for page := 1; ; page++ {
		opts.Page = page
		rels, err := db.Find[repo_model.Release](gitRepo.Ctx, opts)
		if err != nil {
			return fmt.Errorf("unable to GetReleasesByRepoID in Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
		}
		if len(rels) == 0 {
			break
		}
		for _, rel := range rels {
			if rel.IsDraft {
				continue
			}
			commitID, err := gitRepo.GetTagCommitID(rel.TagName)
			if err != nil && !git.IsErrNotExist(err) {
				return fmt.Errorf("unable to GetTagCommitID for %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
			}
			if git.IsErrNotExist(err) || commitID != rel.Sha1 {
				if err := repo_model.PushUpdateDeleteTag(ctx, repo, rel.TagName); err != nil {
					return fmt.Errorf("unable to PushUpdateDeleteTag: %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
				}
			} else {
				existingRelTags.Add(strings.ToLower(rel.TagName))
			}
		}
	}
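
	// Add a release entry for every repository tag that is not yet tracked in the release table.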
	_, err := gitRepo.WalkReferences(git.ObjectTag, 0, 0, func(sha1, refname string) error {
		tagName := strings.TrimPrefix(refname, git.TagPrefix)
		if existingRelTags.Contains(strings.ToLower(tagName)) {
			return nil
		}

		if err := PushUpdateAddTag(ctx, repo, gitRepo, tagName, sha1, refname); err != nil {
			// sometimes a tag fails to sync, e.g. https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tag/?h=v2.6.11
			// which is a tree object, not a tag object, created before git
			log.Error("unable to PushUpdateAddTag: %q to Repo[%d:%s/%s]: %v", tagName, repo.ID, repo.OwnerName, repo.Name, err)
		}

		return nil
	})
	return err
}

// PushUpdateAddTag must be called for any push actions to add tag
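// It resolves the tag and its commit, takes the tagger signature (falling back to the
// commit author, then the committer), and stores the result as a tag-only release.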
func PushUpdateAddTag(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, tagName, sha1, refname string) error {
	tag, err := gitRepo.GetTagWithID(sha1, tagName)
	if err != nil {
		return fmt.Errorf("unable to GetTag: %w", err)
	}
	commit, err := tag.Commit(gitRepo)
	if err != nil {
		return fmt.Errorf("unable to get tag Commit: %w", err)
	}

	sig := tag.Tagger
	if sig == nil {
		sig = commit.Author
	}
	if sig == nil {
		sig = commit.Committer
	}

	var author *user_model.User
	createdAt := time.Unix(1, 0) // placeholder timestamp, only used when no signature is available

	if sig != nil {
		author, err = user_model.GetUserByEmail(ctx, sig.Email)
		if err != nil && !user_model.IsErrUserNotExist(err) {
			return fmt.Errorf("unable to GetUserByEmail for %q: %w", sig.Email, err)
		}
		createdAt = sig.When
	}
	commitsCount, err := commit.CommitsCount()
	if err != nil {
		return fmt.Errorf("unable to get CommitsCount: %w", err)
	}

	rel := repo_model.Release{
		RepoID:       repo.ID,
		TagName:      tagName,
		LowerTagName: strings.ToLower(tagName),
		Sha1:         commit.ID.String(),
		NumCommits:   commitsCount,
		CreatedUnix:  timeutil.TimeStamp(createdAt.Unix()),
		IsTag:        true,
	}
	if author != nil {
		rel.PublisherID = author.ID
	}

	return repo_model.SaveOrUpdateTag(ctx, repo, &rel)
}

// StoreMissingLfsObjectsInRepository downloads missing LFS objects
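// It scans the repository for LFS pointers, skips objects that are already stored
// locally, and downloads the remaining ones in batches through the given lfs.Client.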
func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository, lfsClient lfs.Client) error {
	contentStore := lfs.NewContentStore()

	pointerChan := make(chan lfs.PointerBlob)
	errChan := make(chan error, 1)
	go lfs.SearchPointerBlobs(ctx, gitRepo, pointerChan, errChan)

	downloadObjects := func(pointers []lfs.Pointer) error {
		err := lfsClient.Download(ctx, pointers, func(p lfs.Pointer, content io.ReadCloser, objectError error) error {
			if errors.Is(objectError, lfs.ErrObjectNotExist) {
				log.Warn("Ignoring missing upstream LFS object %-v: %v", p, objectError)
				return nil
			}
			if objectError != nil {
				return objectError
			}

			defer content.Close()
			_, err := git_model.NewLFSMetaObject(ctx, repo.ID, p)
			if err != nil {
				log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, p, err)
				return err
			}
			if err := contentStore.Put(p, content); err != nil {
				log.Error("Repo[%-v]: Error storing content for LFS meta object %-v: %v", repo, p, err)
				if _, err2 := git_model.RemoveLFSMetaObjectByOid(ctx, repo.ID, p.Oid); err2 != nil {
					log.Error("Repo[%-v]: Error removing LFS meta object %-v: %v", repo, p, err2)
				}
				return err
			}
			return nil
		})
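		// A cancelled context means the sync was interrupted rather than that the
		// download failed; report success in that case so the caller can stop cleanly.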
		if err != nil {
			select {
			case <-ctx.Done():
				return nil
			default:
			}
		}
		return err
	}

	var batch []lfs.Pointer
	for pointerBlob := range pointerChan {
		meta, err := git_model.GetLFSMetaObjectByOid(ctx, repo.ID, pointerBlob.Oid)
		if err != nil && err != git_model.ErrLFSObjectNotExist {
			log.Error("Repo[%-v]: Error querying LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
			return err
		}
		if meta != nil {
			log.Trace("Repo[%-v]: Skipping known LFS meta object %-v", repo, pointerBlob.Pointer)
			continue
		}

		log.Trace("Repo[%-v]: LFS object %-v not present in repository", repo, pointerBlob.Pointer)

		exist, err := contentStore.Exists(pointerBlob.Pointer)
		if err != nil {
			log.Error("Repo[%-v]: Error checking if LFS object %-v exists: %v", repo, pointerBlob.Pointer, err)
			return err
		}

		if exist {
			log.Trace("Repo[%-v]: LFS object %-v already present; creating meta object", repo, pointerBlob.Pointer)
			_, err := git_model.NewLFSMetaObject(ctx, repo.ID, pointerBlob.Pointer)
			if err != nil {
				log.Error("Repo[%-v]: Error creating LFS meta object %-v: %v", repo, pointerBlob.Pointer, err)
				return err
			}
		} else {
			if setting.LFS.MaxFileSize > 0 && pointerBlob.Size > setting.LFS.MaxFileSize {
				log.Info("Repo[%-v]: LFS object %-v download denied because of LFS_MAX_FILE_SIZE=%d < size %d", repo, pointerBlob.Pointer, setting.LFS.MaxFileSize, pointerBlob.Size)
				continue
			}

			batch = append(batch, pointerBlob.Pointer)
			if len(batch) >= lfsClient.BatchSize() {
				if err := downloadObjects(batch); err != nil {
					return err
				}
				batch = nil
			}
		}
	}
	if len(batch) > 0 {
		if err := downloadObjects(batch); err != nil {
			return err
		}
	}
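
	// Surface any error reported by the pointer scanner once all discovered pointers have been handled.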
	err, has := <-errChan
	if has {
		log.Error("Repo[%-v]: Error enumerating LFS objects for repository: %v", repo, err)
		return err
	}

	return nil
}

// shortRelease is a slimmed-down release row that stands in for repo_model.Release
// to reduce memory use when loading many releases.
type shortRelease struct {
	ID      int64
	TagName string
	Sha1    string
	IsTag   bool
}

func (shortRelease) TableName() string {
	return "release"
}

// pullMirrorReleaseSync is a pull-mirror specific tag<->release table
// synchronization which overwrites all Releases from the repository tags. This
// can be relied on since a pull-mirror is always identical to its
// upstream. Hence, after each sync we want the pull-mirror release set to be
// identical to the upstream tag set. This is much more efficient for
// repositories like https://github.com/vim/vim (with over 13000 tags).
func pullMirrorReleaseSync(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
log.Trace("pullMirrorReleaseSync: rebuilding releases for pull-mirror Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
tags, numTags, err := gitRepo.GetTagInfos(0, 0)
if err != nil {
return fmt.Errorf("unable to GetTagInfos in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
}
err = db.WithTx(ctx, func(ctx context.Context) error {
dbReleases, err := db.Find[shortRelease](ctx, repo_model.FindReleasesOptions{
RepoID: repo.ID,
IncludeDrafts: true,
IncludeTags: true,
})
if err != nil {
return fmt.Errorf("unable to FindReleases in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
		}

		inserts, deletes, updates := calcSync(tags, dbReleases)
		//
		// make release set identical to upstream tags
		//
		for _, tag := range inserts {
			release := repo_model.Release{
				RepoID:       repo.ID,
				TagName:      tag.Name,
				LowerTagName: strings.ToLower(tag.Name),
				Sha1:         tag.Object.String(),
				// NOTE: ignored, since NumCommits is unused
				// for pull-mirrors (it is only relevant when
				// displaying releases, i.e. IsTag: false)
				NumCommits:  -1,
				CreatedUnix: timeutil.TimeStamp(tag.Tagger.When.Unix()),
				IsTag:       true,
			}
			if err := db.Insert(ctx, release); err != nil {
				return fmt.Errorf("unable to insert tag %s for pull-mirror Repo[%d:%s/%s]: %w", tag.Name, repo.ID, repo.OwnerName, repo.Name, err)
			}
		}

		// only delete releases that exist as tags (not proper releases)
		if len(deletes) > 0 {
			if _, err := db.GetEngine(ctx).Where("repo_id=?", repo.ID).
				In("id", deletes).
				Delete(&repo_model.Release{}); err != nil {
				return fmt.Errorf("unable to delete tags for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
			}
		}

		for _, tag := range updates {
			if _, err := db.GetEngine(ctx).Where("repo_id = ? AND lower_tag_name = ?", repo.ID, strings.ToLower(tag.Name)).
				Cols("sha1", "created_unix").
				Update(&repo_model.Release{
					Sha1:        tag.Object.String(),
					CreatedUnix: timeutil.TimeStamp(tag.Tagger.When.Unix()),
				}); err != nil {
				return fmt.Errorf("unable to update tag %s for pull-mirror Repo[%d:%s/%s]: %w", tag.Name, repo.ID, repo.OwnerName, repo.Name, err)
			}
		}

		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to rebuild release table for pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
	}

	log.Trace("pullMirrorReleaseSync: done rebuilding %d releases", numTags)
	return nil
}

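// calcSync compares the repository tags with the existing release rows and returns
// the tags to insert (no matching release), the release IDs to delete (tag-only
// releases whose tag no longer exists upstream), and the tags to update (commit SHA changed).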
func calcSync(destTags []*git.Tag, dbTags []*shortRelease) ([]*git.Tag, []int64, []*git.Tag) {
	destTagMap := make(map[string]*git.Tag)
	for _, tag := range destTags {
		destTagMap[tag.Name] = tag
	}
	dbTagMap := make(map[string]*shortRelease)
	for _, rel := range dbTags {
		dbTagMap[rel.TagName] = rel
	}

	inserted := make([]*git.Tag, 0, 10)
	updated := make([]*git.Tag, 0, 10)
	for _, tag := range destTags {
		rel := dbTagMap[tag.Name]
		if rel == nil {
			inserted = append(inserted, tag)
		} else if rel.Sha1 != tag.Object.String() {
			updated = append(updated, tag)
		}
	}

	deleted := make([]int64, 0, 10)
	for _, tag := range dbTags {
		if destTagMap[tag.TagName] == nil && tag.IsTag {
			deleted = append(deleted, tag.ID)
		}
	}

	return inserted, deleted, updated
}