Mirror of https://github.com/go-gitea/gitea.git (synced 2025-09-10 23:56:17 +02:00)
Use db.WithTx/WithTx2 instead of TxContext when possible (#35428)
This commit is contained in:
parent fb247f640e
commit e35e724e42
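The commit replaces the manual db.TxContext / defer committer.Close() / committer.Commit() sequence with the db.WithTx and db.WithTx2 helpers, which run a callback inside a transaction and handle commit and rollback based on the callback's return value. A minimal before/after sketch of the pattern (doWork and countWork are illustrative placeholders, not functions touched by this commit):

// Before: manual transaction management with TxContext.
func doSomething(ctx context.Context) error {
    ctx, committer, err := db.TxContext(ctx)
    if err != nil {
        return err
    }
    defer committer.Close()
    if err := doWork(ctx); err != nil { // doWork is an illustrative placeholder
        return err
    }
    return committer.Commit()
}

// After: db.WithTx runs the callback in a transaction and commits when it returns nil.
func doSomething(ctx context.Context) error {
    return db.WithTx(ctx, func(ctx context.Context) error {
        return doWork(ctx)
    })
}

// db.WithTx2 is the variant used when the callback also returns a value.
func doSomethingCounted(ctx context.Context) (int64, error) {
    return db.WithTx2(ctx, func(ctx context.Context) (int64, error) {
        return countWork(ctx) // countWork is an illustrative placeholder
    })
}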
@@ -72,96 +72,90 @@ func AddGPGKey(ctx context.Context, ownerID int64, content, token, signature str
        return nil, err
    }

    return db.WithTx2(ctx, func(ctx context.Context) ([]*GPGKey, error) {
        keys := make([]*GPGKey, 0, len(ekeys))

        verified := false
        // Handle provided signature
        if signature != "" {
            signer, err := openpgp.CheckArmoredDetachedSignature(ekeys, strings.NewReader(token), strings.NewReader(signature), nil)
            if err != nil {
                signer, err = openpgp.CheckArmoredDetachedSignature(ekeys, strings.NewReader(token+"\n"), strings.NewReader(signature), nil)
            }
            if err != nil {
                signer, err = openpgp.CheckArmoredDetachedSignature(ekeys, strings.NewReader(token+"\r\n"), strings.NewReader(signature), nil)
            }
            if err != nil {
                log.Debug("AddGPGKey CheckArmoredDetachedSignature failed: %v", err)
                return nil, ErrGPGInvalidTokenSignature{
                    ID:      ekeys[0].PrimaryKey.KeyIdString(),
                    Wrapped: err,
                }
            }
            ekeys = []*openpgp.Entity{signer}
            verified = true
        }

        if len(ekeys) > 1 {
            id2key := map[string]*openpgp.Entity{}
            newEKeys := make([]*openpgp.Entity, 0, len(ekeys))
            for _, ekey := range ekeys {
                id := ekey.PrimaryKey.KeyIdString()
                if original, has := id2key[id]; has {
                    // Coalesce this with the other one
                    for _, subkey := range ekey.Subkeys {
                        if subkey.PublicKey == nil {
                            continue
                        }
                        found := false

                        for _, originalSubkey := range original.Subkeys {
                            if originalSubkey.PublicKey == nil {
                                continue
                            }
                            if originalSubkey.PublicKey.KeyId == subkey.PublicKey.KeyId {
                                found = true
                                break
                            }
                        }
                        if !found {
                            original.Subkeys = append(original.Subkeys, subkey)
                        }
                    }
                    for name, identity := range ekey.Identities {
                        if _, has := original.Identities[name]; has {
                            continue
                        }
                        original.Identities[name] = identity
                    }
                    continue
                }
                id2key[id] = ekey
                newEKeys = append(newEKeys, ekey)
            }
            ekeys = newEKeys
        }

        for _, ekey := range ekeys {
            // Key ID cannot be duplicated.
            has, err := db.GetEngine(ctx).Where("key_id=?", ekey.PrimaryKey.KeyIdString()).
                Get(new(GPGKey))
            if err != nil {
                return nil, err
            } else if has {
                return nil, ErrGPGKeyIDAlreadyUsed{ekey.PrimaryKey.KeyIdString()}
            }

            key, err := parseGPGKey(ctx, ownerID, ekey, verified)
            if err != nil {
                return nil, err
            }

            if err = addGPGKey(ctx, key, content); err != nil {
                return nil, err
            }
            keys = append(keys, key)
        }
        return keys, nil
    })
}
@@ -334,122 +334,111 @@ func FindRenamedBranch(ctx context.Context, repoID int64, from string) (branch *

// RenameBranch rename a branch
func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to string, gitAction func(ctx context.Context, isDefault bool) error) (err error) {
    return db.WithTx(ctx, func(ctx context.Context) error {
        sess := db.GetEngine(ctx)

        // check whether from branch exist
        var branch Branch
        exist, err := db.GetEngine(ctx).Where("repo_id=? AND name=?", repo.ID, from).Get(&branch)
        if err != nil {
            return err
        } else if !exist || branch.IsDeleted {
            return ErrBranchNotExist{
                RepoID:     repo.ID,
                BranchName: from,
            }
        }

        // check whether to branch exist or is_deleted
        var dstBranch Branch
        exist, err = db.GetEngine(ctx).Where("repo_id=? AND name=?", repo.ID, to).Get(&dstBranch)
        if err != nil {
            return err
        }
        if exist {
            if !dstBranch.IsDeleted {
                return ErrBranchAlreadyExists{
                    BranchName: to,
                }
            }

            if _, err := db.GetEngine(ctx).ID(dstBranch.ID).NoAutoCondition().Delete(&dstBranch); err != nil {
                return err
            }
        }

        // 1. update branch in database
        if n, err := sess.Where("repo_id=? AND name=?", repo.ID, from).Update(&Branch{
            Name: to,
        }); err != nil {
            return err
        } else if n <= 0 {
            return ErrBranchNotExist{
                RepoID:     repo.ID,
                BranchName: from,
            }
        }

        // 2. update default branch if needed
        isDefault := repo.DefaultBranch == from
        if isDefault {
            repo.DefaultBranch = to
            _, err = sess.ID(repo.ID).Cols("default_branch").Update(repo)
            if err != nil {
                return err
            }
        }

        // 3. Update protected branch if needed
        protectedBranch, err := GetProtectedBranchRuleByName(ctx, repo.ID, from)
        if err != nil {
            return err
        }

        if protectedBranch != nil {
            // there is a protect rule for this branch
            protectedBranch.RuleName = to
            if _, err = sess.ID(protectedBranch.ID).Cols("branch_name").Update(protectedBranch); err != nil {
                return err
            }
        } else {
            // some glob protect rules may match this branch
            protected, err := IsBranchProtected(ctx, repo.ID, from)
            if err != nil {
                return err
            }
            if protected {
                return ErrBranchIsProtected
            }
        }

        // 4. Update all not merged pull request base branch name
        _, err = sess.Table("pull_request").Where("base_repo_id=? AND base_branch=? AND has_merged=?",
            repo.ID, from, false).
            Update(map[string]any{"base_branch": to})
        if err != nil {
            return err
        }

        // 4.1 Update all not merged pull request head branch name
        if _, err = sess.Table("pull_request").Where("head_repo_id=? AND head_branch=? AND has_merged=?",
            repo.ID, from, false).
            Update(map[string]any{"head_branch": to}); err != nil {
            return err
        }

        // 5. insert renamed branch record
        if err = db.Insert(ctx, &RenamedBranch{
            RepoID: repo.ID,
            From:   from,
            To:     to,
        }); err != nil {
            return err
        }

        // 6. do git action
        return gitAction(ctx, isDefault)
    })
}

type FindRecentlyPushedNewBranchesOptions struct {
@@ -180,29 +180,25 @@ func RemoveLFSMetaObjectByOidFn(ctx context.Context, repoID int64, oid string, f
        return 0, ErrLFSObjectNotExist
    }

    return db.WithTx2(ctx, func(ctx context.Context) (int64, error) {
        m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repoID}
        if _, err := db.DeleteByBean(ctx, m); err != nil {
            return -1, err
        }

        count, err := db.CountByBean(ctx, &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
        if err != nil {
            return count, err
        }

        if fn != nil {
            if err := fn(count); err != nil {
                return count, err
            }
        }

        return count, nil
    })
}

// GetLFSMetaObjects returns all LFSMetaObjects associated with a repository
@@ -243,56 +239,46 @@ func ExistsLFSObject(ctx context.Context, oid string) (bool, error) {

// LFSAutoAssociate auto associates accessible LFSMetaObjects
func LFSAutoAssociate(ctx context.Context, metas []*LFSMetaObject, user *user_model.User, repoID int64) error {
    return db.WithTx(ctx, func(ctx context.Context) error {
        oids := make([]any, len(metas))
        oidMap := make(map[string]*LFSMetaObject, len(metas))
        for i, meta := range metas {
            oids[i] = meta.Oid
            oidMap[meta.Oid] = meta
        }

        if !user.IsAdmin {
            newMetas := make([]*LFSMetaObject, 0, len(metas))
            cond := builder.In(
                "`lfs_meta_object`.repository_id",
                builder.Select("`repository`.id").From("repository").Where(repo_model.AccessibleRepositoryCondition(user, unit.TypeInvalid)),
            )
            if err := db.GetEngine(ctx).Cols("oid").Where(cond).In("oid", oids...).GroupBy("oid").Find(&newMetas); err != nil {
                return err
            }
            if len(newMetas) != len(oidMap) {
                return fmt.Errorf("unable collect all LFS objects from database, expected %d, actually %d", len(oidMap), len(newMetas))
            }
            for i := range newMetas {
                newMetas[i].Size = oidMap[newMetas[i].Oid].Size
                newMetas[i].RepositoryID = repoID
            }
            return db.Insert(ctx, newMetas)
        }

        // admin can associate any LFS object to any repository, and we do not care about errors (eg: duplicated unique key),
        // even if error occurs, it won't hurt users and won't make things worse
        for i := range metas {
            p := lfs.Pointer{Oid: metas[i].Oid, Size: metas[i].Size}
            if err := db.Insert(ctx, &LFSMetaObject{
                Pointer:      p,
                RepositoryID: repoID,
            }); err != nil {
                log.Warn("failed to insert LFS meta object %-v for repo_id: %d into database, err=%v", p, repoID, err)
            }
        }
        return nil
    })
}

// CopyLFS copies LFS data from one repo to another
@@ -91,18 +91,10 @@ func GetAssignedIssues(ctx context.Context, opts *AssignedIssuesOptions) ([]*Iss

// ToggleIssueAssignee changes a user between assigned and not assigned for this issue, and make issue comment for it.
func ToggleIssueAssignee(ctx context.Context, issue *Issue, doer *user_model.User, assigneeID int64) (removed bool, comment *Comment, err error) {
    if err := db.WithTx(ctx, func(ctx context.Context) error {
        removed, comment, err = toggleIssueAssignee(ctx, issue, doer, assigneeID, false)
        return err
    }); err != nil {
        return false, nil, err
    }
@@ -415,37 +415,28 @@ func NewIssueWithIndex(ctx context.Context, doer *user_model.User, opts NewIssue

// NewIssue creates new issue with labels for repository.
// The title will be cut off at 255 characters if it's longer than 255 characters.
func NewIssue(ctx context.Context, repo *repo_model.Repository, issue *Issue, labelIDs []int64, uuids []string) (err error) {
    return db.WithTx(ctx, func(ctx context.Context) error {
        idx, err := db.GetNextResourceIndex(ctx, "issue_index", repo.ID)
        if err != nil {
            return fmt.Errorf("generate issue index failed: %w", err)
        }

        issue.Index = idx
        issue.Title = util.EllipsisDisplayString(issue.Title, 255)

        if err = NewIssueWithIndex(ctx, issue.Poster, NewIssueOptions{
            Repo:        repo,
            Issue:       issue,
            LabelIDs:    labelIDs,
            Attachments: uuids,
        }); err != nil {
            if repo_model.IsErrUserDoesNotHaveAccessToRepo(err) || IsErrNewIssueInsert(err) {
                return err
            }
            return fmt.Errorf("newIssue: %w", err)
        }
        return nil
    })
}

// UpdateIssueMentions updates issue-user relations for mentioned users.
@@ -442,58 +442,53 @@ func SearchEmails(ctx context.Context, opts *SearchEmailOptions) ([]*SearchEmail

// ActivateUserEmail will change the activated state of an email address,
// either primary or secondary (all in the email_address table)
func ActivateUserEmail(ctx context.Context, userID int64, email string, activate bool) (err error) {
    return db.WithTx(ctx, func(ctx context.Context) error {
        // Activate/deactivate a user's secondary email address
        // First check if there's another user active with the same address
        addr, exist, err := db.Get[EmailAddress](ctx, builder.Eq{"uid": userID, "lower_email": strings.ToLower(email)})
        if err != nil {
            return err
        } else if !exist {
            return fmt.Errorf("no such email: %d (%s)", userID, email)
        }

        if addr.IsActivated == activate {
            // Already in the desired state; no action
            return nil
        }
        if activate {
            if used, err := IsEmailActive(ctx, email, addr.ID); err != nil {
                return fmt.Errorf("unable to check isEmailActive() for %s: %w", email, err)
            } else if used {
                return ErrEmailAlreadyUsed{Email: email}
            }
        }
        if err = updateActivation(ctx, addr, activate); err != nil {
            return fmt.Errorf("unable to updateActivation() for %d:%s: %w", addr.ID, addr.Email, err)
        }

        // Activate/deactivate a user's primary email address and account
        if addr.IsPrimary {
            user, exist, err := db.Get[User](ctx, builder.Eq{"id": userID})
            if err != nil {
                return err
            } else if !exist || !strings.EqualFold(user.Email, email) {
                return fmt.Errorf("no user with ID: %d and Email: %s", userID, email)
            }

            // The user's activation state should be synchronized with the primary email
            if user.IsActive != activate {
                user.IsActive = activate
                if user.Rands, err = GetUserSalt(); err != nil {
                    return fmt.Errorf("unable to generate salt: %w", err)
                }
                if err = UpdateUserCols(ctx, user, "is_active", "rands"); err != nil {
                    return fmt.Errorf("unable to updateUserCols() for user ID: %d: %w", userID, err)
                }
            }
        }
        return nil
    })
}

// validateEmailBasic checks whether the email complies with the rules
@@ -716,90 +716,82 @@ func createUser(ctx context.Context, u *User, meta *Meta, createdByAdmin bool, o
        }
    }

    return db.WithTx(ctx, func(ctx context.Context) error {
        isExist, err := IsUserExist(ctx, 0, u.Name)
        if err != nil {
            return err
        } else if isExist {
            return ErrUserAlreadyExist{u.Name}
        }

        isExist, err = IsEmailUsed(ctx, u.Email)
        if err != nil {
            return err
        } else if isExist {
            return ErrEmailAlreadyUsed{
                Email: u.Email,
            }
        }

        // prepare for database

        u.LowerName = strings.ToLower(u.Name)
        u.AvatarEmail = u.Email
        if u.Rands, err = GetUserSalt(); err != nil {
            return err
        }
        if u.Passwd != "" {
            if err = u.SetPassword(u.Passwd); err != nil {
                return err
            }
        } else {
            u.Salt = ""
            u.PasswdHashAlgo = ""
        }

        // save changes to database

        if err = DeleteUserRedirect(ctx, u.Name); err != nil {
            return err
        }

        if u.CreatedUnix == 0 {
            // Caller expects auto-time for creation & update timestamps.
            err = db.Insert(ctx, u)
        } else {
            // Caller sets the timestamps themselves. They are responsible for ensuring
            // both `CreatedUnix` and `UpdatedUnix` are set appropriately.
            _, err = db.GetEngine(ctx).NoAutoTime().Insert(u)
        }
        if err != nil {
            return err
        }

        if setting.RecordUserSignupMetadata {
            // insert initial IP and UserAgent
            if err = SetUserSetting(ctx, u.ID, SignupIP, meta.InitialIP); err != nil {
                return err
            }

            // trim user agent string to a reasonable length, if necessary
            userAgent := strings.TrimSpace(meta.InitialUserAgent)
            if len(userAgent) > 255 {
                userAgent = userAgent[:255]
            }
            if err = SetUserSetting(ctx, u.ID, SignupUserAgent, userAgent); err != nil {
                return err
            }
        }

        // insert email address
        return db.Insert(ctx, &EmailAddress{
            UID:         u.ID,
            Email:       u.Email,
            LowerEmail:  strings.ToLower(u.Email),
            IsActivated: u.IsActive,
            IsPrimary:   true,
        })
    })
}

// ErrDeleteLastAdminUser represents a "DeleteLastAdminUser" kind of error.
@@ -261,74 +261,67 @@ func GetRefEndNamesAndURLs(issues []*issues_model.Issue, repoLink string) (map[i

// deleteIssue deletes the issue
func deleteIssue(ctx context.Context, issue *issues_model.Issue) ([]string, error) {
    return db.WithTx2(ctx, func(ctx context.Context) ([]string, error) {
        if _, err := db.GetEngine(ctx).ID(issue.ID).NoAutoCondition().Delete(issue); err != nil {
            return nil, err
        }

        // update the total issue numbers
        if err := repo_model.UpdateRepoIssueNumbers(ctx, issue.RepoID, issue.IsPull, false); err != nil {
            return nil, err
        }
        // if the issue is closed, update the closed issue numbers
        if issue.IsClosed {
            if err := repo_model.UpdateRepoIssueNumbers(ctx, issue.RepoID, issue.IsPull, true); err != nil {
                return nil, err
            }
        }

        if err := issues_model.UpdateMilestoneCounters(ctx, issue.MilestoneID); err != nil {
            return nil, fmt.Errorf("error updating counters for milestone id %d: %w",
                issue.MilestoneID, err)
        }

        if err := activities_model.DeleteIssueActions(ctx, issue.RepoID, issue.ID, issue.Index); err != nil {
            return nil, err
        }

        // find attachments related to this issue and remove them
        if err := issue.LoadAttachments(ctx); err != nil {
            return nil, err
        }

        var attachmentPaths []string
        for i := range issue.Attachments {
            attachmentPaths = append(attachmentPaths, issue.Attachments[i].RelativePath())
        }

        // delete all database data still assigned to this issue
        if err := db.DeleteBeans(ctx,
            &issues_model.ContentHistory{IssueID: issue.ID},
            &issues_model.Comment{IssueID: issue.ID},
            &issues_model.IssueLabel{IssueID: issue.ID},
            &issues_model.IssueDependency{IssueID: issue.ID},
            &issues_model.IssueAssignees{IssueID: issue.ID},
            &issues_model.IssueUser{IssueID: issue.ID},
            &activities_model.Notification{IssueID: issue.ID},
            &issues_model.Reaction{IssueID: issue.ID},
            &issues_model.IssueWatch{IssueID: issue.ID},
            &issues_model.Stopwatch{IssueID: issue.ID},
            &issues_model.TrackedTime{IssueID: issue.ID},
            &project_model.ProjectIssue{IssueID: issue.ID},
            &repo_model.Attachment{IssueID: issue.ID},
            &issues_model.PullRequest{IssueID: issue.ID},
            &issues_model.Comment{RefIssueID: issue.ID},
            &issues_model.IssueDependency{DependencyID: issue.ID},
            &issues_model.Comment{DependentIssueID: issue.ID},
            &issues_model.IssuePin{IssueID: issue.ID},
        ); err != nil {
            return nil, err
        }

        return attachmentPaths, nil
    })
}

// DeleteOrphanedIssues delete issues without a repo
@@ -52,39 +52,34 @@ func deleteOrganization(ctx context.Context, org *org_model.Organization) error

// DeleteOrganization completely and permanently deletes everything of organization.
func DeleteOrganization(ctx context.Context, org *org_model.Organization, purge bool) error {
    if err := db.WithTx(ctx, func(ctx context.Context) error {
        if purge {
            err := repo_service.DeleteOwnerRepositoriesDirectly(ctx, org.AsUser())
            if err != nil {
                return err
            }
        }

        // Check ownership of repository.
        count, err := repo_model.CountRepositories(ctx, repo_model.CountRepositoryOptions{OwnerID: org.ID})
        if err != nil {
            return fmt.Errorf("GetRepositoryCount: %w", err)
        } else if count > 0 {
            return repo_model.ErrUserOwnRepos{UID: org.ID}
        }

        // Check ownership of packages.
        if ownsPackages, err := packages_model.HasOwnerPackages(ctx, org.ID); err != nil {
            return fmt.Errorf("HasOwnerPackages: %w", err)
        } else if ownsPackages {
            return packages_model.ErrUserOwnPackages{UID: org.ID}
        }

        if err := deleteOrganization(ctx, org); err != nil {
            return fmt.Errorf("DeleteOrganization: %w", err)
        }
        return nil
    }); err != nil {
        return err
    }
@@ -47,57 +47,52 @@ func RemoveOrgUser(ctx context.Context, org *organization.Organization, user *us
        }
    }

    return db.WithTx(ctx, func(ctx context.Context) error {
        if _, err := db.DeleteByID[organization.OrgUser](ctx, ou.ID); err != nil {
            return err
        } else if _, err = db.Exec(ctx, "UPDATE `user` SET num_members=num_members-1 WHERE id=?", org.ID); err != nil {
            return err
        }

        // Delete all repository accesses and unwatch them.
        env, err := repo_model.AccessibleReposEnv(ctx, org, user.ID)
        if err != nil {
            return fmt.Errorf("AccessibleReposEnv: %w", err)
        }
        repoIDs, err := env.RepoIDs(ctx)
        if err != nil {
            return fmt.Errorf("GetUserRepositories [%d]: %w", user.ID, err)
        }
        for _, repoID := range repoIDs {
            repo, err := repo_model.GetRepositoryByID(ctx, repoID)
            if err != nil {
                return err
            }
            if err = repo_model.WatchRepo(ctx, user, repo, false); err != nil {
                return err
            }
        }

        if len(repoIDs) > 0 {
            if _, err = db.GetEngine(ctx).
                Where("user_id = ?", user.ID).
                In("repo_id", repoIDs).
                Delete(new(access_model.Access)); err != nil {
                return err
            }
        }

        // Delete member in their teams.
        teams, err := organization.GetUserOrgTeams(ctx, org.ID, user.ID)
        if err != nil {
            return err
        }
        for _, t := range teams {
            if err = removeTeamMember(ctx, t, user); err != nil {
                return err
            }
        }

        return nil
    })
}
@@ -267,74 +267,69 @@ func UpdateRelease(ctx context.Context, doer *user_model.User, gitRepo *git.Repo
    }
    rel.LowerTagName = strings.ToLower(rel.TagName)

    oldRelease, err := repo_model.GetReleaseByID(ctx, rel.ID)
    if err != nil {
        return err
    }
    isConvertedFromTag := oldRelease.IsTag && !rel.IsTag

    if err := db.WithTx(ctx, func(ctx context.Context) error {
        if err = repo_model.UpdateRelease(ctx, rel); err != nil {
            return err
        }

        if err = repo_model.AddReleaseAttachments(ctx, rel.ID, addAttachmentUUIDs); err != nil {
            return fmt.Errorf("AddReleaseAttachments: %w", err)
        }

        deletedUUIDs := make(container.Set[string])
        if len(delAttachmentUUIDs) > 0 {
            // Check attachments
            attachments, err := repo_model.GetAttachmentsByUUIDs(ctx, delAttachmentUUIDs)
            if err != nil {
                return fmt.Errorf("GetAttachmentsByUUIDs [uuids: %v]: %w", delAttachmentUUIDs, err)
            }
            for _, attach := range attachments {
                if attach.ReleaseID != rel.ID {
                    return util.NewPermissionDeniedErrorf("delete attachment of release permission denied")
                }
                deletedUUIDs.Add(attach.UUID)
            }

            if _, err := repo_model.DeleteAttachments(ctx, attachments, true); err != nil {
                return fmt.Errorf("DeleteAttachments [uuids: %v]: %w", delAttachmentUUIDs, err)
            }
        }

        if len(editAttachments) > 0 {
            updateAttachmentsList := make([]string, 0, len(editAttachments))
            for k := range editAttachments {
                updateAttachmentsList = append(updateAttachmentsList, k)
            }
            // Check attachments
            attachments, err := repo_model.GetAttachmentsByUUIDs(ctx, updateAttachmentsList)
            if err != nil {
                return fmt.Errorf("GetAttachmentsByUUIDs [uuids: %v]: %w", updateAttachmentsList, err)
            }
            for _, attach := range attachments {
                if attach.ReleaseID != rel.ID {
                    return util.NewPermissionDeniedErrorf("update attachment of release permission denied")
                }
            }

            for uuid, newName := range editAttachments {
                if !deletedUUIDs.Contains(uuid) {
                    if err = repo_model.UpdateAttachmentByUUID(ctx, &repo_model.Attachment{
                        UUID: uuid,
                        Name: newName,
                    }, "name"); err != nil {
                        return err
                    }
                }
            }
        }

        return nil
    }); err != nil {
        return err
    }
@@ -173,92 +173,88 @@ func MigrateRepositoryGitData(ctx context.Context, u *user_model.User,
        }
    }

    return db.WithTx2(ctx, func(ctx context.Context) (*repo_model.Repository, error) {
        if opts.Mirror {
            remoteAddress, err := util.SanitizeURL(opts.CloneAddr)
            if err != nil {
                return repo, err
            }
            mirrorModel := repo_model.Mirror{
                RepoID:         repo.ID,
                Interval:       setting.Mirror.DefaultInterval,
                EnablePrune:    true,
                NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval),
                LFS:            opts.LFS,
                RemoteAddress:  remoteAddress,
            }
            if opts.LFS {
                mirrorModel.LFSEndpoint = opts.LFSEndpoint
            }

            if opts.MirrorInterval != "" {
                parsedInterval, err := time.ParseDuration(opts.MirrorInterval)
                if err != nil {
                    log.Error("Failed to set Interval: %v", err)
                    return repo, err
                }
                if parsedInterval == 0 {
                    mirrorModel.Interval = 0
                    mirrorModel.NextUpdateUnix = 0
                } else if parsedInterval < setting.Mirror.MinInterval {
                    err := fmt.Errorf("interval %s is set below Minimum Interval of %s", parsedInterval, setting.Mirror.MinInterval)
                    log.Error("Interval: %s is too frequent", opts.MirrorInterval)
                    return repo, err
                } else {
                    mirrorModel.Interval = parsedInterval
                    mirrorModel.NextUpdateUnix = timeutil.TimeStampNow().AddDuration(parsedInterval)
                }
            }

            if err = repo_model.InsertMirror(ctx, &mirrorModel); err != nil {
                return repo, fmt.Errorf("InsertOne: %w", err)
            }

            repo.IsMirror = true
            if err = repo_model.UpdateRepositoryColsNoAutoTime(ctx, repo, "num_watches", "is_empty", "default_branch", "default_wiki_branch", "is_mirror"); err != nil {
                return nil, err
            }

            if err = repo_module.UpdateRepoSize(ctx, repo); err != nil {
                log.Error("Failed to update size for repository: %v", err)
            }

            // this is necessary for sync local tags from remote
            configName := fmt.Sprintf("remote.%s.fetch", mirrorModel.GetRemoteName())
            if stdout, _, err := git.NewCommand("config").
                AddOptionValues("--add", configName, `+refs/tags/*:refs/tags/*`).
                RunStdString(ctx, &git.RunOpts{Dir: repoPath}); err != nil {
                log.Error("MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*) in %v: Stdout: %s\nError: %v", repo, stdout, err)
                return repo, fmt.Errorf("error in MigrateRepositoryGitData(git config --add <remote> +refs/tags/*:refs/tags/*): %w", err)
            }
        } else {
            if err = repo_module.UpdateRepoSize(ctx, repo); err != nil {
                log.Error("Failed to update size for repository: %v", err)
            }
            if repo, err = CleanUpMigrateInfo(ctx, repo); err != nil {
                return nil, err
            }
        }

        var enableRepoUnits []repo_model.RepoUnit
        if opts.Releases && !unit_model.TypeReleases.UnitGlobalDisabled() {
            enableRepoUnits = append(enableRepoUnits, repo_model.RepoUnit{RepoID: repo.ID, Type: unit_model.TypeReleases})
        }
        if opts.Wiki && !unit_model.TypeWiki.UnitGlobalDisabled() {
            enableRepoUnits = append(enableRepoUnits, repo_model.RepoUnit{RepoID: repo.ID, Type: unit_model.TypeWiki})
        }
        if len(enableRepoUnits) > 0 {
            err = UpdateRepositoryUnits(ctx, repo, enableRepoUnits, nil)
            if err != nil {
                return nil, err
            }
        }
        return repo, nil
    })
}

// CleanUpMigrateInfo finishes migrating repository and/or wiki with things that don't need to be done for mirrors.
@@ -210,65 +210,59 @@ func DeleteUser(ctx context.Context, u *user_model.User, purge bool) error {
        }
    }

    if err := db.WithTx(ctx, func(ctx context.Context) error {
        // Note: A user owns any repository or belongs to any organization
        // cannot perform delete operation. This causes a race with the purge above
        // however consistency requires that we ensure that this is the case

        // Check ownership of repository.
        count, err := repo_model.CountRepositories(ctx, repo_model.CountRepositoryOptions{OwnerID: u.ID})
        if err != nil {
            return fmt.Errorf("GetRepositoryCount: %w", err)
        } else if count > 0 {
            return repo_model.ErrUserOwnRepos{UID: u.ID}
        }

        // Check membership of organization.
        count, err = organization.GetOrganizationCount(ctx, u)
        if err != nil {
            return fmt.Errorf("GetOrganizationCount: %w", err)
        } else if count > 0 {
            return organization.ErrUserHasOrgs{UID: u.ID}
        }

        // Check ownership of packages.
        if ownsPackages, err := packages_model.HasOwnerPackages(ctx, u.ID); err != nil {
            return fmt.Errorf("HasOwnerPackages: %w", err)
        } else if ownsPackages {
            return packages_model.ErrUserOwnPackages{UID: u.ID}
        }

        if err := deleteUser(ctx, u, purge); err != nil {
            return fmt.Errorf("DeleteUser: %w", err)
        }
        return nil
    }); err != nil {
        return err
    }

    if err := asymkey_service.RewriteAllPublicKeys(ctx); err != nil {
        return err
    }
    if err := asymkey_service.RewriteAllPrincipalKeys(ctx); err != nil {
        return err
    }

    // Note: There are something just cannot be roll back, so just keep error logs of those operations.
    path := user_model.UserPath(u.Name)
    if err := util.RemoveAll(path); err != nil {
        err = fmt.Errorf("failed to RemoveAll %s: %w", path, err)
        _ = system_model.CreateNotice(ctx, system_model.NoticeTask, fmt.Sprintf("delete user '%s': %v", u.Name, err))
    }

    if u.Avatar != "" {
        avatarPath := u.CustomAvatarRelativePath()
        if err := storage.Avatars.Delete(avatarPath); err != nil {
            err = fmt.Errorf("failed to remove %s: %w", avatarPath, err)
            _ = system_model.CreateNotice(ctx, system_model.NoticeTask, fmt.Sprintf("delete user '%s': %v", u.Name, err))
        }