0
0
mirror of https://github.com/go-gitea/gitea.git synced 2026-05-14 21:47:38 +02:00

Merge branch 'main' into main

This commit is contained in:
Karthik Bhandary 2026-05-02 09:32:25 +05:30 committed by GitHub
commit 0c8a150a5d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
94 changed files with 1676 additions and 1264 deletions

View File

@ -102,9 +102,10 @@ jobs:
runs-on: ubuntu-latest
services:
elasticsearch:
image: elasticsearch:7.5.0
image: docker.elastic.co/elasticsearch/elasticsearch:8.19.14
env:
discovery.type: single-node
xpack.security.enabled: false
ports:
- "9200:9200"
meilisearch:
@ -180,9 +181,10 @@ jobs:
options: >-
--mount type=tmpfs,destination=/bitnami/mysql/data
elasticsearch:
image: elasticsearch:7.5.0
image: docker.elastic.co/elasticsearch/elasticsearch:8.19.14
env:
discovery.type: single-node
xpack.security.enabled: false
ports:
- "9200:9200"
smtpimap:

View File

@ -164,7 +164,7 @@ TEST_PGSQL_PASSWORD ?= postgres
TEST_PGSQL_SCHEMA ?= gtestschema
TEST_MINIO_ENDPOINT ?= minio:9000
TEST_MSSQL_HOST ?= mssql:1433
TEST_MSSQL_DBNAME ?= gitea
TEST_MSSQL_DBNAME ?= testgitea
TEST_MSSQL_USERNAME ?= sa
TEST_MSSQL_PASSWORD ?= MwantsaSecurePassword1

View File

@ -1004,16 +1004,6 @@
"path": "github.com/olekukonko/tablewriter/LICENSE.md",
"licenseText": "Copyright (C) 2014 by Oleku Konko\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"
},
{
"name": "github.com/olivere/elastic/v7",
"path": "github.com/olivere/elastic/v7/LICENSE",
"licenseText": "The MIT License (MIT)\nCopyright © 2012-2015 Oliver Eilhard\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the “Software”), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n"
},
{
"name": "github.com/olivere/elastic/v7/uritemplates",
"path": "github.com/olivere/elastic/v7/uritemplates/LICENSE",
"licenseText": "Copyright (c) 2013 Joshua Tacoma\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
},
{
"name": "github.com/opencontainers/go-digest",
"path": "github.com/opencontainers/go-digest/LICENSE",

View File

@ -203,8 +203,8 @@ func runDump(ctx context.Context, cmd *cli.Command) error {
}
}()
targetDBType := cmd.String("database")
if len(targetDBType) > 0 && targetDBType != setting.Database.Type.String() {
targetDBType := setting.DatabaseType(cmd.String("database"))
if targetDBType != "" && targetDBType != setting.Database.Type {
log.Info("Dumping database %s => %s...", setting.Database.Type, targetDBType)
} else {
log.Info("Dumping database...")

View File

@ -1524,7 +1524,7 @@ LEVEL = Info
;; Issue Indexer settings
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Issue indexer type, currently support: bleve, db, elasticsearch or meilisearch default is bleve
;; Issue indexer type, currently support: bleve, db, elasticsearch (also compatible with OpenSearch) or meilisearch default is bleve
;ISSUE_INDEXER_TYPE = bleve
;;
;; Issue indexer storage path, available when ISSUE_INDEXER_TYPE is bleve
@ -1551,7 +1551,7 @@ LEVEL = Info
;; If empty then it defaults to `sources` only, as if you'd like to disable fully please see REPO_INDEXER_ENABLED.
;REPO_INDEXER_REPO_TYPES = sources,forks,mirrors,templates
;;
;; Code search engine type, could be `bleve` or `elasticsearch`.
;; Code search engine type, could be `bleve` or `elasticsearch` (also compatible with OpenSearch).
;REPO_INDEXER_TYPE = bleve
;;
;; Index file used for code search. available when `REPO_INDEXER_TYPE` is bleve

3
go.mod
View File

@ -87,7 +87,6 @@ require (
github.com/msteinert/pam/v2 v2.1.0
github.com/nektos/act v0.2.63
github.com/niklasfasching/go-org v1.9.1
github.com/olivere/elastic/v7 v7.0.32
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.1
github.com/pquerna/otp v1.5.0
@ -222,7 +221,7 @@ require (
github.com/klauspost/crc32 v1.3.0 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/libdns/libdns v1.1.1 // indirect
github.com/mailru/easyjson v0.9.2 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/markbates/going v1.0.3 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-runewidth v0.0.21 // indirect

10
go.sum
View File

@ -267,8 +267,6 @@ github.com/fatih/color v1.19.0 h1:Zp3PiM21/9Ld6FzSKyL5c/BULoe/ONr9KlbYVOfG8+w=
github.com/fatih/color v1.19.0/go.mod h1:zNk67I0ZUT1bEGsSGyCZYZNrHuTkJJB+r6Q9VuMi0LE=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@ -507,9 +505,8 @@ github.com/lib/pq v1.12.3/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
github.com/libdns/libdns v1.1.1 h1:wPrHrXILoSHKWJKGd0EiAVmiJbFShguILTg9leS/P/U=
github.com/libdns/libdns v1.1.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.9.2 h1:dX8U45hQsZpxd80nLvDGihsQ/OxlvTkVUXH2r/8cb2M=
github.com/mailru/easyjson v0.9.2/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/markbates/going v1.0.3 h1:mY45T5TvW+Xz5A6jY7lf4+NLg9D8+iuStIHyR7M8qsE=
github.com/markbates/going v1.0.3/go.mod h1:fQiT6v6yQar9UD6bd/D4Z5Afbk9J6BBVBtLiyY4gp2o=
github.com/markbates/goth v1.82.0 h1:8j/c34AjBSTNzO7zTsOyP5IYCQCMBTRBHAbBt/PI0bQ=
@ -585,8 +582,6 @@ github.com/olekukonko/ll v0.1.8 h1:ysHCJRGHYKzmBSdz9w5AySztx7lG8SQY+naTGYUbsz8=
github.com/olekukonko/ll v0.1.8/go.mod h1:RPRC6UcscfFZgjo1nulkfMH5IM0QAYim0LfnMvUuozw=
github.com/olekukonko/tablewriter v1.1.4 h1:ORUMI3dXbMnRlRggJX3+q7OzQFDdvgbN9nVWj1drm6I=
github.com/olekukonko/tablewriter v1.1.4/go.mod h1:+kedxuyTtgoZLwif3P1Em4hARJs+mVnzKxmsCL/C5RY=
github.com/olivere/elastic/v7 v7.0.32 h1:R7CXvbu8Eq+WlsLgxmKVKPox0oOwAE/2T9Si5BnvK6E=
github.com/olivere/elastic/v7 v7.0.32/go.mod h1:c7PVmLe3Fxq77PIfY/bZmxY/TAamBhCzZ8xDOE09a9k=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
@ -667,9 +662,8 @@ github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/skeema/knownhosts v1.3.2 h1:EDL9mgf4NzwMXCTfaxSD/o/a5fxDw/xL9nkU28JjdBg=
github.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.1.1 h1:T/YLemO5Yp7KPzS+lVtu+WsHn8yoSwTfItdAd1r3cck=
github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=

173
models/db/conn.go Normal file
View File

@ -0,0 +1,173 @@
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package db
import (
"errors"
"fmt"
"net"
"net/url"
"os"
"path/filepath"
"strings"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
)
// ConnOptions describes everything needed to build a SQL driver name and
// connection string for one of the supported database types.
type ConnOptions struct {
Type setting.DatabaseType
Host string // "host[:port]"; a path starting with "/" selects a unix socket for MySQL
Database string // database name; may carry extra "?param=..." parts (MySQL/Postgres)
User string
Passwd string
Schema string // PostgreSQL only: non-default schema (selects the schema-aware driver)
SSLMode string // Postgres sslmode; for MySQL "disable" is mapped to tls=false
SQLitePath string // SQLite only: path of the database file
SQLiteBusyTimeout int // SQLite only: busy timeout passed to the driver (presumably milliseconds — confirm against driver docs)
SQLiteJournalMode string // SQLite only: journal mode, e.g. "WAL"; empty keeps the driver default
}
// SQLiteConnStrOptions carries the SQLite-specific subset of ConnOptions for
// the pluggable connection-string builder (see the makeSQLiteConnStr variable).
type SQLiteConnStrOptions struct {
FilePath string
BusyTimeout int
JournalMode string
}
// GlobalConnOptions returns connection options populated from the global
// database settings (setting.Database).
func GlobalConnOptions() ConnOptions {
return ConnOptions{
Type: setting.Database.Type,
Host: setting.Database.Host,
Database: setting.Database.Name,
User: setting.Database.User,
Passwd: setting.Database.Passwd,
Schema: setting.Database.Schema,
SSLMode: setting.Database.SSLMode,
SQLitePath: setting.Database.Path,
SQLiteBusyTimeout: setting.Database.SQLiteBusyTimeout,
SQLiteJournalMode: setting.Database.SQLiteJournalMode,
}
}
// sqlDriverPostgresSchema is the name under which the schema-aware Postgres
// driver is registered (see registerPostgresSchemaDriver).
const sqlDriverPostgresSchema = "postgresschema"
// makeSQLiteConnStr is replaced with a real implementation when the binary is
// built with the "sqlite" build tag; by default SQLite support is reported as
// unavailable.
var makeSQLiteConnStr = func(opts SQLiteConnStrOptions) (string, string, error) {
return "", "", errors.New(`this Gitea binary was not built with SQLite3 support, get an official release or rebuild with: -tags sqlite,sqlite_unlock_notify`)
}
// ConnStrDefaultDatabase builds a connection string that targets the server's
// default database (database name and schema cleared), e.g. for creating or
// dropping databases.
func ConnStrDefaultDatabase(opts ConnOptions) (string, string, error) {
opts.Database, opts.Schema = "", ""
return ConnStr(opts)
}
// ConnStr builds the SQL driver name and connection string for the database
// described by opts, returning (driverName, connStr, error).
func ConnStr(opts ConnOptions) (string, string, error) {
switch {
case opts.Type.IsMySQL():
// use unix socket or tcp socket
connType := util.Iif(strings.HasPrefix(opts.Host, "/"), "unix", "tcp")
// allow (Postgres-inspired) default value to work in MySQL
tls := util.Iif(opts.SSLMode == "disable", "false", opts.SSLMode)
// in case the database name is a partial connection string which contains "?" parameters
paramSep := util.Iif(strings.Contains(opts.Database, "?"), "&", "?")
connStr := fmt.Sprintf("%s:%s@%s(%s)/%s%sparseTime=true&tls=%s", opts.User, opts.Passwd, connType, opts.Host, opts.Database, paramSep, tls)
return "mysql", connStr, nil
case opts.Type.IsPostgreSQL():
connStr := makePgSQLConnStr(opts.Host, opts.User, opts.Passwd, opts.Database, opts.SSLMode)
// a schema-aware wrapper driver is needed when a non-default schema is configured
driver := util.Iif(opts.Schema == "", "postgres", sqlDriverPostgresSchema)
registerPostgresSchemaDriver()
return driver, connStr, nil
case opts.Type.IsMSSQL():
host, port := parseMSSQLHostPort(opts.Host)
connStr := fmt.Sprintf("server=%s; port=%s; user id=%s; password=%s;", host, port, opts.User, opts.Passwd)
if opts.Database != "" {
connStr += "; database=" + opts.Database
}
return "mssql", connStr, nil
case opts.Type.IsSQLite3():
if opts.SQLitePath == "" {
return "", "", errors.New("sqlite3 database path cannot be empty")
}
// make sure the parent directory of the database file exists
if err := os.MkdirAll(filepath.Dir(opts.SQLitePath), os.ModePerm); err != nil {
return "", "", fmt.Errorf("failed to create directories: %w", err)
}
// delegated so real SQLite support is only compiled in with the "sqlite" build tag
return makeSQLiteConnStr(SQLiteConnStrOptions{
FilePath: opts.SQLitePath,
JournalMode: opts.SQLiteJournalMode,
BusyTimeout: opts.SQLiteBusyTimeout,
})
}
return "", "", fmt.Errorf("unknown database type: %s", opts.Type)
}
// parsePgSQLHostPort parses given input in various forms defined in
// https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
// and returns proper host and port number.
func parsePgSQLHostPort(info string) (host, port string) {
	h, p, err := net.SplitHostPort(info)
	if err == nil {
		host, port = h, p
	} else {
		// no "host:port" form — the whole input is the host;
		// unwrap a bracketed IPv6 literal such as "[::1]"
		host = info
		if l := len(host); l >= 2 && host[0] == '[' && host[l-1] == ']' {
			host = host[1 : l-1]
		}
	}
	// fall back to libpq-style defaults for anything left unspecified
	if host == "" {
		host = "127.0.0.1"
	}
	if port == "" {
		port = "5432"
	}
	return host, port
}
// makePgSQLConnStr builds a "postgres://" URL-style connection string.
// The database name may carry extra "?key=value" parameters, which are merged
// into the URL's query string; a host starting with "/" is treated as a unix
// socket directory and passed via the "host" query parameter instead of the
// URL host component.
func makePgSQLConnStr(dbHost, dbUser, dbPasswd, dbName, dbsslMode string) (connStr string) {
dbName, dbParam, _ := strings.Cut(dbName, "?")
host, port := parsePgSQLHostPort(dbHost)
connURL := url.URL{
Scheme: "postgres",
User: url.UserPassword(dbUser, dbPasswd),
Host: net.JoinHostPort(host, port),
Path: dbName,
OmitHost: false,
RawQuery: dbParam,
}
query := connURL.Query()
if strings.HasPrefix(host, "/") { // looks like a unix socket
query.Add("host", host)
connURL.Host = ":" + port
}
query.Set("sslmode", dbsslMode)
// note: url.Values.Encode sorts keys, so the output is deterministic
connURL.RawQuery = query.Encode()
return connURL.String()
}
// parseMSSQLHostPort splits the host into host and port.
// Supported forms: "host:port", "host,port" (MSSQL style — spaces after the
// comma are tolerated), or a bare "host".
// NOTE(review): a bracketed IPv6 literal such as "[::1]:1433" is not handled
// specially here (the first ":" wins) — confirm whether IPv6 hosts are needed.
func parseMSSQLHostPort(info string) (string, string) {
	// the default port "0" might be related to MSSQL's dynamic port, maybe it should be double-confirmed in the future
	host, port := "127.0.0.1", "0"
	if strings.Contains(info, ":") {
		fields := strings.Split(info, ":") // split once instead of once per field
		host, port = fields[0], fields[1]
	} else if strings.Contains(info, ",") {
		fields := strings.Split(info, ",")
		host, port = fields[0], strings.TrimSpace(fields[1])
	} else if len(info) > 0 {
		host = info
	}
	if host == "" {
		host = "127.0.0.1"
	}
	if port == "" {
		port = "0"
	}
	return host, port
}

View File

@ -1,7 +1,7 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package setting
package db
import (
"testing"
@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/assert"
)
func Test_parsePostgreSQLHostPort(t *testing.T) {
func TestParsePgSQLHostPort(t *testing.T) {
tests := map[string]struct {
HostPort string
Host string
@ -49,14 +49,14 @@ func Test_parsePostgreSQLHostPort(t *testing.T) {
for k, test := range tests {
t.Run(k, func(t *testing.T) {
t.Log(test.HostPort)
host, port := parsePostgreSQLHostPort(test.HostPort)
host, port := parsePgSQLHostPort(test.HostPort)
assert.Equal(t, test.Host, host)
assert.Equal(t, test.Port, port)
})
}
}
func Test_getPostgreSQLConnectionString(t *testing.T) {
func TestMakePgSQLConnStr(t *testing.T) {
tests := []struct {
Host string
User string
@ -103,7 +103,7 @@ func Test_getPostgreSQLConnectionString(t *testing.T) {
}
for _, test := range tests {
connStr := getPostgreSQLConnectionString(test.Host, test.User, test.Passwd, test.Name, test.SSLMode)
connStr := makePgSQLConnStr(test.Host, test.User, test.Passwd, test.Name, test.SSLMode)
assert.Equal(t, test.Output, connStr)
}
}

View File

@ -18,8 +18,8 @@ var registerOnce sync.Once
func registerPostgresSchemaDriver() {
registerOnce.Do(func() {
sql.Register("postgresschema", &postgresSchemaDriver{})
dialects.RegisterDriver("postgresschema", dialects.QueryDriver("postgres"))
sql.Register(sqlDriverPostgresSchema, &postgresSchemaDriver{})
dialects.RegisterDriver(sqlDriverPostgresSchema, dialects.QueryDriver("postgres"))
})
}

View File

@ -0,0 +1,34 @@
//go:build sqlite
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package db
import (
"fmt"
"strconv"
"strings"
"code.gitea.io/gitea/modules/setting"
_ "github.com/mattn/go-sqlite3"
)
// init registers "sqlite3" as a supported database type and swaps in the
// CGO-based connection-string builder; this file is only compiled when the
// "sqlite" build tag is set (see the go:build constraint above).
func init() {
setting.SupportedDatabaseTypes = append(setting.SupportedDatabaseTypes, "sqlite3")
makeSQLiteConnStr = makeSQLiteConnStrMattnCGO
}
// makeSQLiteConnStrMattnCGO builds the DSN for the mattn/go-sqlite3 (CGO)
// driver, returning the driver name and connection string.
func makeSQLiteConnStrMattnCGO(opts SQLiteConnStrOptions) (string, string, error) {
	// fixed query parameters plus the configured busy timeout
	params := []string{
		"cache=shared",
		"mode=rwc",
		"_busy_timeout=" + strconv.Itoa(opts.BusyTimeout),
		"_txlock=immediate",
	}
	// only pass the journal mode through when one is configured
	if opts.JournalMode != "" {
		params = append(params, "_journal_mode="+opts.JournalMode)
	}
	connStr := fmt.Sprintf("file:%s?%s", opts.FilePath, strings.Join(params, "&"))
	return "sqlite3", connStr, nil
}

View File

@ -3,10 +3,14 @@
package db
import "xorm.io/xorm/schemas"
import (
"code.gitea.io/gitea/modules/setting"
"xorm.io/xorm/schemas"
)
// DumpDatabase dumps all data from database according the special database SQL syntax to file system.
func DumpDatabase(filePath, dbType string) error {
func DumpDatabase(filePath string, dbType setting.DatabaseType) error {
var tbs []*schemas.Table
for _, t := range registeredModels {
t, err := xormEngine.TableInfo(t)

View File

@ -6,7 +6,6 @@ package db
import (
"context"
"fmt"
"strings"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
@ -24,31 +23,23 @@ func init() {
// newXORMEngine returns a new XORM engine from the configuration
func newXORMEngine() (*xorm.Engine, error) {
connStr, err := setting.DBConnStr()
connOpts := GlobalConnOptions()
driver, connStr, err := ConnStr(connOpts)
if err != nil {
return nil, err
}
var engine *xorm.Engine
if setting.Database.Type.IsPostgreSQL() && len(setting.Database.Schema) > 0 {
// OK whilst we sort out our schema issues - create a schema aware postgres
registerPostgresSchemaDriver()
engine, err = xorm.NewEngine("postgresschema", connStr)
} else {
engine, err = xorm.NewEngine(setting.Database.Type.String(), connStr)
}
engine, err := xorm.NewEngine(driver, connStr)
if err != nil {
return nil, err
}
switch setting.Database.Type {
case "mysql":
switch {
case connOpts.Type.IsMySQL():
engine.Dialect().SetParams(map[string]string{"rowFormat": "DYNAMIC"})
case "mssql":
case connOpts.Type.IsMSSQL():
engine.Dialect().SetParams(map[string]string{"DEFAULT_VARCHAR": "nvarchar"})
}
engine.SetSchema(setting.Database.Schema)
engine.SetSchema(connOpts.Schema)
return engine, nil
}
@ -56,10 +47,7 @@ func newXORMEngine() (*xorm.Engine, error) {
func InitEngine(ctx context.Context) error {
xe, err := newXORMEngine()
if err != nil {
if strings.Contains(err.Error(), "SQLite3 support") {
return fmt.Errorf("sqlite3 requires: -tags sqlite,sqlite_unlock_notify\n%w", err)
}
return fmt.Errorf("failed to connect to database: %w", err)
return fmt.Errorf("failed to init database engine: %w", err)
}
xe.SetMapper(names.GonicMapper{})

View File

@ -30,7 +30,7 @@ func TestDumpDatabase(t *testing.T) {
assert.NoError(t, db.GetEngine(t.Context()).Sync(new(Version)))
for _, dbType := range setting.SupportedDatabaseTypes {
assert.NoError(t, db.DumpDatabase(filepath.Join(dir, dbType+".sql"), dbType))
assert.NoError(t, db.DumpDatabase(filepath.Join(dir, dbType+".sql"), setting.DatabaseType(dbType)))
}
}

View File

@ -6,17 +6,18 @@ package base
import (
"testing"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/xorm/names"
)
func TestMain(m *testing.M) {
MainTest(m)
migrationtest.MainTest(m)
}
func Test_DropTableColumns(t *testing.T) {
x, deferable := PrepareTestEnv(t, 0)
x, deferable := migrationtest.PrepareTestEnv(t, 0)
defer deferable()
// FIXME: this logic seems wrong. Need to add an assertion here in the future, but it seems causing failure.
if x == nil || t.Failed() {

View File

@ -1,223 +0,0 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package base
import (
"database/sql"
"fmt"
"os"
"path"
"path/filepath"
"testing"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/testlogger"
"code.gitea.io/gitea/modules/util"
"github.com/stretchr/testify/require"
"xorm.io/xorm"
"xorm.io/xorm/schemas"
)
// FIXME: this file shouldn't be in a normal package, it should only be compiled for tests
func newXORMEngine(t *testing.T) (*xorm.Engine, error) {
if err := db.InitEngine(t.Context()); err != nil {
return nil, err
}
x := unittest.GetXORMEngine()
return x, nil
}
func deleteDB() error {
switch {
case setting.Database.Type.IsSQLite3():
if err := util.Remove(setting.Database.Path); err != nil {
return err
}
return os.MkdirAll(path.Dir(setting.Database.Path), os.ModePerm)
case setting.Database.Type.IsMySQL():
db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s)/",
setting.Database.User, setting.Database.Passwd, setting.Database.Host))
if err != nil {
return err
}
defer db.Close()
if _, err = db.Exec("DROP DATABASE IF EXISTS " + setting.Database.Name); err != nil {
return err
}
if _, err = db.Exec("CREATE DATABASE IF NOT EXISTS " + setting.Database.Name); err != nil {
return err
}
return nil
case setting.Database.Type.IsPostgreSQL():
db, err := sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/?sslmode=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.SSLMode))
if err != nil {
return err
}
defer db.Close()
if _, err = db.Exec("DROP DATABASE IF EXISTS " + setting.Database.Name); err != nil {
return err
}
if _, err = db.Exec("CREATE DATABASE " + setting.Database.Name); err != nil {
return err
}
db.Close()
// Check if we need to set up a specific schema
if len(setting.Database.Schema) != 0 {
db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.Name, setting.Database.SSLMode))
if err != nil {
return err
}
defer db.Close()
schrows, err := db.Query(fmt.Sprintf("SELECT 1 FROM information_schema.schemata WHERE schema_name = '%s'", setting.Database.Schema))
if err != nil {
return err
}
defer schrows.Close()
if !schrows.Next() {
// Create and set up a DB schema
_, err = db.Exec("CREATE SCHEMA " + setting.Database.Schema)
if err != nil {
return err
}
}
// Make the user's default search path the created schema; this will affect new connections
_, err = db.Exec(fmt.Sprintf(`ALTER USER "%s" SET search_path = %s`, setting.Database.User, setting.Database.Schema))
if err != nil {
return err
}
return nil
}
case setting.Database.Type.IsMSSQL():
host, port := setting.ParseMSSQLHostPort(setting.Database.Host)
db, err := sql.Open("mssql", fmt.Sprintf("server=%s; port=%s; database=%s; user id=%s; password=%s;",
host, port, "master", setting.Database.User, setting.Database.Passwd))
if err != nil {
return err
}
defer db.Close()
if _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS [%s]", setting.Database.Name)); err != nil {
return err
}
if _, err = db.Exec(fmt.Sprintf("CREATE DATABASE [%s]", setting.Database.Name)); err != nil {
return err
}
default:
return fmt.Errorf("unsupported database type: %s", setting.Database.Type)
}
return nil
}
// PrepareTestEnv prepares the test environment and reset the database. The skip parameter should usually be 0.
// Provide models to be sync'd with the database - in particular any models you expect fixtures to be loaded from.
//
// fixtures in `models/migrations/fixtures/<TestName>` will be loaded automatically
func PrepareTestEnv(t *testing.T, skip int, syncModels ...any) (*xorm.Engine, func()) {
t.Helper()
ourSkip := 2
ourSkip += skip
deferFn := testlogger.PrintCurrentTest(t, ourSkip)
giteaRoot := setting.GetGiteaTestSourceRoot()
require.NoError(t, unittest.SyncDirs(filepath.Join(giteaRoot, "tests/gitea-repositories-meta"), setting.RepoRootPath))
if err := deleteDB(); err != nil {
t.Fatalf("unable to reset database: %v", err)
return nil, deferFn
}
x, err := newXORMEngine(t)
require.NoError(t, err)
if x != nil {
oldDefer := deferFn
deferFn = func() {
oldDefer()
if err := x.Close(); err != nil {
t.Errorf("error during close: %v", err)
}
if err := deleteDB(); err != nil {
t.Errorf("unable to reset database: %v", err)
}
}
}
if err != nil {
return x, deferFn
}
if len(syncModels) > 0 {
if err := x.Sync(syncModels...); err != nil {
t.Errorf("error during sync: %v", err)
return x, deferFn
}
}
fixturesDir := filepath.Join(giteaRoot, "models", "migrations", "fixtures", t.Name())
if _, err := os.Stat(fixturesDir); err == nil {
t.Logf("initializing fixtures from: %s", fixturesDir)
if err := unittest.InitFixtures(
unittest.FixturesOptions{
Dir: fixturesDir,
}, x); err != nil {
t.Errorf("error whilst initializing fixtures from %s: %v", fixturesDir, err)
return x, deferFn
}
if err := unittest.LoadFixtures(); err != nil {
t.Errorf("error whilst loading fixtures from %s: %v", fixturesDir, err)
return x, deferFn
}
} else if !os.IsNotExist(err) {
t.Errorf("unexpected error whilst checking for existence of fixtures: %v", err)
} else {
t.Logf("no fixtures found in: %s", fixturesDir)
}
return x, deferFn
}
func LoadTableSchemasMap(t *testing.T, x *xorm.Engine) map[string]*schemas.Table {
tables, err := x.DBMetas()
require.NoError(t, err)
tableMap := make(map[string]*schemas.Table)
for _, table := range tables {
tableMap[table.Name] = table
}
return tableMap
}
func mainTest(m *testing.M) int {
testlogger.Init()
err := setting.PrepareIntegrationTestConfig()
if err != nil {
return testlogger.MainErrorf("Unable to prepare integration test config: %v", err)
}
setting.SetupGiteaTestEnv()
if err = git.InitFull(); err != nil {
return testlogger.MainErrorf("Unable to InitFull: %v", err)
}
setting.LoadDBSetting()
setting.InitLoggersForTest()
return m.Run()
}
func MainTest(m *testing.M) {
os.Exit(mainTest(m))
}

View File

@ -0,0 +1,120 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package migrationtest
import (
"os"
"path/filepath"
"testing"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/testlogger"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"xorm.io/xorm"
"xorm.io/xorm/schemas"
)
// PrepareTestEnv prepares the test environment and reset the database. The skip parameter should usually be 0.
// Provide models to be sync'd with the database - in particular any models you expect fixtures to be loaded from.
//
// fixtures in `models/migrations/fixtures/<TestName>` will be loaded automatically
//
// The returned cleanup function must always be deferred by the caller, even
// when the returned engine is nil.
func PrepareTestEnv(t *testing.T, skip int, syncModels ...any) (*xorm.Engine, func()) {
t.Helper()
// skip adjusts the caller depth reported by the test logger
ourSkip := 2
ourSkip += skip
deferFn := testlogger.PrintCurrentTest(t, ourSkip)
giteaRoot := setting.GetGiteaTestSourceRoot()
require.NoError(t, unittest.SyncDirs(filepath.Join(giteaRoot, "tests/gitea-repositories-meta"), setting.RepoRootPath))
cleanup, err := unittest.ResetTestDatabase()
if err != nil {
t.Fatalf("unable to reset database: %v", err)
return nil, deferFn
}
// chain the database cleanup in front of the logger teardown
{
oldDefer := deferFn
deferFn = func() {
cleanup()
oldDefer()
}
}
err = db.InitEngine(t.Context())
if !assert.NoError(t, err) {
return nil, deferFn
}
x := unittest.GetXORMEngine()
// chain closing the engine before cleanup/logger teardown run
{
oldDefer := deferFn
deferFn = func() {
_ = x.Close()
oldDefer()
}
}
if len(syncModels) > 0 {
if err := x.Sync(syncModels...); err != nil {
t.Errorf("error during sync: %v", err)
return x, deferFn
}
}
// load per-test fixtures if a directory matching the test name exists
fixturesDir := filepath.Join(giteaRoot, "models", "migrations", "fixtures", t.Name())
if _, err := os.Stat(fixturesDir); err == nil {
t.Logf("initializing fixtures from: %s", fixturesDir)
if err := unittest.InitFixtures(
unittest.FixturesOptions{
Dir: fixturesDir,
}, x); err != nil {
t.Errorf("error whilst initializing fixtures from %s: %v", fixturesDir, err)
return x, deferFn
}
if err := unittest.LoadFixtures(); err != nil {
t.Errorf("error whilst loading fixtures from %s: %v", fixturesDir, err)
return x, deferFn
}
} else if !os.IsNotExist(err) {
t.Errorf("unexpected error whilst checking for existence of fixtures: %v", err)
} else {
t.Logf("no fixtures found in: %s", fixturesDir)
}
return x, deferFn
}
// LoadTableSchemasMap reads every table schema from the database and returns
// them keyed by table name.
func LoadTableSchemasMap(t *testing.T, x *xorm.Engine) map[string]*schemas.Table {
	tables, err := x.DBMetas()
	require.NoError(t, err)
	byName := make(map[string]*schemas.Table, len(tables))
	for _, tbl := range tables {
		byName[tbl.Name] = tbl
	}
	return byName
}
// mainTest performs the shared setup for migration test binaries (test logger,
// integration config, git, DB settings) and returns the process exit code.
func mainTest(m *testing.M) int {
testlogger.Init()
err := setting.PrepareIntegrationTestConfig()
if err != nil {
return testlogger.MainErrorf("Unable to prepare integration test config: %v", err)
}
setting.SetupGiteaTestEnv()
if err = git.InitFull(); err != nil {
return testlogger.MainErrorf("Unable to InitFull: %v", err)
}
setting.LoadDBSetting()
setting.InitLoggersForTest()
return m.Run()
}
// MainTest is the TestMain entry point for migration test packages; it runs
// the shared setup and exits the process with the test result code.
func MainTest(m *testing.M) {
os.Exit(mainTest(m))
}

View File

@ -6,9 +6,9 @@ package v1_14
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -6,7 +6,7 @@ package v1_14
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -47,7 +47,7 @@ func Test_RemoveInvalidLabels(t *testing.T) {
}
// load and prepare the test database
x, deferable := base.PrepareTestEnv(t, 0, new(Comment), new(Issue), new(Repository), new(IssueLabel), new(Label))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(Comment), new(Issue), new(Repository), new(IssueLabel), new(Label))
if x == nil || t.Failed() {
defer deferable()
return

View File

@ -6,7 +6,7 @@ package v1_14
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/timeutil"
"github.com/stretchr/testify/assert"
@ -34,7 +34,7 @@ func Test_DeleteOrphanedIssueLabels(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(IssueLabel), new(Label))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(IssueLabel), new(Label))
if x == nil || t.Failed() {
defer deferable()
return

View File

@ -6,9 +6,9 @@ package v1_15
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -7,7 +7,7 @@ import (
"strings"
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -20,7 +20,7 @@ func Test_AddPrimaryEmail2EmailAddress(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(User))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(User))
if x == nil || t.Failed() {
defer deferable()
return

View File

@ -6,7 +6,7 @@ package v1_15
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -20,7 +20,7 @@ func Test_AddIssueResourceIndexTable(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(Issue))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(Issue))
if x == nil || t.Failed() {
defer deferable()
return

View File

@ -6,9 +6,9 @@ package v1_16
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -6,7 +6,7 @@ package v1_16
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/json"
"github.com/stretchr/testify/assert"
@ -27,7 +27,7 @@ func (ls *LoginSourceOriginalV189) TableName() string {
func Test_UnwrapLDAPSourceCfg(t *testing.T) {
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(LoginSourceOriginalV189))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(LoginSourceOriginalV189))
if x == nil || t.Failed() {
defer deferable()
return

View File

@ -6,7 +6,7 @@ package v1_16
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -31,7 +31,7 @@ func Test_AddRepoIDForAttachment(t *testing.T) {
}
// Prepare and load the testing database
x, deferrable := base.PrepareTestEnv(t, 0, new(Attachment), new(Issue), new(Release))
x, deferrable := migrationtest.PrepareTestEnv(t, 0, new(Attachment), new(Issue), new(Release))
defer deferrable()
if x == nil || t.Failed() {
return

View File

@ -6,7 +6,7 @@ package v1_16
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -21,7 +21,7 @@ func Test_AddTableCommitStatusIndex(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(CommitStatus))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(CommitStatus))
if x == nil || t.Failed() {
defer deferable()
return

View File

@ -6,7 +6,7 @@ package v1_16
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/timeutil"
"github.com/stretchr/testify/assert"
@ -44,7 +44,7 @@ func Test_RemigrateU2FCredentials(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(WebauthnCredential), new(U2fRegistration), new(ExpectedWebauthnCredential))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(WebauthnCredential), new(U2fRegistration), new(ExpectedWebauthnCredential))
if x == nil || t.Failed() {
defer deferable()
return

View File

@ -6,9 +6,9 @@ package v1_17
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -7,7 +7,7 @@ import (
"encoding/base32"
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -38,7 +38,7 @@ func Test_StoreWebauthnCredentialIDAsBytes(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(WebauthnCredential), new(ExpectedWebauthnCredential))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(WebauthnCredential), new(ExpectedWebauthnCredential))
defer deferable()
if x == nil || t.Failed() {
return

View File

@ -6,9 +6,9 @@ package v1_18
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -7,7 +7,7 @@ import (
"testing"
"code.gitea.io/gitea/models/issues"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -16,7 +16,7 @@ func Test_UpdateOpenMilestoneCounts(t *testing.T) {
type ExpectedMilestone issues.Milestone
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(issues.Milestone), new(ExpectedMilestone), new(issues.Issue))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(issues.Milestone), new(ExpectedMilestone), new(issues.Issue))
defer deferable()
if x == nil || t.Failed() {
return

View File

@ -6,7 +6,7 @@ package v1_18
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -18,7 +18,7 @@ func Test_AddConfidentialClientColumnToOAuth2ApplicationTable(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(oauth2Application))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(oauth2Application))
defer deferable()
if x == nil || t.Failed() {
return

View File

@ -6,9 +6,9 @@ package v1_19
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -6,7 +6,7 @@ package v1_19
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/secret"
"code.gitea.io/gitea/modules/setting"
@ -39,7 +39,7 @@ func Test_AddHeaderAuthorizationEncryptedColWebhook(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(Webhook), new(ExpectedWebhook), new(HookTask))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(Webhook), new(ExpectedWebhook), new(HookTask))
defer deferable()
if x == nil || t.Failed() {
return

View File

@ -6,9 +6,9 @@ package v1_20
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -8,7 +8,7 @@ import (
"strings"
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -66,7 +66,7 @@ func Test_ConvertScopedAccessTokens(t *testing.T) {
})
}
x, deferable := base.PrepareTestEnv(t, 0, new(AccessToken))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(AccessToken))
defer deferable()
if x == nil || t.Failed() {
t.Skip()

View File

@ -6,9 +6,9 @@ package v1_21
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -6,9 +6,9 @@ package v1_22
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -6,7 +6,7 @@ package v1_22
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -21,7 +21,7 @@ func Test_AddCombinedIndexToIssueUser(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(IssueUser))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(IssueUser))
defer deferable()
assert.NoError(t, AddCombinedIndexToIssueUser(x))

View File

@ -6,7 +6,7 @@ package v1_22
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
"xorm.io/xorm"
@ -64,7 +64,7 @@ func PrepareOldRepository(t *testing.T) (*xorm.Engine, func()) {
}
// Prepare and load the testing database
return base.PrepareTestEnv(t, 0,
return migrationtest.PrepareTestEnv(t, 0,
new(Repository),
new(CommitStatus),
new(RepoArchiver),

View File

@ -7,7 +7,7 @@ import (
"strconv"
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -20,7 +20,7 @@ func Test_UpdateBadgeColName(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(Badge))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(Badge))
defer deferable()
if x == nil || t.Failed() {
return

View File

@ -6,7 +6,7 @@ package v1_22
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/models/project"
"github.com/stretchr/testify/assert"
@ -14,7 +14,7 @@ import (
func Test_CheckProjectColumnsConsistency(t *testing.T) {
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(project.Project), new(project.Column))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(project.Project), new(project.Column))
defer deferable()
if x == nil || t.Failed() {
return

View File

@ -6,7 +6,7 @@ package v1_22
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
"xorm.io/xorm/schemas"
@ -20,7 +20,7 @@ func Test_AddUniqueIndexForProjectIssue(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(ProjectIssue))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(ProjectIssue))
defer deferable()
if x == nil || t.Failed() {
return

View File

@ -6,9 +6,9 @@ package v1_23
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -6,7 +6,7 @@ package v1_23
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/timeutil"
"github.com/stretchr/testify/assert"
@ -44,7 +44,7 @@ func Test_AddIndexToActionTaskStoppedLogExpired(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(ActionTask))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(ActionTask))
defer deferable()
assert.NoError(t, AddIndexToActionTaskStoppedLogExpired(x))

View File

@ -6,7 +6,7 @@ package v1_23
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/timeutil"
"github.com/stretchr/testify/assert"
@ -33,7 +33,7 @@ func Test_AddIndexForReleaseSha1(t *testing.T) {
}
// Prepare and load the testing database
x, deferable := base.PrepareTestEnv(t, 0, new(Release))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(Release))
defer deferable()
assert.NoError(t, AddIndexForReleaseSha1(x))

View File

@ -6,9 +6,9 @@ package v1_25
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -6,7 +6,7 @@ package v1_25
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/timeutil"
@ -44,12 +44,12 @@ func Test_UseLongTextInSomeColumnsAndFixBugs(t *testing.T) {
}
// Prepare and load the testing database
x, deferrable := base.PrepareTestEnv(t, 0, new(ReviewState), new(PackageProperty), new(Notice))
x, deferrable := migrationtest.PrepareTestEnv(t, 0, new(ReviewState), new(PackageProperty), new(Notice))
defer deferrable()
require.NoError(t, UseLongTextInSomeColumnsAndFixBugs(x))
tables := base.LoadTableSchemasMap(t, x)
tables := migrationtest.LoadTableSchemasMap(t, x)
table := tables["review_state"]
column := table.GetColumn("updated_files")
assert.Equal(t, "LONGTEXT", column.SQLType.Name)

View File

@ -6,7 +6,7 @@ package v1_25
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/setting"
"github.com/stretchr/testify/assert"
@ -23,11 +23,11 @@ func Test_ExtendCommentTreePathLength(t *testing.T) {
TreePath string `xorm:"VARCHAR(255)"`
}
x, deferrable := base.PrepareTestEnv(t, 0, new(Comment))
x, deferrable := migrationtest.PrepareTestEnv(t, 0, new(Comment))
defer deferrable()
require.NoError(t, ExtendCommentTreePathLength(x))
table := base.LoadTableSchemasMap(t, x)["comment"]
table := migrationtest.LoadTableSchemasMap(t, x)["comment"]
column := table.GetColumn("tree_path")
assert.Contains(t, []string{"NVARCHAR", "VARCHAR"}, column.SQLType.Name)
assert.EqualValues(t, 4000, column.Length)

View File

@ -6,9 +6,9 @@ package v1_26
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -6,7 +6,7 @@ package v1_26
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/timeutil"
"github.com/stretchr/testify/require"
@ -38,7 +38,7 @@ func Test_FixMissedRepoIDWhenMigrateAttachments(t *testing.T) {
}
// Prepare and load the testing database
x, deferrable := base.PrepareTestEnv(t, 0, new(Attachment), new(Issue), new(Release))
x, deferrable := migrationtest.PrepareTestEnv(t, 0, new(Attachment), new(Issue), new(Release))
defer deferrable()
require.NoError(t, FixMissedRepoIDWhenMigrateAttachments(x))

View File

@ -6,7 +6,7 @@ package v1_26
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/test"
@ -57,7 +57,7 @@ func Test_FixCommitStatusTargetURLToUseRunAndJobID(t *testing.T) {
TargetURL string
}
x, deferable := base.PrepareTestEnv(t, 0,
x, deferable := migrationtest.PrepareTestEnv(t, 0,
new(Repository),
new(ActionRun),
new(ActionRunJob),

View File

@ -6,7 +6,7 @@ package v1_26
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/require"
)
@ -17,7 +17,7 @@ func Test_AddDisabledToActionRunner(t *testing.T) {
Name string
}
x, deferable := base.PrepareTestEnv(t, 0, new(ActionRunner))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(ActionRunner))
defer deferable()
_, err := x.Insert(&ActionRunner{Name: "runner"})

View File

@ -6,7 +6,7 @@ package v1_26
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
)
@ -22,7 +22,7 @@ func (UserBadgeBefore) TableName() string {
}
func Test_AddUniqueIndexForUserBadge(t *testing.T) {
x, deferable := base.PrepareTestEnv(t, 0, new(UserBadgeBefore))
x, deferable := migrationtest.PrepareTestEnv(t, 0, new(UserBadgeBefore))
defer deferable()
if x == nil || t.Failed() {
return

View File

@ -6,9 +6,9 @@ package v1_27
import (
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
)
func TestMain(m *testing.M) {
base.MainTest(m)
migrationtest.MainTest(m)
}

View File

@ -8,7 +8,7 @@ import (
"slices"
"testing"
"code.gitea.io/gitea/models/migrations/base"
"code.gitea.io/gitea/models/migrations/migrationtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -49,7 +49,7 @@ func (actionArtifactBeforeV331) TableName() string {
}
func Test_AddActionRunAttemptModel(t *testing.T) {
x, deferable := base.PrepareTestEnv(t, 0,
x, deferable := migrationtest.PrepareTestEnv(t, 0,
new(actionRunBeforeV331),
new(actionRunJobBeforeV331),
new(actionArtifactBeforeV331),
@ -69,7 +69,7 @@ func Test_AddActionRunAttemptModel(t *testing.T) {
require.NoError(t, AddActionRunAttemptModel(x))
tableMap := base.LoadTableSchemasMap(t, x)
tableMap := migrationtest.LoadTableSchemasMap(t, x)
attemptTable := tableMap["action_run_attempt"]
require.NotNil(t, attemptTable)

View File

@ -5,6 +5,8 @@ package unittest
import (
"context"
"database/sql"
"errors"
"fmt"
"os"
"path/filepath"
@ -102,6 +104,101 @@ func mainTest(m *testing.M, testOptsArg ...*TestOptions) int {
return exitStatus
}
func ResetTestDatabase() (cleanup func(), err error) {
defer func() {
if cleanup == nil {
cleanup = func() {}
}
}()
connOpts := db.GlobalConnOptions()
driverDefault, connStrDefault, err := db.ConnStrDefaultDatabase(connOpts)
if err != nil {
return nil, err
}
driverDatabase, connStrDatabase, err := db.ConnStr(connOpts)
if err != nil {
return nil, err
}
if connOpts.Type.IsSQLite3() {
if !strings.HasSuffix(connOpts.SQLitePath, "-test.db") {
return nil, errors.New(`testing database file for sqlite3 must end in "-test.db"`)
}
_ = os.Remove(connOpts.SQLitePath)
err = os.MkdirAll(filepath.Dir(connOpts.SQLitePath), os.ModePerm)
if err != nil {
return nil, err
}
cleanup = func() {
_ = os.Remove(connOpts.SQLitePath)
_ = os.Remove(filepath.Dir(connOpts.SQLitePath))
}
return cleanup, nil
}
if !strings.Contains(connOpts.Database, "test") {
return nil, fmt.Errorf(`testing database name for %s must contain "test"`, connOpts.Database)
}
quotedDbName := connOpts.Database
if connOpts.Type.IsMSSQL() {
quotedDbName = `[` + connOpts.Database + `]`
}
sqlExec := func(sqlDB *sql.DB, sql string) error {
_, err := sqlDB.Exec(sql)
if err != nil {
return fmt.Errorf("failed to execute SQL %q: %w", sql, err)
}
return nil
}
createDatabase := func() error {
sqlDB, err := sql.Open(driverDefault, connStrDefault)
if err != nil {
return err
}
defer sqlDB.Close()
if err = sqlExec(sqlDB, "DROP DATABASE IF EXISTS "+quotedDbName); err != nil {
return err
}
return sqlExec(sqlDB, "CREATE DATABASE "+quotedDbName)
}
if err = createDatabase(); err != nil {
return nil, err
}
cleanup = func() {
sqlDB, err := sql.Open(driverDefault, connStrDefault)
if err != nil {
return
}
defer sqlDB.Close()
_, _ = sqlDB.Exec("DROP DATABASE IF EXISTS " + quotedDbName)
}
createDatabaseSchema := func() error {
if !connOpts.Type.IsPostgreSQL() {
return nil
}
if connOpts.Schema == "" {
return nil
}
sqlDB, err := sql.Open(driverDatabase, connStrDatabase)
if err != nil {
return err
}
defer sqlDB.Close()
if err = sqlExec(sqlDB, "DROP SCHEMA IF EXISTS "+connOpts.Schema); err != nil {
return err
}
return sqlExec(sqlDB, "CREATE SCHEMA "+connOpts.Schema)
}
return cleanup, createDatabaseSchema()
}
// FixturesOptions fixtures needs to be loaded options
type FixturesOptions struct {
Dir string
@ -110,11 +207,12 @@ type FixturesOptions struct {
// CreateTestEngine creates a memory database and loads the fixture data from fixturesDir
func CreateTestEngine(opts FixturesOptions) error {
x, err := xorm.NewEngine("sqlite3", "file::memory:?cache=shared&_txlock=immediate")
driver, connStr, err := db.ConnStr(db.ConnOptions{Type: "sqlite3", SQLitePath: ":memory:"})
if err != nil {
return err
}
x, err := xorm.NewEngine(driver, connStr)
if err != nil {
if strings.Contains(err.Error(), "unknown driver") {
return fmt.Errorf("sqlite3 requires: -tags sqlite,sqlite_unlock_notify\n%w", err)
}
return err
}
x.SetMapper(names.GonicMapper{})

View File

@ -18,8 +18,7 @@ import (
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/indexer"
"code.gitea.io/gitea/modules/indexer/code/internal"
indexer_internal "code.gitea.io/gitea/modules/indexer/internal"
inner_elasticsearch "code.gitea.io/gitea/modules/indexer/internal/elasticsearch"
es "code.gitea.io/gitea/modules/indexer/internal/elasticsearch"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
@ -28,23 +27,15 @@ import (
"code.gitea.io/gitea/modules/util"
"github.com/go-enry/go-enry/v2"
"github.com/olivere/elastic/v7"
)
const (
esRepoIndexerLatestVersion = 3
// multi-match-types, currently only 2 types are used
// Reference: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-multi-match-query.html#multi-match-types
esMultiMatchTypeBestFields = "best_fields"
esMultiMatchTypePhrasePrefix = "phrase_prefix"
)
const esRepoIndexerLatestVersion = 3
var _ internal.Indexer = &Indexer{}
// Indexer implements Indexer interface
type Indexer struct {
inner *inner_elasticsearch.Indexer
indexer_internal.Indexer // do not composite inner_elasticsearch.Indexer directly to avoid exposing too much
*es.Indexer
}
func (b *Indexer) SupportedSearchModes() []indexer.SearchMode {
@ -53,12 +44,7 @@ func (b *Indexer) SupportedSearchModes() []indexer.SearchMode {
// NewIndexer creates a new elasticsearch indexer
func NewIndexer(url, indexerName string) *Indexer {
inner := inner_elasticsearch.NewIndexer(url, indexerName, esRepoIndexerLatestVersion, defaultMapping)
indexer := &Indexer{
inner: inner,
Indexer: inner,
}
return indexer
return &Indexer{Indexer: es.NewIndexer(url, indexerName, esRepoIndexerLatestVersion, defaultMapping)}
}
const (
@ -138,7 +124,7 @@ const (
}`
)
func (b *Indexer) addUpdate(ctx context.Context, catFileBatch git.CatFileBatch, sha string, update internal.FileUpdate, repo *repo_model.Repository) ([]elastic.BulkableRequest, error) {
func (b *Indexer) addUpdate(ctx context.Context, catFileBatch git.CatFileBatch, sha string, update internal.FileUpdate, repo *repo_model.Repository) ([]es.BulkOp, error) {
// Ignore vendored files in code search
if setting.Indexer.ExcludeVendored && analyze.IsVendor(update.Filename) {
return nil, nil
@ -157,8 +143,9 @@ func (b *Indexer) addUpdate(ctx context.Context, catFileBatch git.CatFileBatch,
}
}
id := internal.FilenameIndexerID(repo.ID, update.Filename)
if size > setting.Indexer.MaxIndexerFileSize {
return []elastic.BulkableRequest{b.addDelete(update.Filename, repo)}, nil
return []es.BulkOp{es.DeleteOp(id)}, nil
}
info, batchReader, err := catFileBatch.QueryContent(update.BlobSha)
@ -177,33 +164,24 @@ func (b *Indexer) addUpdate(ctx context.Context, catFileBatch git.CatFileBatch,
if _, err = batchReader.Discard(1); err != nil {
return nil, err
}
id := internal.FilenameIndexerID(repo.ID, update.Filename)
return []elastic.BulkableRequest{
elastic.NewBulkIndexRequest().
Index(b.inner.VersionedIndexName()).
Id(id).
Doc(map[string]any{
"repo_id": repo.ID,
"filename": update.Filename,
"content": string(charset.ToUTF8DropErrors(fileContents)),
"commit_id": sha,
"language": analyze.GetCodeLanguage(update.Filename, fileContents),
"updated_at": timeutil.TimeStampNow(),
}),
}, nil
return []es.BulkOp{es.IndexOp(id, map[string]any{
"repo_id": repo.ID,
"filename": update.Filename,
"content": string(charset.ToUTF8DropErrors(fileContents)),
"commit_id": sha,
"language": analyze.GetCodeLanguage(update.Filename, fileContents),
"updated_at": timeutil.TimeStampNow(),
})}, nil
}
func (b *Indexer) addDelete(filename string, repo *repo_model.Repository) elastic.BulkableRequest {
id := internal.FilenameIndexerID(repo.ID, filename)
return elastic.NewBulkDeleteRequest().
Index(b.inner.VersionedIndexName()).
Id(id)
func (b *Indexer) addDelete(filename string, repo *repo_model.Repository) es.BulkOp {
return es.DeleteOp(internal.FilenameIndexerID(repo.ID, filename))
}
// Index will save the index data
func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *internal.RepoChanges) error {
reqs := make([]elastic.BulkableRequest, 0)
ops := make([]es.BulkOp, 0)
if len(changes.Updates) > 0 {
batch, err := gitrepo.NewBatch(ctx, repo)
if err != nil {
@ -212,29 +190,25 @@ func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha st
defer batch.Close()
for _, update := range changes.Updates {
updateReqs, err := b.addUpdate(ctx, batch, sha, update, repo)
updateOps, err := b.addUpdate(ctx, batch, sha, update, repo)
if err != nil {
return err
}
if len(updateReqs) > 0 {
reqs = append(reqs, updateReqs...)
if len(updateOps) > 0 {
ops = append(ops, updateOps...)
}
}
}
for _, filename := range changes.RemovedFilenames {
reqs = append(reqs, b.addDelete(filename, repo))
ops = append(ops, b.addDelete(filename, repo))
}
if len(reqs) > 0 {
if len(ops) > 0 {
esBatchSize := 50
for i := 0; i < len(reqs); i += esBatchSize {
_, err := b.inner.Client.Bulk().
Index(b.inner.VersionedIndexName()).
Add(reqs[i:min(i+esBatchSize, len(reqs))]...).
Do(ctx)
if err != nil {
for i := 0; i < len(ops); i += esBatchSize {
if err := b.Bulk(ctx, ops[i:min(i+esBatchSize, len(ops))]); err != nil {
return err
}
}
@ -246,33 +220,21 @@ func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha st
func (b *Indexer) Delete(ctx context.Context, repoID int64) error {
if err := b.doDelete(ctx, repoID); err != nil {
// Maybe there is a conflict during the delete operation, so we should retry after a refresh
log.Warn("Deletion of entries of repo %v within index %v was erroneous. Trying to refresh index before trying again", repoID, b.inner.VersionedIndexName(), err)
if err := b.refreshIndex(ctx); err != nil {
log.Warn("Deletion of entries of repo %v within index %v was erroneous: %v. Trying to refresh index before trying again", repoID, b.VersionedIndexName(), err)
if err := b.Refresh(ctx); err != nil {
return err
}
if err := b.doDelete(ctx, repoID); err != nil {
log.Error("Could not delete entries of repo %v within index %v", repoID, b.inner.VersionedIndexName())
log.Error("Could not delete entries of repo %v within index %v", repoID, b.VersionedIndexName())
return err
}
}
return nil
}
func (b *Indexer) refreshIndex(ctx context.Context) error {
if _, err := b.inner.Client.Refresh(b.inner.VersionedIndexName()).Do(ctx); err != nil {
log.Error("Error while trying to refresh index %v", b.inner.VersionedIndexName(), err)
return err
}
return nil
}
// Delete entries by repoId
func (b *Indexer) doDelete(ctx context.Context, repoID int64) error {
_, err := b.inner.Client.DeleteByQuery(b.inner.VersionedIndexName()).
Query(elastic.NewTermsQuery("repo_id", repoID)).
Do(ctx)
return err
return b.DeleteByQuery(ctx, es.TermsQuery("repo_id", repoID))
}
// contentMatchIndexPos find words positions for start and the following end on content. It will
@ -291,10 +253,10 @@ func contentMatchIndexPos(content, start, end string) (int, int) {
return startIdx, (startIdx + len(start) + endIdx + len(end)) - 9 // remove the length <em></em> since we give Content the original data
}
func convertResult(searchResult *elastic.SearchResult, kw string, pageSize int) (int64, []*internal.SearchResult, []*internal.SearchResultLanguages, error) {
func convertResult(searchResult *es.SearchResponse, kw string, pageSize int) (int64, []*internal.SearchResult, []*internal.SearchResultLanguages, error) {
hits := make([]*internal.SearchResult, 0, pageSize)
for _, hit := range searchResult.Hits.Hits {
repoID, fileName := internal.ParseIndexerID(hit.Id)
for _, hit := range searchResult.Hits {
repoID, fileName := internal.ParseIndexerID(hit.ID)
res := make(map[string]any)
if err := json.Unmarshal(hit.Source, &res); err != nil {
return 0, nil, nil, err
@ -333,111 +295,111 @@ func convertResult(searchResult *elastic.SearchResult, kw string, pageSize int)
})
}
return searchResult.TotalHits(), hits, extractAggs(searchResult), nil
return searchResult.Total, hits, extractAggs(searchResult), nil
}
func extractAggs(searchResult *elastic.SearchResult) []*internal.SearchResultLanguages {
var searchResultLanguages []*internal.SearchResultLanguages
agg, found := searchResult.Aggregations.Terms("language")
if found {
searchResultLanguages = make([]*internal.SearchResultLanguages, 0, 10)
for _, bucket := range agg.Buckets {
searchResultLanguages = append(searchResultLanguages, &internal.SearchResultLanguages{
Language: bucket.Key.(string),
Color: enry.GetColor(bucket.Key.(string)),
Count: int(bucket.DocCount),
})
func extractAggs(searchResult *es.SearchResponse) []*internal.SearchResultLanguages {
buckets, found := searchResult.Aggregations["language"]
if !found {
return nil
}
searchResultLanguages := make([]*internal.SearchResultLanguages, 0, 10)
for _, bucket := range buckets {
// language is mapped as keyword so the key is always a string; if the
// mapping ever changes, skip rather than emit an empty-language bucket.
key, ok := bucket.Key.(string)
if !ok {
continue
}
searchResultLanguages = append(searchResultLanguages, &internal.SearchResultLanguages{
Language: key,
Color: enry.GetColor(key),
Count: int(bucket.DocCount),
})
}
return searchResultLanguages
}
// Search searches for codes and language stats by given conditions.
func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int64, []*internal.SearchResult, []*internal.SearchResultLanguages, error) {
var contentQuery elastic.Query
searchMode := util.IfZero(opts.SearchMode, b.SupportedSearchModes()[0].ModeValue)
contentQuery := es.Query(es.NewMultiMatchQuery(opts.Keyword, "content").Type(es.MultiMatchTypeBestFields).Operator("and"))
if searchMode == indexer.SearchModeExact {
// 1.21 used NewMultiMatchQuery().Type(esMultiMatchTypePhrasePrefix), but later releases changed to NewMatchPhraseQuery
contentQuery = elastic.NewMatchPhraseQuery("content", opts.Keyword)
} else /* words */ {
contentQuery = elastic.NewMultiMatchQuery("content", opts.Keyword).Type(esMultiMatchTypeBestFields).Operator("and")
contentQuery = es.MatchPhraseQuery("content", opts.Keyword)
}
kwQuery := elastic.NewBoolQuery().Should(
kwQuery := es.NewBoolQuery().Should(
contentQuery,
elastic.NewMultiMatchQuery(opts.Keyword, "filename^10").Type(esMultiMatchTypePhrasePrefix),
es.NewMultiMatchQuery(opts.Keyword, "filename^10").Type(es.MultiMatchTypePhrasePrefix),
)
query := elastic.NewBoolQuery()
query = query.Must(kwQuery)
query := es.NewBoolQuery().Must(kwQuery)
if len(opts.RepoIDs) > 0 {
repoStrs := make([]any, 0, len(opts.RepoIDs))
for _, repoID := range opts.RepoIDs {
repoStrs = append(repoStrs, repoID)
}
repoQuery := elastic.NewTermsQuery("repo_id", repoStrs...)
query = query.Must(repoQuery)
query.Must(es.TermsQuery("repo_id", es.ToAnySlice(opts.RepoIDs)...))
}
var (
start, pageSize = opts.GetSkipTake()
kw = "<em>" + opts.Keyword + "</em>"
aggregation = elastic.NewTermsAggregation().Field("language").Size(10).OrderByCountDesc()
)
start, pageSize := opts.GetSkipTake()
kw := "<em>" + opts.Keyword + "</em>"
languageAggs := map[string]any{
"language": map[string]any{
"terms": map[string]any{
"field": "language",
"size": 10,
"order": map[string]any{"_count": "desc"},
},
},
}
// number_of_fragments=0 returns the full highlighted content (no fragmentation).
highlight := map[string]any{
"fields": map[string]any{
"content": map[string]any{},
"filename": map[string]any{},
},
"number_of_fragments": 0,
"type": "fvh",
}
sort := []es.SortField{
{Field: "_score", Desc: true},
{Field: "updated_at", Desc: false},
}
if len(opts.Language) == 0 {
searchResult, err := b.inner.Client.Search().
Index(b.inner.VersionedIndexName()).
Aggregation("language", aggregation).
Query(query).
Highlight(
elastic.NewHighlight().
Field("content").
Field("filename").
NumOfFragments(0). // return all highlighting content on fragments
HighlighterType("fvh"),
).
Sort("_score", false).
Sort("updated_at", true).
From(start).Size(pageSize).
Do(ctx)
resp, err := b.Indexer.Search(ctx, es.SearchRequest{
Query: query,
Sort: sort,
From: start,
Size: pageSize,
TrackTotal: true,
Aggregations: languageAggs,
Highlight: highlight,
})
if err != nil {
return 0, nil, nil, err
}
return convertResult(searchResult, kw, pageSize)
return convertResult(resp, kw, pageSize)
}
langQuery := elastic.NewMatchQuery("language", opts.Language)
countResult, err := b.inner.Client.Search().
Index(b.inner.VersionedIndexName()).
Aggregation("language", aggregation).
Query(query).
Size(0). // We only need stats information
Do(ctx)
countResp, err := b.Indexer.Search(ctx, es.SearchRequest{
Query: query,
Size: 0, // stats only
TrackTotal: true,
Aggregations: languageAggs,
})
if err != nil {
return 0, nil, nil, err
}
query = query.Must(langQuery)
searchResult, err := b.inner.Client.Search().
Index(b.inner.VersionedIndexName()).
Query(query).
Highlight(
elastic.NewHighlight().
Field("content").
Field("filename").
NumOfFragments(0). // return all highlighting content on fragments
HighlighterType("fvh"),
).
Sort("_score", false).
Sort("updated_at", true).
From(start).Size(pageSize).
Do(ctx)
query.Must(es.MatchQuery("language", opts.Language))
resp, err := b.Indexer.Search(ctx, es.SearchRequest{
Query: query,
Sort: sort,
From: start,
Size: pageSize,
TrackTotal: true,
Highlight: highlight,
})
if err != nil {
return 0, nil, nil, err
}
total, hits, _, err := convertResult(searchResult, kw, pageSize)
return total, hits, extractAggs(countResult), err
total, hits, _, err := convertResult(resp, kw, pageSize)
return total, hits, extractAggs(countResp), err
}

View File

@ -8,6 +8,7 @@ import (
"os"
"slices"
"testing"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
@ -39,6 +40,16 @@ func TestMain(m *testing.M) {
func testIndexer(name string, t *testing.T, indexer internal.Indexer) {
t.Run(name, func(t *testing.T) {
assert.NoError(t, setupRepositoryIndexes(t.Context(), indexer))
// Wait for the index to catch up: ES/OpenSearch make writes visible
// only after a refresh (default interval: 1s). Bleve is synchronous
// and passes on the first iteration.
require.Eventually(t, func() bool {
total, _, _, err := indexer.Search(t.Context(), &internal.SearchOptions{
Keyword: "Description",
Paginator: &db.ListOptions{Page: 1, PageSize: 1},
})
return err == nil && total > 0
}, 10*time.Second, 100*time.Millisecond, "index did not become searchable")
keywords := []struct {
RepoIDs []int64

View File

@ -4,52 +4,80 @@
package elasticsearch
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"slices"
"strconv"
"strings"
"time"
"code.gitea.io/gitea/modules/indexer/internal"
"github.com/olivere/elastic/v7"
"code.gitea.io/gitea/modules/json"
)
var _ internal.Indexer = &Indexer{}
// Indexer represents a basic elasticsearch indexer implementation
// Indexer is a narrow wrapper around an Elasticsearch/OpenSearch cluster.
// It targets the REST subset shared by Elasticsearch 7/8/9 and OpenSearch 3.
type Indexer struct {
Client *elastic.Client
client *http.Client
base string // base URL with trailing slash, no userinfo
user string
pass string
url string
indexName string
version int
mapping string
}
func NewIndexer(url, indexName string, version int, mapping string) *Indexer {
// NewIndexer builds an Indexer. The connection is opened by Init.
func NewIndexer(rawURL, indexName string, version int, mapping string) *Indexer {
return &Indexer{
url: url,
base: rawURL,
indexName: indexName,
version: version,
mapping: mapping,
}
}
// Init initializes the indexer
// Init connects and creates the versioned index if missing, returning true if it already existed.
func (i *Indexer) Init(ctx context.Context) (bool, error) {
if i == nil {
return false, errors.New("cannot init nil indexer")
}
if i.Client != nil {
return false, errors.New("indexer is already initialized")
}
client, err := i.initClient()
parsed, err := url.Parse(i.base)
if err != nil {
return false, err
return false, fmt.Errorf("parse elasticsearch url: %w", err)
}
if parsed.User != nil {
i.user = parsed.User.Username()
i.pass, _ = parsed.User.Password()
parsed.User = nil
}
base := parsed.String()
if !strings.HasSuffix(base, "/") {
base += "/"
}
i.base = base
// No client-level Timeout: bulk/_delete_by_query can legitimately run for
// minutes on large repos. Per-request deadlines come from the caller's ctx;
// transport-level timeouts cover stalled connects/handshakes/headers so a
// half-open server cannot wedge the indexer indefinitely.
i.client = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 30 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
IdleConnTimeout: 90 * time.Second,
MaxIdleConns: 100,
},
}
i.Client = client
exists, err := i.Client.IndexExists(i.VersionedIndexName()).Do(ctx)
exists, err := i.indexExists(ctx, i.VersionedIndexName())
if err != nil {
return false, err
}
@ -61,34 +89,321 @@ func (i *Indexer) Init(ctx context.Context) (bool, error) {
return false, err
}
return exists, nil
return false, nil
}
// Ping checks if the indexer is available
// Ping returns an error when the cluster is unusable (status != green/yellow).
func (i *Indexer) Ping(ctx context.Context) error {
if i == nil {
return errors.New("cannot ping nil indexer")
var body struct {
Status string `json:"status"`
}
if i.Client == nil {
return errors.New("indexer is not initialized")
}
resp, err := i.Client.ClusterHealth().Do(ctx)
if err != nil {
if err := i.doJSON(ctx, http.MethodGet, "_cluster/health", nil, &body); err != nil {
return err
}
if resp.Status != "green" && resp.Status != "yellow" {
// It's healthy if the status is green, and it's available if the status is yellow,
// see https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
return fmt.Errorf("status of elasticsearch cluster is %s", resp.Status)
// Healthy = green; usable = yellow. Red is unusable.
// https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
if body.Status != "green" && body.Status != "yellow" {
return fmt.Errorf("status of elasticsearch cluster is %s", body.Status)
}
return nil
}
// Close closes the indexer
// Close releases idle HTTP connections held by the client.
func (i *Indexer) Close() {
if i == nil {
if i == nil || i.client == nil {
return
}
i.Client = nil
i.client.CloseIdleConnections()
i.client = nil
}
// Bulk submits index/delete ops in one request. Returns the first
// item-level failure, if any; an empty ops slice is a no-op.
func (i *Indexer) Bulk(ctx context.Context, ops []BulkOp) error {
	if len(ops) == 0 {
		return nil
	}
	index := i.VersionedIndexName()
	var buf bytes.Buffer
	// Rough pre-sizing to avoid repeated growth; 256 bytes per op is a guess,
	// not a limit.
	buf.Grow(len(ops) * 256)
	for _, op := range ops {
		// NDJSON framing: one action-metadata line per op, followed by the
		// document line for index ops only (delete ops have no body line).
		meta := map[string]any{op.action: map[string]any{"_index": index, "_id": op.id}}
		if err := writeJSONLine(&buf, meta); err != nil {
			return err
		}
		if op.action == bulkActionIndex {
			if err := writeJSONLine(&buf, op.doc); err != nil {
				return err
			}
		}
	}
	res, err := i.do(ctx, http.MethodPost, urlPath(index, "_bulk"), "application/x-ndjson", bytes.NewReader(buf.Bytes()))
	if err != nil {
		return err
	}
	defer drainAndClose(res)
	// Decode only what we need: the top-level errors flag plus per-item status.
	var body struct {
		Errors bool `json:"errors"`
		Items  []map[string]struct {
			Status int        `json:"status"`
			Error  json.Value `json:"error"`
		} `json:"items"`
	}
	if err := json.NewDecoder(res.Body).Decode(&body); err != nil {
		return err
	}
	// errors=false means every item succeeded; skip the per-item scan.
	if !body.Errors {
		return nil
	}
	return firstBulkError(body.Items)
}
// firstBulkError returns the first item-level failure in a bulk response.
// Each items entry is a single-key map ({"index": {...}} or {"delete": {...}})
// whose key names the action the result belongs to.
// Delete-of-missing (404) is idempotent and not reported as a failure.
func firstBulkError(items []map[string]struct {
	Status int        `json:"status"`
	Error  json.Value `json:"error"`
},
) error {
	for _, item := range items {
		// Each map holds exactly one entry; range is the simplest way to get it.
		for action, result := range item {
			if action == bulkActionDelete && result.Status == http.StatusNotFound {
				continue
			}
			// Any non-2xx item status (other than the whitelisted delete-404)
			// is surfaced with the raw error JSON for diagnostics.
			if result.Status >= 300 {
				return fmt.Errorf("bulk %s failed (status %d): %s", action, result.Status, string(result.Error))
			}
		}
	}
	return nil
}
// Index writes a single document under the given id, creating or replacing it.
func (i *Indexer) Index(ctx context.Context, id string, doc any) error {
	payload, err := json.Marshal(doc)
	if err != nil {
		return err
	}
	path := urlPath(i.VersionedIndexName(), "_doc", id)
	return i.doJSON(ctx, http.MethodPut, path, bytes.NewReader(payload), nil)
}
// Delete removes a single document by id. Missing ids are not an error.
func (i *Indexer) Delete(ctx context.Context, id string) error {
	path := urlPath(i.VersionedIndexName(), "_doc", id)
	// 404 is whitelisted: deleting an already-absent document is idempotent.
	res, err := i.do(ctx, http.MethodDelete, path, "", nil, http.StatusNotFound)
	if err != nil {
		return err
	}
	drainAndClose(res)
	return nil
}
// DeleteByQuery removes every document matching the query.
func (i *Indexer) DeleteByQuery(ctx context.Context, query Query) error {
	payload, err := json.Marshal(map[string]any{"query": query.querySource()})
	if err != nil {
		return err
	}
	path := urlPath(i.VersionedIndexName(), "_delete_by_query")
	return i.doJSON(ctx, http.MethodPost, path, bytes.NewReader(payload), nil)
}
// Refresh forces an index refresh so that recent writes become searchable.
func (i *Indexer) Refresh(ctx context.Context) error {
	path := urlPath(i.VersionedIndexName(), "_refresh")
	return i.doJSON(ctx, http.MethodPost, path, nil, nil)
}
// Search runs a search request against the versioned index and decodes the
// reply into the package's neutral SearchResponse.
//
// Only the request fields that are set get serialized, keeping the body
// minimal. Size is always sent because 0 is meaningful (stats-only queries
// that collect aggregations but no hits).
func (i *Indexer) Search(ctx context.Context, req SearchRequest) (*SearchResponse, error) {
	body := map[string]any{}
	if req.Query != nil {
		body["query"] = req.Query.querySource()
	}
	if len(req.Sort) > 0 {
		sorts := make([]map[string]any, len(req.Sort))
		for idx, s := range req.Sort {
			sorts[idx] = s.source()
		}
		body["sort"] = sorts
	}
	if req.From > 0 {
		body["from"] = req.From
	}
	body["size"] = req.Size
	if len(req.Aggregations) > 0 {
		body["aggs"] = req.Aggregations
	}
	if len(req.Highlight) > 0 {
		body["highlight"] = req.Highlight
	}
	payload, err := json.Marshal(body)
	if err != nil {
		return nil, err
	}
	// Default track_total_hits is 10000 (capped count); send it explicitly so
	// callers can choose between exact totals (true) and skipping counting (false).
	path := urlPath(i.VersionedIndexName(), "_search") + "?track_total_hits=" + strconv.FormatBool(req.TrackTotal)
	res, err := i.do(ctx, http.MethodPost, path, "application/json", bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	defer drainAndClose(res)
	return decodeSearchResponse(res.Body)
}
// indexExists reports whether the named index is present (HEAD /<name>).
// A 404 is an expected answer, not an error.
func (i *Indexer) indexExists(ctx context.Context, name string) (bool, error) {
	res, err := i.do(ctx, http.MethodHead, urlPath(name), "", nil, http.StatusNotFound)
	if err != nil {
		return false, err
	}
	drainAndClose(res)
	exists := res.StatusCode == http.StatusOK
	return exists, nil
}
// createIndex creates the versioned index with the configured mapping and
// then warns (without deleting) about indexes left over from older schema
// versions.
func (i *Indexer) createIndex(ctx context.Context) error {
	var body struct {
		Acknowledged bool `json:"acknowledged"`
	}
	if err := i.doJSON(ctx, http.MethodPut, urlPath(i.VersionedIndexName()), bytes.NewBufferString(i.mapping), &body); err != nil {
		return fmt.Errorf("create index %s: %w", i.VersionedIndexName(), err)
	}
	// Treat a reply without "acknowledged": true as a failure.
	if !body.Acknowledged {
		return fmt.Errorf("create index %s not acknowledged", i.VersionedIndexName())
	}
	i.checkOldIndexes(ctx)
	return nil
}
// do sends a request and returns the response. Status >= 300 is turned into
// an error unless the status appears in okStatus. The caller closes Body
// (success path only; on error-status the body is consumed and closed here).
func (i *Indexer) do(ctx context.Context, method, path, contentType string, body io.Reader, okStatus ...int) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, method, i.base+path, body)
	if err != nil {
		return nil, err
	}
	if contentType != "" {
		req.Header.Set("Content-Type", contentType)
	}
	// Credentials stripped off the base URL at Init time are re-applied here
	// as a standard Basic auth header.
	if i.user != "" || i.pass != "" {
		req.SetBasicAuth(i.user, i.pass)
	}
	res, err := i.client.Do(req)
	if err != nil {
		return nil, err
	}
	if res.StatusCode >= 300 && !slices.Contains(okStatus, res.StatusCode) {
		// readErrBody captures a snippet for the error message and drains the
		// remainder so the keep-alive connection stays reusable.
		msg := readErrBody(res)
		res.Body.Close()
		return nil, fmt.Errorf("%s %s: %s", method, path, msg)
	}
	return res, nil
}
// doJSON sends a request whose body (when present) is JSON and, when out is
// non-nil, decodes the JSON response into it.
func (i *Indexer) doJSON(ctx context.Context, method, path string, body io.Reader, out any) error {
	var contentType string
	if body != nil {
		contentType = "application/json"
	}
	res, err := i.do(ctx, method, path, contentType, body)
	if err != nil {
		return err
	}
	defer drainAndClose(res)
	if out != nil {
		return json.NewDecoder(res.Body).Decode(out)
	}
	return nil
}
// drainAndClose discards any unread response body before closing so the
// underlying TCP connection can be reused for keep-alive.
func drainAndClose(res *http.Response) {
_, _ = io.Copy(io.Discard, res.Body)
res.Body.Close()
}
// writeJSONLine appends v as one JSON document followed by a newline — the
// NDJSON framing the _bulk endpoint expects.
func writeJSONLine(buf *bytes.Buffer, v any) error {
	encoded, err := json.Marshal(v)
	if err != nil {
		return err
	}
	_, _ = buf.Write(encoded)
	_ = buf.WriteByte('\n')
	return nil
}
// readErrBody reads up to 4 KiB of an error response and drains the rest so
// the underlying connection can be reused (keep-alive needs Body fully read).
func readErrBody(res *http.Response) string {
const limit = 4 << 10
b, _ := io.ReadAll(io.LimitReader(res.Body, limit))
_, _ = io.Copy(io.Discard, res.Body)
return fmt.Sprintf("status %d: %s", res.StatusCode, bytes.TrimSpace(b))
}
// decodeSearchResponse decodes the JSON body of a _search reply into the
// package's neutral SearchResponse representation.
func decodeSearchResponse(r io.Reader) (*SearchResponse, error) {
	// raw mirrors only the parts of the reply Gitea consumes; every other
	// field in the payload is ignored by the decoder.
	var raw struct {
		Hits struct {
			Total struct {
				Value int64 `json:"value"`
			} `json:"total"`
			Hits []struct {
				ID        string              `json:"_id"`
				Score     float64             `json:"_score"`
				Source    json.Value          `json:"_source"`
				Highlight map[string][]string `json:"highlight"`
			} `json:"hits"`
		} `json:"hits"`
		Aggregations map[string]struct {
			Buckets []struct {
				Key      any   `json:"key"`
				DocCount int64 `json:"doc_count"`
			} `json:"buckets"`
		} `json:"aggregations"`
	}
	if err := json.NewDecoder(r).Decode(&raw); err != nil {
		return nil, err
	}
	resp := &SearchResponse{
		Total: raw.Hits.Total.Value,
		Hits:  make([]SearchHit, 0, len(raw.Hits.Hits)),
	}
	// Copy hits into the exported SearchHit shape.
	for _, h := range raw.Hits.Hits {
		resp.Hits = append(resp.Hits, SearchHit{
			ID:        h.ID,
			Score:     h.Score,
			Source:    h.Source,
			Highlight: h.Highlight,
		})
	}
	// Aggregations stays nil when the reply carried none.
	if len(raw.Aggregations) > 0 {
		resp.Aggregations = make(map[string][]AggBucket, len(raw.Aggregations))
		for name, agg := range raw.Aggregations {
			buckets := make([]AggBucket, len(agg.Buckets))
			for idx, b := range agg.Buckets {
				buckets[idx] = AggBucket{Key: b.Key, DocCount: b.DocCount}
			}
			resp.Aggregations[name] = buckets
		}
	}
	return resp, nil
}
// urlPath joins path segments with `/`, percent-escaping each segment so
// user-supplied ids cannot inject extra path components.
func urlPath(segments ...string) string {
	escaped := make([]string, len(segments))
	for idx, segment := range segments {
		escaped[idx] = url.PathEscape(segment)
	}
	return strings.Join(escaped, "/")
}

View File

@ -0,0 +1,44 @@
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package elasticsearch
import (
"os"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// newRealIndexer connects to a live Elasticsearch/OpenSearch instance and
// creates a per-test index. In CI the well-known service hostname is used;
// locally the target comes from TEST_ELASTICSEARCH_URL, and the test is
// skipped when it is unset.
func newRealIndexer(t *testing.T) *Indexer {
	t.Helper()
	url := "http://elasticsearch:9200"
	if os.Getenv("CI") == "" {
		url = os.Getenv("TEST_ELASTICSEARCH_URL")
		if url == "" {
			t.Skip("TEST_ELASTICSEARCH_URL not set and not running in CI")
		}
	}
	// Lowercase the test name and replace the "/" from subtests so the
	// result is usable as an index name.
	indexName := "gitea_test_" + strings.ReplaceAll(strings.ToLower(t.Name()), "/", "_")
	ix := NewIndexer(url, indexName, 1, `{"mappings":{"properties":{"x":{"type":"keyword"}}}}`)
	_, err := ix.Init(t.Context())
	require.NoError(t, err)
	// NOTE(review): the test index is left behind on the cluster — confirm
	// that is acceptable for the CI service containers.
	t.Cleanup(ix.Close)
	return ix
}
// TestPing verifies the cluster health check reports a usable status.
func TestPing(t *testing.T) {
	ix := newRealIndexer(t)
	require.NoError(t, ix.Ping(t.Context()))
}
// TestDeleteSwallows404 ensures deleting a missing document is treated as an
// idempotent no-op rather than an error.
func TestDeleteSwallows404(t *testing.T) {
	ix := newRealIndexer(t)
	require.NoError(t, ix.Delete(t.Context(), "missing-id"))
}
// TestBulkAcceptsDelete404 ensures a bulk delete of a missing id is not
// reported as an item-level failure.
func TestBulkAcceptsDelete404(t *testing.T) {
	ix := newRealIndexer(t)
	require.NoError(t, ix.Bulk(t.Context(), []BulkOp{DeleteOp("missing-id")}))
}

View File

@ -0,0 +1,132 @@
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package elasticsearch
// MultiMatch types used by the call sites; only these two are needed. See
// https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-multi-match-query.html#multi-match-types
const (
	MultiMatchTypeBestFields   = "best_fields"
	MultiMatchTypePhrasePrefix = "phrase_prefix"
)
// ToAnySlice converts []T to []any for variadic query args like TermsQuery.
func ToAnySlice[T any](s []T) []any {
	converted := make([]any, 0, len(s))
	for _, item := range s {
		converted = append(converted, item)
	}
	return converted
}
// Query is an Elasticsearch query DSL node. It marshals to the JSON
// object expected by the ES query API.
type Query interface {
querySource() map[string]any
}
type rawQuery map[string]any
func (q rawQuery) querySource() map[string]any { return q }
// TermQuery matches documents whose `field` exactly equals `value`.
func TermQuery(field string, value any) Query {
return rawQuery{"term": map[string]any{field: value}}
}
// TermsQuery matches documents whose `field` equals any of `values`.
func TermsQuery(field string, values ...any) Query {
return rawQuery{"terms": map[string]any{field: values}}
}
// MatchQuery is a full-text match on a single field.
func MatchQuery(field string, value any) Query {
return rawQuery{"match": map[string]any{field: value}}
}
// MatchPhraseQuery matches the exact phrase on `field`.
func MatchPhraseQuery(field, value string) Query {
return rawQuery{"match_phrase": map[string]any{field: value}}
}
// MultiMatchQuery is the fluent builder for a multi_match query.
type MultiMatchQuery struct {
query any
fields []string
typ string
operator string
}
// NewMultiMatchQuery creates a multi_match query over the given fields.
func NewMultiMatchQuery(query any, fields ...string) *MultiMatchQuery {
return &MultiMatchQuery{query: query, fields: fields}
}
func (m *MultiMatchQuery) Type(t string) *MultiMatchQuery { m.typ = t; return m }
func (m *MultiMatchQuery) Operator(op string) *MultiMatchQuery { m.operator = op; return m }
func (m *MultiMatchQuery) querySource() map[string]any {
body := map[string]any{"query": m.query}
if len(m.fields) > 0 {
body["fields"] = m.fields
}
if m.typ != "" {
body["type"] = m.typ
}
if m.operator != "" {
body["operator"] = m.operator
}
return map[string]any{"multi_match": body}
}
// RangeQuery is the fluent builder for a range query.
type RangeQuery struct {
field string
body map[string]any
}
func NewRangeQuery(field string) *RangeQuery {
return &RangeQuery{field: field, body: map[string]any{}}
}
func (r *RangeQuery) Gte(v any) *RangeQuery { r.body["gte"] = v; return r }
func (r *RangeQuery) Lte(v any) *RangeQuery { r.body["lte"] = v; return r }
func (r *RangeQuery) querySource() map[string]any {
return map[string]any{"range": map[string]any{r.field: r.body}}
}
// BoolQuery is a fluent builder for the bool compound query.
type BoolQuery struct {
	must    []Query
	should  []Query
	mustNot []Query
}

// NewBoolQuery returns an empty bool query.
func NewBoolQuery() *BoolQuery { return &BoolQuery{} }

// Must adds clauses that every match has to satisfy.
func (b *BoolQuery) Must(q ...Query) *BoolQuery {
	b.must = append(b.must, q...)
	return b
}

// Should adds clauses of which at least one has to match.
func (b *BoolQuery) Should(q ...Query) *BoolQuery {
	b.should = append(b.should, q...)
	return b
}

// MustNot adds clauses that matches must not satisfy.
func (b *BoolQuery) MustNot(q ...Query) *BoolQuery {
	b.mustNot = append(b.mustNot, q...)
	return b
}

func (b *BoolQuery) querySource() map[string]any {
	body := make(map[string]any, 3)
	// Empty clause lists are omitted so the emitted JSON stays minimal.
	if len(b.must) > 0 {
		body["must"] = querySlice(b.must)
	}
	if len(b.should) > 0 {
		body["should"] = querySlice(b.should)
	}
	if len(b.mustNot) > 0 {
		body["must_not"] = querySlice(b.mustNot)
	}
	return map[string]any{"bool": body}
}

// querySlice renders each query to its JSON source object.
func querySlice(queries []Query) []map[string]any {
	rendered := make([]map[string]any, 0, len(queries))
	for _, q := range queries {
		rendered = append(rendered, q.querySource())
	}
	return rendered
}

View File

@ -0,0 +1,76 @@
// Copyright 2026 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package elasticsearch
import "code.gitea.io/gitea/modules/json"
const (
	bulkActionIndex  = "index"
	bulkActionDelete = "delete"
)

// BulkOp is a single write inside a Bulk call. Construct with IndexOp or DeleteOp.
type BulkOp struct {
	action string
	id     string
	doc    any
}

// IndexOp builds a bulk operation that (re)indexes doc under id.
func IndexOp(id string, doc any) BulkOp {
	return BulkOp{
		action: bulkActionIndex,
		id:     id,
		doc:    doc,
	}
}

// DeleteOp builds a bulk operation that deletes the document with id.
func DeleteOp(id string) BulkOp {
	return BulkOp{
		action: bulkActionDelete,
		id:     id,
	}
}
// SortField is one entry of the search sort array.
type SortField struct {
Field string
Desc bool
}
func (s SortField) source() map[string]any {
order := "asc"
if s.Desc {
order = "desc"
}
return map[string]any{s.Field: map[string]any{"order": order}}
}
// SearchRequest captures everything Gitea sends to the _search endpoint.
// Aggregations and Highlight are raw ES JSON bodies — callers write them as
// map[string]any since each has exactly one call site with a fixed shape.
// TrackTotal selects exact hit totals (true) or skips counting (false).
type SearchRequest struct {
	Query        Query
	Sort         []SortField
	From         int
	Size         int
	TrackTotal   bool
	Aggregations map[string]any
	Highlight    map[string]any
}

// SearchHit is a single result row; Source holds the raw stored document.
type SearchHit struct {
	ID        string
	Score     float64
	Source    json.Value
	Highlight map[string][]string
}

// AggBucket is a terms-aggregation bucket (key plus its document count).
type AggBucket struct {
	Key      any
	DocCount int64
}

// SearchResponse is Gitea's decoded view of the search reply; Aggregations
// is nil when the request carried none.
type SearchResponse struct {
	Total        int64
	Hits         []SearchHit
	Aggregations map[string][]AggBucket
}

View File

@ -6,14 +6,11 @@ package elasticsearch
import (
"context"
"fmt"
"time"
"code.gitea.io/gitea/modules/log"
"github.com/olivere/elastic/v7"
)
// VersionedIndexName returns the full index name with version
// VersionedIndexName returns the full index name with version suffix.
func (i *Indexer) VersionedIndexName() string {
return versionedIndexName(i.indexName, i.version)
}
@ -26,41 +23,10 @@ func versionedIndexName(indexName string, version int) string {
return fmt.Sprintf("%s.v%d", indexName, version)
}
func (i *Indexer) createIndex(ctx context.Context) error {
createIndex, err := i.Client.CreateIndex(i.VersionedIndexName()).BodyString(i.mapping).Do(ctx)
if err != nil {
return err
}
if !createIndex.Acknowledged {
return fmt.Errorf("create index %s with %s failed", i.VersionedIndexName(), i.mapping)
}
i.checkOldIndexes(ctx)
return nil
}
func (i *Indexer) initClient() (*elastic.Client, error) {
opts := []elastic.ClientOptionFunc{
elastic.SetURL(i.url),
elastic.SetSniff(false),
elastic.SetHealthcheckInterval(10 * time.Second),
elastic.SetGzip(false),
}
logger := log.GetLogger(log.DEFAULT)
opts = append(opts, elastic.SetTraceLog(&log.PrintfLogger{Logf: logger.Trace}))
opts = append(opts, elastic.SetInfoLog(&log.PrintfLogger{Logf: logger.Info}))
opts = append(opts, elastic.SetErrorLog(&log.PrintfLogger{Logf: logger.Error}))
return elastic.NewClient(opts...)
}
func (i *Indexer) checkOldIndexes(ctx context.Context) {
for v := 0; v < i.version; v++ {
for v := range i.version {
indexName := versionedIndexName(i.indexName, v)
exists, err := i.Client.IndexExists(indexName).Do(ctx)
exists, err := i.indexExists(ctx, indexName)
if err == nil && exists {
log.Warn("Found older elasticsearch index named %q, Gitea will keep the old NOT DELETED. You can delete the old version after the upgrade succeed.", indexName)
}

View File

@ -11,27 +11,18 @@ import (
"code.gitea.io/gitea/modules/graceful"
"code.gitea.io/gitea/modules/indexer"
indexer_internal "code.gitea.io/gitea/modules/indexer/internal"
inner_elasticsearch "code.gitea.io/gitea/modules/indexer/internal/elasticsearch"
es "code.gitea.io/gitea/modules/indexer/internal/elasticsearch"
"code.gitea.io/gitea/modules/indexer/issues/internal"
"code.gitea.io/gitea/modules/util"
"github.com/olivere/elastic/v7"
)
const (
issueIndexerLatestVersion = 3
// multi-match-types, currently only 2 types are used
// Reference: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-multi-match-query.html#multi-match-types
esMultiMatchTypeBestFields = "best_fields"
esMultiMatchTypePhrasePrefix = "phrase_prefix"
)
const issueIndexerLatestVersion = 3
var _ internal.Indexer = &Indexer{}
// Indexer implements Indexer interface
type Indexer struct {
inner *inner_elasticsearch.Indexer
indexer_internal.Indexer // do not composite inner_elasticsearch.Indexer directly to avoid exposing too much
*es.Indexer
}
func (b *Indexer) SupportedSearchModes() []indexer.SearchMode {
@ -41,12 +32,7 @@ func (b *Indexer) SupportedSearchModes() []indexer.SearchMode {
// NewIndexer creates a new elasticsearch indexer
func NewIndexer(url, indexerName string) *Indexer {
inner := inner_elasticsearch.NewIndexer(url, indexerName, issueIndexerLatestVersion, defaultMapping)
indexer := &Indexer{
inner: inner,
Indexer: inner,
}
return indexer
return &Indexer{Indexer: es.NewIndexer(url, indexerName, issueIndexerLatestVersion, defaultMapping)}
}
const (
@ -93,29 +79,14 @@ func (b *Indexer) Index(ctx context.Context, issues ...*internal.IndexerData) er
return nil
} else if len(issues) == 1 {
issue := issues[0]
_, err := b.inner.Client.Index().
Index(b.inner.VersionedIndexName()).
Id(strconv.FormatInt(issue.ID, 10)).
BodyJson(issue).
Do(ctx)
return err
return b.Indexer.Index(ctx, strconv.FormatInt(issue.ID, 10), issue)
}
reqs := make([]elastic.BulkableRequest, 0)
ops := make([]es.BulkOp, 0, len(issues))
for _, issue := range issues {
reqs = append(reqs,
elastic.NewBulkIndexRequest().
Index(b.inner.VersionedIndexName()).
Id(strconv.FormatInt(issue.ID, 10)).
Doc(issue),
)
ops = append(ops, es.IndexOp(strconv.FormatInt(issue.ID, 10), issue))
}
_, err := b.inner.Client.Bulk().
Index(b.inner.VersionedIndexName()).
Add(reqs...).
Do(graceful.GetManager().HammerContext())
return err
return b.Bulk(graceful.GetManager().HammerContext(), ops)
}
// Delete deletes indexes by ids
@ -123,129 +94,116 @@ func (b *Indexer) Delete(ctx context.Context, ids ...int64) error {
if len(ids) == 0 {
return nil
} else if len(ids) == 1 {
_, err := b.inner.Client.Delete().
Index(b.inner.VersionedIndexName()).
Id(strconv.FormatInt(ids[0], 10)).
Do(ctx)
return err
return b.Indexer.Delete(ctx, strconv.FormatInt(ids[0], 10))
}
reqs := make([]elastic.BulkableRequest, 0)
ops := make([]es.BulkOp, 0, len(ids))
for _, id := range ids {
reqs = append(reqs,
elastic.NewBulkDeleteRequest().
Index(b.inner.VersionedIndexName()).
Id(strconv.FormatInt(id, 10)),
)
ops = append(ops, es.DeleteOp(strconv.FormatInt(id, 10)))
}
_, err := b.inner.Client.Bulk().
Index(b.inner.VersionedIndexName()).
Add(reqs...).
Do(graceful.GetManager().HammerContext())
return err
return b.Bulk(graceful.GetManager().HammerContext(), ops)
}
// Search searches for issues by given conditions.
// Returns the matching issue IDs
func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (*internal.SearchResult, error) {
query := elastic.NewBoolQuery()
query := es.NewBoolQuery()
if options.Keyword != "" {
searchMode := util.IfZero(options.SearchMode, b.SupportedSearchModes()[0].ModeValue)
mm := es.NewMultiMatchQuery(options.Keyword, "title", "content", "comments")
if searchMode == indexer.SearchModeExact {
query.Must(elastic.NewMultiMatchQuery(options.Keyword, "title", "content", "comments").Type(esMultiMatchTypePhrasePrefix))
} else /* words */ {
query.Must(elastic.NewMultiMatchQuery(options.Keyword, "title", "content", "comments").Type(esMultiMatchTypeBestFields).Operator("and"))
mm = mm.Type(es.MultiMatchTypePhrasePrefix)
} else {
mm = mm.Type(es.MultiMatchTypeBestFields).Operator("and")
}
query.Must(mm)
}
if len(options.RepoIDs) > 0 {
q := elastic.NewBoolQuery()
q.Should(elastic.NewTermsQuery("repo_id", toAnySlice(options.RepoIDs)...))
q := es.NewBoolQuery()
q.Should(es.TermsQuery("repo_id", es.ToAnySlice(options.RepoIDs)...))
if options.AllPublic {
q.Should(elastic.NewTermQuery("is_public", true))
q.Should(es.TermQuery("is_public", true))
}
query.Must(q)
}
if options.IsPull.Has() {
query.Must(elastic.NewTermQuery("is_pull", options.IsPull.Value()))
query.Must(es.TermQuery("is_pull", options.IsPull.Value()))
}
if options.IsClosed.Has() {
query.Must(elastic.NewTermQuery("is_closed", options.IsClosed.Value()))
query.Must(es.TermQuery("is_closed", options.IsClosed.Value()))
}
if options.IsArchived.Has() {
query.Must(elastic.NewTermQuery("is_archived", options.IsArchived.Value()))
query.Must(es.TermQuery("is_archived", options.IsArchived.Value()))
}
if options.NoLabelOnly {
query.Must(elastic.NewTermQuery("no_label", true))
query.Must(es.TermQuery("no_label", true))
} else {
if len(options.IncludedLabelIDs) > 0 {
q := elastic.NewBoolQuery()
q := es.NewBoolQuery()
for _, labelID := range options.IncludedLabelIDs {
q.Must(elastic.NewTermQuery("label_ids", labelID))
q.Must(es.TermQuery("label_ids", labelID))
}
query.Must(q)
} else if len(options.IncludedAnyLabelIDs) > 0 {
query.Must(elastic.NewTermsQuery("label_ids", toAnySlice(options.IncludedAnyLabelIDs)...))
query.Must(es.TermsQuery("label_ids", es.ToAnySlice(options.IncludedAnyLabelIDs)...))
}
if len(options.ExcludedLabelIDs) > 0 {
q := elastic.NewBoolQuery()
q := es.NewBoolQuery()
for _, labelID := range options.ExcludedLabelIDs {
q.MustNot(elastic.NewTermQuery("label_ids", labelID))
q.MustNot(es.TermQuery("label_ids", labelID))
}
query.Must(q)
}
}
if len(options.MilestoneIDs) > 0 {
query.Must(elastic.NewTermsQuery("milestone_id", toAnySlice(options.MilestoneIDs)...))
query.Must(es.TermsQuery("milestone_id", es.ToAnySlice(options.MilestoneIDs)...))
}
if options.NoProjectOnly {
query.Must(elastic.NewTermQuery("no_project", true))
query.Must(es.TermQuery("no_project", true))
} else if len(options.ProjectIDs) > 0 {
// FIXME: ISSUE-MULTIPLE-PROJECTS-FILTER: this logic is not right, it should use "AND" but not "OR"
query.Must(elastic.NewTermsQuery("project_ids", toAnySlice(options.ProjectIDs)...))
query.Must(es.TermsQuery("project_ids", es.ToAnySlice(options.ProjectIDs)...))
}
if options.PosterID != "" {
// "(none)" becomes 0, it means no poster
posterIDInt64, _ := strconv.ParseInt(options.PosterID, 10, 64)
query.Must(elastic.NewTermQuery("poster_id", posterIDInt64))
query.Must(es.TermQuery("poster_id", posterIDInt64))
}
if options.AssigneeID != "" {
if options.AssigneeID == "(any)" {
q := elastic.NewRangeQuery("assignee_id")
q.Gte(1)
query.Must(q)
query.Must(es.NewRangeQuery("assignee_id").Gte(1))
} else {
// "(none)" becomes 0, it means no assignee
assigneeIDInt64, _ := strconv.ParseInt(options.AssigneeID, 10, 64)
query.Must(elastic.NewTermQuery("assignee_id", assigneeIDInt64))
query.Must(es.TermQuery("assignee_id", assigneeIDInt64))
}
}
if options.MentionID.Has() {
query.Must(elastic.NewTermQuery("mention_ids", options.MentionID.Value()))
query.Must(es.TermQuery("mention_ids", options.MentionID.Value()))
}
if options.ReviewedID.Has() {
query.Must(elastic.NewTermQuery("reviewed_ids", options.ReviewedID.Value()))
query.Must(es.TermQuery("reviewed_ids", options.ReviewedID.Value()))
}
if options.ReviewRequestedID.Has() {
query.Must(elastic.NewTermQuery("review_requested_ids", options.ReviewRequestedID.Value()))
query.Must(es.TermQuery("review_requested_ids", options.ReviewRequestedID.Value()))
}
if options.SubscriberID.Has() {
query.Must(elastic.NewTermQuery("subscriber_ids", options.SubscriberID.Value()))
query.Must(es.TermQuery("subscriber_ids", options.SubscriberID.Value()))
}
if options.UpdatedAfterUnix.Has() || options.UpdatedBeforeUnix.Has() {
q := elastic.NewRangeQuery("updated_unix")
q := es.NewRangeQuery("updated_unix")
if options.UpdatedAfterUnix.Has() {
q.Gte(options.UpdatedAfterUnix.Value())
}
@ -258,9 +216,9 @@ func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (
if options.SortBy == "" {
options.SortBy = internal.SortByCreatedAsc
}
sortBy := []elastic.Sorter{
sortBy := []es.SortField{
parseSortBy(options.SortBy),
elastic.NewFieldSort("id").Desc(),
{Field: "id", Desc: true},
}
// See https://stackoverflow.com/questions/35206409/elasticsearch-2-1-result-window-is-too-large-index-max-result-window/35221900
@ -268,43 +226,30 @@ func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (
const maxPageSize = 10000
skip, limit := indexer_internal.ParsePaginator(options.Paginator, maxPageSize)
searchResult, err := b.inner.Client.Search().
Index(b.inner.VersionedIndexName()).
Query(query).
SortBy(sortBy...).
From(skip).Size(limit).
Do(ctx)
resp, err := b.Indexer.Search(ctx, es.SearchRequest{
Query: query,
Sort: sortBy,
From: skip,
Size: limit,
TrackTotal: true,
})
if err != nil {
return nil, err
}
hits := make([]internal.Match, 0, limit)
for _, hit := range searchResult.Hits.Hits {
id, _ := strconv.ParseInt(hit.Id, 10, 64)
hits = append(hits, internal.Match{
ID: id,
})
hits := make([]internal.Match, 0, len(resp.Hits))
for _, hit := range resp.Hits {
id, _ := strconv.ParseInt(hit.ID, 10, 64)
hits = append(hits, internal.Match{ID: id})
}
return &internal.SearchResult{
Total: searchResult.TotalHits(),
Total: resp.Total,
Hits: hits,
}, nil
}
// toAnySlice widens a typed slice into a []any so its elements can be
// forwarded to variadic ...any APIs (e.g. query builders).
func toAnySlice[T any](s []T) []any {
	out := make([]any, len(s))
	for i, v := range s {
		out[i] = v
	}
	return out
}
func parseSortBy(sortBy internal.SortBy) elastic.Sorter {
field := strings.TrimPrefix(string(sortBy), "-")
ret := elastic.NewFieldSort(field)
if strings.HasPrefix(string(sortBy), "-") {
ret.Desc()
}
return ret
// parseSortBy converts an internal sort key — optionally prefixed with "-"
// to request descending order — into an Elasticsearch sort field.
func parseSortBy(sortBy internal.SortBy) es.SortField {
	name, descending := strings.CutPrefix(string(sortBy), "-")
	return es.SortField{Field: name, Desc: descending}
}

View File

@ -6,6 +6,7 @@ package elasticsearch
import (
"fmt"
"net/http"
"net/url"
"os"
"testing"
"time"
@ -17,19 +18,36 @@ import (
func TestElasticsearchIndexer(t *testing.T) {
// The elasticsearch instance started by pull-db-tests.yml > test-unit > services > elasticsearch
url := "http://elastic:changeme@elasticsearch:9200"
rawURL := "http://elastic:changeme@elasticsearch:9200"
if os.Getenv("CI") == "" {
// Make it possible to run tests against a local elasticsearch instance
url = os.Getenv("TEST_ELASTICSEARCH_URL")
if url == "" {
rawURL = os.Getenv("TEST_ELASTICSEARCH_URL")
if rawURL == "" {
t.Skip("TEST_ELASTICSEARCH_URL not set and not running in CI")
return
}
}
// Go's net/http does not auto-attach URL userinfo as Basic Auth, so extract
// it and set the header explicitly; otherwise auth-enforced clusters answer
// 401 and the probe never reports ready.
parsed, err := url.Parse(rawURL)
require.NoError(t, err)
user := parsed.User
parsed.User = nil
probeURL := parsed.String()
require.Eventually(t, func() bool {
resp, err := http.Get(url)
req, err := http.NewRequest(http.MethodGet, probeURL, nil)
if err != nil {
return false
}
if user != nil {
pass, _ := user.Password()
req.SetBasicAuth(user.Username(), pass)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false
}
@ -37,7 +55,7 @@ func TestElasticsearchIndexer(t *testing.T) {
return resp.StatusCode == http.StatusOK
}, time.Minute, time.Second, "Expected elasticsearch to be up")
indexer := NewIndexer(url, fmt.Sprintf("test_elasticsearch_indexer_%d", time.Now().Unix()))
indexer := NewIndexer(rawURL, fmt.Sprintf("test_elasticsearch_indexer_%d", time.Now().Unix()))
defer indexer.Close()
tests.TestIndexer(t, indexer)

View File

@ -116,6 +116,16 @@ var cases = []*testIndexerCase{
assert.Equal(t, len(data), int(result.Total))
},
},
{
// Exercises the single-doc Index/Delete fast path in backends that have one (e.g. Elasticsearch).
Name: "single-doc index",
ExtraData: []*internal.IndexerData{
{ID: 999, Title: "solo-issue-marker"},
},
SearchOptions: &internal.SearchOptions{Keyword: "solo-issue-marker"},
ExpectedIDs: []int64{999},
ExpectedTotal: 1,
},
{
Name: "Keyword",
ExtraData: []*internal.IndexerData{

View File

@ -4,13 +4,7 @@
package setting
import (
"errors"
"fmt"
"net"
"net/url"
"os"
"path/filepath"
"strings"
"time"
)
@ -20,24 +14,22 @@ var (
// DatabaseTypeNames contains the friendly names for all database types
DatabaseTypeNames = map[string]string{"mysql": "MySQL", "postgres": "PostgreSQL", "mssql": "MSSQL", "sqlite3": "SQLite3"}
// EnableSQLite3 use SQLite3, set by build flag
EnableSQLite3 bool
// Database holds the database settings
Database = struct {
Type DatabaseType
Host string
Name string
User string
Passwd string
Schema string
SSLMode string
Path string
Type DatabaseType
Host string
Name string
User string
Passwd string
Schema string
SSLMode string
Path string
SQLiteBusyTimeout int
SQLiteJournalMode string
LogSQL bool
MysqlCharset string
CharsetCollation string
Timeout int // seconds
SQLiteJournalMode string
DBConnectRetries int
DBConnectBackoff time.Duration
MaxIdleConns int
@ -47,7 +39,7 @@ var (
AutoMigration bool
SlowQueryThreshold time.Duration
}{
Timeout: 500,
SQLiteBusyTimeout: 500,
IterateBufferSize: 50,
}
)
@ -64,15 +56,14 @@ func loadDBSetting(rootCfg ConfigProvider) {
Database.Host = sec.Key("HOST").String()
Database.Name = sec.Key("NAME").String()
Database.User = sec.Key("USER").String()
if len(Database.Passwd) == 0 {
Database.Passwd = sec.Key("PASSWD").String()
}
Database.Passwd = sec.Key("PASSWD").String()
Database.Schema = sec.Key("SCHEMA").String()
Database.SSLMode = sec.Key("SSL_MODE").MustString("disable")
Database.CharsetCollation = sec.Key("CHARSET_COLLATION").String()
Database.Path = sec.Key("PATH").MustString(filepath.Join(AppDataPath, "gitea.db"))
Database.Timeout = sec.Key("SQLITE_TIMEOUT").MustInt(500)
Database.SQLiteBusyTimeout = sec.Key("SQLITE_TIMEOUT").MustInt(500)
Database.SQLiteJournalMode = sec.Key("SQLITE_JOURNAL_MODE").MustString("")
Database.MaxIdleConns = sec.Key("MAX_IDLE_CONNS").MustInt(2)
@ -91,123 +82,9 @@ func loadDBSetting(rootCfg ConfigProvider) {
Database.SlowQueryThreshold = sec.Key("SLOW_QUERY_THRESHOLD").MustDuration(5 * time.Second)
}
// DBConnStr returns the driver-specific database connection string assembled
// from the global Database settings. It returns an error for an unknown
// database type, or for SQLite3 when this binary was built without SQLite3
// support or the database directory cannot be created.
func DBConnStr() (string, error) {
	var connStr string
	paramSep := "?"
	if strings.Contains(Database.Name, paramSep) {
		// NAME already carries driver parameters, so append further ones with "&"
		paramSep = "&"
	}
	switch Database.Type {
	case "mysql":
		connType := "tcp"
		if len(Database.Host) > 0 && Database.Host[0] == '/' { // looks like a unix socket
			connType = "unix"
		}
		tls := Database.SSLMode
		if tls == "disable" { // allow (Postgres-inspired) default value to work in MySQL
			tls = "false"
		}
		connStr = fmt.Sprintf("%s:%s@%s(%s)/%s%sparseTime=true&tls=%s",
			Database.User, Database.Passwd, connType, Database.Host, Database.Name, paramSep, tls)
	case "postgres":
		connStr = getPostgreSQLConnectionString(Database.Host, Database.User, Database.Passwd, Database.Name, Database.SSLMode)
	case "mssql":
		host, port := ParseMSSQLHostPort(Database.Host)
		connStr = fmt.Sprintf("server=%s; port=%s; database=%s; user id=%s; password=%s;", host, port, Database.Name, Database.User, Database.Passwd)
	case "sqlite3":
		if !EnableSQLite3 {
			return "", errors.New("this Gitea binary was not built with SQLite3 support")
		}
		if err := os.MkdirAll(filepath.Dir(Database.Path), os.ModePerm); err != nil {
			// lowercase error string per Go convention (staticcheck ST1005)
			return "", fmt.Errorf("failed to create directories: %w", err)
		}
		journalMode := ""
		if Database.SQLiteJournalMode != "" {
			journalMode = "&_journal_mode=" + Database.SQLiteJournalMode
		}
		connStr = fmt.Sprintf("file:%s?cache=shared&mode=rwc&_busy_timeout=%d&_txlock=immediate%s",
			Database.Path, Database.Timeout, journalMode)
	default:
		return "", fmt.Errorf("unknown database type: %s", Database.Type)
	}
	return connStr, nil
}
// parsePostgreSQLHostPort parses given input in various forms defined in
// https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
// and returns proper host and port number, falling back to 127.0.0.1:5432
// for any part that is missing.
func parsePostgreSQLHostPort(info string) (host, port string) {
	splitHost, splitPort, err := net.SplitHostPort(info)
	if err == nil {
		host, port = splitHost, splitPort
	} else {
		// no port part: the whole input is the host; unwrap a bracketed IPv6 literal
		host = info
		if len(host) >= 2 && host[0] == '[' && host[len(host)-1] == ']' {
			host = host[1 : len(host)-1]
		}
	}
	// apply libpq-style fallbacks for anything left empty
	if host == "" {
		host = "127.0.0.1"
	}
	if port == "" {
		port = "5432"
	}
	return host, port
}
// getPostgreSQLConnectionString assembles a "postgres://" connection URL from
// the individual settings fields. Hosts that look like unix-socket paths
// (starting with "/") are moved into the "host" query parameter — presumably
// matching libpq's socket-directory convention; confirm against driver docs.
func getPostgreSQLConnectionString(dbHost, dbUser, dbPasswd, dbName, dbsslMode string) (connStr string) {
	// the database NAME may carry extra driver parameters after "?"
	dbName, dbParam, _ := strings.Cut(dbName, "?")
	host, port := parsePostgreSQLHostPort(dbHost)
	connURL := url.URL{
		Scheme:   "postgres",
		User:     url.UserPassword(dbUser, dbPasswd),
		Host:     net.JoinHostPort(host, port),
		Path:     dbName,
		OmitHost: false,
		RawQuery: dbParam,
	}
	query := connURL.Query()
	if strings.HasPrefix(host, "/") { // looks like a unix socket
		query.Add("host", host)
		connURL.Host = ":" + port
	}
	// sslmode always wins over anything carried in dbParam
	query.Set("sslmode", dbsslMode)
	connURL.RawQuery = query.Encode()
	return connURL.String()
}
// ParseMSSQLHostPort splits the host setting into host and port, accepting
// either "host:port" or MSSQL's "host,port" notation.
// The default port "0" might be related to MSSQL's dynamic port — to be double-checked in the future.
func ParseMSSQLHostPort(info string) (string, string) {
	host, port := "127.0.0.1", "0"
	switch {
	case strings.Contains(info, ":"):
		segments := strings.Split(info, ":")
		host, port = segments[0], segments[1]
	case strings.Contains(info, ","):
		segments := strings.Split(info, ",")
		host, port = segments[0], strings.TrimSpace(segments[1])
	case info != "":
		host = info
	}
	// restore defaults when a separator was present but one side was empty
	if host == "" {
		host = "127.0.0.1"
	}
	if port == "" {
		port = "0"
	}
	return host, port
}
// DatabaseType FIXME: it is also used directly with "schemas.DBType", so the names must be consistent
type DatabaseType string

// String returns the raw database type name (e.g. "mysql", "sqlite3").
func (t DatabaseType) String() string {
	return string(t)
}

// IsSQLite3 reports whether the configured database type is SQLite3.
func (t DatabaseType) IsSQLite3() bool {
	return t == "sqlite3"
}

View File

@ -1,15 +0,0 @@
//go:build sqlite
// Copyright 2014 The Gogs Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package setting
import (
_ "github.com/mattn/go-sqlite3"
)
// init enables SQLite3 support when Gitea is compiled with the "sqlite" build
// tag: it flips the package-level EnableSQLite3 flag and advertises "sqlite3"
// as a selectable database type.
func init() {
	EnableSQLite3 = true
	SupportedDatabaseTypes = append(SupportedDatabaseTypes, "sqlite3")
}

View File

@ -236,6 +236,12 @@ type EditRepoOption struct {
MirrorInterval *string `json:"mirror_interval,omitempty"`
// enable prune - remove obsolete remote-tracking references when mirroring
EnablePrune *bool `json:"enable_prune,omitempty"`
// authentication username for the remote repository (mirrors)
MirrorUsername *string `json:"mirror_username,omitempty"`
// authentication password for the remote repository (mirrors)
MirrorPassword *string `json:"mirror_password,omitempty"`
// authentication token for the remote repository (mirrors)
MirrorToken *string `json:"mirror_token,omitempty"`
}
// GenerateRepoOption options when creating a repository using a template

View File

@ -3315,7 +3315,6 @@
"admin.config.cache_config": "Cache Configuration",
"admin.config.cache_adapter": "Cache Adapter",
"admin.config.cache_interval": "Cache Interval",
"admin.config.cache_conn": "Cache Connection",
"admin.config.cache_item_ttl": "Cache Item TTL",
"admin.config.cache_test": "Test Cache",
"admin.config.cache_test_failed": "Failed to probe the cache: %v.",
@ -3330,7 +3329,6 @@
"admin.config.instance_web_banner.message_placeholder": "Banner message (supports markdown)",
"admin.config.session_config": "Session Configuration",
"admin.config.session_provider": "Session Provider",
"admin.config.provider_config": "Provider Config",
"admin.config.cookie_name": "Cookie Name",
"admin.config.gc_interval_time": "GC Interval Time",
"admin.config.session_life_time": "Session Life Time",

View File

@ -1385,6 +1385,7 @@
"repo.projects.column.delete": "Supprimer la colonne",
"repo.projects.column.deletion_desc": "La suppression dune colonne déplace tous ses tickets dans la colonne par défaut. Continuer ?",
"repo.projects.column.color": "Couleur",
"repo.projects.column": "Colonne",
"repo.projects.open": "Ouvrir",
"repo.projects.close": "Fermer",
"repo.projects.column.assigned_to": "Assigné à",
@ -3314,7 +3315,6 @@
"admin.config.cache_config": "Configuration du cache",
"admin.config.cache_adapter": "Adaptateur du Cache",
"admin.config.cache_interval": "Intervales du Cache",
"admin.config.cache_conn": "Liaison du Cache",
"admin.config.cache_item_ttl": "Durée de vie des éléments dans le cache",
"admin.config.cache_test": "Test du cache",
"admin.config.cache_test_failed": "Impossible dinterroger le cache : %v.",
@ -3329,7 +3329,6 @@
"admin.config.instance_web_banner.message_placeholder": "Message de bannière (supporte markdown)",
"admin.config.session_config": "Configuration de session",
"admin.config.session_provider": "Fournisseur de session",
"admin.config.provider_config": "Configuration du fournisseur",
"admin.config.cookie_name": "Nom du cookie",
"admin.config.gc_interval_time": "Intervals GC",
"admin.config.session_life_time": "Durée des sessions",

View File

@ -122,6 +122,7 @@
"unpin": "Díphoráil",
"artifacts": "Déantáin",
"expired": "Imithe in éag",
"artifact_expires_at": "Éagaíonn ag %s",
"confirm_delete_artifact": "An bhfuil tú cinnte gur mian leat an déantán '%s' a scriosadh?",
"archived": "Cartlann",
"concept_system_global": "Domhanda",
@ -223,6 +224,7 @@
"error.occurred": "Tharla earráid",
"error.report_message": "Má chreideann tú gur fabht Gitea é seo, déan cuardach le haghaidh ceisteanna ar <a href=\"%s\" target=\"_blank\">GitHub</a> nó oscail eagrán nua más gá.",
"error.not_found": "Ní raibh an sprioc in ann a fháil.",
"error.permission_denied": "Cead diúltaithe.",
"error.network_error": "Earráid líonra",
"startpage.app_desc": "Seirbhís Git gan phian, féin-óstáil",
"startpage.install": "Éasca a shuiteáil",
@ -637,14 +639,8 @@
"user.block.unblock.failure": "Theip ar an úsáideoir a díbhlocáil: %s",
"user.block.blocked": "Chuir tú bac ar an úsáideoir seo.",
"user.block.title": "Cuir bac ar úsáideoir",
"user.block.info": "Cuireann blocáil úsáideora cosc orthu idirghníomhú le stórais, mar shampla iarratais tarraingthe nó saincheisteanna a oscailt nó trácht a dhéanamh orthu. Níos mó a fhoghlaim faoi bhac úsáideora.",
"user.block.info_1": "Cuireann blocáil úsáideora cosc ar na gníomhartha seo a leanas ar do chuntas agus ar do stór:",
"user.block.info_2": "ag leanúint do chuntas",
"user.block.info_3": "seol fógraí chugat ag @mentioning d'ainm úsáideora",
"user.block.info_4": "ag tabhairt cuireadh duit mar chomhoibritheoir chuig a stórtha",
"user.block.info_5": "ag réaladh, ag forcáil nó ag féachaint ar stórais",
"user.block.info_6": "ceisteanna nó iarrataí tarraingthe a oscailt agus trácht",
"user.block.info_7": "ag freagairt do do thuairimí i saincheisteanna nó i n-iarratais tarraingthe",
"user.block.info": "Má chuireann bac ar úsáideoir, cuirtear cosc orthu idirghníomhú le stórtha, amhail iarratais tarraingthe nó saincheisteanna a oscailt nó trácht a dhéanamh orthu.",
"user.block.info.docs": "Foghlaim tuilleadh faoi úsáideoir a bhlocáil.",
"user.block.user_to_block": "Úsáideoir chun blocáil",
"user.block.note": "Nóta",
"user.block.note.title": "Nóta roghnach:",
@ -1065,8 +1061,8 @@
"repo.transfer.accept_desc": "Aistriú chuig “%s”",
"repo.transfer.reject": "Diúltaigh aistriú",
"repo.transfer.reject_desc": "Cealaigh aistriú chuig \"%s\"",
"repo.transfer.no_permission_to_accept": "Níl cead agat glacadh leis an aistriú seo.",
"repo.transfer.no_permission_to_reject": "Níl cead agat an aistriú seo a dhiúltú.",
"repo.transfer.is_transferring": "Ag aistriú…",
"repo.transfer.is_transferring_prompt": "Tá an stórlann á aistriú go %s",
"repo.desc.private": "Príobháideach",
"repo.desc.public": "Poiblí",
"repo.desc.public_access": "Rochtain Phoiblí",
@ -1389,6 +1385,7 @@
"repo.projects.column.delete": "Scrios Colún",
"repo.projects.column.deletion_desc": "Ag scriosadh colún tionscadail aistríonn gach saincheist ghaolmhar chuig an gcolún. Lean ar aghaidh?",
"repo.projects.column.color": "Dath",
"repo.projects.column": "Colún",
"repo.projects.open": "Oscailte",
"repo.projects.close": "Dún",
"repo.projects.column.assigned_to": "Sannta do",
@ -1406,11 +1403,12 @@
"repo.issues.new": "Eagrán Nua",
"repo.issues.new.title_empty": "Ní féidir leis an teideal a bheith folamh",
"repo.issues.new.labels": "Lipéid",
"repo.issues.new.no_label": "Gan Lipéad",
"repo.issues.new.no_labels": "Gan lipéid",
"repo.issues.new.clear_labels": "Lipéid shoiléir",
"repo.issues.new.projects": "Tionscadail",
"repo.issues.new.clear_projects": "Tionscadail soiléire",
"repo.issues.new.no_projects": "Gan aon tionscadal",
"repo.issues.new.no_projects": "Gan aon tionscadail",
"repo.issues.new.no_column": "Gan aon cholún",
"repo.issues.new.open_projects": "Tionscadail Oscailte",
"repo.issues.new.closed_projects": "Tionscadail Dúnta",
"repo.issues.new.no_items": "Gan aon earraí",
@ -1787,6 +1785,7 @@
"repo.pulls.review_only_possible_for_full_diff": "Ní féidir athbhreithniú a dhéanamh ach amháin nuair a bhreathnaítear ar an difríocht iomlán",
"repo.pulls.filter_changes_by_commit": "Scagaigh de réir tiomantas",
"repo.pulls.nothing_to_compare": "Tá na brainsí seo cothrom. Ní gá iarratas tarraingthe a chruthú.",
"repo.pulls.no_common_history": "Níl bonn cumaisc coitianta ag na brainsí seo. Roghnaigh bonn difriúil nó cuir brainse i gcomparáid.",
"repo.pulls.nothing_to_compare_have_tag": "Tá na brainsí/clibeanna roghnaithe comhionann.",
"repo.pulls.nothing_to_compare_and_allow_empty_pr": "Tá na brainsí seo cothrom. Beidh an PR seo folamh.",
"repo.pulls.has_pull_request": "Tá iarratas tarraingthe idir na brainsí seo ann cheana: <a href=\"%[1]s\">%[2]s#%[3]d</a>",
@ -1853,6 +1852,7 @@
"repo.pulls.merge_manually": "Cumaisc de láimh",
"repo.pulls.merge_commit_id": "ID an tiomantis cumaisc",
"repo.pulls.require_signed_wont_sign": "Éilíonn an bhrainse tiomáintí shínithe, ach ní shínífear an cumasc seo",
"repo.pulls.require_signed_head_commits_unverified": "Teastaíonn gealltanais sínithe ón mbrainse ach ní dheimhnítear gealltanas amháin nó níos mó ar an iarratas tarraingte seo",
"repo.pulls.invalid_merge_option": "Ní féidir leat an rogha cumaisc seo a úsáid don iarratas tarraingthe seo.",
"repo.pulls.merge_conflict": "Theip ar an gCumasc: Bhí coimhlint ann agus an cumasc á dhéanamh. Leid: Bain triail as straitéis dhifriúil.",
"repo.pulls.merge_conflict_summary": "Teachtaireacht Earráide",
@ -3315,7 +3315,6 @@
"admin.config.cache_config": "Cumraíocht taisce",
"admin.config.cache_adapter": "Cuibheoir taisce",
"admin.config.cache_interval": "Eatramh Taisce",
"admin.config.cache_conn": "Ceangal Taisce",
"admin.config.cache_item_ttl": "Mír Taisce TTL",
"admin.config.cache_test": "Taisce Tástáil",
"admin.config.cache_test_failed": "Theip ar an taisce a thaiscéaladh: %v.",
@ -3330,7 +3329,6 @@
"admin.config.instance_web_banner.message_placeholder": "Teachtaireacht meirge (tacaíonn sé le Markdown)",
"admin.config.session_config": "Cumraíocht Seisiúin",
"admin.config.session_provider": "Soláthraí Seisiúin",
"admin.config.provider_config": "Cumraíocht Soláthraí",
"admin.config.cookie_name": "Ainm Fianán",
"admin.config.gc_interval_time": "Am Eatramh GC",
"admin.config.session_life_time": "Am Saoil na Seisiúin",
@ -3774,9 +3772,11 @@
"actions.runs.delete.description": "An bhfuil tú cinnte gur mian leat an rith sreabha oibre seo a scriosadh go buan? Ní féidir an gníomh seo a chealú.",
"actions.runs.not_done": "Níl an rith sreabha oibre seo críochnaithe.",
"actions.runs.view_workflow_file": "Féach ar chomhad sreabha oibre",
"actions.runs.workflow_graph": "Graf Sreabhadh Oibre",
"actions.runs.summary": "Achoimre",
"actions.runs.all_jobs": "Gach post",
"actions.runs.attempt": "Iarracht",
"actions.runs.latest": "Is déanaí",
"actions.runs.latest_attempt": "An iarracht is déanaí",
"actions.runs.triggered_via": "Spreagtha trí %s",
"actions.runs.total_duration": "Fad iomlán:",
"actions.workflow.disable": "Díchumasaigh sreabhadh oibre",

View File

@ -257,7 +257,7 @@ func handleRemoteAddrError(ctx *context.APIContext, err error) {
addrErr := err.(*git.ErrInvalidCloneAddr)
switch {
case addrErr.IsURLError:
ctx.APIError(http.StatusUnprocessableEntity, err)
ctx.APIError(http.StatusUnprocessableEntity, "The provided URL is invalid.")
case addrErr.IsPermissionDenied:
if addrErr.LocalPath {
ctx.APIError(http.StatusUnprocessableEntity, "You are not allowed to import local repositories.")

View File

@ -21,6 +21,7 @@ import (
repo_model "code.gitea.io/gitea/models/repo"
unit_model "code.gitea.io/gitea/models/unit"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/label"
"code.gitea.io/gitea/modules/log"
@ -37,6 +38,8 @@ import (
"code.gitea.io/gitea/services/convert"
feed_service "code.gitea.io/gitea/services/feed"
"code.gitea.io/gitea/services/issue"
"code.gitea.io/gitea/services/migrations"
mirror_service "code.gitea.io/gitea/services/mirror"
repo_service "code.gitea.io/gitea/services/repository"
)
@ -628,7 +631,11 @@ func Edit(ctx *context.APIContext) {
}
}
if opts.MirrorInterval != nil || opts.EnablePrune != nil {
if opts.MirrorInterval != nil ||
opts.EnablePrune != nil ||
opts.MirrorUsername != nil ||
opts.MirrorPassword != nil ||
opts.MirrorToken != nil {
if err := updateMirror(ctx, opts); err != nil {
return
}
@ -1059,6 +1066,57 @@ func updateMirror(ctx *context.APIContext, opts api.EditRepoOption) error {
log.Trace("Repository %s Mirror[%d] Set EnablePrune: %t", repo.FullName(), mirror.ID, mirror.EnablePrune)
}
authUpdateRequested := opts.MirrorPassword != nil || opts.MirrorToken != nil || opts.MirrorUsername != nil
if authUpdateRequested {
remoteURL, err := gitrepo.GitRemoteGetURL(ctx, repo, mirror.GetRemoteName())
if err != nil {
ctx.APIErrorInternal(err)
return err
}
authUsername := ""
if opts.MirrorUsername != nil {
authUsername = *opts.MirrorUsername
} else if remoteURL.User != nil {
authUsername = remoteURL.User.Username()
}
authPassword := ""
authToken := ""
if opts.MirrorPassword != nil {
authPassword = *opts.MirrorPassword
}
if opts.MirrorToken != nil {
authToken = *opts.MirrorToken
}
if opts.MirrorPassword == nil && opts.MirrorToken == nil && remoteURL.User != nil && (authUsername == "" || authUsername == remoteURL.User.Username()) {
authPassword, _ = remoteURL.User.Password()
}
if authToken != "" {
authPassword = authToken
}
composedAddress, err := git.ParseRemoteAddr(repo.OriginalURL, authUsername, authPassword)
if err == nil {
err = migrations.IsMigrateURLAllowed(composedAddress, ctx.Doer)
}
if err != nil {
handleRemoteAddrError(ctx, err)
return err
}
if err := mirror_service.UpdateAddress(ctx, mirror, composedAddress); err != nil {
ctx.APIErrorInternal(err)
return err
}
if sanitized, err := util.SanitizeURL(repo.OriginalURL); err == nil {
mirror.RemoteAddress = sanitized
}
}
// finally update the mirror in the DB
if err := repo_model.UpdateMirror(ctx, mirror); err != nil {
log.Error("Failed to Set Mirror Interval: %s", err)

View File

@ -134,12 +134,6 @@ func InitWebInstalled(ctx context.Context) {
external.RegisterRenderers()
markup.Init(markup_service.FormalRenderHelperFuncs())
if setting.EnableSQLite3 {
log.Info("SQLite3 support is enabled")
} else if setting.Database.Type.IsSQLite3() {
log.Fatal("SQLite3 support is disabled, but it is used for database setting. Please get or build a Gitea release with SQLite3 support.")
}
mustInitCtx(ctx, common.InitDBEngine)
log.Info("ORM engine initialization successful!")
mustInit(system.Init)

View File

@ -76,7 +76,7 @@ func Install(ctx *context.Context) {
form.DbSchema = setting.Database.Schema
form.SSLMode = setting.Database.SSLMode
curDBType := setting.Database.Type.String()
curDBType := string(setting.Database.Type)
if !slices.Contains(setting.SupportedDatabaseTypes, curDBType) {
curDBType = "mysql"
}
@ -328,7 +328,7 @@ func SubmitInstall(ctx *context.Context) {
cfg.Section("").Key("WORK_PATH").SetValue(setting.AppWorkPath)
cfg.Section("").Key("RUN_MODE").SetValue("prod")
cfg.Section("database").Key("DB_TYPE").SetValue(setting.Database.Type.String())
cfg.Section("database").Key("DB_TYPE").SetValue(string(setting.Database.Type))
cfg.Section("database").Key("HOST").SetValue(setting.Database.Host)
cfg.Section("database").Key("NAME").SetValue(setting.Database.Name)
cfg.Section("database").Key("USER").SetValue(setting.Database.User)

View File

@ -16,64 +16,6 @@ import (
"github.com/stretchr/testify/require"
)
// TestShadowPassword verifies that credentials embedded in provider
// connection strings are masked with "******" before display.
func TestShadowPassword(t *testing.T) {
	testCases := []struct {
		Provider string
		CfgItem  string
		Result   string
	}{
		{
			Provider: "redis",
			CfgItem:  "network=tcp,addr=:6379,password=gitea,db=0,pool_size=100,idle_timeout=180",
			Result:   "network=tcp,addr=:6379,password=******,db=0,pool_size=100,idle_timeout=180",
		},
		{
			Provider: "mysql",
			CfgItem:  "root:@tcp(localhost:3306)/gitea?charset=utf8",
			Result:   "root:******@tcp(localhost:3306)/gitea?charset=utf8",
		},
		{
			Provider: "mysql",
			CfgItem:  "/gitea?charset=utf8",
			Result:   "/gitea?charset=utf8",
		},
		{
			Provider: "mysql",
			CfgItem:  "user:mypassword@/dbname",
			Result:   "user:******@/dbname",
		},
		{
			Provider: "postgres",
			CfgItem:  "user=pqgotest dbname=pqgotest sslmode=verify-full",
			Result:   "user=pqgotest dbname=pqgotest sslmode=verify-full",
		},
		{
			Provider: "postgres",
			CfgItem:  "user=pqgotest password= dbname=pqgotest sslmode=verify-full",
			Result:   "user=pqgotest password=****** dbname=pqgotest sslmode=verify-full",
		},
		{
			Provider: "postgres",
			CfgItem:  "postgres://user:pass@hostname/dbname",
			Result:   "postgres://user:******@hostname/dbname",
		},
		{
			Provider: "couchbase",
			CfgItem:  "http://dev-couchbase.example.com:8091/",
			Result:   "http://dev-couchbase.example.com:8091/",
		},
		{
			Provider: "couchbase",
			CfgItem:  "http://user:the_password@dev-couchbase.example.com:8091/",
			Result:   "http://user:******@dev-couchbase.example.com:8091/",
		},
	}
	for _, tc := range testCases {
		assert.Equal(t, tc.Result, shadowPassword(tc.Provider, tc.CfgItem))
	}
}
func TestSelfCheckPost(t *testing.T) {
defer test.MockVariableValue(&setting.PublicURLDetection)()
defer test.MockVariableValue(&setting.AppURL, "http://config/sub/")()

View File

@ -7,8 +7,6 @@ package admin
import (
"errors"
"net/http"
"net/url"
"strings"
system_model "code.gitea.io/gitea/models/system"
"code.gitea.io/gitea/modules/cache"
@ -59,63 +57,6 @@ func TestCache(ctx *context.Context) {
ctx.Redirect(setting.AppSubURL + "/-/admin/config")
}
// shadowPasswordKV masks the value of the first "password=" entry in a
// splitter-separated key=value config string; other entries pass through.
func shadowPasswordKV(cfgItem, splitter string) string {
	parts := strings.Split(cfgItem, splitter)
	for idx, part := range parts {
		if strings.HasPrefix(part, "password=") {
			parts[idx] = "password=******"
			break
		}
	}
	return strings.Join(parts, splitter)
}
// shadowURL masks the password part of a URL-shaped config item
// (scheme://user:pass@host/...). Items without userinfo, or that fail to
// parse, are returned unchanged (parse failures are logged).
func shadowURL(provider, cfgItem string) string {
	parsed, err := url.Parse(cfgItem)
	if err != nil {
		log.Error("Shadowing Password for %v failed: %v", provider, err)
		return cfgItem
	}
	if parsed.User == nil {
		return cfgItem
	}
	// splice "******" between the last ":" before "@" and the "@" itself,
	// preserving the rest of the original string byte-for-byte
	atIdx := strings.Index(cfgItem, "@")
	if atIdx <= 0 {
		return cfgItem
	}
	colonIdx := strings.LastIndex(cfgItem[:atIdx], ":")
	if colonIdx <= 0 {
		return cfgItem
	}
	return cfgItem[:colonIdx+1] + "******" + cfgItem[atIdx:]
}
// shadowPassword masks credentials in a provider connection string before it
// is displayed, dispatching on the provider's config-string format.
func shadowPassword(provider, cfgItem string) string {
	switch provider {
	case "redis":
		// comma-separated key=value list
		return shadowPasswordKV(cfgItem, ",")
	case "mysql":
		// DSN form: root:@tcp(localhost:3306)/macaron?charset=utf8
		if atIdx := strings.Index(cfgItem, "@"); atIdx > 0 {
			if colonIdx := strings.Index(cfgItem[:atIdx], ":"); colonIdx > 0 {
				return cfgItem[:colonIdx+1] + "******" + cfgItem[atIdx:]
			}
		}
		return cfgItem
	case "postgres":
		// URL form: postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full
		if strings.HasPrefix(cfgItem, "postgres://") {
			return shadowURL(provider, cfgItem)
		}
		// key=value form: user=jiahuachen dbname=macaron port=5432 sslmode=disable
		return shadowPasswordKV(cfgItem, " ")
	case "couchbase":
		return shadowURL(provider, cfgItem)
	}
	// unknown providers pass through unchanged
	return cfgItem
}
// Config show admin config page
func Config(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("admin.config_summary")
@ -150,8 +91,6 @@ func Config(ctx *context.Context) {
ctx.Data["CacheAdapter"] = setting.CacheService.Adapter
ctx.Data["CacheInterval"] = setting.CacheService.Interval
ctx.Data["CacheConn"] = shadowPassword(setting.CacheService.Adapter, setting.CacheService.Conn)
ctx.Data["CacheItemTTL"] = setting.CacheService.TTL
sessionCfg := setting.SessionConfig
@ -169,7 +108,7 @@ func Config(ctx *context.Context) {
sessionCfg.Secure = realSession.Secure
sessionCfg.Domain = realSession.Domain
}
sessionCfg.ProviderConfig = shadowPassword(sessionCfg.Provider, sessionCfg.ProviderConfig)
sessionCfg.ProviderConfig = ""
ctx.Data["SessionConfig"] = sessionCfg
ctx.Data["Git"] = setting.Git

View File

@ -111,16 +111,10 @@ func checkDatabase(ctx context.Context, checks checks) status {
}
if setting.Database.Type.IsSQLite3() && st.Status == pass {
if !setting.EnableSQLite3 {
if _, err := os.Stat(setting.Database.Path); err != nil {
st.Status = fail
st.Time = getCheckTime()
log.Error("SQLite3 health check failed with error: %v", "this Gitea binary is built without SQLite3 enabled")
} else {
if _, err := os.Stat(setting.Database.Path); err != nil {
st.Status = fail
st.Time = getCheckTime()
log.Error("SQLite3 file exists check failed with error: %v", err)
}
log.Error("SQLite3 file exists check failed with error: %v", err)
}
}

View File

@ -265,8 +265,13 @@ func handleSettingsPostMirror(ctx *context.Context) {
handleSettingRemoteAddrError(ctx, err, form)
return
}
if u.User != nil && form.MirrorPassword == "" && form.MirrorUsername == u.User.Username() {
form.MirrorPassword, _ = u.User.Password()
if u.User != nil {
if form.MirrorUsername == "" {
form.MirrorUsername = u.User.Username()
}
if form.MirrorPassword == "" && form.MirrorUsername == u.User.Username() {
form.MirrorPassword, _ = u.User.Password()
}
}
address, err := git.ParseRemoteAddr(form.MirrorAddress, form.MirrorUsername, form.MirrorPassword)

View File

@ -13,15 +13,18 @@ import (
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unittest"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/gitrepo"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/test"
"code.gitea.io/gitea/modules/web"
"code.gitea.io/gitea/services/context"
"code.gitea.io/gitea/services/contexttest"
"code.gitea.io/gitea/services/forms"
mirror_service "code.gitea.io/gitea/services/mirror"
repo_service "code.gitea.io/gitea/services/repository"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAddReadOnlyDeployKey(t *testing.T) {
@ -386,3 +389,45 @@ func TestDeleteTeam(t *testing.T) {
assert.False(t, repo_service.HasRepository(t.Context(), team, re.ID))
}
// TestHandleSettingsPostMirrorPreservesExistingUsername verifies that posting
// the mirror settings form with a new password but no username keeps the
// username already stored in the remote URL, while updating the password.
func TestHandleSettingsPostMirrorPreservesExistingUsername(t *testing.T) {
	defer test.MockVariableValue(&setting.Mirror.Enabled, true)()
	unittest.PrepareTestEnv(t)

	// Use the existing fixture mirror repo (org3/repo5) which has a git repo on disk.
	mirrorRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 5})
	mirror := unittest.AssertExistsAndLoadBean(t, &repo_model.Mirror{RepoID: 5})
	// Seed the remote with credentials that the form submission should preserve.
	require.NoError(t, mirror_service.UpdateAddress(t.Context(), mirror, "https://existing-user:existing-password@example.com/user2/repo1.git"))

	user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
	ctx, _ := contexttest.MockContext(t, mirrorRepo.Link()+"/settings")
	contexttest.LoadUser(t, ctx, user.ID)
	contexttest.LoadRepo(t, ctx, mirrorRepo.ID)

	// Submit the form with a password only — no MirrorUsername.
	web.SetForm(ctx, &forms.RepoSettingForm{
		Interval:       "8h",
		MirrorAddress:  "https://example.com/user2/repo1.git",
		MirrorPassword: "updated-password",
	})
	handleSettingsPostMirror(ctx)
	assert.Equal(t, http.StatusSeeOther, ctx.Resp.WrittenStatus())

	// The displayed/stored addresses must not leak credentials.
	updatedMirror := unittest.AssertExistsAndLoadBean(t, &repo_model.Mirror{RepoID: mirrorRepo.ID})
	assert.Equal(t, "https://example.com/user2/repo1.git", updatedMirror.RemoteAddress)
	updatedRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: mirrorRepo.ID})
	assert.Equal(t, "https://example.com/user2/repo1.git", updatedRepo.OriginalURL)

	// The on-disk git remote keeps the old username with the new password.
	remoteURL, err := gitrepo.GitRemoteGetURL(t.Context(), updatedRepo, updatedMirror.GetRemoteName())
	require.NoError(t, err)
	require.NotNil(t, remoteURL.User)
	assert.Equal(t, "existing-user", remoteURL.User.Username())
	password, ok := remoteURL.User.Password()
	require.True(t, ok)
	assert.Equal(t, "updated-password", password)
}

View File

@ -244,8 +244,6 @@
<dd>{{.CacheInterval}} {{ctx.Locale.Tr "tool.raw_seconds"}}</dd>
{{end}}
{{if .CacheConn}}
<dt>{{ctx.Locale.Tr "admin.config.cache_conn"}}</dt>
<dd>{{.CacheConn}}</dd>
<dt>{{ctx.Locale.Tr "admin.config.cache_item_ttl"}}</dt>
<dd>{{.CacheItemTTL}}</dd>
{{end}}
@ -266,8 +264,6 @@
<dl class="admin-dl-horizontal">
<dt>{{ctx.Locale.Tr "admin.config.session_provider"}}</dt>
<dd>{{.SessionConfig.Provider}}</dd>
<dt>{{ctx.Locale.Tr "admin.config.provider_config"}}</dt>
<dd>{{if .SessionConfig.ProviderConfig}}{{.SessionConfig.ProviderConfig}}{{else}}-{{end}}</dd>
<dt>{{ctx.Locale.Tr "admin.config.cookie_name"}}</dt>
<dd>{{.SessionConfig.CookieName}}</dd>
<dt>{{ctx.Locale.Tr "admin.config.gc_interval_time"}}</dt>

View File

@ -25510,6 +25510,21 @@
"type": "string",
"x-go-name": "MirrorInterval"
},
"mirror_password": {
"description": "authentication password for the remote repository (mirrors)",
"type": "string",
"x-go-name": "MirrorPassword"
},
"mirror_token": {
"description": "authentication token for the remote repository (mirrors)",
"type": "string",
"x-go-name": "MirrorToken"
},
"mirror_username": {
"description": "authentication username for the remote repository (mirrors)",
"type": "string",
"x-go-name": "MirrorUsername"
},
"name": {
"description": "name of the repository",
"type": "string",

View File

@ -5738,6 +5738,21 @@
"type": "string",
"x-go-name": "MirrorInterval"
},
"mirror_password": {
"description": "authentication password for the remote repository (mirrors)",
"type": "string",
"x-go-name": "MirrorPassword"
},
"mirror_token": {
"description": "authentication token for the remote repository (mirrors)",
"type": "string",
"x-go-name": "MirrorToken"
},
"mirror_username": {
"description": "authentication username for the remote repository (mirrors)",
"type": "string",
"x-go-name": "MirrorUsername"
},
"name": {
"description": "name of the repository",
"type": "string",

View File

@ -15,9 +15,12 @@ import (
unit_model "code.gitea.io/gitea/models/unit"
"code.gitea.io/gitea/models/unittest"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/gitrepo"
api "code.gitea.io/gitea/modules/structs"
mirror_service "code.gitea.io/gitea/services/mirror"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// getRepoEditOptionFromRepo gets the options for an existing repo exactly as is
@ -432,5 +435,56 @@ func TestAPIRepoEdit(t *testing.T) {
DefaultDeleteBranchAfterMerge: &bFalse,
}).AddTokenAuth(token2)
_ = MakeRequest(t, req, http.StatusOK)
// Test updating mirror password without changing the existing username
ctx := t.Context()
mirrorRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 5})
mirror := unittest.AssertExistsAndLoadBean(t, &repo_model.Mirror{RepoID: 5})
newPassword := "updated-password"
require.NoError(t, mirror_service.UpdateAddress(ctx, mirror, "https://existing-user:existing-password@example.com/user2/repo1.git"))
req = NewRequestWithJSON(t, "PATCH", fmt.Sprintf("/api/v1/repos/%s/%s", mirrorRepo.OwnerName, mirrorRepo.Name), &api.EditRepoOption{
MirrorPassword: &newPassword,
}).AddTokenAuth(token2)
MakeRequest(t, req, http.StatusOK)
updatedMirror := unittest.AssertExistsAndLoadBean(t, &repo_model.Mirror{RepoID: mirrorRepo.ID})
assert.Equal(t, "https://example.com/user2/repo1.git", updatedMirror.RemoteAddress)
updatedRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: mirrorRepo.ID})
assert.Equal(t, "https://example.com/user2/repo1.git", updatedRepo.OriginalURL)
remoteURL, err := gitrepo.GitRemoteGetURL(ctx, updatedRepo, updatedMirror.GetRemoteName())
require.NoError(t, err)
require.NotNil(t, remoteURL.User)
assert.Equal(t, "existing-user", remoteURL.User.Username())
password, ok := remoteURL.User.Password()
require.True(t, ok)
assert.Equal(t, newPassword, password)
// Test updating mirror token without guessing a username
token := "mirror-token-value"
require.NoError(t, mirror_service.UpdateAddress(ctx, mirror, "https://example.com/user2/repo1.git"))
req = NewRequestWithJSON(t, "PATCH", fmt.Sprintf("/api/v1/repos/%s/%s", mirrorRepo.OwnerName, mirrorRepo.Name), &api.EditRepoOption{
MirrorToken: &token,
}).AddTokenAuth(token2)
MakeRequest(t, req, http.StatusOK)
updatedMirror = unittest.AssertExistsAndLoadBean(t, &repo_model.Mirror{RepoID: mirrorRepo.ID})
assert.Equal(t, "https://example.com/user2/repo1.git", updatedMirror.RemoteAddress)
updatedRepo = unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: mirrorRepo.ID})
assert.Equal(t, "https://example.com/user2/repo1.git", updatedRepo.OriginalURL)
remoteURL, err = gitrepo.GitRemoteGetURL(ctx, updatedRepo, updatedMirror.GetRemoteName())
require.NoError(t, err)
require.NotNil(t, remoteURL.User)
assert.Empty(t, remoteURL.User.Username())
password, ok = remoteURL.User.Password()
require.True(t, ok)
assert.Equal(t, token, password)
})
}

View File

@ -11,7 +11,6 @@ import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"regexp"
"sort"
@ -26,7 +25,6 @@ import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/testlogger"
"code.gitea.io/gitea/modules/util"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -55,7 +53,7 @@ func availableVersions() ([]string, error) {
return nil, err
}
defer migrationsDir.Close()
versionRE, err := regexp.Compile("gitea-v(?P<version>.+)" + regexp.QuoteMeta("."+setting.Database.Type.String()+".sql.gz"))
versionRE, err := regexp.Compile("gitea-v(?P<version>.+)" + regexp.QuoteMeta("."+string(setting.Database.Type)+".sql.gz"))
if err != nil {
return nil, err
}
@ -64,7 +62,7 @@ func availableVersions() ([]string, error) {
if err != nil {
return nil, err
}
versions := []string{}
var versions []string
for _, filename := range filenames {
if versionRE.MatchString(filename) {
substrings := versionRE.FindStringSubmatch(filename)
@ -76,11 +74,8 @@ func availableVersions() ([]string, error) {
}
func readSQLFromFile(version string) (string, error) {
filename := filepath.Join(setting.GetGiteaTestSourceRoot(), "tests/integration/migration-test", fmt.Sprintf("gitea-v%s.%s.sql.gz", version, setting.Database.Type))
if _, err := os.Stat(filename); os.IsNotExist(err) {
return "", nil
}
filename := fmt.Sprintf("tests/integration/migration-test/gitea-v%s.%s.sql.gz", version, setting.Database.Type)
filename = filepath.Join(setting.GetGiteaTestSourceRoot(), filename)
file, err := os.Open(filename)
if err != nil {
@ -106,134 +101,51 @@ func restoreOldDB(t *testing.T, version string) {
require.NoError(t, err)
require.NotEmpty(t, data, "No data found for %s version: %s", setting.Database.Type, version)
switch {
case setting.Database.Type.IsSQLite3():
util.Remove(setting.Database.Path)
err := os.MkdirAll(path.Dir(setting.Database.Path), os.ModePerm)
assert.NoError(t, err)
cleanup, err := unittest.ResetTestDatabase()
require.NoError(t, err)
_ = cleanup // no clean up yet (not needed at the moment)
db, err := sql.Open("sqlite3", fmt.Sprintf("file:%s?cache=shared&mode=rwc&_busy_timeout=%d&_txlock=immediate", setting.Database.Path, setting.Database.Timeout))
assert.NoError(t, err)
defer db.Close()
connOpts := db.GlobalConnOptions()
_, err = db.Exec(data)
assert.NoError(t, err)
db.Close()
case setting.Database.Type.IsMySQL():
db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s)/",
setting.Database.User, setting.Database.Passwd, setting.Database.Host))
assert.NoError(t, err)
defer db.Close()
_, err = db.Exec("DROP DATABASE IF EXISTS " + setting.Database.Name)
assert.NoError(t, err)
_, err = db.Exec("CREATE DATABASE IF NOT EXISTS " + setting.Database.Name)
assert.NoError(t, err)
db.Close()
db, err = sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?multiStatements=true",
setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.Name))
assert.NoError(t, err)
defer db.Close()
_, err = db.Exec(data)
assert.NoError(t, err)
db.Close()
case setting.Database.Type.IsPostgreSQL():
var db *sql.DB
var err error
if setting.Database.Host[0] == '/' {
db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@/?sslmode=%s&host=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.SSLMode, setting.Database.Host))
assert.NoError(t, err)
} else {
db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/?sslmode=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.SSLMode))
assert.NoError(t, err)
if !connOpts.Type.IsMSSQL() {
if connOpts.Type.IsMySQL() {
connOpts.Database += "?multiStatements=true"
}
defer db.Close()
driver, connStr, err := db.ConnStr(connOpts)
require.NoError(t, err)
_, err = db.Exec("DROP DATABASE IF EXISTS " + setting.Database.Name)
assert.NoError(t, err)
sqlDB, err := sql.Open(driver, connStr)
require.NoError(t, err)
defer sqlDB.Close()
_, err = db.Exec("CREATE DATABASE " + setting.Database.Name)
assert.NoError(t, err)
db.Close()
// Check if we need to setup a specific schema
if len(setting.Database.Schema) != 0 {
if setting.Database.Host[0] == '/' {
db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@/%s?sslmode=%s&host=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Name, setting.Database.SSLMode, setting.Database.Host))
} else {
db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.Name, setting.Database.SSLMode))
}
require.NoError(t, err)
defer db.Close()
schrows, err := db.Query(fmt.Sprintf("SELECT 1 FROM information_schema.schemata WHERE schema_name = '%s'", setting.Database.Schema))
require.NoError(t, err)
require.NotEmpty(t, schrows)
if !schrows.Next() {
// Create and setup a DB schema
_, err = db.Exec("CREATE SCHEMA " + setting.Database.Schema)
assert.NoError(t, err)
}
schrows.Close()
// Make the user's default search path the created schema; this will affect new connections
_, err = db.Exec(fmt.Sprintf(`ALTER USER "%s" SET search_path = %s`, setting.Database.User, setting.Database.Schema))
assert.NoError(t, err)
db.Close()
}
if setting.Database.Host[0] == '/' {
db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@/%s?sslmode=%s&host=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Name, setting.Database.SSLMode, setting.Database.Host))
} else {
db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.Name, setting.Database.SSLMode))
}
assert.NoError(t, err)
defer db.Close()
_, err = db.Exec(data)
assert.NoError(t, err)
db.Close()
case setting.Database.Type.IsMSSQL():
host, port := setting.ParseMSSQLHostPort(setting.Database.Host)
db, err := sql.Open("mssql", fmt.Sprintf("server=%s; port=%s; database=%s; user id=%s; password=%s;",
host, port, "master", setting.Database.User, setting.Database.Passwd))
assert.NoError(t, err)
defer db.Close()
_, err = db.Exec("DROP DATABASE IF EXISTS [gitea]")
assert.NoError(t, err)
statements := strings.Split(data, "\nGO\n")
for _, statement := range statements {
if len(statement) > 5 && statement[:5] == "USE [" {
dbname := statement[5 : len(statement)-1]
db.Close()
db, err = sql.Open("mssql", fmt.Sprintf("server=%s; port=%s; database=%s; user id=%s; password=%s;",
host, port, dbname, setting.Database.User, setting.Database.Passwd))
assert.NoError(t, err)
defer db.Close()
}
_, err = db.Exec(statement)
assert.NoError(t, err, "Failure whilst running: %s\nError: %v", statement, err)
}
db.Close()
default:
assert.Failf(t, "unsupported database type", "setting.Database.Type=%v", setting.Database.Type)
_, err = sqlDB.Exec(data)
require.NoError(t, err)
return
}
// MSSQL is special. the test fixture will create the [testgitea] database again, so drop it ahead if it exists
driver, connStr, err := db.ConnStrDefaultDatabase(connOpts)
require.NoError(t, err)
sqlDB, err := sql.Open(driver, connStr)
require.NoError(t, err)
_, err = sqlDB.Exec("DROP DATABASE IF EXISTS [testgitea]")
require.NoError(t, err, "drop existing database testgitea")
for statement := range strings.SplitSeq(data, "\nGO\n") {
if useStmtAfter, ok := strings.CutPrefix(statement, "USE ["); ok {
_ = sqlDB.Close()
dbname := strings.TrimSuffix(useStmtAfter, "]") // extract the database name from "USE [dbname]"
connOpts.Database = dbname
driver, connStr, err := db.ConnStr(connOpts)
require.NoError(t, err)
sqlDB, err = sql.Open(driver, connStr)
require.NoError(t, err)
}
_, err = sqlDB.Exec(statement)
require.NoError(t, err, "SQL Exec failed when running: %s\nError: %v", statement, err)
}
_ = sqlDB.Close()
}
func wrappedMigrate(ctx context.Context, x *xorm.Engine) error {

View File

@ -4,7 +4,7 @@ RUN_MODE = prod
[database]
DB_TYPE = sqlite3
PATH = gitea.db
PATH = gitea-test.db
[indexer]
REPO_INDEXER_ENABLED = true

View File

@ -4,10 +4,7 @@
package tests
import (
"database/sql"
"fmt"
"path/filepath"
"strings"
"testing"
"code.gitea.io/gitea/models/db"
@ -40,97 +37,14 @@ func InitIntegrationTest() error {
}
setting.LoadDBSetting()
if err := storage.Init(); err != nil {
cleanupDb, err := unittest.ResetTestDatabase()
if err != nil {
return err
}
_ = cleanupDb // no clean up yet (not really needed at the moment)
switch {
case setting.Database.Type.IsMySQL():
{
connType := util.Iif(strings.HasPrefix(setting.Database.Host, "/"), "unix", "tcp")
db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@%s(%s)/",
setting.Database.User, setting.Database.Passwd, connType, setting.Database.Host))
if err != nil {
return err
}
defer db.Close()
if _, err = db.Exec("CREATE DATABASE IF NOT EXISTS " + setting.Database.Name); err != nil {
return err
}
}
case setting.Database.Type.IsPostgreSQL():
openPostgreSQL := func() (*sql.DB, error) {
if strings.HasPrefix(setting.Database.Host, "/") {
return sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@/%s?sslmode=%s&host=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Name, setting.Database.SSLMode, setting.Database.Host))
}
return sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=%s",
setting.Database.User, setting.Database.Passwd, setting.Database.Host, setting.Database.Name, setting.Database.SSLMode))
}
// create database
{
db, err := openPostgreSQL()
if err != nil {
return err
}
defer db.Close()
dbRows, err := db.Query(fmt.Sprintf("SELECT 1 FROM pg_database WHERE datname = '%s'", setting.Database.Name))
if err != nil {
return err
}
defer dbRows.Close()
if !dbRows.Next() {
if _, err = db.Exec("CREATE DATABASE " + setting.Database.Name); err != nil {
return err
}
}
// Check if we need to set up a specific schema
if setting.Database.Schema == "" {
break
}
db.Close()
}
// create schema
{
db, err := openPostgreSQL()
if err != nil {
return err
}
defer db.Close()
schemaRows, err := db.Query(fmt.Sprintf("SELECT 1 FROM information_schema.schemata WHERE schema_name = '%s'", setting.Database.Schema))
if err != nil {
return err
}
defer schemaRows.Close()
if !schemaRows.Next() {
// Create and set up a DB schema
if _, err = db.Exec("CREATE SCHEMA " + setting.Database.Schema); err != nil {
return err
}
}
}
case setting.Database.Type.IsMSSQL():
{
host, port := setting.ParseMSSQLHostPort(setting.Database.Host)
db, err := sql.Open("mssql", fmt.Sprintf("server=%s; port=%s; database=%s; user id=%s; password=%s;",
host, port, "master", setting.Database.User, setting.Database.Passwd))
if err != nil {
return err
}
defer db.Close()
if _, err = db.Exec(fmt.Sprintf("If(db_id(N'%s') IS NULL) BEGIN CREATE DATABASE %s; END;", setting.Database.Name, setting.Database.Name)); err != nil {
return err
}
}
case setting.Database.Type.IsSQLite3():
default:
return fmt.Errorf("unsupported database type: %s", setting.Database.Type)
if err := storage.Init(); err != nil {
return err
}
routers.InitWebInstalled(graceful.GetManager().HammerContext())

View File

@ -24,6 +24,8 @@
font-family: var(--fonts-monospace);
font-size: 12px;
max-height: 90vh;
flex: 1;
min-height: 0;
}
.code-editor-container .cm-editor,
@ -31,24 +33,22 @@
border-radius: 0 0 var(--border-radius) var(--border-radius);
}
.code-editor-container .cm-content,
.code-editor-container .cm-gutter {
min-height: 200px;
}
.code-editor-container .cm-scroller {
overflow: auto;
line-height: var(--line-height-code);
flex: 1;
min-height: 0;
}
.code-editor-container .cm-content {
align-self: stretch;
padding: 0;
}
.code-editor-container .cm-content * {
caret-color: inherit;
}
.code-editor-container .cm-content {
padding: 0;
}
.code-editor-container .cm-cursor,
.code-editor-container .cm-dropCursor {
border-left-color: var(--color-caret);
@ -341,6 +341,8 @@
.code-editor-container {
position: relative;
min-height: 90vh;
display: flex;
flex-direction: column;
}
.cm-command-palette {

View File

@ -7,7 +7,7 @@ import type {PaletteCommand} from './command-palette.ts';
import {contextMenu, collectSymbols, selectAllOccurrences} from './context-menu.ts';
import {createJsonLinter, createSyntaxErrorLinter} from './linter.ts';
import {clickableUrls, goToDefinitionAt, trimTrailingWhitespaceFromView} from './utils.ts';
import type {LanguageDescription} from '@codemirror/language';
import type {LanguageDescription, LanguageSupport} from '@codemirror/language';
import type {Compartment, Extension} from '@codemirror/state';
import type {EditorView, ViewUpdate} from '@codemirror/view';
@ -295,16 +295,19 @@ export async function createCodeEditor(textarea: HTMLTextAreaElement, filenameIn
return editor;
}
// files that are JSONC despite having a .json extension
const jsoncFilesRegex = /^([jt]sconfig.*|devcontainer)\.json$/;
// files that the JSON parser is too strict for (comments, trailing commas)
const jsoncFilesRegex = /^([jt]sconfig.*|devcontainer)\.json$|\.(jsonc|json5)$/i;
async function getLinterExtension(cm: CodemirrorModules, filename: string, loadedLang: {language: unknown} | null): Promise<Extension> {
const ext = extname(filename).toLowerCase();
if (ext === '.json' || ext === '.map') {
async function getLinterExtension(cm: CodemirrorModules, filename: string, loadedLang: LanguageSupport | null): Promise<Extension> {
if (!loadedLang) return [];
const lang = loadedLang.language;
// StreamLanguage (legacy modes) don't produce Lezer error nodes
if (lang instanceof cm.language.StreamLanguage) return [];
if (lang.name === 'json') {
return jsoncFilesRegex.test(filename) ? [] : [cm.lint.lintGutter(), await createJsonLinter(cm)];
}
// StreamLanguage (legacy modes) don't produce Lezer error nodes
if (!loadedLang || loadedLang.language instanceof cm.language.StreamLanguage) return [];
// markdown's parser emits no error nodes, and nested code-fence overlays aren't traversed
if (lang.name === 'markdown') return [];
return [cm.lint.lintGutter(), createSyntaxErrorLinter(cm)];
}