2014-04-13 07:57:42 +02:00
|
|
|
// Copyright 2014 The Gogs Authors. All rights reserved.
|
2019-02-08 17:45:43 +01:00
|
|
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
2022-11-27 19:20:29 +01:00
|
|
|
// SPDX-License-Identifier: MIT
|
2014-04-13 07:57:42 +02:00
|
|
|
|
|
|
|
package user
|
|
|
|
|
|
|
|
import (
|
2014-11-23 08:33:47 +01:00
|
|
|
"bytes"
|
2014-04-13 07:57:42 +02:00
|
|
|
"fmt"
|
2021-04-05 17:30:52 +02:00
|
|
|
"net/http"
|
2019-12-02 04:50:36 +01:00
|
|
|
"regexp"
|
2023-09-07 11:37:47 +02:00
|
|
|
"slices"
|
2017-12-04 05:39:01 +01:00
|
|
|
"sort"
|
2019-12-02 04:50:36 +01:00
|
|
|
"strconv"
|
2019-01-23 05:10:38 +01:00
|
|
|
"strings"
|
2014-04-13 07:57:42 +02:00
|
|
|
|
2022-08-25 04:31:57 +02:00
|
|
|
activities_model "code.gitea.io/gitea/models/activities"
|
2021-12-10 09:14:24 +01:00
|
|
|
asymkey_model "code.gitea.io/gitea/models/asymkey"
|
2021-09-24 13:32:56 +02:00
|
|
|
"code.gitea.io/gitea/models/db"
|
2024-07-28 17:11:40 +02:00
|
|
|
git_model "code.gitea.io/gitea/models/git"
|
2022-04-08 11:11:15 +02:00
|
|
|
issues_model "code.gitea.io/gitea/models/issues"
|
2022-03-29 08:29:02 +02:00
|
|
|
"code.gitea.io/gitea/models/organization"
|
2024-11-24 09:18:57 +01:00
|
|
|
"code.gitea.io/gitea/models/renderhelper"
|
2021-12-10 02:27:50 +01:00
|
|
|
repo_model "code.gitea.io/gitea/models/repo"
|
2021-11-09 20:57:58 +01:00
|
|
|
"code.gitea.io/gitea/models/unit"
|
2021-11-24 10:49:20 +01:00
|
|
|
user_model "code.gitea.io/gitea/models/user"
|
2016-11-10 17:24:48 +01:00
|
|
|
"code.gitea.io/gitea/modules/base"
|
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 08:28:53 +02:00
|
|
|
"code.gitea.io/gitea/modules/container"
|
2020-02-29 07:52:05 +01:00
|
|
|
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
|
2019-10-08 19:55:16 +02:00
|
|
|
"code.gitea.io/gitea/modules/log"
|
2019-12-15 15:20:08 +01:00
|
|
|
"code.gitea.io/gitea/modules/markup/markdown"
|
2024-02-29 19:52:49 +01:00
|
|
|
"code.gitea.io/gitea/modules/optional"
|
2016-11-10 17:24:48 +01:00
|
|
|
"code.gitea.io/gitea/modules/setting"
|
2024-12-08 13:44:17 +01:00
|
|
|
"code.gitea.io/gitea/modules/util"
|
2023-04-27 08:06:45 +02:00
|
|
|
"code.gitea.io/gitea/routers/web/feed"
|
2024-12-08 13:44:17 +01:00
|
|
|
"code.gitea.io/gitea/routers/web/shared/user"
|
2024-02-27 08:12:22 +01:00
|
|
|
"code.gitea.io/gitea/services/context"
|
2024-11-29 18:53:49 +01:00
|
|
|
feed_service "code.gitea.io/gitea/services/feed"
|
2020-05-15 00:55:43 +02:00
|
|
|
issue_service "code.gitea.io/gitea/services/issue"
|
2020-04-10 13:26:37 +02:00
|
|
|
pull_service "code.gitea.io/gitea/services/pull"
|
2017-12-26 00:25:16 +01:00
|
|
|
|
2019-04-14 18:43:56 +02:00
|
|
|
"github.com/keybase/go-crypto/openpgp"
|
|
|
|
"github.com/keybase/go-crypto/openpgp/armor"
|
2020-03-31 09:47:00 +02:00
|
|
|
"xorm.io/builder"
|
2014-04-13 07:57:42 +02:00
|
|
|
)
|
|
|
|
|
2014-06-23 05:11:12 +02:00
|
|
|
// Template paths rendered by the handlers in this file.
const (
	tplDashboard base.TplName = "user/dashboard/dashboard"
	tplIssues base.TplName = "user/dashboard/issues"
	tplMilestones base.TplName = "user/dashboard/milestones"
	tplProfile base.TplName = "user/profile"
)
|
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// getDashboardContextUser finds out which context user dashboard is being viewed as .
|
2021-11-24 10:49:20 +01:00
|
|
|
func getDashboardContextUser(ctx *context.Context) *user_model.User {
|
2022-03-22 08:03:22 +01:00
|
|
|
ctxUser := ctx.Doer
|
2024-06-19 00:32:45 +02:00
|
|
|
orgName := ctx.PathParam(":org")
|
2014-07-27 05:53:16 +02:00
|
|
|
if len(orgName) > 0 {
|
2021-11-19 12:41:40 +01:00
|
|
|
ctxUser = ctx.Org.Organization.AsUser()
|
|
|
|
ctx.Data["Teams"] = ctx.Org.Teams
|
2015-08-25 16:58:34 +02:00
|
|
|
}
|
|
|
|
ctx.Data["ContextUser"] = ctxUser
|
|
|
|
|
2023-09-16 16:39:12 +02:00
|
|
|
orgs, err := organization.GetUserOrgsList(ctx, ctx.Doer)
|
2021-06-14 14:18:09 +02:00
|
|
|
if err != nil {
|
|
|
|
ctx.ServerError("GetUserOrgsList", err)
|
2015-08-25 16:58:34 +02:00
|
|
|
return nil
|
|
|
|
}
|
2021-06-14 14:18:09 +02:00
|
|
|
ctx.Data["Orgs"] = orgs
|
2015-08-25 16:58:34 +02:00
|
|
|
|
|
|
|
return ctxUser
|
|
|
|
}
|
|
|
|
|
2020-08-17 05:07:38 +02:00
|
|
|
// Dashboard render the dashboard page
|
2016-03-11 17:56:52 +01:00
|
|
|
func Dashboard(ctx *context.Context) {
|
2016-03-10 05:56:03 +01:00
|
|
|
ctxUser := getDashboardContextUser(ctx)
|
2015-08-25 16:58:34 +02:00
|
|
|
if ctx.Written() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-02-24 22:15:10 +01:00
|
|
|
var (
|
|
|
|
date = ctx.FormString("date")
|
|
|
|
page = ctx.FormInt("page")
|
|
|
|
)
|
|
|
|
|
|
|
|
// Make sure page number is at least 1. Will be posted to ctx.Data.
|
|
|
|
if page <= 1 {
|
|
|
|
page = 1
|
|
|
|
}
|
|
|
|
|
2024-02-14 22:48:45 +01:00
|
|
|
ctx.Data["Title"] = ctxUser.DisplayName() + " - " + ctx.Locale.TrString("dashboard")
|
2016-07-23 19:08:22 +02:00
|
|
|
ctx.Data["PageIsDashboard"] = true
|
|
|
|
ctx.Data["PageIsNews"] = true
|
2022-03-29 08:29:02 +02:00
|
|
|
cnt, _ := organization.GetOrganizationCount(ctx, ctxUser)
|
2021-11-22 16:21:55 +01:00
|
|
|
ctx.Data["UserOrgsCount"] = cnt
|
2022-06-04 13:42:17 +02:00
|
|
|
ctx.Data["MirrorsEnabled"] = setting.Mirror.Enabled
|
2023-02-24 22:15:10 +01:00
|
|
|
ctx.Data["Date"] = date
|
2021-10-15 04:35:26 +02:00
|
|
|
|
2021-10-19 06:38:33 +02:00
|
|
|
var uid int64
|
|
|
|
if ctxUser != nil {
|
|
|
|
uid = ctxUser.ID
|
|
|
|
}
|
|
|
|
|
2023-07-04 20:36:08 +02:00
|
|
|
ctx.PageData["dashboardRepoList"] = map[string]any{
|
2021-10-15 04:35:26 +02:00
|
|
|
"searchLimit": setting.UI.User.RepoPagingNum,
|
2021-10-19 06:38:33 +02:00
|
|
|
"uid": uid,
|
2021-10-15 04:35:26 +02:00
|
|
|
}
|
2020-12-27 20:58:03 +01:00
|
|
|
|
2021-03-04 23:59:13 +01:00
|
|
|
if setting.Service.EnableUserHeatmap {
|
2023-09-25 15:17:37 +02:00
|
|
|
data, err := activities_model.GetUserHeatmapDataByUserTeam(ctx, ctxUser, ctx.Org.Team, ctx.Doer)
|
2020-11-18 23:00:16 +01:00
|
|
|
if err != nil {
|
2020-12-27 20:58:03 +01:00
|
|
|
ctx.ServerError("GetUserHeatmapDataByUserTeam", err)
|
2020-11-18 23:00:16 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
ctx.Data["HeatmapData"] = data
|
2023-04-17 20:26:01 +02:00
|
|
|
ctx.Data["HeatmapTotalContributions"] = activities_model.GetTotalContributionsInHeatmap(data)
|
2020-11-18 23:00:16 +01:00
|
|
|
}
|
2016-07-23 19:08:22 +02:00
|
|
|
|
2024-11-29 18:53:49 +01:00
|
|
|
feeds, count, err := feed_service.GetFeeds(ctx, activities_model.GetFeedsOptions{
|
2018-02-21 11:55:34 +01:00
|
|
|
RequestedUser: ctxUser,
|
2020-12-27 20:58:03 +01:00
|
|
|
RequestedTeam: ctx.Org.Team,
|
2022-03-22 08:03:22 +01:00
|
|
|
Actor: ctx.Doer,
|
2017-08-23 03:30:54 +02:00
|
|
|
IncludePrivate: true,
|
|
|
|
OnlyPerformedBy: false,
|
|
|
|
IncludeDeleted: false,
|
2021-08-11 02:31:13 +02:00
|
|
|
Date: ctx.FormString("date"),
|
2023-02-24 22:15:10 +01:00
|
|
|
ListOptions: db.ListOptions{
|
|
|
|
Page: page,
|
|
|
|
PageSize: setting.UI.FeedPagingNum,
|
|
|
|
},
|
2017-08-23 03:30:54 +02:00
|
|
|
})
|
2022-03-13 17:40:47 +01:00
|
|
|
if err != nil {
|
|
|
|
ctx.ServerError("GetFeeds", err)
|
2014-04-13 07:57:42 +02:00
|
|
|
return
|
|
|
|
}
|
2021-10-16 16:21:16 +02:00
|
|
|
|
2023-02-24 22:15:10 +01:00
|
|
|
ctx.Data["Feeds"] = feeds
|
|
|
|
|
|
|
|
pager := context.NewPagination(int(count), setting.UI.FeedPagingNum, page, 5)
|
2024-03-16 13:07:56 +01:00
|
|
|
pager.AddParamString("date", date)
|
2023-02-24 22:15:10 +01:00
|
|
|
ctx.Data["Page"] = pager
|
|
|
|
|
2021-04-05 17:30:52 +02:00
|
|
|
ctx.HTML(http.StatusOK, tplDashboard)
|
2014-04-13 07:57:42 +02:00
|
|
|
}
|
|
|
|
|
2019-12-15 15:20:08 +01:00
|
|
|
// Milestones render the user milestones page.
//
// It lists milestones across all repositories the context user (or the
// selected org/team) can access, optionally narrowed by a bracketed repo-ID
// list ("repos"), a keyword ("q"), open/closed state, and a sort order.
func Milestones(ctx *context.Context) {
	// The page is meaningless when both issues and PRs are disabled instance-wide.
	if unit.TypeIssues.UnitGlobalDisabled() && unit.TypePullRequests.UnitGlobalDisabled() {
		log.Debug("Milestones overview page not available as both issues and pull requests are globally disabled")
		ctx.Status(http.StatusNotFound)
		return
	}

	ctx.Data["Title"] = ctx.Tr("milestones")
	ctx.Data["PageIsMilestonesDashboard"] = true

	ctxUser := getDashboardContextUser(ctx)
	if ctx.Written() {
		return
	}

	// Options describing every repo the doer may see under the context user.
	repoOpts := repo_model.SearchRepoOptions{
		Actor: ctx.Doer,
		OwnerID: ctxUser.ID,
		Private: true,
		AllPublic: false, // Include also all public repositories of users and public organisations
		AllLimited: false, // Include also all public repositories of limited organisations
		Archived: optional.Some(false),
		HasMilestones: optional.Some(true), // Just needs display repos has milestones
	}

	// When viewing as an org team, restrict to repos that team can access.
	if ctxUser.IsOrganization() && ctx.Org.Team != nil {
		repoOpts.TeamID = ctx.Org.Team.ID
	}

	var (
		userRepoCond = repo_model.SearchRepositoryCondition(&repoOpts) // all repo condition user could visit
		repoCond = userRepoCond
		repoIDs []int64

		reposQuery = ctx.FormString("repos")
		isShowClosed = ctx.FormString("state") == "closed"
		sortType = ctx.FormString("sort")
		page = ctx.FormInt("page")
		keyword = ctx.FormTrim("q")
	)

	// Normalize page number; the pager below is 1-based.
	if page <= 1 {
		page = 1
	}

	// Parse the optional "repos" filter, e.g. "[1,2,3]". IDs that are not
	// numeric (or "0") are skipped; the accessibility check happens via the
	// AND with userRepoCond below, not here.
	if len(reposQuery) != 0 {
		if issueReposQueryPattern.MatchString(reposQuery) {
			// remove "[" and "]" from string
			reposQuery = reposQuery[1 : len(reposQuery)-1]
			// for each ID (delimiter ",") add to int to repoIDs

			for _, rID := range strings.Split(reposQuery, ",") {
				// Ensure nonempty string entries
				if rID != "" && rID != "0" {
					rIDint64, err := strconv.ParseInt(rID, 10, 64)
					// If the repo id specified by query is not parseable or not accessible by user, just ignore it.
					if err == nil {
						repoIDs = append(repoIDs, rIDint64)
					}
				}
			}
			if len(repoIDs) > 0 {
				// Don't just let repoCond = builder.In("id", repoIDs) because user may has no permission on repoIDs
				// But the original repoCond has a limitation
				repoCond = repoCond.And(builder.In("id", repoIDs))
			}
		} else {
			log.Warn("issueReposQueryPattern not match with query")
		}
	}

	// Per-repo milestone counts over ALL accessible repos (not only the
	// filtered subset), used by the repo filter sidebar.
	counts, err := issues_model.CountMilestonesMap(ctx, issues_model.FindMilestoneOptions{
		RepoCond: userRepoCond,
		Name: keyword,
		IsClosed: optional.Some(isShowClosed),
	})
	if err != nil {
		ctx.ServerError("CountMilestonesByRepoIDs", err)
		return
	}

	// The page of milestones to display, restricted by the repo filter.
	milestones, err := db.Find[issues_model.Milestone](ctx, issues_model.FindMilestoneOptions{
		ListOptions: db.ListOptions{
			Page: page,
			PageSize: setting.UI.IssuePagingNum,
		},
		RepoCond: repoCond,
		IsClosed: optional.Some(isShowClosed),
		SortType: sortType,
		Name: keyword,
	})
	if err != nil {
		ctx.ServerError("SearchMilestones", err)
		return
	}

	// All repos the user can see (for attaching Repo to each milestone and
	// for rendering the sidebar list).
	showRepos, _, err := repo_model.SearchRepositoryByCondition(ctx, &repoOpts, userRepoCond, false)
	if err != nil {
		ctx.ServerError("SearchRepositoryByCondition", err)
		return
	}
	sort.Sort(showRepos)

	// Attach each milestone's repository and render its markdown content.
	// Milestones whose repo is not visible are removed in place, so the
	// index only advances when the current element is kept.
	for i := 0; i < len(milestones); {
		for _, repo := range showRepos {
			if milestones[i].RepoID == repo.ID {
				milestones[i].Repo = repo
				break
			}
		}
		if milestones[i].Repo == nil {
			log.Warn("Cannot find milestone %d 's repository %d", milestones[i].ID, milestones[i].RepoID)
			milestones = append(milestones[:i], milestones[i+1:]...)
			continue
		}

		rctx := renderhelper.NewRenderContextRepoComment(ctx, milestones[i].Repo)
		milestones[i].RenderedContent, err = markdown.RenderString(rctx, milestones[i].Content)
		if err != nil {
			ctx.ServerError("RenderString", err)
			return
		}

		// Only load tracked-time totals where the repo has timetracking on.
		if milestones[i].Repo.IsTimetrackerEnabled(ctx) {
			err := milestones[i].LoadTotalTrackedTime(ctx)
			if err != nil {
				ctx.ServerError("LoadTotalTrackedTime", err)
				return
			}
		}
		i++
	}

	// Open/closed counts for the filtered repo set (drives the pager).
	milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(ctx, repoCond, keyword)
	if err != nil {
		ctx.ServerError("GetMilestoneStats", err)
		return
	}

	// Totals across all accessible repos; identical to milestoneStats when
	// no repo filter is active, so skip the second query in that case.
	var totalMilestoneStats *issues_model.MilestonesStats
	if len(repoIDs) == 0 {
		totalMilestoneStats = milestoneStats
	} else {
		totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(ctx, userRepoCond, keyword)
		if err != nil {
			ctx.ServerError("GetMilestoneStats", err)
			return
		}
	}

	// Drop any requested repo IDs the user cannot actually see, and default
	// the selection to all visible repos when no filter was given.
	showRepoIDs := make(container.Set[int64], len(showRepos))
	for _, repo := range showRepos {
		if repo.ID > 0 {
			showRepoIDs.Add(repo.ID)
		}
	}
	if len(repoIDs) == 0 {
		repoIDs = showRepoIDs.Values()
	}
	repoIDs = slices.DeleteFunc(repoIDs, func(v int64) bool {
		return !showRepoIDs.Contains(v)
	})

	// Pager counts come from the filtered stats; "Total" shows the
	// unfiltered totals for the state tabs.
	var pagerCount int
	if isShowClosed {
		ctx.Data["State"] = "closed"
		ctx.Data["Total"] = totalMilestoneStats.ClosedCount
		pagerCount = int(milestoneStats.ClosedCount)
	} else {
		ctx.Data["State"] = "open"
		ctx.Data["Total"] = totalMilestoneStats.OpenCount
		pagerCount = int(milestoneStats.OpenCount)
	}

	ctx.Data["Milestones"] = milestones
	ctx.Data["Repos"] = showRepos
	ctx.Data["Counts"] = counts
	ctx.Data["MilestoneStats"] = milestoneStats
	ctx.Data["SortType"] = sortType
	ctx.Data["Keyword"] = keyword
	ctx.Data["RepoIDs"] = repoIDs
	ctx.Data["IsShowClosed"] = isShowClosed

	// Carry the active filters through pagination links.
	pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5)
	pager.AddParamString("q", keyword)
	pager.AddParamString("repos", reposQuery)
	pager.AddParamString("sort", sortType)
	pager.AddParamString("state", fmt.Sprint(ctx.Data["State"]))
	ctx.Data["Page"] = pager

	ctx.HTML(http.StatusOK, tplMilestones)
}
|
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// Pulls renders the user's pull request overview page
|
|
|
|
func Pulls(ctx *context.Context) {
|
2021-11-09 20:57:58 +01:00
|
|
|
if unit.TypePullRequests.UnitGlobalDisabled() {
|
2021-01-13 05:19:17 +01:00
|
|
|
log.Debug("Pull request overview page not available as it is globally disabled.")
|
2022-03-23 05:54:07 +01:00
|
|
|
ctx.Status(http.StatusNotFound)
|
2021-01-13 05:19:17 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx.Data["Title"] = ctx.Tr("pull_requests")
|
|
|
|
ctx.Data["PageIsPulls"] = true
|
2021-11-09 20:57:58 +01:00
|
|
|
buildIssueOverview(ctx, unit.TypePullRequests)
|
2021-01-13 05:19:17 +01:00
|
|
|
}
|
2019-12-02 04:50:36 +01:00
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// Issues renders the user's issues overview page
|
2016-03-11 17:56:52 +01:00
|
|
|
func Issues(ctx *context.Context) {
|
2021-11-09 20:57:58 +01:00
|
|
|
if unit.TypeIssues.UnitGlobalDisabled() {
|
2021-01-13 05:19:17 +01:00
|
|
|
log.Debug("Issues overview page not available as it is globally disabled.")
|
2022-03-23 05:54:07 +01:00
|
|
|
ctx.Status(http.StatusNotFound)
|
2021-01-13 05:19:17 +01:00
|
|
|
return
|
|
|
|
}
|
2020-01-17 08:34:37 +01:00
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
ctx.Data["Title"] = ctx.Tr("issues")
|
|
|
|
ctx.Data["PageIsIssues"] = true
|
2021-11-09 20:57:58 +01:00
|
|
|
buildIssueOverview(ctx, unit.TypeIssues)
|
2021-01-13 05:19:17 +01:00
|
|
|
}
|
2020-01-17 08:34:37 +01:00
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// issueReposQueryPattern validates the "repos" query parameter: a bracketed,
// comma-separated list of numeric repository IDs, e.g. "[1,2,3]".
// A single trailing comma before "]" is tolerated.
var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`)
|
|
|
|
|
2021-11-09 20:57:58 +01:00
|
|
|
func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
|
2021-01-13 05:19:17 +01:00
|
|
|
// ----------------------------------------------------
|
|
|
|
// Determine user; can be either user or organization.
|
|
|
|
// Return with NotFound or ServerError if unsuccessful.
|
|
|
|
// ----------------------------------------------------
|
2015-08-25 16:58:34 +02:00
|
|
|
|
|
|
|
ctxUser := getDashboardContextUser(ctx)
|
|
|
|
if ctx.Written() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-02-17 08:13:35 +01:00
|
|
|
// Default to recently updated, unlike repository issues list
|
2024-12-08 13:44:17 +01:00
|
|
|
sortType := util.IfZero(ctx.FormString("sort"), "recentupdate")
|
2023-02-17 08:13:35 +01:00
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// --------------------------------------------------------------------------------
|
|
|
|
// Distinguish User from Organization.
|
|
|
|
// Org:
|
|
|
|
// - Remember pre-determined viewType string for later. Will be posted to ctx.Data.
|
|
|
|
// Organization does not have view type and filter mode.
|
|
|
|
// User:
|
2021-08-11 02:31:13 +02:00
|
|
|
// - Use ctx.FormString("type") to determine filterMode.
|
2021-01-13 05:19:17 +01:00
|
|
|
// The type is set when clicking for example "assigned to me" on the overview page.
|
|
|
|
// - Remember either this or a fallback. Will be posted to ctx.Data.
|
|
|
|
// --------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
// TODO: distinguish during routing
|
|
|
|
|
2024-12-08 13:44:17 +01:00
|
|
|
viewType := ctx.FormString("type")
|
|
|
|
var filterMode int
|
2021-01-03 18:29:12 +01:00
|
|
|
switch viewType {
|
|
|
|
case "assigned":
|
2022-06-13 11:37:59 +02:00
|
|
|
filterMode = issues_model.FilterModeAssign
|
2021-01-03 18:29:12 +01:00
|
|
|
case "created_by":
|
2022-06-13 11:37:59 +02:00
|
|
|
filterMode = issues_model.FilterModeCreate
|
2021-01-03 18:29:12 +01:00
|
|
|
case "mentioned":
|
2022-06-13 11:37:59 +02:00
|
|
|
filterMode = issues_model.FilterModeMention
|
2021-01-17 17:34:19 +01:00
|
|
|
case "review_requested":
|
2022-06-13 11:37:59 +02:00
|
|
|
filterMode = issues_model.FilterModeReviewRequested
|
2023-02-25 03:55:50 +01:00
|
|
|
case "reviewed_by":
|
|
|
|
filterMode = issues_model.FilterModeReviewed
|
2022-03-23 23:57:09 +01:00
|
|
|
case "your_repositories":
|
|
|
|
fallthrough
|
2021-01-03 18:29:12 +01:00
|
|
|
default:
|
2022-06-13 11:37:59 +02:00
|
|
|
filterMode = issues_model.FilterModeYourRepositories
|
2019-12-03 07:01:29 +01:00
|
|
|
viewType = "your_repositories"
|
2015-08-25 16:58:34 +02:00
|
|
|
}
|
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// --------------------------------------------------------------------------
|
|
|
|
// Build opts (IssuesOptions), which contains filter information.
|
|
|
|
// Will eventually be used to retrieve issues relevant for the overview page.
|
|
|
|
// Note: Non-final states of opts are used in-between, namely for:
|
|
|
|
// - Keyword search
|
|
|
|
// - Count Issues by repo
|
|
|
|
// --------------------------------------------------------------------------
|
2017-02-14 15:15:18 +01:00
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// Get repository IDs where User/Org/Team has access.
|
2022-03-29 08:29:02 +02:00
|
|
|
var team *organization.Team
|
|
|
|
var org *organization.Organization
|
2021-01-13 05:19:17 +01:00
|
|
|
if ctx.Org != nil {
|
2021-12-29 14:02:12 +01:00
|
|
|
org = ctx.Org.Organization
|
2021-01-13 05:19:17 +01:00
|
|
|
team = ctx.Org.Team
|
2017-02-17 01:58:19 +01:00
|
|
|
}
|
2021-12-29 14:02:12 +01:00
|
|
|
|
|
|
|
isPullList := unitType == unit.TypePullRequests
|
2022-06-13 11:37:59 +02:00
|
|
|
opts := &issues_model.IssuesOptions{
|
2024-03-02 16:42:31 +01:00
|
|
|
IsPull: optional.Some(isPullList),
|
2021-12-29 14:02:12 +01:00
|
|
|
SortType: sortType,
|
2024-03-02 16:42:31 +01:00
|
|
|
IsArchived: optional.Some(false),
|
2021-12-29 14:02:12 +01:00
|
|
|
Org: org,
|
|
|
|
Team: team,
|
2022-03-22 08:03:22 +01:00
|
|
|
User: ctx.Doer,
|
2019-10-08 19:55:16 +02:00
|
|
|
}
|
2024-12-08 13:44:17 +01:00
|
|
|
// Get filter by author id & assignee id
|
|
|
|
// FIXME: this feature doesn't work at the moment, because frontend can't use a "user-remote-search" dropdown directly
|
|
|
|
// the existing "/posters" handlers doesn't work for this case, it is unable to list the related users correctly.
|
|
|
|
// In the future, we need something like github: "author:user1" to accept usernames directly.
|
|
|
|
posterUsername := ctx.FormString("poster")
|
|
|
|
opts.PosterID = user.GetFilterUserIDByName(ctx, posterUsername)
|
|
|
|
// TODO: "assignee" should also use GetFilterUserIDByName in the future to support usernames directly
|
|
|
|
opts.AssigneeID, _ = strconv.ParseInt(ctx.FormString("assignee"), 10, 64)
|
2019-10-08 19:55:16 +02:00
|
|
|
|
2024-04-18 02:16:52 +02:00
|
|
|
isFuzzy := ctx.FormBool("fuzzy")
|
|
|
|
|
2022-03-23 23:57:09 +01:00
|
|
|
// Search all repositories which
|
|
|
|
//
|
|
|
|
// As user:
|
|
|
|
// - Owns the repository.
|
|
|
|
// - Have collaborator permissions in repository.
|
|
|
|
//
|
|
|
|
// As org:
|
|
|
|
// - Owns the repository.
|
|
|
|
//
|
|
|
|
// As team:
|
|
|
|
// - Team org's owns the repository.
|
|
|
|
// - Team has read permission to repository.
|
2022-06-06 10:01:49 +02:00
|
|
|
repoOpts := &repo_model.SearchRepoOptions{
|
2023-08-23 04:29:17 +02:00
|
|
|
Actor: ctx.Doer,
|
2023-08-29 14:46:52 +02:00
|
|
|
OwnerID: ctxUser.ID,
|
2023-08-23 04:29:17 +02:00
|
|
|
Private: true,
|
|
|
|
AllPublic: false,
|
|
|
|
AllLimited: false,
|
2024-02-29 19:52:49 +01:00
|
|
|
Collaborate: optional.None[bool](),
|
2023-08-23 04:29:17 +02:00
|
|
|
UnitType: unitType,
|
2024-02-29 19:52:49 +01:00
|
|
|
Archived: optional.Some(false),
|
2022-03-23 23:57:09 +01:00
|
|
|
}
|
2022-05-16 11:49:17 +02:00
|
|
|
if team != nil {
|
|
|
|
repoOpts.TeamID = team.ID
|
2022-03-23 23:57:09 +01:00
|
|
|
}
|
2023-08-23 04:29:17 +02:00
|
|
|
accessibleRepos := container.Set[int64]{}
|
2023-08-17 19:42:17 +02:00
|
|
|
{
|
2023-10-11 06:24:07 +02:00
|
|
|
ids, _, err := repo_model.SearchRepositoryIDs(ctx, repoOpts)
|
2023-08-17 19:42:17 +02:00
|
|
|
if err != nil {
|
|
|
|
ctx.ServerError("SearchRepositoryIDs", err)
|
|
|
|
return
|
|
|
|
}
|
2023-08-23 04:29:17 +02:00
|
|
|
accessibleRepos.AddMultiple(ids...)
|
2023-08-17 19:42:17 +02:00
|
|
|
opts.RepoIDs = ids
|
|
|
|
if len(opts.RepoIDs) == 0 {
|
|
|
|
// no repos found, don't let the indexer return all repos
|
|
|
|
opts.RepoIDs = []int64{0}
|
|
|
|
}
|
|
|
|
}
|
Include public repos in doer's dashboard for issue search (#28304)
It will fix #28268 .
<img width="1313" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/cb1e07d5-7a12-4691-a054-8278ba255bfc">
<img width="1318" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/4fd60820-97f1-4c2c-a233-d3671a5039e9">
## :warning: BREAKING :warning:
But need to give up some features:
<img width="1312" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/281c0d51-0e7d-473f-bbed-216e2f645610">
However, such abandonment may fix #28055 .
## Backgroud
When the user switches the dashboard context to an org, it means they
want to search issues in the repos that belong to the org. However, when
they switch to themselves, it means all repos they can access because
they may have created an issue in a public repo that they don't own.
<img width="286" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/182dcd5b-1c20-4725-93af-96e8dfae5b97">
It's a confusing design. Think about this: What does "In your
repositories" mean when the user switches to an org? Repos belong to the
user or the org?
Whatever, it has been broken by #26012 and its following PRs. After the
PR, it searches for issues in repos that the dashboard context user owns
or has been explicitly granted access to, so it causes #28268.
## How to fix it
It's not really difficult to fix it. Just extend the repo scope to
search issues when the dashboard context user is the doer. Since the
user may create issues or be mentioned in any public repo, we can just
set `AllPublic` to true, which is already supported by indexers. The DB
condition will also support it in this PR.
But the real difficulty is how to count the search results grouped by
repos. It's something like "search issues with this keyword and those
filters, and return the total number and the top results. **Then, group
all of them by repo and return the counts of each group.**"
<img width="314" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/5206eb20-f8f5-49b9-b45a-1be2fcf679f4">
Before #26012, it was being done in the DB, but it caused the results to
be incomplete (see the description of #26012).
And to keep this, #26012 implement it in an inefficient way, just count
the issues by repo one by one, so it cannot work when `AllPublic` is
true because it's almost impossible to do this for all public repos.
https://github.com/go-gitea/gitea/blob/1bfcdeef4cca0f5509476358e5931c13d37ed1ca/modules/indexer/issues/indexer.go#L318-L338
## Give up unnecessary features
We may can resovle `TODO: use "group by" of the indexer engines to
implement it`, I'm sure it can be done with Elasticsearch, but IIRC,
Bleve and Meilisearch don't support "group by".
And the real question is, does it worth it? Why should we need to know
the counts grouped by repos?
Let me show you my search dashboard on gitea.com.
<img width="1304" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/2bca2d46-6c71-4de1-94cb-0c9af27c62ff">
I never think the long repo list helps anything.
And if we agree to abandon it, things will be much easier. That is this
PR.
## TODO
I know it's important to filter by repos when searching issues. However,
it shouldn't be the way we have it now. It could be implemented like
this.
<img width="1316" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/99ee5f21-cbb5-4dfe-914d-cb796cb79fbe">
The indexers support it well now, but it requires some frontend work,
which I'm not good at. So, I think someone could help do that in another
PR and merge this one to fix the bug first.
Or please block this PR and help to complete it.
Finally, "Switch dashboard context" is also a design that needs
improvement. In my opinion, it can be accomplished by adding filtering
conditions instead of "switching".
2023-12-07 06:26:18 +01:00
|
|
|
if ctx.Doer.ID == ctxUser.ID && filterMode != issues_model.FilterModeYourRepositories {
|
|
|
|
// If the doer is the same as the context user, which means the doer is viewing his own dashboard,
|
|
|
|
// it's not enough to show the repos that the doer owns or has been explicitly granted access to,
|
|
|
|
// because the doer may create issues or be mentioned in any public repo.
|
|
|
|
// So we need search issues in all public repos.
|
|
|
|
opts.AllPublic = true
|
|
|
|
}
|
2022-03-23 23:57:09 +01:00
|
|
|
|
2017-02-14 15:15:18 +01:00
|
|
|
switch filterMode {
|
2022-06-13 11:37:59 +02:00
|
|
|
case issues_model.FilterModeAll:
|
|
|
|
case issues_model.FilterModeYourRepositories:
|
|
|
|
case issues_model.FilterModeAssign:
|
2022-03-22 08:03:22 +01:00
|
|
|
opts.AssigneeID = ctx.Doer.ID
|
2022-06-13 11:37:59 +02:00
|
|
|
case issues_model.FilterModeCreate:
|
2022-03-22 08:03:22 +01:00
|
|
|
opts.PosterID = ctx.Doer.ID
|
2022-06-13 11:37:59 +02:00
|
|
|
case issues_model.FilterModeMention:
|
2022-03-22 08:03:22 +01:00
|
|
|
opts.MentionedID = ctx.Doer.ID
|
2022-06-13 11:37:59 +02:00
|
|
|
case issues_model.FilterModeReviewRequested:
|
2022-03-22 08:03:22 +01:00
|
|
|
opts.ReviewRequestedID = ctx.Doer.ID
|
2023-02-25 03:55:50 +01:00
|
|
|
case issues_model.FilterModeReviewed:
|
|
|
|
opts.ReviewedID = ctx.Doer.ID
|
2021-01-03 18:29:12 +01:00
|
|
|
}
|
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// keyword holds the search term entered into the search field.
|
2021-08-11 02:31:13 +02:00
|
|
|
keyword := strings.Trim(ctx.FormString("q"), " ")
|
2021-01-13 05:19:17 +01:00
|
|
|
ctx.Data["Keyword"] = keyword
|
2020-02-29 07:52:05 +01:00
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// Educated guess: Do or don't show closed issues.
|
2021-08-11 02:31:13 +02:00
|
|
|
isShowClosed := ctx.FormString("state") == "closed"
|
2024-03-02 16:42:31 +01:00
|
|
|
opts.IsClosed = optional.Some(isShowClosed)
|
2020-02-29 07:52:05 +01:00
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// Make sure page number is at least 1. Will be posted to ctx.Data.
|
2021-07-29 03:42:15 +02:00
|
|
|
page := ctx.FormInt("page")
|
2021-01-13 05:19:17 +01:00
|
|
|
if page <= 1 {
|
|
|
|
page = 1
|
|
|
|
}
|
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 08:28:53 +02:00
|
|
|
opts.Paginator = &db.ListOptions{
|
|
|
|
Page: page,
|
|
|
|
PageSize: setting.UI.IssuePagingNum,
|
|
|
|
}
|
2021-01-13 05:19:17 +01:00
|
|
|
|
|
|
|
// Get IDs for labels (a filter option for issues/pulls).
|
|
|
|
// Required for IssuesOptions.
|
2021-08-11 02:31:13 +02:00
|
|
|
selectedLabels := ctx.FormString("labels")
|
2021-01-13 05:19:17 +01:00
|
|
|
if len(selectedLabels) > 0 && selectedLabels != "0" {
|
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 08:28:53 +02:00
|
|
|
var err error
|
2024-03-21 16:07:35 +01:00
|
|
|
opts.LabelIDs, err = base.StringsToInt64s(strings.Split(selectedLabels, ","))
|
2019-01-23 05:10:38 +01:00
|
|
|
if err != nil {
|
2024-03-21 16:07:35 +01:00
|
|
|
ctx.Flash.Error(ctx.Tr("invalid_data", selectedLabels), true)
|
2019-01-23 05:10:38 +01:00
|
|
|
}
|
|
|
|
}
|
2018-10-28 07:55:01 +01:00
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// ------------------------------
|
|
|
|
// Get issues as defined by opts.
|
|
|
|
// ------------------------------
|
|
|
|
|
|
|
|
// Slice of Issues that will be displayed on the overview page
|
|
|
|
// USING FINAL STATE OF opts FOR A QUERY.
|
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 08:28:53 +02:00
|
|
|
var issues issues_model.IssueList
|
|
|
|
{
|
2024-04-18 02:16:52 +02:00
|
|
|
issueIDs, _, err := issue_indexer.SearchIssues(ctx, issue_indexer.ToSearchOptions(keyword, opts).Copy(
|
|
|
|
func(o *issue_indexer.SearchOptions) { o.IsFuzzyKeyword = isFuzzy },
|
|
|
|
))
|
2020-02-29 07:52:05 +01:00
|
|
|
if err != nil {
|
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 08:28:53 +02:00
|
|
|
ctx.ServerError("issueIDsFromSearch", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
issues, err = issues_model.GetIssuesByIDs(ctx, issueIDs, true)
|
|
|
|
if err != nil {
|
|
|
|
ctx.ServerError("GetIssuesByIDs", err)
|
2020-02-29 07:52:05 +01:00
|
|
|
return
|
|
|
|
}
|
2017-02-17 01:58:19 +01:00
|
|
|
}
|
2015-09-02 22:18:09 +02:00
|
|
|
|
2022-04-27 00:40:01 +02:00
|
|
|
commitStatuses, lastStatus, err := pull_service.GetIssuesAllCommitStatus(ctx, issues)
|
2021-04-15 19:34:43 +02:00
|
|
|
if err != nil {
|
|
|
|
ctx.ServerError("GetIssuesLastCommitStatus", err)
|
|
|
|
return
|
2017-08-03 07:09:16 +02:00
|
|
|
}
|
2024-07-28 17:11:40 +02:00
|
|
|
if !ctx.Repo.CanRead(unit.TypeActions) {
|
|
|
|
for key := range commitStatuses {
|
|
|
|
git_model.CommitStatusesHideActionsURL(ctx, commitStatuses[key])
|
|
|
|
}
|
|
|
|
}
|
2017-08-03 07:09:16 +02:00
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// -------------------------------
|
|
|
|
// Fill stats to post to ctx.Data.
|
|
|
|
// -------------------------------
|
2024-12-08 13:44:17 +01:00
|
|
|
issueStats, err := getUserIssueStats(ctx, filterMode, issue_indexer.ToSearchOptions(keyword, opts).Copy(
|
|
|
|
func(o *issue_indexer.SearchOptions) {
|
|
|
|
o.IsFuzzyKeyword = isFuzzy
|
|
|
|
// If the doer is the same as the context user, which means the doer is viewing his own dashboard,
|
|
|
|
// it's not enough to show the repos that the doer owns or has been explicitly granted access to,
|
|
|
|
// because the doer may create issues or be mentioned in any public repo.
|
|
|
|
// So we need search issues in all public repos.
|
|
|
|
o.AllPublic = ctx.Doer.ID == ctxUser.ID
|
|
|
|
// TODO: to make it work with poster/assignee filter, then these IDs should be kept
|
|
|
|
o.AssigneeID = nil
|
|
|
|
o.PosterID = nil
|
|
|
|
|
|
|
|
o.MentionID = nil
|
|
|
|
o.ReviewRequestedID = nil
|
|
|
|
o.ReviewedID = nil
|
|
|
|
},
|
2024-04-18 02:16:52 +02:00
|
|
|
))
|
2023-08-23 04:29:17 +02:00
|
|
|
if err != nil {
|
|
|
|
ctx.ServerError("getUserIssueStats", err)
|
|
|
|
return
|
2019-12-02 04:50:36 +01:00
|
|
|
}
|
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
// Will be posted to ctx.Data.
|
2019-12-02 04:50:36 +01:00
|
|
|
var shownIssues int
|
2015-08-25 17:22:05 +02:00
|
|
|
if !isShowClosed {
|
2021-12-29 14:02:12 +01:00
|
|
|
shownIssues = int(issueStats.OpenCount)
|
2015-08-25 17:22:05 +02:00
|
|
|
} else {
|
2021-12-29 14:02:12 +01:00
|
|
|
shownIssues = int(issueStats.ClosedCount)
|
2015-08-25 17:22:05 +02:00
|
|
|
}
|
|
|
|
|
2021-01-13 05:19:17 +01:00
|
|
|
ctx.Data["IsShowClosed"] = isShowClosed
|
|
|
|
|
2022-01-20 18:46:10 +01:00
|
|
|
ctx.Data["IssueRefEndNames"], ctx.Data["IssueRefURLs"] = issue_service.GetRefEndNamesAndURLs(issues, ctx.FormString("RepoLink"))
|
2020-05-15 00:55:43 +02:00
|
|
|
|
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 08:28:53 +02:00
|
|
|
if err := issues.LoadAttributes(ctx); err != nil {
|
|
|
|
ctx.ServerError("issues.LoadAttributes", err)
|
|
|
|
return
|
|
|
|
}
|
2015-08-25 16:58:34 +02:00
|
|
|
ctx.Data["Issues"] = issues
|
2021-01-13 05:19:17 +01:00
|
|
|
|
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 08:28:53 +02:00
|
|
|
approvalCounts, err := issues.GetApprovalCounts(ctx)
|
2021-01-13 05:19:17 +01:00
|
|
|
if err != nil {
|
|
|
|
ctx.ServerError("ApprovalCounts", err)
|
|
|
|
return
|
|
|
|
}
|
2020-03-06 04:44:06 +01:00
|
|
|
ctx.Data["ApprovalCounts"] = func(issueID int64, typ string) int64 {
|
|
|
|
counts, ok := approvalCounts[issueID]
|
|
|
|
if !ok || len(counts) == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
2022-06-13 11:37:59 +02:00
|
|
|
reviewTyp := issues_model.ReviewTypeApprove
|
2020-03-06 04:44:06 +01:00
|
|
|
if typ == "reject" {
|
2022-06-13 11:37:59 +02:00
|
|
|
reviewTyp = issues_model.ReviewTypeReject
|
2020-04-06 18:33:34 +02:00
|
|
|
} else if typ == "waiting" {
|
2022-06-13 11:37:59 +02:00
|
|
|
reviewTyp = issues_model.ReviewTypeRequest
|
2020-03-06 04:44:06 +01:00
|
|
|
}
|
|
|
|
for _, count := range counts {
|
|
|
|
if count.Type == reviewTyp {
|
|
|
|
return count.Count
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
2022-04-27 00:40:01 +02:00
|
|
|
ctx.Data["CommitLastStatus"] = lastStatus
|
|
|
|
ctx.Data["CommitStatuses"] = commitStatuses
|
2021-12-29 14:02:12 +01:00
|
|
|
ctx.Data["IssueStats"] = issueStats
|
2015-08-25 16:58:34 +02:00
|
|
|
ctx.Data["ViewType"] = viewType
|
2015-11-04 18:50:02 +01:00
|
|
|
ctx.Data["SortType"] = sortType
|
2015-08-25 16:58:34 +02:00
|
|
|
ctx.Data["IsShowClosed"] = isShowClosed
|
2021-01-13 05:19:17 +01:00
|
|
|
ctx.Data["SelectLabels"] = selectedLabels
|
2024-04-18 02:16:52 +02:00
|
|
|
ctx.Data["IsFuzzy"] = isFuzzy
|
2024-12-08 13:44:17 +01:00
|
|
|
ctx.Data["SearchFilterPosterID"] = util.Iif[any](opts.PosterID != 0, opts.PosterID, nil)
|
|
|
|
ctx.Data["SearchFilterAssigneeID"] = util.Iif[any](opts.AssigneeID != 0, opts.AssigneeID, nil)
|
2017-02-14 15:15:18 +01:00
|
|
|
|
2015-08-25 16:58:34 +02:00
|
|
|
if isShowClosed {
|
|
|
|
ctx.Data["State"] = "closed"
|
|
|
|
} else {
|
|
|
|
ctx.Data["State"] = "open"
|
|
|
|
}
|
|
|
|
|
2019-12-02 04:50:36 +01:00
|
|
|
pager := context.NewPagination(shownIssues, setting.UI.IssuePagingNum, page, 5)
|
2024-03-16 13:07:56 +01:00
|
|
|
pager.AddParamString("q", keyword)
|
|
|
|
pager.AddParamString("type", viewType)
|
|
|
|
pager.AddParamString("sort", sortType)
|
|
|
|
pager.AddParamString("state", fmt.Sprint(ctx.Data["State"]))
|
|
|
|
pager.AddParamString("labels", selectedLabels)
|
2024-12-08 13:44:17 +01:00
|
|
|
pager.AddParamString("fuzzy", fmt.Sprint(isFuzzy))
|
|
|
|
pager.AddParamString("poster", posterUsername)
|
|
|
|
if opts.AssigneeID != 0 {
|
|
|
|
pager.AddParamString("assignee", fmt.Sprint(opts.AssigneeID))
|
|
|
|
}
|
2019-04-20 06:15:19 +02:00
|
|
|
ctx.Data["Page"] = pager
|
|
|
|
|
2021-04-05 17:30:52 +02:00
|
|
|
ctx.HTML(http.StatusOK, tplIssues)
|
2015-08-25 16:58:34 +02:00
|
|
|
}
|
|
|
|
|
2016-11-27 12:59:12 +01:00
|
|
|
// ShowSSHKeys output all the ssh keys of user by uid
|
2022-03-26 10:04:22 +01:00
|
|
|
func ShowSSHKeys(ctx *context.Context) {
|
2023-11-24 04:49:41 +01:00
|
|
|
keys, err := db.Find[asymkey_model.PublicKey](ctx, asymkey_model.FindPublicKeyOptions{
|
|
|
|
OwnerID: ctx.ContextUser.ID,
|
|
|
|
})
|
2014-11-23 08:33:47 +01:00
|
|
|
if err != nil {
|
2018-01-10 22:34:17 +01:00
|
|
|
ctx.ServerError("ListPublicKeys", err)
|
2014-11-23 08:33:47 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
var buf bytes.Buffer
|
|
|
|
for i := range keys {
|
|
|
|
buf.WriteString(keys[i].OmitEmail())
|
2015-06-08 09:40:38 +02:00
|
|
|
buf.WriteString("\n")
|
2014-11-23 08:33:47 +01:00
|
|
|
}
|
2021-12-15 07:59:57 +01:00
|
|
|
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
|
2014-11-23 08:33:47 +01:00
|
|
|
}
|
|
|
|
|
2019-04-14 18:43:56 +02:00
|
|
|
// ShowGPGKeys output all the public GPG keys of user by uid
|
2022-03-26 10:04:22 +01:00
|
|
|
func ShowGPGKeys(ctx *context.Context) {
|
2024-01-15 03:19:25 +01:00
|
|
|
keys, err := db.Find[asymkey_model.GPGKey](ctx, asymkey_model.FindGPGKeyOptions{
|
|
|
|
ListOptions: db.ListOptionsAll,
|
|
|
|
OwnerID: ctx.ContextUser.ID,
|
|
|
|
})
|
2019-04-14 18:43:56 +02:00
|
|
|
if err != nil {
|
|
|
|
ctx.ServerError("ListGPGKeys", err)
|
|
|
|
return
|
|
|
|
}
|
2022-03-02 17:32:18 +01:00
|
|
|
|
2019-04-14 18:43:56 +02:00
|
|
|
entities := make([]*openpgp.Entity, 0)
|
|
|
|
failedEntitiesID := make([]string, 0)
|
|
|
|
for _, k := range keys {
|
2023-10-14 10:37:24 +02:00
|
|
|
e, err := asymkey_model.GPGKeyToEntity(ctx, k)
|
2019-04-14 18:43:56 +02:00
|
|
|
if err != nil {
|
2021-12-10 09:14:24 +01:00
|
|
|
if asymkey_model.IsErrGPGKeyImportNotExist(err) {
|
2019-04-14 18:43:56 +02:00
|
|
|
failedEntitiesID = append(failedEntitiesID, k.KeyID)
|
2022-01-20 18:46:10 +01:00
|
|
|
continue // Skip previous import without backup of imported armored key
|
2019-04-14 18:43:56 +02:00
|
|
|
}
|
|
|
|
ctx.ServerError("ShowGPGKeys", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
entities = append(entities, e)
|
|
|
|
}
|
|
|
|
var buf bytes.Buffer
|
|
|
|
|
|
|
|
headers := make(map[string]string)
|
2022-01-20 18:46:10 +01:00
|
|
|
if len(failedEntitiesID) > 0 { // If some key need re-import to be exported
|
2019-04-14 18:43:56 +02:00
|
|
|
headers["Note"] = fmt.Sprintf("The keys with the following IDs couldn't be exported and need to be reuploaded %s", strings.Join(failedEntitiesID, ", "))
|
2022-03-02 17:32:18 +01:00
|
|
|
} else if len(entities) == 0 {
|
|
|
|
headers["Note"] = "This user hasn't uploaded any GPG keys."
|
2019-04-14 18:43:56 +02:00
|
|
|
}
|
|
|
|
writer, _ := armor.Encode(&buf, "PGP PUBLIC KEY BLOCK", headers)
|
|
|
|
for _, e := range entities {
|
2022-01-20 18:46:10 +01:00
|
|
|
err = e.Serialize(writer) // TODO find why key are exported with a different cipherTypeByte as original (should not be blocking but strange)
|
2019-04-14 18:43:56 +02:00
|
|
|
if err != nil {
|
|
|
|
ctx.ServerError("ShowGPGKeys", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
writer.Close()
|
2021-12-15 07:59:57 +01:00
|
|
|
ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
|
2019-04-14 18:43:56 +02:00
|
|
|
}
|
2023-04-27 08:06:45 +02:00
|
|
|
|
|
|
|
func UsernameSubRoute(ctx *context.Context) {
|
|
|
|
// WORKAROUND to support usernames with "." in it
|
|
|
|
// https://github.com/go-chi/chi/issues/781
|
2024-06-19 00:32:45 +02:00
|
|
|
username := ctx.PathParam("username")
|
2023-04-27 08:06:45 +02:00
|
|
|
reloadParam := func(suffix string) (success bool) {
|
2024-06-19 00:32:45 +02:00
|
|
|
ctx.SetPathParam("username", strings.TrimSuffix(username, suffix))
|
2024-02-27 08:12:22 +01:00
|
|
|
context.UserAssignmentWeb()(ctx)
|
2024-03-13 07:57:30 +01:00
|
|
|
if ctx.Written() {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2023-11-13 23:30:24 +01:00
|
|
|
// check view permissions
|
|
|
|
if !user_model.IsUserVisibleToViewer(ctx, ctx.ContextUser, ctx.Doer) {
|
2024-09-10 04:23:07 +02:00
|
|
|
ctx.NotFound("user", fmt.Errorf("%s", ctx.ContextUser.Name))
|
2023-11-13 23:30:24 +01:00
|
|
|
return false
|
|
|
|
}
|
2024-03-13 07:57:30 +01:00
|
|
|
return true
|
2023-04-27 08:06:45 +02:00
|
|
|
}
|
|
|
|
switch {
|
|
|
|
case strings.HasSuffix(username, ".png"):
|
|
|
|
if reloadParam(".png") {
|
|
|
|
AvatarByUserName(ctx)
|
|
|
|
}
|
|
|
|
case strings.HasSuffix(username, ".keys"):
|
|
|
|
if reloadParam(".keys") {
|
|
|
|
ShowSSHKeys(ctx)
|
|
|
|
}
|
|
|
|
case strings.HasSuffix(username, ".gpg"):
|
|
|
|
if reloadParam(".gpg") {
|
|
|
|
ShowGPGKeys(ctx)
|
|
|
|
}
|
|
|
|
case strings.HasSuffix(username, ".rss"):
|
|
|
|
if !setting.Other.EnableFeed {
|
|
|
|
ctx.Error(http.StatusNotFound)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if reloadParam(".rss") {
|
|
|
|
feed.ShowUserFeedRSS(ctx)
|
|
|
|
}
|
|
|
|
case strings.HasSuffix(username, ".atom"):
|
|
|
|
if !setting.Other.EnableFeed {
|
|
|
|
ctx.Error(http.StatusNotFound)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if reloadParam(".atom") {
|
|
|
|
feed.ShowUserFeedAtom(ctx)
|
|
|
|
}
|
|
|
|
default:
|
2024-02-27 08:12:22 +01:00
|
|
|
context.UserAssignmentWeb()(ctx)
|
2023-04-27 08:06:45 +02:00
|
|
|
if !ctx.Written() {
|
|
|
|
ctx.Data["EnableFeed"] = setting.Other.EnableFeed
|
2023-07-06 20:59:24 +02:00
|
|
|
OwnerProfile(ctx)
|
2023-04-27 08:06:45 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2023-08-23 04:29:17 +02:00
|
|
|
|
2024-12-08 13:44:17 +01:00
|
|
|
func getUserIssueStats(ctx *context.Context, filterMode int, opts *issue_indexer.SearchOptions) (ret *issues_model.IssueStats, err error) {
|
|
|
|
ret = &issues_model.IssueStats{}
|
Include public repos in doer's dashboard for issue search (#28304)
It will fix #28268 .
<img width="1313" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/cb1e07d5-7a12-4691-a054-8278ba255bfc">
<img width="1318" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/4fd60820-97f1-4c2c-a233-d3671a5039e9">
## :warning: BREAKING :warning:
But need to give up some features:
<img width="1312" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/281c0d51-0e7d-473f-bbed-216e2f645610">
However, such abandonment may fix #28055 .
## Backgroud
When the user switches the dashboard context to an org, it means they
want to search issues in the repos that belong to the org. However, when
they switch to themselves, it means all repos they can access because
they may have created an issue in a public repo that they don't own.
<img width="286" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/182dcd5b-1c20-4725-93af-96e8dfae5b97">
It's a confusing design. Think about this: What does "In your
repositories" mean when the user switches to an org? Repos belong to the
user or the org?
Whatever, it has been broken by #26012 and its following PRs. After the
PR, it searches for issues in repos that the dashboard context user owns
or has been explicitly granted access to, so it causes #28268.
## How to fix it
It's not really difficult to fix it. Just extend the repo scope to
search issues when the dashboard context user is the doer. Since the
user may create issues or be mentioned in any public repo, we can just
set `AllPublic` to true, which is already supported by indexers. The DB
condition will also support it in this PR.
But the real difficulty is how to count the search results grouped by
repos. It's something like "search issues with this keyword and those
filters, and return the total number and the top results. **Then, group
all of them by repo and return the counts of each group.**"
<img width="314" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/5206eb20-f8f5-49b9-b45a-1be2fcf679f4">
Before #26012, it was being done in the DB, but it caused the results to
be incomplete (see the description of #26012).
And to keep this, #26012 implement it in an inefficient way, just count
the issues by repo one by one, so it cannot work when `AllPublic` is
true because it's almost impossible to do this for all public repos.
https://github.com/go-gitea/gitea/blob/1bfcdeef4cca0f5509476358e5931c13d37ed1ca/modules/indexer/issues/indexer.go#L318-L338
## Give up unnecessary features
We may can resovle `TODO: use "group by" of the indexer engines to
implement it`, I'm sure it can be done with Elasticsearch, but IIRC,
Bleve and Meilisearch don't support "group by".
And the real question is, does it worth it? Why should we need to know
the counts grouped by repos?
Let me show you my search dashboard on gitea.com.
<img width="1304" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/2bca2d46-6c71-4de1-94cb-0c9af27c62ff">
I never think the long repo list helps anything.
And if we agree to abandon it, things will be much easier. That is this
PR.
## TODO
I know it's important to filter by repos when searching issues. However,
it shouldn't be the way we have it now. It could be implemented like
this.
<img width="1316" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/99ee5f21-cbb5-4dfe-914d-cb796cb79fbe">
The indexers support it well now, but it requires some frontend work,
which I'm not good at. So, I think someone could help do that in another
PR and merge this one to fix the bug first.
Or please block this PR and help to complete it.
Finally, "Switch dashboard context" is also a design that needs
improvement. In my opinion, it can be accomplished by adding filtering
conditions instead of "switching".
2023-12-07 06:26:18 +01:00
|
|
|
doerID := ctx.Doer.ID
|
|
|
|
|
2023-08-23 04:29:17 +02:00
|
|
|
{
|
|
|
|
openClosedOpts := opts.Copy()
|
|
|
|
switch filterMode {
|
Include public repos in doer's dashboard for issue search (#28304)
It will fix #28268 .
<img width="1313" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/cb1e07d5-7a12-4691-a054-8278ba255bfc">
<img width="1318" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/4fd60820-97f1-4c2c-a233-d3671a5039e9">
## :warning: BREAKING :warning:
But need to give up some features:
<img width="1312" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/281c0d51-0e7d-473f-bbed-216e2f645610">
However, such abandonment may fix #28055 .
## Backgroud
When the user switches the dashboard context to an org, it means they
want to search issues in the repos that belong to the org. However, when
they switch to themselves, it means all repos they can access because
they may have created an issue in a public repo that they don't own.
<img width="286" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/182dcd5b-1c20-4725-93af-96e8dfae5b97">
It's a confusing design. Think about this: What does "In your
repositories" mean when the user switches to an org? Repos belong to the
user or the org?
Whatever, it has been broken by #26012 and its following PRs. After the
PR, it searches for issues in repos that the dashboard context user owns
or has been explicitly granted access to, so it causes #28268.
## How to fix it
It's not really difficult to fix it. Just extend the repo scope to
search issues when the dashboard context user is the doer. Since the
user may create issues or be mentioned in any public repo, we can just
set `AllPublic` to true, which is already supported by indexers. The DB
condition will also support it in this PR.
But the real difficulty is how to count the search results grouped by
repos. It's something like "search issues with this keyword and those
filters, and return the total number and the top results. **Then, group
all of them by repo and return the counts of each group.**"
<img width="314" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/5206eb20-f8f5-49b9-b45a-1be2fcf679f4">
Before #26012, it was being done in the DB, but it caused the results to
be incomplete (see the description of #26012).
And to keep this, #26012 implement it in an inefficient way, just count
the issues by repo one by one, so it cannot work when `AllPublic` is
true because it's almost impossible to do this for all public repos.
https://github.com/go-gitea/gitea/blob/1bfcdeef4cca0f5509476358e5931c13d37ed1ca/modules/indexer/issues/indexer.go#L318-L338
## Give up unnecessary features
We may can resovle `TODO: use "group by" of the indexer engines to
implement it`, I'm sure it can be done with Elasticsearch, but IIRC,
Bleve and Meilisearch don't support "group by".
And the real question is, does it worth it? Why should we need to know
the counts grouped by repos?
Let me show you my search dashboard on gitea.com.
<img width="1304" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/2bca2d46-6c71-4de1-94cb-0c9af27c62ff">
I never think the long repo list helps anything.
And if we agree to abandon it, things will be much easier. That is this
PR.
## TODO
I know it's important to filter by repos when searching issues. However,
it shouldn't be the way we have it now. It could be implemented like
this.
<img width="1316" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/99ee5f21-cbb5-4dfe-914d-cb796cb79fbe">
The indexers support it well now, but it requires some frontend work,
which I'm not good at. So, I think someone could help do that in another
PR and merge this one to fix the bug first.
Or please block this PR and help to complete it.
Finally, "Switch dashboard context" is also a design that needs
improvement. In my opinion, it can be accomplished by adding filtering
conditions instead of "switching".
2023-12-07 06:26:18 +01:00
|
|
|
case issues_model.FilterModeAll:
|
|
|
|
// no-op
|
|
|
|
case issues_model.FilterModeYourRepositories:
|
|
|
|
openClosedOpts.AllPublic = false
|
2023-08-23 04:29:17 +02:00
|
|
|
case issues_model.FilterModeAssign:
|
2024-03-13 09:25:53 +01:00
|
|
|
openClosedOpts.AssigneeID = optional.Some(doerID)
|
2023-08-23 04:29:17 +02:00
|
|
|
case issues_model.FilterModeCreate:
|
2024-03-13 09:25:53 +01:00
|
|
|
openClosedOpts.PosterID = optional.Some(doerID)
|
2023-08-23 04:29:17 +02:00
|
|
|
case issues_model.FilterModeMention:
|
2024-03-13 09:25:53 +01:00
|
|
|
openClosedOpts.MentionID = optional.Some(doerID)
|
2023-08-23 04:29:17 +02:00
|
|
|
case issues_model.FilterModeReviewRequested:
|
2024-03-13 09:25:53 +01:00
|
|
|
openClosedOpts.ReviewRequestedID = optional.Some(doerID)
|
2023-08-23 04:29:17 +02:00
|
|
|
case issues_model.FilterModeReviewed:
|
2024-03-13 09:25:53 +01:00
|
|
|
openClosedOpts.ReviewedID = optional.Some(doerID)
|
2023-08-23 04:29:17 +02:00
|
|
|
}
|
2024-03-02 16:42:31 +01:00
|
|
|
openClosedOpts.IsClosed = optional.Some(false)
|
2023-08-23 04:29:17 +02:00
|
|
|
ret.OpenCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-02 16:42:31 +01:00
|
|
|
openClosedOpts.IsClosed = optional.Some(true)
|
2023-08-23 04:29:17 +02:00
|
|
|
ret.ClosedCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Include public repos in doer's dashboard for issue search (#28304)
It will fix #28268 .
<img width="1313" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/cb1e07d5-7a12-4691-a054-8278ba255bfc">
<img width="1318" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/4fd60820-97f1-4c2c-a233-d3671a5039e9">
## :warning: BREAKING :warning:
But need to give up some features:
<img width="1312" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/281c0d51-0e7d-473f-bbed-216e2f645610">
However, such abandonment may fix #28055 .
## Background
When the user switches the dashboard context to an org, it means they
want to search issues in the repos that belong to the org. However, when
they switch to themselves, it means all repos they can access because
they may have created an issue in a public repo that they don't own.
<img width="286" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/182dcd5b-1c20-4725-93af-96e8dfae5b97">
It's a confusing design. Think about this: What does "In your
repositories" mean when the user switches to an org? Repos belong to the
user or the org?
Whatever, it has been broken by #26012 and its following PRs. After the
PR, it searches for issues in repos that the dashboard context user owns
or has been explicitly granted access to, so it causes #28268.
## How to fix it
It's not really difficult to fix it. Just extend the repo scope to
search issues when the dashboard context user is the doer. Since the
user may create issues or be mentioned in any public repo, we can just
set `AllPublic` to true, which is already supported by indexers. The DB
condition will also support it in this PR.
But the real difficulty is how to count the search results grouped by
repos. It's something like "search issues with this keyword and those
filters, and return the total number and the top results. **Then, group
all of them by repo and return the counts of each group.**"
<img width="314" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/5206eb20-f8f5-49b9-b45a-1be2fcf679f4">
Before #26012, it was being done in the DB, but it caused the results to
be incomplete (see the description of #26012).
And to keep this, #26012 implement it in an inefficient way, just count
the issues by repo one by one, so it cannot work when `AllPublic` is
true because it's almost impossible to do this for all public repos.
https://github.com/go-gitea/gitea/blob/1bfcdeef4cca0f5509476358e5931c13d37ed1ca/modules/indexer/issues/indexer.go#L318-L338
## Give up unnecessary features
We may be able to resolve `TODO: use "group by" of the indexer engines to
implement it`; I'm sure it can be done with Elasticsearch, but IIRC,
Bleve and Meilisearch don't support "group by".
And the real question is: is it worth it? Why do we need to know
the counts grouped by repos?
Let me show you my search dashboard on gitea.com.
<img width="1304" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/2bca2d46-6c71-4de1-94cb-0c9af27c62ff">
I don't think the long repo list helps with anything.
And if we agree to abandon it, things will be much easier. That is this
PR.
## TODO
I know it's important to filter by repos when searching issues. However,
it shouldn't be the way we have it now. It could be implemented like
this.
<img width="1316" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/99ee5f21-cbb5-4dfe-914d-cb796cb79fbe">
The indexers support it well now, but it requires some frontend work,
which I'm not good at. So, I think someone could help do that in another
PR and merge this one to fix the bug first.
Or please block this PR and help to complete it.
Finally, "Switch dashboard context" is also a design that needs
improvement. In my opinion, it can be accomplished by adding filtering
conditions instead of "switching".
2023-12-07 06:26:18 +01:00
|
|
|
ret.YourRepositoriesCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.AllPublic = false }))
|
2023-08-23 04:29:17 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 09:25:53 +01:00
|
|
|
ret.AssignCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.AssigneeID = optional.Some(doerID) }))
|
2023-08-23 04:29:17 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 09:25:53 +01:00
|
|
|
ret.CreateCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.PosterID = optional.Some(doerID) }))
|
2023-08-23 04:29:17 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 09:25:53 +01:00
|
|
|
ret.MentionCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.MentionID = optional.Some(doerID) }))
|
2023-08-23 04:29:17 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 09:25:53 +01:00
|
|
|
ret.ReviewRequestedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewRequestedID = optional.Some(doerID) }))
|
2023-08-23 04:29:17 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 09:25:53 +01:00
|
|
|
ret.ReviewedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewedID = optional.Some(doerID) }))
|
2023-08-23 04:29:17 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return ret, nil
|
|
|
|
}
|