#5125 fix-5056 Optimize project statistics time

Merged
zouap merged 2 commits from fix-5056 into V20240129 3 months ago
  1. go.mod (+1, -1)
  2. go.sum (+2, -2)
  3. routers/repo/repo_statistic.go (+222, -174)
  4. vendor/github.com/Jeffail/tunny/LICENSE (+19, -0)
  5. vendor/github.com/Jeffail/tunny/README.md (+134, -0)
  6. vendor/github.com/Jeffail/tunny/tunny.go (+309, -0)
  7. vendor/github.com/Jeffail/tunny/tunny_logo.png (BIN)
  8. vendor/github.com/Jeffail/tunny/worker.go (+126, -0)
  9. vendor/golang.org/x/exp/LICENSE (+0, -27)
  10. vendor/golang.org/x/exp/PATENTS (+0, -22)
  11. vendor/golang.org/x/exp/constraints/constraints.go (+0, -50)
  12. vendor/golang.org/x/exp/slices/cmp.go (+0, -44)
  13. vendor/golang.org/x/exp/slices/slices.go (+0, -499)
  14. vendor/golang.org/x/exp/slices/sort.go (+0, -195)
  15. vendor/golang.org/x/exp/slices/zsortanyfunc.go (+0, -479)
  16. vendor/golang.org/x/exp/slices/zsortordered.go (+0, -481)
  17. vendor/modules.txt (+3, -2)

go.mod (+1, -1)

@@ -18,6 +18,7 @@ require (
gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7
github.com/360EntSecGroup-Skylar/excelize/v2 v2.0.2
github.com/BurntSushi/toml v0.3.1
github.com/Jeffail/tunny v0.1.4
github.com/PuerkitoBio/goquery v1.5.0
github.com/RichardKnop/machinery v1.6.9
github.com/alecthomas/chroma v0.10.0
@@ -97,7 +98,6 @@ require (
github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594
github.com/yuin/goldmark-meta v1.1.0
golang.org/x/crypto v0.16.0
golang.org/x/exp v0.0.0-20231127185646-65229373498e
golang.org/x/net v0.19.0
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sys v0.15.0


go.sum (+2, -2)

@@ -54,6 +54,8 @@ github.com/360EntSecGroup-Skylar/excelize/v2 v2.0.2/go.mod h1:EfRHD2k+Kd7ijnqlwO
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Jeffail/tunny v0.1.4 h1:chtpdz+nUtaYQeCKlNBg6GycFF/kGVHOr6A3cmzTJXs=
github.com/Jeffail/tunny v0.1.4/go.mod h1:P8xAx4XQl0xsuhjX1DtfaMDCSuavzdb2rwbd0lk+fvo=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -858,8 +860,6 @@ golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No=
golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=


routers/repo/repo_statistic.go (+222, -174)

@@ -2,6 +2,7 @@ package repo

import (
"errors"
"sync"
"time"

"code.gitea.io/gitea/models"
@@ -12,8 +13,23 @@ import (
"code.gitea.io/gitea/services/mailer"

"gitea.com/macaron/macaron"
"github.com/Jeffail/tunny"
)

var statisticMutex sync.Mutex
var statisticWg sync.WaitGroup

type StatisticInput struct {
Date string
Repo *models.Repository
T time.Time
ErrorProjects []string
ReposRadar []*models.RepoStatistic
IsInitMinMaxRadar bool
MinRepoRadar models.RepoStatistic
MaxRepoRadar models.RepoStatistic
}

func StatisticAuto() {
go RepoStatisticAuto()
go TimingCountData()
@@ -52,136 +68,196 @@ func RepoStatisticDaily(date string) {
isInitMinMaxRadar := false

var error_projects = make([]string, 0)
for _, repo := range repos {
projectName := getDistinctProjectName(repo)
log.Info("start statistic: %s", projectName)
var numDevMonths, numWikiViews, numContributor, numKeyContributor, numCommitsGrowth, numCommitLinesGrowth, numContributorsGrowth, numCommits int64
repoGitStat, err := models.GetRepoKPIStats(repo)
if err != nil {
log.Error("GetRepoKPIStats failed: %s", projectName)
} else {
numDevMonths = repoGitStat.DevelopAge
numKeyContributor = repoGitStat.KeyContributors
numWikiViews = repoGitStat.WikiPages
numContributor = repoGitStat.Contributors
numCommitsGrowth = repoGitStat.CommitsAdded
numCommitLinesGrowth = repoGitStat.CommitLinesModified
numContributorsGrowth = repoGitStat.ContributorsAdded
numCommits = repoGitStat.Commits

pool := tunny.NewFunc(4, statisticOneRepo)
defer pool.Close()
statisticWg.Add(len(repos))
for _, repo := range repos {
input := &StatisticInput{
Date: date,
Repo: repo,
T: t,
ErrorProjects: error_projects,
ReposRadar: reposRadar,
IsInitMinMaxRadar: isInitMinMaxRadar,
MinRepoRadar: minRepoRadar,
MaxRepoRadar: maxRepoRadar,
}
pool.Process(input)
}
statisticWg.Wait()

var issueFixedRate float32
if repo.NumIssues != 0 {
issueFixedRate = float32(repo.NumClosedIssues) / float32(repo.NumIssues)
if len(error_projects) > 0 {
mailer.SendWarnNotifyMail(setting.Warn_Notify_Mails, warnEmailMessage)
}

//radar map
log.Info("begin statistic radar")
for _, radarInit := range reposRadar {
if radarInit.IsMirror && setting.RadarMap.IgnoreMirrorRepo {
radarInit.Impact = 0
radarInit.Completeness = 0
radarInit.Liveness = 0
radarInit.ProjectHealth = 0
radarInit.TeamHealth = 0
radarInit.Growth = 0
radarInit.RadarTotal = 0
} else {
issueFixedRate = float32(setting.RadarMap.ProjectHealth0IssueCloseRatio)
radarInit.Impact = normalization.Normalization(radarInit.Impact, minRepoRadar.Impact, maxRepoRadar.Impact)
radarInit.Completeness = normalization.Normalization(radarInit.Completeness, minRepoRadar.Completeness, maxRepoRadar.Completeness)
radarInit.Liveness = normalization.Normalization(radarInit.Liveness, minRepoRadar.Liveness, maxRepoRadar.Liveness)
radarInit.ProjectHealth = normalization.Normalization(radarInit.ProjectHealth, minRepoRadar.ProjectHealth, maxRepoRadar.ProjectHealth)
radarInit.TeamHealth = normalization.Normalization(radarInit.TeamHealth, minRepoRadar.TeamHealth, maxRepoRadar.TeamHealth)
radarInit.Growth = normalization.Normalization(radarInit.Growth, minRepoRadar.Growth, maxRepoRadar.Growth)
radarInit.RadarTotal = normalization.GetRadarValue(radarInit.Impact, radarInit.Completeness, radarInit.Liveness, radarInit.ProjectHealth, radarInit.TeamHealth, radarInit.Growth)
}
models.UpdateRepoStat(radarInit)
}

var numVersions int64
numVersions, err = models.GetReleaseCountByRepoID(repo.ID, models.FindReleasesOptions{})
if err != nil {
log.Error("GetReleaseCountByRepoID failed(%s): %v", projectName, err)
}
log.Info("finish statistic: radar")

var datasetSize int64
datasetSize, err = getDatasetSize(repo)
if err != nil {
log.Error("getDatasetSize failed(%s): %v", projectName, err)
}
}

var numComments int64
numComments, err = models.GetCommentCountByRepoID(repo.ID)
if err != nil {
log.Error("GetCommentCountByRepoID failed(%s): %v", projectName, err)
}
func statisticOneRepo(input interface{}) interface{} {
defer statisticWg.Done()

beginTime, endTime := getStatTime(date)
var numVisits int
numVisits, err = repository.AppointProjectView(repo.OwnerName, repo.Name, beginTime, endTime)
if err != nil {
log.Error("AppointProjectView failed(%s): %v", projectName, err)
}
statisticInput := input.(*StatisticInput)
repo := statisticInput.Repo
projectName := getDistinctProjectName(repo)
log.Info("start statistic: %s", projectName)
var numDevMonths, numWikiViews, numContributor, numKeyContributor, numCommitsGrowth, numCommitLinesGrowth, numContributorsGrowth, numCommits int64
repoGitStat, err := models.GetRepoKPIStats(repo)
if err != nil {
log.Error("GetRepoKPIStats failed: %s", projectName)
} else {
numDevMonths = repoGitStat.DevelopAge
numKeyContributor = repoGitStat.KeyContributors
numWikiViews = repoGitStat.WikiPages
numContributor = repoGitStat.Contributors
numCommitsGrowth = repoGitStat.CommitsAdded
numCommitLinesGrowth = repoGitStat.CommitLinesModified
numContributorsGrowth = repoGitStat.ContributorsAdded
numCommits = repoGitStat.Commits

repoStat := models.RepoStatistic{
RepoID: repo.ID,
Date: date,
Name: repo.Name,
Alias: repo.Alias,
IsPrivate: repo.IsPrivate,
IsMirror: repo.IsMirror,
IsFork: repo.IsFork,
RepoCreatedUnix: repo.CreatedUnix,
OwnerName: repo.OwnerName,
NumWatches: int64(repo.NumWatches),
NumStars: int64(repo.NumStars),
NumForks: int64(repo.NumForks),
NumDownloads: repo.CloneCnt,
NumComments: numComments,
NumVisits: int64(numVisits),
NumClosedIssues: int64(repo.NumClosedIssues),
NumVersions: numVersions,
NumDevMonths: numDevMonths,
RepoSize: repo.Size,
DatasetSize: datasetSize,
NumModels: repo.ModelCnt,
NumWikiViews: numWikiViews,
NumCommits: numCommits,
NumIssues: int64(repo.NumIssues),
NumPulls: int64(repo.NumPulls),
IssueFixedRate: issueFixedRate,
NumContributor: numContributor,
NumKeyContributor: numKeyContributor,
NumCommitsGrowth: numCommitsGrowth,
NumCommitLinesGrowth: numCommitLinesGrowth,
NumContributorsGrowth: numContributorsGrowth,
NumCloudbrain: repo.AiTaskCnt,
NumDatasetFile: repo.DatasetCnt,
NumModelConvert: models.QueryModelConvertCountByRepoID(repo.ID),
}
}

dayBeforeDate := t.AddDate(0, 0, -1).Format("2006-01-02")
repoStatisticsBefore, err := models.GetRepoStatisticByDate(dayBeforeDate, repo.ID)
var issueFixedRate float32
if repo.NumIssues != 0 {
issueFixedRate = float32(repo.NumClosedIssues) / float32(repo.NumIssues)
} else {
issueFixedRate = float32(setting.RadarMap.ProjectHealth0IssueCloseRatio)
}

if err != nil {
log.Error("get data of day before the date failed ", err)
} else {
if len(repoStatisticsBefore) > 0 {
repoStatisticBefore := repoStatisticsBefore[0]
repoStat.NumWatchesAdded = repoStat.NumWatches - repoStatisticBefore.NumWatches
repoStat.NumStarsAdded = repoStat.NumStars - repoStatisticBefore.NumStars
repoStat.NumForksAdded = repoStat.NumForks - repoStatisticBefore.NumForks
repoStat.NumDownloadsAdded = repoStat.NumDownloads - repoStatisticBefore.NumDownloads
repoStat.NumCommentsAdded = repoStat.NumComments - repoStatisticBefore.NumComments
repoStat.NumClosedIssuesAdded = repoStat.NumClosedIssues - repoStatisticBefore.NumClosedIssues
repoStat.NumCommitsAdded = repoStat.NumCommits - repoStatisticBefore.NumCommits
repoStat.NumIssuesAdded = repoStat.NumIssues - repoStatisticBefore.NumIssues
repoStat.NumPullsAdded = repoStat.NumPulls - repoStatisticBefore.NumPulls
repoStat.NumContributorAdded = repoStat.NumContributor - repoStatisticBefore.NumContributor
repoStat.NumModelsAdded = repoStat.NumModels - repoStatisticBefore.NumModels
repoStat.NumCloudbrainAdded = repoStat.NumCloudbrain - repoStatisticBefore.NumCloudbrain
repoStat.NumModelConvertAdded = repoStat.NumModelConvert - repoStatisticBefore.NumModelConvert
repoStat.NumDatasetFileAdded = repoStat.NumDatasetFile - repoStatisticBefore.NumDatasetFile
}
}
day4MonthsAgo := t.AddDate(0, -4, 0)
repoStatisticFourMonthsAgo, err := models.GetOneRepoStatisticBeforeTime(day4MonthsAgo)
if err != nil {
log.Error("Get data of 4 moth ago failed.", err)
} else {
repoStat.NumCommentsGrowth = repoStat.NumComments - repoStatisticFourMonthsAgo.NumComments
repoStat.NumIssuesGrowth = repoStat.NumIssues - repoStatisticFourMonthsAgo.NumIssues
}
var numVersions int64
numVersions, err = models.GetReleaseCountByRepoID(repo.ID, models.FindReleasesOptions{})
if err != nil {
log.Error("GetReleaseCountByRepoID failed(%s): %v", projectName, err)
}

var datasetSize int64
datasetSize, err = getDatasetSize(repo)
if err != nil {
log.Error("getDatasetSize failed(%s): %v", projectName, err)
}

models.SyncStatDataToRepo(repo)
var numComments int64
numComments, err = models.GetCommentCountByRepoID(repo.ID)
if err != nil {
log.Error("GetCommentCountByRepoID failed(%s): %v", projectName, err)
}

if _, err = models.InsertRepoStat(&repoStat); err != nil {
log.Error("InsertRepoStat failed(%s): %v", projectName, err)
log.Error("failed statistic: %s", projectName)
error_projects = append(error_projects, projectName)
beginTime, endTime := getStatTime(statisticInput.Date)
var numVisits int
numVisits, err = repository.AppointProjectView(repo.OwnerName, repo.Name, beginTime, endTime)
if err != nil {
log.Error("AppointProjectView failed(%s): %v", projectName, err)
}

continue
repoStat := models.RepoStatistic{
RepoID: repo.ID,
Date: statisticInput.Date,
Name: repo.Name,
Alias: repo.Alias,
IsPrivate: repo.IsPrivate,
IsMirror: repo.IsMirror,
IsFork: repo.IsFork,
RepoCreatedUnix: repo.CreatedUnix,
OwnerName: repo.OwnerName,
NumWatches: int64(repo.NumWatches),
NumStars: int64(repo.NumStars),
NumForks: int64(repo.NumForks),
NumDownloads: repo.CloneCnt,
NumComments: numComments,
NumVisits: int64(numVisits),
NumClosedIssues: int64(repo.NumClosedIssues),
NumVersions: numVersions,
NumDevMonths: numDevMonths,
RepoSize: repo.Size,
DatasetSize: datasetSize,
NumModels: repo.ModelCnt,
NumWikiViews: numWikiViews,
NumCommits: numCommits,
NumIssues: int64(repo.NumIssues),
NumPulls: int64(repo.NumPulls),
IssueFixedRate: issueFixedRate,
NumContributor: numContributor,
NumKeyContributor: numKeyContributor,
NumCommitsGrowth: numCommitsGrowth,
NumCommitLinesGrowth: numCommitLinesGrowth,
NumContributorsGrowth: numContributorsGrowth,
NumCloudbrain: repo.AiTaskCnt,
NumDatasetFile: repo.DatasetCnt,
NumModelConvert: models.QueryModelConvertCountByRepoID(repo.ID),
}

dayBeforeDate := statisticInput.T.AddDate(0, 0, -1).Format("2006-01-02")
repoStatisticsBefore, err := models.GetRepoStatisticByDate(dayBeforeDate, repo.ID)

if err != nil {
log.Error("get data of day before the date failed ", err)
} else {
if len(repoStatisticsBefore) > 0 {
repoStatisticBefore := repoStatisticsBefore[0]
repoStat.NumWatchesAdded = repoStat.NumWatches - repoStatisticBefore.NumWatches
repoStat.NumStarsAdded = repoStat.NumStars - repoStatisticBefore.NumStars
repoStat.NumForksAdded = repoStat.NumForks - repoStatisticBefore.NumForks
repoStat.NumDownloadsAdded = repoStat.NumDownloads - repoStatisticBefore.NumDownloads
repoStat.NumCommentsAdded = repoStat.NumComments - repoStatisticBefore.NumComments
repoStat.NumClosedIssuesAdded = repoStat.NumClosedIssues - repoStatisticBefore.NumClosedIssues
repoStat.NumCommitsAdded = repoStat.NumCommits - repoStatisticBefore.NumCommits
repoStat.NumIssuesAdded = repoStat.NumIssues - repoStatisticBefore.NumIssues
repoStat.NumPullsAdded = repoStat.NumPulls - repoStatisticBefore.NumPulls
repoStat.NumContributorAdded = repoStat.NumContributor - repoStatisticBefore.NumContributor
repoStat.NumModelsAdded = repoStat.NumModels - repoStatisticBefore.NumModels
repoStat.NumCloudbrainAdded = repoStat.NumCloudbrain - repoStatisticBefore.NumCloudbrain
repoStat.NumModelConvertAdded = repoStat.NumModelConvert - repoStatisticBefore.NumModelConvert
repoStat.NumDatasetFileAdded = repoStat.NumDatasetFile - repoStatisticBefore.NumDatasetFile
}
}
day4MonthsAgo := statisticInput.T.AddDate(0, -4, 0)
repoStatisticFourMonthsAgo, err := models.GetOneRepoStatisticBeforeTime(day4MonthsAgo)
if err != nil {
log.Error("Get data of 4 moth ago failed.", err)
} else {
repoStat.NumCommentsGrowth = repoStat.NumComments - repoStatisticFourMonthsAgo.NumComments
repoStat.NumIssuesGrowth = repoStat.NumIssues - repoStatisticFourMonthsAgo.NumIssues
}

models.SyncStatDataToRepo(repo)

if _, err = models.InsertRepoStat(&repoStat); err != nil {
log.Error("InsertRepoStat failed(%s): %v", projectName, err)
log.Error("failed statistic: %s", projectName)
statisticMutex.Lock()
{
statisticInput.ErrorProjects = append(statisticInput.ErrorProjects, projectName)
}
statisticMutex.Unlock()
return nil
}

statisticMutex.Lock()
{

tempRepoStat := models.RepoStatistic{
RepoID: repoStat.RepoID,
@@ -195,102 +271,74 @@ func RepoStatisticDaily(date string) {
Growth: normalization.GetRepoGrowthInitValue(repoStat.NumCommitLinesGrowth, repoStat.NumIssuesGrowth, repoStat.NumCommitsGrowth, repoStat.NumContributorsGrowth, repoStat.NumCommentsGrowth),
}

reposRadar = append(reposRadar, &tempRepoStat)
statisticInput.ReposRadar = append(statisticInput.ReposRadar, &tempRepoStat)

if !isInitMinMaxRadar {
if !statisticInput.IsInitMinMaxRadar {

if !setting.RadarMap.IgnoreMirrorRepo || (setting.RadarMap.IgnoreMirrorRepo && !tempRepoStat.IsMirror) {
minRepoRadar = tempRepoStat
maxRepoRadar = tempRepoStat
isInitMinMaxRadar = true
statisticInput.MinRepoRadar = tempRepoStat
statisticInput.MaxRepoRadar = tempRepoStat
statisticInput.IsInitMinMaxRadar = true
}

} else {
if !setting.RadarMap.IgnoreMirrorRepo || (setting.RadarMap.IgnoreMirrorRepo && !tempRepoStat.IsMirror) {
if tempRepoStat.Impact < minRepoRadar.Impact {
minRepoRadar.Impact = tempRepoStat.Impact
if tempRepoStat.Impact < statisticInput.MinRepoRadar.Impact {
statisticInput.MinRepoRadar.Impact = tempRepoStat.Impact
}

if tempRepoStat.Impact > maxRepoRadar.Impact {
maxRepoRadar.Impact = tempRepoStat.Impact
if tempRepoStat.Impact > statisticInput.MaxRepoRadar.Impact {
statisticInput.MaxRepoRadar.Impact = tempRepoStat.Impact
}

if tempRepoStat.Completeness < minRepoRadar.Completeness {
minRepoRadar.Completeness = tempRepoStat.Completeness
if tempRepoStat.Completeness < statisticInput.MinRepoRadar.Completeness {
statisticInput.MinRepoRadar.Completeness = tempRepoStat.Completeness
}

if tempRepoStat.Completeness > maxRepoRadar.Completeness {
maxRepoRadar.Completeness = tempRepoStat.Completeness
if tempRepoStat.Completeness > statisticInput.MaxRepoRadar.Completeness {
statisticInput.MaxRepoRadar.Completeness = tempRepoStat.Completeness
}

if tempRepoStat.Liveness < minRepoRadar.Liveness {
minRepoRadar.Liveness = tempRepoStat.Liveness
if tempRepoStat.Liveness < statisticInput.MinRepoRadar.Liveness {
statisticInput.MinRepoRadar.Liveness = tempRepoStat.Liveness
}

if tempRepoStat.Liveness > maxRepoRadar.Liveness {
maxRepoRadar.Liveness = tempRepoStat.Liveness
if tempRepoStat.Liveness > statisticInput.MaxRepoRadar.Liveness {
statisticInput.MaxRepoRadar.Liveness = tempRepoStat.Liveness
}

if tempRepoStat.ProjectHealth < minRepoRadar.ProjectHealth {
minRepoRadar.ProjectHealth = tempRepoStat.ProjectHealth
if tempRepoStat.ProjectHealth < statisticInput.MinRepoRadar.ProjectHealth {
statisticInput.MinRepoRadar.ProjectHealth = tempRepoStat.ProjectHealth
}

if tempRepoStat.ProjectHealth > maxRepoRadar.ProjectHealth {
maxRepoRadar.ProjectHealth = tempRepoStat.ProjectHealth
if tempRepoStat.ProjectHealth > statisticInput.MaxRepoRadar.ProjectHealth {
statisticInput.MaxRepoRadar.ProjectHealth = tempRepoStat.ProjectHealth
}

if tempRepoStat.TeamHealth < minRepoRadar.TeamHealth {
minRepoRadar.TeamHealth = tempRepoStat.TeamHealth
if tempRepoStat.TeamHealth < statisticInput.MinRepoRadar.TeamHealth {
statisticInput.MinRepoRadar.TeamHealth = tempRepoStat.TeamHealth
}

if tempRepoStat.TeamHealth > maxRepoRadar.TeamHealth {
maxRepoRadar.TeamHealth = tempRepoStat.TeamHealth
if tempRepoStat.TeamHealth > statisticInput.MaxRepoRadar.TeamHealth {
statisticInput.MaxRepoRadar.TeamHealth = tempRepoStat.TeamHealth
}

if tempRepoStat.Growth < minRepoRadar.Growth {
minRepoRadar.Growth = tempRepoStat.Growth
if tempRepoStat.Growth < statisticInput.MinRepoRadar.Growth {
statisticInput.MinRepoRadar.Growth = tempRepoStat.Growth
}

if tempRepoStat.Growth > maxRepoRadar.Growth {
maxRepoRadar.Growth = tempRepoStat.Growth
if tempRepoStat.Growth > statisticInput.MaxRepoRadar.Growth {
statisticInput.MaxRepoRadar.Growth = tempRepoStat.Growth
}

}

}

log.Info("finish statistic: %s", getDistinctProjectName(repo))
}
statisticMutex.Unlock()

if len(error_projects) > 0 {
mailer.SendWarnNotifyMail(setting.Warn_Notify_Mails, warnEmailMessage)
}

//radar map
log.Info("begin statistic radar")
for _, radarInit := range reposRadar {
if radarInit.IsMirror && setting.RadarMap.IgnoreMirrorRepo {
radarInit.Impact = 0
radarInit.Completeness = 0
radarInit.Liveness = 0
radarInit.ProjectHealth = 0
radarInit.TeamHealth = 0
radarInit.Growth = 0
radarInit.RadarTotal = 0
} else {
radarInit.Impact = normalization.Normalization(radarInit.Impact, minRepoRadar.Impact, maxRepoRadar.Impact)
radarInit.Completeness = normalization.Normalization(radarInit.Completeness, minRepoRadar.Completeness, maxRepoRadar.Completeness)
radarInit.Liveness = normalization.Normalization(radarInit.Liveness, minRepoRadar.Liveness, maxRepoRadar.Liveness)
radarInit.ProjectHealth = normalization.Normalization(radarInit.ProjectHealth, minRepoRadar.ProjectHealth, maxRepoRadar.ProjectHealth)
radarInit.TeamHealth = normalization.Normalization(radarInit.TeamHealth, minRepoRadar.TeamHealth, maxRepoRadar.TeamHealth)
radarInit.Growth = normalization.Normalization(radarInit.Growth, minRepoRadar.Growth, maxRepoRadar.Growth)
radarInit.RadarTotal = normalization.GetRadarValue(radarInit.Impact, radarInit.Completeness, radarInit.Liveness, radarInit.ProjectHealth, radarInit.TeamHealth, radarInit.Growth)
}
models.UpdateRepoStat(radarInit)
}

log.Info("finish statistic: radar")

log.Info("finish statistic: %s", getDistinctProjectName(repo))
return nil
}

func getDistinctProjectName(repo *models.Repository) string {
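
In short, this PR moves the body of the per-repository loop in `RepoStatisticDaily` into a `statisticOneRepo` worker function driven by a four-worker `tunny` pool, with a package-level `sync.WaitGroup` signalling when every repository has been processed and a `sync.Mutex` serializing updates to the shared radar min/max state. Below is a minimal, self-contained sketch of that pattern, not the real code: `statItem`, `statisticOne`, and the `results` slice are simplified stand-ins for `StatisticInput`, `statisticOneRepo`, and the radar state. The sketch dispatches jobs from separate goroutines, since `tunny`'s `Process` is synchronous and blocks until each job completes (see the vendored README below).

``` go
package main

import (
	"fmt"
	"sync"

	"github.com/Jeffail/tunny"
)

// statItem is a simplified stand-in for the PR's *StatisticInput payload.
type statItem struct{ name string }

var (
	mu      sync.Mutex     // like statisticMutex: guards shared state
	wg      sync.WaitGroup // like statisticWg: signals when all jobs finish
	results []string
)

// statisticOne mirrors the shape of statisticOneRepo: cast the payload,
// do the heavy per-item work, then update shared state under the mutex.
func statisticOne(payload interface{}) interface{} {
	defer wg.Done()
	item := payload.(*statItem)
	// ... per-repository statistics would run here ...
	mu.Lock()
	results = append(results, item.name)
	mu.Unlock()
	return nil
}

func main() {
	items := []*statItem{{"repo-a"}, {"repo-b"}, {"repo-c"}}

	pool := tunny.NewFunc(4, statisticOne) // four workers, as in the PR
	defer pool.Close()

	wg.Add(len(items))
	for _, it := range items {
		it := it
		// Process is synchronous, so dispatch from a goroutine to keep
		// all four workers busy at once.
		go pool.Process(it)
	}
	wg.Wait()
	fmt.Println("processed:", results)
}
```

Under this pattern the final radar normalization and the warn-mail dispatch stay on the caller's side, running only after `statisticWg.Wait()` returns, as in the hunk above.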


vendor/github.com/Jeffail/tunny/LICENSE (+19, -0)

@@ -0,0 +1,19 @@
Copyright (c) 2014 Ashley Jeffs

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/Jeffail/tunny/README.md (+134, -0)

@@ -0,0 +1,134 @@
![Tunny](tunny_logo.png "Tunny")

[![godoc for Jeffail/tunny][1]][2]
[![goreportcard for Jeffail/tunny][3]][4]

Tunny is a Golang library for spawning and managing a goroutine pool, allowing
you to limit work coming from any number of goroutines with a synchronous API.

A fixed goroutine pool is helpful when you have work coming from an arbitrary
number of asynchronous sources, but a limited capacity for parallel processing.
For example, when processing jobs from HTTP requests that are CPU heavy you can
create a pool with a size that matches your CPU count.

## Install

``` sh
go get github.com/Jeffail/tunny
```

Or, using dep:

``` sh
dep ensure -add github.com/Jeffail/tunny
```

## Use

For most cases your heavy work can be expressed in a simple `func()`, in which
case you can use `NewFunc`. Let's see how this looks using the earlier example
of CPU-heavy HTTP request processing on a pool sized to the CPU count:

``` go
package main

import (
"io/ioutil"
"net/http"
"runtime"

"github.com/Jeffail/tunny"
)

func main() {
numCPUs := runtime.NumCPU()

pool := tunny.NewFunc(numCPUs, func(payload interface{}) interface{} {
var result []byte

// TODO: Something CPU heavy with payload

return result
})
defer pool.Close()

http.HandleFunc("/work", func(w http.ResponseWriter, r *http.Request) {
input, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, "Internal error", http.StatusInternalServerError)
}
defer r.Body.Close()

// Funnel this work into our pool. This call is synchronous and will
// block until the job is completed.
result := pool.Process(input)

w.Write(result.([]byte))
})

http.ListenAndServe(":8080", nil)
}
```

Tunny also supports timeouts. You can replace the `Process` call above with the
following:

``` go
result, err := pool.ProcessTimed(input, time.Second*5)
if err == tunny.ErrJobTimedOut {
http.Error(w, "Request timed out", http.StatusRequestTimeout)
}
```

You can also use the context from the request (or any other context) to handle timeouts and deadlines. Simply replace the `Process` call with the following:

``` go
result, err := pool.ProcessCtx(r.Context(), input)
if err == context.DeadlineExceeded {
http.Error(w, "Request timed out", http.StatusRequestTimeout)
}
```

## Changing Pool Size

The size of a Tunny pool can be changed at any time with `SetSize(int)`:

``` go
pool.SetSize(10) // 10 goroutines
pool.SetSize(100) // 100 goroutines
```

This is safe to perform from any goroutine even if others are still processing.

## Goroutines With State

Sometimes each goroutine within a Tunny pool will require its own managed state.
In this case you should implement [`tunny.Worker`][tunny-worker], which includes
calls for terminating, interrupting (in case a job times out and is no longer
needed) and blocking the next job allocation until a condition is met.

When creating a pool using `Worker` types you will need to provide a constructor
function for spawning your custom implementation:

``` go
pool := tunny.New(poolSize, func() Worker {
// TODO: Any per-goroutine state allocation here.
return newCustomWorker()
})
```

This allows Tunny to create and destroy `Worker` types cleanly when the pool
size is changed.
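
For reference, here is a minimal sketch of what such a custom implementation might look like, written from outside the `tunny` package. `customWorker`, its `buf` field, and `newCustomWorker` are hypothetical illustrations, not part of Tunny; only the four methods come from the `Worker` interface above.

``` go
// customWorker is a hypothetical Worker that owns per-goroutine state.
type customWorker struct {
	buf []byte // scratch space reused across jobs by a single goroutine
}

func newCustomWorker() tunny.Worker {
	return &customWorker{buf: make([]byte, 0, 1024)}
}

// Process performs a job using the worker's private state.
func (w *customWorker) Process(payload interface{}) interface{} {
	w.buf = append(w.buf[:0], payload.([]byte)...)
	return len(w.buf)
}

func (w *customWorker) BlockUntilReady() {} // ready immediately
func (w *customWorker) Interrupt()       {} // nothing blocking to cancel
func (w *customWorker) Terminate()       {} // no resources to release
```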

## Ordering

Backlogged jobs are not guaranteed to be processed in order. Due to the current
implementation of channels and select blocks, a stack of backlogged jobs will be
processed as a FIFO queue. However, this behaviour is not part of the spec and
should not be relied upon.

[1]: https://godoc.org/github.com/Jeffail/tunny?status.svg
[2]: http://godoc.org/github.com/Jeffail/tunny
[3]: https://goreportcard.com/badge/github.com/Jeffail/tunny
[4]: https://goreportcard.com/report/Jeffail/tunny
[tunny-worker]: https://godoc.org/github.com/Jeffail/tunny#Worker

vendor/github.com/Jeffail/tunny/tunny.go (+309, -0)

@@ -0,0 +1,309 @@
// Copyright (c) 2014 Ashley Jeffs
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package tunny

import (
"context"
"errors"
"sync"
"sync/atomic"
"time"
)

//------------------------------------------------------------------------------

// Errors that are used throughout the Tunny API.
var (
ErrPoolNotRunning = errors.New("the pool is not running")
ErrJobNotFunc = errors.New("generic worker not given a func()")
ErrWorkerClosed = errors.New("worker was closed")
ErrJobTimedOut = errors.New("job request timed out")
)

// Worker is an interface representing a Tunny working agent. It will be used to
// block a calling goroutine until ready to process a job, process that job
// synchronously, interrupt its own process call when jobs are abandoned, and
// clean up its resources when being removed from the pool.
//
// Each of these duties is implemented as a single method and can be averted
// when not needed by simply implementing an empty func.
type Worker interface {
// Process will synchronously perform a job and return the result.
Process(interface{}) interface{}

// BlockUntilReady is called before each job is processed and must block the
// calling goroutine until the Worker is ready to process the next job.
BlockUntilReady()

// Interrupt is called when a job is cancelled. The worker is responsible
// for unblocking the Process implementation.
Interrupt()

// Terminate is called when a Worker is removed from the processing pool
// and is responsible for cleaning up any held resources.
Terminate()
}

//------------------------------------------------------------------------------

// closureWorker is a minimal Worker implementation that simply wraps a
// func(interface{}) interface{}
type closureWorker struct {
processor func(interface{}) interface{}
}

func (w *closureWorker) Process(payload interface{}) interface{} {
return w.processor(payload)
}

func (w *closureWorker) BlockUntilReady() {}
func (w *closureWorker) Interrupt() {}
func (w *closureWorker) Terminate() {}

//------------------------------------------------------------------------------

// callbackWorker is a minimal Worker implementation that attempts to cast
// each job into func() and either calls it if successful or returns
// ErrJobNotFunc.
type callbackWorker struct{}

func (w *callbackWorker) Process(payload interface{}) interface{} {
f, ok := payload.(func())
if !ok {
return ErrJobNotFunc
}
f()
return nil
}

func (w *callbackWorker) BlockUntilReady() {}
func (w *callbackWorker) Interrupt() {}
func (w *callbackWorker) Terminate() {}

//------------------------------------------------------------------------------

// Pool is a struct that manages a collection of workers, each with their own
// goroutine. The Pool can initialize, expand, compress and close the workers,
// as well as process jobs with the workers synchronously.
type Pool struct {
queuedJobs int64

ctor func() Worker
workers []*workerWrapper
reqChan chan workRequest

workerMut sync.Mutex
}

// New creates a new Pool of workers that starts with n workers. You must
// provide a constructor function that creates new Worker types and when you
// change the size of the pool the constructor will be called to create each new
// Worker.
func New(n int, ctor func() Worker) *Pool {
p := &Pool{
ctor: ctor,
reqChan: make(chan workRequest),
}
p.SetSize(n)

return p
}

// NewFunc creates a new Pool of workers where each worker will process using
// the provided func.
func NewFunc(n int, f func(interface{}) interface{}) *Pool {
return New(n, func() Worker {
return &closureWorker{
processor: f,
}
})
}

// NewCallback creates a new Pool of workers where each worker casts the job
// payload into a func() and runs it, or returns ErrJobNotFunc if the cast failed.
func NewCallback(n int) *Pool {
return New(n, func() Worker {
return &callbackWorker{}
})
}

//------------------------------------------------------------------------------

// Process will use the Pool to process a payload and synchronously return the
// result. Process can be called safely by any goroutine, but will panic if the
// Pool has been stopped.
func (p *Pool) Process(payload interface{}) interface{} {
atomic.AddInt64(&p.queuedJobs, 1)

request, open := <-p.reqChan
if !open {
panic(ErrPoolNotRunning)
}

request.jobChan <- payload

payload, open = <-request.retChan
if !open {
panic(ErrWorkerClosed)
}

atomic.AddInt64(&p.queuedJobs, -1)
return payload
}

// ProcessTimed will use the Pool to process a payload and synchronously return
// the result. If the timeout occurs before the job has finished the worker will
// be interrupted and ErrJobTimedOut will be returned. ProcessTimed can be
// called safely by any goroutine.
func (p *Pool) ProcessTimed(
payload interface{},
timeout time.Duration,
) (interface{}, error) {
atomic.AddInt64(&p.queuedJobs, 1)
defer atomic.AddInt64(&p.queuedJobs, -1)

tout := time.NewTimer(timeout)

var request workRequest
var open bool

select {
case request, open = <-p.reqChan:
if !open {
return nil, ErrPoolNotRunning
}
case <-tout.C:
return nil, ErrJobTimedOut
}

select {
case request.jobChan <- payload:
case <-tout.C:
request.interruptFunc()
return nil, ErrJobTimedOut
}

select {
case payload, open = <-request.retChan:
if !open {
return nil, ErrWorkerClosed
}
case <-tout.C:
request.interruptFunc()
return nil, ErrJobTimedOut
}

tout.Stop()
return payload, nil
}

// ProcessCtx will use the Pool to process a payload and synchronously return
// the result. If the context cancels before the job has finished the worker will
// be interrupted and the context's error will be returned. ProcessCtx can be
// called safely by any goroutine.
func (p *Pool) ProcessCtx(ctx context.Context, payload interface{}) (interface{}, error) {
atomic.AddInt64(&p.queuedJobs, 1)
defer atomic.AddInt64(&p.queuedJobs, -1)

var request workRequest
var open bool

select {
case request, open = <-p.reqChan:
if !open {
return nil, ErrPoolNotRunning
}
case <-ctx.Done():
return nil, ctx.Err()
}

select {
case request.jobChan <- payload:
case <-ctx.Done():
request.interruptFunc()
return nil, ctx.Err()
}

select {
case payload, open = <-request.retChan:
if !open {
return nil, ErrWorkerClosed
}
case <-ctx.Done():
request.interruptFunc()
return nil, ctx.Err()
}

return payload, nil
}

// QueueLength returns the current count of pending queued jobs.
func (p *Pool) QueueLength() int64 {
return atomic.LoadInt64(&p.queuedJobs)
}

// SetSize changes the total number of workers in the Pool. This can be called
// by any goroutine at any time unless the Pool has been stopped, in which case
// a panic will occur.
func (p *Pool) SetSize(n int) {
p.workerMut.Lock()
defer p.workerMut.Unlock()

lWorkers := len(p.workers)
if lWorkers == n {
return
}

// Add extra workers if N > len(workers)
for i := lWorkers; i < n; i++ {
p.workers = append(p.workers, newWorkerWrapper(p.reqChan, p.ctor()))
}

// Asynchronously stop all workers > N
for i := n; i < lWorkers; i++ {
p.workers[i].stop()
}

// Synchronously wait for all workers > N to stop
for i := n; i < lWorkers; i++ {
p.workers[i].join()
p.workers[i] = nil
}

// Remove stopped workers from slice
p.workers = p.workers[:n]
}

// GetSize returns the current size of the pool.
func (p *Pool) GetSize() int {
p.workerMut.Lock()
defer p.workerMut.Unlock()

return len(p.workers)
}

// Close will terminate all workers and close the job channel of this Pool.
func (p *Pool) Close() {
p.SetSize(0)
close(p.reqChan)
}

//------------------------------------------------------------------------------

vendor/github.com/Jeffail/tunny/tunny_logo.png (BIN)

Width: 1400  |  Height: 350  |  Size: 52 KiB

vendor/github.com/Jeffail/tunny/worker.go (+126, -0)

@@ -0,0 +1,126 @@
// Copyright (c) 2014 Ashley Jeffs
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package tunny

//------------------------------------------------------------------------------

// workRequest is a struct containing context representing a worker's intention
// to receive a work payload.
type workRequest struct {
// jobChan is used to send the payload to this worker.
jobChan chan<- interface{}

// retChan is used to read the result from this worker.
retChan <-chan interface{}

// interruptFunc can be called to cancel a running job. When called it is no
// longer necessary to read from retChan.
interruptFunc func()
}

//------------------------------------------------------------------------------

// workerWrapper takes a Worker implementation and wraps it within a goroutine
// and channel arrangement. The workerWrapper is responsible for managing the
// lifetime of both the Worker and the goroutine.
type workerWrapper struct {
worker Worker
interruptChan chan struct{}

// reqChan is NOT owned by this type, it is used to send requests for work.
reqChan chan<- workRequest

// closeChan can be closed in order to cleanly shutdown this worker.
closeChan chan struct{}

// closedChan is closed by the run() goroutine when it exits.
closedChan chan struct{}
}

func newWorkerWrapper(
reqChan chan<- workRequest,
worker Worker,
) *workerWrapper {
w := workerWrapper{
worker: worker,
interruptChan: make(chan struct{}),
reqChan: reqChan,
closeChan: make(chan struct{}),
closedChan: make(chan struct{}),
}

go w.run()

return &w
}

//------------------------------------------------------------------------------

func (w *workerWrapper) interrupt() {
close(w.interruptChan)
w.worker.Interrupt()
}

func (w *workerWrapper) run() {
jobChan, retChan := make(chan interface{}), make(chan interface{})
defer func() {
w.worker.Terminate()
close(retChan)
close(w.closedChan)
}()

for {
// NOTE: Blocking here will prevent the worker from closing down.
w.worker.BlockUntilReady()
select {
case w.reqChan <- workRequest{
jobChan: jobChan,
retChan: retChan,
interruptFunc: w.interrupt,
}:
select {
case payload := <-jobChan:
result := w.worker.Process(payload)
select {
case retChan <- result:
case <-w.interruptChan:
w.interruptChan = make(chan struct{})
}
case _, _ = <-w.interruptChan:
w.interruptChan = make(chan struct{})
}
case <-w.closeChan:
return
}
}
}

//------------------------------------------------------------------------------

func (w *workerWrapper) stop() {
close(w.closeChan)
}

func (w *workerWrapper) join() {
<-w.closedChan
}

//------------------------------------------------------------------------------

vendor/golang.org/x/exp/LICENSE (+0, -27)

@@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/exp/PATENTS (+0, -22)

@@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

vendor/golang.org/x/exp/constraints/constraints.go (+0, -50)

@@ -1,50 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package constraints defines a set of useful constraints to be used
// with type parameters.
package constraints

// Signed is a constraint that permits any signed integer type.
// If future releases of Go add new predeclared signed integer types,
// this constraint will be modified to include them.
type Signed interface {
~int | ~int8 | ~int16 | ~int32 | ~int64
}

// Unsigned is a constraint that permits any unsigned integer type.
// If future releases of Go add new predeclared unsigned integer types,
// this constraint will be modified to include them.
type Unsigned interface {
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}

// Integer is a constraint that permits any integer type.
// If future releases of Go add new predeclared integer types,
// this constraint will be modified to include them.
type Integer interface {
Signed | Unsigned
}

// Float is a constraint that permits any floating-point type.
// If future releases of Go add new predeclared floating-point types,
// this constraint will be modified to include them.
type Float interface {
~float32 | ~float64
}

// Complex is a constraint that permits any complex numeric type.
// If future releases of Go add new predeclared complex numeric types,
// this constraint will be modified to include them.
type Complex interface {
~complex64 | ~complex128
}

// Ordered is a constraint that permits any ordered type: any type
// that supports the operators < <= >= >.
// If future releases of Go add new ordered types,
// this constraint will be modified to include them.
type Ordered interface {
Integer | Float | ~string
}

vendor/golang.org/x/exp/slices/cmp.go (+0, -44)

@@ -1,44 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

import "golang.org/x/exp/constraints"

// min is a version of the predeclared function from the Go 1.21 release.
func min[T constraints.Ordered](a, b T) T {
if a < b || isNaN(a) {
return a
}
return b
}

// max is a version of the predeclared function from the Go 1.21 release.
func max[T constraints.Ordered](a, b T) T {
if a > b || isNaN(a) {
return a
}
return b
}

// cmpLess is a copy of cmp.Less from the Go 1.21 release.
func cmpLess[T constraints.Ordered](x, y T) bool {
return (isNaN(x) && !isNaN(y)) || x < y
}

// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
func cmpCompare[T constraints.Ordered](x, y T) int {
xNaN := isNaN(x)
yNaN := isNaN(y)
if xNaN && yNaN {
return 0
}
if xNaN || x < y {
return -1
}
if yNaN || x > y {
return +1
}
return 0
}

vendor/golang.org/x/exp/slices/slices.go (+0, -499)

@@ -1,499 +0,0 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package slices defines various functions useful with slices of any type.
package slices

import (
"unsafe"

"golang.org/x/exp/constraints"
)

// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
if len(s1) != len(s2) {
return false
}
for i := range s1 {
if s1[i] != s2[i] {
return false
}
}
return true
}

// EqualFunc reports whether two slices are equal using an equality
// function on each pair of elements. If the lengths are different,
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
if len(s1) != len(s2) {
return false
}
for i, v1 := range s1 {
v2 := s2[i]
if !eq(v1, v2) {
return false
}
}
return true
}

// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
// of elements. The elements are compared sequentially, starting at index 0,
// until one element is not equal to the other.
// The result of comparing the first non-matching elements is returned.
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
for i, v1 := range s1 {
if i >= len(s2) {
return +1
}
v2 := s2[i]
if c := cmpCompare(v1, v2); c != 0 {
return c
}
}
if len(s1) < len(s2) {
return -1
}
return 0
}

// CompareFunc is like [Compare] but uses a custom comparison function on each
// pair of elements.
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
for i, v1 := range s1 {
if i >= len(s2) {
return +1
}
v2 := s2[i]
if c := cmp(v1, v2); c != 0 {
return c
}
}
if len(s1) < len(s2) {
return -1
}
return 0
}

// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
func Index[S ~[]E, E comparable](s S, v E) int {
for i := range s {
if v == s[i] {
return i
}
}
return -1
}

// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
for i := range s {
if f(s[i]) {
return i
}
}
return -1
}

// Contains reports whether v is present in s.
func Contains[S ~[]E, E comparable](s S, v E) bool {
return Index(s, v) >= 0
}

// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
return IndexFunc(s, f) >= 0
}

// Insert inserts the values v... into s at index i,
// returning the modified slice.
// The elements at s[i:] are shifted up to make room.
// In the returned slice r, r[i] == v[0],
// and r[i+len(v)] == value originally at r[i].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
m := len(v)
if m == 0 {
return s
}
n := len(s)
if i == n {
return append(s, v...)
}
if n+m > cap(s) {
// Use append rather than make so that we bump the size of
// the slice up to the next storage class.
// This is what Grow does but we don't call Grow because
// that might copy the values twice.
s2 := append(s[:i], make(S, n+m-i)...)
copy(s2[i:], v)
copy(s2[i+m:], s[i:])
return s2
}
s = s[:n+m]

// before:
// s: aaaaaaaabbbbccccccccdddd
// ^ ^ ^ ^
// i i+m n n+m
// after:
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
//
// a are the values that don't move in s.
// v are the values copied in from v.
// b and c are the values from s that are shifted up in index.
// d are the values that get overwritten, never to be seen again.

if !overlaps(v, s[i+m:]) {
// Easy case - v does not overlap either the c or d regions.
// (It might be in some of a or b, or elsewhere entirely.)
// The data we copy up doesn't write to v at all, so just do it.

copy(s[i+m:], s[i:])

// Now we have
// s: aaaaaaaabbbbbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// Note the b values are duplicated.

copy(s[i:], v)

// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}

// The hard case - v overlaps c or d. We can't just shift up
// the data because we'd move or clobber the values we're trying
// to insert.
// So instead, write v on top of d, then rotate.
copy(s[n:], v)

// Now we have
// s: aaaaaaaabbbbccccccccvvvv
// ^ ^ ^ ^
// i i+m n n+m

rotateRight(s[i:], m)

// Now we have
// s: aaaaaaaavvvvbbbbcccccccc
// ^ ^ ^ ^
// i i+m n n+m
// That's the result we want.
return s
}

// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
// Delete is O(len(s)-j), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
// elements contain pointers you might consider zeroing those elements so that
// objects they reference can be garbage collected.
func Delete[S ~[]E, E any](s S, i, j int) S {
_ = s[i:j] // bounds check

return append(s[:i], s[j:]...)
}

// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// When DeleteFunc removes m elements, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage
// collected.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := IndexFunc(s, del)
if i == -1 {
return s
}
// Don't start copying elements until we find one to delete.
for j := i + 1; j < len(s); j++ {
if v := s[j]; !del(v) {
s[i] = v
i++
}
}
return s[:i]
}

// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
_ = s[i:j] // verify that i:j is a valid subslice

if i == j {
return Insert(s, i, v...)
}
if j == len(s) {
return append(s[:i], v...)
}

tot := len(s[:i]) + len(v) + len(s[j:])
if tot > cap(s) {
// Too big to fit, allocate and copy over.
s2 := append(s[:i], make(S, tot-i)...) // See Insert
copy(s2[i:], v)
copy(s2[i+len(v):], s[j:])
return s2
}

r := s[:tot]

if i+len(v) <= j {
// Easy, as v fits in the deleted portion.
copy(r[i:], v)
if i+len(v) != j {
copy(r[i+len(v):], s[j:])
}
return r
}

// We are expanding (v is bigger than j-i).
// The situation is something like this:
// (example has i=4,j=8,len(s)=16,len(v)=6)
// s: aaaaxxxxbbbbbbbbyy
// ^ ^ ^ ^
// i j len(s) tot
// a: prefix of s
// x: deleted range
// b: more of s
// y: area to expand into

if !overlaps(r[i+len(v):], v) {
// Easy, as v is not clobbered by the first copy.
copy(r[i+len(v):], s[j:])
copy(r[i:], v)
return r
}

// This is a situation where we don't have a single place to which
// we can copy v. Parts of it need to go to two different places.
// We want to copy the prefix of v into y and the suffix into x, then
// rotate |y| spots to the right.
//
// v[2:] v[:2]
// | |
// s: aaaavvvvbbbbbbbbvv
// ^ ^ ^ ^
// i j len(s) tot
//
// If either of those two destinations don't alias v, then we're good.
y := len(v) - (j - i) // length of y portion

if !overlaps(r[i:j], v) {
copy(r[i:j], v[y:])
copy(r[len(s):], v[:y])
rotateRight(r[i:], y)
return r
}
if !overlaps(r[len(s):], v) {
copy(r[len(s):], v[:y])
copy(r[i:j], v[y:])
rotateRight(r[i:], y)
return r
}

// Now we know that v overlaps both x and y.
// That means that the entirety of b is *inside* v.
// So we don't need to preserve b at all; instead we
// can copy v first, then copy the b part of v out of
// v to the right destination.
k := startIdx(v, s[j:])
copy(r[i:], v)
copy(r[i+len(v):], r[i+k:])
return r
}

// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[S ~[]E, E any](s S) S {
// Preserve nil in case it matters.
if s == nil {
return nil
}
return append(S([]E{}), s...)
}

// Compact replaces consecutive runs of equal elements with a single copy.
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// When Compact discards m elements in total, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage collected.
func Compact[S ~[]E, E comparable](s S) S {
if len(s) < 2 {
return s
}
i := 1
for k := 1; k < len(s); k++ {
if s[k] != s[k-1] {
if i != k {
s[i] = s[k]
}
i++
}
}
return s[:i]
}

// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
if len(s) < 2 {
return s
}
i := 1
for k := 1; k < len(s); k++ {
if !eq(s[k], s[k-1]) {
if i != k {
s[i] = s[k]
}
i++
}
}
return s[:i]
}

// Grow increases the slice's capacity, if necessary, to guarantee space for
// another n elements. After Grow(n), at least n elements can be appended
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
func Grow[S ~[]E, E any](s S, n int) S {
if n < 0 {
panic("cannot be negative")
}
if n -= cap(s) - len(s); n > 0 {
// TODO(https://go.dev/issue/53888): Make using []E instead of S
// to workaround a compiler bug where the runtime.growslice optimization
// does not take effect. Revert when the compiler is fixed.
s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
}
return s
}

// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
func Clip[S ~[]E, E any](s S) S {
return s[:len(s):len(s)]
}

// Rotation algorithm explanation:
//
// rotate left by 2
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join first parts
// 89234567 01
// recursively rotate first left part by 2
// 23456789 01
// join at the end
// 2345678901
//
// rotate left by 8
// start with
// 0123456789
// split up like this
// 01 234567 89
// swap first 2 and last 2
// 89 234567 01
// join last parts
// 89 23456701
// recursively rotate second part left by 6
// 89 01234567
// join at the end
// 8901234567

// TODO: There are other rotate algorithms.
// This algorithm has the desirable property that it moves each element exactly twice.
// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
// The follow-cycles algorithm can be 1-write but it is not very cache friendly.

// rotateLeft rotates b left by n spaces.
// s_final[i] = s_orig[i+r], wrapping around.
func rotateLeft[E any](s []E, r int) {
for r != 0 && r != len(s) {
if r*2 <= len(s) {
swap(s[:r], s[len(s)-r:])
s = s[:len(s)-r]
} else {
swap(s[:len(s)-r], s[r:])
s, r = s[len(s)-r:], r*2-len(s)
}
}
}
func rotateRight[E any](s []E, r int) {
rotateLeft(s, len(s)-r)
}

// swap swaps the contents of x and y. x and y must be equal length and disjoint.
func swap[E any](x, y []E) {
for i := 0; i < len(x); i++ {
x[i], y[i] = y[i], x[i]
}
}

// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
func overlaps[E any](a, b []E) bool {
if len(a) == 0 || len(b) == 0 {
return false
}
elemSize := unsafe.Sizeof(a[0])
if elemSize == 0 {
return false
}
// TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
// Also see crypto/internal/alias/alias.go:AnyOverlap
return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
}

// startIdx returns the index in haystack where the needle starts.
// prerequisite: the needle must be aliased entirely inside the haystack.
func startIdx[E any](haystack, needle []E) int {
p := &needle[0]
for i := range haystack {
if p == &haystack[i] {
return i
}
}
// TODO: what if the overlap is by a non-integral number of Es?
panic("needle not found")
}

// Reverse reverses the elements of the slice in place.
func Reverse[S ~[]E, E any](s S) {
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
}

+ 0
- 195
vendor/golang.org/x/exp/slices/sort.go View File

@@ -1,195 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp

package slices

import (
"math/bits"

"golang.org/x/exp/constraints"
)

// Sort sorts a slice of any ordered type in ascending order.
// When sorting floating-point numbers, NaNs are ordered before other values.
func Sort[S ~[]E, E constraints.Ordered](x S) {
n := len(x)
pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
}
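
The bits.Len(uint(n)) argument is the pdqsort depth limit: roughly log2(n) badly unbalanced pivots are tolerated before the heapsort fallback, which keeps the worst case O(n log n). Basic usage, including the documented NaN ordering:

// Illustrative sketch; assumes the vendored x/exp/slices API above.
package main

import (
	"fmt"
	"math"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []float64{3, math.NaN(), 1, 2}
	slices.Sort(xs)
	fmt.Println(xs) // [NaN 1 2 3]: NaNs sort before all other values
}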

// SortFunc sorts the slice x in ascending order as determined by the cmp
// function. This sort is not guaranteed to be stable.
// cmp(a, b) should return a negative number when a < b, a positive number when
// a > b and zero when a == b.
//
// SortFunc requires that cmp is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
n := len(x)
pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
}

// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using cmp to compare elements in the same way as [SortFunc].
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
stableCmpFunc(x, len(x), cmp)
}
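
A contrast sketch with a hypothetical user type (not from this diff): only SortStableFunc guarantees that equal-aged users keep their input order, while SortFunc may not.

// Illustrative sketch; the user type is hypothetical, not from this diff.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	us := []user{{"bob", 30}, {"eve", 25}, {"ann", 30}}
	// Subtraction is fine for small non-negative ages; for arbitrary ints,
	// prefer an overflow-safe comparison.
	slices.SortStableFunc(us, func(a, b user) int { return a.age - b.age })
	fmt.Println(us) // [{eve 25} {bob 30} {ann 30}]: bob stays ahead of ann
}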

// IsSorted reports whether x is sorted in ascending order.
func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
for i := len(x) - 1; i > 0; i-- {
if cmpLess(x[i], x[i-1]) {
return false
}
}
return true
}

// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
// comparison function as defined by [SortFunc].
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
for i := len(x) - 1; i > 0; i-- {
if cmp(x[i], x[i-1]) < 0 {
return false
}
}
return true
}

// Min returns the minimal value in x. It panics if x is empty.
// For floating-point numbers, Min propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Min[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Min: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = min(m, x[i])
}
return m
}

// MinFunc returns the minimal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one minimal element
// according to the cmp function, MinFunc returns the first one.
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MinFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) < 0 {
m = x[i]
}
}
return m
}

// Max returns the maximal value in x. It panics if x is empty.
// For floating-point E, Max propagates NaNs (any NaN value in x
// forces the output to be NaN).
func Max[S ~[]E, E constraints.Ordered](x S) E {
if len(x) < 1 {
panic("slices.Max: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
m = max(m, x[i])
}
return m
}

// MaxFunc returns the maximal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one maximal element
// according to the cmp function, MaxFunc returns the first one.
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
if len(x) < 1 {
panic("slices.MaxFunc: empty list")
}
m := x[0]
for i := 1; i < len(x); i++ {
if cmp(x[i], m) > 0 {
m = x[i]
}
}
return m
}
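
Min/Max and their Func variants panic on empty input, so callers guard the zero-length case; ties go to the first element scanned. A sketch reusing the hypothetical user type:

// Illustrative sketch; the user type is hypothetical, not from this diff.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	us := []user{{"bob", 30}, {"eve", 25}, {"ann", 30}}
	if len(us) == 0 {
		return // MinFunc and MaxFunc panic on empty slices
	}
	byAge := func(a, b user) int { return a.age - b.age }
	fmt.Println(slices.MinFunc(us, byAge).name) // eve
	fmt.Println(slices.MaxFunc(us, byAge).name) // bob: first of the two age-30 maxima
}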

// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
// Inlining is faster than calling BinarySearchFunc with a lambda.
n := len(x)
// Define x[-1] < target and x[n] >= target.
// Invariant: x[i-1] < target, x[j] >= target.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if cmpLess(x[h], target) {
i = h + 1 // preserves x[i-1] < target
} else {
j = h // preserves x[j] >= target
}
}
// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
}
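
On a miss, the returned position is where the target would be inserted to keep the slice sorted, which pairs naturally with this package's Insert. A minimal sketch:

// Illustrative sketch; assumes the vendored x/exp/slices API above.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{10, 20, 30}
	i, ok := slices.BinarySearch(s, 20)
	fmt.Println(i, ok) // 1 true
	i, ok = slices.BinarySearch(s, 25)
	fmt.Println(i, ok) // 2 false: 25 would be inserted at index 2
}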

// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
// function. The slice must be sorted in increasing order, where "increasing"
// is defined by cmp. cmp should return 0 if the slice element matches
// the target, a negative number if the slice element precedes the target,
// or a positive number if the slice element follows the target.
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
n := len(x)
// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0.
// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if cmp(x[h], target) < 0 {
i = h + 1 // preserves cmp(x[i - 1], target) < 0
} else {
j = h // preserves cmp(x[j], target) >= 0
}
}
// i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
return i, i < n && cmp(x[i], target) == 0
}
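
Because the target type T is independent of the element type E, a sorted struct slice can be probed by key alone. A sketch with the same hypothetical user type, sorted by age:

// Illustrative sketch; the user type is hypothetical, not from this diff.
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	us := []user{{"eve", 25}, {"bob", 30}, {"ann", 41}} // sorted by age
	i, ok := slices.BinarySearchFunc(us, 30, func(u user, age int) int {
		return u.age - age
	})
	fmt.Println(i, ok) // 1 true: us[1] has age 30
}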

type sortedHint int // hint for pdqsort when choosing the pivot

const (
unknownHint sortedHint = iota
increasingHint
decreasingHint
)

// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
type xorshift uint64

func (r *xorshift) Next() uint64 {
*r ^= *r << 13
*r ^= *r >> 17
*r ^= *r << 5
return uint64(*r)
}

func nextPowerOfTwo(length int) uint {
return 1 << bits.Len(uint(length))
}

// isNaN reports whether x is a NaN without requiring the math package.
// This will always return false if T is not floating-point.
func isNaN[T constraints.Ordered](x T) bool {
return x != x
}
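
The x != x test works because NaN is the only IEEE-754 value that compares unequal to itself; for integer and string instantiations the expression is constant false. A standalone sketch with a locally narrowed copy of the helper:

// Illustrative sketch; a local copy of isNaN narrowed to two types for the demo.
package main

import (
	"fmt"
	"math"
)

func isNaN[T int | float64](x T) bool { return x != x }

func main() {
	fmt.Println(isNaN(math.NaN()))    // true
	fmt.Println(isNaN(1.0), isNaN(7)) // false false
}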

+ 0
- 479
vendor/golang.org/x/exp/slices/zsortanyfunc.go View File

@@ -1,479 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

// insertionSortCmpFunc sorts data[a:b] using insertion sort.
func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}

// siftDownCmpFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
child++
}
if !(cmp(data[first+root], data[first+child]) < 0) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}

func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
first := a
lo := 0
hi := b - a

// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownCmpFunc(data, i, hi, first, cmp)
}

// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownCmpFunc(data, lo, i, first, cmp)
}
}

// pdqsortCmpFunc sorts data[a:b].
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
const maxInsertion = 12

var (
wasBalanced = true // whether the last partitioning was reasonably balanced
wasPartitioned = true // whether the slice was already partitioned
)

for {
length := b - a

if length <= maxInsertion {
insertionSortCmpFunc(data, a, b, cmp)
return
}

// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortCmpFunc(data, a, b, cmp)
return
}

// If the last partitioning was imbalanced, we need to break patterns.
if !wasBalanced {
breakPatternsCmpFunc(data, a, b, cmp)
limit--
}

pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
if hint == decreasingHint {
reverseRangeCmpFunc(data, a, b, cmp)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
pivot = (b - 1) - (pivot - a)
hint = increasingHint
}

// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortCmpFunc(data, a, b, cmp) {
return
}
}

// The slice probably contains many duplicate elements; partition it into
// elements equal to and elements greater than the pivot.
if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
a = mid
continue
}

mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
wasPartitioned = alreadyPartitioned

leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortCmpFunc(data, a, mid, limit, cmp)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortCmpFunc(data, mid+1, b, limit, cmp)
b = mid
}
}
}

// partitionCmpFunc does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
data[j], data[a] = data[a], data[j]
return j, true
}
data[i], data[j] = data[j], data[i]
i++
j--

for {
for i <= j && (cmp(data[i], data[a]) < 0) {
i++
}
for i <= j && !(cmp(data[j], data[a]) < 0) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
data[j], data[a] = data[a], data[j]
return j, false
}

// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumes that data[a:b] does not contain elements smaller than data[pivot].
func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

for {
for i <= j && !(cmp(data[a], data[i]) < 0) {
i++
}
for i <= j && (cmp(data[a], data[j]) < 0) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
return i
}

// partialInsertionSortCmpFunc partially sorts a slice and reports whether the slice is sorted at the end.
func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !(cmp(data[i], data[i-1]) < 0) {
i++
}

if i == b {
return true
}

if b-a < shortestShifting {
return false
}

data[i], data[i-1] = data[i-1], data[i]

// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !(cmp(data[j], data[j-1]) < 0) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
}
return false
}

// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
length := b - a
if length >= 8 {
random := xorshift(length)
modulus := nextPowerOfTwo(length)

for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
other := int(uint(random.Next()) & (modulus - 1))
if other >= length {
other -= length
}
data[idx], data[a+other] = data[a+other], data[idx]
}
}
}

// choosePivotCmpFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
)

l := b - a

var (
swaps int
i = a + l/4*1
j = a + l/4*2
k = a + l/4*3
)

if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
}
// Find the median among i, j, k and store it into j.
j = medianCmpFunc(data, i, j, k, &swaps, cmp)
}

switch swaps {
case 0:
return j, increasingHint
case maxSwaps:
return j, decreasingHint
default:
return j, unknownHint
}
}

// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
if cmp(data[b], data[a]) < 0 {
*swaps++
return b, a
}
return a, b
}

// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
a, b = order2CmpFunc(data, a, b, swaps, cmp)
b, c = order2CmpFunc(data, b, c, swaps, cmp)
a, b = order2CmpFunc(data, a, b, swaps, cmp)
return b
}

// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
}

func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
i := a
j := b - 1
for i < j {
data[i], data[j] = data[j], data[i]
i++
j--
}
}

func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}

func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortCmpFunc(data, a, b, cmp)
a = b
b += blockSize
}
insertionSortCmpFunc(data, a, n, cmp)

for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeCmpFunc(data, a, a+blockSize, b, cmp)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeCmpFunc(data, a, m, n, cmp)
}
blockSize *= 2
}
}
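
The stable sort allocates no scratch buffer: it insertion-sorts fixed blocks of 20, then merges neighbouring blocks in place with symMerge while the block size doubles. This sketch prints the merge schedule that loop would follow (a standalone re-derivation, not code from the file):

// Illustrative sketch: the pairwise merge schedule of the blocked stable sort.
package main

import "fmt"

func main() {
	n, blockSize := 100, 20 // 20 matches the insertion-sorted block size above
	for blockSize < n {
		for a := 0; a+blockSize < n; a += 2 * blockSize {
			b := a + 2*blockSize
			if b > n {
				b = n // final, possibly short, right-hand block
			}
			fmt.Printf("symMerge [%d:%d) with [%d:%d)\n", a, a+blockSize, a+blockSize, b)
		}
		blockSize *= 2
	}
}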

// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-m. Without loss of generality, M < N.
// The recursion depth is bounded by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) comparisons.
// The algorithm needs O((M+N)*log(M)) swaps.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
if m-a == 1 {
// Use binary search to find the lowest index i
// such that data[i] >= data[a] for m <= i < b.
// Exit the search loop with i == b in case no such index exists.
i := m
j := b
for i < j {
h := int(uint(i+j) >> 1)
if cmp(data[h], data[a]) < 0 {
i = h + 1
} else {
j = h
}
}
// Swap values until data[a] reaches the position before i.
for k := a; k < i-1; k++ {
data[k], data[k+1] = data[k+1], data[k]
}
return
}

// Avoid unnecessary recursions of symMerge
// by direct insertion of data[m] into data[a:m]
// if data[m:b] only contains one element.
if b-m == 1 {
// Use binary search to find the lowest index i
// such that data[i] > data[m] for a <= i < m.
// Exit the search loop with i == m in case no such index exists.
i := a
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !(cmp(data[m], data[h]) < 0) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[m] reaches the position i.
for k := m; k > i; k-- {
data[k], data[k-1] = data[k-1], data[k]
}
return
}

mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
start = n - b
r = mid
} else {
start = a
r = m
}
p := n - 1

for start < r {
c := int(uint(start+r) >> 1)
if !(cmp(data[p-c], data[c]) < 0) {
start = c + 1
} else {
r = c
}
}

end := n - start
if start < m && m < end {
rotateCmpFunc(data, start, m, end, cmp)
}
if a < start && start < mid {
symMergeCmpFunc(data, a, start, mid, cmp)
}
if mid < end && end < b {
symMergeCmpFunc(data, mid, end, b, cmp)
}
}

// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotateCmpFunc performs at most b-a swaps,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
i := m - a
j := b - m

for i != j {
if i > j {
swapRangeCmpFunc(data, m-i, m, j, cmp)
i -= j
} else {
swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
j -= i
}
}
// i == j
swapRangeCmpFunc(data, m-i, m, i, cmp)
}

+ 0
- 481
vendor/golang.org/x/exp/slices/zsortordered.go View File

@@ -1,481 +0,0 @@
// Code generated by gen_sort_variants.go; DO NOT EDIT.

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slices

import "golang.org/x/exp/constraints"

// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}

// siftDownOrdered implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
child++
}
if !cmpLess(data[first+root], data[first+child]) {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}

func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
first := a
lo := 0
hi := b - a

// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDownOrdered(data, i, hi, first)
}

// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDownOrdered(data, lo, i, first)
}
}

// pdqsortOrdered sorts data[a:b].
// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
const maxInsertion = 12

var (
wasBalanced = true // whether the last partitioning was reasonably balanced
wasPartitioned = true // whether the slice was already partitioned
)

for {
length := b - a

if length <= maxInsertion {
insertionSortOrdered(data, a, b)
return
}

// Fall back to heapsort if too many bad choices were made.
if limit == 0 {
heapSortOrdered(data, a, b)
return
}

// If the last partitioning was imbalanced, we need to break patterns.
if !wasBalanced {
breakPatternsOrdered(data, a, b)
limit--
}

pivot, hint := choosePivotOrdered(data, a, b)
if hint == decreasingHint {
reverseRangeOrdered(data, a, b)
// The chosen pivot was pivot-a elements after the start of the array.
// After reversing it is pivot-a elements before the end of the array.
// The idea came from Rust's implementation.
pivot = (b - 1) - (pivot - a)
hint = increasingHint
}

// The slice is likely already sorted.
if wasBalanced && wasPartitioned && hint == increasingHint {
if partialInsertionSortOrdered(data, a, b) {
return
}
}

// The slice probably contains many duplicate elements; partition it into
// elements equal to and elements greater than the pivot.
if a > 0 && !cmpLess(data[a-1], data[pivot]) {
mid := partitionEqualOrdered(data, a, b, pivot)
a = mid
continue
}

mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
wasPartitioned = alreadyPartitioned

leftLen, rightLen := mid-a, b-mid
balanceThreshold := length / 8
if leftLen < rightLen {
wasBalanced = leftLen >= balanceThreshold
pdqsortOrdered(data, a, mid, limit)
a = mid + 1
} else {
wasBalanced = rightLen >= balanceThreshold
pdqsortOrdered(data, mid+1, b, limit)
b = mid
}
}
}

// partitionOrdered does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
data[j], data[a] = data[a], data[j]
return j, true
}
data[i], data[j] = data[j], data[i]
i++
j--

for {
for i <= j && cmpLess(data[i], data[a]) {
i++
}
for i <= j && !cmpLess(data[j], data[a]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
data[j], data[a] = data[a], data[j]
return j, false
}

// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumes that data[a:b] does not contain elements smaller than data[pivot].
func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
data[a], data[pivot] = data[pivot], data[a]
i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

for {
for i <= j && !cmpLess(data[a], data[i]) {
i++
}
for i <= j && cmpLess(data[a], data[j]) {
j--
}
if i > j {
break
}
data[i], data[j] = data[j], data[i]
i++
j--
}
return i
}

// partialInsertionSortOrdered partially sorts a slice and reports whether the slice is sorted at the end.
func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
const (
maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
shortestShifting = 50 // don't shift any elements on short arrays
)
i := a + 1
for j := 0; j < maxSteps; j++ {
for i < b && !cmpLess(data[i], data[i-1]) {
i++
}

if i == b {
return true
}

if b-a < shortestShifting {
return false
}

data[i], data[i-1] = data[i-1], data[i]

// Shift the smaller one to the left.
if i-a >= 2 {
for j := i - 1; j >= 1; j-- {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
// Shift the greater one to the right.
if b-i >= 2 {
for j := i + 1; j < b; j++ {
if !cmpLess(data[j], data[j-1]) {
break
}
data[j], data[j-1] = data[j-1], data[j]
}
}
}
return false
}

// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
length := b - a
if length >= 8 {
random := xorshift(length)
modulus := nextPowerOfTwo(length)

for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
other := int(uint(random.Next()) & (modulus - 1))
if other >= length {
other -= length
}
data[idx], data[a+other] = data[a+other], data[idx]
}
}
}

// choosePivotOrdered chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
const (
shortestNinther = 50
maxSwaps = 4 * 3
)

l := b - a

var (
swaps int
i = a + l/4*1
j = a + l/4*2
k = a + l/4*3
)

if l >= 8 {
if l >= shortestNinther {
// Tukey ninther method, the idea came from Rust's implementation.
i = medianAdjacentOrdered(data, i, &swaps)
j = medianAdjacentOrdered(data, j, &swaps)
k = medianAdjacentOrdered(data, k, &swaps)
}
// Find the median among i, j, k and store it into j.
j = medianOrdered(data, i, j, k, &swaps)
}

switch swaps {
case 0:
return j, increasingHint
case maxSwaps:
return j, decreasingHint
default:
return j, unknownHint
}
}

// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
if cmpLess(data[b], data[a]) {
*swaps++
return b, a
}
return a, b
}

// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
a, b = order2Ordered(data, a, b, swaps)
b, c = order2Ordered(data, b, c, swaps)
a, b = order2Ordered(data, a, b, swaps)
return b
}

// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
return medianOrdered(data, a-1, a, a+1, swaps)
}

func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
i := a
j := b - 1
for i < j {
data[i], data[j] = data[j], data[i]
i++
j--
}
}

func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
for i := 0; i < n; i++ {
data[a+i], data[b+i] = data[b+i], data[a+i]
}
}

func stableOrdered[E constraints.Ordered](data []E, n int) {
blockSize := 20 // must be > 0
a, b := 0, blockSize
for b <= n {
insertionSortOrdered(data, a, b)
a = b
b += blockSize
}
insertionSortOrdered(data, a, n)

for blockSize < n {
a, b = 0, 2*blockSize
for b <= n {
symMergeOrdered(data, a, a+blockSize, b)
a = b
b += 2 * blockSize
}
if m := a + blockSize; m < n {
symMergeOrdered(data, a, m, n)
}
blockSize *= 2
}
}

// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
// Computer Science, pages 714-723. Springer, 2004.
//
// Let M = m-a and N = b-m. Without loss of generality, M < N.
// The recursion depth is bounded by ceil(log(N+M)).
// The algorithm needs O(M*log(N/M + 1)) comparisons.
// The algorithm needs O((M+N)*log(M)) swaps.
//
// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
// in the paper carries through for Swap operations, especially as the block
// swapping rotate uses only O(M+N) Swaps.
//
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
// Avoid unnecessary recursions of symMerge
// by direct insertion of data[a] into data[m:b]
// if data[a:m] only contains one element.
if m-a == 1 {
// Use binary search to find the lowest index i
// such that data[i] >= data[a] for m <= i < b.
// Exit the search loop with i == b in case no such index exists.
i := m
j := b
for i < j {
h := int(uint(i+j) >> 1)
if cmpLess(data[h], data[a]) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[a] reaches the position before i.
for k := a; k < i-1; k++ {
data[k], data[k+1] = data[k+1], data[k]
}
return
}

// Avoid unnecessary recursions of symMerge
// by direct insertion of data[m] into data[a:m]
// if data[m:b] only contains one element.
if b-m == 1 {
// Use binary search to find the lowest index i
// such that data[i] > data[m] for a <= i < m.
// Exit the search loop with i == m in case no such index exists.
i := a
j := m
for i < j {
h := int(uint(i+j) >> 1)
if !cmpLess(data[m], data[h]) {
i = h + 1
} else {
j = h
}
}
// Swap values until data[m] reaches the position i.
for k := m; k > i; k-- {
data[k], data[k-1] = data[k-1], data[k]
}
return
}

mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
start = n - b
r = mid
} else {
start = a
r = m
}
p := n - 1

for start < r {
c := int(uint(start+r) >> 1)
if !cmpLess(data[p-c], data[c]) {
start = c + 1
} else {
r = c
}
}

end := n - start
if start < m && m < end {
rotateOrdered(data, start, m, end)
}
if a < start && start < mid {
symMergeOrdered(data, a, start, mid)
}
if mid < end && end < b {
symMergeOrdered(data, mid, end, b)
}
}

// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotateOrdered performs at most b-a swaps,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
i := m - a
j := b - m

for i != j {
if i > j {
swapRangeOrdered(data, m-i, m, j)
i -= j
} else {
swapRangeOrdered(data, m-i, m+j-i, i)
j -= i
}
}
// i == j
swapRangeOrdered(data, m-i, m, i)
}

+ 3
- 2
vendor/modules.txt View File

@@ -61,6 +61,9 @@ github.com/360EntSecGroup-Skylar/excelize/v2
# github.com/BurntSushi/toml v0.3.1
## explicit
github.com/BurntSushi/toml
# github.com/Jeffail/tunny v0.1.4
## explicit; go 1.13
github.com/Jeffail/tunny
# github.com/Microsoft/go-winio v0.5.2
## explicit; go 1.13
github.com/Microsoft/go-winio
@@ -1145,8 +1148,6 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
# golang.org/x/exp v0.0.0-20231127185646-65229373498e
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/slices
# golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8
## explicit; go 1.12
golang.org/x/image/bmp

