Mirror of https://github.com/go-gitea/gitea.git (synced 2025-11-03 08:02:36 +09:00)

Compare commits

110 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 62fa153f9f | |
| | be46f240d9 | |
| | ca55e49cc0 | |
| | 58615be523 | |
| | 6df82db0f7 | |
| | d98694e6ca | |
| | ac0f452b30 | |
| | 6e5fd5c584 | |
| | d0b8e3c8e1 | |
| | 7ff8e863a5 | |
| | c65e49d72f | |
| | 50084daa4c | |
| | c7db7438b7 | |
| | e11f042a95 | |
| | 87782636e6 | |
| | b935472cdf | |
| | 8ac48584ec | |
| | e898590c81 | |
| | d407857d97 | |
| | 8cfd6695da | |
| | f832e8eeea | |
| | 544ef7d394 | |
| | 5ff807acde | |
| | 849d316d8d | |
| | 946eb1321c | |
| | bc82bb9cda | |
| | f034804e5d | |
| | c1887bfc9b | |
| | 41a4047e79 | |
| | ac84bb7183 | |
| | 3be67e9a2b | |
| | ce2ade05e6 | |
| | 1e76f7b5b7 | |
| | 2265058c31 | |
| | ba74fdbda9 | |
| | 0600f7972a | |
| | 8007602b40 | |
| | 3a79f1190f | |
| | d95489b7ed | |
| | a9e1a37b71 | |
| | 5a589ef9ec | |
| | 159bc8842a | |
| | 4b771d393e | |
| | 0c2cbfcb3b | |
| | 8c4bf4c3b4 | |
| | 3bcf2e5c18 | |
| | ad54f008ac | |
| | c21167e3a2 | |
| | aaa539dd2d | |
| | e38134f707 | |
| | fa96ddb327 | |
| | a3e8450fd5 | |
| | 41422f0df0 | |
| | f773733252 | |
| | cbaf8e8785 | |
| | 1bf46836da | |
| | 387a1bc472 | |
| | 62daf84596 | |
| | 39d209dccc | |
| | c88392e772 | |
| | a83cde2f3f | |
| | 332eb2f6d2 | |
| | 3ae1d7a59f | |
| | d054c4e7f3 | |
| | 5e562e9b30 | |
| | c57e908f36 | |
| | 1112fef93d | |
| | af11549fb2 | |
| | 76d6184cd0 | |
| | d644709b22 | |
| | 30584a6df8 | |
| | 78710946f2 | |
| | 22d700edfd | |
| | 6782a64a4a | |
| | 1ec11ac87e | |
| | 2c2a30d6bb | |
| | 717b313c34 | |
| | 0a32861b28 | |
| | 52ca7b9b65 | |
| | e078d08ecd | |
| | a83fb3a83a | |
| | f9b1fac4ea | |
| | f1e8b8c0d7 | |
| | dbbb75712d | |
| | 462c6fdee2 | |
| | cead819cb5 | |
| | 4fa2804238 | |
| | 3ce46a7fbd | |
| | 15886ce048 | |
| | a725d31496 | |
| | 8e27f6e814 | |
| | 54263ff123 | |
| | 3bde297121 | |
| | 0dfde367c1 | |
| | 875501584b | |
| | 4190c134e6 | |
| | cae46216e4 | |
| | 761111f9ed | |
| | 57f1476093 | |
| | bdba89452d | |
| | 6e2dacfef6 | |
| | c0869c295a | |
| | a719311f6d | |
| | 248b67af6f | |
| | 990c6089db | |
| | 5da024a019 | |
| | eff2499be7 | |
| | 4a3c6384ac | |
| | 2b1989e59f | |
| | 340c4fc7c7 | |
.drone.yml (16 lines changed)

@@ -522,7 +522,7 @@ steps:
     image: plugins/s3:1
     settings:
       acl: public-read
-      bucket: releases
+      bucket: gitea-artifacts
       endpoint: https://storage.gitea.io
       path_style: true
       source: "dist/release/*"
@@ -543,7 +543,7 @@ steps:
     image: plugins/s3:1
     settings:
       acl: public-read
-      bucket: releases
+      bucket: gitea-artifacts
       endpoint: https://storage.gitea.io
       path_style: true
       source: "dist/release/*"
@@ -618,7 +618,7 @@ steps:
     image: plugins/s3:1
     settings:
       acl: public-read
-      bucket: releases
+      bucket: gitea-artifacts
       endpoint: https://storage.gitea.io
       path_style: true
       source: "dist/release/*"
@@ -709,7 +709,7 @@ steps:

   - name: publish
     pull: always
-    image: plugins/docker:linux-amd64
+    image: techknowlogick/drone-docker:latest
     settings:
       auto_tag: true
       auto_tag_suffix: linux-amd64
@@ -726,7 +726,7 @@ steps:
       - pull_request

   - name: publish-rootless
-    image: plugins/docker:linux-amd64
+    image: techknowlogick/drone-docker:latest
     settings:
       dockerfile: Dockerfile.rootless
       auto_tag: true
@@ -764,7 +764,7 @@ trigger:

 steps:
   - name: dryrun
     pull: always
-    image: plugins/docker:linux-arm64
+    image: techknowlogick/drone-docker:latest
     settings:
       dry_run: true
       repo: gitea/gitea
@@ -806,7 +806,7 @@ steps:

   - name: publish
     pull: always
-    image: plugins/docker:linux-arm64
+    image: techknowlogick/drone-docker:latest
     settings:
       auto_tag: true
       auto_tag_suffix: linux-arm64
@@ -826,7 +826,7 @@ steps:
      - pull_request

   - name: publish-rootless
-    image: plugins/docker:linux-arm64
+    image: techknowlogick/drone-docker:latest
     settings:
       dockerfile: Dockerfile.rootless
       auto_tag: true

@@ -110,3 +110,7 @@ issues:
     - text: "exitAfterDefer:"
       linters:
         - gocritic
+    - path: modules/graceful/manager_windows.go
+      linters:
+        - staticcheck
+      text: "svc.IsAnInteractiveSession is deprecated: Use IsWindowsService instead."
CHANGELOG.md (125 lines changed)

@@ -4,6 +4,131 @@ This changelog goes through all the changes that have been made in each release
 without substantial changes to our git log; to see the highlights of what has
 been added to each release, please refer to the [blog](https://blog.gitea.io).

+## [1.14.5](https://github.com/go-gitea/gitea/releases/tag/v1.14.5) - 2021-07-16
+
+* SECURITY
+  * Hide mirror passwords on repo settings page (#16022) (#16355)
+  * Update bluemonday to v1.0.15 (#16379) (#16380)
+* BUGFIXES
+  * Retry rename on lock induced failures (#16435) (#16439)
+  * Validate issue index before querying DB (#16406) (#16410)
+  * Fix crash following ldap authentication update (#16447) (#16449)
+* ENHANCEMENTS
+  * Redirect on bad CSRF instead of presenting bad page (#14937) (#16378)
+
+## [1.14.4](https://github.com/go-gitea/gitea/releases/tag/v1.14.4) - 2021-07-06
+
+* BUGFIXES
+  * Fix relative links in postprocessed images (#16334) (#16340)
+  * Fix list_options GetStartEnd (#16303) (#16305)
+  * Fix API to use author for commits instead of committer (#16276) (#16277)
+  * Handle misencoding of login_source cfg in mssql (#16268) (#16275)
+  * Fixed issues not updated by commits (#16254) (#16261)
+  * Improve efficiency in FindRenderizableReferenceNumeric and getReference (#16251) (#16255)
+  * Use html.Parse rather than html.ParseFragment (#16223) (#16225)
+  * Fix milestone counters on new issue (#16183) (#16224)
+  * reqOrgMembership calls need to be preceded by reqToken (#16198) (#16219)
+
+## [1.14.3](https://github.com/go-gitea/gitea/releases/tag/v1.14.3) - 2021-06-10
+
+* SECURITY
+  * Encrypt migration credentials at rest (#15895) (#16187)
+  * Only check access tokens if they are likely to be tokens (#16164) (#16171)
+  * Add missing SameSite settings for the i_like_gitea cookie (#16037) (#16039)
+  * Fix setting of SameSite on cookies (#15989) (#15991)
+* API
+  * Repository object only count releases as releases (#16184) (#16190)
+  * EditOrg respect RepoAdminChangeTeamAccess option (#16184) (#16190)
+  * Fix overly strict edit pr permissions (#15900) (#16081)
+* BUGFIXES
+  * Run processors on whole of text (#16155) (#16185)
+  * Class `issue-keyword` is being incorrectly stripped off spans (#16163) (#16172)
+  * Fix language switch for install page (#16043) (#16128)
+  * Fix bug on getIssueIDsByRepoID (#16119) (#16124)
+  * Set self-adjusting deadline for connection writing (#16068) (#16123)
+  * Fix http path bug (#16117) (#16120)
+  * Fix data URI scramble (#16098) (#16118)
+  * Merge all deleteBranch as one function and also fix bug when delete branch don't close related PRs (#16067) (#16097)
+  * git migration: don't prompt interactively for clone credentials (#15902) (#16082)
+  * Fix case change in ownernames (#16045) (#16050)
+  * Don't manipulate input params in email notification (#16011) (#16033)
+  * Remove branch URL before IssueRefURL (#15968) (#15970)
+  * Fix layout of milestone view (#15927) (#15940)
+  * GitHub Migration, migrate draft releases too (#15884) (#15888)
+  * Close the gitrepo when deleting the repository (#15876) (#15887)
+  * Upgrade xorm to v1.1.0 (#15869) (#15885)
+  * Fix blame row height alignment (#15863) (#15883)
+  * Fix error message when saving generated LOCAL_ROOT_URL config (#15880) (#15882)
+  * Backport Fix LFS commit finder not working (#15856) (#15874)
+  * Stop calling WriteHeader in Write (#15862) (#15873)
+  * Add timeout to writing to responses (#15831) (#15872)
+  * Return go-get info on subdirs (#15642) (#15871)
+  * Restore PAM user autocreation functionality (#15825) (#15867)
+  * Fix truncate utf8 string (#15828) (#15854)
+  * Fix bound address/port for caddy's certmagic library (#15758) (#15848)
+  * Upgrade unrolled/render to v1.1.1 (#15845) (#15846)
+  * Queue manager FlushAll can loop rapidly - add delay (#15733) (#15840)
+  * Tagger can be empty, as can Commit and Author - tolerate this (#15835) (#15839)
+  * Set autocomplete off on branches selector (#15809) (#15833)
+  * Add missing error to Doctor log (#15813) (#15824)
+  * Move restore repo to internal router and invoke from command to avoid open the same db file or queues files (#15790) (#15816)
+* ENHANCEMENTS
+  * Removable media support to snap package (#16136) (#16138)
+  * Move sans-serif fallback font higher than emoji fonts (#15855) (#15892)
+* DOCKER
+  * Only write config in environment-to-ini if there are changes (#15861) (#15868)
+  * Only offer hostcertificates if they exist (#15849) (#15853)
+
+## [1.14.2](https://github.com/go-gitea/gitea/releases/tag/v1.14.2) - 2021-05-08
+
+* API
+  * Make change repo settings work on empty repos (#15778) (#15789)
+  * Add pull "merged" notification subject status to API (#15344) (#15654)
+* BUGFIXES
+  * Ensure that ctx.Written is checked after issues(...) calls (#15797) (#15798)
+  * Use pulls in commit graph unless pulls are disabled (#15734 & #15740 & #15774) (#15775)
+  * Set GIT_DIR correctly if it is not set (#15751) (#15769)
+  * Fix bug where repositories appear unadopted (#15757) (#15767)
+  * Not show `ref-in-new-issue` pop when issue was disabled (#15761) (#15765)
+  * Drop back to use IsAnInteractiveSession for SVC (#15749) (#15762)
+  * Fix setting version table in dump (#15753) (#15759)
+  * Fix close button change on delete in simplemde area (#15737) (#15747)
+  * Defer closing the gitrepo until the end of the wrapped context functions (#15653) (#15746)
+  * Fix some ui bug about draft release (#15137) (#15745)
+  * Only log Error on getLastCommitStatus error to let pull list still be visible (#15716) (#15715)
+  * Move tooltip down to allow selection of Remove File on error (#15672) (#15714)
+  * Fix setting redis db path (#15698) (#15708)
+  * Fix DB session cleanup (#15697) (#15700)
+  * Fixed several activation bugs (#15473) (#15685)
+  * Delete references if repository gets deleted (#15681) (#15684)
+  * Fix orphaned objects deletion bug (#15657) (#15683)
+  * Delete protected branch if repository gets removed (#15658) (#15676)
+  * Remove spurious set name from eventsource.sharedworker.js (#15643) (#15652)
+  * Not update updated uinx for `git gc` (#15637) (#15641)
+  * Fix commit graph author link (#15627) (#15630)
+  * Fix webhook timeout bug (#15613) (#15621)
+  * Resolve panic on failed interface conversion in migration v156 (#15604) (#15610)
+  * Fix missing storage init (#15589) (#15598)
+  * If the default branch is not present do not report error on stats indexing (#15546 & #15583) (#15594)
+  * Fix lfs management find (#15537) (#15578)
+  * Fix NPE on view commit with notes (#15561) (#15573)
+  * Fix bug on commit graph (#15517) (#15530)
+  * Send size to /avatars if requested (#15459) (#15528)
+  * Prevent migration 156 failure if tag commit missing (#15519) (#15527)
+* ENHANCEMENTS
+  * Display conflict-free merge messages for pull requests (#15773) (#15796)
+  * Exponential Backoff for ByteFIFO (#15724) (#15793)
+  * Issue list alignment tweaks (#15483) (#15766)
+  * Implement delete release attachments and update release attachments' name (#14130) (#15666)
+  * Add placeholder text to deploy key textarea (#15575) (#15576)
+  * Project board improvements (#15429) (#15560)
+  * Repo branch page: label size, PR ref, new PR button alignment (#15363) (#15365)
+* MISC
+  * Fix webkit calendar icon color on arc-green (#15713) (#15728)
+  * Performance improvement for last commit cache and show-ref (#15455) (#15701)
+  * Bump unrolled/render to v1.1.0 (#15581) (#15608)
+  * Add ETag header (#15370) (#15552)
+
 ## [1.14.1](https://github.com/go-gitea/gitea/releases/tag/v1.14.1) - 2021-04-15

 * BUGFIXES
@@ -21,6 +21,7 @@ import (
     pwd "code.gitea.io/gitea/modules/password"
     repo_module "code.gitea.io/gitea/modules/repository"
     "code.gitea.io/gitea/modules/setting"
+    "code.gitea.io/gitea/modules/storage"

     "github.com/urfave/cli"
 )
@@ -489,6 +490,10 @@ func runDeleteUser(c *cli.Context) error {
         return err
     }

+    if err := storage.Init(); err != nil {
+        return err
+    }
+
     var err error
     var user *models.User
     if c.IsSet("email") {
@@ -19,6 +19,7 @@ import (
     "code.gitea.io/gitea/modules/public"
     "code.gitea.io/gitea/modules/setting"
     "code.gitea.io/gitea/modules/templates"
+    "code.gitea.io/gitea/modules/util"

     "github.com/gobwas/glob"
     "github.com/urfave/cli"
@@ -271,7 +272,7 @@ func extractAsset(d string, a asset, overwrite, rename bool) error {
     } else if !fi.Mode().IsRegular() {
         return fmt.Errorf("%s already exists, but it's not a regular file", dest)
     } else if rename {
-        if err := os.Rename(dest, dest+".bak"); err != nil {
+        if err := util.Rename(dest, dest+".bak"); err != nil {
             return fmt.Errorf("Error creating backup for %s: %v", dest, err)
         }
         // Attempt to respect file permissions mask (even if user:group will be set anew)
@@ -5,15 +5,12 @@
 package cmd

 import (
-    "context"
-    "strings"
+    "errors"
+    "net/http"

     "code.gitea.io/gitea/modules/log"
-    "code.gitea.io/gitea/modules/migrations"
-    "code.gitea.io/gitea/modules/migrations/base"
+    "code.gitea.io/gitea/modules/private"
     "code.gitea.io/gitea/modules/setting"
-    "code.gitea.io/gitea/modules/storage"
-    pull_service "code.gitea.io/gitea/services/pull"

     "github.com/urfave/cli"
 )
@@ -50,70 +47,18 @@ wiki, issues, labels, releases, release_assets, milestones, pull_requests, comme
 }

 func runRestoreRepository(ctx *cli.Context) error {
-    if err := initDB(); err != nil {
-        return err
-    }
     setting.NewContext()
-
-    log.Trace("AppPath: %s", setting.AppPath)
-    log.Trace("AppWorkPath: %s", setting.AppWorkPath)
-    log.Trace("Custom path: %s", setting.CustomPath)
-    log.Trace("Log path: %s", setting.LogRootPath)
-    setting.InitDBConfig()
-
-    if err := storage.Init(); err != nil {
-        return err
-    }
-
-    if err := pull_service.Init(); err != nil {
-        return err
-    }
-
-    var opts = base.MigrateOptions{
-        RepoName: ctx.String("repo_name"),
-    }
-
-    if len(ctx.String("units")) == 0 {
-        opts.Wiki = true
-        opts.Issues = true
-        opts.Milestones = true
-        opts.Labels = true
-        opts.Releases = true
-        opts.Comments = true
-        opts.PullRequests = true
-        opts.ReleaseAssets = true
-    } else {
-        units := strings.Split(ctx.String("units"), ",")
-        for _, unit := range units {
-            switch strings.ToLower(unit) {
-            case "wiki":
-                opts.Wiki = true
-            case "issues":
-                opts.Issues = true
-            case "milestones":
-                opts.Milestones = true
-            case "labels":
-                opts.Labels = true
-            case "releases":
-                opts.Releases = true
-            case "release_assets":
-                opts.ReleaseAssets = true
-            case "comments":
-                opts.Comments = true
-            case "pull_requests":
-                opts.PullRequests = true
-            }
-        }
-    }
-
-    if err := migrations.RestoreRepository(
-        context.Background(),
+
+    statusCode, errStr := private.RestoreRepo(
         ctx.String("repo_dir"),
         ctx.String("owner_name"),
         ctx.String("repo_name"),
-    ); err != nil {
-        log.Fatal("Failed to restore repository: %v", err)
-        return err
-    }
-
+        ctx.StringSlice("units"),
+    )
+    if statusCode == http.StatusOK {
+        return nil
+    }
+
+    log.Fatal("Failed to restore repository: %v", errStr)
+    return errors.New(errStr)
 }
@@ -175,7 +175,7 @@ func setPort(port string) error {

         cfg.Section("server").Key("LOCAL_ROOT_URL").SetValue(defaultLocalURL)
         if err := cfg.SaveTo(setting.CustomConf); err != nil {
-            return fmt.Errorf("Error saving generated JWT Secret to custom config: %v", err)
+            return fmt.Errorf("Error saving generated LOCAL_ROOT_URL to custom config: %v", err)
         }
     }
     return nil
@@ -6,6 +6,7 @@ package cmd

 import (
     "net/http"
+    "strconv"
     "strings"

     "code.gitea.io/gitea/modules/log"
@@ -22,6 +23,15 @@ func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler)
     // TODO: these are placeholders until we add options for each in settings with appropriate warning
     enableHTTPChallenge := true
     enableTLSALPNChallenge := true
+    altHTTPPort := 0
+    altTLSALPNPort := 0
+
+    if p, err := strconv.Atoi(setting.PortToRedirect); err == nil {
+        altHTTPPort = p
+    }
+    if p, err := strconv.Atoi(setting.HTTPPort); err == nil {
+        altTLSALPNPort = p
+    }

     magic := certmagic.NewDefault()
     magic.Storage = &certmagic.FileStorage{Path: directory}
@@ -30,6 +40,9 @@ func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler)
         Agreed:                  setting.LetsEncryptTOS,
         DisableHTTPChallenge:    !enableHTTPChallenge,
         DisableTLSALPNChallenge: !enableTLSALPNChallenge,
+        ListenHost:              setting.HTTPAddr,
+        AltTLSALPNPort:          altTLSALPNPort,
+        AltHTTPPort:             altHTTPPort,
     })

     magic.Issuer = myACME
@@ -110,6 +110,8 @@ func runEnvironmentToIni(c *cli.Context) error {
     }
     cfg.NameMapper = ini.SnackCase

+    changed := false
+
     prefix := c.String("prefix") + "__"

     for _, kv := range os.Environ() {
@@ -143,16 +145,22 @@ func runEnvironmentToIni(c *cli.Context) error {
                 continue
             }
         }
+        oldValue := key.Value()
+        if !changed && oldValue != value {
+            changed = true
+        }
         key.SetValue(value)
     }
     destination := c.String("out")
     if len(destination) == 0 {
         destination = setting.CustomConf
     }
+    if destination != setting.CustomConf || changed {
         err = cfg.SaveTo(destination)
         if err != nil {
             return err
         }
+    }
     if c.Bool("clear") {
         for _, kv := range os.Environ() {
             idx := strings.IndexByte(kv, '=')
@@ -281,6 +281,10 @@ HTTP_PORT = 3000
 ; PORT_TO_REDIRECT.
 REDIRECT_OTHER_PORT = false
 PORT_TO_REDIRECT = 80
+; Timeout for any write to the connection. (Set to 0 to disable all timeouts.)
+PER_WRITE_TIMEOUT = 30s
+; Timeout per Kb written to connections.
+PER_WRITE_PER_KB_TIMEOUT = 30s
 ; Permission for unix socket
 UNIX_SOCKET_PERMISSION = 666
 ; Local (DMZ) URL for Gitea workers (such as SSH update) accessing web service.
@@ -24,9 +24,29 @@ if [ ! -f /data/ssh/ssh_host_ecdsa_key ]; then
     ssh-keygen -t ecdsa -b 256 -f /data/ssh/ssh_host_ecdsa_key -N "" > /dev/null
 fi

+if [ -e /data/ssh/ssh_host_ed25519_cert ]; then
+    SSH_ED25519_CERT=${SSH_ED25519_CERT:-"/data/ssh/ssh_host_ed25519_cert"}
+fi
+
+if [ -e /data/ssh/ssh_host_rsa_cert ]; then
+    SSH_RSA_CERT=${SSH_RSA_CERT:-"/data/ssh/ssh_host_rsa_cert"}
+fi
+
+if [ -e /data/ssh/ssh_host_ecdsa_cert ]; then
+    SSH_ECDSA_CERT=${SSH_ECDSA_CERT:-"/data/ssh/ssh_host_ecdsa_cert"}
+fi
+
+if [ -e /data/ssh/ssh_host_dsa_cert ]; then
+    SSH_DSA_CERT=${SSH_DSA_CERT:-"/data/ssh/ssh_host_dsa_cert"}
+fi
+
 if [ -d /etc/ssh ]; then
     SSH_PORT=${SSH_PORT:-"22"} \
     SSH_LISTEN_PORT=${SSH_LISTEN_PORT:-"${SSH_PORT}"} \
+    SSH_ED25519_CERT="${SSH_ED25519_CERT:+"HostCertificate "}${SSH_ED25519_CERT}" \
+    SSH_RSA_CERT="${SSH_RSA_CERT:+"HostCertificate "}${SSH_RSA_CERT}" \
+    SSH_ECDSA_CERT="${SSH_ECDSA_CERT:+"HostCertificate "}${SSH_ECDSA_CERT}" \
+    SSH_DSA_CERT="${SSH_DSA_CERT:+"HostCertificate "}${SSH_DSA_CERT}" \
     envsubst < /etc/templates/sshd_config > /etc/ssh/sshd_config

     chmod 0644 /etc/ssh/sshd_config
@@ -8,13 +8,13 @@ ListenAddress ::
 LogLevel INFO

 HostKey /data/ssh/ssh_host_ed25519_key
-HostCertificate /data/ssh/ssh_host_ed25519_cert
+${SSH_ED25519_CERT}
 HostKey /data/ssh/ssh_host_rsa_key
-HostCertificate /data/ssh/ssh_host_rsa_cert
+${SSH_RSA_CERT}
 HostKey /data/ssh/ssh_host_ecdsa_key
-HostCertificate /data/ssh/ssh_host_ecdsa_cert
+${SSH_ECDSA_CERT}
 HostKey /data/ssh/ssh_host_dsa_key
-HostCertificate /data/ssh/ssh_host_dsa_cert
+${SSH_DSA_CERT}

 AuthorizedKeysFile .ssh/authorized_keys
 AuthorizedPrincipalsFile .ssh/authorized_principals
@@ -31,4 +31,4 @@ update: $(THEME)
 $(THEME): $(THEME)/theme.toml
 $(THEME)/theme.toml:
     mkdir -p $$(dirname $@)
-    curl -s $(ARCHIVE) | tar xz -C $$(dirname $@)
+    curl -L -s $(ARCHIVE) | tar xz -C $$(dirname $@)
@@ -237,6 +237,9 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`.
   most cases you do not need to change the default value. Alter it only if
   your SSH server node is not the same as HTTP node. Do not set this variable
   if `PROTOCOL` is set to `unix`.
+- `PER_WRITE_TIMEOUT`: **30s**: Timeout for any write to the connection. (Set to 0 to
+  disable all timeouts.)
+- `PER_WRITE_PER_KB_TIMEOUT`: **10s**: Timeout per Kb written to connections.

 - `DISABLE_SSH`: **false**: Disable SSH feature when it's not available.
 - `START_SSH_SERVER`: **false**: When enabled, use the built-in SSH server.
@@ -260,6 +263,9 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`.
 - `SSH_KEY_TEST_PATH`: **/tmp**: Directory to create temporary files in when testing public keys using ssh-keygen, default is the system temporary directory.
 - `SSH_KEYGEN_PATH`: **ssh-keygen**: Path to ssh-keygen, default is 'ssh-keygen' which means the shell is responsible for finding out which one to call.
 - `SSH_EXPOSE_ANONYMOUS`: **false**: Enable exposure of SSH clone URL to anonymous visitors, default is false.
+- `SSH_PER_WRITE_TIMEOUT`: **30s**: Timeout for any write to the SSH connections. (Set to
+  0 to disable all timeouts.)
+- `SSH_PER_WRITE_PER_KB_TIMEOUT`: **10s**: Timeout per Kb written to SSH connections.
 - `MINIMUM_KEY_SIZE_CHECK`: **true**: Indicate whether to check minimum key size with corresponding type.

 - `OFFLINE_MODE`: **false**: Disables use of CDN for static files and Gravatar for profile pictures.
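As a reference, here is a minimal sketch of how the write-timeout options documented above could be set in a custom `app.ini`. The `[server]` section placement follows the example configuration diff shown earlier; the values are simply the documented defaults and may need tuning for your deployment:

```ini
[server]
; Timeout for any write to the HTTP connection (0 disables the deadline)
PER_WRITE_TIMEOUT = 30s
; Additional timeout per Kb written to HTTP connections
PER_WRITE_PER_KB_TIMEOUT = 10s
; Equivalent knobs for the built-in SSH server
SSH_PER_WRITE_TIMEOUT = 30s
SSH_PER_WRITE_PER_KB_TIMEOUT = 10s
```

Raising these values can help clients on very slow links, while setting them to 0 disables the per-write deadlines entirely.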
go.mod (12 lines changed)

@@ -86,7 +86,7 @@ require (
     github.com/mgechev/revive v1.0.3
     github.com/mholt/acmez v0.1.3 // indirect
     github.com/mholt/archiver/v3 v3.5.0
-    github.com/microcosm-cc/bluemonday v1.0.7
+    github.com/microcosm-cc/bluemonday v1.0.15
     github.com/miekg/dns v1.1.40 // indirect
     github.com/minio/md5-simd v1.1.2 // indirect
     github.com/minio/minio-go/v7 v7.0.10
@@ -122,7 +122,7 @@ require (
     github.com/unknwon/com v1.0.1
     github.com/unknwon/i18n v0.0.0-20200823051745-09abd91c7f2c
     github.com/unknwon/paginater v0.0.0-20200328080006-042474bd0eae
-    github.com/unrolled/render v1.0.3
+    github.com/unrolled/render v1.1.1
     github.com/urfave/cli v1.22.5
     github.com/willf/bitset v1.1.11 // indirect
     github.com/xanzy/go-gitlab v0.44.0
@@ -136,10 +136,10 @@ require (
     go.uber.org/multierr v1.6.0 // indirect
     go.uber.org/zap v1.16.0 // indirect
     golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
-    golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
+    golang.org/x/net v0.0.0-20210614182718-04defd469f4e
     golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93
-    golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44
-    golang.org/x/text v0.3.5
+    golang.org/x/sys v0.0.0-20210423082822-04245dca01da
+    golang.org/x/text v0.3.6
     golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
     golang.org/x/tools v0.1.0
     gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
@@ -149,7 +149,7 @@ require (
     mvdan.cc/xurls/v2 v2.2.0
     strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251
     xorm.io/builder v0.3.9
-    xorm.io/xorm v1.0.7
+    xorm.io/xorm v1.1.0
 )

 replace github.com/hashicorp/go-version => github.com/6543/go-version v1.2.4
go.sum (58 lines changed)

@@ -127,8 +127,9 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
 github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
-github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
+github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
@@ -830,8 +831,8 @@ github.com/mholt/acmez v0.1.3 h1:J7MmNIk4Qf9b8mAGqAh4XkNeowv3f1zW816yf4zt7Qk=
 github.com/mholt/acmez v0.1.3/go.mod h1:8qnn8QA/Ewx8E3ZSsmscqsIjhhpxuy9vqdgbX2ceceM=
 github.com/mholt/archiver/v3 v3.5.0 h1:nE8gZIrw66cu4osS/U7UW7YDuGMHssxKutU8IfWxwWE=
 github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc=
-github.com/microcosm-cc/bluemonday v1.0.7 h1:6yAQfk4XT+PI/dk1ZeBp1gr3Q2Hd1DR0O3aEyPUJVTE=
-github.com/microcosm-cc/bluemonday v1.0.7/go.mod h1:HOT/6NaBlR0f9XlxD3zolN6Z3N8Lp4pvhp+jLS5ihnI=
+github.com/microcosm-cc/bluemonday v1.0.15 h1:J4uN+qPng9rvkBZBoBb8YGR+ijuklIMpSOZZLjYpbeY=
+github.com/microcosm-cc/bluemonday v1.0.15/go.mod h1:ZLvAzeakRwrGnzQEvstVzVt3ZpqOF2+sdFr0Om+ce30=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
 github.com/miekg/dns v1.1.40 h1:pyyPFfGMnciYUk/mXpKkVmeMQjfXqt3FAJ2hy7tPiLA=
@@ -996,6 +997,8 @@ github.com/quasoft/websspi v1.0.0 h1:5nDgdM5xSur9s+B5w2xQ5kxf5nUGqgFgU4W0aDLZ8Mw
 github.com/quasoft/websspi v1.0.0/go.mod h1:HmVdl939dQ0WIXZhyik+ARdI03M6bQzaSEKcgpFmewk=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -1113,8 +1116,8 @@ github.com/unknwon/i18n v0.0.0-20200823051745-09abd91c7f2c h1:679/gJXwrsHC3RATr0
 github.com/unknwon/i18n v0.0.0-20200823051745-09abd91c7f2c/go.mod h1:+5rDk6sDGpl3azws3O+f+GpFSyN9GVr0K8cvQLQM2ZQ=
 github.com/unknwon/paginater v0.0.0-20200328080006-042474bd0eae h1:ihaXiJkaca54IaCSnEXtE/uSZOmPxKZhDfVLrzZLFDs=
 github.com/unknwon/paginater v0.0.0-20200328080006-042474bd0eae/go.mod h1:1fdkY6xxl6ExVs2QFv7R0F5IRZHKA8RahhB9fMC9RvM=
-github.com/unrolled/render v1.0.3 h1:baO+NG1bZSF2WR4zwh+0bMWauWky7DVrTOfvE2w+aFo=
-github.com/unrolled/render v1.0.3/go.mod h1:gN9T0NhL4Bfbwu8ann7Ry/TGHYfosul+J0obPf6NBdM=
+github.com/unrolled/render v1.1.1 h1:FpzNzkvlJQIlVdVaqeVBGWiCS8gpbmjtrKpDmCn6p64=
+github.com/unrolled/render v1.1.1/go.mod h1:gN9T0NhL4Bfbwu8ann7Ry/TGHYfosul+J0obPf6NBdM=
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
@@ -1319,9 +1322,8 @@ golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1417,8 +1419,8 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44 h1:Bli41pIlzTzf3KEY06n+xnzK/BESIg2ze4Pgfh/aI8c=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1428,8 +1430,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1500,6 +1503,7 @@ golang.org/x/tools v0.0.0-20200928182047-19e03678916f/go.mod h1:z6u4i615ZeAfBE4X
 golang.org/x/tools v0.0.0-20200929161345-d7fc70abf50f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
 golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@@ -1666,6 +1670,33 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+modernc.org/cc/v3 v3.31.5-0.20210308123301-7a3e9dab9009 h1:u0oCo5b9wyLr++HF3AN9JicGhkUxJhMz51+8TIZH9N0=
+modernc.org/cc/v3 v3.31.5-0.20210308123301-7a3e9dab9009/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878=
+modernc.org/ccgo/v3 v3.9.0 h1:JbcEIqjw4Agf+0g3Tc85YvfYqkkFOv6xBwS4zkfqSoA=
+modernc.org/ccgo/v3 v3.9.0/go.mod h1:nQbgkn8mwzPdp4mm6BT6+p85ugQ7FrGgIcYaE7nSrpY=
+modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
+modernc.org/libc v1.8.0 h1:Pp4uv9g0csgBMpGPABKtkieF6O5MGhfGo6ZiOdlYfR8=
+modernc.org/libc v1.8.0/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
+modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY=
+modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.0.4 h1:utMBrFcpnQDdNsmM6asmyH/FM9TqLPS7XF7otpJmrwM=
+modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
+modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sqlite v1.10.1-0.20210314190707-798bbeb9bb84 h1:rgEUzE849tFlHSoeCrKyS9cZAljC+DY7MdMHKq6R6sY=
+modernc.org/sqlite v1.10.1-0.20210314190707-798bbeb9bb84/go.mod h1:PGzq6qlhyYjL6uVbSgS6WoF7ZopTW/sI7+7p+mb4ZVU=
+modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc=
+modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
+modernc.org/tcl v1.5.0 h1:euZSUNfE0Fd4W8VqXI1Ly1v7fqDJoBuAV88Ea+SnaSs=
+modernc.org/tcl v1.5.0/go.mod h1:gb57hj4pO8fRrK54zveIfFXBaMHK3SKJNWcmRw1cRzc=
+modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
+modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc=
+modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
 mvdan.cc/xurls/v2 v2.2.0 h1:NSZPykBXJFCetGZykLAxaL6SIpvbVy/UFEniIfHAa8A=
 mvdan.cc/xurls/v2 v2.2.0/go.mod h1:EV1RMtya9D6G5DMYPGD8zTQzaHet6Jh8gFlRgGRJeO8=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
@@ -1676,8 +1707,9 @@ sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1
 strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251 h1:mUcz5b3FJbP5Cvdq7Khzn6J9OCUQJaBwgBkCR+MOwSs=
 strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251/go.mod h1:FJGmPh3vz9jSos1L/F91iAgnC/aejc0wIIrF2ZwJxdY=
 xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
+xorm.io/builder v0.3.8/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
 xorm.io/builder v0.3.9 h1:Sd65/LdWyO7LR8+Cbd+e7mm3sK/7U9k0jS3999IDHMc=
 xorm.io/builder v0.3.9/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
 xorm.io/xorm v1.0.6/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4=
-xorm.io/xorm v1.0.7 h1:26yBTDVI+CfQpVz2Y88fISh+aiJXIPP4eNoTJlwzsC4=
-xorm.io/xorm v1.0.7/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4=
+xorm.io/xorm v1.1.0 h1:mkEsQXLauZajiOld2cB2PkFcUZKePepPgs1bC1dw8RA=
+xorm.io/xorm v1.1.0/go.mod h1:EDzNHMuCVZNszkIRSLL2nI0zX+nQE8RstAVranlSfqI=
@@ -130,11 +130,14 @@ func getNewRepoEditOption(opts *api.EditRepoOption) *api.EditRepoOption {

 func TestAPIRepoEdit(t *testing.T) {
     onGiteaRun(t, func(t *testing.T, u *url.URL) {
+        bFalse, bTrue := false, true
+
         user2 := models.AssertExistsAndLoadBean(t, &models.User{ID: 2}).(*models.User)               // owner of the repo1 & repo16
         user3 := models.AssertExistsAndLoadBean(t, &models.User{ID: 3}).(*models.User)               // owner of the repo3, is an org
         user4 := models.AssertExistsAndLoadBean(t, &models.User{ID: 4}).(*models.User)               // owner of neither repos
         repo1 := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 1}).(*models.Repository)   // public repo
         repo3 := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 3}).(*models.Repository)   // public repo
+        repo15 := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 15}).(*models.Repository) // empty repo
         repo16 := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 16}).(*models.Repository) // private repo

         // Get user2's token
@@ -286,9 +289,8 @@ func TestAPIRepoEdit(t *testing.T) {
         // Test making a repo public that is private
         repo16 = models.AssertExistsAndLoadBean(t, &models.Repository{ID: 16}).(*models.Repository)
         assert.True(t, repo16.IsPrivate)
-        private := false
         repoEditOption = &api.EditRepoOption{
-            Private: &private,
+            Private: &bFalse,
         }
         url = fmt.Sprintf("/api/v1/repos/%s/%s?token=%s", user2.Name, repo16.Name, token2)
         req = NewRequestWithJSON(t, "PATCH", url, &repoEditOption)
@@ -296,11 +298,24 @@ func TestAPIRepoEdit(t *testing.T) {
         repo16 = models.AssertExistsAndLoadBean(t, &models.Repository{ID: 16}).(*models.Repository)
         assert.False(t, repo16.IsPrivate)
         // Make it private again
-        private = true
-        repoEditOption.Private = &private
+        repoEditOption.Private = &bTrue
         req = NewRequestWithJSON(t, "PATCH", url, &repoEditOption)
         _ = session.MakeRequest(t, req, http.StatusOK)

+        // Test to change empty repo
+        assert.False(t, repo15.IsArchived)
+        url = fmt.Sprintf("/api/v1/repos/%s/%s?token=%s", user2.Name, repo15.Name, token2)
+        req = NewRequestWithJSON(t, "PATCH", url, &api.EditRepoOption{
+            Archived: &bTrue,
+        })
+        _ = session.MakeRequest(t, req, http.StatusOK)
+        repo15 = models.AssertExistsAndLoadBean(t, &models.Repository{ID: 15}).(*models.Repository)
+        assert.True(t, repo15.IsArchived)
+        req = NewRequestWithJSON(t, "PATCH", url, &api.EditRepoOption{
+            Archived: &bFalse,
+        })
+        _ = session.MakeRequest(t, req, http.StatusOK)
+
         // Test using org repo "user3/repo3" where user2 is a collaborator
         origRepoEditOption = getRepoEditOptionFromRepo(repo3)
         repoEditOption = getNewRepoEditOption(origRepoEditOption)
@@ -223,7 +223,7 @@ func TestAPIViewRepo(t *testing.T) {
     DecodeJSON(t, resp, &repo)
     assert.EqualValues(t, 1, repo.ID)
     assert.EqualValues(t, "repo1", repo.Name)
-    assert.EqualValues(t, 2, repo.Releases)
+    assert.EqualValues(t, 1, repo.Releases)
     assert.EqualValues(t, 1, repo.OpenIssues)
     assert.EqualValues(t, 3, repo.OpenPulls)

@@ -144,7 +144,9 @@ func TestAPITeamSearch(t *testing.T) {
     var results TeamSearchResults

     session := loginUser(t, user.Name)
+    csrf := GetCSRF(t, session, "/"+org.Name)
     req := NewRequestf(t, "GET", "/api/v1/orgs/%s/teams/search?q=%s", org.Name, "_team")
+    req.Header.Add("X-Csrf-Token", csrf)
     resp := session.MakeRequest(t, req, http.StatusOK)
     DecodeJSON(t, resp, &results)
     assert.NotEmpty(t, results.Data)
@@ -154,7 +156,9 @@ func TestAPITeamSearch(t *testing.T) {
     // no access if not organization member
     user5 := models.AssertExistsAndLoadBean(t, &models.User{ID: 5}).(*models.User)
     session = loginUser(t, user5.Name)
+    csrf = GetCSRF(t, session, "/"+org.Name)
     req = NewRequestf(t, "GET", "/api/v1/orgs/%s/teams/search?q=%s", org.Name, "team")
+    req.Header.Add("X-Csrf-Token", csrf)
     resp = session.MakeRequest(t, req, http.StatusForbidden)

 }
integrations/git_smart_http_test.go (new file, 69 lines)

@@ -0,0 +1,69 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package integrations

import (
    "io/ioutil"
    "net/http"
    "net/url"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestGitSmartHTTP(t *testing.T) {
    onGiteaRun(t, testGitSmartHTTP)
}

func testGitSmartHTTP(t *testing.T, u *url.URL) {
    var kases = []struct {
        p    string
        code int
    }{
        {
            p:    "user2/repo1/info/refs",
            code: 200,
        },
        {
            p:    "user2/repo1/HEAD",
            code: 200,
        },
        {
            p:    "user2/repo1/objects/info/alternates",
            code: 404,
        },
        {
            p:    "user2/repo1/objects/info/http-alternates",
            code: 404,
        },
        {
            p:    "user2/repo1/../../custom/conf/app.ini",
            code: 404,
        },
        {
            p:    "user2/repo1/objects/info/../../../../custom/conf/app.ini",
            code: 404,
        },
        {
            p:    `user2/repo1/objects/info/..\..\..\..\custom\conf\app.ini`,
            code: 400,
        },
    }

    for _, kase := range kases {
        t.Run(kase.p, func(t *testing.T) {
            p := u.String() + kase.p
            req, err := http.NewRequest("GET", p, nil)
            assert.NoError(t, err)
            req.SetBasicAuth("user2", userPassword)
            resp, err := http.DefaultClient.Do(req)
            assert.NoError(t, err)
            defer resp.Body.Close()
            assert.EqualValues(t, kase.code, resp.StatusCode)
            _, err = ioutil.ReadAll(resp.Body)
            assert.NoError(t, err)
        })
    }
}
integrations/goget_test.go (new file, 35 lines)

@@ -0,0 +1,35 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package integrations

import (
    "fmt"
    "net/http"
    "testing"

    "code.gitea.io/gitea/modules/setting"
    "github.com/stretchr/testify/assert"
)

func TestGoGet(t *testing.T) {
    defer prepareTestEnv(t)()

    req := NewRequest(t, "GET", "/blah/glah/plah?go-get=1")
    resp := MakeRequest(t, req, http.StatusOK)

    expected := fmt.Sprintf(`<!doctype html>
<html>
<head>
<meta name="go-import" content="%[1]s:%[2]s/blah/glah git %[3]sblah/glah.git">
<meta name="go-source" content="%[1]s:%[2]s/blah/glah _ %[3]sblah/glah/src/branch/master{/dir} %[3]sblah/glah/src/branch/master{/dir}/{file}#L{line}">
</head>
<body>
go get --insecure %[1]s:%[2]s/blah/glah
</body>
</html>
`, setting.Domain, setting.HTTPPort, setting.AppURL)

    assert.Equal(t, expected, resp.Body.String())
}
@@ -10,9 +10,11 @@ import (
     "testing"
     "time"

+    "code.gitea.io/gitea/models"
     "code.gitea.io/gitea/modules/setting"
     "code.gitea.io/gitea/modules/test"

+    "github.com/PuerkitoBio/goquery"
     "github.com/stretchr/testify/assert"
     "github.com/unknwon/i18n"
 )
@@ -83,7 +85,7 @@ func TestCreateRelease(t *testing.T) {
     session := loginUser(t, "user2")
     createNewRelease(t, session, "/user2/repo1", "v0.0.1", "v0.0.1", false, false)

-    checkLatestReleaseAndCount(t, session, "/user2/repo1", "v0.0.1", i18n.Tr("en", "repo.release.stable"), 2)
+    checkLatestReleaseAndCount(t, session, "/user2/repo1", "v0.0.1", i18n.Tr("en", "repo.release.stable"), 3)
 }

 func TestCreateReleasePreRelease(t *testing.T) {
@@ -92,7 +94,7 @@ func TestCreateReleasePreRelease(t *testing.T) {
     session := loginUser(t, "user2")
     createNewRelease(t, session, "/user2/repo1", "v0.0.1", "v0.0.1", true, false)

-    checkLatestReleaseAndCount(t, session, "/user2/repo1", "v0.0.1", i18n.Tr("en", "repo.release.prerelease"), 2)
+    checkLatestReleaseAndCount(t, session, "/user2/repo1", "v0.0.1", i18n.Tr("en", "repo.release.prerelease"), 3)
 }

 func TestCreateReleaseDraft(t *testing.T) {
@@ -101,7 +103,7 @@ func TestCreateReleaseDraft(t *testing.T) {
     session := loginUser(t, "user2")
     createNewRelease(t, session, "/user2/repo1", "v0.0.1", "v0.0.1", false, true)

-    checkLatestReleaseAndCount(t, session, "/user2/repo1", "v0.0.1", i18n.Tr("en", "repo.release.draft"), 2)
+    checkLatestReleaseAndCount(t, session, "/user2/repo1", "v0.0.1", i18n.Tr("en", "repo.release.draft"), 3)
 }

 func TestCreateReleasePaging(t *testing.T) {
@@ -127,3 +129,80 @@ func TestCreateReleasePaging(t *testing.T) {
     session2 := loginUser(t, "user4")
     checkLatestReleaseAndCount(t, session2, "/user2/repo1", "v0.0.11", i18n.Tr("en", "repo.release.stable"), 10)
 }
+
+func TestViewReleaseListNoLogin(t *testing.T) {
+    defer prepareTestEnv(t)()
+
+    repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 1}).(*models.Repository)
+
+    link := repo.Link() + "/releases"
+
+    req := NewRequest(t, "GET", link)
+    rsp := MakeRequest(t, req, http.StatusOK)
+
+    htmlDoc := NewHTMLParser(t, rsp.Body)
+    releases := htmlDoc.Find("#release-list li.ui.grid")
+    assert.Equal(t, 1, releases.Length())
+
+    links := make([]string, 0, 5)
+    releases.Each(func(i int, s *goquery.Selection) {
+        link, exist := s.Find(".release-list-title a").Attr("href")
+        if !exist {
+            return
+        }
+        links = append(links, link)
+    })
+
+    assert.EqualValues(t, []string{"/user2/repo1/releases/tag/v1.1"}, links)
+}
+
+func TestViewReleaseListLogin(t *testing.T) {
+    defer prepareTestEnv(t)()
+
+    repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 1}).(*models.Repository)
+
+    link := repo.Link() + "/releases"
+
+    session := loginUser(t, "user1")
+    req := NewRequest(t, "GET", link)
+    rsp := session.MakeRequest(t, req, http.StatusOK)
+
+    htmlDoc := NewHTMLParser(t, rsp.Body)
+    releases := htmlDoc.Find("#release-list li.ui.grid")
+    assert.Equal(t, 2, releases.Length())
+
+    links := make([]string, 0, 5)
+    releases.Each(func(i int, s *goquery.Selection) {
+        link, exist := s.Find(".release-list-title a").Attr("href")
+        if !exist {
+            return
+        }
+        links = append(links, link)
+    })
+
+    assert.EqualValues(t, []string{"/user2/repo1/releases/tag/draft-release",
+        "/user2/repo1/releases/tag/v1.1"}, links)
+}
+
+func TestViewTagsList(t *testing.T) {
+    defer prepareTestEnv(t)()
+
+    repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 1}).(*models.Repository)
+
+    link := repo.Link() + "/tags"
+
+    session := loginUser(t, "user1")
+    req := NewRequest(t, "GET", link)
+    rsp := session.MakeRequest(t, req, http.StatusOK)
+
+    htmlDoc := NewHTMLParser(t, rsp.Body)
+    tags := htmlDoc.Find(".tag-list tr")
+    assert.Equal(t, 2, tags.Length())
+
+    tagNames := make([]string, 0, 5)
+    tags.Each(func(i int, s *goquery.Selection) {
+        tagNames = append(tagNames, s.Find(".tag a.df.ac").Text())
+    })
+
+    assert.EqualValues(t, []string{"delete-tag", "v1.1"}, tagNames)
+}
@@ -11,6 +11,7 @@ import (
     "strings"
     "testing"

+    "code.gitea.io/gitea/modules/setting"
     "code.gitea.io/gitea/modules/test"

     "github.com/stretchr/testify/assert"
@@ -134,5 +135,13 @@ func TestCreateBranchInvalidCSRF(t *testing.T) {
         "_csrf":           "fake_csrf",
         "new_branch_name": "test",
     })
-    session.MakeRequest(t, req, http.StatusBadRequest)
+    resp := session.MakeRequest(t, req, http.StatusFound)
+    loc := resp.Header().Get("Location")
+    assert.Equal(t, setting.AppSubURL+"/", loc)
+    resp = session.MakeRequest(t, NewRequest(t, "GET", loc), http.StatusOK)
+    htmlDoc := NewHTMLParser(t, resp.Body)
+    assert.Equal(t,
+        "Bad Request: Invalid CSRF token",
+        strings.TrimSpace(htmlDoc.doc.Find(".ui.message").Text()),
+    )
 }
@@ -125,8 +125,8 @@ func getAttachmentByUUID(e Engine, uuid string) (*Attachment, error) {
 }

 // GetAttachmentsByUUIDs returns attachment by given UUID list.
-func GetAttachmentsByUUIDs(uuids []string) ([]*Attachment, error) {
-    return getAttachmentsByUUIDs(x, uuids)
+func GetAttachmentsByUUIDs(ctx DBContext, uuids []string) ([]*Attachment, error) {
+    return getAttachmentsByUUIDs(ctx.e, uuids)
 }

 func getAttachmentsByUUIDs(e Engine, uuids []string) ([]*Attachment, error) {
@@ -183,12 +183,12 @@ func getAttachmentByReleaseIDFileName(e Engine, releaseID int64, fileName string

 // DeleteAttachment deletes the given attachment and optionally the associated file.
 func DeleteAttachment(a *Attachment, remove bool) error {
-    _, err := DeleteAttachments([]*Attachment{a}, remove)
+    _, err := DeleteAttachments(DefaultDBContext(), []*Attachment{a}, remove)
     return err
 }

 // DeleteAttachments deletes the given attachments and optionally the associated files.
-func DeleteAttachments(attachments []*Attachment, remove bool) (int, error) {
+func DeleteAttachments(ctx DBContext, attachments []*Attachment, remove bool) (int, error) {
     if len(attachments) == 0 {
         return 0, nil
     }
@@ -198,7 +198,7 @@ func DeleteAttachments(attachments []*Attachment, remove bool) (int, error) {
         ids = append(ids, a.ID)
     }

-    cnt, err := x.In("id", ids).NoAutoCondition().Delete(attachments[0])
+    cnt, err := ctx.e.In("id", ids).NoAutoCondition().Delete(attachments[0])
     if err != nil {
         return 0, err
     }
@@ -220,7 +220,7 @@ func DeleteAttachmentsByIssue(issueID int64, remove bool) (int, error) {
         return 0, err
     }

-    return DeleteAttachments(attachments, remove)
+    return DeleteAttachments(DefaultDBContext(), attachments, remove)
 }

 // DeleteAttachmentsByComment deletes all attachments associated with the given comment.
@@ -230,7 +230,7 @@ func DeleteAttachmentsByComment(commentID int64, remove bool) (int, error) {
         return 0, err
     }

-    return DeleteAttachments(attachments, remove)
+    return DeleteAttachments(DefaultDBContext(), attachments, remove)
 }

 // UpdateAttachment updates the given attachment in database
@@ -238,6 +238,15 @@ func UpdateAttachment(atta *Attachment) error {
     return updateAttachment(x, atta)
 }

+// UpdateAttachmentByUUID Updates attachment via uuid
+func UpdateAttachmentByUUID(ctx DBContext, attach *Attachment, cols ...string) error {
+    if attach.UUID == "" {
+        return fmt.Errorf("Attachement uuid should not blank")
+    }
+    _, err := ctx.e.Where("uuid=?", attach.UUID).Cols(cols...).Update(attach)
+    return err
+}
+
 func updateAttachment(e Engine, atta *Attachment) error {
     var sess *xorm.Session
     if atta.ID != 0 && atta.UUID == "" {
@@ -120,7 +120,7 @@ func TestUpdateAttachment(t *testing.T) {
 func TestGetAttachmentsByUUIDs(t *testing.T) {
     assert.NoError(t, PrepareTestDatabase())

-    attachList, err := GetAttachmentsByUUIDs([]string{"a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17", "not-existing-uuid"})
+    attachList, err := GetAttachmentsByUUIDs(DefaultDBContext(), []string{"a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17", "not-existing-uuid"})
     assert.NoError(t, err)
     assert.Equal(t, 2, len(attachList))
     assert.Equal(t, "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11", attachList[0].UUID)
@@ -81,7 +81,7 @@ func LibravatarURL(email string) (*url.URL, error) {
 }

 // HashedAvatarLink returns an avatar link for a provided email
-func HashedAvatarLink(email string) string {
+func HashedAvatarLink(email string, size int) string {
     lowerEmail := strings.ToLower(strings.TrimSpace(email))
     sum := fmt.Sprintf("%x", md5.Sum([]byte(lowerEmail)))
     _, _ = cache.GetString("Avatar:"+sum, func() (string, error) {
@@ -108,6 +108,9 @@ func HashedAvatarLink(email string) string {
         }
         return lowerEmail, nil
     })
+    if size > 0 {
+        return setting.AppSubURL + "/avatar/" + url.PathEscape(sum) + "?size=" + strconv.Itoa(size)
+    }
     return setting.AppSubURL + "/avatar/" + url.PathEscape(sum)
 }

@@ -129,7 +132,7 @@ func SizedAvatarLink(email string, size int) string {
         // This is the slow path that would need to call LibravatarURL() which
         // does DNS lookups. Avoid it by issuing a redirect so we don't block
         // the template render with network requests.
-        return HashedAvatarLink(email)
+        return HashedAvatarLink(email, size)
     } else if !setting.DisableGravatar {
         // copy GravatarSourceURL, because we will modify its Path.
         copyOfGravatarSourceURL := *setting.GravatarSourceURL
@@ -141,6 +141,12 @@ func (milestone *Milestone) checkForConsistency(t *testing.T) {
|
||||
actual := getCount(t, x.Where("is_closed=?", true), &Issue{MilestoneID: milestone.ID})
|
||||
assert.EqualValues(t, milestone.NumClosedIssues, actual,
|
||||
"Unexpected number of closed issues for milestone %+v", milestone)
|
||||
|
||||
completeness := 0
|
||||
if milestone.NumIssues > 0 {
|
||||
completeness = milestone.NumClosedIssues * 100 / milestone.NumIssues
|
||||
}
|
||||
assert.Equal(t, completeness, milestone.Completeness)
|
||||
}
|
||||
|
||||
func (label *Label) checkForConsistency(t *testing.T) {
|
||||
@@ -296,11 +302,15 @@ func CountOrphanedObjects(subject, refobject, joinCond string) (int64, error) {
|
||||
|
||||
// DeleteOrphanedObjects delete subjects with have no existing refobject anymore
|
||||
func DeleteOrphanedObjects(subject, refobject, joinCond string) error {
|
||||
_, err := x.In("id", builder.Select("`"+subject+"`.id").
|
||||
subQuery := builder.Select("`"+subject+"`.id").
|
||||
From("`"+subject+"`").
|
||||
Join("LEFT", "`"+refobject+"`", joinCond).
|
||||
Where(builder.IsNull{"`" + refobject + "`.id"})).
|
||||
Delete("`" + subject + "`")
|
||||
Where(builder.IsNull{"`" + refobject + "`.id"})
|
||||
sql, args, err := builder.Delete(builder.In("id", subQuery)).From("`" + subject + "`").ToSQL()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = x.Exec(append([]interface{}{sql}, args...)...)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
32
models/consistency_test.go
Normal file
32
models/consistency_test.go
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2021 Gitea. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDeleteOrphanedObjects(t *testing.T) {
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
|
||||
countBefore, err := x.Count(&PullRequest{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = x.Insert(&PullRequest{IssueID: 1000}, &PullRequest{IssueID: 1001}, &PullRequest{IssueID: 1003})
|
||||
assert.NoError(t, err)
|
||||
|
||||
orphaned, err := CountOrphanedObjects("pull_request", "issue", "pull_request.issue_id=issue.id")
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, 3, orphaned)
|
||||
|
||||
err = DeleteOrphanedObjects("pull_request", "issue", "pull_request.issue_id=issue.id")
|
||||
assert.NoError(t, err)
|
||||
|
||||
countAfter, err := x.Count(&PullRequest{})
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, countBefore, countAfter)
|
||||
}
|
||||
@@ -43,3 +43,15 @@
|
||||
is_tag: true
|
||||
created_unix: 946684800
|
||||
|
||||
-
|
||||
id: 4
|
||||
repo_id: 1
|
||||
publisher_id: 2
|
||||
tag_name: "draft-release"
|
||||
lower_tag_name: "draft-release"
|
||||
target: "master"
|
||||
title: "draft-release"
|
||||
is_draft: true
|
||||
is_prerelease: false
|
||||
is_tag: false
|
||||
created_unix: 1619524806
|
||||
|
||||
@@ -648,9 +648,11 @@ func (issue *Issue) doChangeStatus(e *xorm.Session, doer *User, isMergePull bool
|
||||
}
|
||||
|
||||
// Update issue count of milestone
|
||||
if err := updateMilestoneClosedNum(e, issue.MilestoneID); err != nil {
|
||||
if issue.MilestoneID > 0 {
|
||||
if err := updateMilestoneCounters(e, issue.MilestoneID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := issue.updateClosedNum(e); err != nil {
|
||||
return nil, err
|
||||
@@ -912,7 +914,7 @@ func newIssue(e *xorm.Session, doer *User, opts NewIssueOptions) (err error) {
|
||||
opts.Issue.Index = inserted.Index
|
||||
|
||||
if opts.Issue.MilestoneID > 0 {
|
||||
if _, err = e.Exec("UPDATE `milestone` SET num_issues=num_issues+1 WHERE id=?", opts.Issue.MilestoneID); err != nil {
|
||||
if err := updateMilestoneCounters(e, opts.Issue.MilestoneID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1032,6 +1034,9 @@ func newIssueAttempt(repo *Repository, issue *Issue, labelIDs []int64, uuids []s
|
||||
|
||||
// GetIssueByIndex returns raw issue without loading attributes by index in a repository.
|
||||
func GetIssueByIndex(repoID, index int64) (*Issue, error) {
|
||||
if index < 1 {
|
||||
return nil, ErrIssueNotExist{}
|
||||
}
|
||||
issue := &Issue{
|
||||
RepoID: repoID,
|
||||
Index: index,
|
||||
@@ -1086,7 +1091,7 @@ func getIssuesByIDs(e Engine, issueIDs []int64) ([]*Issue, error) {
|
||||
|
||||
func getIssueIDsByRepoID(e Engine, repoID int64) ([]int64, error) {
|
||||
ids := make([]int64, 0, 10)
|
||||
err := e.Table("issue").Where("repo_id = ?", repoID).Find(&ids)
|
||||
err := e.Table("issue").Cols("id").Where("repo_id = ?", repoID).Find(&ids)
|
||||
return ids, err
|
||||
}
|
||||
|
||||
|
||||
@@ -129,8 +129,12 @@ func GetMilestoneByRepoIDANDName(repoID int64, name string) (*Milestone, error)
|
||||
|
||||
// GetMilestoneByID returns the milestone via id .
|
||||
func GetMilestoneByID(id int64) (*Milestone, error) {
|
||||
return getMilestoneByID(x, id)
|
||||
}
|
||||
|
||||
func getMilestoneByID(e Engine, id int64) (*Milestone, error) {
|
||||
var m Milestone
|
||||
has, err := x.ID(id).Get(&m)
|
||||
has, err := e.ID(id).Get(&m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if !has {
|
||||
@@ -155,10 +159,6 @@ func UpdateMilestone(m *Milestone, oldIsClosed bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := updateMilestoneCompleteness(sess, m.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if IsClosed changed, update milestone numbers of repository
|
||||
if oldIsClosed != m.IsClosed {
|
||||
if err := updateRepoMilestoneNum(sess, m.RepoID); err != nil {
|
||||
@@ -171,23 +171,31 @@ func UpdateMilestone(m *Milestone, oldIsClosed bool) error {
|
||||
|
||||
func updateMilestone(e Engine, m *Milestone) error {
|
||||
m.Name = strings.TrimSpace(m.Name)
|
||||
_, err := e.ID(m.ID).AllCols().
|
||||
_, err := e.ID(m.ID).AllCols().Update(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return updateMilestoneCounters(e, m.ID)
|
||||
}
|
||||
|
||||
// updateMilestoneCounters calculates NumIssues, NumClosesIssues and Completeness
|
||||
func updateMilestoneCounters(e Engine, id int64) error {
|
||||
_, err := e.ID(id).
|
||||
SetExpr("num_issues", builder.Select("count(*)").From("issue").Where(
|
||||
builder.Eq{"milestone_id": m.ID},
|
||||
builder.Eq{"milestone_id": id},
|
||||
)).
|
||||
SetExpr("num_closed_issues", builder.Select("count(*)").From("issue").Where(
|
||||
builder.Eq{
|
||||
"milestone_id": m.ID,
|
||||
"milestone_id": id,
|
||||
"is_closed": true,
|
||||
},
|
||||
)).
|
||||
Update(m)
|
||||
Update(&Milestone{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
func updateMilestoneCompleteness(e Engine, milestoneID int64) error {
|
||||
_, err := e.Exec("UPDATE `milestone` SET completeness=100*num_closed_issues/(CASE WHEN num_issues > 0 THEN num_issues ELSE 1 END) WHERE id=?",
|
||||
milestoneID,
|
||||
_, err = e.Exec("UPDATE `milestone` SET completeness=100*num_closed_issues/(CASE WHEN num_issues > 0 THEN num_issues ELSE 1 END) WHERE id=?",
|
||||
id,
|
||||
)
|
||||
return err
|
||||
}
|
||||
@@ -256,25 +264,15 @@ func changeMilestoneAssign(e *xorm.Session, doer *User, issue *Issue, oldMilesto
|
||||
}
|
||||
|
||||
if oldMilestoneID > 0 {
|
||||
if err := updateMilestoneTotalNum(e, oldMilestoneID); err != nil {
|
||||
if err := updateMilestoneCounters(e, oldMilestoneID); err != nil {
|
||||
return err
|
||||
}
|
||||
if issue.IsClosed {
|
||||
if err := updateMilestoneClosedNum(e, oldMilestoneID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if issue.MilestoneID > 0 {
|
||||
if err := updateMilestoneTotalNum(e, issue.MilestoneID); err != nil {
|
||||
if err := updateMilestoneCounters(e, issue.MilestoneID); err != nil {
|
||||
return err
|
||||
}
|
||||
if issue.IsClosed {
|
||||
if err := updateMilestoneClosedNum(e, issue.MilestoneID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if oldMilestoneID > 0 || issue.MilestoneID > 0 {
|
||||
@@ -558,29 +556,6 @@ func updateRepoMilestoneNum(e Engine, repoID int64) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func updateMilestoneTotalNum(e Engine, milestoneID int64) (err error) {
|
||||
if _, err = e.Exec("UPDATE `milestone` SET num_issues=(SELECT count(*) FROM issue WHERE milestone_id=?) WHERE id=?",
|
||||
milestoneID,
|
||||
milestoneID,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return updateMilestoneCompleteness(e, milestoneID)
|
||||
}
|
||||
|
||||
func updateMilestoneClosedNum(e Engine, milestoneID int64) (err error) {
|
||||
if _, err = e.Exec("UPDATE `milestone` SET num_closed_issues=(SELECT count(*) FROM issue WHERE milestone_id=? AND is_closed=?) WHERE id=?",
|
||||
milestoneID,
|
||||
true,
|
||||
milestoneID,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return updateMilestoneCompleteness(e, milestoneID)
|
||||
}
|
||||
|
||||
// _____ _ _ _____ _
|
||||
// |_ _| __ __ _ ___| | _____ __| |_ _(_)_ __ ___ ___ ___
|
||||
// | || '__/ _` |/ __| |/ / _ \/ _` | | | | | '_ ` _ \ / _ \/ __|
|
||||
|
||||
@@ -215,7 +215,7 @@ func TestChangeMilestoneStatus(t *testing.T) {
|
||||
CheckConsistencyFor(t, &Repository{ID: milestone.RepoID}, &Milestone{})
|
||||
}
|
||||
|
||||
func TestUpdateMilestoneClosedNum(t *testing.T) {
|
||||
func TestUpdateMilestoneCounters(t *testing.T) {
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
issue := AssertExistsAndLoadBean(t, &Issue{MilestoneID: 1},
|
||||
"is_closed=0").(*Issue)
|
||||
@@ -224,14 +224,14 @@ func TestUpdateMilestoneClosedNum(t *testing.T) {
|
||||
issue.ClosedUnix = timeutil.TimeStampNow()
|
||||
_, err := x.ID(issue.ID).Cols("is_closed", "closed_unix").Update(issue)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, updateMilestoneClosedNum(x, issue.MilestoneID))
|
||||
assert.NoError(t, updateMilestoneCounters(x, issue.MilestoneID))
|
||||
CheckConsistencyFor(t, &Milestone{})
|
||||
|
||||
issue.IsClosed = false
|
||||
issue.ClosedUnix = 0
|
||||
_, err = x.ID(issue.ID).Cols("is_closed", "closed_unix").Update(issue)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, updateMilestoneClosedNum(x, issue.MilestoneID))
|
||||
assert.NoError(t, updateMilestoneCounters(x, issue.MilestoneID))
|
||||
CheckConsistencyFor(t, &Milestone{})
|
||||
}
|
||||
|
||||
|
||||
@@ -36,6 +36,14 @@ func TestIssue_ReplaceLabels(t *testing.T) {
|
||||
testSuccess(1, []int64{})
|
||||
}
|
||||
|
||||
func Test_GetIssueIDsByRepoID(t *testing.T) {
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
|
||||
ids, err := GetIssueIDsByRepoID(1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, ids, 5)
|
||||
}
|
||||
|
||||
func TestIssueAPIURL(t *testing.T) {
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
issue := AssertExistsAndLoadBean(t, &Issue{ID: 1}).(*Issue)
|
||||
|
||||
@@ -41,7 +41,7 @@ func (opts *ListOptions) setEnginePagination(e Engine) Engine {
|
||||
func (opts *ListOptions) GetStartEnd() (start, end int) {
|
||||
opts.setDefaultValues()
|
||||
start = (opts.Page - 1) * opts.PageSize
|
||||
end = start + opts.Page
|
||||
end = start + opts.PageSize
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
gouuid "github.com/google/uuid"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
||||
"xorm.io/xorm"
|
||||
@@ -68,6 +69,17 @@ var (
|
||||
_ convert.Conversion = &SSPIConfig{}
|
||||
)
|
||||
|
||||
// jsonUnmarshalIgnoreErroneousBOM - due to a bug in xorm (see https://gitea.com/xorm/xorm/pulls/1957) - it's
|
||||
// possible that a Blob may gain an unwanted prefix of 0xff 0xfe.
|
||||
func jsonUnmarshalIgnoreErroneousBOM(bs []byte, v interface{}) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
err := json.Unmarshal(bs, v)
|
||||
if err != nil && len(bs) > 2 && bs[0] == 0xff && bs[1] == 0xfe {
|
||||
err = json.Unmarshal(bs[2:], v)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// LDAPConfig holds configuration for LDAP login source.
|
||||
type LDAPConfig struct {
|
||||
*ldap.Source
|
||||
@@ -75,8 +87,7 @@ type LDAPConfig struct {
|
||||
|
||||
// FromDB fills up a LDAPConfig from serialized format.
|
||||
func (cfg *LDAPConfig) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, &cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a LDAPConfig to a serialized format.
|
||||
@@ -103,8 +114,7 @@ type SMTPConfig struct {
|
||||
|
||||
// FromDB fills up an SMTPConfig from serialized format.
|
||||
func (cfg *SMTPConfig) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, cfg)
|
||||
}
|
||||
|
||||
// ToDB exports an SMTPConfig to a serialized format.
|
||||
@@ -116,12 +126,12 @@ func (cfg *SMTPConfig) ToDB() ([]byte, error) {
|
||||
// PAMConfig holds configuration for the PAM login source.
|
||||
type PAMConfig struct {
|
||||
ServiceName string // pam service (e.g. system-auth)
|
||||
EmailDomain string
|
||||
}
|
||||
|
||||
// FromDB fills up a PAMConfig from serialized format.
|
||||
func (cfg *PAMConfig) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, &cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a PAMConfig to a serialized format.
|
||||
@@ -142,8 +152,7 @@ type OAuth2Config struct {
|
||||
|
||||
// FromDB fills up an OAuth2Config from serialized format.
|
||||
func (cfg *OAuth2Config) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, cfg)
|
||||
}
|
||||
|
||||
// ToDB exports an SMTPConfig to a serialized format.
|
||||
@@ -163,8 +172,7 @@ type SSPIConfig struct {
|
||||
|
||||
// FromDB fills up an SSPIConfig from serialized format.
|
||||
func (cfg *SSPIConfig) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, cfg)
|
||||
}
|
||||
|
||||
// ToDB exports an SSPIConfig to a serialized format.
|
||||
@@ -696,15 +704,26 @@ func LoginViaPAM(user *User, login, password string, sourceID int64, cfg *PAMCon
|
||||
|
||||
// Allow PAM sources with `@` in their name, like from Active Directory
|
||||
username := pamLogin
|
||||
email := pamLogin
|
||||
idx := strings.Index(pamLogin, "@")
|
||||
if idx > -1 {
|
||||
username = pamLogin[:idx]
|
||||
}
|
||||
if ValidateEmail(email) != nil {
|
||||
if cfg.EmailDomain != "" {
|
||||
email = fmt.Sprintf("%s@%s", username, cfg.EmailDomain)
|
||||
} else {
|
||||
email = fmt.Sprintf("%s@%s", username, setting.Service.NoReplyAddress)
|
||||
}
|
||||
if ValidateEmail(email) != nil {
|
||||
email = gouuid.New().String() + "@localhost"
|
||||
}
|
||||
}
|
||||
|
||||
user = &User{
|
||||
LowerName: strings.ToLower(username),
|
||||
Name: username,
|
||||
Email: pamLogin,
|
||||
Email: email,
|
||||
Passwd: password,
|
||||
LoginType: LoginPAM,
|
||||
LoginSource: sourceID,
|
||||
|
||||
@@ -88,6 +88,7 @@ func fixPublisherIDforTagReleases(x *xorm.Engine) error {
|
||||
repo = new(Repository)
|
||||
has, err := sess.ID(release.RepoID).Get(repo)
|
||||
if err != nil {
|
||||
log.Error("Error whilst loading repository[%d] for release[%d] with tag name %s. Error: %v", release.RepoID, release.ID, release.TagName, err)
|
||||
return err
|
||||
} else if !has {
|
||||
log.Warn("Release[%d] is orphaned and refers to non-existing repository %d", release.ID, release.RepoID)
|
||||
@@ -99,28 +100,55 @@ func fixPublisherIDforTagReleases(x *xorm.Engine) error {
|
||||
// v120.go migration may not have been run correctly - we'll just replicate it here
|
||||
// because this appears to be a common-ish problem.
|
||||
if _, err := sess.Exec("UPDATE repository SET owner_name = (SELECT name FROM `user` WHERE `user`.id = repository.owner_id)"); err != nil {
|
||||
log.Error("Error whilst updating repository[%d] owner name", repo.ID)
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := sess.ID(release.RepoID).Get(repo); err != nil {
|
||||
log.Error("Error whilst loading repository[%d] for release[%d] with tag name %s. Error: %v", release.RepoID, release.ID, release.TagName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
gitRepo, err = git.OpenRepository(repoPath(repo.OwnerName, repo.Name))
|
||||
if err != nil {
|
||||
log.Error("Error whilst opening git repo for [%d]%s/%s. Error: %v", repo.ID, repo.OwnerName, repo.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
commit, err := gitRepo.GetTagCommit(release.TagName)
|
||||
if err != nil {
|
||||
if git.IsErrNotExist(err) {
|
||||
log.Warn("Unable to find commit %s for Tag: %s in [%d]%s/%s. Cannot update publisher ID.", err.(git.ErrNotExist).ID, release.TagName, repo.ID, repo.OwnerName, repo.Name)
|
||||
continue
|
||||
}
|
||||
log.Error("Error whilst getting commit for Tag: %s in [%d]%s/%s. Error: %v", release.TagName, repo.ID, repo.OwnerName, repo.Name, err)
|
||||
return fmt.Errorf("GetTagCommit: %v", err)
|
||||
}
|
||||
|
||||
if commit.Author.Email == "" {
|
||||
log.Warn("Tag: %s in Repo[%d]%s/%s does not have a tagger.", release.TagName, repo.ID, repo.OwnerName, repo.Name)
|
||||
commit, err = gitRepo.GetCommit(commit.ID.String())
|
||||
if err != nil {
|
||||
if git.IsErrNotExist(err) {
|
||||
log.Warn("Unable to find commit %s for Tag: %s in [%d]%s/%s. Cannot update publisher ID.", err.(git.ErrNotExist).ID, release.TagName, repo.ID, repo.OwnerName, repo.Name)
|
||||
continue
|
||||
}
|
||||
log.Error("Error whilst getting commit for Tag: %s in [%d]%s/%s. Error: %v", release.TagName, repo.ID, repo.OwnerName, repo.Name, err)
|
||||
return fmt.Errorf("GetCommit: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if commit.Author.Email == "" {
|
||||
log.Warn("Tag: %s in Repo[%d]%s/%s does not have a Tagger and its underlying commit does not have an Author either!", release.TagName, repo.ID, repo.OwnerName, repo.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
if user == nil || !strings.EqualFold(user.Email, commit.Author.Email) {
|
||||
user = new(User)
|
||||
_, err = sess.Where("email=?", commit.Author.Email).Get(user)
|
||||
if err != nil {
|
||||
log.Error("Error whilst getting commit author by email: %s for Tag: %s in [%d]%s/%s. Error: %v", commit.Author.Email, release.TagName, repo.ID, repo.OwnerName, repo.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -133,6 +161,7 @@ func fixPublisherIDforTagReleases(x *xorm.Engine) error {
|
||||
|
||||
release.PublisherID = user.ID
|
||||
if _, err := sess.ID(release.ID).Cols("publisher_id").Update(release); err != nil {
|
||||
log.Error("Error whilst updating publisher[%d] for release[%d] with tag name %s. Error: %v", release.PublisherID, release.ID, release.TagName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -319,7 +319,7 @@ func DumpDatabase(filePath, dbType string) error {
|
||||
ID int64 `xorm:"pk autoincr"`
|
||||
Version int64
|
||||
}
|
||||
t, err := x.TableInfo(Version{})
|
||||
t, err := x.TableInfo(&Version{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ func TestDumpDatabase(t *testing.T) {
|
||||
ID int64 `xorm:"pk autoincr"`
|
||||
Version int64
|
||||
}
|
||||
assert.NoError(t, x.Sync2(Version{}))
|
||||
assert.NoError(t, x.Sync2(new(Version)))
|
||||
|
||||
for _, dbName := range setting.SupportedDatabases {
|
||||
dbType := setting.GetDBTypeByName(dbName)
|
||||
|
||||
@@ -212,12 +212,21 @@ func (pr *PullRequest) GetDefaultMergeMessage() string {
|
||||
log.Error("Cannot load issue %d for PR id %d: Error: %v", pr.IssueID, pr.ID, err)
|
||||
return ""
|
||||
}
|
||||
|
||||
if pr.BaseRepoID == pr.HeadRepoID {
|
||||
return fmt.Sprintf("Merge pull request '%s' (#%d) from %s into %s", pr.Issue.Title, pr.Issue.Index, pr.HeadBranch, pr.BaseBranch)
|
||||
if err := pr.LoadBaseRepo(); err != nil {
|
||||
log.Error("LoadBaseRepo: %v", err)
|
||||
return ""
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Merge pull request '%s' (#%d) from %s:%s into %s", pr.Issue.Title, pr.Issue.Index, pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseBranch)
|
||||
issueReference := "#"
|
||||
if pr.BaseRepo.UnitEnabled(UnitTypeExternalTracker) {
|
||||
issueReference = "!"
|
||||
}
|
||||
|
||||
if pr.BaseRepoID == pr.HeadRepoID {
|
||||
return fmt.Sprintf("Merge pull request '%s' (%s%d) from %s into %s", pr.Issue.Title, issueReference, pr.Issue.Index, pr.HeadBranch, pr.BaseBranch)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Merge pull request '%s' (%s%d) from %s:%s into %s", pr.Issue.Title, issueReference, pr.Issue.Index, pr.HeadRepo.FullName(), pr.HeadBranch, pr.BaseBranch)
|
||||
}
|
||||
|
||||
// ReviewCount represents a count of Reviews
|
||||
|
||||
@@ -234,3 +234,36 @@ func TestPullRequest_GetWorkInProgressPrefixWorkInProgress(t *testing.T) {
|
||||
pr.Issue.Title = "[wip] " + original
|
||||
assert.Equal(t, "[wip]", pr.GetWorkInProgressPrefix())
|
||||
}
|
||||
|
||||
func TestPullRequest_GetDefaultMergeMessage_InternalTracker(t *testing.T) {
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
pr := AssertExistsAndLoadBean(t, &PullRequest{ID: 2}).(*PullRequest)
|
||||
|
||||
assert.Equal(t, "Merge pull request 'issue3' (#3) from branch2 into master", pr.GetDefaultMergeMessage())
|
||||
|
||||
pr.BaseRepoID = 1
|
||||
pr.HeadRepoID = 2
|
||||
assert.Equal(t, "Merge pull request 'issue3' (#3) from user2/repo1:branch2 into master", pr.GetDefaultMergeMessage())
|
||||
}
|
||||
|
||||
func TestPullRequest_GetDefaultMergeMessage_ExternalTracker(t *testing.T) {
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
|
||||
externalTracker := RepoUnit{
|
||||
Type: UnitTypeExternalTracker,
|
||||
Config: &ExternalTrackerConfig{
|
||||
ExternalTrackerFormat: "https://someurl.com/{user}/{repo}/{issue}",
|
||||
},
|
||||
}
|
||||
baseRepo := &Repository{Name: "testRepo", ID: 1}
|
||||
baseRepo.Owner = &User{Name: "testOwner"}
|
||||
baseRepo.Units = []*RepoUnit{&externalTracker}
|
||||
|
||||
pr := AssertExistsAndLoadBean(t, &PullRequest{ID: 2, BaseRepo: baseRepo}).(*PullRequest)
|
||||
|
||||
assert.Equal(t, "Merge pull request 'issue3' (!3) from branch2 into master", pr.GetDefaultMergeMessage())
|
||||
|
||||
pr.BaseRepoID = 1
|
||||
pr.HeadRepoID = 2
|
||||
assert.Equal(t, "Merge pull request 'issue3' (!3) from user2/repo1:branch2 into master", pr.GetDefaultMergeMessage())
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -117,17 +118,20 @@ func UpdateRelease(ctx DBContext, rel *Release) error {
|
||||
}
|
||||
|
||||
// AddReleaseAttachments adds a release attachments
|
||||
func AddReleaseAttachments(releaseID int64, attachmentUUIDs []string) (err error) {
|
||||
func AddReleaseAttachments(ctx DBContext, releaseID int64, attachmentUUIDs []string) (err error) {
|
||||
// Check attachments
|
||||
attachments, err := GetAttachmentsByUUIDs(attachmentUUIDs)
|
||||
attachments, err := getAttachmentsByUUIDs(ctx.e, attachmentUUIDs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("GetAttachmentsByUUIDs [uuids: %v]: %v", attachmentUUIDs, err)
|
||||
}
|
||||
|
||||
for i := range attachments {
|
||||
if attachments[i].ReleaseID != 0 {
|
||||
return errors.New("release permission denied")
|
||||
}
|
||||
attachments[i].ReleaseID = releaseID
|
||||
// No assign value could be 0, so ignore AllCols().
|
||||
if _, err = x.ID(attachments[i].ID).Update(attachments[i]); err != nil {
|
||||
if _, err = ctx.e.ID(attachments[i].ID).Update(attachments[i]); err != nil {
|
||||
return fmt.Errorf("update attachment [%d]: %v", attachments[i].ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -749,7 +749,7 @@ func (repo *Repository) updateSize(e Engine) error {
|
||||
}
|
||||
|
||||
repo.Size = size
|
||||
_, err = e.ID(repo.ID).Cols("size").Update(repo)
|
||||
_, err = e.ID(repo.ID).Cols("size").NoAutoTime().Update(repo)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1215,7 +1215,7 @@ func ChangeRepositoryName(doer *User, repo *Repository, newRepoName string) (err
|
||||
}
|
||||
|
||||
newRepoPath := RepoPath(repo.Owner.Name, newRepoName)
|
||||
if err = os.Rename(repo.RepoPath(), newRepoPath); err != nil {
|
||||
if err = util.Rename(repo.RepoPath(), newRepoPath); err != nil {
|
||||
return fmt.Errorf("rename repository directory: %v", err)
|
||||
}
|
||||
|
||||
@@ -1226,7 +1226,7 @@ func ChangeRepositoryName(doer *User, repo *Repository, newRepoName string) (err
|
||||
return err
|
||||
}
|
||||
if isExist {
|
||||
if err = os.Rename(wikiPath, WikiPath(repo.Owner.Name, newRepoName)); err != nil {
|
||||
if err = util.Rename(wikiPath, WikiPath(repo.Owner.Name, newRepoName)); err != nil {
|
||||
return fmt.Errorf("rename repository wiki: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -1349,6 +1349,26 @@ func UpdateRepository(repo *Repository, visibilityChanged bool) (err error) {
|
||||
return sess.Commit()
|
||||
}
|
||||
|
||||
// UpdateRepositoryOwnerNames updates repository owner_names (this should only be used when the ownerName has changed case)
|
||||
func UpdateRepositoryOwnerNames(ownerID int64, ownerName string) error {
|
||||
if ownerID == 0 {
|
||||
return nil
|
||||
}
|
||||
sess := x.NewSession()
|
||||
defer sess.Close()
|
||||
if err := sess.Begin(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := sess.Where("owner_id = ?", ownerID).Cols("owner_name").Update(&Repository{
|
||||
OwnerName: ownerName,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sess.Commit()
|
||||
}
|
||||
|
||||
// UpdateRepositoryUpdatedTime updates a repository's updated time
|
||||
func UpdateRepositoryUpdatedTime(repoID int64, updateTime time.Time) error {
|
||||
_, err := x.Exec("UPDATE repository SET updated_unix = ? WHERE id = ?", updateTime.Unix(), repoID)
|
||||
@@ -1454,23 +1474,26 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
|
||||
if err := deleteBeans(sess,
|
||||
&Access{RepoID: repo.ID},
|
||||
&Action{RepoID: repo.ID},
|
||||
&Watch{RepoID: repoID},
|
||||
&Star{RepoID: repoID},
|
||||
&Mirror{RepoID: repoID},
|
||||
&Milestone{RepoID: repoID},
|
||||
&Release{RepoID: repoID},
|
||||
&Collaboration{RepoID: repoID},
|
||||
&PullRequest{BaseRepoID: repoID},
|
||||
&RepoUnit{RepoID: repoID},
|
||||
&RepoRedirect{RedirectRepoID: repoID},
|
||||
&Webhook{RepoID: repoID},
|
||||
&HookTask{RepoID: repoID},
|
||||
&Notification{RepoID: repoID},
|
||||
&CommitStatus{RepoID: repoID},
|
||||
&RepoIndexerStatus{RepoID: repoID},
|
||||
&LanguageStat{RepoID: repoID},
|
||||
&Comment{RefRepoID: repoID},
|
||||
&CommitStatus{RepoID: repoID},
|
||||
&DeletedBranch{RepoID: repoID},
|
||||
&HookTask{RepoID: repoID},
|
||||
&LFSLock{RepoID: repoID},
|
||||
&LanguageStat{RepoID: repoID},
|
||||
&Milestone{RepoID: repoID},
|
||||
&Mirror{RepoID: repoID},
|
||||
&Notification{RepoID: repoID},
|
||||
&ProtectedBranch{RepoID: repoID},
|
||||
&PullRequest{BaseRepoID: repoID},
|
||||
&Release{RepoID: repoID},
|
||||
&RepoIndexerStatus{RepoID: repoID},
|
||||
&RepoRedirect{RedirectRepoID: repoID},
|
||||
&RepoUnit{RepoID: repoID},
|
||||
&Star{RepoID: repoID},
|
||||
&Task{RepoID: repoID},
|
||||
&Watch{RepoID: repoID},
|
||||
&Webhook{RepoID: repoID},
|
||||
); err != nil {
|
||||
return fmt.Errorf("deleteBeans: %v", err)
|
||||
}
|
||||
@@ -1486,10 +1509,6 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := sess.Where("repo_id = ?", repoID).Delete(new(RepoUnit)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if repo.IsFork {
|
||||
if _, err := sess.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repo.ForkID); err != nil {
|
||||
return fmt.Errorf("decrease fork count: %v", err)
|
||||
|
||||
@@ -210,13 +210,13 @@ func TransferOwnership(doer *User, newOwnerName string, repo *Repository) (err e
|
||||
}
|
||||
|
||||
if repoRenamed {
|
||||
if err := os.Rename(RepoPath(newOwnerName, repo.Name), RepoPath(oldOwnerName, repo.Name)); err != nil {
|
||||
if err := util.Rename(RepoPath(newOwnerName, repo.Name), RepoPath(oldOwnerName, repo.Name)); err != nil {
|
||||
log.Critical("Unable to move repository %s/%s directory from %s back to correct place %s: %v", oldOwnerName, repo.Name, RepoPath(newOwnerName, repo.Name), RepoPath(oldOwnerName, repo.Name), err)
|
||||
}
|
||||
}
|
||||
|
||||
if wikiRenamed {
|
||||
if err := os.Rename(WikiPath(newOwnerName, repo.Name), WikiPath(oldOwnerName, repo.Name)); err != nil {
|
||||
if err := util.Rename(WikiPath(newOwnerName, repo.Name), WikiPath(oldOwnerName, repo.Name)); err != nil {
|
||||
log.Critical("Unable to move wiki for repository %s/%s directory from %s back to correct place %s: %v", oldOwnerName, repo.Name, WikiPath(newOwnerName, repo.Name), WikiPath(oldOwnerName, repo.Name), err)
|
||||
}
|
||||
}
|
||||
@@ -358,7 +358,7 @@ func TransferOwnership(doer *User, newOwnerName string, repo *Repository) (err e
|
||||
return fmt.Errorf("Failed to create dir %s: %v", dir, err)
|
||||
}
|
||||
|
||||
if err := os.Rename(RepoPath(oldOwner.Name, repo.Name), RepoPath(newOwner.Name, repo.Name)); err != nil {
|
||||
if err := util.Rename(RepoPath(oldOwner.Name, repo.Name), RepoPath(newOwner.Name, repo.Name)); err != nil {
|
||||
return fmt.Errorf("rename repository directory: %v", err)
|
||||
}
|
||||
repoRenamed = true
|
||||
@@ -370,7 +370,7 @@ func TransferOwnership(doer *User, newOwnerName string, repo *Repository) (err e
|
||||
log.Error("Unable to check if %s exists. Error: %v", wikiPath, err)
|
||||
return err
|
||||
} else if isExist {
|
||||
if err := os.Rename(wikiPath, WikiPath(newOwner.Name, repo.Name)); err != nil {
|
||||
if err := util.Rename(wikiPath, WikiPath(newOwner.Name, repo.Name)); err != nil {
|
||||
return fmt.Errorf("rename repository wiki: %v", err)
|
||||
}
|
||||
wikiRenamed = true
|
||||
|
||||
@@ -28,8 +28,7 @@ type UnitConfig struct{}
|
||||
|
||||
// FromDB fills up a UnitConfig from serialized format.
|
||||
func (cfg *UnitConfig) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, &cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a UnitConfig to a serialized format.
|
||||
@@ -45,8 +44,7 @@ type ExternalWikiConfig struct {
|
||||
|
||||
// FromDB fills up a ExternalWikiConfig from serialized format.
|
||||
func (cfg *ExternalWikiConfig) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, &cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a ExternalWikiConfig to a serialized format.
|
||||
@@ -64,8 +62,7 @@ type ExternalTrackerConfig struct {
|
||||
|
||||
// FromDB fills up a ExternalTrackerConfig from serialized format.
|
||||
func (cfg *ExternalTrackerConfig) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, &cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a ExternalTrackerConfig to a serialized format.
|
||||
@@ -83,8 +80,7 @@ type IssuesConfig struct {
|
||||
|
||||
// FromDB fills up a IssuesConfig from serialized format.
|
||||
func (cfg *IssuesConfig) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, &cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a IssuesConfig to a serialized format.
|
||||
@@ -106,8 +102,7 @@ type PullRequestsConfig struct {
|
||||
|
||||
// FromDB fills up a PullRequestsConfig from serialized format.
|
||||
func (cfg *PullRequestsConfig) FromDB(bs []byte) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
return json.Unmarshal(bs, &cfg)
|
||||
return jsonUnmarshalIgnoreErroneousBOM(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a PullRequestsConfig to a serialized format.
|
||||
|
||||
@@ -117,6 +117,6 @@ func CountSessions() (int64, error) {
|
||||
|
||||
// CleanupSessions cleans up expired sessions
|
||||
func CleanupSessions(maxLifetime int64) error {
|
||||
_, err := x.Where("created_unix <= ?", timeutil.TimeStampNow().Add(-maxLifetime)).Delete(&Session{})
|
||||
_, err := x.Where("expiry <= ?", timeutil.TimeStampNow().Add(-maxLifetime)).Delete(&Session{})
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -834,7 +834,7 @@ func rewriteAllPublicKeys(e Engine) error {
|
||||
}
|
||||
|
||||
t.Close()
|
||||
return os.Rename(tmpPath, fPath)
|
||||
return util.Rename(tmpPath, fPath)
|
||||
}
|
||||
|
||||
// RegeneratePublicKeys regenerates the authorized_keys file
|
||||
@@ -1316,7 +1316,7 @@ func rewriteAllPrincipalKeys(e Engine) error {
|
||||
}
|
||||
|
||||
t.Close()
|
||||
return os.Rename(tmpPath, fPath)
|
||||
return util.Rename(tmpPath, fPath)
|
||||
}
|
||||
|
||||
// ListPrincipalKeys returns a list of principals belongs to given user.
|
||||
|
||||
@@ -8,8 +8,11 @@ import (
|
||||
"fmt"
|
||||
|
||||
migration "code.gitea.io/gitea/modules/migrations/base"
|
||||
"code.gitea.io/gitea/modules/secret"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/modules/structs"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
||||
"xorm.io/builder"
|
||||
@@ -110,6 +113,24 @@ func (task *Task) MigrateConfig() (*migration.MigrateOptions, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// decrypt credentials
|
||||
if opts.CloneAddrEncrypted != "" {
|
||||
if opts.CloneAddr, err = secret.DecryptSecret(setting.SecretKey, opts.CloneAddrEncrypted); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if opts.AuthPasswordEncrypted != "" {
|
||||
if opts.AuthPassword, err = secret.DecryptSecret(setting.SecretKey, opts.AuthPasswordEncrypted); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if opts.AuthTokenEncrypted != "" {
|
||||
if opts.AuthToken, err = secret.DecryptSecret(setting.SecretKey, opts.AuthTokenEncrypted); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &opts, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Task type is %s, not Migrate Repo", task.Type.Name())
|
||||
@@ -205,12 +226,31 @@ func createTask(e Engine, task *Task) error {
|
||||
func FinishMigrateTask(task *Task) error {
|
||||
task.Status = structs.TaskStatusFinished
|
||||
task.EndTime = timeutil.TimeStampNow()
|
||||
|
||||
// delete credentials when we're done, they're a liability.
|
||||
conf, err := task.MigrateConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conf.AuthPassword = ""
|
||||
conf.AuthToken = ""
|
||||
conf.CloneAddr = util.SanitizeURLCredentials(conf.CloneAddr, true)
|
||||
conf.AuthPasswordEncrypted = ""
|
||||
conf.AuthTokenEncrypted = ""
|
||||
conf.CloneAddrEncrypted = ""
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
confBytes, err := json.Marshal(conf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
task.PayloadContent = string(confBytes)
|
||||
|
||||
sess := x.NewSession()
|
||||
defer sess.Close()
|
||||
if err := sess.Begin(); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := sess.ID(task.ID).Cols("status", "end_time").Update(task); err != nil {
|
||||
if _, err := sess.ID(task.ID).Cols("status", "end_time", "payload_content").Update(task); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -57,9 +57,15 @@ func GetAccessTokenBySHA(token string) (*AccessToken, error) {
|
||||
if token == "" {
|
||||
return nil, ErrAccessTokenEmpty{}
|
||||
}
|
||||
if len(token) < 8 {
|
||||
// A token is defined as being SHA1 sum these are 40 hexadecimal bytes long
|
||||
if len(token) != 40 {
|
||||
return nil, ErrAccessTokenNotExist{token}
|
||||
}
|
||||
for _, x := range []byte(token) {
|
||||
if x < '0' || (x > '9' && x < 'a') || x > 'f' {
|
||||
return nil, ErrAccessTokenNotExist{token}
|
||||
}
|
||||
}
|
||||
var tokens []AccessToken
|
||||
lastEight := token[len(token)-8:]
|
||||
err := x.Table(&AccessToken{}).Where("token_last_eight = ?", lastEight).Find(&tokens)
|
||||
|
||||
@@ -1011,7 +1011,7 @@ func ChangeUserName(u *User, newUserName string) (err error) {
|
||||
}
|
||||
|
||||
// Do not fail if directory does not exist
|
||||
if err = os.Rename(UserPath(oldUserName), UserPath(newUserName)); err != nil && !os.IsNotExist(err) {
|
||||
if err = util.Rename(UserPath(oldUserName), UserPath(newUserName)); err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("Rename user directory: %v", err)
|
||||
}
|
||||
|
||||
@@ -1020,7 +1020,7 @@ func ChangeUserName(u *User, newUserName string) (err error) {
|
||||
}
|
||||
|
||||
if err = sess.Commit(); err != nil {
|
||||
if err2 := os.Rename(UserPath(newUserName), UserPath(oldUserName)); err2 != nil && !os.IsNotExist(err2) {
|
||||
if err2 := util.Rename(UserPath(newUserName), UserPath(oldUserName)); err2 != nil && !os.IsNotExist(err2) {
|
||||
log.Critical("Unable to rollback directory change during failed username change from: %s to: %s. DB Error: %v. Filesystem Error: %v", oldUserName, newUserName, err, err2)
|
||||
return fmt.Errorf("failed to rollback directory change during failed username change from: %s to: %s. DB Error: %w. Filesystem Error: %v", oldUserName, newUserName, err, err2)
|
||||
}
|
||||
|
||||
@@ -82,6 +82,9 @@ func (u *User) RealSizedAvatarLink(size int) string {
|
||||
if u.Avatar == "" {
|
||||
return DefaultAvatarLink()
|
||||
}
|
||||
if size > 0 {
|
||||
return setting.AppSubURL + "/avatars/" + u.Avatar + "?size=" + strconv.Itoa(size)
|
||||
}
|
||||
return setting.AppSubURL + "/avatars/" + u.Avatar
|
||||
case setting.DisableGravatar, setting.OfflineMode:
|
||||
if u.Avatar == "" {
|
||||
@@ -89,7 +92,9 @@ func (u *User) RealSizedAvatarLink(size int) string {
|
||||
log.Error("GenerateRandomAvatar: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if size > 0 {
|
||||
return setting.AppSubURL + "/avatars/" + u.Avatar + "?size=" + strconv.Itoa(size)
|
||||
}
|
||||
return setting.AppSubURL + "/avatars/" + u.Avatar
|
||||
}
|
||||
return SizedAvatarLink(u.AvatarEmail, size)
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"code.gitea.io/gitea/modules/git"
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
@@ -213,19 +214,19 @@ func EllipsisString(str string, length int) string {
|
||||
if length <= 3 {
|
||||
return "..."
|
||||
}
|
||||
if len(str) <= length {
|
||||
if utf8.RuneCountInString(str) <= length {
|
||||
return str
|
||||
}
|
||||
return str[:length-3] + "..."
|
||||
return string([]rune(str)[:length-3]) + "..."
|
||||
}
|
||||
|
||||
// TruncateString returns a truncated string with given limit,
|
||||
// it returns input string if length is not reached limit.
|
||||
func TruncateString(str string, limit int) string {
|
||||
if len(str) < limit {
|
||||
if utf8.RuneCountInString(str) < limit {
|
||||
return str
|
||||
}
|
||||
return str[:limit]
|
||||
return string([]rune(str)[:limit])
|
||||
}
|
||||
|
||||
// StringsToInt64s converts a slice of string to a slice of int64.
|
||||
|
||||
@@ -170,6 +170,10 @@ func TestEllipsisString(t *testing.T) {
|
||||
assert.Equal(t, "fo...", EllipsisString("foobar", 5))
|
||||
assert.Equal(t, "foobar", EllipsisString("foobar", 6))
|
||||
assert.Equal(t, "foobar", EllipsisString("foobar", 10))
|
||||
assert.Equal(t, "测...", EllipsisString("测试文本一二三四", 4))
|
||||
assert.Equal(t, "测试...", EllipsisString("测试文本一二三四", 5))
|
||||
assert.Equal(t, "测试文...", EllipsisString("测试文本一二三四", 6))
|
||||
assert.Equal(t, "测试文本一二三四", EllipsisString("测试文本一二三四", 10))
|
||||
}
|
||||
|
||||
func TestTruncateString(t *testing.T) {
|
||||
@@ -181,6 +185,10 @@ func TestTruncateString(t *testing.T) {
|
||||
assert.Equal(t, "fooba", TruncateString("foobar", 5))
|
||||
assert.Equal(t, "foobar", TruncateString("foobar", 6))
|
||||
assert.Equal(t, "foobar", TruncateString("foobar", 7))
|
||||
assert.Equal(t, "测试文本", TruncateString("测试文本一二三四", 4))
|
||||
assert.Equal(t, "测试文本一", TruncateString("测试文本一二三四", 5))
|
||||
assert.Equal(t, "测试文本一二", TruncateString("测试文本一二三四", 6))
|
||||
assert.Equal(t, "测试文本一二三", TruncateString("测试文本一二三四", 7))
|
||||
}
|
||||
|
||||
func TestStringsToInt64s(t *testing.T) {
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/modules/web/middleware"
|
||||
|
||||
"github.com/unknwon/com"
|
||||
@@ -266,7 +267,12 @@ func Validate(ctx *Context, x CSRF) {
|
||||
-1,
|
||||
x.GetCookiePath(),
|
||||
x.GetCookieDomain()) // FIXME: Do we need to set the Secure, httpOnly and SameSite values too?
|
||||
if middleware.IsAPIPath(ctx.Req) {
|
||||
x.Error(ctx.Resp)
|
||||
return
|
||||
}
|
||||
ctx.Flash.Error(ctx.Tr("error.invalid_csrf"))
|
||||
ctx.Redirect(setting.AppSubURL + "/")
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -277,10 +283,19 @@ func Validate(ctx *Context, x CSRF) {
|
||||
-1,
|
||||
x.GetCookiePath(),
|
||||
x.GetCookieDomain()) // FIXME: Do we need to set the Secure, httpOnly and SameSite values too?
|
||||
if middleware.IsAPIPath(ctx.Req) {
|
||||
x.Error(ctx.Resp)
|
||||
return
|
||||
}
|
||||
ctx.Flash.Error(ctx.Tr("error.invalid_csrf"))
|
||||
ctx.Redirect(setting.AppSubURL + "/")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if middleware.IsAPIPath(ctx.Req) {
|
||||
http.Error(ctx.Resp, "Bad Request: no CSRF token present", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
ctx.Flash.Error(ctx.Tr("error.missing_csrf"))
|
||||
ctx.Redirect(setting.AppSubURL + "/")
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
package context
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
@@ -393,7 +394,7 @@ func RepoIDAssignment() func(ctx *Context) {
|
||||
}
|
||||
|
||||
// RepoAssignment returns a middleware to handle repository assignment
|
||||
func RepoAssignment(ctx *Context) {
|
||||
func RepoAssignment(ctx *Context) (cancel context.CancelFunc) {
|
||||
var (
|
||||
owner *models.User
|
||||
err error
|
||||
@@ -529,12 +530,12 @@ func RepoAssignment(ctx *Context) {
|
||||
ctx.Repo.GitRepo = gitRepo
|
||||
|
||||
// We opened it, we should close it
|
||||
defer func() {
|
||||
cancel = func() {
|
||||
// If it's been set to nil then assume someone else has closed it.
|
||||
if ctx.Repo.GitRepo != nil {
|
||||
ctx.Repo.GitRepo.Close()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop at this point when the repo is empty.
|
||||
if ctx.Repo.Repository.IsEmpty {
|
||||
@@ -619,6 +620,7 @@ func RepoAssignment(ctx *Context) {
|
||||
ctx.Data["GoDocDirectory"] = prefix + "{/dir}"
|
||||
ctx.Data["GoDocFile"] = prefix + "{/dir}/{file}#L{line}"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// RepoRefType type of repo reference
|
||||
@@ -643,7 +645,7 @@ const (
|
||||
|
||||
// RepoRef handles repository reference names when the ref name is not
|
||||
// explicitly given
|
||||
func RepoRef() func(*Context) {
|
||||
func RepoRef() func(*Context) context.CancelFunc {
|
||||
// since no ref name is explicitly specified, ok to just use branch
|
||||
return RepoRefByType(RepoRefBranch)
|
||||
}
|
||||
@@ -722,8 +724,8 @@ func getRefName(ctx *Context, pathType RepoRefType) string {
|
||||
|
||||
// RepoRefByType handles repository reference name for a specific type
|
||||
// of repository reference
|
||||
func RepoRefByType(refType RepoRefType) func(*Context) {
|
||||
return func(ctx *Context) {
|
||||
func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context) context.CancelFunc {
|
||||
return func(ctx *Context) (cancel context.CancelFunc) {
|
||||
// Empty repository does not have reference information.
|
||||
if ctx.Repo.Repository.IsEmpty {
|
||||
return
|
||||
@@ -742,12 +744,12 @@ func RepoRefByType(refType RepoRefType) func(*Context) {
|
||||
return
|
||||
}
|
||||
// We opened it, we should close it
|
||||
defer func() {
|
||||
cancel = func() {
|
||||
// If it's been set to nil then assume someone else has closed it.
|
||||
if ctx.Repo.GitRepo != nil {
|
||||
ctx.Repo.GitRepo.Close()
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Get default branch.
|
||||
@@ -811,6 +813,9 @@ func RepoRefByType(refType RepoRefType) func(*Context) {
|
||||
util.URLJoin(setting.AppURL, strings.Replace(ctx.Req.URL.RequestURI(), refName, ctx.Repo.Commit.ID.String(), 1))))
|
||||
}
|
||||
} else {
|
||||
if len(ignoreNotExistErr) > 0 && ignoreNotExistErr[0] {
|
||||
return
|
||||
}
|
||||
ctx.NotFound("RepoRef invalid repo", fmt.Errorf("branch or tag not exist: %s", refName))
|
||||
return
|
||||
}
|
||||
@@ -841,6 +846,7 @@ func RepoRefByType(refType RepoRefType) func(*Context) {
|
||||
return
|
||||
}
|
||||
ctx.Data["CommitsCount"] = ctx.Repo.CommitsCount
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ func (r *Response) Write(bs []byte) (int, error) {
|
||||
return size, err
|
||||
}
|
||||
if r.status == 0 {
|
||||
r.WriteHeader(200)
|
||||
r.status = http.StatusOK
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
||||
@@ -155,8 +155,8 @@ func ToCommit(repo *models.Repository, commit *git.Commit, userCache map[string]
|
||||
URL: repo.APIURL() + "/git/commits/" + commit.ID.String(),
|
||||
Author: &api.CommitUser{
|
||||
Identity: api.Identity{
|
||||
Name: commit.Committer.Name,
|
||||
Email: commit.Committer.Email,
|
||||
Name: commit.Author.Name,
|
||||
Email: commit.Author.Email,
|
||||
},
|
||||
Date: commit.Author.When.Format(time.RFC3339),
|
||||
},
|
||||
|
||||
@@ -47,6 +47,11 @@ func ToNotificationThread(n *models.Notification) *api.NotificationThread {
|
||||
if err == nil && comment != nil {
|
||||
result.Subject.LatestCommentURL = comment.APIURL()
|
||||
}
|
||||
|
||||
pr, _ := n.Issue.GetPullRequest()
|
||||
if pr != nil && pr.HasMerged {
|
||||
result.Subject.State = "merged"
|
||||
}
|
||||
}
|
||||
case models.NotificationSourceCommit:
|
||||
result.Subject = &api.NotificationSubject{
|
||||
|
||||
@@ -89,7 +89,7 @@ func innerToRepo(repo *models.Repository, mode models.AccessMode, isParent bool)
|
||||
return nil
|
||||
}
|
||||
|
||||
numReleases, _ := models.GetReleaseCountByRepoID(repo.ID, models.FindReleasesOptions{IncludeDrafts: false, IncludeTags: true})
|
||||
numReleases, _ := models.GetReleaseCountByRepoID(repo.ID, models.FindReleasesOptions{IncludeDrafts: false, IncludeTags: false})
|
||||
|
||||
mirrorInterval := ""
|
||||
if repo.IsMirror {
|
||||
|
||||
@@ -23,13 +23,13 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
// find labels without existing repo or org
|
||||
count, err := models.CountOrphanedLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned labels")
|
||||
logger.Critical("Error: %v whilst counting orphaned labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedLabels(); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned labels")
|
||||
logger.Critical("Error: %v whilst deleting orphaned labels", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d labels without existing repository/organisation deleted", count)
|
||||
@@ -41,13 +41,13 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
// find IssueLabels without existing label
|
||||
count, err = models.CountOrphanedIssueLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned issue_labels")
|
||||
logger.Critical("Error: %v whilst counting orphaned issue_labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedIssueLabels(); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned issue_labels")
|
||||
logger.Critical("Error: %v whilst deleting orphaned issue_labels", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d issue_labels without existing label deleted", count)
|
||||
@@ -59,13 +59,13 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
// find issues without existing repository
|
||||
count, err = models.CountOrphanedIssues()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned issues")
|
||||
logger.Critical("Error: %v whilst counting orphaned issues", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedIssues(); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned issues")
|
||||
logger.Critical("Error: %v whilst deleting orphaned issues", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d issues without existing repository deleted", count)
|
||||
@@ -77,13 +77,13 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
// find pulls without existing issues
|
||||
count, err = models.CountOrphanedObjects("pull_request", "issue", "pull_request.issue_id=issue.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects")
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("pull_request", "issue", "pull_request.issue_id=issue.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects")
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d pull requests without existing issue deleted", count)
|
||||
@@ -95,13 +95,13 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
// find tracked times without existing issues/pulls
|
||||
count, err = models.CountOrphanedObjects("tracked_time", "issue", "tracked_time.issue_id=issue.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects")
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("tracked_time", "issue", "tracked_time.issue_id=issue.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects")
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d tracked times without existing issue deleted", count)
|
||||
@@ -113,14 +113,14 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
// find null archived repositories
|
||||
count, err = models.CountNullArchivedRepository()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting null archived repositories")
|
||||
logger.Critical("Error: %v whilst counting null archived repositories", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixNullArchivedRepository()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst fixing null archived repositories")
|
||||
logger.Critical("Error: %v whilst fixing null archived repositories", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d repositories with null is_archived updated", updatedCount)
|
||||
@@ -132,14 +132,14 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
// find label comments with empty labels
|
||||
count, err = models.CountCommentTypeLabelWithEmptyLabel()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting label comments with empty labels")
|
||||
logger.Critical("Error: %v whilst counting label comments with empty labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixCommentTypeLabelWithEmptyLabel()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst removing label comments with empty labels")
|
||||
logger.Critical("Error: %v whilst removing label comments with empty labels", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d label comments with empty labels removed", updatedCount)
|
||||
@@ -191,13 +191,14 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
if setting.Database.UsePostgreSQL {
|
||||
count, err = models.CountBadSequences()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst checking sequence values")
|
||||
logger.Critical("Error: %v whilst checking sequence values", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
err := models.FixBadSequences()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst attempting to fix sequences")
|
||||
logger.Critical("Error: %v whilst attempting to fix sequences", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d sequences updated", count)
|
||||
@@ -207,6 +208,60 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
}
|
||||
}
|
||||
|
||||
// find protected branches without existing repository
|
||||
count, err = models.CountOrphanedObjects("protected_branch", "repository", "protected_branch.repo_id=repository.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("protected_branch", "repository", "protected_branch.repo_id=repository.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d protected branches without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d protected branches without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find deleted branches without existing repository
|
||||
count, err = models.CountOrphanedObjects("deleted_branch", "repository", "deleted_branch.repo_id=repository.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("deleted_branch", "repository", "deleted_branch.repo_id=repository.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d deleted branches without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d deleted branches without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find LFS locks without existing repository
|
||||
count, err = models.CountOrphanedObjects("lfs_lock", "repository", "lfs_lock.repo_id=repository.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("lfs_lock", "repository", "lfs_lock.repo_id=repository.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d LFS locks without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d LFS locks without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ func checkDBVersion(logger log.Logger, autofix bool) error {
|
||||
|
||||
err = models.NewEngine(context.Background(), migrations.Migrate)
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v during migration")
|
||||
logger.Critical("Error: %v during migration", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
package emoji
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -145,6 +146,8 @@ func (n *rememberSecondWriteWriter) Write(p []byte) (int, error) {
|
||||
if n.writecount == 2 {
|
||||
n.idx = n.pos
|
||||
n.end = n.pos + len(p)
|
||||
n.pos += len(p)
|
||||
return len(p), io.EOF
|
||||
}
|
||||
n.pos += len(p)
|
||||
return len(p), nil
|
||||
@@ -155,6 +158,8 @@ func (n *rememberSecondWriteWriter) WriteString(s string) (int, error) {
|
||||
if n.writecount == 2 {
|
||||
n.idx = n.pos
|
||||
n.end = n.pos + len(s)
|
||||
n.pos += len(s)
|
||||
return len(s), io.EOF
|
||||
}
|
||||
n.pos += len(s)
|
||||
return len(s), nil
|
||||
|
||||
@@ -51,6 +51,7 @@ type AuthenticationForm struct {
|
||||
TLS bool
|
||||
SkipVerify bool
|
||||
PAMServiceName string
|
||||
PAMEmailDomain string
|
||||
Oauth2Provider string
|
||||
Oauth2Key string
|
||||
Oauth2Secret string
|
||||
|
||||
@@ -149,17 +149,18 @@ headerLoop:
// constant hextable to help quickly convert between 20-byte and 40-byte hashes
const hextable = "0123456789abcdef"

// to40ByteSHA converts a 20-byte SHA in a 40-byte slice into a 40-byte sha in place
// without allocations. This is at least 100x quicker than hex.EncodeToString
// NB This requires that sha is a 40-byte slice
func to40ByteSHA(sha []byte) []byte {
// To40ByteSHA converts a 20-byte SHA into a 40-byte sha. Input and output can be the
// same 40 byte slice to support in place conversion without allocations.
// This is at least 100x quicker than hex.EncodeToString
// NB This requires that out is a 40-byte slice
func To40ByteSHA(sha, out []byte) []byte {
for i := 19; i >= 0; i-- {
v := sha[i]
vhi, vlo := v>>4, v&0x0f
shi, slo := hextable[vhi], hextable[vlo]
sha[i*2], sha[i*2+1] = shi, slo
out[i*2], out[i*2+1] = shi, slo
}
return sha
return out
}

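A minimal usage sketch of the new signature (the caller below is illustrative, not part of the diff): because the loop walks the slice from the end backwards, the input and output may be the same 40-byte buffer, which is how the callers further down avoid an extra allocation.

    func shaToHex(raw20 []byte) string {
        buf := make([]byte, 40)
        copy(buf, raw20)                     // the binary SHA occupies the first 20 bytes
        return string(To40ByteSHA(buf, buf)) // in place: sha and out share one buffer
    }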
// ParseTreeLineSkipMode reads an entry from a tree in a cat-file --batch stream
|
||||
|
||||
@@ -124,12 +124,18 @@ func (c *Command) RunInDirTimeoutEnvFullPipelineFunc(env []string, timeout time.
|
||||
|
||||
cmd := exec.CommandContext(ctx, c.name, c.args...)
|
||||
if env == nil {
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("LC_ALL=%s", DefaultLocale))
|
||||
cmd.Env = os.Environ()
|
||||
} else {
|
||||
cmd.Env = env
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("LC_ALL=%s", DefaultLocale))
|
||||
}
|
||||
|
||||
cmd.Env = append(
|
||||
cmd.Env,
|
||||
fmt.Sprintf("LC_ALL=%s", DefaultLocale),
|
||||
// avoid prompting for credentials interactively, supported since git v2.3
|
||||
"GIT_TERMINAL_PROMPT=0",
|
||||
)
|
||||
|
||||
// TODO: verify if this is still needed in golang 1.15
|
||||
if goVersionLessThan115 {
|
||||
cmd.Env = append(cmd.Env, "GODEBUG=asyncpreemptoff=1")
|
||||
|
||||
@@ -102,10 +102,13 @@ func (tes Entries) GetCommitsInfo(commit *Commit, treePath string, cache *LastCo
|
||||
}
|
||||
|
||||
func getLastCommitForPathsByCache(commitID, treePath string, paths []string, cache *LastCommitCache) (map[string]*Commit, []string, error) {
|
||||
wr, rd, cancel := CatFileBatch(cache.repo.Path)
|
||||
defer cancel()
|
||||
|
||||
var unHitEntryPaths []string
|
||||
var results = make(map[string]*Commit)
|
||||
for _, p := range paths {
|
||||
lastCommit, err := cache.Get(commitID, path.Join(treePath, p))
|
||||
lastCommit, err := cache.Get(commitID, path.Join(treePath, p), wr, rd)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -300,7 +303,7 @@ revListLoop:
|
||||
commits[0] = string(commitID)
|
||||
}
|
||||
}
|
||||
treeID = to40ByteSHA(treeID)
|
||||
treeID = To40ByteSHA(treeID, treeID)
|
||||
_, err = batchStdinWriter.Write(treeID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -18,6 +18,8 @@ import (
|
||||
func CommitFromReader(gitRepo *Repository, sha SHA1, reader io.Reader) (*Commit, error) {
|
||||
commit := &Commit{
|
||||
ID: sha,
|
||||
Author: &Signature{},
|
||||
Committer: &Signature{},
|
||||
}
|
||||
|
||||
payloadSB := new(strings.Builder)
|
||||
|
||||
@@ -7,6 +7,8 @@
|
||||
package git
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"path"
|
||||
)
|
||||
|
||||
@@ -34,7 +36,7 @@ func NewLastCommitCache(repoPath string, gitRepo *Repository, ttl func() int64,
|
||||
}
|
||||
|
||||
// Get get the last commit information by commit id and entry path
|
||||
func (c *LastCommitCache) Get(ref, entryPath string) (interface{}, error) {
|
||||
func (c *LastCommitCache) Get(ref, entryPath string, wr *io.PipeWriter, rd *bufio.Reader) (interface{}, error) {
|
||||
v := c.cache.Get(c.getCacheKey(c.repoPath, ref, entryPath))
|
||||
if vs, ok := v.(string); ok {
|
||||
log("LastCommitCache hit level 1: [%s:%s:%s]", ref, entryPath, vs)
|
||||
@@ -46,7 +48,10 @@ func (c *LastCommitCache) Get(ref, entryPath string) (interface{}, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
commit, err := c.repo.getCommit(id)
|
||||
if _, err := wr.Write([]byte(vs + "\n")); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
commit, err := c.repo.getCommitFromBatchReader(rd, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
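Get now takes the shared cat-file --batch pipe instead of spawning its own git process, so a caller opens the pipe once and reuses it for every path, as getLastCommitForPathsByCache above does. A rough calling sketch (variable names assumed):

    wr, rd, cancel := CatFileBatch(repoPath) // one long-lived `git cat-file --batch`
    defer cancel()

    lastCommit, err := cache.Get(commitID, path.Join(treePath, entryName), wr, rd)
    if err != nil {
        return nil, err
    }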
@@ -8,6 +8,7 @@ package git
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GetNote retrieves the git-notes data for a given commit.
|
||||
@@ -49,7 +50,13 @@ func GetNote(repo *Repository, commitID string, note *Note) error {
|
||||
}
|
||||
note.Message = d
|
||||
|
||||
lastCommits, err := GetLastCommitForPaths(notes, "", []string{path})
|
||||
treePath := ""
|
||||
if idx := strings.LastIndex(path, "/"); idx > -1 {
|
||||
treePath = path[:idx]
|
||||
path = path[idx+1:]
|
||||
}
|
||||
|
||||
lastCommits, err := GetLastCommitForPaths(notes, treePath, []string{path})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -43,8 +43,6 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, error) {
|
||||
|
||||
basePath := repo.Path
|
||||
|
||||
hashStr := hash.String()
|
||||
|
||||
// Use rev-list to provide us with all commits in order
|
||||
revListReader, revListWriter := io.Pipe()
|
||||
defer func() {
|
||||
@@ -74,7 +72,7 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, error) {
|
||||
|
||||
fnameBuf := make([]byte, 4096)
|
||||
modeBuf := make([]byte, 40)
|
||||
workingShaBuf := make([]byte, 40)
|
||||
workingShaBuf := make([]byte, 20)
|
||||
|
||||
for scan.Scan() {
|
||||
// Get the next commit ID
|
||||
@@ -127,12 +125,12 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, error) {
|
||||
case "tree":
|
||||
var n int64
|
||||
for n < size {
|
||||
mode, fname, sha, count, err := git.ParseTreeLine(batchReader, modeBuf, fnameBuf, workingShaBuf)
|
||||
mode, fname, sha20byte, count, err := git.ParseTreeLine(batchReader, modeBuf, fnameBuf, workingShaBuf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n += int64(count)
|
||||
if bytes.Equal(sha, []byte(hashStr)) {
|
||||
if bytes.Equal(sha20byte, hash[:]) {
|
||||
result := LFSResult{
|
||||
Name: curPath + string(fname),
|
||||
SHA: curCommit.ID.String(),
|
||||
@@ -142,7 +140,9 @@ func FindLFSFile(repo *git.Repository, hash git.SHA1) ([]*LFSResult, error) {
|
||||
}
|
||||
resultsMap[curCommit.ID.String()+":"+curPath+string(fname)] = &result
|
||||
} else if string(mode) == git.EntryModeTree.String() {
|
||||
trees = append(trees, sha)
|
||||
sha40Byte := make([]byte, 40)
|
||||
git.To40ByteSHA(sha20byte, sha40Byte)
|
||||
trees = append(trees, sha40Byte)
|
||||
paths = append(paths, curPath+string(fname)+"/")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,9 +9,10 @@ package git
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -34,6 +35,18 @@ func (repo *Repository) ResolveReference(name string) (string, error) {
|
||||
|
||||
// GetRefCommitID returns the last commit ID string of given reference (branch or tag).
|
||||
func (repo *Repository) GetRefCommitID(name string) (string, error) {
|
||||
if strings.HasPrefix(name, "refs/") {
|
||||
// We're gonna try just reading the ref file as this is likely to be quicker than other options
|
||||
fileInfo, err := os.Lstat(filepath.Join(repo.Path, name))
|
||||
if err == nil && fileInfo.Mode().IsRegular() && fileInfo.Size() == 41 {
|
||||
ref, err := ioutil.ReadFile(filepath.Join(repo.Path, name))
|
||||
|
||||
if err == nil && SHAPattern.Match(ref[:40]) && ref[40] == '\n' {
|
||||
return string(ref[:40]), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stdout, err := NewCommand("show-ref", "--verify", "--hash", name).RunInDir(repo.Path)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "not a valid ref") {
|
||||
@@ -69,6 +82,11 @@ func (repo *Repository) getCommit(id SHA1) (*Commit, error) {
|
||||
}()
|
||||
|
||||
bufReader := bufio.NewReader(stdoutReader)
|
||||
|
||||
return repo.getCommitFromBatchReader(bufReader, id)
|
||||
}
|
||||
|
||||
func (repo *Repository) getCommitFromBatchReader(bufReader *bufio.Reader, id SHA1) (*Commit, error) {
|
||||
_, typ, size, err := ReadBatchLine(bufReader)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
@@ -106,7 +124,6 @@ func (repo *Repository) getCommit(id SHA1) (*Commit, error) {
|
||||
case "commit":
|
||||
return CommitFromReader(repo, id, io.LimitReader(bufReader, size))
|
||||
default:
|
||||
_ = stdoutReader.CloseWithError(fmt.Errorf("unknown typ: %s", typ))
|
||||
log("Unknown typ: %s", typ)
|
||||
return nil, ErrNotExist{
|
||||
ID: id.String(),
|
||||
|
||||
@@ -7,23 +7,18 @@ package git
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetLatestCommitTime(t *testing.T) {
|
||||
lct, err := GetLatestCommitTime(".")
|
||||
bareRepo1Path := filepath.Join(testReposDir, "repo1_bare")
|
||||
lct, err := GetLatestCommitTime(bareRepo1Path)
|
||||
assert.NoError(t, err)
|
||||
// Time is in the past
|
||||
now := time.Now()
|
||||
assert.True(t, lct.Unix() < now.Unix(), "%d not smaller than %d", lct, now)
|
||||
// Time is after Mon Oct 23 03:52:09 2017 +0300
|
||||
// Time is Sun Jul 21 22:43:13 2019 +0200
|
||||
// which is the time of commit
|
||||
// d47b98c44c9a6472e44ab80efe65235e11c6da2a
|
||||
refTime, err := time.Parse("Mon Jan 02 15:04:05 2006 -0700", "Mon Oct 23 03:52:09 2017 +0300")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, lct.Unix() > refTime.Unix(), "%d not greater than %d", lct, refTime)
|
||||
// feaf4ba6bc635fec442f46ddd4512416ec43c2c2 (refs/heads/master)
|
||||
assert.EqualValues(t, 1563741793, lct.Unix())
|
||||
}
|
||||
|
||||
func TestRepoIsEmpty(t *testing.T) {
|
||||
|
||||
@@ -35,6 +35,7 @@ func (tag *Tag) Commit() (*Commit, error) {
|
||||
// \n\n separate headers from message
|
||||
func parseTagData(data []byte) (*Tag, error) {
|
||||
tag := new(Tag)
|
||||
tag.Tagger = &Signature{}
|
||||
// we now have the contents of the commit object. Let's investigate...
|
||||
nextline := 0
|
||||
l:
|
||||
|
||||
@@ -7,6 +7,7 @@ package gitgraph
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
"code.gitea.io/gitea/modules/git"
|
||||
@@ -216,10 +217,10 @@ func newRefsFromRefNames(refNames []byte) []git.Reference {
|
||||
continue
|
||||
}
|
||||
refName := string(refNameBytes)
|
||||
if refName[0:5] == "tag: " {
|
||||
refName = refName[5:]
|
||||
} else if refName[0:8] == "HEAD -> " {
|
||||
refName = refName[8:]
|
||||
if strings.HasPrefix(refName, "tag: ") {
|
||||
refName = strings.TrimPrefix(refName, "tag: ")
|
||||
} else if strings.HasPrefix(refName, "HEAD -> ") {
|
||||
refName = strings.TrimPrefix(refName, "HEAD -> ")
|
||||
}
|
||||
refs = append(refs, git.Reference{
|
||||
Name: refName,
|
||||
|
||||
@@ -74,12 +74,14 @@ func (g *Manager) start() {
|
||||
|
||||
// Make SVC process
|
||||
run := svc.Run
|
||||
isWindowsService, err := svc.IsWindowsService()
|
||||
|
||||
//lint:ignore SA1019 We use IsAnInteractiveSession because IsWindowsService has a different permissions profile
|
||||
isAnInteractiveSession, err := svc.IsAnInteractiveSession()
|
||||
if err != nil {
|
||||
log.Error("Unable to ascertain if running as an Windows Service: %v", err)
|
||||
return
|
||||
}
|
||||
if !isWindowsService {
|
||||
if isAnInteractiveSession {
|
||||
log.Trace("Not running a service ... using the debug SVC manager")
|
||||
run = debug.Run
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -26,6 +27,10 @@ var (
|
||||
DefaultWriteTimeOut time.Duration
|
||||
// DefaultMaxHeaderBytes default max header bytes
|
||||
DefaultMaxHeaderBytes int
|
||||
// PerWriteWriteTimeout timeout for writes
|
||||
PerWriteWriteTimeout = 30 * time.Second
|
||||
// PerWriteWriteTimeoutKbTime is a timeout taking account of how much there is to be written
|
||||
PerWriteWriteTimeoutKbTime = 10 * time.Second
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -45,6 +50,8 @@ type Server struct {
|
||||
lock *sync.RWMutex
|
||||
BeforeBegin func(network, address string)
|
||||
OnShutdown func()
|
||||
PerWriteTimeout time.Duration
|
||||
PerWritePerKbTimeout time.Duration
|
||||
}
|
||||
|
||||
// NewServer creates a server on network at provided address
|
||||
@@ -60,6 +67,8 @@ func NewServer(network, address, name string) *Server {
|
||||
lock: &sync.RWMutex{},
|
||||
network: network,
|
||||
address: address,
|
||||
PerWriteTimeout: setting.PerWriteTimeout,
|
||||
PerWritePerKbTimeout: setting.PerWritePerKbTimeout,
|
||||
}
|
||||
|
||||
srv.BeforeBegin = func(network, addr string) {
|
||||
@@ -224,6 +233,8 @@ func (wl *wrappedListener) Accept() (net.Conn, error) {
|
||||
Conn: c,
|
||||
server: wl.server,
|
||||
closed: &closed,
|
||||
perWriteTimeout: wl.server.PerWriteTimeout,
|
||||
perWritePerKbTimeout: wl.server.PerWritePerKbTimeout,
|
||||
}
|
||||
|
||||
wl.server.wg.Add(1)
|
||||
@@ -248,6 +259,23 @@ type wrappedConn struct {
|
||||
net.Conn
|
||||
server *Server
|
||||
closed *int32
|
||||
deadline time.Time
|
||||
perWriteTimeout time.Duration
|
||||
perWritePerKbTimeout time.Duration
|
||||
}
|
||||
|
||||
func (w wrappedConn) Write(p []byte) (n int, err error) {
|
||||
if w.perWriteTimeout > 0 {
|
||||
minTimeout := time.Duration(len(p)/1024) * w.perWritePerKbTimeout
|
||||
minDeadline := time.Now().Add(minTimeout).Add(w.perWriteTimeout)
|
||||
|
||||
w.deadline = w.deadline.Add(minTimeout)
|
||||
if minDeadline.After(w.deadline) {
|
||||
w.deadline = minDeadline
|
||||
}
|
||||
_ = w.Conn.SetWriteDeadline(w.deadline)
|
||||
}
|
||||
return w.Conn.Write(p)
|
||||
}
|
||||
|
||||
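A worked example of the deadline arithmetic above, using the 30s per-write and 10s per-KiB values declared earlier in this change as sample numbers only: a single 64 KiB write is allowed roughly 30s plus 64 times 10s, and because w.deadline only ever grows, a slow but progressing writer is not cut off mid-stream.

    p := make([]byte, 64*1024)
    minTimeout := time.Duration(len(p)/1024) * (10 * time.Second) // 640s for 64 KiB
    minDeadline := time.Now().Add(minTimeout).Add(30 * time.Second)
    _ = minDeadline // the connection's write deadline becomes at least now + 640s + 30s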
func (w wrappedConn) Close() error {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
@@ -26,11 +27,13 @@ func GetCacheControl() string {
// generateETag generates an ETag based on size, filename and file modification time
func generateETag(fi os.FileInfo) string {
etag := fmt.Sprint(fi.Size()) + fi.Name() + fi.ModTime().UTC().Format(http.TimeFormat)
return base64.StdEncoding.EncodeToString([]byte(etag))
return `"` + base64.StdEncoding.EncodeToString([]byte(etag)) + `"`
}

// HandleTimeCache handles time-based caching for a HTTP request
func HandleTimeCache(req *http.Request, w http.ResponseWriter, fi os.FileInfo) (handled bool) {
w.Header().Set("Cache-Control", GetCacheControl())

ifModifiedSince := req.Header.Get("If-Modified-Since")
if ifModifiedSince != "" {
t, err := time.Parse(http.TimeFormat, ifModifiedSince)
@@ -40,20 +43,40 @@ func HandleTimeCache(req *http.Request, w http.ResponseWriter, fi os.FileInfo) (
}
}

w.Header().Set("Cache-Control", GetCacheControl())
w.Header().Set("Last-Modified", fi.ModTime().Format(http.TimeFormat))
return false
}

// HandleEtagCache handles ETag-based caching for a HTTP request
func HandleEtagCache(req *http.Request, w http.ResponseWriter, fi os.FileInfo) (handled bool) {
// HandleFileETagCache handles ETag-based caching for a HTTP request
func HandleFileETagCache(req *http.Request, w http.ResponseWriter, fi os.FileInfo) (handled bool) {
etag := generateETag(fi)
if req.Header.Get("If-None-Match") == etag {
return HandleGenericETagCache(req, w, etag)
}

// HandleGenericETagCache handles ETag-based caching for a HTTP request.
// It returns true if the request was handled.
func HandleGenericETagCache(req *http.Request, w http.ResponseWriter, etag string) (handled bool) {
if len(etag) > 0 {
w.Header().Set("Etag", etag)
if checkIfNoneMatchIsValid(req, etag) {
w.WriteHeader(http.StatusNotModified)
return true
}

}
w.Header().Set("Cache-Control", GetCacheControl())
w.Header().Set("ETag", etag)
return false
}

// checkIfNoneMatchIsValid tests if the header If-None-Match matches the ETag
func checkIfNoneMatchIsValid(req *http.Request, etag string) bool {
ifNoneMatch := req.Header.Get("If-None-Match")
if len(ifNoneMatch) > 0 {
for _, item := range strings.Split(ifNoneMatch, ",") {
item = strings.TrimSpace(item)
if item == etag {
return true
}
}
}
return false
}

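A sketch of how a caller can use the split API (handler name and ETag value are illustrative): HandleFileETagCache keeps the old file-based behaviour, while HandleGenericETagCache caches anything for which a stable, quoted ETag can be derived.

    func serveBlob(w http.ResponseWriter, req *http.Request, blobSHA string, data []byte) {
        // ETag values are opaque quoted strings, hence the surrounding quotes.
        if httpcache.HandleGenericETagCache(req, w, `"`+blobSHA+`"`) {
            return // 304 Not Modified has already been written
        }
        _, _ = w.Write(data)
    }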
modules/httpcache/httpcache_test.go (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package httpcache
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type mockFileInfo struct {
|
||||
}
|
||||
|
||||
func (m mockFileInfo) Name() string { return "gitea.test" }
|
||||
func (m mockFileInfo) Size() int64 { return int64(10) }
|
||||
func (m mockFileInfo) Mode() os.FileMode { return os.ModePerm }
|
||||
func (m mockFileInfo) ModTime() time.Time { return time.Time{} }
|
||||
func (m mockFileInfo) IsDir() bool { return false }
|
||||
func (m mockFileInfo) Sys() interface{} { return nil }
|
||||
|
||||
func TestHandleFileETagCache(t *testing.T) {
|
||||
fi := mockFileInfo{}
|
||||
etag := `"MTBnaXRlYS50ZXN0TW9uLCAwMSBKYW4gMDAwMSAwMDowMDowMCBHTVQ="`
|
||||
|
||||
t.Run("No_If-None-Match", func(t *testing.T) {
|
||||
req := &http.Request{Header: make(http.Header)}
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handled := HandleFileETagCache(req, w, fi)
|
||||
|
||||
assert.False(t, handled)
|
||||
assert.Len(t, w.Header(), 2)
|
||||
assert.Contains(t, w.Header(), "Cache-Control")
|
||||
assert.Contains(t, w.Header(), "Etag")
|
||||
assert.Equal(t, etag, w.Header().Get("Etag"))
|
||||
})
|
||||
t.Run("Wrong_If-None-Match", func(t *testing.T) {
|
||||
req := &http.Request{Header: make(http.Header)}
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
req.Header.Set("If-None-Match", `"wrong etag"`)
|
||||
|
||||
handled := HandleFileETagCache(req, w, fi)
|
||||
|
||||
assert.False(t, handled)
|
||||
assert.Len(t, w.Header(), 2)
|
||||
assert.Contains(t, w.Header(), "Cache-Control")
|
||||
assert.Contains(t, w.Header(), "Etag")
|
||||
assert.Equal(t, etag, w.Header().Get("Etag"))
|
||||
})
|
||||
t.Run("Correct_If-None-Match", func(t *testing.T) {
|
||||
req := &http.Request{Header: make(http.Header)}
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
req.Header.Set("If-None-Match", etag)
|
||||
|
||||
handled := HandleFileETagCache(req, w, fi)
|
||||
|
||||
assert.True(t, handled)
|
||||
assert.Len(t, w.Header(), 1)
|
||||
assert.Contains(t, w.Header(), "Etag")
|
||||
assert.Equal(t, etag, w.Header().Get("Etag"))
|
||||
assert.Equal(t, http.StatusNotModified, w.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHandleGenericETagCache(t *testing.T) {
|
||||
etag := `"test"`
|
||||
|
||||
t.Run("No_If-None-Match", func(t *testing.T) {
|
||||
req := &http.Request{Header: make(http.Header)}
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
handled := HandleGenericETagCache(req, w, etag)
|
||||
|
||||
assert.False(t, handled)
|
||||
assert.Len(t, w.Header(), 2)
|
||||
assert.Contains(t, w.Header(), "Cache-Control")
|
||||
assert.Contains(t, w.Header(), "Etag")
|
||||
assert.Equal(t, etag, w.Header().Get("Etag"))
|
||||
})
|
||||
t.Run("Wrong_If-None-Match", func(t *testing.T) {
|
||||
req := &http.Request{Header: make(http.Header)}
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
req.Header.Set("If-None-Match", `"wrong etag"`)
|
||||
|
||||
handled := HandleGenericETagCache(req, w, etag)
|
||||
|
||||
assert.False(t, handled)
|
||||
assert.Len(t, w.Header(), 2)
|
||||
assert.Contains(t, w.Header(), "Cache-Control")
|
||||
assert.Contains(t, w.Header(), "Etag")
|
||||
assert.Equal(t, etag, w.Header().Get("Etag"))
|
||||
})
|
||||
t.Run("Correct_If-None-Match", func(t *testing.T) {
|
||||
req := &http.Request{Header: make(http.Header)}
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
req.Header.Set("If-None-Match", etag)
|
||||
|
||||
handled := HandleGenericETagCache(req, w, etag)
|
||||
|
||||
assert.True(t, handled)
|
||||
assert.Len(t, w.Header(), 1)
|
||||
assert.Contains(t, w.Header(), "Etag")
|
||||
assert.Equal(t, etag, w.Header().Get("Etag"))
|
||||
assert.Equal(t, http.StatusNotModified, w.Code)
|
||||
})
|
||||
t.Run("Multiple_Wrong_If-None-Match", func(t *testing.T) {
|
||||
req := &http.Request{Header: make(http.Header)}
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
req.Header.Set("If-None-Match", `"wrong etag", "wrong etag "`)
|
||||
|
||||
handled := HandleGenericETagCache(req, w, etag)
|
||||
|
||||
assert.False(t, handled)
|
||||
assert.Len(t, w.Header(), 2)
|
||||
assert.Contains(t, w.Header(), "Cache-Control")
|
||||
assert.Contains(t, w.Header(), "Etag")
|
||||
assert.Equal(t, etag, w.Header().Get("Etag"))
|
||||
})
|
||||
t.Run("Multiple_Correct_If-None-Match", func(t *testing.T) {
|
||||
req := &http.Request{Header: make(http.Header)}
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
req.Header.Set("If-None-Match", `"wrong etag", `+etag)
|
||||
|
||||
handled := HandleGenericETagCache(req, w, etag)
|
||||
|
||||
assert.True(t, handled)
|
||||
assert.Len(t, w.Header(), 1)
|
||||
assert.Contains(t, w.Header(), "Etag")
|
||||
assert.Equal(t, etag, w.Header().Get("Etag"))
|
||||
assert.Equal(t, http.StatusNotModified, w.Code)
|
||||
})
|
||||
}
|
||||
@@ -325,7 +325,7 @@ func (r *Request) getResponse() (*http.Response, error) {
trans = &http.Transport{
TLSClientConfig: r.setting.TLSClientConfig,
Proxy: proxy,
Dial: TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout),
Dial: TimeoutDialer(r.setting.ConnectTimeout),
}
} else if t, ok := trans.(*http.Transport); ok {
if t.TLSClientConfig == nil {
@@ -335,7 +335,7 @@ func (r *Request) getResponse() (*http.Response, error) {
t.Proxy = r.setting.Proxy
}
if t.Dial == nil {
t.Dial = TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout)
t.Dial = TimeoutDialer(r.setting.ConnectTimeout)
}
}

@@ -352,6 +352,7 @@ func (r *Request) getResponse() (*http.Response, error) {
client := &http.Client{
Transport: trans,
Jar: jar,
Timeout: r.setting.ReadWriteTimeout,
}

if len(r.setting.UserAgent) > 0 && len(r.req.Header.Get("User-Agent")) == 0 {
@@ -457,12 +458,12 @@ func (r *Request) Response() (*http.Response, error) {
}

// TimeoutDialer returns functions of connection dialer with timeout settings for http.Transport Dial field.
func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net.Conn, err error) {
func TimeoutDialer(cTimeout time.Duration) func(net, addr string) (c net.Conn, err error) {
return func(netw, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(netw, addr, cTimeout)
if err != nil {
return nil, err
}
return conn, conn.SetDeadline(time.Now().Add(rwTimeout))
return conn, nil
}
}

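With rwTimeout gone, the connection no longer gets a one-shot deadline at dial time; the whole exchange is instead bounded by http.Client.Timeout, which the hunk above now sets from r.setting.ReadWriteTimeout. A minimal sketch of the resulting pattern (values are examples only):

    client := &http.Client{
        Transport: &http.Transport{
            Dial: TimeoutDialer(10 * time.Second), // bounds connection setup only
        },
        Timeout: 60 * time.Second, // bounds the whole request, including reading the body
    }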
@@ -38,7 +38,11 @@ func (db *DBIndexer) Index(id int64) error {
|
||||
// Get latest commit for default branch
|
||||
commitID, err := gitRepo.GetBranchCommitID(repo.DefaultBranch)
|
||||
if err != nil {
|
||||
log.Error("Unable to get commit ID for defaultbranch %s in %s", repo.DefaultBranch, repo.RepoPath())
|
||||
if git.IsErrBranchNotExist(err) || git.IsErrNotExist((err)) {
|
||||
log.Debug("Unable to get commit ID for defaultbranch %s in %s ... skipping this repository", repo.DefaultBranch, repo.RepoPath())
|
||||
return nil
|
||||
}
|
||||
log.Error("Unable to get commit ID for defaultbranch %s in %s. Error: %v", repo.DefaultBranch, repo.RepoPath(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -177,7 +177,7 @@ func (log *FileLogger) DoRotate() error {
|
||||
|
||||
// close fd before rename
|
||||
// Rename the file to its newfound home
|
||||
if err = os.Rename(log.Filename, fname); err != nil {
|
||||
if err = util.Rename(log.Filename, fname); err != nil {
|
||||
return fmt.Errorf("Rotate: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -87,6 +87,7 @@ func isLinkStr(link string) bool {
|
||||
return validLinksPattern.MatchString(link)
|
||||
}
|
||||
|
||||
// FIXME: This function is not concurrent safe
|
||||
func getIssueFullPattern() *regexp.Regexp {
|
||||
if issueFullPattern == nil {
|
||||
issueFullPattern = regexp.MustCompile(regexp.QuoteMeta(setting.AppURL) +
|
||||
@@ -333,40 +334,37 @@ func (ctx *postProcessCtx) postProcess(rawHTML []byte) ([]byte, error) {
|
||||
_, _ = res.WriteString("</body></html>")
|
||||
|
||||
// parse the HTML
|
||||
nodes, err := html.ParseFragment(res, nil)
|
||||
node, err := html.Parse(res)
|
||||
if err != nil {
|
||||
return nil, &postProcessError{"invalid HTML", err}
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
ctx.visitNode(node, true)
|
||||
if node.Type == html.DocumentNode {
|
||||
node = node.FirstChild
|
||||
}
|
||||
|
||||
newNodes := make([]*html.Node, 0, len(nodes))
|
||||
ctx.visitNode(node, true)
|
||||
|
||||
nodes := make([]*html.Node, 0, 5)
|
||||
|
||||
for _, node := range nodes {
|
||||
if node.Data == "html" {
|
||||
node = node.FirstChild
|
||||
for node != nil && node.Data != "body" {
|
||||
node = node.NextSibling
|
||||
}
|
||||
}
|
||||
if node == nil {
|
||||
continue
|
||||
}
|
||||
if node != nil {
|
||||
if node.Data == "body" {
|
||||
child := node.FirstChild
|
||||
for child != nil {
|
||||
newNodes = append(newNodes, child)
|
||||
nodes = append(nodes, child)
|
||||
child = child.NextSibling
|
||||
}
|
||||
} else {
|
||||
newNodes = append(newNodes, node)
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
}
|
||||
|
||||
nodes = newNodes
|
||||
|
||||
// Create buffer in which the data will be placed again. We know that the
|
||||
// length will be at least that of res; to spare a few alloc+copy, we
|
||||
// reuse res, resetting its length to 0.
|
||||
@@ -403,24 +401,20 @@ func (ctx *postProcessCtx) visitNode(node *html.Node, visitText bool) {
|
||||
}
|
||||
case html.ElementNode:
|
||||
if node.Data == "img" {
|
||||
attrs := node.Attr
|
||||
for idx, attr := range attrs {
|
||||
for i, attr := range node.Attr {
|
||||
if attr.Key != "src" {
|
||||
continue
|
||||
}
|
||||
link := []byte(attr.Val)
|
||||
if len(link) > 0 && !IsLink(link) {
|
||||
if len(attr.Val) > 0 && !isLinkStr(attr.Val) && !strings.HasPrefix(attr.Val, "data:image/") {
|
||||
prefix := ctx.urlPrefix
|
||||
if ctx.isWikiMarkdown {
|
||||
prefix = util.URLJoin(prefix, "wiki", "raw")
|
||||
}
|
||||
prefix = strings.Replace(prefix, "/src/", "/media/", 1)
|
||||
|
||||
lnk := string(link)
|
||||
lnk = util.URLJoin(prefix, lnk)
|
||||
link = []byte(lnk)
|
||||
attr.Val = util.URLJoin(prefix, attr.Val)
|
||||
}
|
||||
node.Attr[idx].Val = string(link)
|
||||
node.Attr[i] = attr
|
||||
}
|
||||
} else if node.Data == "a" {
|
||||
visitText = false
|
||||
@@ -610,11 +604,16 @@ func replaceContentList(node *html.Node, i, j int, newNodes []*html.Node) {
|
||||
}
|
||||
|
||||
func mentionProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
start := 0
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next && start < len(node.Data) {
|
||||
// We replace only the first mention; other mentions will be addressed later
|
||||
found, loc := references.FindFirstMentionBytes([]byte(node.Data))
|
||||
found, loc := references.FindFirstMentionBytes([]byte(node.Data[start:]))
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
loc.Start += start
|
||||
loc.End += start
|
||||
mention := node.Data[loc.Start:loc.End]
|
||||
var teams string
|
||||
teams, ok := ctx.metas["teams"]
|
||||
@@ -626,10 +625,17 @@ func mentionProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
mentionOrgAndTeam := strings.Split(mention, "/")
|
||||
if mentionOrgAndTeam[0][1:] == ctx.metas["org"] && strings.Contains(teams, ","+strings.ToLower(mentionOrgAndTeam[1])+",") {
|
||||
replaceContent(node, loc.Start, loc.End, createLink(util.URLJoin(setting.AppURL, "org", ctx.metas["org"], "teams", mentionOrgAndTeam[1]), mention, "mention"))
|
||||
node = node.NextSibling.NextSibling
|
||||
start = 0
|
||||
continue
|
||||
}
|
||||
return
|
||||
start = loc.End
|
||||
continue
|
||||
}
|
||||
replaceContent(node, loc.Start, loc.End, createLink(util.URLJoin(setting.AppURL, mention[1:]), mention, "mention"))
|
||||
node = node.NextSibling.NextSibling
|
||||
start = 0
|
||||
}
|
||||
}
|
||||
|
||||
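All of these post-processors move from handling only the first match to scanning the whole text node. The shared shape, simplified and with the find and build helpers assumed rather than taken from the diff, is: find the next match after start, replace it, then continue either past the match or in the trailing text node that replaceContent leaves behind.

    start := 0
    next := node.NextSibling
    for node != nil && node != next && start < len(node.Data) {
        m := findNextMatch(node.Data[start:]) // hypothetical matcher returning [start, end) offsets
        if m == nil {
            return
        }
        m[0], m[1] = m[0]+start, m[1]+start
        replaceContent(node, m[0], m[1], buildLinkNode(node.Data[m[0]:m[1]])) // buildLinkNode is assumed
        node = node.NextSibling.NextSibling // the text that follows the newly inserted link
        start = 0
    }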
func shortLinkProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
@@ -637,6 +643,8 @@ func shortLinkProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
}
|
||||
|
||||
func shortLinkProcessorFull(ctx *postProcessCtx, node *html.Node, noLink bool) {
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next {
|
||||
m := shortLinkPattern.FindStringSubmatchIndex(node.Data)
|
||||
if m == nil {
|
||||
return
|
||||
@@ -716,7 +724,7 @@ func shortLinkProcessorFull(ctx *postProcessCtx, node *html.Node, noLink bool) {
|
||||
switch ext := filepath.Ext(link); ext {
|
||||
// fast path: empty string, ignore
|
||||
case "":
|
||||
break
|
||||
// leave image as false
|
||||
case ".jpg", ".jpeg", ".png", ".tif", ".tiff", ".webp", ".gif", ".bmp", ".ico", ".svg":
|
||||
image = true
|
||||
}
|
||||
@@ -792,12 +800,16 @@ func shortLinkProcessorFull(ctx *postProcessCtx, node *html.Node, noLink bool) {
|
||||
linkNode.Attr = []html.Attribute{{Key: "href", Val: link}}
|
||||
}
|
||||
replaceContent(node, m[0], m[1], linkNode)
|
||||
node = node.NextSibling.NextSibling
|
||||
}
|
||||
}
|
||||
|
||||
func fullIssuePatternProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
if ctx.metas == nil {
|
||||
return
|
||||
}
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next {
|
||||
m := getIssueFullPattern().FindStringSubmatchIndex(node.Data)
|
||||
if m == nil {
|
||||
return
|
||||
@@ -815,23 +827,25 @@ func fullIssuePatternProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
// TODO if m[4]:m[5] is not nil, then link is to a comment,
|
||||
// and we should indicate that in the text somehow
|
||||
replaceContent(node, m[0], m[1], createLink(link, id, "ref-issue"))
|
||||
|
||||
} else {
|
||||
orgRepoID := matchOrg + "/" + matchRepo + id
|
||||
replaceContent(node, m[0], m[1], createLink(link, orgRepoID, "ref-issue"))
|
||||
}
|
||||
node = node.NextSibling.NextSibling
|
||||
}
|
||||
}
|
||||
|
||||
func issueIndexPatternProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
if ctx.metas == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
found bool
|
||||
ref *references.RenderizableReference
|
||||
)
|
||||
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next {
|
||||
_, exttrack := ctx.metas["format"]
|
||||
alphanum := ctx.metas["style"] == IssueNameStyleAlphanumeric
|
||||
|
||||
@@ -872,7 +886,8 @@ func issueIndexPatternProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
|
||||
if ref.Action == references.XRefActionNone {
|
||||
replaceContent(node, ref.RefLocation.Start, ref.RefLocation.End, link)
|
||||
return
|
||||
node = node.NextSibling.NextSibling
|
||||
continue
|
||||
}
|
||||
|
||||
// Decorate action keywords if actionable
|
||||
@@ -890,6 +905,8 @@ func issueIndexPatternProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
Data: node.Data[ref.ActionLocation.End:ref.RefLocation.Start],
|
||||
}
|
||||
replaceContentList(node, ref.ActionLocation.Start, ref.RefLocation.End, []*html.Node{keyword, spaces, link})
|
||||
node = node.NextSibling.NextSibling.NextSibling.NextSibling
|
||||
}
|
||||
}
|
||||
|
||||
// fullSha1PatternProcessor renders SHA containing URLs
|
||||
@@ -897,6 +914,9 @@ func fullSha1PatternProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
if ctx.metas == nil {
|
||||
return
|
||||
}
|
||||
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next {
|
||||
m := anySHA1Pattern.FindStringSubmatchIndex(node.Data)
|
||||
if m == nil {
|
||||
return
|
||||
@@ -941,15 +961,23 @@ func fullSha1PatternProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
}
|
||||
|
||||
replaceContent(node, start, end, createCodeLink(urlFull, text, "commit"))
|
||||
node = node.NextSibling.NextSibling
|
||||
}
|
||||
}
|
||||
|
||||
// emojiShortCodeProcessor for rendering text like :smile: into emoji
|
||||
func emojiShortCodeProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
|
||||
m := EmojiShortCodeRegex.FindStringSubmatchIndex(node.Data)
|
||||
start := 0
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next && start < len(node.Data) {
|
||||
m := EmojiShortCodeRegex.FindStringSubmatchIndex(node.Data[start:])
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
m[0] += start
|
||||
m[1] += start
|
||||
|
||||
start = m[1]
|
||||
|
||||
alias := node.Data[m[0]:m[1]]
|
||||
alias = strings.ReplaceAll(alias, ":", "")
|
||||
@@ -959,25 +987,39 @@ func emojiShortCodeProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
s := strings.Join(setting.UI.Reactions, " ") + "gitea"
|
||||
if strings.Contains(s, alias) {
|
||||
replaceContent(node, m[0], m[1], createCustomEmoji(alias, "emoji"))
|
||||
return
|
||||
node = node.NextSibling.NextSibling
|
||||
start = 0
|
||||
continue
|
||||
}
|
||||
return
|
||||
continue
|
||||
}
|
||||
|
||||
replaceContent(node, m[0], m[1], createEmoji(converted.Emoji, "emoji", converted.Description))
|
||||
node = node.NextSibling.NextSibling
|
||||
start = 0
|
||||
}
|
||||
}
|
||||
|
||||
// emoji processor to match emoji and add emoji class
|
||||
func emojiProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
m := emoji.FindEmojiSubmatchIndex(node.Data)
|
||||
start := 0
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next && start < len(node.Data) {
|
||||
m := emoji.FindEmojiSubmatchIndex(node.Data[start:])
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
m[0] += start
|
||||
m[1] += start
|
||||
|
||||
codepoint := node.Data[m[0]:m[1]]
|
||||
start = m[1]
|
||||
val := emoji.FromCode(codepoint)
|
||||
if val != nil {
|
||||
replaceContent(node, m[0], m[1], createEmoji(codepoint, "emoji", val.Description))
|
||||
node = node.NextSibling.NextSibling
|
||||
start = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -987,10 +1029,17 @@ func sha1CurrentPatternProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
if ctx.metas == nil || ctx.metas["user"] == "" || ctx.metas["repo"] == "" || ctx.metas["repoPath"] == "" {
|
||||
return
|
||||
}
|
||||
m := sha1CurrentPattern.FindStringSubmatchIndex(node.Data)
|
||||
|
||||
start := 0
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next && start < len(node.Data) {
|
||||
m := sha1CurrentPattern.FindStringSubmatchIndex(node.Data[start:])
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
m[2] += start
|
||||
m[3] += start
|
||||
|
||||
hash := node.Data[m[2]:m[3]]
|
||||
// The regex does not lie, it matches the hash pattern.
|
||||
// However, a regex cannot know if a hash actually exists or not.
|
||||
@@ -1004,32 +1053,45 @@ func sha1CurrentPatternProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
if !strings.Contains(err.Error(), "fatal: Needed a single revision") {
|
||||
log.Debug("sha1CurrentPatternProcessor git rev-parse: %v", err)
|
||||
}
|
||||
return
|
||||
start = m[3]
|
||||
continue
|
||||
}
|
||||
|
||||
replaceContent(node, m[2], m[3],
|
||||
createCodeLink(util.URLJoin(setting.AppURL, ctx.metas["user"], ctx.metas["repo"], "commit", hash), base.ShortSha(hash), "commit"))
|
||||
start = 0
|
||||
node = node.NextSibling.NextSibling
|
||||
}
|
||||
}
|
||||
|
||||
// emailAddressProcessor replaces raw email addresses with a mailto: link.
|
||||
func emailAddressProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next {
|
||||
m := emailRegex.FindStringSubmatchIndex(node.Data)
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
mail := node.Data[m[2]:m[3]]
|
||||
replaceContent(node, m[2], m[3], createLink("mailto:"+mail, mail, "mailto"))
|
||||
node = node.NextSibling.NextSibling
|
||||
}
|
||||
}
|
||||
|
||||
// linkProcessor creates links for any HTTP or HTTPS URL not captured by
|
||||
// markdown.
|
||||
func linkProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next {
|
||||
m := common.LinkRegex.FindStringIndex(node.Data)
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
uri := node.Data[m[0]:m[1]]
|
||||
replaceContent(node, m[0], m[1], createLink(uri, uri, "link"))
|
||||
node = node.NextSibling.NextSibling
|
||||
}
|
||||
}
|
||||
|
||||
func genDefaultLinkProcessor(defaultLink string) processor {
|
||||
@@ -1053,12 +1115,17 @@ func genDefaultLinkProcessor(defaultLink string) processor {
|
||||
|
||||
// descriptionLinkProcessor creates links for DescriptionHTML
|
||||
func descriptionLinkProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||
next := node.NextSibling
|
||||
for node != nil && node != next {
|
||||
m := common.LinkRegex.FindStringIndex(node.Data)
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
uri := node.Data[m[0]:m[1]]
|
||||
replaceContent(node, m[0], m[1], createDescriptionLink(uri, uri))
|
||||
node = node.NextSibling.NextSibling
|
||||
}
|
||||
}
|
||||
|
||||
func createDescriptionLink(href, content string) *html.Node {
|
||||
|
||||
@@ -124,13 +124,13 @@ func TestRender_links(t *testing.T) {
|
||||
`<p><a href="http://www.example.com/wpstyle/?p=364" rel="nofollow">http://www.example.com/wpstyle/?p=364</a></p>`)
|
||||
test(
|
||||
"https://www.example.com/foo/?bar=baz&inga=42&quux",
|
||||
`<p><a href="https://www.example.com/foo/?bar=baz&inga=42&quux" rel="nofollow">https://www.example.com/foo/?bar=baz&inga=42&quux</a></p>`)
|
||||
`<p><a href="https://www.example.com/foo/?bar=baz&inga=42&quux" rel="nofollow">https://www.example.com/foo/?bar=baz&inga=42&quux</a></p>`)
|
||||
test(
|
||||
"http://142.42.1.1/",
|
||||
`<p><a href="http://142.42.1.1/" rel="nofollow">http://142.42.1.1/</a></p>`)
|
||||
test(
|
||||
"https://github.com/go-gitea/gitea/?p=aaa/bbb.html#ccc-ddd",
|
||||
`<p><a href="https://github.com/go-gitea/gitea/?p=aaa%2Fbbb.html#ccc-ddd" rel="nofollow">https://github.com/go-gitea/gitea/?p=aaa/bbb.html#ccc-ddd</a></p>`)
|
||||
`<p><a href="https://github.com/go-gitea/gitea/?p=aaa/bbb.html#ccc-ddd" rel="nofollow">https://github.com/go-gitea/gitea/?p=aaa/bbb.html#ccc-ddd</a></p>`)
|
||||
test(
|
||||
"https://en.wikipedia.org/wiki/URL_(disambiguation)",
|
||||
`<p><a href="https://en.wikipedia.org/wiki/URL_(disambiguation)" rel="nofollow">https://en.wikipedia.org/wiki/URL_(disambiguation)</a></p>`)
|
||||
@@ -148,7 +148,7 @@ func TestRender_links(t *testing.T) {
|
||||
`<p><a href="ftp://gitea.com/file.txt" rel="nofollow">ftp://gitea.com/file.txt</a></p>`)
|
||||
test(
|
||||
"magnet:?xt=urn:btih:5dee65101db281ac9c46344cd6b175cdcadabcde&dn=download",
|
||||
`<p><a href="magnet:?xt=urn%3Abtih%3A5dee65101db281ac9c46344cd6b175cdcadabcde&dn=download" rel="nofollow">magnet:?xt=urn:btih:5dee65101db281ac9c46344cd6b175cdcadabcde&dn=download</a></p>`)
|
||||
`<p><a href="magnet:?xt=urn:btih:5dee65101db281ac9c46344cd6b175cdcadabcde&dn=download" rel="nofollow">magnet:?xt=urn:btih:5dee65101db281ac9c46344cd6b175cdcadabcde&dn=download</a></p>`)
|
||||
|
||||
// Test that should *not* be turned into URL
|
||||
test(
|
||||
@@ -384,6 +384,32 @@ func TestRender_ShortLinks(t *testing.T) {
|
||||
`<p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p>`)
|
||||
}
|
||||
|
||||
func TestRender_RelativeImages(t *testing.T) {
|
||||
setting.AppURL = AppURL
|
||||
setting.AppSubURL = AppSubURL
|
||||
tree := util.URLJoin(AppSubURL, "src", "master")
|
||||
|
||||
test := func(input, expected, expectedWiki string) {
|
||||
buffer := markdown.RenderString(input, tree, localMetas)
|
||||
assert.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(buffer))
|
||||
buffer = markdown.RenderWiki([]byte(input), setting.AppSubURL, localMetas)
|
||||
assert.Equal(t, strings.TrimSpace(expectedWiki), strings.TrimSpace(buffer))
|
||||
}
|
||||
|
||||
rawwiki := util.URLJoin(AppSubURL, "wiki", "raw")
|
||||
mediatree := util.URLJoin(AppSubURL, "media", "master")
|
||||
|
||||
test(
|
||||
`<img src="Link">`,
|
||||
`<img src="`+util.URLJoin(mediatree, "Link")+`"/>`,
|
||||
`<img src="`+util.URLJoin(rawwiki, "Link")+`"/>`)
|
||||
|
||||
test(
|
||||
`<img src="./icon.png">`,
|
||||
`<img src="`+util.URLJoin(mediatree, "icon.png")+`"/>`,
|
||||
`<img src="`+util.URLJoin(rawwiki, "icon.png")+`"/>`)
|
||||
}
|
||||
|
||||
func Test_ParseClusterFuzz(t *testing.T) {
|
||||
setting.AppURL = AppURL
|
||||
setting.AppSubURL = AppSubURL
|
||||
@@ -408,3 +434,36 @@ func Test_ParseClusterFuzz(t *testing.T) {
|
||||
|
||||
assert.NotContains(t, string(val), "<html")
|
||||
}
|
||||
|
||||
func TestIssue16020(t *testing.T) {
|
||||
setting.AppURL = AppURL
|
||||
setting.AppSubURL = AppSubURL
|
||||
|
||||
var localMetas = map[string]string{
|
||||
"user": "go-gitea",
|
||||
"repo": "gitea",
|
||||
}
|
||||
|
||||
data := `<img src="data:image/png;base64,i//V"/>`
|
||||
|
||||
// func PostProcess(rawHTML []byte, urlPrefix string, metas map[string]string, isWikiMarkdown bool) ([]byte, error)
|
||||
res, err := PostProcess([]byte(data), "https://example.com", localMetas, false)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, data, string(res))
|
||||
}
|
||||
|
||||
func BenchmarkEmojiPostprocess(b *testing.B) {
|
||||
data := "🥰 "
|
||||
for len(data) < 1<<16 {
|
||||
data += data
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := PostProcess(
|
||||
[]byte(data),
|
||||
"https://example.com",
|
||||
localMetas,
|
||||
false)
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -50,9 +50,6 @@ func ReplaceSanitizer() {
|
||||
sanitizer.policy.AllowURLSchemes(setting.Markdown.CustomURLSchemes...)
|
||||
}
|
||||
|
||||
// Allow keyword markup
|
||||
sanitizer.policy.AllowAttrs("class").Matching(regexp.MustCompile(`^` + keywordClass + `$`)).OnElements("span")
|
||||
|
||||
// Allow classes for anchors
|
||||
sanitizer.policy.AllowAttrs("class").Matching(regexp.MustCompile(`ref-issue`)).OnElements("a")
|
||||
|
||||
@@ -68,8 +65,8 @@ func ReplaceSanitizer() {
|
||||
// Allow classes for emojis
|
||||
sanitizer.policy.AllowAttrs("class").Matching(regexp.MustCompile(`emoji`)).OnElements("img")
|
||||
|
||||
// Allow icons, emojis, and chroma syntax on span
|
||||
sanitizer.policy.AllowAttrs("class").Matching(regexp.MustCompile(`^((icon(\s+[\p{L}\p{N}_-]+)+)|(emoji))$|^([a-z][a-z0-9]{0,2})$`)).OnElements("span")
|
||||
// Allow icons, emojis, chroma syntax and keyword markup on span
|
||||
sanitizer.policy.AllowAttrs("class").Matching(regexp.MustCompile(`^((icon(\s+[\p{L}\p{N}_-]+)+)|(emoji))$|^([a-z][a-z0-9]{0,2})$|^` + keywordClass + `$`)).OnElements("span")
|
||||
|
||||
// Allow generally safe attributes
|
||||
generalSafeAttrs := []string{"abbr", "accept", "accept-charset",
|
||||
|
||||
@@ -12,9 +12,12 @@ import "code.gitea.io/gitea/modules/structs"
|
||||
type MigrateOptions struct {
|
||||
// required: true
|
||||
CloneAddr string `json:"clone_addr" binding:"Required"`
|
||||
CloneAddrEncrypted string `json:"clone_addr_encrypted,omitempty"`
|
||||
AuthUsername string `json:"auth_username"`
|
||||
AuthPassword string `json:"auth_password"`
|
||||
AuthToken string `json:"auth_token"`
|
||||
AuthPassword string `json:"auth_password,omitempty"`
|
||||
AuthPasswordEncrypted string `json:"auth_password_encrypted,omitempty"`
|
||||
AuthToken string `json:"auth_token,omitempty"`
|
||||
AuthTokenEncrypted string `json:"auth_token_encrypted,omitempty"`
|
||||
// required: true
|
||||
UID int `json:"uid" binding:"Required"`
|
||||
// required: true
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
@@ -563,8 +564,42 @@ func DumpRepository(ctx context.Context, baseDir, ownerName string, opts base.Mi
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateOptionsUnits(opts *base.MigrateOptions, units []string) {
|
||||
if len(units) == 0 {
|
||||
opts.Wiki = true
|
||||
opts.Issues = true
|
||||
opts.Milestones = true
|
||||
opts.Labels = true
|
||||
opts.Releases = true
|
||||
opts.Comments = true
|
||||
opts.PullRequests = true
|
||||
opts.ReleaseAssets = true
|
||||
} else {
|
||||
for _, unit := range units {
|
||||
switch strings.ToLower(unit) {
|
||||
case "wiki":
|
||||
opts.Wiki = true
|
||||
case "issues":
|
||||
opts.Issues = true
|
||||
case "milestones":
|
||||
opts.Milestones = true
|
||||
case "labels":
|
||||
opts.Labels = true
|
||||
case "releases":
|
||||
opts.Releases = true
|
||||
case "release_assets":
|
||||
opts.ReleaseAssets = true
|
||||
case "comments":
|
||||
opts.Comments = true
|
||||
case "pull_requests":
|
||||
opts.PullRequests = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RestoreRepository restore a repository from the disk directory
|
||||
func RestoreRepository(ctx context.Context, baseDir string, ownerName, repoName string) error {
|
||||
func RestoreRepository(ctx context.Context, baseDir string, ownerName, repoName string, units []string) error {
|
||||
doer, err := models.GetAdminUser()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -580,17 +615,12 @@ func RestoreRepository(ctx context.Context, baseDir string, ownerName, repoName
|
||||
}
|
||||
tp, _ := strconv.Atoi(opts["service_type"])
|
||||
|
||||
if err = migrateRepository(downloader, uploader, base.MigrateOptions{
|
||||
Wiki: true,
|
||||
Issues: true,
|
||||
Milestones: true,
|
||||
Labels: true,
|
||||
Releases: true,
|
||||
Comments: true,
|
||||
PullRequests: true,
|
||||
ReleaseAssets: true,
|
||||
var migrateOpts = base.MigrateOptions{
|
||||
GitServiceType: structs.GitServiceType(tp),
|
||||
}); err != nil {
|
||||
}
|
||||
updateOptionsUnits(&migrateOpts, units)
|
||||
|
||||
if err = migrateRepository(downloader, uploader, migrateOpts); err != nil {
|
||||
if err1 := uploader.Rollback(); err1 != nil {
|
||||
log.Error("rollback failed: %v", err1)
|
||||
}
|
||||
|
||||
@@ -248,7 +248,8 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
|
||||
rel.OriginalAuthorID = release.PublisherID
|
||||
}
|
||||
|
||||
// calc NumCommits
|
||||
// calc NumCommits if no draft
|
||||
if !release.Draft {
|
||||
commit, err := g.gitRepo.GetCommit(rel.TagName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("GetCommit: %v", err)
|
||||
@@ -257,6 +258,7 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("CommitsCount: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, asset := range release.Assets {
|
||||
var attach = models.Attachment{
|
||||
@@ -268,9 +270,10 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
|
||||
}
|
||||
|
||||
// download attachment
|
||||
err = func() error {
|
||||
err := func() error {
|
||||
// asset.DownloadURL maybe a local file
|
||||
var rc io.ReadCloser
|
||||
var err error
|
||||
if asset.DownloadURL == nil {
|
||||
rc, err = asset.DownloadFunc()
|
||||
if err != nil {
|
||||
@@ -849,6 +852,7 @@ func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error {
|
||||
// Rollback when migrating failed, this will rollback all the changes.
|
||||
func (g *GiteaLocalUploader) Rollback() error {
|
||||
if g.repo != nil && g.repo.ID > 0 {
|
||||
g.gitRepo.Close()
|
||||
if err := models.DeleteRepository(g.doer, g.repo.OwnerID, g.repo.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -264,34 +264,29 @@ func (g *GithubDownloaderV3) GetLabels() ([]*base.Label, error) {
|
||||
}
|
||||
|
||||
func (g *GithubDownloaderV3) convertGithubRelease(rel *github.RepositoryRelease) *base.Release {
|
||||
var (
|
||||
name string
|
||||
desc string
|
||||
)
|
||||
if rel.Body != nil {
|
||||
desc = *rel.Body
|
||||
}
|
||||
if rel.Name != nil {
|
||||
name = *rel.Name
|
||||
}
|
||||
|
||||
var email string
|
||||
if rel.Author.Email != nil {
|
||||
email = *rel.Author.Email
|
||||
}
|
||||
|
||||
r := &base.Release{
|
||||
TagName: *rel.TagName,
|
||||
TargetCommitish: *rel.TargetCommitish,
|
||||
Name: name,
|
||||
Body: desc,
|
||||
Draft: *rel.Draft,
|
||||
Prerelease: *rel.Prerelease,
|
||||
Created: rel.CreatedAt.Time,
|
||||
PublisherID: *rel.Author.ID,
|
||||
PublisherName: *rel.Author.Login,
|
||||
PublisherEmail: email,
|
||||
Published: rel.PublishedAt.Time,
|
||||
}
|
||||
|
||||
if rel.Body != nil {
|
||||
r.Body = *rel.Body
|
||||
}
|
||||
if rel.Name != nil {
|
||||
r.Name = *rel.Name
|
||||
}
|
||||
|
||||
if rel.Author.Email != nil {
|
||||
r.PublisherEmail = *rel.Author.Email
|
||||
}
|
||||
|
||||
if rel.PublishedAt != nil {
|
||||
r.Published = rel.PublishedAt.Time
|
||||
}
|
||||
|
||||
for _, asset := range rel.Assets {
|
||||
@@ -306,18 +301,17 @@ func (g *GithubDownloaderV3) convertGithubRelease(rel *github.RepositoryRelease)
|
||||
Updated: asset.UpdatedAt.Time,
|
||||
DownloadFunc: func() (io.ReadCloser, error) {
|
||||
g.sleep()
|
||||
asset, redir, err := g.client.Repositories.DownloadReleaseAsset(g.ctx, g.repoOwner, g.repoName, assetID, nil)
|
||||
asset, redirectURL, err := g.client.Repositories.DownloadReleaseAsset(g.ctx, g.repoOwner, g.repoName, assetID, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = g.RefreshRate()
|
||||
if err != nil {
|
||||
if err := g.RefreshRate(); err != nil {
|
||||
log.Error("g.client.RateLimits: %s", err)
|
||||
}
|
||||
if asset == nil {
|
||||
if redir != "" {
|
||||
if redirectURL != "" {
|
||||
g.sleep()
|
||||
req, err := http.NewRequestWithContext(g.ctx, "GET", redir, nil)
|
||||
req, err := http.NewRequestWithContext(g.ctx, "GET", redirectURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -152,7 +152,7 @@ func (m *Manager) GetRedisClient(connection string) redis.UniversalClient {
opts.Addrs = append(opts.Addrs, strings.Split(uri.Host, ",")...)
}
if uri.Path != "" {
if db, err := strconv.Atoi(uri.Path); err == nil {
if db, err := strconv.Atoi(uri.Path[1:]); err == nil {
opts.DB = db
}
}
@@ -168,7 +168,7 @@ func (m *Manager) GetRedisClient(connection string) redis.UniversalClient {
opts.Addrs = append(opts.Addrs, strings.Split(uri.Host, ",")...)
}
if uri.Path != "" {
if db, err := strconv.Atoi(uri.Path); err == nil {
if db, err := strconv.Atoi(uri.Path[1:]); err == nil {
opts.DB = db
}
}
@@ -186,7 +186,7 @@ func (m *Manager) GetRedisClient(connection string) redis.UniversalClient {
opts.Addrs = append(opts.Addrs, strings.Split(uri.Host, ",")...)
}
if uri.Path != "" {
if db, err := strconv.Atoi(uri.Path); err == nil {
if db, err := strconv.Atoi(uri.Path[1:]); err == nil {
opts.DB = db
}
}

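The [1:] is needed because url.Parse keeps the leading slash in Path, so the database index in redis://host:6379/0 arrives as "/0" and strconv.Atoi on the raw Path always fails. A small self-contained sketch (assuming the standard net/url, strconv and strings packages):

    func redisDBFromURL(raw string) (int, error) {
        u, err := url.Parse(raw) // e.g. "redis://127.0.0.1:6379/0"
        if err != nil {
            return 0, err
        }
        // u.Path is "/0": strconv.Atoi("/0") returns an error, hence uri.Path[1:] above.
        return strconv.Atoi(strings.TrimPrefix(u.Path, "/"))
    }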
@@ -54,7 +54,6 @@ func (m *mailNotifier) NotifyNewIssue(issue *models.Issue, mentions []*models.Us
|
||||
|
||||
func (m *mailNotifier) NotifyIssueChangeStatus(doer *models.User, issue *models.Issue, actionComment *models.Comment, isClosed bool) {
|
||||
var actionType models.ActionType
|
||||
issue.Content = ""
|
||||
if issue.IsPull {
|
||||
if isClosed {
|
||||
actionType = models.ActionClosePullRequest
|
||||
@@ -120,7 +119,6 @@ func (m *mailNotifier) NotifyMergePullRequest(pr *models.PullRequest, doer *mode
|
||||
log.Error("pr.LoadIssue: %v", err)
|
||||
return
|
||||
}
|
||||
pr.Issue.Content = ""
|
||||
if err := mailer.MailParticipants(pr.Issue, doer, models.ActionMergePullRequest, nil); err != nil {
|
||||
log.Error("MailParticipants: %v", err)
|
||||
}
|
||||
@@ -147,7 +145,6 @@ func (m *mailNotifier) NotifyPullRequestPushCommits(doer *models.User, pr *model
|
||||
if err := comment.LoadPushCommits(); err != nil {
|
||||
log.Error("comment.LoadPushCommits: %v", err)
|
||||
}
|
||||
comment.Content = ""
|
||||
|
||||
m.NotifyCreateIssueComment(doer, comment.Issue.Repo, comment.Issue, comment, nil)
|
||||
}
|
||||
|
||||
modules/private/restore_repo.go (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
// Copyright 2020 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package private
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// RestoreParams structure holds a data for restore repository
|
||||
type RestoreParams struct {
|
||||
RepoDir string
|
||||
OwnerName string
|
||||
RepoName string
|
||||
Units []string
|
||||
}
|
||||
|
||||
// RestoreRepo calls the internal RestoreRepo function
|
||||
func RestoreRepo(repoDir, ownerName, repoName string, units []string) (int, string) {
|
||||
reqURL := setting.LocalURL + "api/internal/restore_repo"
|
||||
|
||||
req := newInternalRequest(reqURL, "POST")
|
||||
req.SetTimeout(3*time.Second, 0) // since the request will spend much time, don't timeout
|
||||
req = req.Header("Content-Type", "application/json")
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
jsonBytes, _ := json.Marshal(RestoreParams{
|
||||
RepoDir: repoDir,
|
||||
OwnerName: ownerName,
|
||||
RepoName: repoName,
|
||||
Units: units,
|
||||
})
|
||||
req.Body(jsonBytes)
|
||||
resp, err := req.Response()
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, fmt.Sprintf("Unable to contact gitea: %v, could you confirm it's running?", err.Error())
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
var ret = struct {
|
||||
Err string `json:"err"`
|
||||
}{}
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, fmt.Sprintf("Response body error: %v", err.Error())
|
||||
}
|
||||
if err := json.Unmarshal(body, &ret); err != nil {
|
||||
return http.StatusInternalServerError, fmt.Sprintf("Response body Unmarshal error: %v", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
return http.StatusOK, fmt.Sprintf("Restore repo %s/%s successfully", ownerName, repoName)
|
||||
}
|
||||
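A sketch of calling the new helper (arguments illustrative, imports assumed): units selects which parts of the dump to restore, and an empty slice restores everything, per updateOptionsUnits earlier in this change.

    if status, msg := private.RestoreRepo(
        "/data/repo-dump/owner/repo", // directory produced by the dump
        "owner", "repo",
        []string{"wiki", "issues", "labels"}, // nil or empty restores every unit
    ); status != http.StatusOK {
        return fmt.Errorf("restore failed: %s", msg)
    }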
@@ -165,7 +165,7 @@ func (opts *Options) handle(w http.ResponseWriter, req *http.Request, opt *Optio
|
||||
log.Println("[Static] Serving " + file)
|
||||
}
|
||||
|
||||
if httpcache.HandleEtagCache(req, w, fi) {
|
||||
if httpcache.HandleFileETagCache(req, w, fi) {
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
@@ -198,17 +198,20 @@ func (m *Manager) FlushAll(baseCtx context.Context, timeout time.Duration) error
					wg.Done()
				}(mq)
			} else {
				log.Debug("Queue: %s is non-empty but is not flushable - adding 100 millisecond wait", mq.Name)
				go func() {
					<-time.After(100 * time.Millisecond)
				log.Debug("Queue: %s is non-empty but is not flushable", mq.Name)
					wg.Done()
				}()
			}

		}
		if allEmpty {
			log.Debug("All queues are empty")
			break
		}
		// Ensure there are always at least 100ms between loops but not more if we've actually been doing some flushing
		// but don't delay cancellation here.
		select {
		case <-ctx.Done():
		case <-time.After(100 * time.Millisecond):
		}
		wg.Wait()
	}
	return nil
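The pacing added here boils down to "wait about 100ms between iterations, but never delay cancellation". A minimal sketch of that select pattern, with illustrative names:

package main

import (
	"context"
	"time"
)

// waitOrCancel pauses for d, but returns immediately (false) if ctx is
// cancelled first, which is the same pattern FlushAll uses between flush loops.
func waitOrCancel(ctx context.Context, d time.Duration) bool {
	select {
	case <-ctx.Done():
		return false // shutdown requested: stop looping without extra delay
	case <-time.After(d):
		return true // full interval elapsed: continue the loop
	}
}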
@@ -114,41 +114,71 @@ func (q *ByteFIFOQueue) Run(atShutdown, atTerminate func(context.Context, func()
}

func (q *ByteFIFOQueue) readToChan() {
	for {
		// handle quick cancels
		select {
		case <-q.closed:
			// tell the pool to shutdown.
			q.cancel()
			return
		default:
			q.lock.Lock()
			bs, err := q.byteFIFO.Pop()
			if err != nil {
				q.lock.Unlock()
				log.Error("%s: %s Error on Pop: %v", q.typ, q.name, err)
				time.Sleep(time.Millisecond * 100)
				continue
			}

			if len(bs) == 0 {
				q.lock.Unlock()
				time.Sleep(time.Millisecond * 100)
				continue
	backOffTime := time.Millisecond * 100
	maxBackOffTime := time.Second * 3
	for {
		success, resetBackoff := q.doPop()
		if resetBackoff {
			backOffTime = 100 * time.Millisecond
		}

		if success {
			select {
			case <-q.closed:
				// tell the pool to shutdown.
				q.cancel()
				return
			default:
			}
		} else {
			select {
			case <-q.closed:
				// tell the pool to shutdown.
				q.cancel()
				return
			case <-time.After(backOffTime):
			}
			backOffTime += backOffTime / 2
			if backOffTime > maxBackOffTime {
				backOffTime = maxBackOffTime
			}
		}
	}
}

func (q *ByteFIFOQueue) doPop() (success, resetBackoff bool) {
	q.lock.Lock()
	defer q.lock.Unlock()
	bs, err := q.byteFIFO.Pop()
	if err != nil {
		log.Error("%s: %s Error on Pop: %v", q.typ, q.name, err)
		return
	}
	if len(bs) == 0 {
		return
	}

	resetBackoff = true

	data, err := unmarshalAs(bs, q.exemplar)
	if err != nil {
		log.Error("%s: %s Failed to unmarshal with error: %v", q.typ, q.name, err)
			q.lock.Unlock()
			time.Sleep(time.Millisecond * 100)
			continue
		return
	}

	log.Trace("%s %s: Task found: %#v", q.typ, q.name, data)
	q.WorkerPool.Push(data)
			q.lock.Unlock()
		}
	}
	success = true
	return
}

// Shutdown processing from this queue
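The refactor replaces fixed 100ms sleeps with a capped, growing backoff that resets whenever doPop finds work. The schedule itself can be sketched in isolation; the names here are illustrative, not part of the diff.

package main

import "time"

// nextBackoff mirrors the schedule above: grow the wait by half after an idle
// poll, cap it at three seconds, and drop back to 100ms once work was found.
func nextBackoff(current time.Duration, gotWork bool) time.Duration {
	const (
		minBackoff = 100 * time.Millisecond
		maxBackoff = 3 * time.Second
	)
	if gotWork {
		return minBackoff // reset, as doPop's resetBackoff result does
	}
	current += current / 2
	if current > maxBackoff {
		current = maxBackoff
	}
	return current
}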
@@ -5,6 +5,7 @@
package references

import (
	"bytes"
	"net/url"
	"regexp"
	"strconv"
@@ -14,6 +15,8 @@ import (
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/markup/mdstripper"
	"code.gitea.io/gitea/modules/setting"

	"github.com/yuin/goldmark/util"
)

var (
@@ -321,7 +324,7 @@ func FindRenderizableReferenceNumeric(content string, prOnly bool) (bool, *Rende
			return false, nil
		}
	}
	r := getCrossReference([]byte(content), match[2], match[3], false, prOnly)
	r := getCrossReference(util.StringToReadOnlyBytes(content), match[2], match[3], false, prOnly)
	if r == nil {
		return false, nil
	}
@@ -465,17 +468,16 @@ func findAllIssueReferencesBytes(content []byte, links []string) []*rawReference
}

func getCrossReference(content []byte, start, end int, fromLink bool, prOnly bool) *rawReference {
	refid := string(content[start:end])
	sep := strings.IndexAny(refid, "#!")
	sep := bytes.IndexAny(content[start:end], "#!")
	if sep < 0 {
		return nil
	}
	isPull := refid[sep] == '!'
	isPull := content[start+sep] == '!'
	if prOnly && !isPull {
		return nil
	}
	repo := refid[:sep]
	issue := refid[sep+1:]
	repo := string(content[start : start+sep])
	issue := string(content[start+sep+1 : end])
	index, err := strconv.ParseInt(issue, 10, 64)
	if err != nil {
		return nil
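The point of the change above is to index into the byte slice directly instead of materialising an intermediate string for the whole reference. A self-contained sketch of that slicing, with a made-up input:

package main

import (
	"bytes"
	"fmt"
)

// splitRef shows the slicing used above: search the raw bytes with
// bytes.IndexAny and only build strings for the parts that are kept.
func splitRef(content []byte, start, end int) (repo, issue string, isPull bool, ok bool) {
	sep := bytes.IndexAny(content[start:end], "#!") // no string conversion needed
	if sep < 0 {
		return "", "", false, false
	}
	isPull = content[start+sep] == '!'
	repo = string(content[start : start+sep])
	issue = string(content[start+sep+1 : end])
	return repo, issue, isPull, true
}

func main() {
	repo, issue, isPull, _ := splitRef([]byte("see owner/repo#123"), 4, 18)
	fmt.Println(repo, issue, isPull) // owner/repo 123 false
}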
@@ -228,7 +228,7 @@ func ListUnadoptedRepositories(query string, opts *models.ListOptions) ([]string
		found := false
	repoLoop:
		for i, repo := range repos {
			if repo.Name == name {
			if repo.LowerName == name {
				found = true
				repos = append(repos[:i], repos[i+1:]...)
				break repoLoop
@@ -22,9 +22,53 @@ import (
func getHookTemplates() (hookNames, hookTpls, giteaHookTpls []string) {
	hookNames = []string{"pre-receive", "update", "post-receive"}
	hookTpls = []string{
		fmt.Sprintf("#!/usr/bin/env %s\ndata=$(cat)\nexitcodes=\"\"\nhookname=$(basename $0)\nGIT_DIR=${GIT_DIR:-$(dirname $0)}\n\nfor hook in ${GIT_DIR}/hooks/${hookname}.d/*; do\ntest -x \"${hook}\" && test -f \"${hook}\" || continue\necho \"${data}\" | \"${hook}\"\nexitcodes=\"${exitcodes} $?\"\ndone\n\nfor i in ${exitcodes}; do\n[ ${i} -eq 0 ] || exit ${i}\ndone\n", setting.ScriptType),
		fmt.Sprintf("#!/usr/bin/env %s\nexitcodes=\"\"\nhookname=$(basename $0)\nGIT_DIR=${GIT_DIR:-$(dirname $0)}\n\nfor hook in ${GIT_DIR}/hooks/${hookname}.d/*; do\ntest -x \"${hook}\" && test -f \"${hook}\" || continue\n\"${hook}\" $1 $2 $3\nexitcodes=\"${exitcodes} $?\"\ndone\n\nfor i in ${exitcodes}; do\n[ ${i} -eq 0 ] || exit ${i}\ndone\n", setting.ScriptType),
		fmt.Sprintf("#!/usr/bin/env %s\ndata=$(cat)\nexitcodes=\"\"\nhookname=$(basename $0)\nGIT_DIR=${GIT_DIR:-$(dirname $0)}\n\nfor hook in ${GIT_DIR}/hooks/${hookname}.d/*; do\ntest -x \"${hook}\" && test -f \"${hook}\" || continue\necho \"${data}\" | \"${hook}\"\nexitcodes=\"${exitcodes} $?\"\ndone\n\nfor i in ${exitcodes}; do\n[ ${i} -eq 0 ] || exit ${i}\ndone\n", setting.ScriptType),
		fmt.Sprintf(`#!/usr/bin/env %s
data=$(cat)
exitcodes=""
hookname=$(basename $0)
GIT_DIR=${GIT_DIR:-$(dirname $0)/..}

for hook in ${GIT_DIR}/hooks/${hookname}.d/*; do
test -x "${hook}" && test -f "${hook}" || continue
echo "${data}" | "${hook}"
exitcodes="${exitcodes} $?"
done

for i in ${exitcodes}; do
[ ${i} -eq 0 ] || exit ${i}
done
`, setting.ScriptType),
		fmt.Sprintf(`#!/usr/bin/env %s
exitcodes=""
hookname=$(basename $0)
GIT_DIR=${GIT_DIR:-$(dirname $0/..)}

for hook in ${GIT_DIR}/hooks/${hookname}.d/*; do
test -x "${hook}" && test -f "${hook}" || continue
"${hook}" $1 $2 $3
exitcodes="${exitcodes} $?"
done

for i in ${exitcodes}; do
[ ${i} -eq 0 ] || exit ${i}
done
`, setting.ScriptType),
		fmt.Sprintf(`#!/usr/bin/env %s
data=$(cat)
exitcodes=""
hookname=$(basename $0)
GIT_DIR=${GIT_DIR:-$(dirname $0)/..}

for hook in ${GIT_DIR}/hooks/${hookname}.d/*; do
test -x "${hook}" && test -f "${hook}" || continue
echo "${data}" | "${hook}"
exitcodes="${exitcodes} $?"
done

for i in ${exitcodes}; do
[ ${i} -eq 0 ] || exit ${i}
done
`, setting.ScriptType),
	}
	giteaHookTpls = []string{
		fmt.Sprintf("#!/usr/bin/env %s\n%s hook --config=%s pre-receive\n", setting.ScriptType, util.ShellEscape(setting.AppPath), util.ShellEscape(setting.CustomConf)),
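Besides adjusting the GIT_DIR fallback, this hunk rewrites the one-line escaped templates as raw string literals. In Go the two forms produce identical strings; the raw form is simply easier to read and review. A tiny, shortened demonstration (the script body here is a stand-in, not the real template):

package main

import "fmt"

func main() {
	// Escaped one-liner versus raw literal: same bytes, different readability.
	escaped := fmt.Sprintf("#!/usr/bin/env %s\nhookname=$(basename $0)\n", "bash")
	raw := fmt.Sprintf(`#!/usr/bin/env %s
hookname=$(basename $0)
`, "bash")
	fmt.Println(escaped == raw) // true
}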
@@ -117,6 +117,8 @@ var (
	GracefulRestartable  bool
	GracefulHammerTime   time.Duration
	StartupTimeout       time.Duration
	PerWriteTimeout      = 30 * time.Second
	PerWritePerKbTimeout = 10 * time.Second
	StaticURLPrefix      string
	AbsoluteAssetURL     string

@@ -147,6 +149,8 @@ var (
		TrustedUserCAKeys       []string          `ini:"SSH_TRUSTED_USER_CA_KEYS"`
		TrustedUserCAKeysFile   string            `ini:"SSH_TRUSTED_USER_CA_KEYS_FILENAME"`
		TrustedUserCAKeysParsed []gossh.PublicKey `ini:"-"`
		PerWriteTimeout         time.Duration     `ini:"SSH_PER_WRITE_TIMEOUT"`
		PerWritePerKbTimeout    time.Duration     `ini:"SSH_PER_WRITE_PER_KB_TIMEOUT"`
	}{
		Disabled:           false,
		StartBuiltinServer: false,
@@ -159,6 +163,8 @@ var (
		MinimumKeySizeCheck:  true,
		MinimumKeySizes:      map[string]int{"ed25519": 256, "ed25519-sk": 256, "ecdsa": 256, "ecdsa-sk": 256, "rsa": 2048},
		ServerHostKeys:       []string{"ssh/gitea.rsa", "ssh/gogs.rsa"},
		PerWriteTimeout:      PerWriteTimeout,
		PerWritePerKbTimeout: PerWritePerKbTimeout,
	}

	// Security settings
@@ -607,6 +613,8 @@ func NewContext() {
	GracefulRestartable = sec.Key("ALLOW_GRACEFUL_RESTARTS").MustBool(true)
	GracefulHammerTime = sec.Key("GRACEFUL_HAMMER_TIME").MustDuration(60 * time.Second)
	StartupTimeout = sec.Key("STARTUP_TIMEOUT").MustDuration(0 * time.Second)
	PerWriteTimeout = sec.Key("PER_WRITE_TIMEOUT").MustDuration(PerWriteTimeout)
	PerWritePerKbTimeout = sec.Key("PER_WRITE_PER_KB_TIMEOUT").MustDuration(PerWritePerKbTimeout)

	defaultAppURL := string(Protocol) + "://" + Domain
	if (Protocol == HTTP && HTTPPort != "80") || (Protocol == HTTPS && HTTPPort != "443") {
@@ -772,6 +780,8 @@ func NewContext() {
	}

	SSH.ExposeAnonymous = sec.Key("SSH_EXPOSE_ANONYMOUS").MustBool(false)
	SSH.PerWriteTimeout = sec.Key("SSH_PER_WRITE_TIMEOUT").MustDuration(PerWriteTimeout)
	SSH.PerWritePerKbTimeout = sec.Key("SSH_PER_WRITE_PER_KB_TIMEOUT").MustDuration(PerWritePerKbTimeout)

	if err = Cfg.Section("oauth2").MapTo(&OAuth2); err != nil {
		log.Fatal("Failed to OAuth2 settings: %v", err)
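The new keys are read with the same MustDuration pattern as the existing graceful settings: if the key is absent or unparsable, the compiled-in default is kept. A minimal, self-contained illustration using go-ini; the section and values below are invented for the example.

package main

import (
	"fmt"
	"time"

	"gopkg.in/ini.v1"
)

func main() {
	// An in-memory ini snippet standing in for app.ini; only PER_WRITE_TIMEOUT is set.
	cfg, _ := ini.Load([]byte("[server]\nPER_WRITE_TIMEOUT = 45s\n"))
	sec := cfg.Section("server")

	perWrite := sec.Key("PER_WRITE_TIMEOUT").MustDuration(30 * time.Second)     // parsed: 45s
	perKb := sec.Key("PER_WRITE_PER_KB_TIMEOUT").MustDuration(10 * time.Second) // key absent: falls back to 10s
	fmt.Println(perWrite, perKb)
}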
@@ -7,12 +7,15 @@ package ssh
import (
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"

	"github.com/gliderlabs/ssh"
)

func listen(server *ssh.Server) {
	gracefulServer := graceful.NewServer("tcp", server.Addr, "SSH")
	gracefulServer.PerWriteTimeout = setting.SSH.PerWriteTimeout
	gracefulServer.PerWritePerKbTimeout = setting.SSH.PerWritePerKbTimeout

	err := gracefulServer.ListenAndServe(server.Serve)
	if err != nil {
@@ -96,7 +96,7 @@ func (l *LocalStorage) Save(path string, r io.Reader, size int64) (int64, error)
		return 0, err
	}

	if err := os.Rename(tmp.Name(), p); err != nil {
	if err := util.Rename(tmp.Name(), p); err != nil {
		return 0, err
	}
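os.Rename is swapped for util.Rename here, presumably a wrapper that tolerates transient failures (for example, renaming onto a file that is briefly held open on Windows). The sketch below shows what such a wrapper could look like; it is a hypothetical stand-in, not the actual modules/util implementation.

package main

import (
	"os"
	"time"
)

// renameWithRetry is a hypothetical retrying wrapper around os.Rename: try a
// few times with a short pause before surfacing the last error.
func renameWithRetry(oldpath, newpath string) error {
	var err error
	for attempt := 0; attempt < 5; attempt++ {
		if err = os.Rename(oldpath, newpath); err == nil {
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return err
}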
@@ -31,6 +31,8 @@ type CreateOrgOption struct {
	RepoAdminChangeTeamAccess bool `json:"repo_admin_change_team_access"`
}

// TODO: make EditOrgOption fields optional after https://gitea.com/go-chi/binding/pulls/5 got merged

// EditOrgOption options for editing an organization
type EditOrgOption struct {
	FullName string `json:"full_name"`
@@ -40,5 +42,5 @@ type EditOrgOption struct {
	// possible values are `public`, `limited` or `private`
	// enum: public,limited,private
	Visibility string `json:"visibility" binding:"In(,public,limited,private)"`
	RepoAdminChangeTeamAccess bool `json:"repo_admin_change_team_access"`
	RepoAdminChangeTeamAccess *bool `json:"repo_admin_change_team_access"`
}
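Changing RepoAdminChangeTeamAccess to *bool lets the API tell "field not sent" apart from an explicit false, so an edit request can leave the setting untouched when the client omits it. A minimal illustration of that distinction:

package main

import (
	"encoding/json"
	"fmt"
)

// editOpts mirrors just the field in question, for illustration only.
type editOpts struct {
	RepoAdminChangeTeamAccess *bool `json:"repo_admin_change_team_access"`
}

func main() {
	var absent, explicit editOpts
	_ = json.Unmarshal([]byte(`{}`), &absent)
	_ = json.Unmarshal([]byte(`{"repo_admin_change_team_access": false}`), &explicit)

	fmt.Println(absent.RepoAdminChangeTeamAccess == nil)                                            // true: not provided, keep the current value
	fmt.Println(explicit.RepoAdminChangeTeamAccess != nil && !*explicit.RepoAdminChangeTeamAccess)  // true: explicitly disabled
}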
Some files were not shown because too many files have changed in this diff.