Mirror of https://github.com/go-gitea/gitea.git (synced 2025-11-03 08:02:36 +09:00)

Compare commits

135 Commits
Commit SHA1s in this comparison:

a6f9ebfeb9 14de28b876 e4120bbc89 37abfcaf8a c719841f0d f9e150002e 2f4f2852fc b25a571bc9 f9bbed028c
7e084341fe 7d75eede04 3db98bef99 484fe075f4 de3216ee55 353d88a42e a17fce31a9 71e1ebfa60 afe9d2cadd
012e45a4c1 d25ff0d695 6eaebda1b5 6100935a77 6de75224de 9086916eb7 877040e652 91f5be889a a818a48c76
76e1c130fb 148a417774 6081948ef0 48bd54286c c69b3b65f3 fe91d9617b 711ca52f1f a15f0cb010 2051f850ef
3ae4c4898b 3a77465e4e fc8c23edb7 31df892059 9879e23c57 56a3b50136 9a8532d928 d29a0fc3be 04517e17d6
3a222ee416 add85f5a85 76ad83f05e 714ecd9f1e a08856606e 7be2d7b136 6f3596e33c 0305a73633 6cd1ccef3d
ea0fe83888 1cec7f5ab5 1cb1101d44 653dff4e57 b661bbaed7 20ae184967 15b44496ec 0d0ff5e32a f25f7c592f
e8cf04bad7 251fdaaf41 f572fb906f 9340269d84 34650b925b 718e0db12e 6110ddc280 c7d8181a70 548ae3eb98
2c383d812d ef12b8de80 dd1ba34ee5 1fbdf96c34 5159055278 06da10b9a1 175ebc6f88 3aecea2e6e cae8c63517
8ace5c1161 a87b813955 3baeec745c befb6bea22 79f0b1a50b 79a3d277e5 eb748ff79e c5770195d9 a20ccec369
9c2b7a196e 1e278b15c2 fde6ff6a75 51f4f8c393 f5845e6497 c927ebd119 245596e130 1c3ae6d05e a1e57ebe6b
73ae93b007 dc030f64a7 6e0a08d753 7b1153e943 6995be66e7 28971c7c15 eb5e6f09eb bf6264c1db 5b6b7e79cf
766272b154 4707d4b8a9 4b8b214108 ebae7e1512 122917f4d5 9cf5739c0f 4b6556565f 7ce938b6c7 6139834e76
b673a24ee6 fd35f56e87 1f8df5dd89 6a025d8b4a 270c7f36db 0e448fb96d 659b946eda 56ab5ec9ea 3b13c5d41a
d27f061863 07489d0405 30708d9ffe 1b08dfeacf e5ded0ee19 a384109244 67ceb61fe3 5cb5101720 6f261fdf47
@@ -527,7 +527,7 @@ steps:

- name: release-branch
  pull: always
-  image: plugins/s3:1
+  image: woodpeckerci/plugin-s3:latest
  settings:
    acl: public-read
    bucket: gitea-artifacts

@@ -548,7 +548,7 @@ steps:
    - push

- name: release-main
-  image: plugins/s3:1
+  image: woodpeckerci/plugin-s3:latest
  settings:
    acl: public-read
    bucket: gitea-artifacts

@@ -623,7 +623,7 @@ steps:

- name: release-tag
  pull: always
-  image: plugins/s3:1
+  image: woodpeckerci/plugin-s3:latest
  settings:
    acl: public-read
    bucket: gitea-artifacts

@@ -9,7 +9,6 @@ linters:
  - unused
  - structcheck
  - varcheck
  - golint
  - dupl
  #- gocyclo # The cyclomatic complexety of a lot of functions is too high, we should refactor those another time.
  - gofmt
159 CHANGELOG.md

@@ -4,6 +4,165 @@ This changelog goes through all the changes that have been made in each release
without substantial changes to our git log; to see the highlights of what has
been added to each release, please refer to the [blog](https://blog.gitea.io).

## [1.15.10](https://github.com/go-gitea/gitea/releases/tag/v1.15.10) - 2022-01-14

* BUGFIXES
  * Fix inconsistent PR comment counts (#18260) (#18261)
  * Fix release link broken (#18252) (#18253)
  * Fix update user from site administration page bug (#18250) (#18251)
  * Set HeadCommit when creating tags (#18116) (#18173)
  * Use correct translation key for error messages due to max repo limits (#18135 & #18153) (#18152)
  * Fix purple color in suggested label colors (#18241) (#18242)
* SECURITY
  * Bump mermaid from 8.10.1 to 8.13.8 (#18198) (#18206)

## [1.15.9](https://github.com/go-gitea/gitea/releases/tag/v1.15.9) - 2021-12-30

* BUGFIXES
  * Fix wrong redirect on org labels (#18128) (#18134)
  * Fix: unstable sort skips/duplicates issues across pages (#18094) (#18095)
  * Revert "Fix delete u2f keys bug (#18042)" (#18107)
  * Migrating wiki don't require token, so we should move it out of the require form (#17645) (#18104)
  * Prevent NPE if gitea uploader fails to open url (#18080) (#18101)
  * Reset locale on login (#17734) (#18100)
  * Correctly handle failed migrations (#17575) (#18099)
  * Instead of using routerCtx just escape the url before routing (#18086) (#18098)
  * Quote references to the user table in consistency checks (#18072) (#18073)
  * Add NotFound handler (#18062) (#18067)
  * Ensure that git repository is closed before transfer (#18049) (#18057)
  * Use common sessioner for API and web routes (#18114)
* TRANSLATION
  * Fix code search result hint on zh-CN (#18053)

## [1.15.8](https://github.com/go-gitea/gitea/releases/tag/v1.15.8) - 2021-12-20

* BUGFIXES
  * Move POST /{username}/action/{action} to simply POST /{username} (#18045) (#18046)
  * Fix delete u2f keys bug (#18040) (#18042)
  * Reset Session ID on login (#18018) (#18041)
  * Prevent off-by-one error on comments on newly appended lines (#18029) (#18035)
  * Stop printing 03d after escaped characters in logs (#18030) (#18034)
  * Reset locale on login (#18023) (#18025)
  * Fix reset password email template (#17025) (#18022)
  * Fix outType on gitea dump (#18000) (#18016)
  * Ensure complexity, minlength and isPwned are checked on password setting (#18005) (#18015)
  * Fix rename notification bug (#18011)
  * Prevent double decoding of % in url params (#17997) (#18001)
  * Prevent hang in git cat-file if the repository is not a valid repository (Partial #17991) (#17992)
  * Prevent deadlock in create issue (#17970) (#17982)
* TESTING
  * Use non-expiring key. (#17984) (#17985)

## [1.15.7](https://github.com/go-gitea/gitea/releases/tag/v1.15.7) - 2021-12-01

* ENHANCEMENTS
  * Only allow webhook to send requests to allowed hosts (#17482) (#17510)
  * Fix login redirection links (#17451) (#17473)
* BUGFIXES
  * Fix database inconsistent when admin change user email (#17549) (#17840)
  * Use correct user on releases (#17806) (#17818)
  * Fix commit count in tag view (#17698) (#17790)
  * Fix close issue but time watcher still running (#17643) (#17761)
  * Fix Migrate Description (#17692) (#17727)
  * Fix bug when project board get open issue number (#17703) (#17726)
  * Return 400 but not 500 when request archive with wrong format (#17691) (#17700)
  * Fix bug when read mysql database max lifetime (#17682) (#17690)
  * Fix database deadlock when update issue labels (#17649) (#17665)
  * Fix bug on detect issue/comment writer (#17592)
  * Remove appSubUrl from pasted images (#17572) (#17588)
  * Make `ParsePatch` more robust (#17573) (#17580)
  * Fix stats upon searching issues (#17566) (#17578)
  * Escape issue titles in comments list (#17555) (#17556)
  * Fix zero created time bug on commit api (#17546) (#17547)
  * Fix database keyword quote problem on migration v161 (#17522) (#17523)
  * Fix email with + when active (#17518) (#17520)
  * Stop double encoding blame commit messages (#17498) (#17500)
  * Quote the table name in CountOrphanedObjects (#17487) (#17488)
  * Run Migrate in Install rather than just SyncTables (#17475) (#17486)
* BUILD
  * Fix golangci-lint warnings (#17598 et al) (#17668)
* MISC
  * Preserve color when inverting emojis (#17797) (#17799)

## [1.15.6](https://github.com/go-gitea/gitea/releases/tag/v1.15.6) - 2021-10-28

* BUGFIXES
  * Prevent panic in serv.go with Deploy Keys (#17434) (#17435)
  * Fix CSV render error (#17406) (#17431)
  * Read expected buffer size (#17409) (#17430)
  * Ensure that restricted users can access repos for which they are members (#17460) (#17464)
  * Make commit-statuses popup show correctly (#17447) (#17466)
* TESTING
  * Add integration tests for private.NoServCommand and private.ServCommand (#17456) (#17463)

## [1.15.5](https://github.com/go-gitea/gitea/releases/tag/v1.15.5) - 2021-10-21

* SECURITY
  * Upgrade Bluemonday to v1.0.16 (#17372) (#17374)
  * Ensure correct SSH permissions check for private and restricted users (#17370) (#17373)
* BUGFIXES
  * Prevent NPE in CSV diff rendering when column removed (#17018) (#17377)
  * Offer rsa-sha2-512 and rsa-sha2-256 algorithms in internal SSH (#17281) (#17376)
  * Don't panic if we fail to parse U2FRegistration data (#17304) (#17371)
  * Ensure popup text is aligned left (backport for 1.15) (#17343)
  * Ensure that git daemon export ok is created for mirrors (#17243) (#17306)
  * Disable core.protectNTFS (#17300) (#17302)
  * Use pointer for wrappedConn methods (#17295) (#17296)
  * AutoRegistration is supposed to be working with disabled registration (backport) (#17292)
  * Handle duplicate keys on GPG key ring (#17242) (#17284)
  * Fix SVG side by side comparison link (#17375) (#17391)

## [1.15.4](https://github.com/go-gitea/gitea/releases/tag/v1.15.4) - 2021-10-08

* BUGFIXES
  * Raw file API: don't try to interpret 40char filenames as commit SHA (#17185) (#17272)
  * Don't allow merged PRs to be reopened (#17192) (#17271)
  * Fix incorrect repository count on organization tab of dashboard (#17256) (#17266)
  * Fix unwanted team review request deletion (#17257) (#17264)
  * Fix broken Activities link in team dashboard (#17255) (#17258)
  * API pull's head/base have correct permission (#17214) (#17245)
  * Fix strange behavior of DownloadPullDiffOrPatch in incorrect index (#17223) (#17227)
  * Upgrade xorm to v1.2.5 (#17177) (#17188)
  * Fix missing repo link in issue/pull assigned emails (#17183) (#17184)
  * Fix bug of get context user (#17169) (#17172)
  * Nicely handle missing user in collaborations (#17049) (#17166)
  * Add Horizontal scrollbar to inner menu on Chrome (#17086) (#17164)
  * Fix wrong i18n keys (#17150) (#17153)
  * Fix Archive Creation: correct transaction ending (#17151)
  * Prevent panic in Org mode HighlightCodeBlock (#17140) (#17141)
  * Create doctor command to fix repo_units broken by dumps from 1.14.3-1.14.6 (#17136) (#17137)
* ENHANCEMENT
  * Check user instead of organization when creating a repo from a template via API (#16346) (#17195)
* TRANSLATION
  * v1.15 fix Sprintf format 'verbs' in locale files (#17187)

## [1.15.3](https://github.com/go-gitea/gitea/releases/tag/v1.15.3) - 2021-09-19

* ENHANCEMENTS
  * Add fluid to ui container class to remove margin (#16396) (#16976)
  * Add caller to cat-file batch calls (#17082) (#17089)
* BUGFIXES
  * Render full plain readme. (#17083) (#17090)
  * Upgrade xorm to v1.2.4 (#17059)
  * Fix bug of migrate comments which only fetch one page (#17055) (#17058)
  * Do not show issue context popup on external issues (#17050) (#17054)
  * Decrement Fork Num when converting from Fork (#17035) (#17046)
  * Correctly rollback in ForkRepository (#17034) (#17045)
  * Fix missing close in WalkGitLog (#17008) (#17009)
  * Add prefix to SVG id/class attributes (#16997) (#17000)
  * Fix bug of migrated repository not index (#16991) (#16996)
  * Skip AllowedUserVisibilityModes validation on update user if it is an organisation (#16988) (#16990)
  * Fix storage Iterate bug and Add storage doctor to delete garbage attachments (#16971) (#16977)
  * Fix issue with issue default mail template (#16956) (#16975)
  * Ensure that rebase conflicts are handled in updates (#16952) (#16960)
  * Prevent panic on diff generation (#16950) (#16951)

## [1.15.2](https://github.com/go-gitea/gitea/releases/tag/v1.15.2) - 2021-09-03

* BUGFIXES
  * Add unique constraint back into issue_index (#16938)
  * Close storage objects before cleaning (#16934) (#16942)

## [1.15.1](https://github.com/go-gitea/gitea/releases/tag/v1.15.1) - 2021-09-02

* BUGFIXES
@@ -1,7 +1,7 @@
###################################
#Build stage
-FROM golang:1.16-alpine3.13 AS build-env
+FROM techknowlogick/go:1.16-alpine3.13 AS build-env

ARG GOPROXY
ENV GOPROXY ${GOPROXY:-direct}

@@ -1,7 +1,7 @@
###################################
#Build stage
-FROM golang:1.16-alpine3.13 AS build-env
+FROM techknowlogick/go:1.16-alpine3.13 AS build-env

ARG GOPROXY
ENV GOPROXY ${GOPROXY:-direct}
@@ -12,9 +12,6 @@
  <a href="https://discord.gg/Gitea" title="Join the Discord chat at https://discord.gg/Gitea">
    <img src="https://img.shields.io/discord/322538954119184384.svg">
  </a>
-  <a href="https://microbadger.com/images/gitea/gitea" title="Get your own image badge on microbadger.com">
-    <img src="https://images.microbadger.com/badges/image/gitea/gitea.svg">
-  </a>
  <a href="https://codecov.io/gh/go-gitea/gitea" title="Codecov">
    <img src="https://codecov.io/gh/go-gitea/gitea/branch/main/graph/badge.svg">
  </a>
3 build.go

@@ -2,7 +2,8 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

-//+build vendor
+//go:build vendor
+// +build vendor

package main

@@ -2,6 +2,7 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

+//go:build ignore
// +build ignore

package main

@@ -3,6 +3,7 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

+//go:build ignore
// +build ignore

package main

@@ -1,3 +1,4 @@
+//go:build ignore
// +build ignore

package main

@@ -1,3 +1,4 @@
+//go:build ignore
// +build ignore

package main
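The hunks above are the Go 1.17 build-constraint migration: a new `//go:build` line is added while the legacy `// +build` line is kept so older toolchains still honour the tag. A minimal sketch of a file gated behind such a tag (the `vendor` tag mirrors the build.go hunk; the rest of the file is illustrative and not part of this diff):

//go:build vendor
// +build vendor

// This file is compiled only when the "vendor" build tag is supplied,
// e.g. `go run -tags vendor build.go`. With Go 1.17+ the //go:build line
// is authoritative; gofmt keeps the legacy // +build line in sync.
package main

import "fmt"

func main() {
	fmt.Println("running the vendor-tagged tool")
}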
@@ -29,6 +29,7 @@ async function processFile(file, {prefix, fullName} = {}) {
    plugins: extendDefaultPlugins([
      'removeXMLNS',
      'removeDimensions',
      {name: 'prefixIds', params: {prefix: () => name}},
      {
        name: 'addClassesToSVGElement',
        params: {classNames: ['svg', name]},
@@ -6,6 +6,7 @@
// gocovmerge takes the results from multiple `go test -coverprofile` runs and
// merges them into one profile

+//go:build ignore
// +build ignore

package main
@@ -335,6 +335,10 @@ func runChangePassword(c *cli.Context) error {
	if err := initDB(); err != nil {
		return err
	}
	if len(c.String("password")) < setting.MinPasswordLength {
		return fmt.Errorf("Password is not long enough. Needs to be at least %d", setting.MinPasswordLength)
	}
+
+	if !pwd.IsComplexEnough(c.String("password")) {
+		return errors.New("Password does not meet complexity requirements")
+	}
@@ -43,7 +43,11 @@ func runDocs(ctx *cli.Context) error {
		// Clean up markdown. The following bug was fixed in v2, but is present in v1.
		// It affects markdown output (even though the issue is referring to man pages)
		// https://github.com/urfave/cli/issues/1040
-		docs = docs[strings.Index(docs, "#"):]
+		firstHashtagIndex := strings.Index(docs, "#")
+
+		if firstHashtagIndex > 0 {
+			docs = docs[firstHashtagIndex:]
+		}
	}

	out := os.Stdout
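The guard matters because `strings.Index` returns -1 when no `#` is present, and slicing with a negative index panics at runtime. A standalone sketch of the same pattern (the sample inputs are hypothetical, not taken from the diff):

package main

import (
	"fmt"
	"strings"
)

// trimToFirstHeading mirrors the guarded slice in the runDocs hunk above:
// strings.Index returns -1 when "#" is absent, and docs[-1:] would panic,
// so the slice only happens when a heading marker was actually found.
func trimToFirstHeading(docs string) string {
	firstHashtagIndex := strings.Index(docs, "#")
	if firstHashtagIndex > 0 {
		return docs[firstHashtagIndex:]
	}
	return docs
}

func main() {
	fmt.Println(trimToFirstHeading("preamble\n# NAME\n..."))  // "# NAME\n..."
	fmt.Println(trimToFirstHeading("no heading marker here")) // unchanged, no panic
}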
@@ -124,7 +124,6 @@ func runRecreateTable(ctx *cli.Context) error {
}

func runDoctor(ctx *cli.Context) error {
-
	// Silence the default loggers
	log.DelNamedLogger("console")
	log.DelNamedLogger(log.DEFAULT)
@@ -87,7 +87,7 @@ func (o outputType) String() string {
}

var outputTypeEnum = &outputType{
-	Enum:    []string{"zip", "tar", "tar.gz", "tar.xz", "tar.bz2"},
+	Enum:    []string{"zip", "rar", "tar", "sz", "tar.gz", "tar.xz", "tar.bz2", "tar.br", "tar.lz4"},
	Default: "zip",
}

@@ -153,12 +153,16 @@ func fatal(format string, args ...interface{}) {
func runDump(ctx *cli.Context) error {
	var file *os.File
	fileName := ctx.String("file")
+	outType := ctx.String("type")
	if fileName == "-" {
		file = os.Stdout
		err := log.DelLogger("console")
		if err != nil {
			fatal("Deleting default logger failed. Can not write to stdout: %v", err)
		}
+	} else {
+		fileName = strings.TrimSuffix(fileName, path.Ext(fileName))
+		fileName += "." + outType
	}
	setting.NewContext()
	// make sure we are logging to the console no matter what the configuration tells us do to

@@ -197,7 +201,6 @@ func runDump(ctx *cli.Context) error {
	}

	verbose := ctx.Bool("verbose")
-	outType := ctx.String("type")
	var iface interface{}
	if fileName == "-" {
		iface, err = archiver.ByExtension(fmt.Sprintf(".%s", outType))
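The runDump change reads the output type up front and rewrites the target filename so its extension matches the chosen archive type. A minimal sketch of that filename handling (the helper name and inputs are illustrative, not Gitea's code):

package main

import (
	"fmt"
	"path"
	"strings"
)

// deriveDumpName mirrors the filename handling shown in the runDump hunk:
// strip the current extension (if any) and append the selected output type.
func deriveDumpName(fileName, outType string) string {
	fileName = strings.TrimSuffix(fileName, path.Ext(fileName))
	return fileName + "." + outType
}

func main() {
	fmt.Println(deriveDumpName("gitea-dump-1642003200", "tar.gz")) // gitea-dump-1642003200.tar.gz
	fmt.Println(deriveDumpName("backup.zip", "tar.bz2"))           // backup.tar.bz2
}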
@@ -2,6 +2,7 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

+//go:build bindata
// +build bindata

package cmd

@@ -2,6 +2,7 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

+//go:build !bindata
// +build !bindata

package cmd
@@ -194,6 +194,10 @@ func listen(m http.Handler, handleRedirector bool) error {
		listenAddr = net.JoinHostPort(listenAddr, setting.HTTPPort)
	}
	log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubURL)
+	// This can be useful for users, many users do wrong to their config and get strange behaviors behind a reverse-proxy.
+	// A user may fix the configuration mistake when he sees this log.
+	// And this is also very helpful to maintainers to provide help to users to resolve their configuration problems.
+	log.Info("AppURL(ROOT_URL): %s", setting.AppURL)

	if setting.LFS.StartServer {
		log.Info("LFS server enabled")
@@ -576,6 +576,8 @@ PATH =
;;
;; (Go-Git only) Don't cache objects greater than this in memory. (Set to 0 to disable.)
;LARGE_OBJECT_THRESHOLD = 1048576
+;; Set to true to forcibly set core.protectNTFS=false
+;DISABLE_CORE_PROTECT_NTFS=false

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

@@ -1386,6 +1388,13 @@ PATH =
;; Deliver timeout in seconds
;DELIVER_TIMEOUT = 5
;;
+;; Webhook can only call allowed hosts for security reasons. Comma separated list, eg: external, 192.168.1.0/24, *.mydomain.com
+;; Built-in: loopback (for localhost), private (for LAN/intranet), external (for public hosts on internet), * (for all hosts)
+;; CIDR list: 1.2.3.0/8, 2001:db8::/32
+;; Wildcard hosts: *.mydomain.com, 192.168.100.*
+;; Default to * for 1.15.x, external for 1.16 and later
+;ALLOWED_HOST_LIST = *
+;;
;; Allow insecure certification
;SKIP_TLS_VERIFY = false
;;
@@ -18,9 +18,9 @@ params:
  description: Git with a cup of tea
  author: The Gitea Authors
  website: https://docs.gitea.io
-  version: 1.14.6
+  version: 1.15.8
  minGoVersion: 1.16
-  goVersion: 1.16
+  goVersion: 1.17
  minNodeVersion: 12.17

outputs:
@@ -545,6 +545,14 @@ Define allowed algorithms and their minimum key length (use -1 to disable a type

- `QUEUE_LENGTH`: **1000**: Hook task queue length. Use caution when editing this value.
- `DELIVER_TIMEOUT`: **5**: Delivery timeout (sec) for shooting webhooks.
- `ALLOWED_HOST_LIST`: `*`: Default to `*` for 1.15.x, `external` for 1.16 and later. Webhook can only call allowed hosts for security reasons. Comma separated list (see the sketch after this list).
  - Built-in networks:
    - `loopback`: 127.0.0.0/8 for IPv4 and ::1/128 for IPv6, localhost is included.
    - `private`: RFC 1918 (10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) and RFC 4193 (FC00::/7). Also called LAN/Intranet.
    - `external`: A valid non-private unicast IP, you can access all hosts on public internet.
    - `*`: All hosts are allowed.
  - CIDR list: `1.2.3.0/8` for IPv4 and `2001:db8::/32` for IPv6
  - Wildcard hosts: `*.mydomain.com`, `192.168.100.*`
- `SKIP_TLS_VERIFY`: **false**: Allow insecure certification.
- `PAGING_NUM`: **10**: Number of webhook history events that are shown in one page.
- `PROXY_URL`: ****: Proxy server URL, support http://, https://, socks://, blank will follow environment http_proxy/https_proxy
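For the CIDR form of `ALLOWED_HOST_LIST`, the check boils down to parsing the entry and testing whether the webhook target's resolved IP falls inside that network. The sketch below illustrates the idea with the standard library only; it is not Gitea's implementation, and the function name is hypothetical:

package main

import (
	"fmt"
	"net"
)

// allowedByCIDR is an illustrative sketch of how a CIDR entry from
// ALLOWED_HOST_LIST could be checked against a resolved webhook target IP.
func allowedByCIDR(entry string, ip net.IP) (bool, error) {
	_, network, err := net.ParseCIDR(entry)
	if err != nil {
		return false, err
	}
	return network.Contains(ip), nil
}

func main() {
	ok, _ := allowedByCIDR("192.168.1.0/24", net.ParseIP("192.168.1.17"))
	fmt.Println(ok) // true
	ok, _ = allowedByCIDR("192.168.1.0/24", net.ParseIP("10.0.0.5"))
	fmt.Println(ok) // false
}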
@@ -839,6 +847,7 @@ NB: You must have `DISABLE_ROUTER_LOG` set to `false` for this option to take effect.

- `VERBOSE_PUSH`: **true**: Print status information about pushes as they are being processed.
- `VERBOSE_PUSH_DELAY`: **5s**: Only print verbose information if push takes longer than this delay.
- `LARGE_OBJECT_THRESHOLD`: **1048576**: (Go-Git only), don't cache objects greater than this in memory. (Set to 0 to disable.)
- `DISABLE_CORE_PROTECT_NTFS`: **false**: Set to true to forcibly set `core.protectNTFS` to false.

## Git - Timeout settings (`git.timeout`)

- `DEFAULT`: **360**: Git operations default timeout seconds.
- `MIGRATE`: **600**: Migrate external repositories timeout seconds.
8 go.mod

@@ -9,7 +9,7 @@ require (
	gitea.com/go-chi/binding v0.0.0-20210301195521-1fe1c9a555e7
	gitea.com/go-chi/cache v0.0.0-20210110083709-82c4c9ce2d5e
	gitea.com/go-chi/captcha v0.0.0-20210110083842-e7696c336a1e
-	gitea.com/go-chi/session v0.0.0-20210108030337-0cb48c5ba8ee
+	gitea.com/go-chi/session v0.0.0-20211218221615-e3605d8b28b8
	gitea.com/lunny/levelqueue v0.4.1
	github.com/Microsoft/go-winio v0.5.0 // indirect
	github.com/NYTimes/gziphandler v1.1.1

@@ -80,7 +80,7 @@ require (
	github.com/mattn/go-runewidth v0.0.13 // indirect
	github.com/mattn/go-sqlite3 v1.14.8
	github.com/mholt/archiver/v3 v3.5.0
-	github.com/microcosm-cc/bluemonday v1.0.15
+	github.com/microcosm-cc/bluemonday v1.0.16
	github.com/miekg/dns v1.1.43 // indirect
	github.com/minio/md5-simd v1.1.2 // indirect
	github.com/minio/minio-go/v7 v7.0.12

@@ -125,7 +125,7 @@ require (
	go.uber.org/multierr v1.7.0 // indirect
	go.uber.org/zap v1.18.1 // indirect
	golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
-	golang.org/x/net v0.0.0-20210614182718-04defd469f4e
+	golang.org/x/net v0.0.0-20211020060615-d418f374d309
	golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914
	golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
	golang.org/x/text v0.3.6

@@ -139,7 +139,7 @@ require (
	mvdan.cc/xurls/v2 v2.2.0
	strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251
	xorm.io/builder v0.3.9
-	xorm.io/xorm v1.2.2
+	xorm.io/xorm v1.2.5
)

replace github.com/hashicorp/go-version => github.com/6543/go-version v1.3.1
18 go.sum
@@ -47,8 +47,8 @@ gitea.com/go-chi/cache v0.0.0-20210110083709-82c4c9ce2d5e h1:zgPGaf3kXP0cVm9J0l8
|
||||
gitea.com/go-chi/cache v0.0.0-20210110083709-82c4c9ce2d5e/go.mod h1:k2V/gPDEtXGjjMGuBJiapffAXTv76H4snSmlJRLUhH0=
|
||||
gitea.com/go-chi/captcha v0.0.0-20210110083842-e7696c336a1e h1:YjaQU6XFicdhPN+MlGolcXO8seYY2+EY5g7vZPB17CQ=
|
||||
gitea.com/go-chi/captcha v0.0.0-20210110083842-e7696c336a1e/go.mod h1:nfA7JaGv3hbGQ1ktdhAsZhdS84qKffI8NMlHr+Opsog=
|
||||
gitea.com/go-chi/session v0.0.0-20210108030337-0cb48c5ba8ee h1:9U6HuKUBt/cGK6T/64dEuz0r7Yp97WAAEJvXHDlY3ws=
|
||||
gitea.com/go-chi/session v0.0.0-20210108030337-0cb48c5ba8ee/go.mod h1:Ozg8IchVNb/Udg+ui39iHRYqVHSvf3C99ixdpLR8Vu0=
|
||||
gitea.com/go-chi/session v0.0.0-20211218221615-e3605d8b28b8 h1:tJQRXgZigkLeeW9LPlps9G9aMoE6LAmqigLA+wxmd1Q=
|
||||
gitea.com/go-chi/session v0.0.0-20211218221615-e3605d8b28b8/go.mod h1:fc/pjt5EqNKgqQXYzcas1Z5L5whkZHyOvTA7OzWVJck=
|
||||
gitea.com/lunny/levelqueue v0.4.1 h1:RZ+AFx5gBsZuyqCvofhAkPQ9uaVDPJnsULoJZIYaJNw=
|
||||
gitea.com/lunny/levelqueue v0.4.1/go.mod h1:HBqmLbz56JWpfEGG0prskAV97ATNRoj5LDmPicD22hU=
|
||||
gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s=
|
||||
@@ -325,8 +325,9 @@ github.com/go-asn1-ber/asn1-ber v1.5.3/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkPro
|
||||
github.com/go-chi/chi v1.5.1/go.mod h1:REp24E+25iKvxgeTfHmdUoL5x15kBiDBlnIl5bCwe2k=
|
||||
github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs=
|
||||
github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg=
|
||||
github.com/go-chi/chi/v5 v5.0.1 h1:ALxjCrTf1aflOlkhMnCUP86MubbWFrzB3gkRPReLpTo=
|
||||
github.com/go-chi/chi/v5 v5.0.1/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-chi/chi/v5 v5.0.4 h1:5e494iHzsYBiyXQAHHuI4tyJS9M3V84OuX3ufIIGHFo=
|
||||
github.com/go-chi/chi/v5 v5.0.4/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-chi/cors v1.2.0 h1:tV1g1XENQ8ku4Bq3K9ub2AtgG+p16SmzeMSGTwrOKdE=
|
||||
github.com/go-chi/cors v1.2.0/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
|
||||
github.com/go-enry/go-enry/v2 v2.7.1 h1:WCqtfyteIz61GYk9lRVy8HblvIv4cP9GIiwm/6txCbU=
|
||||
@@ -868,8 +869,8 @@ github.com/mholt/acmez v0.1.3 h1:J7MmNIk4Qf9b8mAGqAh4XkNeowv3f1zW816yf4zt7Qk=
|
||||
github.com/mholt/acmez v0.1.3/go.mod h1:8qnn8QA/Ewx8E3ZSsmscqsIjhhpxuy9vqdgbX2ceceM=
|
||||
github.com/mholt/archiver/v3 v3.5.0 h1:nE8gZIrw66cu4osS/U7UW7YDuGMHssxKutU8IfWxwWE=
|
||||
github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc=
|
||||
github.com/microcosm-cc/bluemonday v1.0.15 h1:J4uN+qPng9rvkBZBoBb8YGR+ijuklIMpSOZZLjYpbeY=
|
||||
github.com/microcosm-cc/bluemonday v1.0.15/go.mod h1:ZLvAzeakRwrGnzQEvstVzVt3ZpqOF2+sdFr0Om+ce30=
|
||||
github.com/microcosm-cc/bluemonday v1.0.16 h1:kHmAq2t7WPWLjiGvzKa5o3HzSfahUKiOq7fAPUiMNIc=
|
||||
github.com/microcosm-cc/bluemonday v1.0.16/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.42/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
||||
github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
|
||||
@@ -1364,8 +1365,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
|
||||
golang.org/x/net v0.0.0-20210331060903-cb1fcc7394e5/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI=
|
||||
golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1765,5 +1767,5 @@ xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
|
||||
xorm.io/builder v0.3.9 h1:Sd65/LdWyO7LR8+Cbd+e7mm3sK/7U9k0jS3999IDHMc=
|
||||
xorm.io/builder v0.3.9/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
|
||||
xorm.io/xorm v1.0.6/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4=
|
||||
xorm.io/xorm v1.2.2 h1:FFBOEvJ++8fYBA9cywf2sxDVmFktl1SpJzTAG1ab06Y=
|
||||
xorm.io/xorm v1.2.2/go.mod h1:fTG8tSjk6O1BYxwuohZUK+S1glnRycsCF05L1qQyEU0=
|
||||
xorm.io/xorm v1.2.5 h1:tqN7OhN8P9xi52qBb76I8m5maAJMz/SSbgK2RGPCPbo=
|
||||
xorm.io/xorm v1.2.5/go.mod h1:fTG8tSjk6O1BYxwuohZUK+S1glnRycsCF05L1qQyEU0=
|
||||
|
||||
154 integrations/api_private_serv_test.go (Normal file)
@@ -0,0 +1,154 @@
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package integrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
"code.gitea.io/gitea/modules/private"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAPIPrivateNoServ(t *testing.T) {
|
||||
onGiteaRun(t, func(*testing.T, *url.URL) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
key, user, err := private.ServNoCommand(ctx, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(2), user.ID)
|
||||
assert.Equal(t, "user2", user.Name)
|
||||
assert.Equal(t, int64(1), key.ID)
|
||||
assert.Equal(t, "user2@localhost", key.Name)
|
||||
|
||||
deployKey, err := models.AddDeployKey(1, "test-deploy", "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBGXEEzWmm1dxb+57RoK5KVCL0w2eNv9cqJX2AGGVlkFsVDhOXHzsadS3LTK4VlEbbrDMJdoti9yM8vclA8IeRacAAAAEc3NoOg== nocomment", false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
key, user, err = private.ServNoCommand(ctx, deployKey.KeyID)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, user)
|
||||
assert.Equal(t, deployKey.KeyID, key.ID)
|
||||
assert.Equal(t, "test-deploy", key.Name)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAPIPrivateServ(t *testing.T) {
|
||||
onGiteaRun(t, func(*testing.T, *url.URL) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Can push to a repo we own
|
||||
results, err := private.ServCommand(ctx, 1, "user2", "repo1", models.AccessModeWrite, "git-upload-pack", "")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, results.IsWiki)
|
||||
assert.False(t, results.IsDeployKey)
|
||||
assert.Equal(t, int64(1), results.KeyID)
|
||||
assert.Equal(t, "user2@localhost", results.KeyName)
|
||||
assert.Equal(t, "user2", results.UserName)
|
||||
assert.Equal(t, int64(2), results.UserID)
|
||||
assert.Equal(t, "user2", results.OwnerName)
|
||||
assert.Equal(t, "repo1", results.RepoName)
|
||||
assert.Equal(t, int64(1), results.RepoID)
|
||||
|
||||
// Cannot push to a private repo we're not associated with
|
||||
results, err = private.ServCommand(ctx, 1, "user15", "big_test_private_1", models.AccessModeWrite, "git-upload-pack", "")
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, results)
|
||||
|
||||
// Cannot pull from a private repo we're not associated with
|
||||
results, err = private.ServCommand(ctx, 1, "user15", "big_test_private_1", models.AccessModeRead, "git-upload-pack", "")
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, results)
|
||||
|
||||
// Can pull from a public repo we're not associated with
|
||||
results, err = private.ServCommand(ctx, 1, "user15", "big_test_public_1", models.AccessModeRead, "git-upload-pack", "")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, results.IsWiki)
|
||||
assert.False(t, results.IsDeployKey)
|
||||
assert.Equal(t, int64(1), results.KeyID)
|
||||
assert.Equal(t, "user2@localhost", results.KeyName)
|
||||
assert.Equal(t, "user2", results.UserName)
|
||||
assert.Equal(t, int64(2), results.UserID)
|
||||
assert.Equal(t, "user15", results.OwnerName)
|
||||
assert.Equal(t, "big_test_public_1", results.RepoName)
|
||||
assert.Equal(t, int64(17), results.RepoID)
|
||||
|
||||
// Cannot push to a public repo we're not associated with
|
||||
results, err = private.ServCommand(ctx, 1, "user15", "big_test_public_1", models.AccessModeWrite, "git-upload-pack", "")
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, results)
|
||||
|
||||
// Add reading deploy key
|
||||
deployKey, err := models.AddDeployKey(19, "test-deploy", "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBGXEEzWmm1dxb+57RoK5KVCL0w2eNv9cqJX2AGGVlkFsVDhOXHzsadS3LTK4VlEbbrDMJdoti9yM8vclA8IeRacAAAAEc3NoOg== nocomment", true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Can pull from repo we're a deploy key for
|
||||
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_1", models.AccessModeRead, "git-upload-pack", "")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, results.IsWiki)
|
||||
assert.True(t, results.IsDeployKey)
|
||||
assert.Equal(t, deployKey.KeyID, results.KeyID)
|
||||
assert.Equal(t, "test-deploy", results.KeyName)
|
||||
assert.Equal(t, "user15", results.UserName)
|
||||
assert.Equal(t, int64(15), results.UserID)
|
||||
assert.Equal(t, "user15", results.OwnerName)
|
||||
assert.Equal(t, "big_test_private_1", results.RepoName)
|
||||
assert.Equal(t, int64(19), results.RepoID)
|
||||
|
||||
// Cannot push to a private repo with reading key
|
||||
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_1", models.AccessModeWrite, "git-upload-pack", "")
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, results)
|
||||
|
||||
// Cannot pull from a private repo we're not associated with
|
||||
results, err = private.ServCommand(ctx, deployKey.ID, "user15", "big_test_private_2", models.AccessModeRead, "git-upload-pack", "")
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, results)
|
||||
|
||||
// Cannot pull from a public repo we're not associated with
|
||||
results, err = private.ServCommand(ctx, deployKey.ID, "user15", "big_test_public_1", models.AccessModeRead, "git-upload-pack", "")
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, results)
|
||||
|
||||
// Add writing deploy key
|
||||
deployKey, err = models.AddDeployKey(20, "test-deploy", "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBGXEEzWmm1dxb+57RoK5KVCL0w2eNv9cqJX2AGGVlkFsVDhOXHzsadS3LTK4VlEbbrDMJdoti9yM8vclA8IeRacAAAAEc3NoOg== nocomment", false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Cannot push to a private repo with reading key
|
||||
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_1", models.AccessModeWrite, "git-upload-pack", "")
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, results)
|
||||
|
||||
// Can pull from repo we're a writing deploy key for
|
||||
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_2", models.AccessModeRead, "git-upload-pack", "")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, results.IsWiki)
|
||||
assert.True(t, results.IsDeployKey)
|
||||
assert.Equal(t, deployKey.KeyID, results.KeyID)
|
||||
assert.Equal(t, "test-deploy", results.KeyName)
|
||||
assert.Equal(t, "user15", results.UserName)
|
||||
assert.Equal(t, int64(15), results.UserID)
|
||||
assert.Equal(t, "user15", results.OwnerName)
|
||||
assert.Equal(t, "big_test_private_2", results.RepoName)
|
||||
assert.Equal(t, int64(20), results.RepoID)
|
||||
|
||||
// Can push to repo we're a writing deploy key for
|
||||
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_2", models.AccessModeWrite, "git-upload-pack", "")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, results.IsWiki)
|
||||
assert.True(t, results.IsDeployKey)
|
||||
assert.Equal(t, deployKey.KeyID, results.KeyID)
|
||||
assert.Equal(t, "test-deploy", results.KeyName)
|
||||
assert.Equal(t, "user15", results.UserName)
|
||||
assert.Equal(t, int64(15), results.UserID)
|
||||
assert.Equal(t, "user15", results.OwnerName)
|
||||
assert.Equal(t, "big_test_private_2", results.RepoName)
|
||||
assert.Equal(t, int64(20), results.RepoID)
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
44 integrations/api_repo_archive_test.go (Normal file)
@@ -0,0 +1,44 @@
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package integrations
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAPIDownloadArchive(t *testing.T) {
|
||||
defer prepareTestEnv(t)()
|
||||
|
||||
repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 1}).(*models.Repository)
|
||||
user2 := models.AssertExistsAndLoadBean(t, &models.User{ID: 2}).(*models.User)
|
||||
session := loginUser(t, user2.LowerName)
|
||||
token := getTokenForLoggedInUser(t, session)
|
||||
|
||||
link, _ := url.Parse(fmt.Sprintf("/api/v1/repos/%s/%s/archive/master.zip", user2.Name, repo.Name))
|
||||
link.RawQuery = url.Values{"token": {token}}.Encode()
|
||||
resp := MakeRequest(t, NewRequest(t, "GET", link.String()), http.StatusOK)
|
||||
bs, err := io.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, 320, len(bs))
|
||||
|
||||
link, _ = url.Parse(fmt.Sprintf("/api/v1/repos/%s/%s/archive/master.tar.gz", user2.Name, repo.Name))
|
||||
link.RawQuery = url.Values{"token": {token}}.Encode()
|
||||
resp = MakeRequest(t, NewRequest(t, "GET", link.String()), http.StatusOK)
|
||||
bs, err = io.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, 266, len(bs))
|
||||
|
||||
link, _ = url.Parse(fmt.Sprintf("/api/v1/repos/%s/%s/archive/master", user2.Name, repo.Name))
|
||||
link.RawQuery = url.Values{"token": {token}}.Encode()
|
||||
MakeRequest(t, NewRequest(t, "GET", link.String()), http.StatusBadRequest)
|
||||
}
|
||||
@@ -8,8 +8,10 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
@@ -20,6 +22,10 @@ func TestUserHeatmap(t *testing.T) {
|
||||
normalUsername := "user2"
|
||||
session := loginUser(t, adminUsername)
|
||||
|
||||
var fakeNow = time.Date(2011, 10, 20, 0, 0, 0, 0, time.Local)
|
||||
timeutil.Set(fakeNow)
|
||||
defer timeutil.Unset()
|
||||
|
||||
urlStr := fmt.Sprintf("/api/v1/users/%s/heatmap", normalUsername)
|
||||
req := NewRequest(t, "GET", urlStr)
|
||||
resp := session.MakeRequest(t, req, http.StatusOK)
|
||||
|
||||
Binary files not shown (9 files).
@@ -1 +1 @@
-3a810dbf6b96afaa8c5f69a8b6ec1dabfca7368b
+59e2c41e8f5140bb0182acebec17c8ad9831cc62
@@ -251,6 +251,26 @@ func prepareTestEnv(t testing.TB, skip ...int) func() {
|
||||
assert.NoError(t, util.RemoveAll(setting.RepoRootPath))
|
||||
|
||||
assert.NoError(t, util.CopyDir(path.Join(filepath.Dir(setting.AppPath), "integrations/gitea-repositories-meta"), setting.RepoRootPath))
|
||||
ownerDirs, err := os.ReadDir(setting.RepoRootPath)
|
||||
if err != nil {
|
||||
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, ownerDir := range ownerDirs {
|
||||
if !ownerDir.Type().IsDir() {
|
||||
continue
|
||||
}
|
||||
repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
|
||||
if err != nil {
|
||||
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, repoDir := range repoDirs {
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0755)
|
||||
}
|
||||
}
|
||||
|
||||
return deferFn
|
||||
}
|
||||
|
||||
@@ -529,4 +549,23 @@ func resetFixtures(t *testing.T) {
|
||||
assert.NoError(t, models.LoadFixtures())
|
||||
assert.NoError(t, util.RemoveAll(setting.RepoRootPath))
|
||||
assert.NoError(t, util.CopyDir(path.Join(filepath.Dir(setting.AppPath), "integrations/gitea-repositories-meta"), setting.RepoRootPath))
|
||||
ownerDirs, err := os.ReadDir(setting.RepoRootPath)
|
||||
if err != nil {
|
||||
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, ownerDir := range ownerDirs {
|
||||
if !ownerDir.Type().IsDir() {
|
||||
continue
|
||||
}
|
||||
repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
|
||||
if err != nil {
|
||||
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, repoDir := range repoDirs {
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0755)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ func TestLinksNoLogin(t *testing.T) {
|
||||
"/user/forgot_password",
|
||||
"/api/swagger",
|
||||
"/user2/repo1",
|
||||
"/user2/repo1/",
|
||||
"/user2/repo1/projects",
|
||||
"/user2/repo1/projects/1",
|
||||
"/assets/img/404.png",
|
||||
|
||||
@@ -61,6 +61,25 @@ func initMigrationTest(t *testing.T) func() {
|
||||
assert.True(t, len(setting.RepoRootPath) != 0)
|
||||
assert.NoError(t, util.RemoveAll(setting.RepoRootPath))
|
||||
assert.NoError(t, util.CopyDir(path.Join(filepath.Dir(setting.AppPath), "integrations/gitea-repositories-meta"), setting.RepoRootPath))
|
||||
ownerDirs, err := os.ReadDir(setting.RepoRootPath)
|
||||
if err != nil {
|
||||
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, ownerDir := range ownerDirs {
|
||||
if !ownerDir.Type().IsDir() {
|
||||
continue
|
||||
}
|
||||
repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
|
||||
if err != nil {
|
||||
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, repoDir := range repoDirs {
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0755)
|
||||
}
|
||||
}
|
||||
|
||||
git.CheckLFSVersion()
|
||||
setting.InitDBConfig()
|
||||
|
||||
@@ -6,6 +6,7 @@ package integrations
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
@@ -83,7 +84,7 @@ func TestNonasciiBranches(t *testing.T) {
|
||||
},
|
||||
{
|
||||
from: "Plus+Is+Not+Space/Файл.md",
|
||||
to: "branch/Plus+Is+Not+Space/%d0%a4%d0%b0%d0%b9%d0%bb.md",
|
||||
to: "branch/Plus+Is+Not+Space/%D0%A4%D0%B0%D0%B9%D0%BB.md",
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
@@ -114,7 +115,7 @@ func TestNonasciiBranches(t *testing.T) {
|
||||
},
|
||||
{
|
||||
from: "タグ/ファイル.md",
|
||||
to: "tag/%e3%82%bf%e3%82%b0/%e3%83%95%e3%82%a1%e3%82%a4%e3%83%ab.md",
|
||||
to: "tag/%e3%82%bf%e3%82%b0/%E3%83%95%E3%82%A1%E3%82%A4%E3%83%AB.md",
|
||||
status: http.StatusOK,
|
||||
},
|
||||
// Files
|
||||
@@ -125,12 +126,12 @@ func TestNonasciiBranches(t *testing.T) {
|
||||
},
|
||||
{
|
||||
from: "Файл.md",
|
||||
to: "branch/Plus+Is+Not+Space/%d0%a4%d0%b0%d0%b9%d0%bb.md",
|
||||
to: "branch/Plus+Is+Not+Space/%D0%A4%D0%B0%D0%B9%D0%BB.md",
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
from: "ファイル.md",
|
||||
to: "branch/Plus+Is+Not+Space/%e3%83%95%e3%82%a1%e3%82%a4%e3%83%ab.md",
|
||||
to: "branch/Plus+Is+Not+Space/%E3%83%95%E3%82%A1%E3%82%A4%E3%83%AB.md",
|
||||
status: http.StatusNotFound, // it's not on default branch
|
||||
},
|
||||
// Same but url-encoded (few tests)
|
||||
@@ -146,7 +147,7 @@ func TestNonasciiBranches(t *testing.T) {
|
||||
},
|
||||
{
|
||||
from: "%D0%A4%D0%B0%D0%B9%D0%BB.md",
|
||||
to: "branch/Plus+Is+Not+Space/%d0%a4%d0%b0%d0%b9%d0%bb.md",
|
||||
to: "branch/Plus+Is+Not+Space/%D0%A4%D0%B0%D0%B9%D0%BB.md",
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
@@ -159,6 +160,41 @@ func TestNonasciiBranches(t *testing.T) {
|
||||
to: "tag/%d0%81/%e4%ba%ba",
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
from: "Plus+Is+Not+Space/%25%252525mightnotplaywell",
|
||||
to: "branch/Plus+Is+Not+Space/%25%252525mightnotplaywell",
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
from: "Plus+Is+Not+Space/%25253Fisnotaquestion%25253F",
|
||||
to: "branch/Plus+Is+Not+Space/%25253Fisnotaquestion%25253F",
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
from: "Plus+Is+Not+Space/" + url.PathEscape("%3Fis?and#afile"),
|
||||
to: "branch/Plus+Is+Not+Space/" + url.PathEscape("%3Fis?and#afile"),
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
from: "Plus+Is+Not+Space/10%25.md",
|
||||
to: "branch/Plus+Is+Not+Space/10%25.md",
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
from: "Plus+Is+Not+Space/" + url.PathEscape("This+file%20has 1space"),
|
||||
to: "branch/Plus+Is+Not+Space/" + url.PathEscape("This+file%20has 1space"),
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
from: "Plus+Is+Not+Space/" + url.PathEscape("This+file%2520has 2 spaces"),
|
||||
to: "branch/Plus+Is+Not+Space/" + url.PathEscape("This+file%2520has 2 spaces"),
|
||||
status: http.StatusOK,
|
||||
},
|
||||
{
|
||||
from: "Plus+Is+Not+Space/" + url.PathEscape("£15&$6.txt"),
|
||||
to: "branch/Plus+Is+Not+Space/" + url.PathEscape("£15&$6.txt"),
|
||||
status: http.StatusOK,
|
||||
},
|
||||
}
|
||||
|
||||
defer prepareTestEnv(t)()
|
||||
|
||||
@@ -5,10 +5,12 @@
|
||||
package integrations
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
api "code.gitea.io/gitea/modules/structs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -110,3 +112,64 @@ func TestPrivateOrg(t *testing.T) {
|
||||
req = NewRequest(t, "GET", "/privated_org/private_repo_on_private_org")
|
||||
session.MakeRequest(t, req, http.StatusOK)
|
||||
}
|
||||
|
||||
func TestOrgRestrictedUser(t *testing.T) {
|
||||
defer prepareTestEnv(t)()
|
||||
|
||||
// privated_org is a private org who has id 23
|
||||
orgName := "privated_org"
|
||||
|
||||
// public_repo_on_private_org is a public repo on privated_org
|
||||
repoName := "public_repo_on_private_org"
|
||||
|
||||
// user29 is a restricted user who is not a member of the organization
|
||||
restrictedUser := "user29"
|
||||
|
||||
// #17003 reports a bug whereby adding a restricted user to a read-only team doesn't work
|
||||
|
||||
// assert restrictedUser cannot see the org or the public repo
|
||||
restrictedSession := loginUser(t, restrictedUser)
|
||||
req := NewRequest(t, "GET", fmt.Sprintf("/%s", orgName))
|
||||
restrictedSession.MakeRequest(t, req, http.StatusNotFound)
|
||||
|
||||
req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s", orgName, repoName))
|
||||
restrictedSession.MakeRequest(t, req, http.StatusNotFound)
|
||||
|
||||
// Therefore create a read-only team
|
||||
adminSession := loginUser(t, "user1")
|
||||
token := getTokenForLoggedInUser(t, adminSession)
|
||||
|
||||
teamToCreate := &api.CreateTeamOption{
|
||||
Name: "codereader",
|
||||
Description: "Code Reader",
|
||||
IncludesAllRepositories: true,
|
||||
Permission: "read",
|
||||
Units: []string{"repo.code"},
|
||||
}
|
||||
|
||||
req = NewRequestWithJSON(t, "POST",
|
||||
fmt.Sprintf("/api/v1/orgs/%s/teams?token=%s", orgName, token), teamToCreate)
|
||||
|
||||
var apiTeam api.Team
|
||||
|
||||
resp := adminSession.MakeRequest(t, req, http.StatusCreated)
|
||||
DecodeJSON(t, resp, &apiTeam)
|
||||
checkTeamResponse(t, &apiTeam, teamToCreate.Name, teamToCreate.Description, teamToCreate.IncludesAllRepositories,
|
||||
teamToCreate.Permission, teamToCreate.Units)
|
||||
checkTeamBean(t, apiTeam.ID, teamToCreate.Name, teamToCreate.Description, teamToCreate.IncludesAllRepositories,
|
||||
teamToCreate.Permission, teamToCreate.Units)
|
||||
//teamID := apiTeam.ID
|
||||
|
||||
// Now we need to add the restricted user to the team
|
||||
req = NewRequest(t, "PUT",
|
||||
fmt.Sprintf("/api/v1/teams/%d/members/%s?token=%s", apiTeam.ID, restrictedUser, token))
|
||||
_ = adminSession.MakeRequest(t, req, http.StatusNoContent)
|
||||
|
||||
// Now we need to check if the restrictedUser can access the repo
|
||||
req = NewRequest(t, "GET", fmt.Sprintf("/%s", orgName))
|
||||
restrictedSession.MakeRequest(t, req, http.StatusOK)
|
||||
|
||||
req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s", orgName, repoName))
|
||||
restrictedSession.MakeRequest(t, req, http.StatusOK)
|
||||
|
||||
}
|
||||
|
||||
@@ -1,128 +1,81 @@
|
||||
-----BEGIN PGP PRIVATE KEY BLOCK-----
|
||||
|
||||
lQVYBF3190cBDADpfh1CvNtLl8N6lJQ03jymeE8h2ocvT61stAj31eefiOvZPDyx
|
||||
n0kRztBcrQ1OITBEslYqKiYHVfVptscIWf+5bARoHnLQIgpJEio00ggqO3AP8lIZ
|
||||
uiy9/ARDVgyPl3WgMza/J3Z7W0sBJTtN/6W35i+eNEY4Q0mScmNIVc75oo5ey/pL
|
||||
JSF0qumiy94o38dy6Vk2dPEf5LyxJWoA4dLvj49LL/QGPqnsAXAQXr1zULaRuwf2
|
||||
a8oKebr3m9wVkZcaH86duUmkU822k6OZSSxWCxZFFGVkC/VaA0uUjIL4a6SQqOOr
|
||||
PPOttmCLXJrtvpFdRwJ4PNwr9r2nPDDLwoajLJ8zo5jRumzwvkK9vALkY0BPuVoC
|
||||
qFdGY+2SGYKa4FTE7Mri/5j0NhWplhbcdGRe5doGDxJHbIN2UO21XjDlTBcscKep
|
||||
mWPVE2cdJrspYaZ9O0L6vhMVwGyyk+Qxmf6NbDw0q63AtqVe9qwbr2O3Irxseftw
|
||||
uWuSuLXzp+SMh/0AEQEAAQAL/2ExopuDwuNSHsh5bcIeGnAPV51Xentqtt2viaYk
|
||||
0AB8PfTVGsyzafa0OM7DKG0z6oRGGhD+L4tRMFGbiHlFAWqdeK4gsplJ+i8VlSUc
|
||||
otJ1oH262IsmEPban6mp+ZuSKCASAYGLu0m5JF0rMucSeli1RHAeAXbtJ4SDAin7
|
||||
sib/EDWMwjkikS0f8hZWt7kbAcqnMQA2qKKmlBdHZDtOxX/8KeFZ6kHpNtFrfcsK
|
||||
rOECIaVDDhr5HobCyl3E7tW5nrlrvSUkLVFl0IjcypqfzDlZp04PMdswhkdfBhu+
|
||||
0iY4K+d4uMPMzcpF1+mcn8C+7XK7jOqZysQa42bqgFHWEqljjJiUCuXfHbxnZWls
|
||||
0R2j9FLgTqtPQ33f3zMjhOyvdiy1DmfzU9MSu/I0VqCJnq6AwlW5rBQaKwAQuHMB
|
||||
UJ7bjMx/z41z41v0IFpxHnwSa+tkl49tV+y8zVtajfwXxJNy8j/ElX0ywfM5sDHa
|
||||
RAVwI7DSwMk5azp3F15DnA6XbwYA8O0b5AIeCo8edmIdKgY3vAi20j/lsTgsTUkY
|
||||
GTQ4BdMohr9gpZWHZZmQ1TeZokm4Auex7UgPblflufepkADassXixMmSNUsggGI+
|
||||
sR9qydNCw+qzgaJjchpwT5TdLJNHRbE+6VuGXJftcjdfXiKYZltEQBX8U4w7hui8
|
||||
D6dpzJK5mE1QebrFnJ7IKpAe+hWTc1+g9iHH3rInPMIzQW72WqSKndKIrRy1PZS5
|
||||
WM5MJzgWQaDzZSOQhrKA4yLIyzsrBgD4GfFLWh+sQ02sob5xmpDblfQUYVRy5TAx
|
||||
4WOLSflJqqyarrk7s1D7obqsSoAEdJk521dpE/0ciI5xT41fQKMXH1Qm9tu9uW5d
|
||||
[base64-armored PGP private key test fixture content elided]
-----END PGP PRIVATE KEY BLOCK-----
@@ -18,7 +18,7 @@ func TestSignOut(t *testing.T) {
session.MakeRequest(t, req, http.StatusFound)

// try to view a private repo, should fail
req = NewRequest(t, "GET", "/user2/repo2/")
req = NewRequest(t, "GET", "/user2/repo2")
session.MakeRequest(t, req, http.StatusNotFound)

// invalidate cached cookies for user2, for subsequent tests

@@ -225,6 +225,9 @@ func (repo *Repository) refreshCollaboratorAccesses(e Engine, accessMap map[int6
return fmt.Errorf("getCollaborations: %v", err)
}
for _, c := range collaborators {
if c.User.IsGhost() {
continue
}
updateUserAccess(accessMap, c.User, c.Collaboration.Mode)
}
return nil

@@ -144,6 +144,11 @@ func GetAttachmentByUUID(uuid string) (*Attachment, error) {
return getAttachmentByUUID(x, uuid)
}

// ExistAttachmentsByUUID returns true if attachment is exist by given UUID
func ExistAttachmentsByUUID(uuid string) (bool, error) {
return x.Where("`uuid`=?", uuid).Exist(new(Attachment))
}

// GetAttachmentByReleaseIDFileName returns attachment by given releaseId and fileName.
func GetAttachmentByReleaseIDFileName(releaseID int64, fileName string) (*Attachment, error) {
return getAttachmentByReleaseIDFileName(x, releaseID, fileName)

@@ -302,7 +302,7 @@ func DeleteOrphanedIssues() error {
// CountOrphanedObjects count subjects with have no existing refobject anymore
func CountOrphanedObjects(subject, refobject, joinCond string) (int64, error) {
return x.Table("`"+subject+"`").
Join("LEFT", refobject, joinCond).
Join("LEFT", "`"+refobject+"`", joinCond).
Where(builder.IsNull{"`" + refobject + "`.id"}).
Count("id")
}

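The change above quotes the joined table name so it is always treated as an identifier. A minimal standalone sketch of the same orphan-count query pattern, using database/sql instead of xorm (the function and argument names here are illustrative, not part of the diff):

package dbutil

import (
	"database/sql"
	"fmt"
)

// countOrphaned counts rows of `subject` whose LEFT-JOINed reference row is
// missing, i.e. the referenced id is NULL. Both table names are wrapped in
// backticks, mirroring the quoting added in the hunk above.
func countOrphaned(db *sql.DB, subject, refObject, joinCond string) (int64, error) {
	query := fmt.Sprintf(
		"SELECT COUNT(*) FROM `%s` LEFT JOIN `%s` ON %s WHERE `%s`.id IS NULL",
		subject, refObject, joinCond, refObject)
	var n int64
	err := db.QueryRow(query).Scan(&n)
	return n, err
}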
@@ -45,19 +45,16 @@ func WithContext(f func(ctx DBContext) error) error {
// WithTx represents executing database operations on a transaction
func WithTx(f func(ctx DBContext) error) error {
sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {
sess.Close()
return err
}

if err := f(DBContext{sess}); err != nil {
sess.Close()
return err
}

err := sess.Commit()
sess.Close()
return err
return sess.Commit()
}

// Iterate iterates the databases and doing something

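With the session closed by the deferred Close, the explicit Close calls on each error path become redundant and Commit's error can be returned directly. A minimal sketch of that wrapper shape, assuming only xorm.io/xorm (the real WithTx hands the session to its callback wrapped in a DBContext):

package dbutil

import "xorm.io/xorm"

// withTx begins a transaction, runs f, and commits; the deferred Close releases
// the session on every return path, so no branch needs its own Close call.
func withTx(x *xorm.Engine, f func(sess *xorm.Session) error) error {
	sess := x.NewSession()
	defer sess.Close()

	if err := sess.Begin(); err != nil {
		return err
	}
	if err := f(sess); err != nil {
		return err
	}
	return sess.Commit()
}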
@@ -568,7 +568,7 @@
|
||||
-
|
||||
id: 40
|
||||
owner_id: 23
|
||||
owner_name: limited_org
|
||||
owner_name: privated_org
|
||||
lower_name: public_repo_on_private_org
|
||||
name: public_repo_on_private_org
|
||||
is_private: false
|
||||
@@ -581,7 +581,7 @@
|
||||
-
|
||||
id: 41
|
||||
owner_id: 23
|
||||
owner_name: limited_org
|
||||
owner_name: privated_org
|
||||
lower_name: private_repo_on_private_org
|
||||
name: private_repo_on_private_org
|
||||
is_private: true
|
||||
|
||||
@@ -99,6 +99,46 @@ func AddGPGKey(ownerID int64, content, token, signature string) ([]*GPGKey, erro
|
||||
verified = true
|
||||
}
|
||||
|
||||
if len(ekeys) > 1 {
|
||||
id2key := map[string]*openpgp.Entity{}
|
||||
newEKeys := make([]*openpgp.Entity, 0, len(ekeys))
|
||||
for _, ekey := range ekeys {
|
||||
id := ekey.PrimaryKey.KeyIdString()
|
||||
if original, has := id2key[id]; has {
|
||||
// Coalesce this with the other one
|
||||
for _, subkey := range ekey.Subkeys {
|
||||
if subkey.PublicKey == nil {
|
||||
continue
|
||||
}
|
||||
found := false
|
||||
|
||||
for _, originalSubkey := range original.Subkeys {
|
||||
if originalSubkey.PublicKey == nil {
|
||||
continue
|
||||
}
|
||||
if originalSubkey.PublicKey.KeyId == subkey.PublicKey.KeyId {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
original.Subkeys = append(original.Subkeys, subkey)
|
||||
}
|
||||
}
|
||||
for name, identity := range ekey.Identities {
|
||||
if _, has := original.Identities[name]; has {
|
||||
continue
|
||||
}
|
||||
original.Identities[name] = identity
|
||||
}
|
||||
continue
|
||||
}
|
||||
id2key[id] = ekey
|
||||
newEKeys = append(newEKeys, ekey)
|
||||
}
|
||||
ekeys = newEKeys
|
||||
}
|
||||
|
||||
for _, ekey := range ekeys {
|
||||
// Key ID cannot be duplicated.
|
||||
has, err := sess.Where("key_id=?", ekey.PrimaryKey.KeyIdString()).
|
||||
|
||||
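The new block above coalesces duplicate OpenPGP entities by primary key id, folding any unseen subkeys and identities into the first entity before the keys are inserted. A reduced, self-contained sketch of that merge-by-id idea (the entity type and its fields are illustrative stand-ins, not the real openpgp types):

package keyutil

// entity is a hypothetical stand-in for the OpenPGP entities merged above: each
// carries a primary-key id plus subordinate items that must be coalesced.
type entity struct {
	ID      string
	Subkeys []string
}

// coalesceByID keeps the first entity seen for each ID and folds later duplicates
// into it, appending only subkeys that are not already present.
func coalesceByID(in []*entity) []*entity {
	byID := map[string]*entity{}
	out := make([]*entity, 0, len(in))
	for _, e := range in {
		original, seen := byID[e.ID]
		if !seen {
			byID[e.ID] = e
			out = append(out, e)
			continue
		}
		for _, sk := range e.Subkeys {
			found := false
			for _, osk := range original.Subkeys {
				if osk == sk {
					found = true
					break
				}
			}
			if !found {
				original.Subkeys = append(original.Subkeys, sk)
			}
		}
	}
	return out
}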
@@ -14,7 +14,7 @@ import (
// ResourceIndex represents a resource index which could be used as issue/release and others
// We can create different tables i.e. issue_index, release_index and etc.
type ResourceIndex struct {
GroupID int64 `xorm:"pk"`
GroupID int64 `xorm:"pk unique"`
MaxIndex int64 `xorm:"index"`
}

@@ -1145,17 +1145,17 @@ type IssuesOptions struct {
|
||||
func sortIssuesSession(sess *xorm.Session, sortType string, priorityRepoID int64) {
|
||||
switch sortType {
|
||||
case "oldest":
|
||||
sess.Asc("issue.created_unix")
|
||||
sess.Asc("issue.created_unix").Asc("issue.id")
|
||||
case "recentupdate":
|
||||
sess.Desc("issue.updated_unix")
|
||||
sess.Desc("issue.updated_unix").Desc("issue.created_unix").Desc("issue.id")
|
||||
case "leastupdate":
|
||||
sess.Asc("issue.updated_unix")
|
||||
sess.Asc("issue.updated_unix").Asc("issue.created_unix").Asc("issue.id")
|
||||
case "mostcomment":
|
||||
sess.Desc("issue.num_comments")
|
||||
sess.Desc("issue.num_comments").Desc("issue.created_unix").Desc("issue.id")
|
||||
case "leastcomment":
|
||||
sess.Asc("issue.num_comments")
|
||||
sess.Asc("issue.num_comments").Desc("issue.created_unix").Desc("issue.id")
|
||||
case "priority":
|
||||
sess.Desc("issue.priority")
|
||||
sess.Desc("issue.priority").Desc("issue.created_unix").Desc("issue.id")
|
||||
case "nearduedate":
|
||||
// 253370764800 is 01/01/9999 @ 12:00am (UTC)
|
||||
sess.Join("LEFT", "milestone", "issue.milestone_id = milestone.id").
|
||||
@@ -1163,17 +1163,25 @@ func sortIssuesSession(sess *xorm.Session, sortType string, priorityRepoID int64
|
||||
"WHEN issue.deadline_unix = 0 AND (milestone.deadline_unix = 0 OR milestone.deadline_unix IS NULL) THEN 253370764800 " +
|
||||
"WHEN milestone.deadline_unix = 0 OR milestone.deadline_unix IS NULL THEN issue.deadline_unix " +
|
||||
"WHEN milestone.deadline_unix < issue.deadline_unix OR issue.deadline_unix = 0 THEN milestone.deadline_unix " +
|
||||
"ELSE issue.deadline_unix END ASC")
|
||||
"ELSE issue.deadline_unix END ASC").
|
||||
Desc("issue.created_unix").
|
||||
Desc("issue.id")
|
||||
case "farduedate":
|
||||
sess.Join("LEFT", "milestone", "issue.milestone_id = milestone.id").
|
||||
OrderBy("CASE " +
|
||||
"WHEN milestone.deadline_unix IS NULL THEN issue.deadline_unix " +
|
||||
"WHEN milestone.deadline_unix < issue.deadline_unix OR issue.deadline_unix = 0 THEN milestone.deadline_unix " +
|
||||
"ELSE issue.deadline_unix END DESC")
|
||||
"ELSE issue.deadline_unix END DESC").
|
||||
Desc("issue.created_unix").
|
||||
Desc("issue.id")
|
||||
case "priorityrepo":
|
||||
sess.OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(priorityRepoID, 10) + " THEN 1 ELSE 2 END, issue.created_unix DESC")
|
||||
sess.OrderBy("CASE " +
|
||||
"WHEN issue.repo_id = " + strconv.FormatInt(priorityRepoID, 10) + " THEN 1 " +
|
||||
"ELSE 2 END ASC").
|
||||
Desc("issue.created_unix").
|
||||
Desc("issue.id")
|
||||
default:
|
||||
sess.Desc("issue.created_unix")
|
||||
sess.Desc("issue.created_unix").Desc("issue.id")
|
||||
}
|
||||
}
|
||||
|
||||
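Every sort branch above now appends issue.created_unix and issue.id as tie-breakers, so rows that compare equal on the primary sort key still come back in a stable order across queries. The same idea in miniature, as an in-memory sort (the type and field names here are illustrative):

package sortutil

import "sort"

// issue is a hypothetical reduced shape of the rows being ordered above.
type issue struct {
	ID          int64
	CreatedUnix int64
}

// sortOldest mirrors the "oldest" case: order by creation time, then by id, so
// rows created in the same second are returned in a repeatable order.
func sortOldest(issues []issue) {
	sort.Slice(issues, func(i, j int) bool {
		if issues[i].CreatedUnix != issues[j].CreatedUnix {
			return issues[i].CreatedUnix < issues[j].CreatedUnix
		}
		return issues[i].ID < issues[j].ID
	})
}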
@@ -1517,12 +1525,12 @@ func GetIssueStats(opts *IssueStatsOptions) (*IssueStats, error) {
|
||||
func getIssueStatsChunk(opts *IssueStatsOptions, issueIDs []int64) (*IssueStats, error) {
|
||||
stats := &IssueStats{}
|
||||
|
||||
countSession := func(opts *IssueStatsOptions) *xorm.Session {
|
||||
countSession := func(opts *IssueStatsOptions, issueIDs []int64) *xorm.Session {
|
||||
sess := x.
|
||||
Where("issue.repo_id = ?", opts.RepoID)
|
||||
|
||||
if len(opts.IssueIDs) > 0 {
|
||||
sess.In("issue.id", opts.IssueIDs)
|
||||
if len(issueIDs) > 0 {
|
||||
sess.In("issue.id", issueIDs)
|
||||
}
|
||||
|
||||
if len(opts.Labels) > 0 && opts.Labels != "0" {
|
||||
@@ -1572,13 +1580,13 @@ func getIssueStatsChunk(opts *IssueStatsOptions, issueIDs []int64) (*IssueStats,
|
||||
}
|
||||
|
||||
var err error
|
||||
stats.OpenCount, err = countSession(opts).
|
||||
stats.OpenCount, err = countSession(opts, issueIDs).
|
||||
And("issue.is_closed = ?", false).
|
||||
Count(new(Issue))
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
stats.ClosedCount, err = countSession(opts).
|
||||
stats.ClosedCount, err = countSession(opts, issueIDs).
|
||||
And("issue.is_closed = ?", true).
|
||||
Count(new(Issue))
|
||||
return stats, err
|
||||
|
||||
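Passing the chunk's issueIDs into countSession (instead of always reading opts.IssueIDs) is what lets GetIssueStats run one query per chunk and add the partial counts together. A small sketch of the chunking side, with maxParams standing in for the engine's bound-parameter limit (the constant name is an assumption, not taken from the diff):

package statutil

// maxParams is a stand-in for the maximum number of bound query parameters
// the database driver accepts (maxQueryParameters in the test further down).
const maxParams = 300

// chunkIDs splits a long ID list into slices no larger than maxParams, so each
// chunk can go into its own IN (...) query and the partial counts can be summed.
func chunkIDs(ids []int64) [][]int64 {
	var chunks [][]int64
	for len(ids) > maxParams {
		chunks = append(chunks, ids[:maxParams])
		ids = ids[maxParams:]
	}
	if len(ids) > 0 {
		chunks = append(chunks, ids)
	}
	return chunks
}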
@@ -762,13 +762,12 @@ func updateCommentInfos(e *xorm.Session, opts *CreateCommentOptions, comment *Co
|
||||
}
|
||||
}
|
||||
fallthrough
|
||||
case CommentTypeReview:
|
||||
fallthrough
|
||||
case CommentTypeComment:
|
||||
if _, err = e.Exec("UPDATE `issue` SET num_comments=num_comments+1 WHERE id=?", opts.Issue.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fallthrough
|
||||
case CommentTypeReview:
|
||||
// Check attachments
|
||||
attachments, err := getAttachmentsByUUIDs(e, opts.Attachments)
|
||||
if err != nil {
|
||||
|
||||
@@ -13,6 +13,26 @@ import (
|
||||
"xorm.io/xorm"
|
||||
)
|
||||
|
||||
// ErrIssueStopwatchNotExist represents an error that stopwatch is not exist
|
||||
type ErrIssueStopwatchNotExist struct {
|
||||
UserID int64
|
||||
IssueID int64
|
||||
}
|
||||
|
||||
func (err ErrIssueStopwatchNotExist) Error() string {
|
||||
return fmt.Sprintf("issue stopwatch doesn't exist[uid: %d, issue_id: %d", err.UserID, err.IssueID)
|
||||
}
|
||||
|
||||
// ErrIssueStopwatchAlreadyExist represents an error that stopwatch is already exist
|
||||
type ErrIssueStopwatchAlreadyExist struct {
|
||||
UserID int64
|
||||
IssueID int64
|
||||
}
|
||||
|
||||
func (err ErrIssueStopwatchAlreadyExist) Error() string {
|
||||
return fmt.Sprintf("issue stopwatch already exists[uid: %d, issue_id: %d", err.UserID, err.IssueID)
|
||||
}
|
||||
|
||||
// Stopwatch represents a stopwatch for time tracking.
|
||||
type Stopwatch struct {
|
||||
ID int64 `xorm:"pk autoincr"`
|
||||
@@ -74,91 +94,141 @@ func hasUserStopwatch(e Engine, userID int64) (exists bool, sw *Stopwatch, err e
|
||||
return
|
||||
}
|
||||
|
||||
// FinishIssueStopwatchIfPossible if stopwatch exist then finish it otherwise ignore
|
||||
func FinishIssueStopwatchIfPossible(user *User, issue *Issue) error {
|
||||
_, exists, err := getStopwatch(x, user.ID, issue.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
return FinishIssueStopwatch(user, issue)
|
||||
}
|
||||
|
||||
// CreateOrStopIssueStopwatch will create or remove a stopwatch and will log it into issue's timeline.
|
||||
func CreateOrStopIssueStopwatch(user *User, issue *Issue) error {
|
||||
_, exists, err := getStopwatch(x, user.ID, issue.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
return FinishIssueStopwatch(user, issue)
|
||||
}
|
||||
return CreateIssueStopwatch(user, issue)
|
||||
}
|
||||
|
||||
// FinishIssueStopwatch if stopwatch exist then finish it otherwise return an error
|
||||
func FinishIssueStopwatch(user *User, issue *Issue) error {
|
||||
sess := x.NewSession()
|
||||
defer sess.Close()
|
||||
if err := sess.Begin(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createOrStopIssueStopwatch(sess, user, issue); err != nil {
|
||||
if err := finishIssueStopwatch(sess, user, issue); err != nil {
|
||||
return err
|
||||
}
|
||||
return sess.Commit()
|
||||
}
|
||||
|
||||
func createOrStopIssueStopwatch(e *xorm.Session, user *User, issue *Issue) error {
|
||||
func finishIssueStopwatch(e *xorm.Session, user *User, issue *Issue) error {
|
||||
sw, exists, err := getStopwatch(e, user.ID, issue.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
return ErrIssueStopwatchNotExist{
|
||||
UserID: user.ID,
|
||||
IssueID: issue.ID,
|
||||
}
|
||||
}
|
||||
|
||||
// Create tracked time out of the time difference between start date and actual date
|
||||
timediff := time.Now().Unix() - int64(sw.CreatedUnix)
|
||||
|
||||
// Create TrackedTime
|
||||
tt := &TrackedTime{
|
||||
Created: time.Now(),
|
||||
IssueID: issue.ID,
|
||||
UserID: user.ID,
|
||||
Time: timediff,
|
||||
}
|
||||
|
||||
if _, err := e.Insert(tt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := issue.loadRepo(e); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := createComment(e, &CreateCommentOptions{
|
||||
Doer: user,
|
||||
Issue: issue,
|
||||
Repo: issue.Repo,
|
||||
Content: SecToTime(timediff),
|
||||
Type: CommentTypeStopTracking,
|
||||
TimeID: tt.ID,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = e.Delete(sw)
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateIssueStopwatch creates a stopwatch if not exist, otherwise return an error
|
||||
func CreateIssueStopwatch(user *User, issue *Issue) error {
|
||||
sess := x.NewSession()
|
||||
defer sess.Close()
|
||||
if err := sess.Begin(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createIssueStopwatch(sess, user, issue); err != nil {
|
||||
return err
|
||||
}
|
||||
return sess.Commit()
|
||||
}
|
||||
|
||||
func createIssueStopwatch(e *xorm.Session, user *User, issue *Issue) error {
|
||||
if err := issue.loadRepo(e); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if another stopwatch is running: stop it
|
||||
exists, sw, err := hasUserStopwatch(e, user.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
// Create tracked time out of the time difference between start date and actual date
|
||||
timediff := time.Now().Unix() - int64(sw.CreatedUnix)
|
||||
|
||||
// Create TrackedTime
|
||||
tt := &TrackedTime{
|
||||
Created: time.Now(),
|
||||
IssueID: issue.ID,
|
||||
UserID: user.ID,
|
||||
Time: timediff,
|
||||
}
|
||||
|
||||
if _, err := e.Insert(tt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := createComment(e, &CreateCommentOptions{
|
||||
Doer: user,
|
||||
Issue: issue,
|
||||
Repo: issue.Repo,
|
||||
Content: SecToTime(timediff),
|
||||
Type: CommentTypeStopTracking,
|
||||
TimeID: tt.ID,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := e.Delete(sw); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// if another stopwatch is running: stop it
|
||||
exists, sw, err := hasUserStopwatch(e, user.ID)
|
||||
issue, err := getIssueByID(e, sw.IssueID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
issue, err := getIssueByID(e, sw.IssueID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := createOrStopIssueStopwatch(e, user, issue); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Create stopwatch
|
||||
sw = &Stopwatch{
|
||||
UserID: user.ID,
|
||||
IssueID: issue.ID,
|
||||
}
|
||||
|
||||
if _, err := e.Insert(sw); err != nil {
|
||||
if err := finishIssueStopwatch(e, user, issue); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := createComment(e, &CreateCommentOptions{
|
||||
Doer: user,
|
||||
Issue: issue,
|
||||
Repo: issue.Repo,
|
||||
Type: CommentTypeStartTracking,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
// Create stopwatch
|
||||
sw = &Stopwatch{
|
||||
UserID: user.ID,
|
||||
IssueID: issue.ID,
|
||||
}
|
||||
|
||||
if _, err := e.Insert(sw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := issue.loadRepo(e); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := createComment(e, &CreateCommentOptions{
|
||||
Doer: user,
|
||||
Issue: issue,
|
||||
Repo: issue.Repo,
|
||||
Type: CommentTypeStartTracking,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -5,7 +5,9 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -417,3 +419,43 @@ func TestIssue_ResolveMentions(t *testing.T) {
|
||||
// Private repo, whole team
|
||||
testSuccess("user17", "big_test_private_4", "user15", []string{"user17/owners"}, []int64{18})
|
||||
}
|
||||
|
||||
func TestCorrectIssueStats(t *testing.T) {
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
|
||||
// Because the condition is to have chunked database look-ups,
|
||||
// We have to more issues than `maxQueryParameters`, we will insert.
|
||||
// maxQueryParameters + 10 issues into the testDatabase.
|
||||
// Each new issues will have a constant description "Bugs are nasty"
|
||||
// Which will be used later on.
|
||||
|
||||
issueAmount := maxQueryParameters + 10
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < issueAmount; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
testInsertIssue(t, fmt.Sprintf("Issue %d", i+1), "Bugs are nasty", 0)
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Now we will get all issueID's that match the "Bugs are nasty" query.
|
||||
total, ids, err := SearchIssueIDsByKeyword("Bugs are nasty", []int64{1}, issueAmount, 0)
|
||||
|
||||
// Just to be sure.
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, issueAmount, total)
|
||||
|
||||
// Now we will call the GetIssueStats with these IDs and if working,
|
||||
// get the correct stats back.
|
||||
issueStats, err := GetIssueStats(&IssueStatsOptions{
|
||||
RepoID: 1,
|
||||
IssueIDs: ids,
|
||||
})
|
||||
|
||||
// Now check the values.
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, issueStats.OpenCount, issueAmount)
|
||||
}
|
||||
|
||||
@@ -71,9 +71,9 @@ var (
|
||||
_ convert.Conversion = &SSPIConfig{}
|
||||
)
|
||||
|
||||
// jsonUnmarshalHandleDoubleEncode - due to a bug in xorm (see https://gitea.com/xorm/xorm/pulls/1957) - it's
|
||||
// JSONUnmarshalHandleDoubleEncode - due to a bug in xorm (see https://gitea.com/xorm/xorm/pulls/1957) - it's
|
||||
// possible that a Blob may be double encoded or gain an unwanted prefix of 0xff 0xfe.
|
||||
func jsonUnmarshalHandleDoubleEncode(bs []byte, v interface{}) error {
|
||||
func JSONUnmarshalHandleDoubleEncode(bs []byte, v interface{}) error {
|
||||
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
err := json.Unmarshal(bs, v)
|
||||
if err != nil {
|
||||
@@ -89,7 +89,7 @@ func jsonUnmarshalHandleDoubleEncode(bs []byte, v interface{}) error {
|
||||
rs = append(rs, temp...)
|
||||
}
|
||||
if ok {
|
||||
if rs[0] == 0xff && rs[1] == 0xfe {
|
||||
if len(rs) > 1 && rs[0] == 0xff && rs[1] == 0xfe {
|
||||
rs = rs[2:]
|
||||
}
|
||||
err = json.Unmarshal(rs, v)
|
||||
@@ -108,7 +108,7 @@ type LDAPConfig struct {
|
||||
|
||||
// FromDB fills up a LDAPConfig from serialized format.
|
||||
func (cfg *LDAPConfig) FromDB(bs []byte) error {
|
||||
err := jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
err := JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -149,7 +149,7 @@ type SMTPConfig struct {
|
||||
|
||||
// FromDB fills up an SMTPConfig from serialized format.
|
||||
func (cfg *SMTPConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, cfg)
|
||||
}
|
||||
|
||||
// ToDB exports an SMTPConfig to a serialized format.
|
||||
@@ -166,7 +166,7 @@ type PAMConfig struct {
|
||||
|
||||
// FromDB fills up a PAMConfig from serialized format.
|
||||
func (cfg *PAMConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a PAMConfig to a serialized format.
|
||||
@@ -187,7 +187,7 @@ type OAuth2Config struct {
|
||||
|
||||
// FromDB fills up an OAuth2Config from serialized format.
|
||||
func (cfg *OAuth2Config) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, cfg)
|
||||
}
|
||||
|
||||
// ToDB exports an SMTPConfig to a serialized format.
|
||||
@@ -207,7 +207,7 @@ type SSPIConfig struct {
|
||||
|
||||
// FromDB fills up an SSPIConfig from serialized format.
|
||||
func (cfg *SSPIConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, cfg)
|
||||
}
|
||||
|
||||
// ToDB exports an SSPIConfig to a serialized format.
|
||||
|
||||
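JSONUnmarshalHandleDoubleEncode is exported so other packages can reuse the workaround: blobs written through the buggy xorm path may be a JSON string wrapping the real JSON, sometimes with a stray 0xff 0xfe prefix. A rough standard-library sketch of that fallback (the real code uses jsoniter and decodes the wrapped form differently):

package authconfig

import "encoding/json"

// unmarshalMaybeDoubleEncoded first tries the bytes directly; if that fails, it
// treats them as a JSON string whose contents are the real JSON document,
// dropping a stray 0xff 0xfe prefix before the second decode.
func unmarshalMaybeDoubleEncoded(bs []byte, v interface{}) error {
	if err := json.Unmarshal(bs, v); err == nil {
		return nil
	}
	var inner string
	if err := json.Unmarshal(bs, &inner); err != nil {
		return err
	}
	rs := []byte(inner)
	if len(rs) > 1 && rs[0] == 0xff && rs[1] == 0xfe {
		rs = rs[2:]
	}
	return json.Unmarshal(rs, v)
}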
@@ -7,6 +7,7 @@ package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
@@ -762,8 +763,14 @@ func dropTableColumns(sess *xorm.Session, tableName string, columnNames ...strin
|
||||
}
|
||||
tableSQL := string(res[0]["sql"])
|
||||
|
||||
// Get the string offset for column definitions: `CREATE TABLE ( column-definitions... )`
|
||||
columnDefinitionsIndex := strings.Index(tableSQL, "(")
|
||||
if columnDefinitionsIndex < 0 {
|
||||
return errors.New("couldn't find column definitions")
|
||||
}
|
||||
|
||||
// Separate out the column definitions
|
||||
tableSQL = tableSQL[strings.Index(tableSQL, "("):]
|
||||
tableSQL = tableSQL[columnDefinitionsIndex:]
|
||||
|
||||
// Remove the required columnNames
|
||||
for _, name := range columnNames {
|
||||
|
||||
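The added check above turns a malformed CREATE TABLE statement into an error instead of slicing with the -1 that strings.Index returns when "(" is missing. Isolated, the guard looks like this (a sketch; the real function goes on to strip the named columns from the definition list):

package migrations

import (
	"errors"
	"strings"
)

// columnDefinitions returns the "( ... )" column-definition part of a
// "CREATE TABLE name ( ... )" statement, failing cleanly if it is absent.
func columnDefinitions(tableSQL string) (string, error) {
	idx := strings.Index(tableSQL, "(")
	if idx < 0 {
		return "", errors.New("couldn't find column definitions")
	}
	return tableSQL[idx:], nil
}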
@@ -205,6 +205,25 @@ func prepareTestEnv(t *testing.T, skip int, syncModels ...interface{}) (*xorm.En
|
||||
|
||||
assert.NoError(t, com.CopyDir(path.Join(filepath.Dir(setting.AppPath), "integrations/gitea-repositories-meta"),
|
||||
setting.RepoRootPath))
|
||||
ownerDirs, err := os.ReadDir(setting.RepoRootPath)
|
||||
if err != nil {
|
||||
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, ownerDir := range ownerDirs {
|
||||
if !ownerDir.Type().IsDir() {
|
||||
continue
|
||||
}
|
||||
repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
|
||||
if err != nil {
|
||||
assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, repoDir := range repoDirs {
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0755)
|
||||
}
|
||||
}
|
||||
|
||||
if err := deleteDB(); err != nil {
|
||||
t.Errorf("unable to reset database: %v", err)
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"xorm.io/xorm"
|
||||
)
|
||||
|
||||
@@ -40,8 +42,17 @@ func convertTaskTypeToString(x *xorm.Engine) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// to keep the migration could be rerun
|
||||
exist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "hook_task", "type")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exist {
|
||||
return nil
|
||||
}
|
||||
|
||||
for i, s := range hookTaskTypes {
|
||||
if _, err := x.Exec("UPDATE hook_task set typ = ? where type=?", s, i); err != nil {
|
||||
if _, err := x.Exec("UPDATE hook_task set typ = ? where `type`=?", s, i); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
@@ -19,6 +20,22 @@ func renameTaskErrorsToMessage(x *xorm.Engine) error {
|
||||
Status int `xorm:"index"`
|
||||
}
|
||||
|
||||
// This migration maybe rerun so that we should check if it has been run
|
||||
messageExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "message")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if messageExist {
|
||||
errorsExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "errors")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !errorsExist {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
sess := x.NewSession()
|
||||
defer sess.Close()
|
||||
if err := sess.Begin(); err != nil {
|
||||
@@ -29,6 +46,13 @@ func renameTaskErrorsToMessage(x *xorm.Engine) error {
|
||||
return fmt.Errorf("error on Sync2: %v", err)
|
||||
}
|
||||
|
||||
if messageExist {
|
||||
// if both errors and message exist, drop message at first
|
||||
if err := dropTableColumns(sess, "task", "message"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case setting.Database.UseMySQL:
|
||||
if _, err := sess.Exec("ALTER TABLE `task` CHANGE errors message text"); err != nil {
|
||||
|
||||
@@ -179,16 +179,35 @@ func syncTables() error {
|
||||
return x.StoreEngine("InnoDB").Sync2(tables...)
|
||||
}
|
||||
|
||||
// NewTestEngine sets a new test xorm.Engine
|
||||
func NewTestEngine() (err error) {
|
||||
// NewInstallTestEngine creates a new xorm.Engine for testing during install
|
||||
//
|
||||
// This function will cause the basic database schema to be created
|
||||
func NewInstallTestEngine(ctx context.Context, migrateFunc func(*xorm.Engine) error) (err error) {
|
||||
x, err = GetNewEngine()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Connect to database: %v", err)
|
||||
return fmt.Errorf("failed to connect to database: %w", err)
|
||||
}
|
||||
|
||||
x.SetMapper(names.GonicMapper{})
|
||||
x.SetLogger(NewXORMLogger(!setting.IsProd()))
|
||||
x.ShowSQL(!setting.IsProd())
|
||||
|
||||
x.SetDefaultContext(ctx)
|
||||
|
||||
if err = x.Ping(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We have to run migrateFunc here in case the user is re-running installation on a previously created DB.
|
||||
// If we do not then table schemas will be changed and there will be conflicts when the migrations run properly.
|
||||
//
|
||||
// Installation should only be being re-run if users want to recover an old database.
|
||||
// However, we should think carefully about should we support re-install on an installed instance,
|
||||
// as there may be other problems due to secret reinitialization.
|
||||
if err = migrateFunc(x); err != nil {
|
||||
return fmt.Errorf("migrate: %v", err)
|
||||
}
|
||||
|
||||
return syncTables()
|
||||
}
|
||||
|
||||
|
||||
@@ -455,7 +455,7 @@ func GetUserOrgsList(user *User) ([]*MinimalOrg, error) {
groupByStr := groupByCols.String()
groupByStr = groupByStr[0 : len(groupByStr)-1]

sess.Select(groupByStr+", count(repo_id) as org_count").
sess.Select(groupByStr+", count(distinct repo_id) as org_count").
Table("user").
Join("INNER", "team", "`team`.org_id = `user`.id").
Join("INNER", "team_user", "`team`.id = `team_user`.team_id").

@@ -115,7 +115,9 @@ func (p *Project) NumClosedIssues() int {
func (p *Project) NumOpenIssues() int {
c, err := x.Table("project_issue").
Join("INNER", "issue", "project_issue.issue_id=issue.id").
Where("project_issue.project_id=? AND issue.is_closed=?", p.ID, false).Count("issue.id")
Where("project_issue.project_id=? AND issue.is_closed=?", p.ID, false).
Cols("issue_id").
Count()
if err != nil {
return 0
}

@@ -502,6 +502,9 @@ func GetLatestPullRequestByHeadInfo(repoID int64, branch string) (*PullRequest,

// GetPullRequestByIndex returns a pull request by the given index
func GetPullRequestByIndex(repoID, index int64) (*PullRequest, error) {
if index < 1 {
return nil, ErrPullRequestNotExist{}
}
pr := &PullRequest{
BaseRepoID: repoID,
Index: index,

@@ -133,6 +133,10 @@ func TestGetPullRequestByIndex(t *testing.T) {
|
||||
_, err = GetPullRequestByIndex(9223372036854775807, 9223372036854775807)
|
||||
assert.Error(t, err)
|
||||
assert.True(t, IsErrPullRequestNotExist(err))
|
||||
|
||||
_, err = GetPullRequestByIndex(1, 0)
|
||||
assert.Error(t, err)
|
||||
assert.True(t, IsErrPullRequestNotExist(err))
|
||||
}
|
||||
|
||||
func TestGetPullRequestByID(t *testing.T) {
|
||||
|
||||
@@ -1152,16 +1152,6 @@ func CreateRepository(ctx DBContext, doer, u *User, repo *Repository, overwriteO
|
||||
return fmt.Errorf("recalculateAccesses: %v", err)
|
||||
}
|
||||
|
||||
if u.Visibility == api.VisibleTypePublic && !repo.IsPrivate {
|
||||
// Create/Remove git-daemon-export-ok for git-daemon...
|
||||
daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
|
||||
if f, err := os.Create(daemonExportFile); err != nil {
|
||||
log.Error("Failed to create %s: %v", daemonExportFile, err)
|
||||
} else {
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
if setting.Service.AutoWatchNewRepos {
|
||||
if err = watchRepo(ctx.e, doer.ID, repo.ID, true); err != nil {
|
||||
return fmt.Errorf("watchRepo: %v", err)
|
||||
@@ -1175,6 +1165,46 @@ func CreateRepository(ctx DBContext, doer, u *User, repo *Repository, overwriteO
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckDaemonExportOK creates/removes git-daemon-export-ok for git-daemon...
|
||||
func (repo *Repository) CheckDaemonExportOK() error {
|
||||
return repo.checkDaemonExportOK(x)
|
||||
}
|
||||
|
||||
// CheckDaemonExportOKCtx creates/removes git-daemon-export-ok for git-daemon...
|
||||
func (repo *Repository) CheckDaemonExportOKCtx(ctx DBContext) error {
|
||||
return repo.checkDaemonExportOK(ctx.e)
|
||||
}
|
||||
|
||||
func (repo *Repository) checkDaemonExportOK(e Engine) error {
|
||||
if err := repo.getOwner(e); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create/Remove git-daemon-export-ok for git-daemon...
|
||||
daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
|
||||
|
||||
isExist, err := util.IsExist(daemonExportFile)
|
||||
if err != nil {
|
||||
log.Error("Unable to check if %s exists. Error: %v", daemonExportFile, err)
|
||||
return err
|
||||
}
|
||||
|
||||
isPublic := !repo.IsPrivate && repo.Owner.Visibility == api.VisibleTypePublic
|
||||
if !isPublic && isExist {
|
||||
if err = util.Remove(daemonExportFile); err != nil {
|
||||
log.Error("Failed to remove %s: %v", daemonExportFile, err)
|
||||
}
|
||||
} else if isPublic && !isExist {
|
||||
if f, err := os.Create(daemonExportFile); err != nil {
|
||||
log.Error("Failed to create %s: %v", daemonExportFile, err)
|
||||
} else {
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func countRepositories(userID int64, private bool) int64 {
|
||||
sess := x.Where("id > 0")
|
||||
|
||||
@@ -1217,6 +1247,12 @@ func IncrementRepoForkNum(ctx DBContext, repoID int64) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// DecrementRepoForkNum decrement repository fork number
|
||||
func DecrementRepoForkNum(ctx DBContext, repoID int64) error {
|
||||
_, err := ctx.e.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repoID)
|
||||
return err
|
||||
}
|
||||
|
||||
// ChangeRepositoryName changes all corresponding setting from old repository name to new one.
|
||||
func ChangeRepositoryName(doer *User, repo *Repository, newRepoName string) (err error) {
|
||||
oldRepoName := repo.Name
|
||||
@@ -1318,24 +1354,9 @@ func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err e
|
||||
}
|
||||
|
||||
// Create/Remove git-daemon-export-ok for git-daemon...
|
||||
daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
|
||||
isExist, err := util.IsExist(daemonExportFile)
|
||||
isPublic := !repo.IsPrivate && repo.Owner.Visibility == api.VisibleTypePublic
|
||||
if err != nil {
|
||||
log.Error("Unable to check if %s exists. Error: %v", daemonExportFile, err)
|
||||
if err := repo.checkDaemonExportOK(e); err != nil {
|
||||
return err
|
||||
}
|
||||
if !isPublic && isExist {
|
||||
if err = util.Remove(daemonExportFile); err != nil {
|
||||
log.Error("Failed to remove %s: %v", daemonExportFile, err)
|
||||
}
|
||||
} else if isPublic && !isExist {
|
||||
if f, err := os.Create(daemonExportFile); err != nil {
|
||||
log.Error("Failed to create %s: %v", daemonExportFile, err)
|
||||
} else {
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
forkRepos, err := getRepositoriesByForkID(e, repo.ID)
|
||||
if err != nil {
|
||||
|
||||
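checkDaemonExportOK reduces to keeping a marker file in sync with a boolean: git-daemon-export-ok must exist exactly when the repository is publicly visible. A standalone sketch of that reconcile step (paths and names are illustrative; the real code logs create/remove failures rather than returning on them):

package repoutil

import (
	"os"
	"path/filepath"
)

// syncMarkerFile creates or removes the marker file so that it exists exactly
// when shouldExist is true.
func syncMarkerFile(repoPath string, shouldExist bool) error {
	marker := filepath.Join(repoPath, "git-daemon-export-ok")
	_, err := os.Stat(marker)
	exists := err == nil
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	switch {
	case shouldExist && !exists:
		f, err := os.Create(marker)
		if err != nil {
			return err
		}
		return f.Close()
	case !shouldExist && exists:
		return os.Remove(marker)
	}
	return nil
}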
@@ -8,6 +8,7 @@ package models
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
|
||||
"xorm.io/builder"
|
||||
@@ -83,16 +84,21 @@ func (repo *Repository) getCollaborators(e Engine, listOptions ListOptions) ([]*
|
||||
return nil, fmt.Errorf("getCollaborations: %v", err)
|
||||
}
|
||||
|
||||
collaborators := make([]*Collaborator, len(collaborations))
|
||||
for i, c := range collaborations {
|
||||
collaborators := make([]*Collaborator, 0, len(collaborations))
|
||||
for _, c := range collaborations {
|
||||
user, err := getUserByID(e, c.UserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if IsErrUserNotExist(err) {
|
||||
log.Warn("Inconsistent DB: User: %d is listed as collaborator of %-v but does not exist", c.UserID, repo)
|
||||
user = NewGhostUser()
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
collaborators[i] = &Collaborator{
|
||||
collaborators = append(collaborators, &Collaborator{
|
||||
User: user,
|
||||
Collaboration: c,
|
||||
}
|
||||
})
|
||||
}
|
||||
return collaborators, nil
|
||||
}
|
||||
|
||||
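Rather than failing the whole collaborator listing when a referenced user row has disappeared, the change substitutes a ghost placeholder and logs the inconsistency. The fallback in isolation, with stand-in types (the real code uses IsErrUserNotExist and NewGhostUser):

package userutil

import "errors"

// errUserNotExist and user are hypothetical stand-ins for the model types.
var errUserNotExist = errors.New("user does not exist")

type user struct {
	ID    int64
	Ghost bool
}

// loadOrGhost returns a ghost placeholder when the referenced user is missing,
// and propagates every other lookup error unchanged.
func loadOrGhost(lookup func(id int64) (*user, error), id int64) (*user, error) {
	u, err := lookup(id)
	if err != nil {
		if errors.Is(err, errUserNotExist) {
			return &user{ID: id, Ghost: true}, nil
		}
		return nil, err
	}
	return u, nil
}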
@@ -21,13 +21,18 @@ func (repo *Repository) CanEnableTimetracker() bool {
|
||||
|
||||
// IsTimetrackerEnabled returns whether or not the timetracker is enabled. It returns the default value from config if an error occurs.
|
||||
func (repo *Repository) IsTimetrackerEnabled() bool {
|
||||
return repo.isTimetrackerEnabled(x)
|
||||
}
|
||||
|
||||
// IsTimetrackerEnabled returns whether or not the timetracker is enabled. It returns the default value from config if an error occurs.
|
||||
func (repo *Repository) isTimetrackerEnabled(e Engine) bool {
|
||||
if !setting.Service.EnableTimetracking {
|
||||
return false
|
||||
}
|
||||
|
||||
var u *RepoUnit
|
||||
var err error
|
||||
if u, err = repo.GetUnit(UnitTypeIssues); err != nil {
|
||||
if u, err = repo.getUnit(e, UnitTypeIssues); err != nil {
|
||||
return setting.Service.DefaultEnableTimetracking
|
||||
}
|
||||
return u.IssuesConfig().EnableTimetracker
|
||||
|
||||
@@ -269,6 +269,14 @@ func TransferOwnership(doer *User, newOwnerName string, repo *Repository) (err e
|
||||
// Dummy object.
|
||||
collaboration := &Collaboration{RepoID: repo.ID}
|
||||
for _, c := range collaborators {
|
||||
if c.IsGhost() {
|
||||
collaboration.ID = c.Collaboration.ID
|
||||
if _, err := sess.Delete(collaboration); err != nil {
|
||||
return fmt.Errorf("remove collaborator '%d': %v", c.ID, err)
|
||||
}
|
||||
collaboration.ID = 0
|
||||
}
|
||||
|
||||
if c.ID != newOwner.ID {
|
||||
isMember, err := isOrganizationMember(sess, newOwner.ID, c.ID)
|
||||
if err != nil {
|
||||
@@ -281,6 +289,7 @@ func TransferOwnership(doer *User, newOwnerName string, repo *Repository) (err e
|
||||
if _, err := sess.Delete(collaboration); err != nil {
|
||||
return fmt.Errorf("remove collaborator '%d': %v", c.ID, err)
|
||||
}
|
||||
collaboration.UserID = 0
|
||||
}
|
||||
|
||||
// Remove old team-repository relations.
|
||||
|
||||
@@ -28,7 +28,7 @@ type UnitConfig struct{}
|
||||
|
||||
// FromDB fills up a UnitConfig from serialized format.
|
||||
func (cfg *UnitConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a UnitConfig to a serialized format.
|
||||
@@ -44,7 +44,7 @@ type ExternalWikiConfig struct {
|
||||
|
||||
// FromDB fills up a ExternalWikiConfig from serialized format.
|
||||
func (cfg *ExternalWikiConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a ExternalWikiConfig to a serialized format.
|
||||
@@ -62,7 +62,7 @@ type ExternalTrackerConfig struct {
|
||||
|
||||
// FromDB fills up a ExternalTrackerConfig from serialized format.
|
||||
func (cfg *ExternalTrackerConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a ExternalTrackerConfig to a serialized format.
|
||||
@@ -80,7 +80,7 @@ type IssuesConfig struct {
|
||||
|
||||
// FromDB fills up a IssuesConfig from serialized format.
|
||||
func (cfg *IssuesConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a IssuesConfig to a serialized format.
|
||||
@@ -104,7 +104,7 @@ type PullRequestsConfig struct {
|
||||
|
||||
// FromDB fills up a PullRequestsConfig from serialized format.
|
||||
func (cfg *PullRequestsConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a PullRequestsConfig to a serialized format.
|
||||
@@ -219,3 +219,9 @@ func getUnitsByRepoID(e Engine, repoID int64) (units []*RepoUnit, err error) {
|
||||
|
||||
return units, nil
|
||||
}
|
||||
|
||||
// UpdateRepoUnit updates the provided repo unit
|
||||
func UpdateRepoUnit(unit *RepoUnit) error {
|
||||
_, err := x.ID(unit.ID).Update(unit)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -434,7 +434,7 @@ func SubmitReview(doer *User, issue *Issue, reviewType ReviewType, content, comm
// try to remove team review request if need
if issue.Repo.Owner.IsOrganization() && (reviewType == ReviewTypeApprove || reviewType == ReviewTypeReject) {
teamReviewRequests := make([]*Review, 0, 10)
if err := sess.SQL("SELECT * FROM review WHERE reviewer_team_id > 0 AND type = ?", ReviewTypeRequest).Find(&teamReviewRequests); err != nil {
if err := sess.SQL("SELECT * FROM review WHERE issue_id = ? AND reviewer_team_id > 0 AND type = ?", issue.ID, ReviewTypeRequest).Find(&teamReviewRequests); err != nil {
return nil, nil, err
}

@@ -52,7 +52,7 @@ func (list U2FRegistrationList) ToRegistrations() []u2f.Registration {
for _, reg := range list {
r, err := reg.Parse()
if err != nil {
log.Fatal("parsing u2f registration: %v", err)
log.Error("parsing u2f registration: %v", err)
continue
}
regs = append(regs, *r)

@@ -5,6 +5,7 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -27,6 +28,7 @@ func TestGetU2FRegistrationsByUID(t *testing.T) {
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
|
||||
res, err := GetU2FRegistrationsByUID(1)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, res, 1)
|
||||
assert.Equal(t, "U2F Key", res[0].Name)
|
||||
@@ -71,3 +73,27 @@ func TestDeleteRegistration(t *testing.T) {
|
||||
assert.NoError(t, DeleteRegistration(reg))
|
||||
AssertNotExistsBean(t, &U2FRegistration{ID: 1})
|
||||
}
|
||||
|
||||
const validU2FRegistrationResponseHex = "0504b174bc49c7ca254b70d2e5c207cee9cf174820ebd77ea3c65508c26da51b657c1cc6b952f8621697936482da0a6d3d3826a59095daf6cd7c03e2e60385d2f6d9402a552dfdb7477ed65fd84133f86196010b2215b57da75d315b7b9e8fe2e3925a6019551bab61d16591659cbaf00b4950f7abfe6660e2e006f76868b772d70c253082013c3081e4a003020102020a47901280001155957352300a06082a8648ce3d0403023017311530130603550403130c476e756262792050696c6f74301e170d3132303831343138323933325a170d3133303831343138323933325a3031312f302d0603550403132650696c6f74476e756262792d302e342e312d34373930313238303030313135353935373335323059301306072a8648ce3d020106082a8648ce3d030107034200048d617e65c9508e64bcc5673ac82a6799da3c1446682c258c463fffdf58dfd2fa3e6c378b53d795c4a4dffb4199edd7862f23abaf0203b4b8911ba0569994e101300a06082a8648ce3d0403020347003044022060cdb6061e9c22262d1aac1d96d8c70829b2366531dda268832cb836bcd30dfa0220631b1459f09e6330055722c8d89b7f48883b9089b88d60d1d9795902b30410df304502201471899bcc3987e62e8202c9b39c33c19033f7340352dba80fcab017db9230e402210082677d673d891933ade6f617e5dbde2e247e70423fd5ad7804a6d3d3961ef871"
|
||||
|
||||
func TestToRegistrations_SkipInvalidItemsWithoutCrashing(t *testing.T) {
|
||||
regKeyRaw, _ := hex.DecodeString(validU2FRegistrationResponseHex)
|
||||
regs := U2FRegistrationList{
|
||||
&U2FRegistration{ID: 1},
|
||||
&U2FRegistration{ID: 2, Name: "U2F Key", UserID: 2, Counter: 0, Raw: regKeyRaw, CreatedUnix: 946684800, UpdatedUnix: 946684800},
|
||||
}
|
||||
|
||||
actual := regs.ToRegistrations()
|
||||
assert.Len(t, actual, 1)
|
||||
}
|
||||
|
||||
func TestToRegistrations(t *testing.T) {
|
||||
regKeyRaw, _ := hex.DecodeString(validU2FRegistrationResponseHex)
|
||||
regs := U2FRegistrationList{
|
||||
&U2FRegistration{ID: 1, Name: "U2F Key", UserID: 1, Counter: 0, Raw: regKeyRaw, CreatedUnix: 946684800, UpdatedUnix: 946684800},
|
||||
&U2FRegistration{ID: 2, Name: "U2F Key", UserID: 2, Counter: 0, Raw: regKeyRaw, CreatedUnix: 946684800, UpdatedUnix: 946684800},
|
||||
}
|
||||
|
||||
actual := regs.ToRegistrations()
|
||||
assert.Len(t, actual, 2)
|
||||
}
|
||||
|
||||
@@ -87,6 +87,26 @@ func MainTest(m *testing.M, pathToGiteaRoot string) {
|
||||
fatalTestError("util.CopyDir: %v\n", err)
|
||||
}
|
||||
|
||||
ownerDirs, err := os.ReadDir(setting.RepoRootPath)
|
||||
if err != nil {
|
||||
fatalTestError("unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, ownerDir := range ownerDirs {
|
||||
if !ownerDir.Type().IsDir() {
|
||||
continue
|
||||
}
|
||||
repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
|
||||
if err != nil {
|
||||
fatalTestError("unable to read the new repo root: %v\n", err)
|
||||
}
|
||||
for _, repoDir := range repoDirs {
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0755)
|
||||
}
|
||||
}
|
||||
|
||||
exitStatus := m.Run()
|
||||
if err = util.RemoveAll(setting.RepoRootPath); err != nil {
|
||||
fatalTestError("util.RemoveAll: %v\n", err)
|
||||
@@ -128,6 +148,23 @@ func PrepareTestEnv(t testing.TB) {
|
||||
assert.NoError(t, util.RemoveAll(setting.RepoRootPath))
|
||||
metaPath := filepath.Join(giteaRoot, "integrations", "gitea-repositories-meta")
|
||||
assert.NoError(t, util.CopyDir(metaPath, setting.RepoRootPath))
|
||||
|
||||
ownerDirs, err := os.ReadDir(setting.RepoRootPath)
|
||||
assert.NoError(t, err)
|
||||
for _, ownerDir := range ownerDirs {
|
||||
if !ownerDir.Type().IsDir() {
|
||||
continue
|
||||
}
|
||||
repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
|
||||
assert.NoError(t, err)
|
||||
for _, repoDir := range repoDirs {
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0755)
|
||||
_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0755)
|
||||
}
|
||||
}
|
||||
|
||||
base.SetupGiteaRoot() // Makes sure GITEA_ROOT is set
|
||||
}
|
||||
|
||||
|
||||
@@ -77,9 +77,6 @@ var (
|
||||
// ErrEmailNotActivated e-mail address has not been activated error
|
||||
ErrEmailNotActivated = errors.New("E-mail address has not been activated")
|
||||
|
||||
// ErrUserNameIllegal user name contains illegal characters error
|
||||
ErrUserNameIllegal = errors.New("User name contains illegal characters")
|
||||
|
||||
// ErrLoginSourceNotActived login source is not actived error
|
||||
ErrLoginSourceNotActived = errors.New("Login source is not actived")
|
||||
|
||||
@@ -296,7 +293,7 @@ func (u *User) CanImportLocal() bool {
|
||||
// DashboardLink returns the user dashboard page link.
|
||||
func (u *User) DashboardLink() string {
|
||||
if u.IsOrganization() {
|
||||
return u.OrganisationLink() + "/dashboard/"
|
||||
return u.OrganisationLink() + "/dashboard"
|
||||
}
|
||||
return setting.AppSubURL + "/"
|
||||
}
|
||||
@@ -1062,9 +1059,9 @@ func checkDupEmail(e Engine, u *User) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateUser check if user is valide to insert / update into database
|
||||
// validateUser check if user is valid to insert / update into database
|
||||
func validateUser(u *User) error {
|
||||
if !setting.Service.AllowedUserVisibilityModesSlice.IsAllowedVisibility(u.Visibility) {
|
||||
if !setting.Service.AllowedUserVisibilityModesSlice.IsAllowedVisibility(u.Visibility) && !u.IsOrganization() {
|
||||
return fmt.Errorf("visibility Mode not allowed: %s", u.Visibility.String())
|
||||
}
|
||||
|
||||
@@ -1072,18 +1069,46 @@ func validateUser(u *User) error {
|
||||
return ValidateEmail(u.Email)
|
||||
}
|
||||
|
||||
func updateUser(e Engine, u *User) error {
|
||||
func updateUser(e Engine, u *User, changePrimaryEmail bool) error {
|
||||
if err := validateUser(u); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if changePrimaryEmail {
|
||||
var emailAddress EmailAddress
|
||||
has, err := e.Where("lower_email=?", strings.ToLower(u.Email)).Get(&emailAddress)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !has {
|
||||
// 1. Update old primary email
|
||||
if _, err = e.Where("uid=? AND is_primary=?", u.ID, true).Cols("is_primary").Update(&EmailAddress{
|
||||
IsPrimary: false,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
emailAddress.Email = u.Email
|
||||
emailAddress.UID = u.ID
|
||||
emailAddress.IsActivated = true
|
||||
emailAddress.IsPrimary = true
|
||||
if _, err := e.Insert(&emailAddress); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if _, err := e.ID(emailAddress.ID).Cols("is_primary").Update(&EmailAddress{
|
||||
IsPrimary: true,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
_, err := e.ID(u.ID).AllCols().Update(u)
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateUser updates user's information.
|
||||
func UpdateUser(u *User) error {
|
||||
return updateUser(x, u)
|
||||
func UpdateUser(u *User, changePrimaryEmail bool) error {
|
||||
return updateUser(x, u, changePrimaryEmail)
|
||||
}
|
||||
|
||||
// UpdateUserCols update user according special columns
|
||||
@@ -1112,7 +1137,7 @@ func UpdateUserSetting(u *User) (err error) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err = updateUser(sess, u); err != nil {
|
||||
if err = updateUser(sess, u, false); err != nil {
|
||||
return err
|
||||
}
|
||||
return sess.Commit()
|
||||
|
||||
@@ -7,6 +7,9 @@ package models
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -37,6 +40,10 @@ func TestGetUserHeatmapDataByUser(t *testing.T) {
|
||||
// Prepare
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
|
||||
// Mock time
|
||||
timeutil.Set(time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC))
|
||||
defer timeutil.Unset()
|
||||
|
||||
for i, tc := range testCases {
|
||||
user := AssertExistsAndLoadBean(t, &User{ID: tc.userID}).(*User)
|
||||
|
||||
|
||||
@@ -475,17 +475,17 @@ func TestUpdateUser(t *testing.T) {
|
||||
user := AssertExistsAndLoadBean(t, &User{ID: 2}).(*User)
|
||||
|
||||
user.KeepActivityPrivate = true
|
||||
assert.NoError(t, UpdateUser(user))
|
||||
assert.NoError(t, UpdateUser(user, false))
|
||||
user = AssertExistsAndLoadBean(t, &User{ID: 2}).(*User)
|
||||
assert.True(t, user.KeepActivityPrivate)
|
||||
|
||||
setting.Service.AllowedUserVisibilityModesSlice = []bool{true, false, false}
|
||||
user.KeepActivityPrivate = false
|
||||
user.Visibility = structs.VisibleTypePrivate
|
||||
assert.Error(t, UpdateUser(user))
|
||||
assert.Error(t, UpdateUser(user, false))
|
||||
user = AssertExistsAndLoadBean(t, &User{ID: 2}).(*User)
|
||||
assert.True(t, user.KeepActivityPrivate)
|
||||
|
||||
user.Email = "no mail@mail.org"
|
||||
assert.Error(t, UpdateUser(user))
|
||||
assert.Error(t, UpdateUser(user, true))
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build pam
|
||||
// +build pam
|
||||
|
||||
// Copyright 2014 The Gogs Authors. All rights reserved.
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
// +build !pam
|
||||
|
||||
// Copyright 2014 The Gogs Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !pam
|
||||
// +build !pam
|
||||
|
||||
package pam
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build pam
|
||||
// +build pam
|
||||
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
|
||||
"github.com/gogs/chardet"
|
||||
"golang.org/x/net/html/charset"
|
||||
@@ -26,9 +27,9 @@ var UTF8BOM = []byte{'\xef', '\xbb', '\xbf'}
|
||||
// ToUTF8WithFallbackReader detects the encoding of content and coverts to UTF-8 reader if possible
|
||||
func ToUTF8WithFallbackReader(rd io.Reader) io.Reader {
|
||||
var buf = make([]byte, 2048)
|
||||
n, err := rd.Read(buf)
|
||||
n, err := util.ReadAtMost(rd, buf)
|
||||
if err != nil {
|
||||
return rd
|
||||
return io.MultiReader(bytes.NewReader(RemoveBOMIfPresent(buf[:n])), rd)
|
||||
}
|
||||
|
||||
charsetLabel, err := DetectEncoding(buf[:n])
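Several hunks in this change set swap a bare rd.Read(buf) for util.ReadAtMost(rd, buf) so that short reads from pipes or network streams no longer truncate the detection buffer. As a hedged sketch only (the real helper in modules/util may differ in detail), the intent can be approximated like this:

package util

import "io"

// readAtMost is an illustrative approximation of the intent behind util.ReadAtMost:
// fill buf as far as the reader allows, up to len(buf), and treat running out of
// input as success rather than an error. The helper actually used by this change
// set may differ in its exact implementation.
func readAtMost(r io.Reader, buf []byte) (n int, err error) {
	n, err = io.ReadFull(r, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil // fewer bytes than len(buf) were available; that is fine here
	}
	return n, err
}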
@@ -223,6 +223,9 @@ func APIAuth(authMethod auth.Auth) func(*APIContext) {
|
||||
// Get user from session if logged in.
|
||||
ctx.User = authMethod.Verify(ctx.Req, ctx.Resp, ctx, ctx.Session)
|
||||
if ctx.User != nil {
|
||||
if ctx.Locale.Language() != ctx.User.Language {
|
||||
ctx.Locale = middleware.Locale(ctx.Resp, ctx.Req)
|
||||
}
|
||||
ctx.IsBasicAuth = ctx.Data["AuthedMethod"].(string) == new(auth.Basic).Name()
|
||||
ctx.IsSigned = true
|
||||
ctx.Data["IsSigned"] = ctx.IsSigned
|
||||
|
||||
@@ -587,6 +587,17 @@ func GetContext(req *http.Request) *Context {
|
||||
return req.Context().Value(contextKey).(*Context)
|
||||
}
|
||||
|
||||
// GetContextUser returns context user
|
||||
func GetContextUser(req *http.Request) *models.User {
|
||||
if apiContext, ok := req.Context().Value(apiContextKey).(*APIContext); ok {
|
||||
return apiContext.User
|
||||
}
|
||||
if ctx, ok := req.Context().Value(contextKey).(*Context); ok {
|
||||
return ctx.User
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignedUserName returns signed user's name via context
|
||||
func SignedUserName(req *http.Request) string {
|
||||
if middleware.IsInternalPath(req) {
|
||||
@@ -631,6 +642,9 @@ func Auth(authMethod auth.Auth) func(*Context) {
|
||||
return func(ctx *Context) {
|
||||
ctx.User = authMethod.Verify(ctx.Req, ctx.Resp, ctx, ctx.Session)
|
||||
if ctx.User != nil {
|
||||
if ctx.Locale.Language() != ctx.User.Language {
|
||||
ctx.Locale = middleware.Locale(ctx.Resp, ctx.Req)
|
||||
}
|
||||
ctx.IsBasicAuth = ctx.Data["AuthedMethod"].(string) == new(auth.Basic).Name()
|
||||
ctx.IsSigned = true
|
||||
ctx.Data["IsSigned"] = ctx.IsSigned
|
||||
@@ -658,6 +672,7 @@ func Contexter() func(next http.Handler) http.Handler {
|
||||
var locale = middleware.Locale(resp, req)
|
||||
var startTime = time.Now()
|
||||
var link = setting.AppSubURL + strings.TrimSuffix(req.URL.EscapedPath(), "/")
|
||||
|
||||
var ctx = Context{
|
||||
Resp: NewResponse(resp),
|
||||
Cache: mc.GetCache(),
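The GetContextUser helper added above lets plain net/http code resolve the signed-in user whether the request carries an API context or a web context. A minimal usage sketch, assuming the handler below (whoami is hypothetical, not part of this change set) is mounted behind the Contexter or APIContexter middleware:

package example

import (
	"fmt"
	"net/http"

	gitea_context "code.gitea.io/gitea/modules/context"
)

// whoami is a hypothetical handler; it relies only on GetContextUser as added above
// and returns 401 for anonymous requests.
func whoami(w http.ResponseWriter, req *http.Request) {
	user := gitea_context.GetContextUser(req) // nil when nobody is signed in
	if user == nil {
		http.Error(w, "not signed in", http.StatusUnauthorized)
		return
	}
	fmt.Fprintf(w, "signed in as %s\n", user.Name)
}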
@@ -58,6 +58,7 @@ type Repository struct {
|
||||
Commit *git.Commit
|
||||
Tag *git.Tag
|
||||
GitRepo *git.Repository
|
||||
RefName string
|
||||
BranchName string
|
||||
TagName string
|
||||
TreePath string
|
||||
@@ -190,9 +191,9 @@ func (r *Repository) BranchNameSubURL() string {
|
||||
case r.IsViewBranch:
|
||||
return "branch/" + r.BranchName
|
||||
case r.IsViewTag:
|
||||
return "tag/" + r.BranchName
|
||||
return "tag/" + r.TagName
|
||||
case r.IsViewCommit:
|
||||
return "commit/" + r.BranchName
|
||||
return "commit/" + r.CommitID
|
||||
}
|
||||
log.Error("Unknown view type for repo: %v", r)
|
||||
return ""
|
||||
@@ -345,7 +346,7 @@ func repoAssignment(ctx *Context, repo *models.Repository) {
|
||||
}
|
||||
|
||||
// Check access.
|
||||
if ctx.Repo.Permission.AccessMode == models.AccessModeNone {
|
||||
if !ctx.Repo.Permission.HasAccess() {
|
||||
if ctx.Query("go-get") == "1" {
|
||||
EarlyResponseForGoGetMeta(ctx)
|
||||
return
|
||||
@@ -562,8 +563,6 @@ func RepoAssignment(ctx *Context) (cancel context.CancelFunc) {
|
||||
ctx.Data["Branches"] = brs
|
||||
ctx.Data["BranchesCount"] = len(brs)
|
||||
|
||||
ctx.Data["TagName"] = ctx.Repo.TagName
|
||||
|
||||
// If not branch selected, try default one.
|
||||
// If default branch doesn't exists, fall back to some other branch.
|
||||
if len(ctx.Repo.BranchName) == 0 {
|
||||
@@ -572,9 +571,9 @@ func RepoAssignment(ctx *Context) (cancel context.CancelFunc) {
|
||||
} else if len(brs) > 0 {
|
||||
ctx.Repo.BranchName = brs[0]
|
||||
}
|
||||
ctx.Repo.RefName = ctx.Repo.BranchName
|
||||
}
|
||||
ctx.Data["BranchName"] = ctx.Repo.BranchName
|
||||
ctx.Data["CommitID"] = ctx.Repo.CommitID
|
||||
|
||||
// People who have push access or have forked repository can propose a new pull request.
|
||||
canPush := ctx.Repo.CanWrite(models.UnitTypeCode) || (ctx.IsSigned && ctx.User.HasForkedRepo(ctx.Repo.Repository.ID))
|
||||
@@ -695,7 +694,7 @@ func getRefName(ctx *Context, pathType RepoRefType) string {
|
||||
}
|
||||
// For legacy and API support only full commit sha
|
||||
parts := strings.Split(path, "/")
|
||||
if len(parts) > 0 && len(parts[0]) == 40 {
|
||||
if len(parts) > 1 && len(parts[0]) == 40 {
|
||||
ctx.Repo.TreePath = strings.Join(parts[1:], "/")
|
||||
return parts[0]
|
||||
}
|
||||
@@ -759,7 +758,6 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
// Get default branch.
|
||||
if len(ctx.Params("*")) == 0 {
|
||||
refName = ctx.Repo.Repository.DefaultBranch
|
||||
ctx.Repo.BranchName = refName
|
||||
if !ctx.Repo.GitRepo.IsBranchExist(refName) {
|
||||
brs, _, err := ctx.Repo.GitRepo.GetBranches(0, 0)
|
||||
if err != nil {
|
||||
@@ -773,6 +771,8 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
}
|
||||
refName = brs[0]
|
||||
}
|
||||
ctx.Repo.RefName = refName
|
||||
ctx.Repo.BranchName = refName
|
||||
ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetBranchCommit(refName)
|
||||
if err != nil {
|
||||
ctx.ServerError("GetBranchCommit", err)
|
||||
@@ -783,9 +783,10 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
|
||||
} else {
|
||||
refName = getRefName(ctx, refType)
|
||||
ctx.Repo.BranchName = refName
|
||||
ctx.Repo.RefName = refName
|
||||
if refType.RefTypeIncludesBranches() && ctx.Repo.GitRepo.IsBranchExist(refName) {
|
||||
ctx.Repo.IsViewBranch = true
|
||||
ctx.Repo.BranchName = refName
|
||||
|
||||
ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetBranchCommit(refName)
|
||||
if err != nil {
|
||||
@@ -796,6 +797,8 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
|
||||
} else if refType.RefTypeIncludesTags() && ctx.Repo.GitRepo.IsTagExist(refName) {
|
||||
ctx.Repo.IsViewTag = true
|
||||
ctx.Repo.TagName = refName
|
||||
|
||||
ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetTagCommit(refName)
|
||||
if err != nil {
|
||||
ctx.ServerError("GetTagCommit", err)
|
||||
@@ -830,13 +833,14 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
setting.AppSubURL,
|
||||
strings.TrimSuffix(ctx.Req.URL.Path, ctx.Params("*")),
|
||||
ctx.Repo.BranchNameSubURL(),
|
||||
ctx.Repo.TreePath))
|
||||
util.PathEscapeSegments(ctx.Repo.TreePath)))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ctx.Data["BranchName"] = ctx.Repo.BranchName
|
||||
ctx.Data["BranchNameSubURL"] = ctx.Repo.BranchNameSubURL()
|
||||
ctx.Data["TagName"] = ctx.Repo.TagName
|
||||
ctx.Data["CommitID"] = ctx.Repo.CommitID
|
||||
ctx.Data["TreePath"] = ctx.Repo.TreePath
|
||||
ctx.Data["IsViewBranch"] = ctx.Repo.IsViewBranch
|
||||
|
||||
@@ -147,8 +147,9 @@ func ToCommit(repo *models.Repository, commit *git.Commit, userCache map[string]
|
||||
|
||||
return &api.Commit{
|
||||
CommitMeta: &api.CommitMeta{
|
||||
URL: repo.APIURL() + "/git/commits/" + commit.ID.String(),
|
||||
SHA: commit.ID.String(),
|
||||
URL: repo.APIURL() + "/git/commits/" + commit.ID.String(),
|
||||
SHA: commit.ID.String(),
|
||||
Created: commit.Committer.When,
|
||||
},
|
||||
HTMLURL: repo.HTMLURL() + "/commit/" + commit.ID.String(),
|
||||
RepoCommit: &api.RepoCommit{
|
||||
@@ -169,8 +170,9 @@ func ToCommit(repo *models.Repository, commit *git.Commit, userCache map[string]
|
||||
},
|
||||
Message: commit.Message(),
|
||||
Tree: &api.CommitMeta{
|
||||
URL: repo.APIURL() + "/git/trees/" + commit.ID.String(),
|
||||
SHA: commit.ID.String(),
|
||||
URL: repo.APIURL() + "/git/trees/" + commit.ID.String(),
|
||||
SHA: commit.ID.String(),
|
||||
Created: commit.Committer.When,
|
||||
},
|
||||
},
|
||||
Author: apiAuthor,
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
// ToAPIPullRequest assumes following fields have been assigned with valid values:
|
||||
// Required - Issue
|
||||
// Optional - Merger
|
||||
func ToAPIPullRequest(pr *models.PullRequest) *api.PullRequest {
|
||||
func ToAPIPullRequest(pr *models.PullRequest, doer *models.User) *api.PullRequest {
|
||||
var (
|
||||
baseBranch *git.Branch
|
||||
headBranch *git.Branch
|
||||
@@ -41,6 +41,12 @@ func ToAPIPullRequest(pr *models.PullRequest) *api.PullRequest {
|
||||
return nil
|
||||
}
|
||||
|
||||
perm, err := models.GetUserRepoPermission(pr.BaseRepo, doer)
|
||||
if err != nil {
|
||||
log.Error("GetUserRepoPermission[%d]: %v", pr.BaseRepoID, err)
|
||||
perm.AccessMode = models.AccessModeNone
|
||||
}
|
||||
|
||||
apiPullRequest := &api.PullRequest{
|
||||
ID: pr.ID,
|
||||
URL: pr.Issue.HTMLURL(),
|
||||
@@ -68,7 +74,7 @@ func ToAPIPullRequest(pr *models.PullRequest) *api.PullRequest {
|
||||
Name: pr.BaseBranch,
|
||||
Ref: pr.BaseBranch,
|
||||
RepoID: pr.BaseRepoID,
|
||||
Repository: ToRepo(pr.BaseRepo, models.AccessModeNone),
|
||||
Repository: ToRepo(pr.BaseRepo, perm.AccessMode),
|
||||
},
|
||||
Head: &api.PRBranchInfo{
|
||||
Name: pr.HeadBranch,
|
||||
@@ -96,8 +102,14 @@ func ToAPIPullRequest(pr *models.PullRequest) *api.PullRequest {
|
||||
}
|
||||
|
||||
if pr.HeadRepo != nil {
|
||||
perm, err := models.GetUserRepoPermission(pr.HeadRepo, doer)
|
||||
if err != nil {
|
||||
log.Error("GetUserRepoPermission[%d]: %v", pr.HeadRepoID, err)
|
||||
perm.AccessMode = models.AccessModeNone
|
||||
}
|
||||
|
||||
apiPullRequest.Head.RepoID = pr.HeadRepo.ID
|
||||
apiPullRequest.Head.Repository = ToRepo(pr.HeadRepo, models.AccessModeNone)
|
||||
apiPullRequest.Head.Repository = ToRepo(pr.HeadRepo, perm.AccessMode)
|
||||
|
||||
headGitRepo, err := git.OpenRepository(pr.HeadRepo.RepoPath())
|
||||
if err != nil {
|
||||
|
||||
@@ -20,14 +20,14 @@ func TestPullRequest_APIFormat(t *testing.T) {
|
||||
pr := models.AssertExistsAndLoadBean(t, &models.PullRequest{ID: 1}).(*models.PullRequest)
|
||||
assert.NoError(t, pr.LoadAttributes())
|
||||
assert.NoError(t, pr.LoadIssue())
|
||||
apiPullRequest := ToAPIPullRequest(pr)
|
||||
apiPullRequest := ToAPIPullRequest(pr, nil)
|
||||
assert.NotNil(t, apiPullRequest)
|
||||
assert.EqualValues(t, &structs.PRBranchInfo{
|
||||
Name: "branch1",
|
||||
Ref: "refs/pull/2/head",
|
||||
Sha: "4a357436d925b5c974181ff12a994538ddc5a269",
|
||||
RepoID: 1,
|
||||
Repository: ToRepo(headRepo, models.AccessModeNone),
|
||||
Repository: ToRepo(headRepo, models.AccessModeRead),
|
||||
}, apiPullRequest.Head)
|
||||
|
||||
//withOut HeadRepo
|
||||
@@ -37,7 +37,7 @@ func TestPullRequest_APIFormat(t *testing.T) {
|
||||
// simulate fork deletion
|
||||
pr.HeadRepo = nil
|
||||
pr.HeadRepoID = 100000
|
||||
apiPullRequest = ToAPIPullRequest(pr)
|
||||
apiPullRequest = ToAPIPullRequest(pr, nil)
|
||||
assert.NotNil(t, apiPullRequest)
|
||||
assert.Nil(t, apiPullRequest.Head.Repository)
|
||||
assert.EqualValues(t, -1, apiPullRequest.Head.RepoID)
|
||||
|
||||
@@ -28,32 +28,24 @@ func CreateReader(input io.Reader, delimiter rune) *stdcsv.Reader {
|
||||
}
|
||||
|
||||
// CreateReaderAndGuessDelimiter tries to guess the field delimiter from the content and creates a csv.Reader.
|
||||
// Reads at most 10k bytes.
|
||||
func CreateReaderAndGuessDelimiter(rd io.Reader) (*stdcsv.Reader, error) {
|
||||
var data = make([]byte, 1e4)
|
||||
size, err := rd.Read(data)
|
||||
size, err := util.ReadAtMost(rd, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
delimiter := guessDelimiter(data[:size])
|
||||
|
||||
var newInput io.Reader
|
||||
if size < 1e4 {
|
||||
newInput = bytes.NewReader(data[:size])
|
||||
} else {
|
||||
newInput = io.MultiReader(bytes.NewReader(data), rd)
|
||||
}
|
||||
|
||||
return CreateReader(newInput, delimiter), nil
|
||||
return CreateReader(
|
||||
io.MultiReader(bytes.NewReader(data[:size]), rd),
|
||||
guessDelimiter(data[:size]),
|
||||
), nil
|
||||
}
|
||||
|
||||
// guessDelimiter scores the input CSV data against delimiters, and returns the best match.
|
||||
// Reads at most 10k bytes & 10 lines.
|
||||
func guessDelimiter(data []byte) rune {
|
||||
maxLines := 10
|
||||
maxBytes := util.Min(len(data), 1e4)
|
||||
text := string(data[:maxBytes])
|
||||
text = quoteRegexp.ReplaceAllLiteralString(text, "")
|
||||
text := quoteRegexp.ReplaceAllLiteralString(string(data), "")
|
||||
lines := strings.SplitN(text, "\n", maxLines+1)
|
||||
lines = lines[:util.Min(maxLines, len(lines))]
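The reworked CreateReaderAndGuessDelimiter above buffers at most 10k bytes, guesses the delimiter from that sample, and then chains the buffered bytes back in front of the remaining reader. A hedged usage sketch (readAll is a hypothetical helper, not part of this change set):

package example

import (
	"io"
	"strings"

	gitea_csv "code.gitea.io/gitea/modules/csv"
)

// readAll feeds sample content through CreateReaderAndGuessDelimiter and drains
// the resulting csv.Reader into a slice of records.
func readAll(content string) ([][]string, error) {
	rd, err := gitea_csv.CreateReaderAndGuessDelimiter(strings.NewReader(content))
	if err != nil {
		return nil, err
	}
	var rows [][]string
	for {
		record, err := rd.Read()
		if err == io.EOF {
			return rows, nil
		}
		if err != nil {
			return nil, err
		}
		rows = append(rows, record)
	}
}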
@@ -13,6 +13,64 @@ import (
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
)
|
||||
|
||||
type consistencyCheck struct {
|
||||
Name string
|
||||
Counter func() (int64, error)
|
||||
Fixer func() (int64, error)
|
||||
FixedMessage string
|
||||
}
|
||||
|
||||
func (c *consistencyCheck) Run(logger log.Logger, autofix bool) error {
|
||||
count, err := c.Counter()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting %s", err, c.Name)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
var fixed int64
|
||||
if fixed, err = c.Fixer(); err != nil {
|
||||
logger.Critical("Error: %v whilst fixing %s", err, c.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
prompt := "Deleted"
|
||||
if c.FixedMessage != "" {
|
||||
prompt = c.FixedMessage
|
||||
}
|
||||
|
||||
if fixed < 0 {
|
||||
logger.Info(prompt+" %d %s", count, c.Name)
|
||||
} else {
|
||||
logger.Info(prompt+" %d/%d %s", fixed, count, c.Name)
|
||||
}
|
||||
} else {
|
||||
logger.Warn("Found %d %s", count, c.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func asFixer(fn func() error) func() (int64, error) {
|
||||
return func() (int64, error) {
|
||||
err := fn()
|
||||
return -1, err
|
||||
}
|
||||
}
|
||||
|
||||
func genericOrphanCheck(name, subject, refobject, joincond string) consistencyCheck {
|
||||
return consistencyCheck{
|
||||
Name: name,
|
||||
Counter: func() (int64, error) {
|
||||
return models.CountOrphanedObjects(subject, refobject, joincond)
|
||||
},
|
||||
Fixer: func() (int64, error) {
|
||||
err := models.DeleteOrphanedObjects(subject, refobject, joincond)
|
||||
return -1, err
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
// make sure DB version is uptodate
|
||||
if err := models.NewEngine(context.Background(), migrations.EnsureUpToDate); err != nil {
|
||||
@@ -20,246 +78,103 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// find labels without existing repo or org
|
||||
count, err := models.CountOrphanedLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedLabels(); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned labels", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d labels without existing repository/organisation deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d labels without existing repository/organisation", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find IssueLabels without existing label
|
||||
count, err = models.CountOrphanedIssueLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned issue_labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedIssueLabels(); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned issue_labels", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d issue_labels without existing label deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d issue_labels without existing label", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find issues without existing repository
|
||||
count, err = models.CountOrphanedIssues()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned issues", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedIssues(); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned issues", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d issues without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d issues without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find pulls without existing issues
|
||||
count, err = models.CountOrphanedObjects("pull_request", "issue", "pull_request.issue_id=issue.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("pull_request", "issue", "pull_request.issue_id=issue.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d pull requests without existing issue deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d pull requests without existing issue", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find tracked times without existing issues/pulls
|
||||
count, err = models.CountOrphanedObjects("tracked_time", "issue", "tracked_time.issue_id=issue.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("tracked_time", "issue", "tracked_time.issue_id=issue.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d tracked times without existing issue deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d tracked times without existing issue", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find null archived repositories
|
||||
count, err = models.CountNullArchivedRepository()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting null archived repositories", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixNullArchivedRepository()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst fixing null archived repositories", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d repositories with null is_archived updated", updatedCount)
|
||||
} else {
|
||||
logger.Warn("%d repositories with null is_archived", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find label comments with empty labels
|
||||
count, err = models.CountCommentTypeLabelWithEmptyLabel()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting label comments with empty labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixCommentTypeLabelWithEmptyLabel()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst removing label comments with empty labels", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d label comments with empty labels removed", updatedCount)
|
||||
} else {
|
||||
logger.Warn("%d label comments with empty labels", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find label comments with labels from outside the repository
|
||||
count, err = models.CountCommentTypeLabelWithOutsideLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting label comments with outside labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixCommentTypeLabelWithOutsideLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst removing label comments with outside labels", err)
|
||||
return err
|
||||
}
|
||||
log.Info("%d label comments with outside labels removed", updatedCount)
|
||||
} else {
|
||||
log.Warn("%d label comments with outside labels", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find issue_label with labels from outside the repository
|
||||
count, err = models.CountIssueLabelWithOutsideLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting issue_labels from outside the repository or organisation", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixIssueLabelWithOutsideLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst removing issue_labels from outside the repository or organisation", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d issue_labels from outside the repository or organisation removed", updatedCount)
|
||||
} else {
|
||||
logger.Warn("%d issue_labels from outside the repository or organisation", count)
|
||||
}
|
||||
consistencyChecks := []consistencyCheck{
|
||||
{
|
||||
// find labels without existing repo or org
|
||||
Name: "Orphaned Labels without existing repository or organisation",
|
||||
Counter: models.CountOrphanedLabels,
|
||||
Fixer: asFixer(models.DeleteOrphanedLabels),
|
||||
},
|
||||
{
|
||||
// find IssueLabels without existing label
|
||||
Name: "Orphaned Issue Labels without existing label",
|
||||
Counter: models.CountOrphanedIssueLabels,
|
||||
Fixer: asFixer(models.DeleteOrphanedIssueLabels),
|
||||
},
|
||||
{
|
||||
// find issues without existing repository
|
||||
Name: "Orphaned Issues without existing repository",
|
||||
Counter: models.CountOrphanedIssues,
|
||||
Fixer: asFixer(models.DeleteOrphanedIssues),
|
||||
},
|
||||
// find releases without existing repository
|
||||
genericOrphanCheck("Orphaned Releases without existing repository",
|
||||
"release", "repository", "release.repo_id=repository.id"),
|
||||
// find pulls without existing issues
|
||||
genericOrphanCheck("Orphaned PullRequests without existing issue",
|
||||
"pull_request", "issue", "pull_request.issue_id=issue.id"),
|
||||
// find tracked times without existing issues/pulls
|
||||
genericOrphanCheck("Orphaned TrackedTimes without existing issue",
|
||||
"tracked_time", "issue", "tracked_time.issue_id=issue.id"),
|
||||
// find null archived repositories
|
||||
{
|
||||
Name: "Repositories with is_archived IS NULL",
|
||||
Counter: models.CountNullArchivedRepository,
|
||||
Fixer: models.FixNullArchivedRepository,
|
||||
FixedMessage: "Fixed",
|
||||
},
|
||||
// find label comments with empty labels
|
||||
{
|
||||
Name: "Label comments with empty labels",
|
||||
Counter: models.CountCommentTypeLabelWithEmptyLabel,
|
||||
Fixer: models.FixCommentTypeLabelWithEmptyLabel,
|
||||
FixedMessage: "Fixed",
|
||||
},
|
||||
// find label comments with labels from outside the repository
|
||||
{
|
||||
Name: "Label comments with labels from outside the repository",
|
||||
Counter: models.CountCommentTypeLabelWithOutsideLabels,
|
||||
Fixer: models.FixCommentTypeLabelWithOutsideLabels,
|
||||
FixedMessage: "Removed",
|
||||
},
|
||||
// find issue_label with labels from outside the repository
|
||||
{
|
||||
Name: "IssueLabels with Labels from outside the repository",
|
||||
Counter: models.CountIssueLabelWithOutsideLabels,
|
||||
Fixer: models.FixIssueLabelWithOutsideLabels,
|
||||
FixedMessage: "Removed",
|
||||
},
|
||||
}
|
||||
|
||||
// TODO: function to recalc all counters
|
||||
|
||||
if setting.Database.UsePostgreSQL {
|
||||
count, err = models.CountBadSequences()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst checking sequence values", err)
|
||||
consistencyChecks = append(consistencyChecks, consistencyCheck{
|
||||
Name: "Sequence values",
|
||||
Counter: models.CountBadSequences,
|
||||
Fixer: asFixer(models.FixBadSequences),
|
||||
FixedMessage: "Updated",
|
||||
})
|
||||
}
|
||||
|
||||
consistencyChecks = append(consistencyChecks,
|
||||
// find protected branches without existing repository
|
||||
genericOrphanCheck("Protected Branches without existing repository",
|
||||
"protected_branch", "repository", "protected_branch.repo_id=repository.id"),
|
||||
// find deleted branches without existing repository
|
||||
genericOrphanCheck("Deleted Branches without existing repository",
|
||||
"deleted_branch", "repository", "deleted_branch.repo_id=repository.id"),
|
||||
// find LFS locks without existing repository
|
||||
genericOrphanCheck("LFS locks without existing repository",
|
||||
"lfs_lock", "repository", "lfs_lock.repo_id=repository.id"),
|
||||
// find collaborations without users
|
||||
genericOrphanCheck("Collaborations without existing user",
|
||||
"collaboration", "user", "collaboration.user_id=`user`.id"),
|
||||
// find collaborations without repository
|
||||
genericOrphanCheck("Collaborations without existing repository",
|
||||
"collaboration", "repository", "collaboration.repo_id=repository.id"),
|
||||
// find access without users
|
||||
genericOrphanCheck("Access entries without existing user",
|
||||
"access", "user", "access.user_id=`user`.id"),
|
||||
// find access without repository
|
||||
genericOrphanCheck("Access entries without existing repository",
|
||||
"access", "repository", "access.repo_id=repository.id"),
|
||||
)
|
||||
|
||||
for _, c := range consistencyChecks {
|
||||
if err := c.Run(logger, autofix); err != nil {
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
err := models.FixBadSequences()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst attempting to fix sequences", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d sequences updated", count)
|
||||
} else {
|
||||
logger.Warn("%d sequences with incorrect values", count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// find protected branches without existing repository
|
||||
count, err = models.CountOrphanedObjects("protected_branch", "repository", "protected_branch.repo_id=repository.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("protected_branch", "repository", "protected_branch.repo_id=repository.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d protected branches without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d protected branches without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find deleted branches without existing repository
|
||||
count, err = models.CountOrphanedObjects("deleted_branch", "repository", "deleted_branch.repo_id=repository.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("deleted_branch", "repository", "deleted_branch.repo_id=repository.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d deleted branches without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d deleted branches without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find LFS locks without existing repository
|
||||
count, err = models.CountOrphanedObjects("lfs_lock", "repository", "lfs_lock.repo_id=repository.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("lfs_lock", "repository", "lfs_lock.repo_id=repository.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d LFS locks without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d LFS locks without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
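The refactor above replaces the long per-check blocks with a data-driven consistencyChecks slice, so adding a check means adding one entry instead of repeating the count/warn/fix boilerplate. As an illustration only, a further orphan check would be appended inside checkDBConsistency like this (the webhook example and its join condition are assumed, not taken from this change set):

// Hypothetical extra entry, following the pattern introduced above.
consistencyChecks = append(consistencyChecks,
	genericOrphanCheck("Orphaned Webhooks without existing repository",
		"webhook", "repository", "webhook.repo_id=repository.id"),
)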
modules/doctor/fix16961.go (new file, 317 lines)
@@ -0,0 +1,317 @@
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
"xorm.io/builder"
|
||||
)
|
||||
|
||||
// #16831 revealed that the dump command that was broken in 1.14.3-1.14.6 and 1.15.0 (#15885).
|
||||
// This led to repo_unit and login_source cfg not being converted to JSON in the dump
|
||||
// Unfortunately although it was hoped that there were only a few users affected it
|
||||
// appears that many users are affected.
|
||||
|
||||
// We therefore need to provide a doctor command to fix this repeated issue #16961
|
||||
|
||||
func parseBool16961(bs []byte) (bool, error) {
|
||||
if bytes.EqualFold(bs, []byte("%!s(bool=false)")) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if bytes.EqualFold(bs, []byte("%!s(bool=true)")) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, fmt.Errorf("unexpected bool format: %s", string(bs))
|
||||
}
|
||||
|
||||
func fixUnitConfig16961(bs []byte, cfg *models.UnitConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle #16961
|
||||
if string(bs) != "&{}" && len(bs) != 0 {
|
||||
return
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixExternalWikiConfig16961(bs []byte, cfg *models.ExternalWikiConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(bs) < 3 {
|
||||
return
|
||||
}
|
||||
if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
|
||||
return
|
||||
}
|
||||
cfg.ExternalWikiURL = string(bs[2 : len(bs)-1])
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixExternalTrackerConfig16961(bs []byte, cfg *models.ExternalTrackerConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
// Handle #16961
|
||||
if len(bs) < 3 {
|
||||
return
|
||||
}
|
||||
|
||||
if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
|
||||
return
|
||||
}
|
||||
|
||||
parts := bytes.Split(bs[2:len(bs)-1], []byte{' '})
|
||||
if len(parts) != 3 {
|
||||
return
|
||||
}
|
||||
|
||||
cfg.ExternalTrackerURL = string(bytes.Join(parts[:len(parts)-2], []byte{' '}))
|
||||
cfg.ExternalTrackerFormat = string(parts[len(parts)-2])
|
||||
cfg.ExternalTrackerStyle = string(parts[len(parts)-1])
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixPullRequestsConfig16961(bs []byte, cfg *models.PullRequestsConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle #16961
|
||||
if len(bs) < 3 {
|
||||
return
|
||||
}
|
||||
|
||||
if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
|
||||
return
|
||||
}
|
||||
|
||||
// PullRequestsConfig was the following in 1.14
|
||||
// type PullRequestsConfig struct {
|
||||
// IgnoreWhitespaceConflicts bool
|
||||
// AllowMerge bool
|
||||
// AllowRebase bool
|
||||
// AllowRebaseMerge bool
|
||||
// AllowSquash bool
|
||||
// AllowManualMerge bool
|
||||
// AutodetectManualMerge bool
|
||||
// }
|
||||
//
|
||||
// 1.15 added in addition:
|
||||
// DefaultDeleteBranchAfterMerge bool
|
||||
// DefaultMergeStyle MergeStyle
|
||||
parts := bytes.Split(bs[2:len(bs)-1], []byte{' '})
|
||||
if len(parts) < 7 {
|
||||
return
|
||||
}
|
||||
|
||||
var parseErr error
|
||||
cfg.IgnoreWhitespaceConflicts, parseErr = parseBool16961(parts[0])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowMerge, parseErr = parseBool16961(parts[1])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowRebase, parseErr = parseBool16961(parts[2])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowRebaseMerge, parseErr = parseBool16961(parts[3])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowSquash, parseErr = parseBool16961(parts[4])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowManualMerge, parseErr = parseBool16961(parts[5])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AutodetectManualMerge, parseErr = parseBool16961(parts[6])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// 1.14 unit
|
||||
if len(parts) == 7 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if len(parts) < 9 {
|
||||
return
|
||||
}
|
||||
|
||||
cfg.DefaultDeleteBranchAfterMerge, parseErr = parseBool16961(parts[7])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
cfg.DefaultMergeStyle = models.MergeStyle(string(bytes.Join(parts[8:], []byte{' '})))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixIssuesConfig16961(bs []byte, cfg *models.IssuesConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle #16961
|
||||
if len(bs) < 3 {
|
||||
return
|
||||
}
|
||||
|
||||
if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
|
||||
return
|
||||
}
|
||||
|
||||
parts := bytes.Split(bs[2:len(bs)-1], []byte{' '})
|
||||
if len(parts) != 3 {
|
||||
return
|
||||
}
|
||||
var parseErr error
|
||||
cfg.EnableTimetracker, parseErr = parseBool16961(parts[0])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowOnlyContributorsToTrackTime, parseErr = parseBool16961(parts[1])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.EnableDependencies, parseErr = parseBool16961(parts[2])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixBrokenRepoUnit16961(repoUnit *models.RepoUnit, bs []byte) (fixed bool, err error) {
|
||||
// Shortcut empty or null values
|
||||
if len(bs) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
switch models.UnitType(repoUnit.Type) {
|
||||
case models.UnitTypeCode, models.UnitTypeReleases, models.UnitTypeWiki, models.UnitTypeProjects:
|
||||
cfg := &models.UnitConfig{}
|
||||
repoUnit.Config = cfg
|
||||
if fixed, err := fixUnitConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
case models.UnitTypeExternalWiki:
|
||||
cfg := &models.ExternalWikiConfig{}
|
||||
repoUnit.Config = cfg
|
||||
|
||||
if fixed, err := fixExternalWikiConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
case models.UnitTypeExternalTracker:
|
||||
cfg := &models.ExternalTrackerConfig{}
|
||||
repoUnit.Config = cfg
|
||||
if fixed, err := fixExternalTrackerConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
case models.UnitTypePullRequests:
|
||||
cfg := &models.PullRequestsConfig{}
|
||||
repoUnit.Config = cfg
|
||||
|
||||
if fixed, err := fixPullRequestsConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
case models.UnitTypeIssues:
|
||||
cfg := &models.IssuesConfig{}
|
||||
repoUnit.Config = cfg
|
||||
if fixed, err := fixIssuesConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("unrecognized repo unit type: %v", repoUnit.Type))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixBrokenRepoUnits16961(logger log.Logger, autofix bool) error {
|
||||
// RepoUnit describes all units of a repository
|
||||
type RepoUnit struct {
|
||||
ID int64
|
||||
RepoID int64
|
||||
Type models.UnitType
|
||||
Config []byte
|
||||
CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
|
||||
}
|
||||
|
||||
count := 0
|
||||
|
||||
err := models.Iterate(
|
||||
models.DefaultDBContext(),
|
||||
new(RepoUnit),
|
||||
builder.Gt{
|
||||
"id": 0,
|
||||
},
|
||||
func(idx int, bean interface{}) error {
|
||||
unit := bean.(*RepoUnit)
|
||||
|
||||
bs := unit.Config
|
||||
repoUnit := &models.RepoUnit{
|
||||
ID: unit.ID,
|
||||
RepoID: unit.RepoID,
|
||||
Type: unit.Type,
|
||||
CreatedUnix: unit.CreatedUnix,
|
||||
}
|
||||
|
||||
if fixed, err := fixBrokenRepoUnit16961(repoUnit, bs); !fixed {
|
||||
return err
|
||||
}
|
||||
|
||||
count++
|
||||
if !autofix {
|
||||
return nil
|
||||
}
|
||||
|
||||
return models.UpdateRepoUnit(repoUnit)
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
logger.Critical("Unable to iterate acrosss repounits to fix the broken units: Error %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !autofix {
|
||||
logger.Warn("Found %d broken repo_units", count)
|
||||
return nil
|
||||
}
|
||||
logger.Info("Fixed %d broken repo_units", count)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register(&Check{
|
||||
Title: "Check for incorrectly dumped repo_units (See #16961)",
|
||||
Name: "fix-broken-repo-units",
|
||||
IsDefault: false,
|
||||
Run: fixBrokenRepoUnits16961,
|
||||
Priority: 7,
|
||||
})
|
||||
}
modules/doctor/fix16961_test.go (new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func Test_fixUnitConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
bs: "",
|
||||
wantFixed: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "normal: {}",
|
||||
bs: "{}",
|
||||
wantFixed: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken but fixable: &{}",
|
||||
bs: "&{}",
|
||||
wantFixed: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken but unfixable: &{asdasd}",
|
||||
bs: "&{asdasd}",
|
||||
wantFixed: false,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotFixed, err := fixUnitConfig16961([]byte(tt.bs), &models.UnitConfig{})
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixUnitConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixUnitConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixExternalWikiConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
expected string
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "normal: {\"ExternalWikiURL\":\"http://someurl\"}",
|
||||
bs: "{\"ExternalWikiURL\":\"http://someurl\"}",
|
||||
expected: "http://someurl",
|
||||
wantFixed: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken: &{http://someurl}",
|
||||
bs: "&{http://someurl}",
|
||||
expected: "http://someurl",
|
||||
wantFixed: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken but unfixable: http://someurl",
|
||||
bs: "http://someurl",
|
||||
wantFixed: false,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &models.ExternalWikiConfig{}
|
||||
gotFixed, err := fixExternalWikiConfig16961([]byte(tt.bs), cfg)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixExternalWikiConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixExternalWikiConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
if cfg.ExternalWikiURL != tt.expected {
|
||||
t.Errorf("fixExternalWikiConfig_16961().ExternalWikiURL = %v, want %v", cfg.ExternalWikiURL, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixExternalTrackerConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
expected models.ExternalTrackerConfig
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "normal",
|
||||
bs: `{"ExternalTrackerURL":"a","ExternalTrackerFormat":"b","ExternalTrackerStyle":"c"}`,
|
||||
expected: models.ExternalTrackerConfig{
|
||||
ExternalTrackerURL: "a",
|
||||
ExternalTrackerFormat: "b",
|
||||
ExternalTrackerStyle: "c",
|
||||
},
|
||||
wantFixed: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken",
|
||||
bs: "&{a b c}",
|
||||
expected: models.ExternalTrackerConfig{
|
||||
ExternalTrackerURL: "a",
|
||||
ExternalTrackerFormat: "b",
|
||||
ExternalTrackerStyle: "c",
|
||||
},
|
||||
wantFixed: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken - too many fields",
|
||||
bs: "&{a b c d}",
|
||||
wantFixed: false,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "broken - wrong format",
|
||||
bs: "a b c d}",
|
||||
wantFixed: false,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &models.ExternalTrackerConfig{}
|
||||
gotFixed, err := fixExternalTrackerConfig16961([]byte(tt.bs), cfg)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixExternalTrackerConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixExternalTrackerConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
if cfg.ExternalTrackerFormat != tt.expected.ExternalTrackerFormat {
|
||||
t.Errorf("fixExternalTrackerConfig_16961().ExternalTrackerFormat = %v, want %v", tt.expected.ExternalTrackerFormat, cfg.ExternalTrackerFormat)
|
||||
}
|
||||
if cfg.ExternalTrackerStyle != tt.expected.ExternalTrackerStyle {
|
||||
t.Errorf("fixExternalTrackerConfig_16961().ExternalTrackerStyle = %v, want %v", tt.expected.ExternalTrackerStyle, cfg.ExternalTrackerStyle)
|
||||
}
|
||||
if cfg.ExternalTrackerURL != tt.expected.ExternalTrackerURL {
|
||||
t.Errorf("fixExternalTrackerConfig_16961().ExternalTrackerURL = %v, want %v", tt.expected.ExternalTrackerURL, cfg.ExternalTrackerURL)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixPullRequestsConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
expected models.PullRequestsConfig
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "normal",
|
||||
bs: `{"IgnoreWhitespaceConflicts":false,"AllowMerge":false,"AllowRebase":false,"AllowRebaseMerge":false,"AllowSquash":false,"AllowManualMerge":false,"AutodetectManualMerge":false,"DefaultDeleteBranchAfterMerge":false,"DefaultMergeStyle":""}`,
|
||||
},
|
||||
{
|
||||
name: "broken - 1.14",
|
||||
bs: `&{%!s(bool=false) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=false) %!s(bool=false)}`,
|
||||
expected: models.PullRequestsConfig{
|
||||
IgnoreWhitespaceConflicts: false,
|
||||
AllowMerge: true,
|
||||
AllowRebase: true,
|
||||
AllowRebaseMerge: true,
|
||||
AllowSquash: true,
|
||||
AllowManualMerge: false,
|
||||
AutodetectManualMerge: false,
|
||||
},
|
||||
wantFixed: true,
|
||||
},
|
||||
{
|
||||
name: "broken - 1.15",
|
||||
bs: `&{%!s(bool=false) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=false) %!s(bool=false) %!s(bool=false) merge}`,
|
||||
expected: models.PullRequestsConfig{
|
||||
AllowMerge: true,
|
||||
AllowRebase: true,
|
||||
AllowRebaseMerge: true,
|
||||
AllowSquash: true,
|
||||
DefaultMergeStyle: models.MergeStyleMerge,
|
||||
},
|
||||
wantFixed: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &models.PullRequestsConfig{}
|
||||
gotFixed, err := fixPullRequestsConfig16961([]byte(tt.bs), cfg)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixPullRequestsConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixPullRequestsConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
assert.EqualValues(t, &tt.expected, cfg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixIssuesConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
expected models.IssuesConfig
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "normal",
|
||||
bs: `{"EnableTimetracker":true,"AllowOnlyContributorsToTrackTime":true,"EnableDependencies":true}`,
|
||||
expected: models.IssuesConfig{
|
||||
EnableTimetracker: true,
|
||||
AllowOnlyContributorsToTrackTime: true,
|
||||
EnableDependencies: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "broken",
|
||||
bs: `&{%!s(bool=true) %!s(bool=true) %!s(bool=true)}`,
|
||||
expected: models.IssuesConfig{
|
||||
EnableTimetracker: true,
|
||||
AllowOnlyContributorsToTrackTime: true,
|
||||
EnableDependencies: true,
|
||||
},
|
||||
wantFixed: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &models.IssuesConfig{}
|
||||
gotFixed, err := fixIssuesConfig16961([]byte(tt.bs), cfg)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixIssuesConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixIssuesConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
assert.EqualValues(t, &tt.expected, cfg)
|
||||
})
|
||||
}
|
||||
}
|
||||
modules/doctor/storage.go (new file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"code.gitea.io/gitea/models"
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/storage"
|
||||
)
|
||||
|
||||
func checkAttachmentStorageFiles(logger log.Logger, autofix bool) error {
|
||||
var total, garbageNum int
|
||||
var deletePaths []string
|
||||
if err := storage.Attachments.IterateObjects(func(p string, obj storage.Object) error {
|
||||
defer obj.Close()
|
||||
|
||||
total++
|
||||
stat, err := obj.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
exist, err := models.ExistAttachmentsByUUID(stat.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exist {
|
||||
garbageNum++
|
||||
if autofix {
|
||||
deletePaths = append(deletePaths, p)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
logger.Error("storage.Attachments.IterateObjects failed: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if garbageNum > 0 {
|
||||
if autofix {
|
||||
var deletedNum int
|
||||
for _, p := range deletePaths {
|
||||
if err := storage.Attachments.Delete(p); err != nil {
|
||||
log.Error("Delete attachment %s failed: %v", p, err)
|
||||
} else {
|
||||
deletedNum++
|
||||
}
|
||||
}
|
||||
logger.Info("%d missed information attachment detected, %d deleted.", garbageNum, deletedNum)
|
||||
} else {
|
||||
logger.Warn("Checked %d attachment, %d missed information.", total, garbageNum)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkStorageFiles(logger log.Logger, autofix bool) error {
|
||||
if err := storage.Init(); err != nil {
|
||||
logger.Error("storage.Init failed: %v", err)
|
||||
return err
|
||||
}
|
||||
return checkAttachmentStorageFiles(logger, autofix)
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register(&Check{
|
||||
Title: "Check if there is garbage storage files",
|
||||
Name: "storages",
|
||||
IsDefault: false,
|
||||
Run: checkStorageFiles,
|
||||
AbortIfFailed: false,
|
||||
SkipDatabaseInitialization: false,
|
||||
Priority: 1,
|
||||
})
|
||||
}
|
||||
@@ -8,8 +8,10 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@@ -25,6 +27,20 @@ type WriteCloserError interface {
|
||||
CloseWithError(err error) error
|
||||
}
|
||||
|
||||
// EnsureValidGitRepository runs git rev-parse in the repository path - thus ensuring that the repository is a valid repository.
|
||||
// Run before opening git cat-file.
|
||||
// This is needed otherwise the git cat-file will hang for invalid repositories.
|
||||
func EnsureValidGitRepository(ctx context.Context, repoPath string) error {
|
||||
stderr := strings.Builder{}
|
||||
err := NewCommandContext(ctx, "rev-parse").
|
||||
SetDescription(fmt.Sprintf("%s rev-parse [repo_path: %s]", GitExecutable, repoPath)).
|
||||
RunInDirFullPipeline(repoPath, nil, &stderr, nil)
|
||||
if err != nil {
|
||||
return ConcatenateError(err, (&stderr).String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CatFileBatchCheck opens git cat-file --batch-check in the provided repo and returns a stdin pipe, a stdout reader and cancel function
|
||||
func CatFileBatchCheck(repoPath string) (WriteCloserError, *bufio.Reader, func()) {
|
||||
batchStdinReader, batchStdinWriter := io.Pipe()
|
||||
@@ -40,9 +56,14 @@ func CatFileBatchCheck(repoPath string) (WriteCloserError, *bufio.Reader, func()
|
||||
<-closed
|
||||
}
|
||||
|
||||
_, filename, line, _ := runtime.Caller(2)
|
||||
filename = strings.TrimPrefix(filename, callerPrefix)
|
||||
|
||||
go func() {
|
||||
stderr := strings.Builder{}
|
||||
err := NewCommandContext(ctx, "cat-file", "--batch-check").RunInDirFullPipeline(repoPath, batchStdoutWriter, &stderr, batchStdinReader)
|
||||
err := NewCommandContext(ctx, "cat-file", "--batch-check").
|
||||
SetDescription(fmt.Sprintf("%s cat-file --batch-check [repo_path: %s] (%s:%d)", GitExecutable, repoPath, filename, line)).
|
||||
RunInDirFullPipeline(repoPath, batchStdoutWriter, &stderr, batchStdinReader)
|
||||
if err != nil {
|
||||
_ = batchStdoutWriter.CloseWithError(ConcatenateError(err, (&stderr).String()))
|
||||
_ = batchStdinReader.CloseWithError(ConcatenateError(err, (&stderr).String()))
|
||||
@@ -76,9 +97,14 @@ func CatFileBatch(repoPath string) (WriteCloserError, *bufio.Reader, func()) {
|
||||
<-closed
|
||||
}
|
||||
|
||||
_, filename, line, _ := runtime.Caller(2)
|
||||
filename = strings.TrimPrefix(filename, callerPrefix)
|
||||
|
||||
go func() {
|
||||
stderr := strings.Builder{}
|
||||
err := NewCommandContext(ctx, "cat-file", "--batch").RunInDirFullPipeline(repoPath, batchStdoutWriter, &stderr, batchStdinReader)
|
||||
err := NewCommandContext(ctx, "cat-file", "--batch").
|
||||
SetDescription(fmt.Sprintf("%s cat-file --batch [repo_path: %s] (%s:%d)", GitExecutable, repoPath, filename, line)).
|
||||
RunInDirFullPipeline(repoPath, batchStdoutWriter, &stderr, batchStdinReader)
|
||||
if err != nil {
|
||||
_ = batchStdoutWriter.CloseWithError(ConcatenateError(err, (&stderr).String()))
|
||||
_ = batchStdinReader.CloseWithError(ConcatenateError(err, (&stderr).String()))
|
||||
@@ -292,3 +318,10 @@ func ParseTreeLine(rd *bufio.Reader, modeBuf, fnameBuf, shaBuf []byte) (mode, fn
|
||||
sha = shaBuf
|
||||
return
|
||||
}
|
||||
|
||||
var callerPrefix string
|
||||
|
||||
func init() {
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
callerPrefix = strings.TrimSuffix(filename, "modules/git/batch_reader.go")
|
||||
}
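EnsureValidGitRepository above lets callers fail fast instead of having git cat-file --batch hang on a corrupt or missing repository. A hedged sketch of a caller (openBatch is hypothetical; the git package functions and signatures are the ones shown in the hunks above):

package example

import (
	"bufio"
	"context"
	"fmt"

	"code.gitea.io/gitea/modules/git"
)

// openBatch validates the repository first so that the long-lived
// cat-file --batch pipe opened afterwards cannot hang on an invalid path.
func openBatch(ctx context.Context, repoPath string) (git.WriteCloserError, *bufio.Reader, func(), error) {
	if err := git.EnsureValidGitRepository(ctx, repoPath); err != nil {
		return nil, nil, nil, fmt.Errorf("%s is not a valid git repository: %w", repoPath, err)
	}
	writer, reader, cancel := git.CatFileBatch(repoPath)
	return writer, reader, cancel, nil
}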
@@ -12,6 +12,7 @@ import (
|
||||
"io/ioutil"
|
||||
|
||||
"code.gitea.io/gitea/modules/typesniffer"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
)
|
||||
|
||||
// This file contains common functions between the gogit and !gogit variants for git Blobs
|
||||
@@ -29,7 +30,7 @@ func (b *Blob) GetBlobContent() (string, error) {
|
||||
}
|
||||
defer dataRc.Close()
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := dataRc.Read(buf)
|
||||
n, _ := util.ReadAtMost(dataRc, buf)
|
||||
buf = buf[:n]
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gogit
|
||||
// +build gogit
|
||||
|
||||
package git
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !gogit
|
||||
// +build !gogit
|
||||
|
||||
package git
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build race
|
||||
// +build race
|
||||
|
||||
package git
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gogit
|
||||
// +build gogit
|
||||
|
||||
package git
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gogit
|
||||
// +build gogit
|
||||
|
||||
package git
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.