Mirror of https://github.com/go-gitea/gitea.git (synced 2025-11-08 05:02:38 +09:00)
Compare commits
92 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 9879e23c57 | |
| | 56a3b50136 | |
| | 9a8532d928 | |
| | d29a0fc3be | |
| | 04517e17d6 | |
| | 3a222ee416 | |
| | add85f5a85 | |
| | 76ad83f05e | |
| | 714ecd9f1e | |
| | a08856606e | |
| | 7be2d7b136 | |
| | 6f3596e33c | |
| | 0305a73633 | |
| | 6cd1ccef3d | |
| | ea0fe83888 | |
| | 1cec7f5ab5 | |
| | 1cb1101d44 | |
| | 653dff4e57 | |
| | b661bbaed7 | |
| | 20ae184967 | |
| | 15b44496ec | |
| | 0d0ff5e32a | |
| | f25f7c592f | |
| | e8cf04bad7 | |
| | 251fdaaf41 | |
| | f572fb906f | |
| | 9340269d84 | |
| | 34650b925b | |
| | 718e0db12e | |
| | 6110ddc280 | |
| | c7d8181a70 | |
| | 548ae3eb98 | |
| | 2c383d812d | |
| | ef12b8de80 | |
| | dd1ba34ee5 | |
| | 1fbdf96c34 | |
| | 5159055278 | |
| | 06da10b9a1 | |
| | 175ebc6f88 | |
| | 3aecea2e6e | |
| | cae8c63517 | |
| | 8ace5c1161 | |
| | a87b813955 | |
| | 3baeec745c | |
| | befb6bea22 | |
| | 79f0b1a50b | |
| | 79a3d277e5 | |
| | eb748ff79e | |
| | c5770195d9 | |
| | a20ccec369 | |
| | 9c2b7a196e | |
| | 1e278b15c2 | |
| | fde6ff6a75 | |
| | 51f4f8c393 | |
| | f5845e6497 | |
| | c927ebd119 | |
| | 245596e130 | |
| | 1c3ae6d05e | |
| | a1e57ebe6b | |
| | 73ae93b007 | |
| | dc030f64a7 | |
| | 6e0a08d753 | |
| | 7b1153e943 | |
| | 6995be66e7 | |
| | 28971c7c15 | |
| | eb5e6f09eb | |
| | bf6264c1db | |
| | 5b6b7e79cf | |
| | 766272b154 | |
| | 4707d4b8a9 | |
| | 4b8b214108 | |
| | ebae7e1512 | |
| | 122917f4d5 | |
| | 9cf5739c0f | |
| | 4b6556565f | |
| | 7ce938b6c7 | |
| | 6139834e76 | |
| | b673a24ee6 | |
| | fd35f56e87 | |
| | 1f8df5dd89 | |
| | 6a025d8b4a | |
| | 270c7f36db | |
| | 0e448fb96d | |
| | 659b946eda | |
| | 56ab5ec9ea | |
| | 3b13c5d41a | |
| | d27f061863 | |
| | 07489d0405 | |
| | 30708d9ffe | |
| | 1b08dfeacf | |
| | e5ded0ee19 | |
| | a384109244 | |
@@ -527,7 +527,7 @@ steps:

- name: release-branch
pull: always
image: plugins/s3:1
image: woodpeckerci/plugin-s3:latest
settings:
acl: public-read
bucket: gitea-artifacts

@@ -548,7 +548,7 @@ steps:
- push

- name: release-main
image: plugins/s3:1
image: woodpeckerci/plugin-s3:latest
settings:
acl: public-read
bucket: gitea-artifacts

@@ -623,7 +623,7 @@ steps:

- name: release-tag
pull: always
image: plugins/s3:1
image: woodpeckerci/plugin-s3:latest
settings:
acl: public-read
bucket: gitea-artifacts

@@ -9,7 +9,6 @@ linters:
- unused
- structcheck
- varcheck
- golint
- dupl
#- gocyclo # The cyclomatic complexety of a lot of functions is too high, we should refactor those another time.
- gofmt
CHANGELOG.md (104)
@@ -4,6 +4,110 @@ This changelog goes through all the changes that have been made in each release
without substantial changes to our git log; to see the highlights of what has
been added to each release, please refer to the [blog](https://blog.gitea.io).

## [1.15.7](https://github.com/go-gitea/gitea/releases/tag/v1.15.7) - 2021-12-01

* ENHANCEMENTS
  * Only allow webhook to send requests to allowed hosts (#17482) (#17510)
  * Fix login redirection links (#17451) (#17473)
* BUGFIXES
  * Fix database inconsistent when admin change user email (#17549) (#17840)
  * Use correct user on releases (#17806) (#17818)
  * Fix commit count in tag view (#17698) (#17790)
  * Fix close issue but time watcher still running (#17643) (#17761)
  * Fix Migrate Description (#17692) (#17727)
  * Fix bug when project board get open issue number (#17703) (#17726)
  * Return 400 but not 500 when request archive with wrong format (#17691) (#17700)
  * Fix bug when read mysql database max lifetime (#17682) (#17690)
  * Fix database deadlock when update issue labels (#17649) (#17665)
  * Fix bug on detect issue/comment writer (#17592)
  * Remove appSubUrl from pasted images (#17572) (#17588)
  * Make `ParsePatch` more robust (#17573) (#17580)
  * Fix stats upon searching issues (#17566) (#17578)
  * Escape issue titles in comments list (#17555) (#17556)
  * Fix zero created time bug on commit api (#17546) (#17547)
  * Fix database keyword quote problem on migration v161 (#17522) (#17523)
  * Fix email with + when active (#17518) (#17520)
  * Stop double encoding blame commit messages (#17498) (#17500)
  * Quote the table name in CountOrphanedObjects (#17487) (#17488)
  * Run Migrate in Install rather than just SyncTables (#17475) (#17486)
* BUILD
  * Fix golangci-lint warnings (#17598 et al) (#17668)
* MISC
  * Preserve color when inverting emojis (#17797) (#17799)

## [1.15.6](https://github.com/go-gitea/gitea/releases/tag/v1.15.6) - 2021-10-28

* BUGFIXES
  * Prevent panic in serv.go with Deploy Keys (#17434) (#17435)
  * Fix CSV render error (#17406) (#17431)
  * Read expected buffer size (#17409) (#17430)
  * Ensure that restricted users can access repos for which they are members (#17460) (#17464)
  * Make commit-statuses popup show correctly (#17447) (#17466)
* TESTING
  * Add integration tests for private.NoServCommand and private.ServCommand (#17456) (#17463)

## [1.15.5](https://github.com/go-gitea/gitea/releases/tag/v1.15.5) - 2021-10-21

* SECURITY
  * Upgrade Bluemonday to v1.0.16 (#17372) (#17374)
  * Ensure correct SSH permissions check for private and restricted users (#17370) (#17373)
* BUGFIXES
  * Prevent NPE in CSV diff rendering when column removed (#17018) (#17377)
  * Offer rsa-sha2-512 and rsa-sha2-256 algorithms in internal SSH (#17281) (#17376)
  * Don't panic if we fail to parse U2FRegistration data (#17304) (#17371)
  * Ensure popup text is aligned left (backport for 1.15) (#17343)
  * Ensure that git daemon export ok is created for mirrors (#17243) (#17306)
  * Disable core.protectNTFS (#17300) (#17302)
  * Use pointer for wrappedConn methods (#17295) (#17296)
  * AutoRegistration is supposed to be working with disabled registration (backport) (#17292)
  * Handle duplicate keys on GPG key ring (#17242) (#17284)
  * Fix SVG side by side comparison link (#17375) (#17391)

## [1.15.4](https://github.com/go-gitea/gitea/releases/tag/v1.15.4) - 2021-10-08

* BUGFIXES
  * Raw file API: don't try to interpret 40char filenames as commit SHA (#17185) (#17272)
  * Don't allow merged PRs to be reopened (#17192) (#17271)
  * Fix incorrect repository count on organization tab of dashboard (#17256) (#17266)
  * Fix unwanted team review request deletion (#17257) (#17264)
  * Fix broken Activities link in team dashboard (#17255) (#17258)
  * API pull's head/base have correct permission (#17214) (#17245)
  * Fix strange behavior of DownloadPullDiffOrPatch in incorrect index (#17223) (#17227)
  * Upgrade xorm to v1.2.5 (#17177) (#17188)
  * Fix missing repo link in issue/pull assigned emails (#17183) (#17184)
  * Fix bug of get context user (#17169) (#17172)
  * Nicely handle missing user in collaborations (#17049) (#17166)
  * Add Horizontal scrollbar to inner menu on Chrome (#17086) (#17164)
  * Fix wrong i18n keys (#17150) (#17153)
  * Fix Archive Creation: correct transaction ending (#17151)
  * Prevent panic in Org mode HighlightCodeBlock (#17140) (#17141)
  * Create doctor command to fix repo_units broken by dumps from 1.14.3-1.14.6 (#17136) (#17137)
* ENHANCEMENT
  * Check user instead of organization when creating a repo from a template via API (#16346) (#17195)
* TRANSLATION
  * v1.15 fix Sprintf format 'verbs' in locale files (#17187)

## [1.15.3](https://github.com/go-gitea/gitea/releases/tag/v1.15.3) - 2021-09-19

* ENHANCEMENTS
  * Add fluid to ui container class to remove margin (#16396) (#16976)
  * Add caller to cat-file batch calls (#17082) (#17089)
* BUGFIXES
  * Render full plain readme. (#17083) (#17090)
  * Upgrade xorm to v1.2.4 (#17059)
  * Fix bug of migrate comments which only fetch one page (#17055) (#17058)
  * Do not show issue context popup on external issues (#17050) (#17054)
  * Decrement Fork Num when converting from Fork (#17035) (#17046)
  * Correctly rollback in ForkRepository (#17034) (#17045)
  * Fix missing close in WalkGitLog (#17008) (#17009)
  * Add prefix to SVG id/class attributes (#16997) (#17000)
  * Fix bug of migrated repository not index (#16991) (#16996)
  * Skip AllowedUserVisibilityModes validation on update user if it is an organisation (#16988) (#16990)
  * Fix storage Iterate bug and Add storage doctor to delete garbage attachments (#16971) (#16977)
  * Fix issue with issue default mail template (#16956) (#16975)
  * Ensure that rebase conflicts are handled in updates (#16952) (#16960)
  * Prevent panic on diff generation (#16950) (#16951)

## [1.15.2](https://github.com/go-gitea/gitea/releases/tag/v1.15.2) - 2021-09-03

* BUGFIXES
@@ -12,9 +12,6 @@
<a href="https://discord.gg/Gitea" title="Join the Discord chat at https://discord.gg/Gitea">
<img src="https://img.shields.io/discord/322538954119184384.svg">
</a>
<a href="https://microbadger.com/images/gitea/gitea" title="Get your own image badge on microbadger.com">
<img src="https://images.microbadger.com/badges/image/gitea/gitea.svg">
</a>
<a href="https://codecov.io/gh/go-gitea/gitea" title="Codecov">
<img src="https://codecov.io/gh/go-gitea/gitea/branch/main/graph/badge.svg">
</a>
build.go (3)
@@ -2,7 +2,8 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

//+build vendor
//go:build vendor
// +build vendor

package main

@@ -2,6 +2,7 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

//go:build ignore
// +build ignore

package main

@@ -3,6 +3,7 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

//go:build ignore
// +build ignore

package main

@@ -1,3 +1,4 @@
//go:build ignore
// +build ignore

package main

@@ -1,3 +1,4 @@
//go:build ignore
// +build ignore

package main
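These hunks switch the build-constraint comments to the `//go:build` form introduced in Go 1.17 while keeping the legacy `// +build` line so older toolchains still honour the tag. As a minimal sketch (the file and tag name here are illustrative, not taken from the diff), a tag-guarded helper file pairs both lines above the package clause:

```go
//go:build vendor
// +build vendor

// This file is only compiled when the "vendor" build tag is set,
// e.g. `go run -tags vendor build.go`.
package main

func main() {}
```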
@@ -29,6 +29,7 @@ async function processFile(file, {prefix, fullName} = {}) {
plugins: extendDefaultPlugins([
'removeXMLNS',
'removeDimensions',
{name: 'prefixIds', params: {prefix: () => name}},
{
name: 'addClassesToSVGElement',
params: {classNames: ['svg', name]},

@@ -6,6 +6,7 @@
// gocovmerge takes the results from multiple `go test -coverprofile` runs and
// merges them into one profile

//go:build ignore
// +build ignore

package main

@@ -43,7 +43,11 @@ func runDocs(ctx *cli.Context) error {
// Clean up markdown. The following bug was fixed in v2, but is present in v1.
// It affects markdown output (even though the issue is referring to man pages)
// https://github.com/urfave/cli/issues/1040
docs = docs[strings.Index(docs, "#"):]
firstHashtagIndex := strings.Index(docs, "#")

if firstHashtagIndex > 0 {
docs = docs[firstHashtagIndex:]
}
}

out := os.Stdout
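The `runDocs` change above stops slicing with the raw result of `strings.Index`, which is -1 when no "#" is present and would panic the slice expression. A standalone sketch of the same guard (function and variable names are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// trimToFirstHeading returns s starting at its first '#', or s unchanged
// when no heading marker is present. The hunk above uses `> 0`; an index
// of 0 would leave the string unchanged anyway.
func trimToFirstHeading(s string) string {
	if i := strings.Index(s, "#"); i > 0 {
		return s[i:]
	}
	return s
}

func main() {
	fmt.Println(trimToFirstHeading("intro text # Usage ..."))
	fmt.Println(trimToFirstHeading("no heading at all")) // no panic, returned as-is
}
```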
@@ -124,7 +124,6 @@ func runRecreateTable(ctx *cli.Context) error {
}

func runDoctor(ctx *cli.Context) error {

// Silence the default loggers
log.DelNamedLogger("console")
log.DelNamedLogger(log.DEFAULT)

@@ -2,6 +2,7 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

//go:build bindata
// +build bindata

package cmd

@@ -2,6 +2,7 @@
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

//go:build !bindata
// +build !bindata

package cmd

@@ -194,6 +194,10 @@ func listen(m http.Handler, handleRedirector bool) error {
listenAddr = net.JoinHostPort(listenAddr, setting.HTTPPort)
}
log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubURL)
// This can be useful for users, many users do wrong to their config and get strange behaviors behind a reverse-proxy.
// A user may fix the configuration mistake when he sees this log.
// And this is also very helpful to maintainers to provide help to users to resolve their configuration problems.
log.Info("AppURL(ROOT_URL): %s", setting.AppURL)

if setting.LFS.StartServer {
log.Info("LFS server enabled")

@@ -576,6 +576,8 @@ PATH =
;;
;; (Go-Git only) Don't cache objects greater than this in memory. (Set to 0 to disable.)
;LARGE_OBJECT_THRESHOLD = 1048576
;; Set to true to forcibly set core.protectNTFS=false
;DISABLE_CORE_PROTECT_NTFS=false

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

@@ -1386,6 +1388,13 @@ PATH =
;; Deliver timeout in seconds
;DELIVER_TIMEOUT = 5
;;
;; Webhook can only call allowed hosts for security reasons. Comma separated list, eg: external, 192.168.1.0/24, *.mydomain.com
;; Built-in: loopback (for localhost), private (for LAN/intranet), external (for public hosts on internet), * (for all hosts)
;; CIDR list: 1.2.3.0/8, 2001:db8::/32
;; Wildcard hosts: *.mydomain.com, 192.168.100.*
;; Default to * for 1.15.x, external for 1.16 and later
;ALLOWED_HOST_LIST = *
;;
;; Allow insecure certification
;SKIP_TLS_VERIFY = false
;;

@@ -545,6 +545,14 @@ Define allowed algorithms and their minimum key length (use -1 to disable a type

- `QUEUE_LENGTH`: **1000**: Hook task queue length. Use caution when editing this value.
- `DELIVER_TIMEOUT`: **5**: Delivery timeout (sec) for shooting webhooks.
- `ALLOWED_HOST_LIST`: `*`: Default to `*` for 1.15.x, `external` for 1.16 and later. Webhook can only call allowed hosts for security reasons. Comma separated list.
  - Built-in networks:
    - `loopback`: 127.0.0.0/8 for IPv4 and ::1/128 for IPv6, localhost is included.
    - `private`: RFC 1918 (10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) and RFC 4193 (FC00::/7). Also called LAN/Intranet.
    - `external`: A valid non-private unicast IP, you can access all hosts on public internet.
    - `*`: All hosts are allowed.
  - CIDR list: `1.2.3.0/8` for IPv4 and `2001:db8::/32` for IPv6
  - Wildcard hosts: `*.mydomain.com`, `192.168.100.*`
- `SKIP_TLS_VERIFY`: **false**: Allow insecure certification.
- `PAGING_NUM`: **10**: Number of webhook history events that are shown in one page.
- `PROXY_URL`: ****: Proxy server URL, support http://, https//, socks://, blank will follow environment http_proxy/https_proxy

@@ -839,6 +847,7 @@ NB: You must have `DISABLE_ROUTER_LOG` set to `false` for this option to take ef
- `VERBOSE_PUSH`: **true**: Print status information about pushes as they are being processed.
- `VERBOSE_PUSH_DELAY`: **5s**: Only print verbose information if push takes longer than this delay.
- `LARGE_OBJECT_THRESHOLD`: **1048576**: (Go-Git only), don't cache objects greater than this in memory. (Set to 0 to disable.)
- `DISABLE_CORE_PROTECT_NTFS`: **false** Set to true to forcibly set `core.protectNTFS` to false.

## Git - Timeout settings (`git.timeout`)

- `DEFAUlT`: **360**: Git operations default timeout seconds.
- `MIGRATE`: **600**: Migrate external repositories timeout seconds.
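The `ALLOWED_HOST_LIST` documentation above mixes built-in network names, CIDR ranges and wildcard host patterns. The sketch below shows how such a list could be evaluated against a resolved webhook target; it is an illustrative approximation, not Gitea's actual host-matcher code, and it only handles the `*.domain` wildcard form:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// allowedHost reports whether host/ip matches any entry of a comma-separated
// allow list such as "external, 192.168.1.0/24, *.mydomain.com".
func allowedHost(list, host string, ip net.IP) bool {
	host = strings.ToLower(host)
	for _, entry := range strings.Split(list, ",") {
		entry = strings.ToLower(strings.TrimSpace(entry))
		switch {
		case entry == "*":
			return true
		case entry == "loopback" && ip.IsLoopback():
			return true
		case entry == "private" && ip.IsPrivate(): // RFC 1918 / RFC 4193 ranges
			return true
		case entry == "external" && ip.IsGlobalUnicast() && !ip.IsPrivate() && !ip.IsLoopback():
			return true
		case strings.Contains(entry, "/"): // CIDR entry, e.g. 192.168.1.0/24
			if _, ipNet, err := net.ParseCIDR(entry); err == nil && ipNet.Contains(ip) {
				return true
			}
		case strings.HasPrefix(entry, "*."): // wildcard entry, e.g. *.mydomain.com
			if strings.HasSuffix(host, entry[1:]) {
				return true
			}
		case host == entry: // exact host name
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowedHost("external, *.mydomain.com", "api.mydomain.com", net.ParseIP("203.0.113.7"))) // true
	fmt.Println(allowedHost("private", "example.test", net.ParseIP("8.8.8.8")))                          // false
}
```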
go.mod (6)

@@ -80,7 +80,7 @@ require (
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mattn/go-sqlite3 v1.14.8
github.com/mholt/archiver/v3 v3.5.0
github.com/microcosm-cc/bluemonday v1.0.15
github.com/microcosm-cc/bluemonday v1.0.16
github.com/miekg/dns v1.1.43 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/minio-go/v7 v7.0.12

@@ -125,7 +125,7 @@ require (
go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.18.1 // indirect
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
golang.org/x/net v0.0.0-20210614182718-04defd469f4e
golang.org/x/net v0.0.0-20211020060615-d418f374d309
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
golang.org/x/text v0.3.6

@@ -139,7 +139,7 @@ require (
mvdan.cc/xurls/v2 v2.2.0
strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251
xorm.io/builder v0.3.9
xorm.io/xorm v1.2.2
xorm.io/xorm v1.2.5
)

replace github.com/hashicorp/go-version => github.com/6543/go-version v1.3.1
go.sum (11)

@@ -868,8 +868,8 @@ github.com/mholt/acmez v0.1.3 h1:J7MmNIk4Qf9b8mAGqAh4XkNeowv3f1zW816yf4zt7Qk=
github.com/mholt/acmez v0.1.3/go.mod h1:8qnn8QA/Ewx8E3ZSsmscqsIjhhpxuy9vqdgbX2ceceM=
github.com/mholt/archiver/v3 v3.5.0 h1:nE8gZIrw66cu4osS/U7UW7YDuGMHssxKutU8IfWxwWE=
github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc=
github.com/microcosm-cc/bluemonday v1.0.15 h1:J4uN+qPng9rvkBZBoBb8YGR+ijuklIMpSOZZLjYpbeY=
github.com/microcosm-cc/bluemonday v1.0.15/go.mod h1:ZLvAzeakRwrGnzQEvstVzVt3ZpqOF2+sdFr0Om+ce30=
github.com/microcosm-cc/bluemonday v1.0.16 h1:kHmAq2t7WPWLjiGvzKa5o3HzSfahUKiOq7fAPUiMNIc=
github.com/microcosm-cc/bluemonday v1.0.16/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.42/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=

@@ -1364,8 +1364,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20210331060903-cb1fcc7394e5/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI=
golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -1765,5 +1766,5 @@ xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
xorm.io/builder v0.3.9 h1:Sd65/LdWyO7LR8+Cbd+e7mm3sK/7U9k0jS3999IDHMc=
xorm.io/builder v0.3.9/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE=
xorm.io/xorm v1.0.6/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4=
xorm.io/xorm v1.2.2 h1:FFBOEvJ++8fYBA9cywf2sxDVmFktl1SpJzTAG1ab06Y=
xorm.io/xorm v1.2.2/go.mod h1:fTG8tSjk6O1BYxwuohZUK+S1glnRycsCF05L1qQyEU0=
xorm.io/xorm v1.2.5 h1:tqN7OhN8P9xi52qBb76I8m5maAJMz/SSbgK2RGPCPbo=
xorm.io/xorm v1.2.5/go.mod h1:fTG8tSjk6O1BYxwuohZUK+S1glnRycsCF05L1qQyEU0=
integrations/api_private_serv_test.go (154, new file)

@@ -0,0 +1,154 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package integrations

import (
"context"
"net/url"
"testing"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/private"
"github.com/stretchr/testify/assert"
)

func TestAPIPrivateNoServ(t *testing.T) {
onGiteaRun(t, func(*testing.T, *url.URL) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
key, user, err := private.ServNoCommand(ctx, 1)
assert.NoError(t, err)
assert.Equal(t, int64(2), user.ID)
assert.Equal(t, "user2", user.Name)
assert.Equal(t, int64(1), key.ID)
assert.Equal(t, "user2@localhost", key.Name)

deployKey, err := models.AddDeployKey(1, "test-deploy", "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBGXEEzWmm1dxb+57RoK5KVCL0w2eNv9cqJX2AGGVlkFsVDhOXHzsadS3LTK4VlEbbrDMJdoti9yM8vclA8IeRacAAAAEc3NoOg== nocomment", false)
assert.NoError(t, err)

key, user, err = private.ServNoCommand(ctx, deployKey.KeyID)
assert.NoError(t, err)
assert.Empty(t, user)
assert.Equal(t, deployKey.KeyID, key.ID)
assert.Equal(t, "test-deploy", key.Name)
})
}

func TestAPIPrivateServ(t *testing.T) {
onGiteaRun(t, func(*testing.T, *url.URL) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// Can push to a repo we own
results, err := private.ServCommand(ctx, 1, "user2", "repo1", models.AccessModeWrite, "git-upload-pack", "")
assert.NoError(t, err)
assert.False(t, results.IsWiki)
assert.False(t, results.IsDeployKey)
assert.Equal(t, int64(1), results.KeyID)
assert.Equal(t, "user2@localhost", results.KeyName)
assert.Equal(t, "user2", results.UserName)
assert.Equal(t, int64(2), results.UserID)
assert.Equal(t, "user2", results.OwnerName)
assert.Equal(t, "repo1", results.RepoName)
assert.Equal(t, int64(1), results.RepoID)

// Cannot push to a private repo we're not associated with
results, err = private.ServCommand(ctx, 1, "user15", "big_test_private_1", models.AccessModeWrite, "git-upload-pack", "")
assert.Error(t, err)
assert.Empty(t, results)

// Cannot pull from a private repo we're not associated with
results, err = private.ServCommand(ctx, 1, "user15", "big_test_private_1", models.AccessModeRead, "git-upload-pack", "")
assert.Error(t, err)
assert.Empty(t, results)

// Can pull from a public repo we're not associated with
results, err = private.ServCommand(ctx, 1, "user15", "big_test_public_1", models.AccessModeRead, "git-upload-pack", "")
assert.NoError(t, err)
assert.False(t, results.IsWiki)
assert.False(t, results.IsDeployKey)
assert.Equal(t, int64(1), results.KeyID)
assert.Equal(t, "user2@localhost", results.KeyName)
assert.Equal(t, "user2", results.UserName)
assert.Equal(t, int64(2), results.UserID)
assert.Equal(t, "user15", results.OwnerName)
assert.Equal(t, "big_test_public_1", results.RepoName)
assert.Equal(t, int64(17), results.RepoID)

// Cannot push to a public repo we're not associated with
results, err = private.ServCommand(ctx, 1, "user15", "big_test_public_1", models.AccessModeWrite, "git-upload-pack", "")
assert.Error(t, err)
assert.Empty(t, results)

// Add reading deploy key
deployKey, err := models.AddDeployKey(19, "test-deploy", "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBGXEEzWmm1dxb+57RoK5KVCL0w2eNv9cqJX2AGGVlkFsVDhOXHzsadS3LTK4VlEbbrDMJdoti9yM8vclA8IeRacAAAAEc3NoOg== nocomment", true)
assert.NoError(t, err)

// Can pull from repo we're a deploy key for
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_1", models.AccessModeRead, "git-upload-pack", "")
assert.NoError(t, err)
assert.False(t, results.IsWiki)
assert.True(t, results.IsDeployKey)
assert.Equal(t, deployKey.KeyID, results.KeyID)
assert.Equal(t, "test-deploy", results.KeyName)
assert.Equal(t, "user15", results.UserName)
assert.Equal(t, int64(15), results.UserID)
assert.Equal(t, "user15", results.OwnerName)
assert.Equal(t, "big_test_private_1", results.RepoName)
assert.Equal(t, int64(19), results.RepoID)

// Cannot push to a private repo with reading key
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_1", models.AccessModeWrite, "git-upload-pack", "")
assert.Error(t, err)
assert.Empty(t, results)

// Cannot pull from a private repo we're not associated with
results, err = private.ServCommand(ctx, deployKey.ID, "user15", "big_test_private_2", models.AccessModeRead, "git-upload-pack", "")
assert.Error(t, err)
assert.Empty(t, results)

// Cannot pull from a public repo we're not associated with
results, err = private.ServCommand(ctx, deployKey.ID, "user15", "big_test_public_1", models.AccessModeRead, "git-upload-pack", "")
assert.Error(t, err)
assert.Empty(t, results)

// Add writing deploy key
deployKey, err = models.AddDeployKey(20, "test-deploy", "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBGXEEzWmm1dxb+57RoK5KVCL0w2eNv9cqJX2AGGVlkFsVDhOXHzsadS3LTK4VlEbbrDMJdoti9yM8vclA8IeRacAAAAEc3NoOg== nocomment", false)
assert.NoError(t, err)

// Cannot push to a private repo with reading key
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_1", models.AccessModeWrite, "git-upload-pack", "")
assert.Error(t, err)
assert.Empty(t, results)

// Can pull from repo we're a writing deploy key for
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_2", models.AccessModeRead, "git-upload-pack", "")
assert.NoError(t, err)
assert.False(t, results.IsWiki)
assert.True(t, results.IsDeployKey)
assert.Equal(t, deployKey.KeyID, results.KeyID)
assert.Equal(t, "test-deploy", results.KeyName)
assert.Equal(t, "user15", results.UserName)
assert.Equal(t, int64(15), results.UserID)
assert.Equal(t, "user15", results.OwnerName)
assert.Equal(t, "big_test_private_2", results.RepoName)
assert.Equal(t, int64(20), results.RepoID)

// Can push to repo we're a writing deploy key for
results, err = private.ServCommand(ctx, deployKey.KeyID, "user15", "big_test_private_2", models.AccessModeWrite, "git-upload-pack", "")
assert.NoError(t, err)
assert.False(t, results.IsWiki)
assert.True(t, results.IsDeployKey)
assert.Equal(t, deployKey.KeyID, results.KeyID)
assert.Equal(t, "test-deploy", results.KeyName)
assert.Equal(t, "user15", results.UserName)
assert.Equal(t, int64(15), results.UserID)
assert.Equal(t, "user15", results.OwnerName)
assert.Equal(t, "big_test_private_2", results.RepoName)
assert.Equal(t, int64(20), results.RepoID)

})

}
integrations/api_repo_archive_test.go (44, new file)

@@ -0,0 +1,44 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package integrations

import (
"fmt"
"io"
"net/http"
"net/url"
"testing"

"code.gitea.io/gitea/models"

"github.com/stretchr/testify/assert"
)

func TestAPIDownloadArchive(t *testing.T) {
defer prepareTestEnv(t)()

repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 1}).(*models.Repository)
user2 := models.AssertExistsAndLoadBean(t, &models.User{ID: 2}).(*models.User)
session := loginUser(t, user2.LowerName)
token := getTokenForLoggedInUser(t, session)

link, _ := url.Parse(fmt.Sprintf("/api/v1/repos/%s/%s/archive/master.zip", user2.Name, repo.Name))
link.RawQuery = url.Values{"token": {token}}.Encode()
resp := MakeRequest(t, NewRequest(t, "GET", link.String()), http.StatusOK)
bs, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
assert.EqualValues(t, 320, len(bs))

link, _ = url.Parse(fmt.Sprintf("/api/v1/repos/%s/%s/archive/master.tar.gz", user2.Name, repo.Name))
link.RawQuery = url.Values{"token": {token}}.Encode()
resp = MakeRequest(t, NewRequest(t, "GET", link.String()), http.StatusOK)
bs, err = io.ReadAll(resp.Body)
assert.NoError(t, err)
assert.EqualValues(t, 266, len(bs))

link, _ = url.Parse(fmt.Sprintf("/api/v1/repos/%s/%s/archive/master", user2.Name, repo.Name))
link.RawQuery = url.Values{"token": {token}}.Encode()
MakeRequest(t, NewRequest(t, "GET", link.String()), http.StatusBadRequest)
}
@@ -8,8 +8,10 @@ import (
"fmt"
"net/http"
"testing"
"time"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/timeutil"

"github.com/stretchr/testify/assert"
)

@@ -20,6 +22,10 @@ func TestUserHeatmap(t *testing.T) {
normalUsername := "user2"
session := loginUser(t, adminUsername)

var fakeNow = time.Date(2011, 10, 20, 0, 0, 0, 0, time.Local)
timeutil.Set(fakeNow)
defer timeutil.Unset()

urlStr := fmt.Sprintf("/api/v1/users/%s/heatmap", normalUsername)
req := NewRequest(t, "GET", urlStr)
resp := session.MakeRequest(t, req, http.StatusOK)
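The heatmap test now pins "now" with `timeutil.Set` so the expected contribution buckets do not drift as the fixtures age. A minimal sketch of the underlying pattern, an overridable clock (the package and `nowFunc` names here are illustrative, not Gitea's):

```go
package clock

import "time"

// nowFunc is swapped out by tests that need a deterministic "current time".
var nowFunc = time.Now

// Now returns the current time, honouring any test override.
func Now() time.Time { return nowFunc() }

// Set freezes Now to a fixed instant; Unset restores the real clock.
func Set(t time.Time) { nowFunc = func() time.Time { return t } }
func Unset()          { nowFunc = time.Now }
```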
@@ -5,10 +5,12 @@
package integrations

import (
"fmt"
"net/http"
"strings"
"testing"

api "code.gitea.io/gitea/modules/structs"
"github.com/stretchr/testify/assert"
)

@@ -110,3 +112,64 @@ func TestPrivateOrg(t *testing.T) {
req = NewRequest(t, "GET", "/privated_org/private_repo_on_private_org")
session.MakeRequest(t, req, http.StatusOK)
}

func TestOrgRestrictedUser(t *testing.T) {
defer prepareTestEnv(t)()

// privated_org is a private org who has id 23
orgName := "privated_org"

// public_repo_on_private_org is a public repo on privated_org
repoName := "public_repo_on_private_org"

// user29 is a restricted user who is not a member of the organization
restrictedUser := "user29"

// #17003 reports a bug whereby adding a restricted user to a read-only team doesn't work

// assert restrictedUser cannot see the org or the public repo
restrictedSession := loginUser(t, restrictedUser)
req := NewRequest(t, "GET", fmt.Sprintf("/%s", orgName))
restrictedSession.MakeRequest(t, req, http.StatusNotFound)

req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s", orgName, repoName))
restrictedSession.MakeRequest(t, req, http.StatusNotFound)

// Therefore create a read-only team
adminSession := loginUser(t, "user1")
token := getTokenForLoggedInUser(t, adminSession)

teamToCreate := &api.CreateTeamOption{
Name: "codereader",
Description: "Code Reader",
IncludesAllRepositories: true,
Permission: "read",
Units: []string{"repo.code"},
}

req = NewRequestWithJSON(t, "POST",
fmt.Sprintf("/api/v1/orgs/%s/teams?token=%s", orgName, token), teamToCreate)

var apiTeam api.Team

resp := adminSession.MakeRequest(t, req, http.StatusCreated)
DecodeJSON(t, resp, &apiTeam)
checkTeamResponse(t, &apiTeam, teamToCreate.Name, teamToCreate.Description, teamToCreate.IncludesAllRepositories,
teamToCreate.Permission, teamToCreate.Units)
checkTeamBean(t, apiTeam.ID, teamToCreate.Name, teamToCreate.Description, teamToCreate.IncludesAllRepositories,
teamToCreate.Permission, teamToCreate.Units)
//teamID := apiTeam.ID

// Now we need to add the restricted user to the team
req = NewRequest(t, "PUT",
fmt.Sprintf("/api/v1/teams/%d/members/%s?token=%s", apiTeam.ID, restrictedUser, token))
_ = adminSession.MakeRequest(t, req, http.StatusNoContent)

// Now we need to check if the restrictedUser can access the repo
req = NewRequest(t, "GET", fmt.Sprintf("/%s", orgName))
restrictedSession.MakeRequest(t, req, http.StatusOK)

req = NewRequest(t, "GET", fmt.Sprintf("/%s/%s", orgName, repoName))
restrictedSession.MakeRequest(t, req, http.StatusOK)

}
@@ -225,6 +225,9 @@ func (repo *Repository) refreshCollaboratorAccesses(e Engine, accessMap map[int6
return fmt.Errorf("getCollaborations: %v", err)
}
for _, c := range collaborators {
if c.User.IsGhost() {
continue
}
updateUserAccess(accessMap, c.User, c.Collaboration.Mode)
}
return nil

@@ -144,6 +144,11 @@ func GetAttachmentByUUID(uuid string) (*Attachment, error) {
return getAttachmentByUUID(x, uuid)
}

// ExistAttachmentsByUUID returns true if attachment is exist by given UUID
func ExistAttachmentsByUUID(uuid string) (bool, error) {
return x.Where("`uuid`=?", uuid).Exist(new(Attachment))
}

// GetAttachmentByReleaseIDFileName returns attachment by given releaseId and fileName.
func GetAttachmentByReleaseIDFileName(releaseID int64, fileName string) (*Attachment, error) {
return getAttachmentByReleaseIDFileName(x, releaseID, fileName)

@@ -302,7 +302,7 @@ func DeleteOrphanedIssues() error {
// CountOrphanedObjects count subjects with have no existing refobject anymore
func CountOrphanedObjects(subject, refobject, joinCond string) (int64, error) {
return x.Table("`"+subject+"`").
Join("LEFT", refobject, joinCond).
Join("LEFT", "`"+refobject+"`", joinCond).
Where(builder.IsNull{"`" + refobject + "`.id"}).
Count("id")
}

@@ -45,19 +45,16 @@ func WithContext(f func(ctx DBContext) error) error {
// WithTx represents executing database operations on a transaction
func WithTx(f func(ctx DBContext) error) error {
sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {
sess.Close()
return err
}

if err := f(DBContext{sess}); err != nil {
sess.Close()
return err
}

err := sess.Commit()
sess.Close()
return err
return sess.Commit()
}

// Iterate iterates the databases and doing something
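The `WithTx` rewrite above replaces three explicit `sess.Close()` calls with a single `defer sess.Close()`, so every return path releases the session. The same shape expressed with the standard library, as a hedged generic sketch rather than Gitea's code:

```go
package dbutil

import "database/sql"

// withTx runs f inside a transaction: a single deferred Rollback covers
// every early-return path, and calling Rollback after a successful Commit
// is a harmless no-op.
func withTx(db *sql.DB, f func(tx *sql.Tx) error) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	if err := f(tx); err != nil {
		return err
	}
	return tx.Commit()
}
```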
@@ -568,7 +568,7 @@
-
id: 40
owner_id: 23
owner_name: limited_org
owner_name: privated_org
lower_name: public_repo_on_private_org
name: public_repo_on_private_org
is_private: false

@@ -581,7 +581,7 @@
-
id: 41
owner_id: 23
owner_name: limited_org
owner_name: privated_org
lower_name: private_repo_on_private_org
name: private_repo_on_private_org
is_private: true
@@ -99,6 +99,46 @@ func AddGPGKey(ownerID int64, content, token, signature string) ([]*GPGKey, erro
verified = true
}

if len(ekeys) > 1 {
id2key := map[string]*openpgp.Entity{}
newEKeys := make([]*openpgp.Entity, 0, len(ekeys))
for _, ekey := range ekeys {
id := ekey.PrimaryKey.KeyIdString()
if original, has := id2key[id]; has {
// Coalesce this with the other one
for _, subkey := range ekey.Subkeys {
if subkey.PublicKey == nil {
continue
}
found := false

for _, originalSubkey := range original.Subkeys {
if originalSubkey.PublicKey == nil {
continue
}
if originalSubkey.PublicKey.KeyId == subkey.PublicKey.KeyId {
found = true
break
}
}
if !found {
original.Subkeys = append(original.Subkeys, subkey)
}
}
for name, identity := range ekey.Identities {
if _, has := original.Identities[name]; has {
continue
}
original.Identities[name] = identity
}
continue
}
id2key[id] = ekey
newEKeys = append(newEKeys, ekey)
}
ekeys = newEKeys
}

for _, ekey := range ekeys {
// Key ID cannot be duplicated.
has, err := sess.Where("key_id=?", ekey.PrimaryKey.KeyIdString()).
@@ -1517,12 +1517,12 @@ func GetIssueStats(opts *IssueStatsOptions) (*IssueStats, error) {
func getIssueStatsChunk(opts *IssueStatsOptions, issueIDs []int64) (*IssueStats, error) {
stats := &IssueStats{}

countSession := func(opts *IssueStatsOptions) *xorm.Session {
countSession := func(opts *IssueStatsOptions, issueIDs []int64) *xorm.Session {
sess := x.
Where("issue.repo_id = ?", opts.RepoID)

if len(opts.IssueIDs) > 0 {
sess.In("issue.id", opts.IssueIDs)
if len(issueIDs) > 0 {
sess.In("issue.id", issueIDs)
}

if len(opts.Labels) > 0 && opts.Labels != "0" {

@@ -1572,13 +1572,13 @@ func getIssueStatsChunk(opts *IssueStatsOptions, issueIDs []int64) (*IssueStats,
}

var err error
stats.OpenCount, err = countSession(opts).
stats.OpenCount, err = countSession(opts, issueIDs).
And("issue.is_closed = ?", false).
Count(new(Issue))
if err != nil {
return stats, err
}
stats.ClosedCount, err = countSession(opts).
stats.ClosedCount, err = countSession(opts, issueIDs).
And("issue.is_closed = ?", true).
Count(new(Issue))
return stats, err
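The fix above makes each per-chunk count use the chunk's own `issueIDs` instead of the full `opts.IssueIDs`, which is what keeps every query under the database's parameter limit. A small self-contained sketch of chunked aggregation (the 300-parameter limit here is illustrative):

```go
package main

import "fmt"

const maxQueryParameters = 300 // illustrative cap on IDs per IN (...) clause

// countInChunks sums a per-chunk count so that no single query receives
// more than maxQueryParameters IDs.
func countInChunks(ids []int64, countChunk func(chunk []int64) int64) int64 {
	var total int64
	for len(ids) > 0 {
		n := len(ids)
		if n > maxQueryParameters {
			n = maxQueryParameters
		}
		total += countChunk(ids[:n])
		ids = ids[n:]
	}
	return total
}

func main() {
	ids := make([]int64, 710)
	for i := range ids {
		ids[i] = int64(i + 1)
	}
	// Stand-in for a database COUNT over "WHERE id IN (chunk...)".
	total := countInChunks(ids, func(chunk []int64) int64 { return int64(len(chunk)) })
	fmt.Println(total) // 710, computed across three chunks
}
```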
@@ -13,6 +13,26 @@ import (
"xorm.io/xorm"
)

// ErrIssueStopwatchNotExist represents an error that stopwatch is not exist
type ErrIssueStopwatchNotExist struct {
UserID int64
IssueID int64
}

func (err ErrIssueStopwatchNotExist) Error() string {
return fmt.Sprintf("issue stopwatch doesn't exist[uid: %d, issue_id: %d", err.UserID, err.IssueID)
}

// ErrIssueStopwatchAlreadyExist represents an error that stopwatch is already exist
type ErrIssueStopwatchAlreadyExist struct {
UserID int64
IssueID int64
}

func (err ErrIssueStopwatchAlreadyExist) Error() string {
return fmt.Sprintf("issue stopwatch already exists[uid: %d, issue_id: %d", err.UserID, err.IssueID)
}

// Stopwatch represents a stopwatch for time tracking.
type Stopwatch struct {
ID int64 `xorm:"pk autoincr"`

@@ -74,91 +94,141 @@ func hasUserStopwatch(e Engine, userID int64) (exists bool, sw *Stopwatch, err e
return
}

// FinishIssueStopwatchIfPossible if stopwatch exist then finish it otherwise ignore
func FinishIssueStopwatchIfPossible(user *User, issue *Issue) error {
_, exists, err := getStopwatch(x, user.ID, issue.ID)
if err != nil {
return err
}
if !exists {
return nil
}
return FinishIssueStopwatch(user, issue)
}

// CreateOrStopIssueStopwatch will create or remove a stopwatch and will log it into issue's timeline.
func CreateOrStopIssueStopwatch(user *User, issue *Issue) error {
_, exists, err := getStopwatch(x, user.ID, issue.ID)
if err != nil {
return err
}
if exists {
return FinishIssueStopwatch(user, issue)
}
return CreateIssueStopwatch(user, issue)
}

// FinishIssueStopwatch if stopwatch exist then finish it otherwise return an error
func FinishIssueStopwatch(user *User, issue *Issue) error {
sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {
return err
}
if err := createOrStopIssueStopwatch(sess, user, issue); err != nil {
if err := finishIssueStopwatch(sess, user, issue); err != nil {
return err
}
return sess.Commit()
}

func createOrStopIssueStopwatch(e *xorm.Session, user *User, issue *Issue) error {
func finishIssueStopwatch(e *xorm.Session, user *User, issue *Issue) error {
sw, exists, err := getStopwatch(e, user.ID, issue.ID)
if err != nil {
return err
}
if !exists {
return ErrIssueStopwatchNotExist{
UserID: user.ID,
IssueID: issue.ID,
}
}

// Create tracked time out of the time difference between start date and actual date
timediff := time.Now().Unix() - int64(sw.CreatedUnix)

// Create TrackedTime
tt := &TrackedTime{
Created: time.Now(),
IssueID: issue.ID,
UserID: user.ID,
Time: timediff,
}

if _, err := e.Insert(tt); err != nil {
return err
}

if err := issue.loadRepo(e); err != nil {
return err
}
if _, err := createComment(e, &CreateCommentOptions{
Doer: user,
Issue: issue,
Repo: issue.Repo,
Content: SecToTime(timediff),
Type: CommentTypeStopTracking,
TimeID: tt.ID,
}); err != nil {
return err
}
_, err = e.Delete(sw)
return err
}

// CreateIssueStopwatch creates a stopwatch if not exist, otherwise return an error
func CreateIssueStopwatch(user *User, issue *Issue) error {
sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {
return err
}
if err := createIssueStopwatch(sess, user, issue); err != nil {
return err
}
return sess.Commit()
}

func createIssueStopwatch(e *xorm.Session, user *User, issue *Issue) error {
if err := issue.loadRepo(e); err != nil {
return err
}

// if another stopwatch is running: stop it
exists, sw, err := hasUserStopwatch(e, user.ID)
if err != nil {
return err
}
if exists {
// Create tracked time out of the time difference between start date and actual date
timediff := time.Now().Unix() - int64(sw.CreatedUnix)

// Create TrackedTime
tt := &TrackedTime{
Created: time.Now(),
IssueID: issue.ID,
UserID: user.ID,
Time: timediff,
}

if _, err := e.Insert(tt); err != nil {
return err
}

if _, err := createComment(e, &CreateCommentOptions{
Doer: user,
Issue: issue,
Repo: issue.Repo,
Content: SecToTime(timediff),
Type: CommentTypeStopTracking,
TimeID: tt.ID,
}); err != nil {
return err
}
if _, err := e.Delete(sw); err != nil {
return err
}
} else {
// if another stopwatch is running: stop it
exists, sw, err := hasUserStopwatch(e, user.ID)
issue, err := getIssueByID(e, sw.IssueID)
if err != nil {
return err
}
if exists {
issue, err := getIssueByID(e, sw.IssueID)
if err != nil {
return err
}
if err := createOrStopIssueStopwatch(e, user, issue); err != nil {
return err
}
}

// Create stopwatch
sw = &Stopwatch{
UserID: user.ID,
IssueID: issue.ID,
}

if _, err := e.Insert(sw); err != nil {
if err := finishIssueStopwatch(e, user, issue); err != nil {
return err
}
}

if _, err := createComment(e, &CreateCommentOptions{
Doer: user,
Issue: issue,
Repo: issue.Repo,
Type: CommentTypeStartTracking,
}); err != nil {
return err
}
// Create stopwatch
sw = &Stopwatch{
UserID: user.ID,
IssueID: issue.ID,
}

if _, err := e.Insert(sw); err != nil {
return err
}

if err := issue.loadRepo(e); err != nil {
return err
}

if _, err := createComment(e, &CreateCommentOptions{
Doer: user,
Issue: issue,
Repo: issue.Repo,
Type: CommentTypeStartTracking,
}); err != nil {
return err
}
return nil
}
@@ -5,7 +5,9 @@
package models

import (
"fmt"
"sort"
"sync"
"testing"
"time"

@@ -417,3 +419,43 @@ func TestIssue_ResolveMentions(t *testing.T) {
// Private repo, whole team
testSuccess("user17", "big_test_private_4", "user15", []string{"user17/owners"}, []int64{18})
}

func TestCorrectIssueStats(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())

// Because the condition is to have chunked database look-ups,
// We have to more issues than `maxQueryParameters`, we will insert.
// maxQueryParameters + 10 issues into the testDatabase.
// Each new issues will have a constant description "Bugs are nasty"
// Which will be used later on.

issueAmount := maxQueryParameters + 10

var wg sync.WaitGroup
for i := 0; i < issueAmount; i++ {
wg.Add(1)
go func(i int) {
testInsertIssue(t, fmt.Sprintf("Issue %d", i+1), "Bugs are nasty", 0)
wg.Done()
}(i)
}
wg.Wait()

// Now we will get all issueID's that match the "Bugs are nasty" query.
total, ids, err := SearchIssueIDsByKeyword("Bugs are nasty", []int64{1}, issueAmount, 0)

// Just to be sure.
assert.NoError(t, err)
assert.EqualValues(t, issueAmount, total)

// Now we will call the GetIssueStats with these IDs and if working,
// get the correct stats back.
issueStats, err := GetIssueStats(&IssueStatsOptions{
RepoID: 1,
IssueIDs: ids,
})

// Now check the values.
assert.NoError(t, err)
assert.EqualValues(t, issueStats.OpenCount, issueAmount)
}
@@ -71,9 +71,9 @@ var (
_ convert.Conversion = &SSPIConfig{}
)

// jsonUnmarshalHandleDoubleEncode - due to a bug in xorm (see https://gitea.com/xorm/xorm/pulls/1957) - it's
// JSONUnmarshalHandleDoubleEncode - due to a bug in xorm (see https://gitea.com/xorm/xorm/pulls/1957) - it's
// possible that a Blob may be double encoded or gain an unwanted prefix of 0xff 0xfe.
func jsonUnmarshalHandleDoubleEncode(bs []byte, v interface{}) error {
func JSONUnmarshalHandleDoubleEncode(bs []byte, v interface{}) error {
json := jsoniter.ConfigCompatibleWithStandardLibrary
err := json.Unmarshal(bs, v)
if err != nil {

@@ -89,7 +89,7 @@ func jsonUnmarshalHandleDoubleEncode(bs []byte, v interface{}) error {
rs = append(rs, temp...)
}
if ok {
if rs[0] == 0xff && rs[1] == 0xfe {
if len(rs) > 1 && rs[0] == 0xff && rs[1] == 0xfe {
rs = rs[2:]
}
err = json.Unmarshal(rs, v)

@@ -108,7 +108,7 @@ type LDAPConfig struct {

// FromDB fills up a LDAPConfig from serialized format.
func (cfg *LDAPConfig) FromDB(bs []byte) error {
err := jsonUnmarshalHandleDoubleEncode(bs, &cfg)
err := JSONUnmarshalHandleDoubleEncode(bs, &cfg)
if err != nil {
return err
}

@@ -149,7 +149,7 @@ type SMTPConfig struct {

// FromDB fills up an SMTPConfig from serialized format.
func (cfg *SMTPConfig) FromDB(bs []byte) error {
return jsonUnmarshalHandleDoubleEncode(bs, cfg)
return JSONUnmarshalHandleDoubleEncode(bs, cfg)
}

// ToDB exports an SMTPConfig to a serialized format.

@@ -166,7 +166,7 @@ type PAMConfig struct {

// FromDB fills up a PAMConfig from serialized format.
func (cfg *PAMConfig) FromDB(bs []byte) error {
return jsonUnmarshalHandleDoubleEncode(bs, cfg)
return JSONUnmarshalHandleDoubleEncode(bs, cfg)
}

// ToDB exports a PAMConfig to a serialized format.

@@ -187,7 +187,7 @@ type OAuth2Config struct {

// FromDB fills up an OAuth2Config from serialized format.
func (cfg *OAuth2Config) FromDB(bs []byte) error {
return jsonUnmarshalHandleDoubleEncode(bs, cfg)
return JSONUnmarshalHandleDoubleEncode(bs, cfg)
}

// ToDB exports an SMTPConfig to a serialized format.

@@ -207,7 +207,7 @@ type SSPIConfig struct {

// FromDB fills up an SSPIConfig from serialized format.
func (cfg *SSPIConfig) FromDB(bs []byte) error {
return jsonUnmarshalHandleDoubleEncode(bs, cfg)
return JSONUnmarshalHandleDoubleEncode(bs, cfg)
}

// ToDB exports an SSPIConfig to a serialized format.
@@ -7,6 +7,7 @@ package migrations

import (
"context"
"errors"
"fmt"
"os"
"reflect"

@@ -762,8 +763,14 @@ func dropTableColumns(sess *xorm.Session, tableName string, columnNames ...strin
}
tableSQL := string(res[0]["sql"])

// Get the string offset for column definitions: `CREATE TABLE ( column-definitions... )`
columnDefinitionsIndex := strings.Index(tableSQL, "(")
if columnDefinitionsIndex < 0 {
return errors.New("couldn't find column definitions")
}

// Separate out the column definitions
tableSQL = tableSQL[strings.Index(tableSQL, "("):]
tableSQL = tableSQL[columnDefinitionsIndex:]

// Remove the required columnNames
for _, name := range columnNames {
@@ -5,6 +5,8 @@
package migrations

import (
"context"

"xorm.io/xorm"
)

@@ -40,8 +42,17 @@ func convertTaskTypeToString(x *xorm.Engine) error {
return err
}

// to keep the migration could be rerun
exist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "hook_task", "type")
if err != nil {
return err
}
if !exist {
return nil
}

for i, s := range hookTaskTypes {
if _, err := x.Exec("UPDATE hook_task set typ = ? where type=?", s, i); err != nil {
if _, err := x.Exec("UPDATE hook_task set typ = ? where `type`=?", s, i); err != nil {
return err
}
}
@@ -5,6 +5,7 @@
package migrations

import (
"context"
"fmt"

"code.gitea.io/gitea/modules/setting"

@@ -19,6 +20,22 @@ func renameTaskErrorsToMessage(x *xorm.Engine) error {
Status int `xorm:"index"`
}

// This migration maybe rerun so that we should check if it has been run
messageExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "message")
if err != nil {
return err
}

if messageExist {
errorsExist, err := x.Dialect().IsColumnExist(x.DB(), context.Background(), "task", "errors")
if err != nil {
return err
}
if !errorsExist {
return nil
}
}

sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {

@@ -29,6 +46,13 @@ func renameTaskErrorsToMessage(x *xorm.Engine) error {
return fmt.Errorf("error on Sync2: %v", err)
}

if messageExist {
// if both errors and message exist, drop message at first
if err := dropTableColumns(sess, "task", "message"); err != nil {
return err
}
}

switch {
case setting.Database.UseMySQL:
if _, err := sess.Exec("ALTER TABLE `task` CHANGE errors message text"); err != nil {
@@ -179,16 +179,35 @@ func syncTables() error {
return x.StoreEngine("InnoDB").Sync2(tables...)
}

// NewTestEngine sets a new test xorm.Engine
func NewTestEngine() (err error) {
// NewInstallTestEngine creates a new xorm.Engine for testing during install
//
// This function will cause the basic database schema to be created
func NewInstallTestEngine(ctx context.Context, migrateFunc func(*xorm.Engine) error) (err error) {
x, err = GetNewEngine()
if err != nil {
return fmt.Errorf("Connect to database: %v", err)
return fmt.Errorf("failed to connect to database: %w", err)
}

x.SetMapper(names.GonicMapper{})
x.SetLogger(NewXORMLogger(!setting.IsProd()))
x.ShowSQL(!setting.IsProd())

x.SetDefaultContext(ctx)

if err = x.Ping(); err != nil {
return err
}

// We have to run migrateFunc here in case the user is re-running installation on a previously created DB.
// If we do not then table schemas will be changed and there will be conflicts when the migrations run properly.
//
// Installation should only be being re-run if users want to recover an old database.
// However, we should think carefully about should we support re-install on an installed instance,
// as there may be other problems due to secret reinitialization.
if err = migrateFunc(x); err != nil {
return fmt.Errorf("migrate: %v", err)
}

return syncTables()
}
@@ -455,7 +455,7 @@ func GetUserOrgsList(user *User) ([]*MinimalOrg, error) {
groupByStr := groupByCols.String()
groupByStr = groupByStr[0 : len(groupByStr)-1]

sess.Select(groupByStr+", count(repo_id) as org_count").
sess.Select(groupByStr+", count(distinct repo_id) as org_count").
Table("user").
Join("INNER", "team", "`team`.org_id = `user`.id").
Join("INNER", "team_user", "`team`.id = `team_user`.team_id").
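The one-word change above (`count(distinct repo_id)`) fixes over-counting: when a user belongs to several teams in the same organization, the team joins produce one row per team/repository pair, so a plain `count(repo_id)` counts the same repository more than once. A small in-memory sketch of the effect (the row values are illustrative):

```go
package main

import "fmt"

func main() {
	// Rows produced by joining user -> team -> repository for one user:
	// the same repo appears once per team that grants access to it.
	type row struct{ teamID, repoID int64 }
	rows := []row{{1, 10}, {2, 10}, {2, 11}}

	plain := len(rows) // what count(repo_id) sees: 3

	distinct := map[int64]struct{}{}
	for _, r := range rows {
		distinct[r.repoID] = struct{}{}
	}

	// 3 vs 2: count(distinct repo_id) gives the correct repository count.
	fmt.Println(plain, len(distinct))
}
```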
@@ -115,7 +115,9 @@ func (p *Project) NumClosedIssues() int {
func (p *Project) NumOpenIssues() int {
c, err := x.Table("project_issue").
Join("INNER", "issue", "project_issue.issue_id=issue.id").
Where("project_issue.project_id=? AND issue.is_closed=?", p.ID, false).Count("issue.id")
Where("project_issue.project_id=? AND issue.is_closed=?", p.ID, false).
Cols("issue_id").
Count()
if err != nil {
return 0
}
@@ -502,6 +502,9 @@ func GetLatestPullRequestByHeadInfo(repoID int64, branch string) (*PullRequest,

// GetPullRequestByIndex returns a pull request by the given index
func GetPullRequestByIndex(repoID, index int64) (*PullRequest, error) {
if index < 1 {
return nil, ErrPullRequestNotExist{}
}
pr := &PullRequest{
BaseRepoID: repoID,
Index: index,

@@ -133,6 +133,10 @@ func TestGetPullRequestByIndex(t *testing.T) {
_, err = GetPullRequestByIndex(9223372036854775807, 9223372036854775807)
assert.Error(t, err)
assert.True(t, IsErrPullRequestNotExist(err))

_, err = GetPullRequestByIndex(1, 0)
assert.Error(t, err)
assert.True(t, IsErrPullRequestNotExist(err))
}

func TestGetPullRequestByID(t *testing.T) {
@@ -1152,16 +1152,6 @@ func CreateRepository(ctx DBContext, doer, u *User, repo *Repository, overwriteO
|
||||
return fmt.Errorf("recalculateAccesses: %v", err)
|
||||
}
|
||||
|
||||
if u.Visibility == api.VisibleTypePublic && !repo.IsPrivate {
|
||||
// Create/Remove git-daemon-export-ok for git-daemon...
|
||||
daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
|
||||
if f, err := os.Create(daemonExportFile); err != nil {
|
||||
log.Error("Failed to create %s: %v", daemonExportFile, err)
|
||||
} else {
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
if setting.Service.AutoWatchNewRepos {
|
||||
if err = watchRepo(ctx.e, doer.ID, repo.ID, true); err != nil {
|
||||
return fmt.Errorf("watchRepo: %v", err)
|
||||
@@ -1175,6 +1165,46 @@ func CreateRepository(ctx DBContext, doer, u *User, repo *Repository, overwriteO
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckDaemonExportOK creates/removes git-daemon-export-ok for git-daemon...
|
||||
func (repo *Repository) CheckDaemonExportOK() error {
|
||||
return repo.checkDaemonExportOK(x)
|
||||
}
|
||||
|
||||
// CheckDaemonExportOKCtx creates/removes git-daemon-export-ok for git-daemon...
|
||||
func (repo *Repository) CheckDaemonExportOKCtx(ctx DBContext) error {
|
||||
return repo.checkDaemonExportOK(ctx.e)
|
||||
}
|
||||
|
||||
func (repo *Repository) checkDaemonExportOK(e Engine) error {
|
||||
if err := repo.getOwner(e); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create/Remove git-daemon-export-ok for git-daemon...
|
||||
daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
|
||||
|
||||
isExist, err := util.IsExist(daemonExportFile)
|
||||
if err != nil {
|
||||
log.Error("Unable to check if %s exists. Error: %v", daemonExportFile, err)
|
||||
return err
|
||||
}
|
||||
|
||||
isPublic := !repo.IsPrivate && repo.Owner.Visibility == api.VisibleTypePublic
|
||||
if !isPublic && isExist {
|
||||
if err = util.Remove(daemonExportFile); err != nil {
|
||||
log.Error("Failed to remove %s: %v", daemonExportFile, err)
|
||||
}
|
||||
} else if isPublic && !isExist {
|
||||
if f, err := os.Create(daemonExportFile); err != nil {
|
||||
log.Error("Failed to create %s: %v", daemonExportFile, err)
|
||||
} else {
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func countRepositories(userID int64, private bool) int64 {
|
||||
sess := x.Where("id > 0")
|
||||
|
||||
@@ -1217,6 +1247,12 @@ func IncrementRepoForkNum(ctx DBContext, repoID int64) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// DecrementRepoForkNum decrement repository fork number
|
||||
func DecrementRepoForkNum(ctx DBContext, repoID int64) error {
|
||||
_, err := ctx.e.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repoID)
|
||||
return err
|
||||
}
|
||||
|
||||
// ChangeRepositoryName changes all corresponding setting from old repository name to new one.
|
||||
func ChangeRepositoryName(doer *User, repo *Repository, newRepoName string) (err error) {
|
||||
oldRepoName := repo.Name
|
||||
@@ -1318,24 +1354,9 @@ func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err e
|
||||
}
|
||||
|
||||
// Create/Remove git-daemon-export-ok for git-daemon...
|
||||
daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
|
||||
isExist, err := util.IsExist(daemonExportFile)
|
||||
isPublic := !repo.IsPrivate && repo.Owner.Visibility == api.VisibleTypePublic
|
||||
if err != nil {
|
||||
log.Error("Unable to check if %s exists. Error: %v", daemonExportFile, err)
|
||||
if err := repo.checkDaemonExportOK(e); err != nil {
|
||||
return err
|
||||
}
|
||||
if !isPublic && isExist {
|
||||
if err = util.Remove(daemonExportFile); err != nil {
|
||||
log.Error("Failed to remove %s: %v", daemonExportFile, err)
|
||||
}
|
||||
} else if isPublic && !isExist {
|
||||
if f, err := os.Create(daemonExportFile); err != nil {
|
||||
log.Error("Failed to create %s: %v", daemonExportFile, err)
|
||||
} else {
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
|
||||
forkRepos, err := getRepositoriesByForkID(e, repo.ID)
|
||||
if err != nil {
|
||||
|
||||
@@ -8,6 +8,7 @@ package models
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
|
||||
"xorm.io/builder"
|
||||
@@ -83,16 +84,21 @@ func (repo *Repository) getCollaborators(e Engine, listOptions ListOptions) ([]*
|
||||
return nil, fmt.Errorf("getCollaborations: %v", err)
|
||||
}
|
||||
|
||||
collaborators := make([]*Collaborator, len(collaborations))
|
||||
for i, c := range collaborations {
|
||||
collaborators := make([]*Collaborator, 0, len(collaborations))
|
||||
for _, c := range collaborations {
|
||||
user, err := getUserByID(e, c.UserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if IsErrUserNotExist(err) {
|
||||
log.Warn("Inconsistent DB: User: %d is listed as collaborator of %-v but does not exist", c.UserID, repo)
|
||||
user = NewGhostUser()
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
collaborators[i] = &Collaborator{
|
||||
collaborators = append(collaborators, &Collaborator{
|
||||
User: user,
|
||||
Collaboration: c,
|
||||
}
|
||||
})
|
||||
}
|
||||
return collaborators, nil
|
||||
}
|
||||
|
||||
@@ -269,6 +269,14 @@ func TransferOwnership(doer *User, newOwnerName string, repo *Repository) (err e
|
||||
// Dummy object.
|
||||
collaboration := &Collaboration{RepoID: repo.ID}
|
||||
for _, c := range collaborators {
|
||||
if c.IsGhost() {
|
||||
collaboration.ID = c.Collaboration.ID
|
||||
if _, err := sess.Delete(collaboration); err != nil {
|
||||
return fmt.Errorf("remove collaborator '%d': %v", c.ID, err)
|
||||
}
|
||||
collaboration.ID = 0
|
||||
}
|
||||
|
||||
if c.ID != newOwner.ID {
|
||||
isMember, err := isOrganizationMember(sess, newOwner.ID, c.ID)
|
||||
if err != nil {
|
||||
@@ -281,6 +289,7 @@ func TransferOwnership(doer *User, newOwnerName string, repo *Repository) (err e
|
||||
if _, err := sess.Delete(collaboration); err != nil {
|
||||
return fmt.Errorf("remove collaborator '%d': %v", c.ID, err)
|
||||
}
|
||||
collaboration.UserID = 0
|
||||
}
|
||||
|
||||
// Remove old team-repository relations.
|
||||
|
||||
@@ -28,7 +28,7 @@ type UnitConfig struct{}
|
||||
|
||||
// FromDB fills up a UnitConfig from serialized format.
|
||||
func (cfg *UnitConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a UnitConfig to a serialized format.
|
||||
@@ -44,7 +44,7 @@ type ExternalWikiConfig struct {
|
||||
|
||||
// FromDB fills up a ExternalWikiConfig from serialized format.
|
||||
func (cfg *ExternalWikiConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a ExternalWikiConfig to a serialized format.
|
||||
@@ -62,7 +62,7 @@ type ExternalTrackerConfig struct {
|
||||
|
||||
// FromDB fills up a ExternalTrackerConfig from serialized format.
|
||||
func (cfg *ExternalTrackerConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a ExternalTrackerConfig to a serialized format.
|
||||
@@ -80,7 +80,7 @@ type IssuesConfig struct {
|
||||
|
||||
// FromDB fills up a IssuesConfig from serialized format.
|
||||
func (cfg *IssuesConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a IssuesConfig to a serialized format.
|
||||
@@ -104,7 +104,7 @@ type PullRequestsConfig struct {
|
||||
|
||||
// FromDB fills up a PullRequestsConfig from serialized format.
|
||||
func (cfg *PullRequestsConfig) FromDB(bs []byte) error {
|
||||
return jsonUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
return JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
}
|
||||
|
||||
// ToDB exports a PullRequestsConfig to a serialized format.
|
||||
@@ -219,3 +219,9 @@ func getUnitsByRepoID(e Engine, repoID int64) (units []*RepoUnit, err error) {
|
||||
|
||||
return units, nil
|
||||
}
|
||||
|
||||
// UpdateRepoUnit updates the provided repo unit
|
||||
func UpdateRepoUnit(unit *RepoUnit) error {
|
||||
_, err := x.ID(unit.ID).Update(unit)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -434,7 +434,7 @@ func SubmitReview(doer *User, issue *Issue, reviewType ReviewType, content, comm
|
||||
// try to remove team review request if need
|
||||
if issue.Repo.Owner.IsOrganization() && (reviewType == ReviewTypeApprove || reviewType == ReviewTypeReject) {
|
||||
teamReviewRequests := make([]*Review, 0, 10)
|
||||
if err := sess.SQL("SELECT * FROM review WHERE reviewer_team_id > 0 AND type = ?", ReviewTypeRequest).Find(&teamReviewRequests); err != nil {
|
||||
if err := sess.SQL("SELECT * FROM review WHERE issue_id = ? AND reviewer_team_id > 0 AND type = ?", issue.ID, ReviewTypeRequest).Find(&teamReviewRequests); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ func (list U2FRegistrationList) ToRegistrations() []u2f.Registration {
|
||||
for _, reg := range list {
|
||||
r, err := reg.Parse()
|
||||
if err != nil {
|
||||
log.Fatal("parsing u2f registration: %v", err)
|
||||
log.Error("parsing u2f registration: %v", err)
|
||||
continue
|
||||
}
|
||||
regs = append(regs, *r)
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -27,6 +28,7 @@ func TestGetU2FRegistrationsByUID(t *testing.T) {
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
|
||||
res, err := GetU2FRegistrationsByUID(1)
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, res, 1)
|
||||
assert.Equal(t, "U2F Key", res[0].Name)
|
||||
@@ -71,3 +73,27 @@ func TestDeleteRegistration(t *testing.T) {
|
||||
assert.NoError(t, DeleteRegistration(reg))
|
||||
AssertNotExistsBean(t, &U2FRegistration{ID: 1})
|
||||
}
|
||||
|
||||
const validU2FRegistrationResponseHex = "0504b174bc49c7ca254b70d2e5c207cee9cf174820ebd77ea3c65508c26da51b657c1cc6b952f8621697936482da0a6d3d3826a59095daf6cd7c03e2e60385d2f6d9402a552dfdb7477ed65fd84133f86196010b2215b57da75d315b7b9e8fe2e3925a6019551bab61d16591659cbaf00b4950f7abfe6660e2e006f76868b772d70c253082013c3081e4a003020102020a47901280001155957352300a06082a8648ce3d0403023017311530130603550403130c476e756262792050696c6f74301e170d3132303831343138323933325a170d3133303831343138323933325a3031312f302d0603550403132650696c6f74476e756262792d302e342e312d34373930313238303030313135353935373335323059301306072a8648ce3d020106082a8648ce3d030107034200048d617e65c9508e64bcc5673ac82a6799da3c1446682c258c463fffdf58dfd2fa3e6c378b53d795c4a4dffb4199edd7862f23abaf0203b4b8911ba0569994e101300a06082a8648ce3d0403020347003044022060cdb6061e9c22262d1aac1d96d8c70829b2366531dda268832cb836bcd30dfa0220631b1459f09e6330055722c8d89b7f48883b9089b88d60d1d9795902b30410df304502201471899bcc3987e62e8202c9b39c33c19033f7340352dba80fcab017db9230e402210082677d673d891933ade6f617e5dbde2e247e70423fd5ad7804a6d3d3961ef871"
|
||||
|
||||
func TestToRegistrations_SkipInvalidItemsWithoutCrashing(t *testing.T) {
|
||||
regKeyRaw, _ := hex.DecodeString(validU2FRegistrationResponseHex)
|
||||
regs := U2FRegistrationList{
|
||||
&U2FRegistration{ID: 1},
|
||||
&U2FRegistration{ID: 2, Name: "U2F Key", UserID: 2, Counter: 0, Raw: regKeyRaw, CreatedUnix: 946684800, UpdatedUnix: 946684800},
|
||||
}
|
||||
|
||||
actual := regs.ToRegistrations()
|
||||
assert.Len(t, actual, 1)
|
||||
}
|
||||
|
||||
func TestToRegistrations(t *testing.T) {
|
||||
regKeyRaw, _ := hex.DecodeString(validU2FRegistrationResponseHex)
|
||||
regs := U2FRegistrationList{
|
||||
&U2FRegistration{ID: 1, Name: "U2F Key", UserID: 1, Counter: 0, Raw: regKeyRaw, CreatedUnix: 946684800, UpdatedUnix: 946684800},
|
||||
&U2FRegistration{ID: 2, Name: "U2F Key", UserID: 2, Counter: 0, Raw: regKeyRaw, CreatedUnix: 946684800, UpdatedUnix: 946684800},
|
||||
}
|
||||
|
||||
actual := regs.ToRegistrations()
|
||||
assert.Len(t, actual, 2)
|
||||
}
|
||||
|
||||
@@ -77,9 +77,6 @@ var (
|
||||
// ErrEmailNotActivated e-mail address has not been activated error
|
||||
ErrEmailNotActivated = errors.New("E-mail address has not been activated")
|
||||
|
||||
// ErrUserNameIllegal user name contains illegal characters error
|
||||
ErrUserNameIllegal = errors.New("User name contains illegal characters")
|
||||
|
||||
// ErrLoginSourceNotActived login source is not actived error
|
||||
ErrLoginSourceNotActived = errors.New("Login source is not actived")
|
||||
|
||||
@@ -296,7 +293,7 @@ func (u *User) CanImportLocal() bool {
|
||||
// DashboardLink returns the user dashboard page link.
|
||||
func (u *User) DashboardLink() string {
|
||||
if u.IsOrganization() {
|
||||
return u.OrganisationLink() + "/dashboard/"
|
||||
return u.OrganisationLink() + "/dashboard"
|
||||
}
|
||||
return setting.AppSubURL + "/"
|
||||
}
|
||||
@@ -1062,9 +1059,9 @@ func checkDupEmail(e Engine, u *User) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateUser check if user is valide to insert / update into database
|
||||
// validateUser check if user is valid to insert / update into database
|
||||
func validateUser(u *User) error {
|
||||
if !setting.Service.AllowedUserVisibilityModesSlice.IsAllowedVisibility(u.Visibility) {
|
||||
if !setting.Service.AllowedUserVisibilityModesSlice.IsAllowedVisibility(u.Visibility) && !u.IsOrganization() {
|
||||
return fmt.Errorf("visibility Mode not allowed: %s", u.Visibility.String())
|
||||
}
|
||||
|
||||
@@ -1072,18 +1069,46 @@ func validateUser(u *User) error {
|
||||
return ValidateEmail(u.Email)
|
||||
}
|
||||
|
||||
func updateUser(e Engine, u *User) error {
|
||||
func updateUser(e Engine, u *User, changePrimaryEmail bool) error {
|
||||
if err := validateUser(u); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if changePrimaryEmail {
|
||||
var emailAddress EmailAddress
|
||||
has, err := e.Where("lower_email=?", strings.ToLower(u.Email)).Get(&emailAddress)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !has {
|
||||
// 1. Update old primary email
|
||||
if _, err = e.Where("uid=? AND is_primary=?", u.ID, true).Cols("is_primary").Update(&EmailAddress{
|
||||
IsPrimary: false,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
emailAddress.Email = u.Email
|
||||
emailAddress.UID = u.ID
|
||||
emailAddress.IsActivated = true
|
||||
emailAddress.IsPrimary = true
|
||||
if _, err := e.Insert(&emailAddress); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if _, err := e.ID(emailAddress).Cols("is_primary").Update(&EmailAddress{
|
||||
IsPrimary: true,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
_, err := e.ID(u.ID).AllCols().Update(u)
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateUser updates user's information.
|
||||
func UpdateUser(u *User) error {
|
||||
return updateUser(x, u)
|
||||
func UpdateUser(u *User, changePrimaryEmail bool) error {
|
||||
return updateUser(x, u, changePrimaryEmail)
|
||||
}
|
||||
|
||||
// UpdateUserCols update user according special columns
|
||||
@@ -1112,7 +1137,7 @@ func UpdateUserSetting(u *User) (err error) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err = updateUser(sess, u); err != nil {
|
||||
if err = updateUser(sess, u, false); err != nil {
|
||||
return err
|
||||
}
|
||||
return sess.Commit()
|
||||
|
||||
@@ -7,6 +7,9 @@ package models
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -37,6 +40,10 @@ func TestGetUserHeatmapDataByUser(t *testing.T) {
|
||||
// Prepare
|
||||
assert.NoError(t, PrepareTestDatabase())
|
||||
|
||||
// Mock time
|
||||
timeutil.Set(time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC))
|
||||
defer timeutil.Unset()
|
||||
|
||||
for i, tc := range testCases {
|
||||
user := AssertExistsAndLoadBean(t, &User{ID: tc.userID}).(*User)
|
||||
|
||||
|
||||
@@ -475,17 +475,17 @@ func TestUpdateUser(t *testing.T) {
|
||||
user := AssertExistsAndLoadBean(t, &User{ID: 2}).(*User)
|
||||
|
||||
user.KeepActivityPrivate = true
|
||||
assert.NoError(t, UpdateUser(user))
|
||||
assert.NoError(t, UpdateUser(user, false))
|
||||
user = AssertExistsAndLoadBean(t, &User{ID: 2}).(*User)
|
||||
assert.True(t, user.KeepActivityPrivate)
|
||||
|
||||
setting.Service.AllowedUserVisibilityModesSlice = []bool{true, false, false}
|
||||
user.KeepActivityPrivate = false
|
||||
user.Visibility = structs.VisibleTypePrivate
|
||||
assert.Error(t, UpdateUser(user))
|
||||
assert.Error(t, UpdateUser(user, false))
|
||||
user = AssertExistsAndLoadBean(t, &User{ID: 2}).(*User)
|
||||
assert.True(t, user.KeepActivityPrivate)
|
||||
|
||||
user.Email = "no mail@mail.org"
|
||||
assert.Error(t, UpdateUser(user))
|
||||
assert.Error(t, UpdateUser(user, true))
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build pam
|
||||
// +build pam
|
||||
|
||||
// Copyright 2014 The Gogs Authors. All rights reserved.
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
// +build !pam
|
||||
|
||||
// Copyright 2014 The Gogs Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !pam
|
||||
// +build !pam
|
||||
|
||||
package pam
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build pam
|
||||
// +build pam
|
||||
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
|
||||
"github.com/gogs/chardet"
|
||||
"golang.org/x/net/html/charset"
|
||||
@@ -26,9 +27,9 @@ var UTF8BOM = []byte{'\xef', '\xbb', '\xbf'}
|
||||
// ToUTF8WithFallbackReader detects the encoding of content and converts it to a UTF-8 reader if possible
|
||||
func ToUTF8WithFallbackReader(rd io.Reader) io.Reader {
|
||||
var buf = make([]byte, 2048)
|
||||
n, err := rd.Read(buf)
|
||||
n, err := util.ReadAtMost(rd, buf)
|
||||
if err != nil {
|
||||
return rd
|
||||
return io.MultiReader(bytes.NewReader(RemoveBOMIfPresent(buf[:n])), rd)
|
||||
}
|
||||
|
||||
charsetLabel, err := DetectEncoding(buf[:n])
|
||||
|
||||
@@ -587,6 +587,17 @@ func GetContext(req *http.Request) *Context {
|
||||
return req.Context().Value(contextKey).(*Context)
|
||||
}
|
||||
|
||||
// GetContextUser returns context user
func GetContextUser(req *http.Request) *models.User {
	if apiContext, ok := req.Context().Value(apiContextKey).(*APIContext); ok {
		return apiContext.User
	}
	if ctx, ok := req.Context().Value(contextKey).(*Context); ok {
		return ctx.User
	}
	return nil
}
|
||||
|
||||
// SignedUserName returns signed user's name via context
|
||||
func SignedUserName(req *http.Request) string {
|
||||
if middleware.IsInternalPath(req) {
|
||||
|
||||
@@ -58,6 +58,7 @@ type Repository struct {
|
||||
Commit *git.Commit
|
||||
Tag *git.Tag
|
||||
GitRepo *git.Repository
|
||||
RefName string
|
||||
BranchName string
|
||||
TagName string
|
||||
TreePath string
|
||||
@@ -190,9 +191,9 @@ func (r *Repository) BranchNameSubURL() string {
|
||||
case r.IsViewBranch:
|
||||
return "branch/" + r.BranchName
|
||||
case r.IsViewTag:
|
||||
return "tag/" + r.BranchName
|
||||
return "tag/" + r.TagName
|
||||
case r.IsViewCommit:
|
||||
return "commit/" + r.BranchName
|
||||
return "commit/" + r.CommitID
|
||||
}
|
||||
log.Error("Unknown view type for repo: %v", r)
|
||||
return ""
|
||||
@@ -345,7 +346,7 @@ func repoAssignment(ctx *Context, repo *models.Repository) {
|
||||
}
|
||||
|
||||
// Check access.
|
||||
if ctx.Repo.Permission.AccessMode == models.AccessModeNone {
|
||||
if !ctx.Repo.Permission.HasAccess() {
|
||||
if ctx.Query("go-get") == "1" {
|
||||
EarlyResponseForGoGetMeta(ctx)
|
||||
return
|
||||
@@ -562,8 +563,6 @@ func RepoAssignment(ctx *Context) (cancel context.CancelFunc) {
|
||||
ctx.Data["Branches"] = brs
|
||||
ctx.Data["BranchesCount"] = len(brs)
|
||||
|
||||
ctx.Data["TagName"] = ctx.Repo.TagName
|
||||
|
||||
// If not branch selected, try default one.
|
||||
// If default branch doesn't exists, fall back to some other branch.
|
||||
if len(ctx.Repo.BranchName) == 0 {
|
||||
@@ -572,9 +571,9 @@ func RepoAssignment(ctx *Context) (cancel context.CancelFunc) {
|
||||
} else if len(brs) > 0 {
|
||||
ctx.Repo.BranchName = brs[0]
|
||||
}
|
||||
ctx.Repo.RefName = ctx.Repo.BranchName
|
||||
}
|
||||
ctx.Data["BranchName"] = ctx.Repo.BranchName
|
||||
ctx.Data["CommitID"] = ctx.Repo.CommitID
|
||||
|
||||
// People who have push access or have forked repository can propose a new pull request.
|
||||
canPush := ctx.Repo.CanWrite(models.UnitTypeCode) || (ctx.IsSigned && ctx.User.HasForkedRepo(ctx.Repo.Repository.ID))
|
||||
@@ -695,7 +694,7 @@ func getRefName(ctx *Context, pathType RepoRefType) string {
|
||||
}
|
||||
// For legacy and API support only full commit sha
|
||||
parts := strings.Split(path, "/")
|
||||
if len(parts) > 0 && len(parts[0]) == 40 {
|
||||
if len(parts) > 1 && len(parts[0]) == 40 {
|
||||
ctx.Repo.TreePath = strings.Join(parts[1:], "/")
|
||||
return parts[0]
|
||||
}
|
||||
@@ -759,7 +758,6 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
// Get default branch.
|
||||
if len(ctx.Params("*")) == 0 {
|
||||
refName = ctx.Repo.Repository.DefaultBranch
|
||||
ctx.Repo.BranchName = refName
|
||||
if !ctx.Repo.GitRepo.IsBranchExist(refName) {
|
||||
brs, _, err := ctx.Repo.GitRepo.GetBranches(0, 0)
|
||||
if err != nil {
|
||||
@@ -773,6 +771,8 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
}
|
||||
refName = brs[0]
|
||||
}
|
||||
ctx.Repo.RefName = refName
|
||||
ctx.Repo.BranchName = refName
|
||||
ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetBranchCommit(refName)
|
||||
if err != nil {
|
||||
ctx.ServerError("GetBranchCommit", err)
|
||||
@@ -783,9 +783,10 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
|
||||
} else {
|
||||
refName = getRefName(ctx, refType)
|
||||
ctx.Repo.BranchName = refName
|
||||
ctx.Repo.RefName = refName
|
||||
if refType.RefTypeIncludesBranches() && ctx.Repo.GitRepo.IsBranchExist(refName) {
|
||||
ctx.Repo.IsViewBranch = true
|
||||
ctx.Repo.BranchName = refName
|
||||
|
||||
ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetBranchCommit(refName)
|
||||
if err != nil {
|
||||
@@ -796,6 +797,8 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
|
||||
} else if refType.RefTypeIncludesTags() && ctx.Repo.GitRepo.IsTagExist(refName) {
|
||||
ctx.Repo.IsViewTag = true
|
||||
ctx.Repo.TagName = refName
|
||||
|
||||
ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetTagCommit(refName)
|
||||
if err != nil {
|
||||
ctx.ServerError("GetTagCommit", err)
|
||||
@@ -837,6 +840,7 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context
|
||||
|
||||
ctx.Data["BranchName"] = ctx.Repo.BranchName
|
||||
ctx.Data["BranchNameSubURL"] = ctx.Repo.BranchNameSubURL()
|
||||
ctx.Data["TagName"] = ctx.Repo.TagName
|
||||
ctx.Data["CommitID"] = ctx.Repo.CommitID
|
||||
ctx.Data["TreePath"] = ctx.Repo.TreePath
|
||||
ctx.Data["IsViewBranch"] = ctx.Repo.IsViewBranch
|
||||
|
||||
@@ -147,8 +147,9 @@ func ToCommit(repo *models.Repository, commit *git.Commit, userCache map[string]
|
||||
|
||||
return &api.Commit{
|
||||
CommitMeta: &api.CommitMeta{
|
||||
URL: repo.APIURL() + "/git/commits/" + commit.ID.String(),
|
||||
SHA: commit.ID.String(),
|
||||
URL: repo.APIURL() + "/git/commits/" + commit.ID.String(),
|
||||
SHA: commit.ID.String(),
|
||||
Created: commit.Committer.When,
|
||||
},
|
||||
HTMLURL: repo.HTMLURL() + "/commit/" + commit.ID.String(),
|
||||
RepoCommit: &api.RepoCommit{
|
||||
@@ -169,8 +170,9 @@ func ToCommit(repo *models.Repository, commit *git.Commit, userCache map[string]
|
||||
},
|
||||
Message: commit.Message(),
|
||||
Tree: &api.CommitMeta{
|
||||
URL: repo.APIURL() + "/git/trees/" + commit.ID.String(),
|
||||
SHA: commit.ID.String(),
|
||||
URL: repo.APIURL() + "/git/trees/" + commit.ID.String(),
|
||||
SHA: commit.ID.String(),
|
||||
Created: commit.Committer.When,
|
||||
},
|
||||
},
|
||||
Author: apiAuthor,
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
// ToAPIPullRequest assumes following fields have been assigned with valid values:
|
||||
// Required - Issue
|
||||
// Optional - Merger
|
||||
func ToAPIPullRequest(pr *models.PullRequest) *api.PullRequest {
|
||||
func ToAPIPullRequest(pr *models.PullRequest, doer *models.User) *api.PullRequest {
|
||||
var (
|
||||
baseBranch *git.Branch
|
||||
headBranch *git.Branch
|
||||
@@ -41,6 +41,12 @@ func ToAPIPullRequest(pr *models.PullRequest) *api.PullRequest {
|
||||
return nil
|
||||
}
|
||||
|
||||
perm, err := models.GetUserRepoPermission(pr.BaseRepo, doer)
|
||||
if err != nil {
|
||||
log.Error("GetUserRepoPermission[%d]: %v", pr.BaseRepoID, err)
|
||||
perm.AccessMode = models.AccessModeNone
|
||||
}
|
||||
|
||||
apiPullRequest := &api.PullRequest{
|
||||
ID: pr.ID,
|
||||
URL: pr.Issue.HTMLURL(),
|
||||
@@ -68,7 +74,7 @@ func ToAPIPullRequest(pr *models.PullRequest) *api.PullRequest {
|
||||
Name: pr.BaseBranch,
|
||||
Ref: pr.BaseBranch,
|
||||
RepoID: pr.BaseRepoID,
|
||||
Repository: ToRepo(pr.BaseRepo, models.AccessModeNone),
|
||||
Repository: ToRepo(pr.BaseRepo, perm.AccessMode),
|
||||
},
|
||||
Head: &api.PRBranchInfo{
|
||||
Name: pr.HeadBranch,
|
||||
@@ -96,8 +102,14 @@ func ToAPIPullRequest(pr *models.PullRequest) *api.PullRequest {
|
||||
}
|
||||
|
||||
if pr.HeadRepo != nil {
|
||||
perm, err := models.GetUserRepoPermission(pr.HeadRepo, doer)
|
||||
if err != nil {
|
||||
log.Error("GetUserRepoPermission[%d]: %v", pr.HeadRepoID, err)
|
||||
perm.AccessMode = models.AccessModeNone
|
||||
}
|
||||
|
||||
apiPullRequest.Head.RepoID = pr.HeadRepo.ID
|
||||
apiPullRequest.Head.Repository = ToRepo(pr.HeadRepo, models.AccessModeNone)
|
||||
apiPullRequest.Head.Repository = ToRepo(pr.HeadRepo, perm.AccessMode)
|
||||
|
||||
headGitRepo, err := git.OpenRepository(pr.HeadRepo.RepoPath())
|
||||
if err != nil {
|
||||
|
||||
@@ -20,14 +20,14 @@ func TestPullRequest_APIFormat(t *testing.T) {
|
||||
pr := models.AssertExistsAndLoadBean(t, &models.PullRequest{ID: 1}).(*models.PullRequest)
|
||||
assert.NoError(t, pr.LoadAttributes())
|
||||
assert.NoError(t, pr.LoadIssue())
|
||||
apiPullRequest := ToAPIPullRequest(pr)
|
||||
apiPullRequest := ToAPIPullRequest(pr, nil)
|
||||
assert.NotNil(t, apiPullRequest)
|
||||
assert.EqualValues(t, &structs.PRBranchInfo{
|
||||
Name: "branch1",
|
||||
Ref: "refs/pull/2/head",
|
||||
Sha: "4a357436d925b5c974181ff12a994538ddc5a269",
|
||||
RepoID: 1,
|
||||
Repository: ToRepo(headRepo, models.AccessModeNone),
|
||||
Repository: ToRepo(headRepo, models.AccessModeRead),
|
||||
}, apiPullRequest.Head)
|
||||
|
||||
// without HeadRepo
|
||||
@@ -37,7 +37,7 @@ func TestPullRequest_APIFormat(t *testing.T) {
|
||||
// simulate fork deletion
|
||||
pr.HeadRepo = nil
|
||||
pr.HeadRepoID = 100000
|
||||
apiPullRequest = ToAPIPullRequest(pr)
|
||||
apiPullRequest = ToAPIPullRequest(pr, nil)
|
||||
assert.NotNil(t, apiPullRequest)
|
||||
assert.Nil(t, apiPullRequest.Head.Repository)
|
||||
assert.EqualValues(t, -1, apiPullRequest.Head.RepoID)
|
||||
|
||||
@@ -28,32 +28,24 @@ func CreateReader(input io.Reader, delimiter rune) *stdcsv.Reader {
|
||||
}
|
||||
|
||||
// CreateReaderAndGuessDelimiter tries to guess the field delimiter from the content and creates a csv.Reader.
|
||||
// Reads at most 10k bytes.
|
||||
func CreateReaderAndGuessDelimiter(rd io.Reader) (*stdcsv.Reader, error) {
|
||||
var data = make([]byte, 1e4)
|
||||
size, err := rd.Read(data)
|
||||
size, err := util.ReadAtMost(rd, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
delimiter := guessDelimiter(data[:size])
|
||||
|
||||
var newInput io.Reader
|
||||
if size < 1e4 {
|
||||
newInput = bytes.NewReader(data[:size])
|
||||
} else {
|
||||
newInput = io.MultiReader(bytes.NewReader(data), rd)
|
||||
}
|
||||
|
||||
return CreateReader(newInput, delimiter), nil
|
||||
return CreateReader(
|
||||
io.MultiReader(bytes.NewReader(data[:size]), rd),
|
||||
guessDelimiter(data[:size]),
|
||||
), nil
|
||||
}
|
||||
|
||||
// guessDelimiter scores the input CSV data against delimiters, and returns the best match.
|
||||
// Reads at most 10k bytes & 10 lines.
|
||||
func guessDelimiter(data []byte) rune {
|
||||
maxLines := 10
|
||||
maxBytes := util.Min(len(data), 1e4)
|
||||
text := string(data[:maxBytes])
|
||||
text = quoteRegexp.ReplaceAllLiteralString(text, "")
|
||||
text := quoteRegexp.ReplaceAllLiteralString(string(data), "")
|
||||
lines := strings.SplitN(text, "\n", maxLines+1)
|
||||
lines = lines[:util.Min(maxLines, len(lines))]
|
||||
|
||||
|
||||
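The change from a bare rd.Read to util.ReadAtMost in the hunk above matters because io.Reader.Read may legitimately return fewer bytes than requested even when more input follows, so a single Read can leave the delimiter guesser working on a truncated sample. A sketch of the idea using only the standard library (util.ReadAtMost itself is Gitea's helper; this is an illustration, not its actual source):

// readAtMost fills buf as far as the reader allows, treating a short read
// at end of input as success rather than an error
func readAtMost(r io.Reader, buf []byte) (int, error) {
	n, err := io.ReadFull(r, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil // fewer bytes than len(buf) is fine; n reports how many were read
	}
	return n, err
}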
@@ -13,6 +13,64 @@ import (
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
)
|
||||
|
||||
type consistencyCheck struct {
|
||||
Name string
|
||||
Counter func() (int64, error)
|
||||
Fixer func() (int64, error)
|
||||
FixedMessage string
|
||||
}
|
||||
|
||||
func (c *consistencyCheck) Run(logger log.Logger, autofix bool) error {
|
||||
count, err := c.Counter()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting %s", err, c.Name)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
var fixed int64
|
||||
if fixed, err = c.Fixer(); err != nil {
|
||||
logger.Critical("Error: %v whilst fixing %s", err, c.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
prompt := "Deleted"
|
||||
if c.FixedMessage != "" {
|
||||
prompt = c.FixedMessage
|
||||
}
|
||||
|
||||
if fixed < 0 {
|
||||
logger.Info(prompt+" %d %s", count, c.Name)
|
||||
} else {
|
||||
logger.Info(prompt+" %d/%d %s", fixed, count, c.Name)
|
||||
}
|
||||
} else {
|
||||
logger.Warn("Found %d %s", count, c.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func asFixer(fn func() error) func() (int64, error) {
|
||||
return func() (int64, error) {
|
||||
err := fn()
|
||||
return -1, err
|
||||
}
|
||||
}
|
||||
|
||||
func genericOrphanCheck(name, subject, refobject, joincond string) consistencyCheck {
	return consistencyCheck{
		Name: name,
		Counter: func() (int64, error) {
			return models.CountOrphanedObjects(subject, refobject, joincond)
		},
		Fixer: func() (int64, error) {
			err := models.DeleteOrphanedObjects(subject, refobject, joincond)
			return -1, err
		},
	}
}

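The refactor above replaces the repeated count/log/fix blocks with data: each check is one consistencyCheck value whose Run method handles the logging and the autofix branch. A minimal sketch of composing one (the check name and the function bodies here are hypothetical; only the fields and helpers come from this file):

// hypothetical example check, for illustration only
func exampleCheck() consistencyCheck {
	return consistencyCheck{
		Name:    "Example inconsistent rows",
		Counter: func() (int64, error) { return 0, nil }, // count the affected rows
		// asFixer adapts a func() error; it reports -1 so Run logs only the total count
		Fixer:        asFixer(func() error { return nil }),
		FixedMessage: "Removed",
	}
}

checkDBConsistency below then simply appends such values to consistencyChecks and calls c.Run(logger, autofix) on each in turn.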
func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
// make sure DB version is up to date
|
||||
if err := models.NewEngine(context.Background(), migrations.EnsureUpToDate); err != nil {
|
||||
@@ -20,246 +78,103 @@ func checkDBConsistency(logger log.Logger, autofix bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// find labels without existing repo or org
|
||||
count, err := models.CountOrphanedLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedLabels(); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned labels", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d labels without existing repository/organisation deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d labels without existing repository/organisation", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find IssueLabels without existing label
|
||||
count, err = models.CountOrphanedIssueLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned issue_labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedIssueLabels(); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned issue_labels", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d issue_labels without existing label deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d issue_labels without existing label", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find issues without existing repository
|
||||
count, err = models.CountOrphanedIssues()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned issues", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedIssues(); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned issues", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d issues without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d issues without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find pulls without existing issues
|
||||
count, err = models.CountOrphanedObjects("pull_request", "issue", "pull_request.issue_id=issue.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("pull_request", "issue", "pull_request.issue_id=issue.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d pull requests without existing issue deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d pull requests without existing issue", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find tracked times without existing issues/pulls
|
||||
count, err = models.CountOrphanedObjects("tracked_time", "issue", "tracked_time.issue_id=issue.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("tracked_time", "issue", "tracked_time.issue_id=issue.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d tracked times without existing issue deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d tracked times without existing issue", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find null archived repositories
|
||||
count, err = models.CountNullArchivedRepository()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting null archived repositories", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixNullArchivedRepository()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst fixing null archived repositories", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d repositories with null is_archived updated", updatedCount)
|
||||
} else {
|
||||
logger.Warn("%d repositories with null is_archived", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find label comments with empty labels
|
||||
count, err = models.CountCommentTypeLabelWithEmptyLabel()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting label comments with empty labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixCommentTypeLabelWithEmptyLabel()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst removing label comments with empty labels", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d label comments with empty labels removed", updatedCount)
|
||||
} else {
|
||||
logger.Warn("%d label comments with empty labels", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find label comments with labels from outside the repository
|
||||
count, err = models.CountCommentTypeLabelWithOutsideLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting label comments with outside labels", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixCommentTypeLabelWithOutsideLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst removing label comments with outside labels", err)
|
||||
return err
|
||||
}
|
||||
log.Info("%d label comments with outside labels removed", updatedCount)
|
||||
} else {
|
||||
log.Warn("%d label comments with outside labels", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find issue_label with labels from outside the repository
|
||||
count, err = models.CountIssueLabelWithOutsideLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting issue_labels from outside the repository or organisation", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
updatedCount, err := models.FixIssueLabelWithOutsideLabels()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst removing issue_labels from outside the repository or organisation", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d issue_labels from outside the repository or organisation removed", updatedCount)
|
||||
} else {
|
||||
logger.Warn("%d issue_labels from outside the repository or organisation", count)
|
||||
}
|
||||
consistencyChecks := []consistencyCheck{
|
||||
{
|
||||
// find labels without existing repo or org
|
||||
Name: "Orphaned Labels without existing repository or organisation",
|
||||
Counter: models.CountOrphanedLabels,
|
||||
Fixer: asFixer(models.DeleteOrphanedLabels),
|
||||
},
|
||||
{
|
||||
// find IssueLabels without existing label
|
||||
Name: "Orphaned Issue Labels without existing label",
|
||||
Counter: models.CountOrphanedIssueLabels,
|
||||
Fixer: asFixer(models.DeleteOrphanedIssueLabels),
|
||||
},
|
||||
{
|
||||
// find issues without existing repository
|
||||
Name: "Orphaned Issues without existing repository",
|
||||
Counter: models.CountOrphanedIssues,
|
||||
Fixer: asFixer(models.DeleteOrphanedIssues),
|
||||
},
|
||||
// find releases without existing repository
|
||||
genericOrphanCheck("Orphaned Releases without existing repository",
|
||||
"release", "repository", "release.repo_id=repository.id"),
|
||||
// find pulls without existing issues
|
||||
genericOrphanCheck("Orphaned PullRequests without existing issue",
|
||||
"pull_request", "issue", "pull_request.issue_id=issue.id"),
|
||||
// find tracked times without existing issues/pulls
|
||||
genericOrphanCheck("Orphaned TrackedTimes without existing issue",
|
||||
"tracked_time", "issue", "tracked_time.issue_id=issue.id"),
|
||||
// find null archived repositories
|
||||
{
|
||||
Name: "Repositories with is_archived IS NULL",
|
||||
Counter: models.CountNullArchivedRepository,
|
||||
Fixer: models.FixNullArchivedRepository,
|
||||
FixedMessage: "Fixed",
|
||||
},
|
||||
// find label comments with empty labels
|
||||
{
|
||||
Name: "Label comments with empty labels",
|
||||
Counter: models.CountCommentTypeLabelWithEmptyLabel,
|
||||
Fixer: models.FixCommentTypeLabelWithEmptyLabel,
|
||||
FixedMessage: "Fixed",
|
||||
},
|
||||
// find label comments with labels from outside the repository
|
||||
{
|
||||
Name: "Label comments with labels from outside the repository",
|
||||
Counter: models.CountCommentTypeLabelWithOutsideLabels,
|
||||
Fixer: models.FixCommentTypeLabelWithOutsideLabels,
|
||||
FixedMessage: "Removed",
|
||||
},
|
||||
// find issue_label with labels from outside the repository
|
||||
{
|
||||
Name: "IssueLabels with Labels from outside the repository",
|
||||
Counter: models.CountIssueLabelWithOutsideLabels,
|
||||
Fixer: models.FixIssueLabelWithOutsideLabels,
|
||||
FixedMessage: "Removed",
|
||||
},
|
||||
}
|
||||
|
||||
// TODO: function to recalc all counters
|
||||
|
||||
if setting.Database.UsePostgreSQL {
|
||||
count, err = models.CountBadSequences()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst checking sequence values", err)
|
||||
consistencyChecks = append(consistencyChecks, consistencyCheck{
|
||||
Name: "Sequence values",
|
||||
Counter: models.CountBadSequences,
|
||||
Fixer: asFixer(models.FixBadSequences),
|
||||
FixedMessage: "Updated",
|
||||
})
|
||||
}
|
||||
|
||||
consistencyChecks = append(consistencyChecks,
|
||||
// find protected branches without existing repository
|
||||
genericOrphanCheck("Protected Branches without existing repository",
|
||||
"protected_branch", "repository", "protected_branch.repo_id=repository.id"),
|
||||
// find deleted branches without existing repository
|
||||
genericOrphanCheck("Deleted Branches without existing repository",
|
||||
"deleted_branch", "repository", "deleted_branch.repo_id=repository.id"),
|
||||
// find LFS locks without existing repository
|
||||
genericOrphanCheck("LFS locks without existing repository",
|
||||
"lfs_lock", "repository", "lfs_lock.repo_id=repository.id"),
|
||||
// find collaborations without users
|
||||
genericOrphanCheck("Collaborations without existing user",
|
||||
"collaboration", "user", "collaboration.user_id=user.id"),
|
||||
// find collaborations without repository
|
||||
genericOrphanCheck("Collaborations without existing repository",
|
||||
"collaboration", "repository", "collaboration.repo_id=repository.id"),
|
||||
// find access without users
|
||||
genericOrphanCheck("Access entries without existing user",
|
||||
"access", "user", "access.user_id=user.id"),
|
||||
// find access without repository
|
||||
genericOrphanCheck("Access entries without existing repository",
|
||||
"access", "repository", "access.repo_id=repository.id"),
|
||||
)
|
||||
|
||||
for _, c := range consistencyChecks {
|
||||
if err := c.Run(logger, autofix); err != nil {
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
err := models.FixBadSequences()
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst attempting to fix sequences", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d sequences updated", count)
|
||||
} else {
|
||||
logger.Warn("%d sequences with incorrect values", count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// find protected branches without existing repository
|
||||
count, err = models.CountOrphanedObjects("protected_branch", "repository", "protected_branch.repo_id=repository.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("protected_branch", "repository", "protected_branch.repo_id=repository.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d protected branches without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d protected branches without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find deleted branches without existing repository
|
||||
count, err = models.CountOrphanedObjects("deleted_branch", "repository", "deleted_branch.repo_id=repository.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("deleted_branch", "repository", "deleted_branch.repo_id=repository.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d deleted branches without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d deleted branches without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
// find LFS locks without existing repository
|
||||
count, err = models.CountOrphanedObjects("lfs_lock", "repository", "lfs_lock.repo_id=repository.id")
|
||||
if err != nil {
|
||||
logger.Critical("Error: %v whilst counting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
if autofix {
|
||||
if err = models.DeleteOrphanedObjects("lfs_lock", "repository", "lfs_lock.repo_id=repository.id"); err != nil {
|
||||
logger.Critical("Error: %v whilst deleting orphaned objects", err)
|
||||
return err
|
||||
}
|
||||
logger.Info("%d LFS locks without existing repository deleted", count)
|
||||
} else {
|
||||
logger.Warn("%d LFS locks without existing repository", count)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
317
modules/doctor/fix16961.go
Normal file
@@ -0,0 +1,317 @@
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
"xorm.io/builder"
|
||||
)
|
||||
|
||||
// #16831 revealed that the dump command was broken in 1.14.3-1.14.6 and 1.15.0 (#15885).
// This led to repo_unit and login_source cfg not being converted to JSON in the dump.
// Unfortunately, although it was hoped that only a few users were affected, it
// appears that many users are affected.
//
// We therefore need to provide a doctor command to fix this repeated issue (#16961).

func parseBool16961(bs []byte) (bool, error) {
	if bytes.EqualFold(bs, []byte("%!s(bool=false)")) {
		return false, nil
	}

	if bytes.EqualFold(bs, []byte("%!s(bool=true)")) {
		return true, nil
	}

	return false, fmt.Errorf("unexpected bool format: %s", string(bs))
}
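For context (an illustration, not part of the patch): a broken 1.14 dump serialized the config structs with Go's %s verb, so each bool field appears literally as "%!s(bool=true)" or "%!s(bool=false)" inside an "&{...}" wrapper, and parseBool16961 maps those tokens back to bools while rejecting anything else:

// hypothetical helper, for illustration only
func demoParseBool16961() {
	for _, tok := range []string{"%!s(bool=true)", "%!s(bool=false)", "oops"} {
		v, err := parseBool16961([]byte(tok))
		if err != nil {
			fmt.Printf("unfixable token %q: %v\n", tok, err) // callers treat this as "cannot fix"
			continue
		}
		fmt.Println(v) // true, then false
	}
}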
|
||||
|
||||
func fixUnitConfig16961(bs []byte, cfg *models.UnitConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle #16961
|
||||
if string(bs) != "&{}" && len(bs) != 0 {
|
||||
return
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixExternalWikiConfig16961(bs []byte, cfg *models.ExternalWikiConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(bs) < 3 {
|
||||
return
|
||||
}
|
||||
if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
|
||||
return
|
||||
}
|
||||
cfg.ExternalWikiURL = string(bs[2 : len(bs)-1])
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixExternalTrackerConfig16961(bs []byte, cfg *models.ExternalTrackerConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
// Handle #16961
|
||||
if len(bs) < 3 {
|
||||
return
|
||||
}
|
||||
|
||||
if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
|
||||
return
|
||||
}
|
||||
|
||||
parts := bytes.Split(bs[2:len(bs)-1], []byte{' '})
|
||||
if len(parts) != 3 {
|
||||
return
|
||||
}
|
||||
|
||||
cfg.ExternalTrackerURL = string(bytes.Join(parts[:len(parts)-2], []byte{' '}))
|
||||
cfg.ExternalTrackerFormat = string(parts[len(parts)-2])
|
||||
cfg.ExternalTrackerStyle = string(parts[len(parts)-1])
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixPullRequestsConfig16961(bs []byte, cfg *models.PullRequestsConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle #16961
|
||||
if len(bs) < 3 {
|
||||
return
|
||||
}
|
||||
|
||||
if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
|
||||
return
|
||||
}
|
||||
|
||||
// PullRequestsConfig was the following in 1.14
|
||||
// type PullRequestsConfig struct {
|
||||
// IgnoreWhitespaceConflicts bool
|
||||
// AllowMerge bool
|
||||
// AllowRebase bool
|
||||
// AllowRebaseMerge bool
|
||||
// AllowSquash bool
|
||||
// AllowManualMerge bool
|
||||
// AutodetectManualMerge bool
|
||||
// }
|
||||
//
|
||||
// 1.15 added in addition:
|
||||
// DefaultDeleteBranchAfterMerge bool
|
||||
// DefaultMergeStyle MergeStyle
|
||||
parts := bytes.Split(bs[2:len(bs)-1], []byte{' '})
|
||||
if len(parts) < 7 {
|
||||
return
|
||||
}
|
||||
|
||||
var parseErr error
|
||||
cfg.IgnoreWhitespaceConflicts, parseErr = parseBool16961(parts[0])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowMerge, parseErr = parseBool16961(parts[1])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowRebase, parseErr = parseBool16961(parts[2])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowRebaseMerge, parseErr = parseBool16961(parts[3])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowSquash, parseErr = parseBool16961(parts[4])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowManualMerge, parseErr = parseBool16961(parts[5])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AutodetectManualMerge, parseErr = parseBool16961(parts[6])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// 1.14 unit
|
||||
if len(parts) == 7 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if len(parts) < 9 {
|
||||
return
|
||||
}
|
||||
|
||||
cfg.DefaultDeleteBranchAfterMerge, parseErr = parseBool16961(parts[7])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
cfg.DefaultMergeStyle = models.MergeStyle(string(bytes.Join(parts[8:], []byte{' '})))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixIssuesConfig16961(bs []byte, cfg *models.IssuesConfig) (fixed bool, err error) {
|
||||
err = models.JSONUnmarshalHandleDoubleEncode(bs, &cfg)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle #16961
|
||||
if len(bs) < 3 {
|
||||
return
|
||||
}
|
||||
|
||||
if bs[0] != '&' || bs[1] != '{' || bs[len(bs)-1] != '}' {
|
||||
return
|
||||
}
|
||||
|
||||
parts := bytes.Split(bs[2:len(bs)-1], []byte{' '})
|
||||
if len(parts) != 3 {
|
||||
return
|
||||
}
|
||||
var parseErr error
|
||||
cfg.EnableTimetracker, parseErr = parseBool16961(parts[0])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.AllowOnlyContributorsToTrackTime, parseErr = parseBool16961(parts[1])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
cfg.EnableDependencies, parseErr = parseBool16961(parts[2])
|
||||
if parseErr != nil {
|
||||
return
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixBrokenRepoUnit16961(repoUnit *models.RepoUnit, bs []byte) (fixed bool, err error) {
|
||||
// Shortcut empty or null values
|
||||
if len(bs) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
switch models.UnitType(repoUnit.Type) {
|
||||
case models.UnitTypeCode, models.UnitTypeReleases, models.UnitTypeWiki, models.UnitTypeProjects:
|
||||
cfg := &models.UnitConfig{}
|
||||
repoUnit.Config = cfg
|
||||
if fixed, err := fixUnitConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
case models.UnitTypeExternalWiki:
|
||||
cfg := &models.ExternalWikiConfig{}
|
||||
repoUnit.Config = cfg
|
||||
|
||||
if fixed, err := fixExternalWikiConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
case models.UnitTypeExternalTracker:
|
||||
cfg := &models.ExternalTrackerConfig{}
|
||||
repoUnit.Config = cfg
|
||||
if fixed, err := fixExternalTrackerConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
case models.UnitTypePullRequests:
|
||||
cfg := &models.PullRequestsConfig{}
|
||||
repoUnit.Config = cfg
|
||||
|
||||
if fixed, err := fixPullRequestsConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
case models.UnitTypeIssues:
|
||||
cfg := &models.IssuesConfig{}
|
||||
repoUnit.Config = cfg
|
||||
if fixed, err := fixIssuesConfig16961(bs, cfg); !fixed {
|
||||
return false, err
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("unrecognized repo unit type: %v", repoUnit.Type))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func fixBrokenRepoUnits16961(logger log.Logger, autofix bool) error {
|
||||
// RepoUnit describes all units of a repository
|
||||
type RepoUnit struct {
|
||||
ID int64
|
||||
RepoID int64
|
||||
Type models.UnitType
|
||||
Config []byte
|
||||
CreatedUnix timeutil.TimeStamp `xorm:"INDEX CREATED"`
|
||||
}
|
||||
|
||||
count := 0
|
||||
|
||||
err := models.Iterate(
|
||||
models.DefaultDBContext(),
|
||||
new(RepoUnit),
|
||||
builder.Gt{
|
||||
"id": 0,
|
||||
},
|
||||
func(idx int, bean interface{}) error {
|
||||
unit := bean.(*RepoUnit)
|
||||
|
||||
bs := unit.Config
|
||||
repoUnit := &models.RepoUnit{
|
||||
ID: unit.ID,
|
||||
RepoID: unit.RepoID,
|
||||
Type: unit.Type,
|
||||
CreatedUnix: unit.CreatedUnix,
|
||||
}
|
||||
|
||||
if fixed, err := fixBrokenRepoUnit16961(repoUnit, bs); !fixed {
|
||||
return err
|
||||
}
|
||||
|
||||
count++
|
||||
if !autofix {
|
||||
return nil
|
||||
}
|
||||
|
||||
return models.UpdateRepoUnit(repoUnit)
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
logger.Critical("Unable to iterate acrosss repounits to fix the broken units: Error %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !autofix {
|
||||
logger.Warn("Found %d broken repo_units", count)
|
||||
return nil
|
||||
}
|
||||
logger.Info("Fixed %d broken repo_units", count)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
	Register(&Check{
		Title:     "Check for incorrectly dumped repo_units (See #16961)",
		Name:      "fix-broken-repo-units",
		IsDefault: false,
		Run:       fixBrokenRepoUnits16961,
		Priority:  7,
	})
}
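Because IsDefault is false, this check is skipped by a plain doctor run; it has to be requested explicitly, for example via the doctor command's --run flag with fix-broken-repo-units (and --fix to actually apply the repair), assuming the usual doctor CLI invocation.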
|
||||
271
modules/doctor/fix16961_test.go
Normal file
@@ -0,0 +1,271 @@
|
||||
// Copyright 2021 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func Test_fixUnitConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
bs: "",
|
||||
wantFixed: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "normal: {}",
|
||||
bs: "{}",
|
||||
wantFixed: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken but fixable: &{}",
|
||||
bs: "&{}",
|
||||
wantFixed: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken but unfixable: &{asdasd}",
|
||||
bs: "&{asdasd}",
|
||||
wantFixed: false,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotFixed, err := fixUnitConfig16961([]byte(tt.bs), &models.UnitConfig{})
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixUnitConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixUnitConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixExternalWikiConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
expected string
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "normal: {\"ExternalWikiURL\":\"http://someurl\"}",
|
||||
bs: "{\"ExternalWikiURL\":\"http://someurl\"}",
|
||||
expected: "http://someurl",
|
||||
wantFixed: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken: &{http://someurl}",
|
||||
bs: "&{http://someurl}",
|
||||
expected: "http://someurl",
|
||||
wantFixed: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken but unfixable: http://someurl",
|
||||
bs: "http://someurl",
|
||||
wantFixed: false,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &models.ExternalWikiConfig{}
|
||||
gotFixed, err := fixExternalWikiConfig16961([]byte(tt.bs), cfg)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixExternalWikiConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixExternalWikiConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
if cfg.ExternalWikiURL != tt.expected {
|
||||
t.Errorf("fixExternalWikiConfig_16961().ExternalWikiURL = %v, want %v", cfg.ExternalWikiURL, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixExternalTrackerConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
expected models.ExternalTrackerConfig
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "normal",
|
||||
bs: `{"ExternalTrackerURL":"a","ExternalTrackerFormat":"b","ExternalTrackerStyle":"c"}`,
|
||||
expected: models.ExternalTrackerConfig{
|
||||
ExternalTrackerURL: "a",
|
||||
ExternalTrackerFormat: "b",
|
||||
ExternalTrackerStyle: "c",
|
||||
},
|
||||
wantFixed: false,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken",
|
||||
bs: "&{a b c}",
|
||||
expected: models.ExternalTrackerConfig{
|
||||
ExternalTrackerURL: "a",
|
||||
ExternalTrackerFormat: "b",
|
||||
ExternalTrackerStyle: "c",
|
||||
},
|
||||
wantFixed: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "broken - too many fields",
|
||||
bs: "&{a b c d}",
|
||||
wantFixed: false,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "broken - wrong format",
|
||||
bs: "a b c d}",
|
||||
wantFixed: false,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &models.ExternalTrackerConfig{}
|
||||
gotFixed, err := fixExternalTrackerConfig16961([]byte(tt.bs), cfg)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixExternalTrackerConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixExternalTrackerConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
if cfg.ExternalTrackerFormat != tt.expected.ExternalTrackerFormat {
|
||||
t.Errorf("fixExternalTrackerConfig_16961().ExternalTrackerFormat = %v, want %v", tt.expected.ExternalTrackerFormat, cfg.ExternalTrackerFormat)
|
||||
}
|
||||
if cfg.ExternalTrackerStyle != tt.expected.ExternalTrackerStyle {
|
||||
t.Errorf("fixExternalTrackerConfig_16961().ExternalTrackerStyle = %v, want %v", tt.expected.ExternalTrackerStyle, cfg.ExternalTrackerStyle)
|
||||
}
|
||||
if cfg.ExternalTrackerURL != tt.expected.ExternalTrackerURL {
|
||||
t.Errorf("fixExternalTrackerConfig_16961().ExternalTrackerURL = %v, want %v", tt.expected.ExternalTrackerURL, cfg.ExternalTrackerURL)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixPullRequestsConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
expected models.PullRequestsConfig
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "normal",
|
||||
bs: `{"IgnoreWhitespaceConflicts":false,"AllowMerge":false,"AllowRebase":false,"AllowRebaseMerge":false,"AllowSquash":false,"AllowManualMerge":false,"AutodetectManualMerge":false,"DefaultDeleteBranchAfterMerge":false,"DefaultMergeStyle":""}`,
|
||||
},
|
||||
{
|
||||
name: "broken - 1.14",
|
||||
bs: `&{%!s(bool=false) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=false) %!s(bool=false)}`,
|
||||
expected: models.PullRequestsConfig{
|
||||
IgnoreWhitespaceConflicts: false,
|
||||
AllowMerge: true,
|
||||
AllowRebase: true,
|
||||
AllowRebaseMerge: true,
|
||||
AllowSquash: true,
|
||||
AllowManualMerge: false,
|
||||
AutodetectManualMerge: false,
|
||||
},
|
||||
wantFixed: true,
|
||||
},
|
||||
{
|
||||
name: "broken - 1.15",
|
||||
bs: `&{%!s(bool=false) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=true) %!s(bool=false) %!s(bool=false) %!s(bool=false) merge}`,
|
||||
expected: models.PullRequestsConfig{
|
||||
AllowMerge: true,
|
||||
AllowRebase: true,
|
||||
AllowRebaseMerge: true,
|
||||
AllowSquash: true,
|
||||
DefaultMergeStyle: models.MergeStyleMerge,
|
||||
},
|
||||
wantFixed: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &models.PullRequestsConfig{}
|
||||
gotFixed, err := fixPullRequestsConfig16961([]byte(tt.bs), cfg)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixPullRequestsConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixPullRequestsConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
assert.EqualValues(t, &tt.expected, cfg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixIssuesConfig_16961(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bs string
|
||||
expected models.IssuesConfig
|
||||
wantFixed bool
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "normal",
|
||||
bs: `{"EnableTimetracker":true,"AllowOnlyContributorsToTrackTime":true,"EnableDependencies":true}`,
|
||||
expected: models.IssuesConfig{
|
||||
EnableTimetracker: true,
|
||||
AllowOnlyContributorsToTrackTime: true,
|
||||
EnableDependencies: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "broken",
|
||||
bs: `&{%!s(bool=true) %!s(bool=true) %!s(bool=true)}`,
|
||||
expected: models.IssuesConfig{
|
||||
EnableTimetracker: true,
|
||||
AllowOnlyContributorsToTrackTime: true,
|
||||
EnableDependencies: true,
|
||||
},
|
||||
wantFixed: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &models.IssuesConfig{}
|
||||
gotFixed, err := fixIssuesConfig16961([]byte(tt.bs), cfg)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("fixIssuesConfig_16961() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if gotFixed != tt.wantFixed {
|
||||
t.Errorf("fixIssuesConfig_16961() = %v, want %v", gotFixed, tt.wantFixed)
|
||||
}
|
||||
assert.EqualValues(t, &tt.expected, cfg)
|
||||
})
|
||||
}
|
||||
}
|
||||
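A note on the fixtures above: strings like &{%!s(bool=true) %!s(bool=true) %!s(bool=true)} are what Go's fmt package produces when a pointer to a config struct is rendered with the %s verb instead of being JSON-marshalled, which is how the broken repo_unit rows behind #16961 arose. A minimal, self-contained sketch of that behaviour (the struct here is an illustrative stand-in, not the real models type):

package main

import "fmt"

// IssuesConfig mirrors the shape of the real config only for illustration.
type IssuesConfig struct {
	EnableTimetracker                bool
	AllowOnlyContributorsToTrackTime bool
	EnableDependencies               bool
}

func main() {
	cfg := &IssuesConfig{EnableTimetracker: true, AllowOnlyContributorsToTrackTime: true, EnableDependencies: true}
	// A struct pointer without a String() method rendered via %s prints "&{...}",
	// and each bool field becomes %!s(bool=...) because bool has no %s form.
	fmt.Printf("%s\n", cfg) // &{%!s(bool=true) %!s(bool=true) %!s(bool=true)}
}

This matches the "broken" inputs the tests feed into the fix*16961 helpers.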
76	modules/doctor/storage.go	Normal file
@@ -0,0 +1,76 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package doctor

import (
	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/storage"
)

func checkAttachmentStorageFiles(logger log.Logger, autofix bool) error {
	var total, garbageNum int
	var deletePaths []string
	if err := storage.Attachments.IterateObjects(func(p string, obj storage.Object) error {
		defer obj.Close()

		total++
		stat, err := obj.Stat()
		if err != nil {
			return err
		}
		exist, err := models.ExistAttachmentsByUUID(stat.Name())
		if err != nil {
			return err
		}
		if !exist {
			garbageNum++
			if autofix {
				deletePaths = append(deletePaths, p)
			}
		}
		return nil
	}); err != nil {
		logger.Error("storage.Attachments.IterateObjects failed: %v", err)
		return err
	}

	if garbageNum > 0 {
		if autofix {
			var deletedNum int
			for _, p := range deletePaths {
				if err := storage.Attachments.Delete(p); err != nil {
					log.Error("Delete attachment %s failed: %v", p, err)
				} else {
					deletedNum++
				}
			}
			logger.Info("%d attachments without a database record detected, %d deleted.", garbageNum, deletedNum)
		} else {
			logger.Warn("Checked %d attachments, %d have no database record.", total, garbageNum)
		}
	}
	return nil
}

func checkStorageFiles(logger log.Logger, autofix bool) error {
	if err := storage.Init(); err != nil {
		logger.Error("storage.Init failed: %v", err)
		return err
	}
	return checkAttachmentStorageFiles(logger, autofix)
}

func init() {
	Register(&Check{
		Title:                      "Check if there are garbage storage files",
		Name:                       "storages",
		IsDefault:                  false,
		Run:                        checkStorageFiles,
		AbortIfFailed:              false,
		SkipDatabaseInitialization: false,
		Priority:                   1,
	})
}
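The shape of checkAttachmentStorageFiles above is: record orphaned paths while iterating the attachment storage, then delete them in a second pass only when autofix is requested. A tiny stand-alone sketch of that collect-then-act pattern (the map stands in for the storage backend and the database lookup; none of these names are Gitea APIs):

package main

import "fmt"

func main() {
	// path -> whether a matching database record exists (made-up data)
	objects := map[string]bool{"uuid-a": true, "uuid-b": false, "uuid-c": false}
	autofix := true

	// First pass: only observe and collect, as the doctor check does.
	var orphans []string
	for path, hasRecord := range objects {
		if !hasRecord {
			orphans = append(orphans, path)
		}
	}

	// Second pass: act only when fixing was requested.
	if autofix {
		for _, path := range orphans {
			delete(objects, path)
		}
	}
	fmt.Printf("found %d orphans, %d objects remain\n", len(orphans), len(objects))
}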
@@ -8,8 +8,10 @@ import (
 	"bufio"
 	"bytes"
 	"context"
+	"fmt"
 	"io"
 	"math"
+	"runtime"
 	"strconv"
 	"strings"
 
@@ -40,9 +42,14 @@ func CatFileBatchCheck(repoPath string) (WriteCloserError, *bufio.Reader, func()
 		<-closed
 	}
 
+	_, filename, line, _ := runtime.Caller(2)
+	filename = strings.TrimPrefix(filename, callerPrefix)
+
 	go func() {
 		stderr := strings.Builder{}
-		err := NewCommandContext(ctx, "cat-file", "--batch-check").RunInDirFullPipeline(repoPath, batchStdoutWriter, &stderr, batchStdinReader)
+		err := NewCommandContext(ctx, "cat-file", "--batch-check").
+			SetDescription(fmt.Sprintf("%s cat-file --batch-check [repo_path: %s] (%s:%d)", GitExecutable, repoPath, filename, line)).
+			RunInDirFullPipeline(repoPath, batchStdoutWriter, &stderr, batchStdinReader)
 		if err != nil {
 			_ = batchStdoutWriter.CloseWithError(ConcatenateError(err, (&stderr).String()))
 			_ = batchStdinReader.CloseWithError(ConcatenateError(err, (&stderr).String()))
@@ -76,9 +83,14 @@ func CatFileBatch(repoPath string) (WriteCloserError, *bufio.Reader, func()) {
 		<-closed
 	}
 
+	_, filename, line, _ := runtime.Caller(2)
+	filename = strings.TrimPrefix(filename, callerPrefix)
+
 	go func() {
 		stderr := strings.Builder{}
-		err := NewCommandContext(ctx, "cat-file", "--batch").RunInDirFullPipeline(repoPath, batchStdoutWriter, &stderr, batchStdinReader)
+		err := NewCommandContext(ctx, "cat-file", "--batch").
+			SetDescription(fmt.Sprintf("%s cat-file --batch [repo_path: %s] (%s:%d)", GitExecutable, repoPath, filename, line)).
+			RunInDirFullPipeline(repoPath, batchStdoutWriter, &stderr, batchStdinReader)
 		if err != nil {
 			_ = batchStdoutWriter.CloseWithError(ConcatenateError(err, (&stderr).String()))
 			_ = batchStdinReader.CloseWithError(ConcatenateError(err, (&stderr).String()))
@@ -292,3 +304,10 @@ func ParseTreeLine(rd *bufio.Reader, modeBuf, fnameBuf, shaBuf []byte) (mode, fn
 	sha = shaBuf
 	return
 }
+
+var callerPrefix string
+
+func init() {
+	_, filename, _, _ := runtime.Caller(0)
+	callerPrefix = strings.TrimSuffix(filename, "modules/git/batch_reader.go")
+}
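The batch_reader changes above label each long-lived git cat-file process with the source location that started it, via runtime.Caller. A minimal, self-contained sketch of the same technique (describeCaller is illustrative, not a Gitea API):

package main

import (
	"fmt"
	"runtime"
)

// describeCaller returns a human-readable label for the code location that
// called it, similar to how the batch readers build their description text.
func describeCaller(action string) string {
	// skip=1 reports the caller of describeCaller rather than describeCaller itself.
	_, file, line, ok := runtime.Caller(1)
	if !ok {
		return action
	}
	return fmt.Sprintf("%s (started at %s:%d)", action, file, line)
}

func main() {
	fmt.Println(describeCaller("git cat-file --batch-check"))
}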
@@ -12,6 +12,7 @@ import (
 	"io/ioutil"
 
 	"code.gitea.io/gitea/modules/typesniffer"
+	"code.gitea.io/gitea/modules/util"
 )
 
 // This file contains common functions between the gogit and !gogit variants for git Blobs
@@ -29,7 +30,7 @@ func (b *Blob) GetBlobContent() (string, error) {
 	}
 	defer dataRc.Close()
 	buf := make([]byte, 1024)
-	n, _ := dataRc.Read(buf)
+	n, _ := util.ReadAtMost(dataRc, buf)
 	buf = buf[:n]
 	return string(buf), nil
 }
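The GetBlobContent change matters because a single Read on a stream may legitimately return fewer bytes than the buffer holds; util.ReadAtMost is the Gitea helper that keeps reading until the buffer is full or the stream ends. A standard-library sketch of the same idea, using io.ReadFull and treating a clean short stream as acceptable (readAtMost here is a stand-in, not the Gitea function):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// readAtMost fills buf as far as the reader allows and reports how many bytes
// were read; a short stream is not an error, unlike a bare io.ReadFull call.
func readAtMost(r io.Reader, buf []byte) (int, error) {
	n, err := io.ReadFull(r, buf)
	if errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) {
		return n, nil
	}
	return n, err
}

func main() {
	buf := make([]byte, 1024)
	n, err := readAtMost(strings.NewReader("short blob"), buf)
	fmt.Println(n, err, string(buf[:n])) // 10 <nil> short blob
}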
@@ -3,6 +3,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build race
 // +build race
 
 package git

@@ -3,6 +3,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git
@@ -188,6 +188,12 @@ func Init(ctx context.Context) error {
 			return err
 		}
 	}
+	if setting.Git.DisableCoreProtectNTFS {
+		if err := checkAndSetConfig("core.protectntfs", "false", true); err != nil {
+			return err
+		}
+		GlobalCommandArgs = append(GlobalCommandArgs, "-c", "core.protectntfs=false")
+	}
 	return nil
 }
 
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git
@@ -18,11 +18,16 @@ import (
 )
 
 // LogNameStatusRepo opens git log --raw in the provided repo and returns a stdin pipe, a stdout reader and cancel function
-func LogNameStatusRepo(repository, head, treepath string, paths ...string) (*bufio.Reader, func()) {
+func LogNameStatusRepo(ctx context.Context, repository, head, treepath string, paths ...string) (*bufio.Reader, func()) {
 	// We often want to feed the commits in order into cat-file --batch, followed by their trees and sub trees as necessary.
 	// so let's create a batch stdin and stdout
 	stdoutReader, stdoutWriter := nio.Pipe(buffer.New(32 * 1024))
+
+	// Lets also create a context so that we can absolutely ensure that the command should die when we're done
+	ctx, ctxCancel := context.WithCancel(ctx)
+
 	cancel := func() {
+		ctxCancel()
 		_ = stdoutReader.Close()
 		_ = stdoutWriter.Close()
 	}

@@ -50,7 +55,7 @@ func LogNameStatusRepo(repository, head, treepath string, paths ...string) (*buf
 
 	go func() {
 		stderr := strings.Builder{}
-		err := NewCommand(args...).RunInDirFullPipeline(repository, stdoutWriter, &stderr, nil)
+		err := NewCommandContext(ctx, args...).RunInDirFullPipeline(repository, stdoutWriter, &stderr, nil)
 		if err != nil {
 			_ = stdoutWriter.CloseWithError(ConcatenateError(err, (&stderr).String()))
 		} else {

@@ -75,8 +80,8 @@ type LogNameStatusRepoParser struct {
 }
 
 // NewLogNameStatusRepoParser returns a new parser for a git log raw output
-func NewLogNameStatusRepoParser(repository, head, treepath string, paths ...string) *LogNameStatusRepoParser {
-	rd, cancel := LogNameStatusRepo(repository, head, treepath, paths...)
+func NewLogNameStatusRepoParser(ctx context.Context, repository, head, treepath string, paths ...string) *LogNameStatusRepoParser {
+	rd, cancel := LogNameStatusRepo(ctx, repository, head, treepath, paths...)
 	return &LogNameStatusRepoParser{
 		treepath: treepath,
 		paths:    paths,

@@ -311,8 +316,11 @@ func WalkGitLog(ctx context.Context, repo *Repository, head *Commit, treepath st
 		}
 	}
 
-	g := NewLogNameStatusRepoParser(repo.Path, head.ID.String(), treepath, paths...)
-	defer g.Close()
+	g := NewLogNameStatusRepoParser(ctx, repo.Path, head.ID.String(), treepath, paths...)
+	// don't use defer g.Close() here as g may change its value - instead wrap in a func
+	defer func() {
+		g.Close()
+	}()
 
 	results := make([]string, len(paths))
 	remaining := len(paths)

@@ -331,6 +339,7 @@ heaploop:
 	for {
 		select {
 		case <-ctx.Done():
+			g.Close()
 			return nil, ctx.Err()
 		default:
 		}

@@ -380,7 +389,7 @@ heaploop:
 				remainingPaths = append(remainingPaths, pth)
 			}
 		}
-		g = NewLogNameStatusRepoParser(repo.Path, lastEmptyParent, treepath, remainingPaths...)
+		g = NewLogNameStatusRepoParser(ctx, repo.Path, lastEmptyParent, treepath, remainingPaths...)
 		parentRemaining = map[string]bool{}
 		nextRestart = (remaining * 3) / 4
 		continue heaploop
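The thread running through the log_name_status.go changes is that the git log --raw process is now tied to the caller's context rather than a background one, so cancelling the request (or calling the returned cancel function) also kills the process. A small, self-contained illustration with os/exec; the command and timings are arbitrary and assume a Unix-like environment with a sleep binary:

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	parent, cancelParent := context.WithCancel(context.Background())

	// Derive the command's context from the caller's, as the patched
	// LogNameStatusRepo does; cancelling either context stops the process.
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	cmd := exec.CommandContext(ctx, "sleep", "30")
	if err := cmd.Start(); err != nil {
		fmt.Println("start:", err)
		return
	}

	time.AfterFunc(100*time.Millisecond, cancelParent) // simulate the caller going away
	fmt.Println("wait:", cmd.Wait())                   // reports that the process was killed
}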
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package pipeline

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package pipeline

@@ -3,6 +3,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -3,6 +3,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -3,6 +3,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -3,6 +3,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -3,6 +3,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -3,6 +3,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git

@@ -2,6 +2,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build !gogit
 // +build !gogit
 
 package git

@@ -3,6 +3,7 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+//go:build gogit
 // +build gogit
 
 package git
Some files were not shown because too many files have changed in this diff.