Compare commits


1 Commit

Author: 0x5c
SHA1: 0de57fd57c
Message: Documentation: Clarity for HTTPS setups (#5626)
  [https-setup]
  - Made it clearer that HTTP redirection is possible
  [config-cheat-sheet]
  - Clarified the behaviour of the redirection-related config keys
Date: 2019-01-03 16:46:07 +01:00
56 changed files with 198 additions and 700 deletions
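
The commit itself only touches documentation, but the settings it documents are easier to follow next to a config sketch. A minimal app.ini fragment, assuming the usual [server] keys the HTTPS/redirection docs describe (PROTOCOL, CERT_FILE, KEY_FILE, REDIRECT_OTHER_PORT, PORT_TO_REDIRECT); the values are illustrative and not taken from this commit:

[server]
PROTOCOL            = https
DOMAIN              = git.example.com
HTTP_PORT           = 443
CERT_FILE           = custom/https/cert.pem
KEY_FILE            = custom/https/key.pem
; Optional plain-HTTP listener whose only job is to redirect to the HTTPS port above.
REDIRECT_OTHER_PORT = true
PORT_TO_REDIRECT    = 80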

View File

@@ -4,32 +4,7 @@ This changelog goes through all the changes that have been made in each release
without substantial changes to our git log; to see the highlights of what has without substantial changes to our git log; to see the highlights of what has
been added to each release, please refer to the [blog](https://blog.gitea.io). been added to each release, please refer to the [blog](https://blog.gitea.io).
## [1.7.1](https://github.com/go-gitea/gitea/releases/tag/v1.7.1) - 2019-01-31 ## [1.7.0-rc1](https://github.com/go-gitea/gitea/releases/tag/v1.7.0) - 2019-01-02
* SECURITY
* Disable redirect for i18n (#5910) (#5916)
* Only allow local login if password is non-empty (#5906) (#5908)
* Fix go-get URL generation (#5905) (#5907)
* BUGFIXES
* Fix TLS errors when using acme/autocert for local connections (#5820) (#5826)
* Request for public keys only if LDAP attribute is set (#5816) (#5819)
* Fix delete correct temp directory (#5840) (#5839)
* Fix an error while adding a dependency via UI (#5862) (#5876)
* Fix null pointer in attempt to Sudo if not logged in (#5872) (#5884)
* When creating new repository fsck option should be enabled (#5817) (#5885)
* Prevent nil dereference in mailIssueCommentToParticipants (#5891) (#5895) (#5894)
* Fix bug when read public repo lfs file (#5913) (#5912)
* Respect value of REQUIRE_SIGNIN_VIEW (#5901) (#5915)
* Fix compare button on upstream repo leading to 404 (#5877) (#5914)
* DOCS
* Added docs for the tree api (#5835)
* MISC
* Include Go toolchain to --version (#5832) (#5830)
## [1.7.0](https://github.com/go-gitea/gitea/releases/tag/v1.7.0) - 2019-01-22
* SECURITY
* Do not display the raw OpenID error in the UI (#5705) (#5712)
* When redirecting clean the path to avoid redirecting to external site (#5669) (#5679)
* Prevent DeleteFilePost doing arbitrary deletion (#5631)
* BREAKING * BREAKING
* Restrict permission check on repositories and fix some problems (#5314) * Restrict permission check on repositories and fix some problems (#5314)
* Show only opened milestones on issues page milestone filter (#5051) * Show only opened milestones on issues page milestone filter (#5051)
@@ -48,13 +23,6 @@ been added to each release, please refer to the [blog](https://blog.gitea.io).
* Give user a link to create PR after push (#4716) * Give user a link to create PR after push (#4716)
* Add rebase with merge commit merge style (#3844) (#4052) * Add rebase with merge commit merge style (#3844) (#4052)
* BUGFIXES * BUGFIXES
* Disallow empty titles (#5785) (#5794)
* Fix sqlite deadlock when assigning to a PR (#5640) (#5642)
* Don't close issues via commits on non-default branch. (#5622) (#5643)
* Fix commit page showing status for current default branch (#5650) (#5653)
* Only count users own actions for heatmap contributions (#5647) (#5655)
* Update xorm to fix issue postgresql dumping issues (#5680) (#5692)
* Use correct value for "MSpan Structures Obtained" (#5706) (#5716)
* Fix bug on modifying sshd username (#5624) * Fix bug on modifying sshd username (#5624)
* Delete tags in mirror which are removed for original repo. (#5609) * Delete tags in mirror which are removed for original repo. (#5609)
* Fix wrong text getting saved on editing second comment on an issue. (#5608) * Fix wrong text getting saved on editing second comment on an issue. (#5608)
@@ -181,18 +149,6 @@ been added to each release, please refer to the [blog](https://blog.gitea.io).
* Git-Trees API (#5403) * Git-Trees API (#5403)
* Only chown directories during docker setup if necessary. Fix #4425 (#5064) * Only chown directories during docker setup if necessary. Fix #4425 (#5064)
## [1.6.4](https://github.com/go-gitea/gitea/releases/tag/v1.6.4) - 2019-01-15
* BUGFIX
* Fix SSH key now can be reused as public key after deleting as deploy key (#5671) (#5685)
* When redirecting clean the path to avoid redirecting to external site (#5669) (#5703)
* Fix to use correct value for "MSpan Structures Obtained" (#5706) (#5715)
## [1.6.3](https://github.com/go-gitea/gitea/releases/tag/v1.6.3) - 2019-01-04
* SECURITY
* Prevent DeleteFilePost doing arbitrary deletion (#5631)
* BUGFIX
* Fix wrong text getting saved on editing second comment on an issue (#5608)
## [1.6.2](https://github.com/go-gitea/gitea/releases/tag/v1.6.2) - 2018-12-21 ## [1.6.2](https://github.com/go-gitea/gitea/releases/tag/v1.6.2) - 2018-12-21
* SECURITY * SECURITY
* Sanitize uploaded file names (#5571) (#5573) * Sanitize uploaded file names (#5571) (#5573)

Gopkg.lock (generated): 11 changed lines
View File

@@ -406,11 +406,11 @@
version = "v0.6.0" version = "v0.6.0"
[[projects]] [[projects]]
digest = "1:d366480c27ab51b3f7e995f25503063e7a6ebc7feb269df2499c33471f35cd62" digest = "1:931a62a1aacc37a5e4c309a111642ec4da47b4dc453cd4ba5481b12eedb04a5d"
name = "github.com/go-xorm/xorm" name = "github.com/go-xorm/xorm"
packages = ["."] packages = ["."]
pruneopts = "NUT" pruneopts = "NUT"
revision = "1cd2662be938bfee0e34af92fe448513e0560fb1" revision = "401f4ee8ff8cbc40a4754cb12192fbe4f02f3979"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -1005,12 +1005,12 @@
version = "v1.31.1" version = "v1.31.1"
[[projects]] [[projects]]
digest = "1:7e1c00b9959544fa1ccca7cf0407a5b29ac6d5201059c4fac6f599cb99bfd24d" digest = "1:01f4ac37c52bda6f7e1bd73680a99f88733c0408aaa159ecb1ba53a1ade9423c"
name = "gopkg.in/ldap.v2" name = "gopkg.in/ldap.v2"
packages = ["."] packages = ["."]
pruneopts = "NUT" pruneopts = "NUT"
revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9" revision = "d0a5ced67b4dc310b9158d63a2c6f9c5ec13f105"
version = "v2.5.1" version = "v2.4.1"
[[projects]] [[projects]]
digest = "1:cfe1730a152ff033ad7d9c115d22e36b19eec6d5928c06146b9119be45d39dc0" digest = "1:cfe1730a152ff033ad7d9c115d22e36b19eec6d5928c06146b9119be45d39dc0"
@@ -1173,7 +1173,6 @@
"github.com/keybase/go-crypto/openpgp", "github.com/keybase/go-crypto/openpgp",
"github.com/keybase/go-crypto/openpgp/armor", "github.com/keybase/go-crypto/openpgp/armor",
"github.com/keybase/go-crypto/openpgp/packet", "github.com/keybase/go-crypto/openpgp/packet",
"github.com/klauspost/compress/gzip",
"github.com/lafriks/xormstore", "github.com/lafriks/xormstore",
"github.com/lib/pq", "github.com/lib/pq",
"github.com/lunny/dingtalk_webhook", "github.com/lunny/dingtalk_webhook",

View File

@@ -38,7 +38,7 @@ ignored = ["google.golang.org/appengine*"]
[[override]] [[override]]
name = "github.com/go-xorm/xorm" name = "github.com/go-xorm/xorm"
revision = "1cd2662be938bfee0e34af92fe448513e0560fb1" revision = "401f4ee8ff8cbc40a4754cb12192fbe4f02f3979"
[[override]] [[override]]
name = "github.com/go-xorm/builder" name = "github.com/go-xorm/builder"

View File

@@ -9,11 +9,10 @@ package cmd
import ( import (
"errors" "errors"
"fmt" "fmt"
"strings"
"code.gitea.io/gitea/models" "code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
"github.com/urfave/cli" "github.com/urfave/cli"
) )
@@ -25,7 +24,7 @@ func argsSet(c *cli.Context, args ...string) error {
return errors.New(a + " is not set") return errors.New(a + " is not set")
} }
if util.IsEmptyString(a) { if len(strings.TrimSpace(c.String(a))) == 0 {
return errors.New(a + " is required") return errors.New(a + " is required")
} }
} }
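
On the left of this hunk argsSet validates flag values with util.IsEmptyString; the right-hand side is the older inline strings.TrimSpace form. A self-contained sketch of that helper, whose definition also appears further down in this diff:

package main

import (
	"fmt"
	"strings"
)

// isEmptyString mirrors util.IsEmptyString from this diff: a string counts as
// empty when it contains nothing but whitespace.
func isEmptyString(s string) bool {
	return len(strings.TrimSpace(s)) == 0
}

func main() {
	fmt.Println(isEmptyString("   ")) // true
	fmt.Println(isEmptyString(" a ")) // false
}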

View File

@@ -8,7 +8,6 @@ package main // import "code.gitea.io/gitea"
import ( import (
"os" "os"
"runtime"
"strings" "strings"
"code.gitea.io/gitea/cmd" "code.gitea.io/gitea/cmd"
@@ -62,8 +61,8 @@ arguments - which can alternatively be run by running the subcommand web.`
func formatBuiltWith(Tags string) string { func formatBuiltWith(Tags string) string {
if len(Tags) == 0 { if len(Tags) == 0 {
return " built with " + runtime.Version() return ""
} }
return " built with " + runtime.Version() + " : " + strings.Replace(Tags, " ", ", ", -1) return " built with: " + strings.Replace(Tags, " ", ", ", -1)
} }
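
The left-hand side of this hunk is the newer formatBuiltWith from #5832/#5830, which reports the Go toolchain in --version output even when no build tags were set. A standalone sketch of that behaviour:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// formatBuiltWith reproduces the left-hand side of the hunk above: the Go
// toolchain version is always included, with build tags appended if present.
func formatBuiltWith(tags string) string {
	if len(tags) == 0 {
		return " built with " + runtime.Version()
	}
	return " built with " + runtime.Version() + " : " + strings.Replace(tags, " ", ", ", -1)
}

func main() {
	// The version string itself is illustrative, not taken from this diff.
	fmt.Println("Gitea version 1.7.1" + formatBuiltWith("bindata sqlite"))
}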

View File

@@ -476,34 +476,8 @@ func getIssueFromRef(repo *Repository, ref string) (*Issue, error) {
return issue, nil return issue, nil
} }
func changeIssueStatus(repo *Repository, doer *User, ref string, refMarked map[int64]bool, status bool) error {
issue, err := getIssueFromRef(repo, ref)
if err != nil {
return err
}
if issue == nil || refMarked[issue.ID] {
return nil
}
refMarked[issue.ID] = true
if issue.RepoID != repo.ID || issue.IsClosed == status {
return nil
}
issue.Repo = repo
if err = issue.ChangeStatus(doer, status); err != nil {
// Don't return an error when dependencies are open as this would let the push fail
if IsErrDependenciesLeft(err) {
return nil
}
return err
}
return nil
}
// UpdateIssuesCommit checks if issues are manipulated by commit message. // UpdateIssuesCommit checks if issues are manipulated by commit message.
func UpdateIssuesCommit(doer *User, repo *Repository, commits []*PushCommit, branchName string) error { func UpdateIssuesCommit(doer *User, repo *Repository, commits []*PushCommit) error {
// Commits are appended in the reverse order. // Commits are appended in the reverse order.
for i := len(commits) - 1; i >= 0; i-- { for i := len(commits) - 1; i >= 0; i-- {
c := commits[i] c := commits[i]
@@ -526,21 +500,51 @@ func UpdateIssuesCommit(doer *User, repo *Repository, commits []*PushCommit, bra
} }
} }
// Change issue status only if the commit has been pushed to the default branch. refMarked = make(map[int64]bool)
if repo.DefaultBranch != branchName { // FIXME: can merge this one and next one to a common function.
for _, ref := range issueCloseKeywordsPat.FindAllString(c.Message, -1) {
issue, err := getIssueFromRef(repo, ref)
if err != nil {
return err
}
if issue == nil || refMarked[issue.ID] {
continue
}
refMarked[issue.ID] = true
if issue.RepoID != repo.ID || issue.IsClosed {
continue continue
} }
refMarked = make(map[int64]bool) issue.Repo = repo
for _, ref := range issueCloseKeywordsPat.FindAllString(c.Message, -1) { if err = issue.ChangeStatus(doer, true); err != nil {
if err := changeIssueStatus(repo, doer, ref, refMarked, true); err != nil { // Don't return an error when dependencies are open as this would let the push fail
if IsErrDependenciesLeft(err) {
return nil
}
return err return err
} }
} }
// It is conflict to have close and reopen at same time, so refsMarked doesn't need to reinit here. // It is conflict to have close and reopen at same time, so refsMarked doesn't need to reinit here.
for _, ref := range issueReopenKeywordsPat.FindAllString(c.Message, -1) { for _, ref := range issueReopenKeywordsPat.FindAllString(c.Message, -1) {
if err := changeIssueStatus(repo, doer, ref, refMarked, false); err != nil { issue, err := getIssueFromRef(repo, ref)
if err != nil {
return err
}
if issue == nil || refMarked[issue.ID] {
continue
}
refMarked[issue.ID] = true
if issue.RepoID != repo.ID || !issue.IsClosed {
continue
}
issue.Repo = repo
if err = issue.ChangeStatus(doer, false); err != nil {
return err return err
} }
} }
@@ -605,7 +609,7 @@ func CommitRepoAction(opts CommitRepoActionOptions) error {
opts.Commits.CompareURL = repo.ComposeCompareURL(opts.OldCommitID, opts.NewCommitID) opts.Commits.CompareURL = repo.ComposeCompareURL(opts.OldCommitID, opts.NewCommitID)
} }
if err = UpdateIssuesCommit(pusher, repo, opts.Commits.Commits, refName); err != nil { if err = UpdateIssuesCommit(pusher, repo, opts.Commits.Commits); err != nil {
log.Error(4, "updateIssuesCommit: %v", err) log.Error(4, "updateIssuesCommit: %v", err)
} }
} }
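
The left-hand side of this hunk is the newer code from #5622/#5643: close/reopen keyword handling is folded into a shared changeIssueStatus helper, and commits only close issues when they are pushed to the default branch. A simplified, self-contained sketch of that branch guard; the keyword pattern below is an illustrative approximation, not Gitea's real issueCloseKeywordsPat:

package main

import (
	"fmt"
	"regexp"
)

// closePat roughly approximates a close-keyword pattern such as
// "closes #1" or "fixes #2"; the real pattern in Gitea is more elaborate.
var closePat = regexp.MustCompile(`(?i)\b(close[sd]?|fix(e[sd])?|resolve[sd]?)\b:? #(\d+)`)

// issuesClosedBy returns the issue references a commit message would close,
// honouring the guard added in this hunk: pushes to a non-default branch
// never close issues.
func issuesClosedBy(message, branch, defaultBranch string) []string {
	if branch != defaultBranch {
		return nil
	}
	var refs []string
	for _, m := range closePat.FindAllStringSubmatch(message, -1) {
		refs = append(refs, "#"+m[3])
	}
	return refs
}

func main() {
	fmt.Println(issuesClosedBy("fixes #1 and closes #2", "master", "master")) // [#1 #2]
	fmt.Println(issuesClosedBy("fixes #1", "feature", "master"))              // []
}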

View File

@@ -227,37 +227,10 @@ func TestUpdateIssuesCommit(t *testing.T) {
AssertNotExistsBean(t, commentBean) AssertNotExistsBean(t, commentBean)
AssertNotExistsBean(t, &Issue{RepoID: repo.ID, Index: 2}, "is_closed=1") AssertNotExistsBean(t, &Issue{RepoID: repo.ID, Index: 2}, "is_closed=1")
assert.NoError(t, UpdateIssuesCommit(user, repo, pushCommits, repo.DefaultBranch)) assert.NoError(t, UpdateIssuesCommit(user, repo, pushCommits))
AssertExistsAndLoadBean(t, commentBean) AssertExistsAndLoadBean(t, commentBean)
AssertExistsAndLoadBean(t, issueBean, "is_closed=1") AssertExistsAndLoadBean(t, issueBean, "is_closed=1")
CheckConsistencyFor(t, &Action{}) CheckConsistencyFor(t, &Action{})
// Test that push to a non-default branch closes no issue.
pushCommits = []*PushCommit{
{
Sha1: "abcdef1",
CommitterEmail: "user2@example.com",
CommitterName: "User Two",
AuthorEmail: "user4@example.com",
AuthorName: "User Four",
Message: "close #1",
},
}
repo = AssertExistsAndLoadBean(t, &Repository{ID: 3}).(*Repository)
commentBean = &Comment{
Type: CommentTypeCommitRef,
CommitSHA: "abcdef1",
PosterID: user.ID,
IssueID: 6,
}
issueBean = &Issue{RepoID: repo.ID, Index: 1}
AssertNotExistsBean(t, commentBean)
AssertNotExistsBean(t, &Issue{RepoID: repo.ID, Index: 1}, "is_closed=1")
assert.NoError(t, UpdateIssuesCommit(user, repo, pushCommits, "non-existing-branch"))
AssertExistsAndLoadBean(t, commentBean)
AssertNotExistsBean(t, issueBean, "is_closed=1")
CheckConsistencyFor(t, &Action{})
} }
func testCorrectRepoAction(t *testing.T, opts CommitRepoActionOptions, actionBean *Action) { func testCorrectRepoAction(t *testing.T, opts CommitRepoActionOptions, actionBean *Action) {

View File

@@ -1402,7 +1402,7 @@ func UpdateIssueMentions(e Engine, issueID int64, mentions []string) error {
} }
memberIDs := make([]int64, 0, user.NumMembers) memberIDs := make([]int64, 0, user.NumMembers)
orgUsers, err := getOrgUsersByOrgID(e, user.ID) orgUsers, err := GetOrgUsersByOrgID(user.ID)
if err != nil { if err != nil {
return fmt.Errorf("GetOrgUsersByOrgID [%d]: %v", user.ID, err) return fmt.Errorf("GetOrgUsersByOrgID [%d]: %v", user.ID, err)
} }

View File

@@ -44,11 +44,7 @@ func (issue *Issue) loadAssignees(e Engine) (err error) {
// GetAssigneesByIssue returns everyone assigned to that issue // GetAssigneesByIssue returns everyone assigned to that issue
func GetAssigneesByIssue(issue *Issue) (assignees []*User, err error) { func GetAssigneesByIssue(issue *Issue) (assignees []*User, err error) {
return getAssigneesByIssue(x, issue) err = issue.loadAssignees(x)
}
func getAssigneesByIssue(e Engine, issue *Issue) (assignees []*User, err error) {
err = issue.loadAssignees(e)
if err != nil { if err != nil {
return assignees, err return assignees, err
} }
@@ -177,7 +173,7 @@ func (issue *Issue) changeAssignee(sess *xorm.Session, doer *User, assigneeID in
issue.PullRequest.Issue = issue issue.PullRequest.Issue = issue
apiPullRequest := &api.PullRequestPayload{ apiPullRequest := &api.PullRequestPayload{
Index: issue.Index, Index: issue.Index,
PullRequest: issue.PullRequest.apiFormat(sess), PullRequest: issue.PullRequest.APIFormat(),
Repository: issue.Repo.innerAPIFormat(sess, mode, false), Repository: issue.Repo.innerAPIFormat(sess, mode, false),
Sender: doer.APIFormat(), Sender: doer.APIFormat(),
} }

View File

@@ -748,9 +748,6 @@ func createIssueDependencyComment(e *xorm.Session, doer *User, issue *Issue, dep
if !add { if !add {
cType = CommentTypeRemoveDependency cType = CommentTypeRemoveDependency
} }
if err = issue.loadRepo(e); err != nil {
return
}
// Make two comments, one in each issue // Make two comments, one in each issue
_, err = createComment(e, &CreateCommentOptions{ _, err = createComment(e, &CreateCommentOptions{

View File

@@ -19,9 +19,11 @@ func TestCreateIssueDependency(t *testing.T) {
issue1, err := GetIssueByID(1) issue1, err := GetIssueByID(1)
assert.NoError(t, err) assert.NoError(t, err)
issue1.LoadAttributes()
issue2, err := GetIssueByID(2) issue2, err := GetIssueByID(2)
assert.NoError(t, err) assert.NoError(t, err)
issue2.LoadAttributes()
// Create a dependency and check if it was successful // Create a dependency and check if it was successful
err = CreateIssueDependency(user1, issue1, issue2) err = CreateIssueDependency(user1, issue1, issue2)

View File

@@ -39,16 +39,16 @@ func mailIssueCommentToParticipants(e Engine, issue *Issue, doer *User, content
// In case the issue poster is not watching the repository and is active, // In case the issue poster is not watching the repository and is active,
// even if we have duplicated in watchers, can be safely filtered out. // even if we have duplicated in watchers, can be safely filtered out.
err = issue.loadPoster(e) poster, err := getUserByID(e, issue.PosterID)
if err != nil { if err != nil {
return fmt.Errorf("GetUserByID [%d]: %v", issue.PosterID, err) return fmt.Errorf("GetUserByID [%d]: %v", issue.PosterID, err)
} }
if issue.PosterID != doer.ID && issue.Poster.IsActive && !issue.Poster.ProhibitLogin { if issue.PosterID != doer.ID && poster.IsActive && !poster.ProhibitLogin {
participants = append(participants, issue.Poster) participants = append(participants, issue.Poster)
} }
// Assignees must receive any communications // Assignees must receive any communications
assignees, err := getAssigneesByIssue(e, issue) assignees, err := GetAssigneesByIssue(issue)
if err != nil { if err != nil {
return err return err
} }
@@ -88,10 +88,6 @@ func mailIssueCommentToParticipants(e Engine, issue *Issue, doer *User, content
names = append(names, participants[i].Name) names = append(names, participants[i].Name)
} }
if err := issue.loadRepo(e); err != nil {
return err
}
for _, to := range tos { for _, to := range tos {
SendIssueCommentMail(issue, doer, content, comment, []string{to}) SendIssueCommentMail(issue, doer, content, comment, []string{to})
} }

View File

@@ -54,7 +54,7 @@ func newIssueUsers(e Engine, repo *Repository, issue *Issue) error {
func updateIssueAssignee(e *xorm.Session, issue *Issue, assigneeID int64) (removed bool, err error) { func updateIssueAssignee(e *xorm.Session, issue *Issue, assigneeID int64) (removed bool, err error) {
// Check if the user exists // Check if the user exists
assignee, err := getUserByID(e, assigneeID) assignee, err := GetUserByID(assigneeID)
if err != nil { if err != nil {
return false, err return false, err
} }

View File

@@ -644,7 +644,7 @@ func UserSignIn(username, password string) (*User, error) {
if hasUser { if hasUser {
switch user.LoginType { switch user.LoginType {
case LoginNoType, LoginPlain, LoginOAuth2: case LoginNoType, LoginPlain, LoginOAuth2:
if user.IsPasswordSet() && user.ValidatePassword(password) { if user.ValidatePassword(password) {
return user, nil return user, nil
} }

View File

@@ -393,12 +393,8 @@ func GetOrgUsersByUserID(uid int64, all bool) ([]*OrgUser, error) {
// GetOrgUsersByOrgID returns all organization-user relations by organization ID. // GetOrgUsersByOrgID returns all organization-user relations by organization ID.
func GetOrgUsersByOrgID(orgID int64) ([]*OrgUser, error) { func GetOrgUsersByOrgID(orgID int64) ([]*OrgUser, error) {
return getOrgUsersByOrgID(x, orgID)
}
func getOrgUsersByOrgID(e Engine, orgID int64) ([]*OrgUser, error) {
ous := make([]*OrgUser, 0, 10) ous := make([]*OrgUser, 0, 10)
err := e. err := x.
Where("org_id=?", orgID). Where("org_id=?", orgID).
Find(&ous) Find(&ous)
return ous, err return ous, err

View File

@@ -366,7 +366,7 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
return fmt.Errorf("Failed to create dir %s: %v", tmpBasePath, err) return fmt.Errorf("Failed to create dir %s: %v", tmpBasePath, err)
} }
defer os.RemoveAll(tmpBasePath) defer os.RemoveAll(path.Dir(tmpBasePath))
var stderr string var stderr string
if _, stderr, err = process.GetManager().ExecTimeout(5*time.Minute, if _, stderr, err = process.GetManager().ExecTimeout(5*time.Minute,

View File

@@ -11,7 +11,6 @@ import (
"fmt" "fmt"
"html/template" "html/template"
"io/ioutil" "io/ioutil"
"net/url"
"os" "os"
"os/exec" "os/exec"
"path" "path"
@@ -825,7 +824,7 @@ type CloneLink struct {
// ComposeHTTPSCloneURL returns HTTPS clone URL based on given owner and repository name. // ComposeHTTPSCloneURL returns HTTPS clone URL based on given owner and repository name.
func ComposeHTTPSCloneURL(owner, repo string) string { func ComposeHTTPSCloneURL(owner, repo string) string {
return fmt.Sprintf("%s%s/%s.git", setting.AppURL, url.QueryEscape(owner), url.QueryEscape(repo)) return fmt.Sprintf("%s%s/%s.git", setting.AppURL, owner, repo)
} }
func (repo *Repository) cloneLink(e Engine, isWiki bool) *CloneLink { func (repo *Repository) cloneLink(e Engine, isWiki bool) *CloneLink {
@@ -1366,7 +1365,6 @@ func CreateRepository(doer, u *User, opts CreateRepoOptions) (_ *Repository, err
LowerName: strings.ToLower(opts.Name), LowerName: strings.ToLower(opts.Name),
Description: opts.Description, Description: opts.Description,
IsPrivate: opts.IsPrivate, IsPrivate: opts.IsPrivate,
IsFsckEnabled: true,
} }
sess := x.NewSession() sess := x.NewSession()

View File

@@ -113,15 +113,15 @@ func notifyWatchers(e Engine, act *Action) error {
switch act.OpType { switch act.OpType {
case ActionCommitRepo, ActionPushTag, ActionDeleteTag, ActionDeleteBranch: case ActionCommitRepo, ActionPushTag, ActionDeleteTag, ActionDeleteBranch:
if !act.Repo.checkUnitUser(e, act.UserID, false, UnitTypeCode) { if !act.Repo.CheckUnitUser(act.UserID, false, UnitTypeCode) {
continue continue
} }
case ActionCreateIssue, ActionCommentIssue, ActionCloseIssue, ActionReopenIssue: case ActionCreateIssue, ActionCommentIssue, ActionCloseIssue, ActionReopenIssue:
if !act.Repo.checkUnitUser(e, act.UserID, false, UnitTypeIssues) { if !act.Repo.CheckUnitUser(act.UserID, false, UnitTypeIssues) {
continue continue
} }
case ActionCreatePullRequest, ActionMergePullRequest, ActionClosePullRequest, ActionReopenPullRequest: case ActionCreatePullRequest, ActionMergePullRequest, ActionClosePullRequest, ActionReopenPullRequest:
if !act.Repo.checkUnitUser(e, act.UserID, false, UnitTypePullRequests) { if !act.Repo.CheckUnitUser(act.UserID, false, UnitTypePullRequests) {
continue continue
} }
} }

View File

@@ -844,11 +844,6 @@ func DeleteDeployKey(doer *User, id int64) error {
if err = deletePublicKeys(sess, key.KeyID); err != nil { if err = deletePublicKeys(sess, key.KeyID); err != nil {
return err return err
} }
// after deleted the public keys, should rewrite the public keys file
if err = rewriteAllPublicKeys(sess); err != nil {
return err
}
} }
return sess.Commit() return sess.Commit()

View File

@@ -32,22 +32,12 @@ func GetUserHeatmapDataByUser(user *User) ([]*UserHeatmapData, error) {
groupByName = groupBy groupByName = groupBy
} }
sess := x.Select(groupBy+" AS timestamp, count(user_id) as contributions"). err := x.Select(groupBy+" AS timestamp, count(user_id) as contributions").
Table("action"). Table("action").
Where("user_id = ?", user.ID). Where("user_id = ?", user.ID).
And("created_unix > ?", (util.TimeStampNow() - 31536000)) And("created_unix > ?", (util.TimeStampNow() - 31536000)).
GroupBy(groupByName).
// * Heatmaps for individual users only include actions that the user themself
// did.
// * For organizations actions by all users that were made in owned
// repositories are counted.
if user.Type == UserTypeIndividual {
sess = sess.And("act_user_id = ?", user.ID)
}
err := sess.GroupBy(groupByName).
OrderBy("timestamp"). OrderBy("timestamp").
Find(&hdata) Find(&hdata)
return hdata, err return hdata, err
} }
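
The left-hand side of this hunk restores the behaviour from #5647/#5655: heatmaps for individual users only count actions the user performed themself (the extra act_user_id filter), while organization heatmaps count all actions in owned repositories. A plain-SQL sketch of the resulting query shape; the GROUP BY expression is illustrative, since the real code picks it per database dialect:

package main

import (
	"fmt"
	"time"
)

// heatmapQuery sketches the query the hunk above builds through xorm: one
// extra act_user_id condition when the heatmap belongs to an individual user.
func heatmapQuery(groupBy string, individual bool) string {
	q := "SELECT " + groupBy + " AS timestamp, count(user_id) AS contributions" +
		" FROM action WHERE user_id = ? AND created_unix > ?"
	if individual {
		q += " AND act_user_id = ?"
	}
	return q + " GROUP BY timestamp ORDER BY timestamp"
}

func main() {
	oneYearAgo := time.Now().Unix() - 31536000 // same one-year cutoff as the diff
	fmt.Println(heatmapQuery("created_unix / 86400 * 86400", true))
	fmt.Println("created_unix cutoff:", oneYearAgo)
}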

View File

@@ -247,17 +247,11 @@ func (ls *Source) SearchEntry(name, passwd string, directBind bool) *SearchResul
return nil return nil
} }
var isAttributeSSHPublicKeySet = len(strings.TrimSpace(ls.AttributeSSHPublicKey)) > 0
attribs := []string{ls.AttributeUsername, ls.AttributeName, ls.AttributeSurname, ls.AttributeMail}
if isAttributeSSHPublicKeySet {
attribs = append(attribs, ls.AttributeSSHPublicKey)
}
log.Trace("Fetching attributes '%v', '%v', '%v', '%v', '%v' with filter %s and base %s", ls.AttributeUsername, ls.AttributeName, ls.AttributeSurname, ls.AttributeMail, ls.AttributeSSHPublicKey, userFilter, userDN) log.Trace("Fetching attributes '%v', '%v', '%v', '%v', '%v' with filter %s and base %s", ls.AttributeUsername, ls.AttributeName, ls.AttributeSurname, ls.AttributeMail, ls.AttributeSSHPublicKey, userFilter, userDN)
search := ldap.NewSearchRequest( search := ldap.NewSearchRequest(
userDN, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, userFilter, userDN, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, userFilter,
attribs, nil) []string{ls.AttributeUsername, ls.AttributeName, ls.AttributeSurname, ls.AttributeMail, ls.AttributeSSHPublicKey},
nil)
sr, err := l.Search(search) sr, err := l.Search(search)
if err != nil { if err != nil {
@@ -273,15 +267,11 @@ func (ls *Source) SearchEntry(name, passwd string, directBind bool) *SearchResul
return nil return nil
} }
var sshPublicKey []string
username := sr.Entries[0].GetAttributeValue(ls.AttributeUsername) username := sr.Entries[0].GetAttributeValue(ls.AttributeUsername)
firstname := sr.Entries[0].GetAttributeValue(ls.AttributeName) firstname := sr.Entries[0].GetAttributeValue(ls.AttributeName)
surname := sr.Entries[0].GetAttributeValue(ls.AttributeSurname) surname := sr.Entries[0].GetAttributeValue(ls.AttributeSurname)
mail := sr.Entries[0].GetAttributeValue(ls.AttributeMail) mail := sr.Entries[0].GetAttributeValue(ls.AttributeMail)
if isAttributeSSHPublicKeySet { sshPublicKey := sr.Entries[0].GetAttributeValues(ls.AttributeSSHPublicKey)
sshPublicKey = sr.Entries[0].GetAttributeValues(ls.AttributeSSHPublicKey)
}
isAdmin := checkAdmin(l, ls, userDN) isAdmin := checkAdmin(l, ls, userDN)
if !directBind && ls.AttributesInBind { if !directBind && ls.AttributesInBind {
@@ -330,17 +320,11 @@ func (ls *Source) SearchEntries() []*SearchResult {
userFilter := fmt.Sprintf(ls.Filter, "*") userFilter := fmt.Sprintf(ls.Filter, "*")
var isAttributeSSHPublicKeySet = len(strings.TrimSpace(ls.AttributeSSHPublicKey)) > 0
attribs := []string{ls.AttributeUsername, ls.AttributeName, ls.AttributeSurname, ls.AttributeMail}
if isAttributeSSHPublicKeySet {
attribs = append(attribs, ls.AttributeSSHPublicKey)
}
log.Trace("Fetching attributes '%v', '%v', '%v', '%v', '%v' with filter %s and base %s", ls.AttributeUsername, ls.AttributeName, ls.AttributeSurname, ls.AttributeMail, ls.AttributeSSHPublicKey, userFilter, ls.UserBase) log.Trace("Fetching attributes '%v', '%v', '%v', '%v', '%v' with filter %s and base %s", ls.AttributeUsername, ls.AttributeName, ls.AttributeSurname, ls.AttributeMail, ls.AttributeSSHPublicKey, userFilter, ls.UserBase)
search := ldap.NewSearchRequest( search := ldap.NewSearchRequest(
ls.UserBase, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, userFilter, ls.UserBase, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, userFilter,
attribs, nil) []string{ls.AttributeUsername, ls.AttributeName, ls.AttributeSurname, ls.AttributeMail, ls.AttributeSSHPublicKey},
nil)
var sr *ldap.SearchResult var sr *ldap.SearchResult
if ls.UsePagedSearch() { if ls.UsePagedSearch() {
@@ -361,11 +345,9 @@ func (ls *Source) SearchEntries() []*SearchResult {
Name: v.GetAttributeValue(ls.AttributeName), Name: v.GetAttributeValue(ls.AttributeName),
Surname: v.GetAttributeValue(ls.AttributeSurname), Surname: v.GetAttributeValue(ls.AttributeSurname),
Mail: v.GetAttributeValue(ls.AttributeMail), Mail: v.GetAttributeValue(ls.AttributeMail),
SSHPublicKey: v.GetAttributeValues(ls.AttributeSSHPublicKey),
IsAdmin: checkAdmin(l, ls, v.DN), IsAdmin: checkAdmin(l, ls, v.DN),
} }
if isAttributeSSHPublicKeySet {
result[i].SSHPublicKey = v.GetAttributeValues(ls.AttributeSSHPublicKey)
}
} }
return result return result
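
The left-hand side of these LDAP hunks is the fix from #5816/#5819: the SSH public key attribute is only requested from the directory when it is actually configured. A self-contained sketch of that attribute-list construction:

package main

import (
	"fmt"
	"strings"
)

// buildAttribs mirrors the left-hand side above: the SSH public key attribute
// is appended to the requested attributes only when it is non-empty.
func buildAttribs(username, name, surname, mail, sshPublicKey string) []string {
	attribs := []string{username, name, surname, mail}
	if len(strings.TrimSpace(sshPublicKey)) > 0 {
		attribs = append(attribs, sshPublicKey)
	}
	return attribs
}

func main() {
	fmt.Println(buildAttribs("uid", "givenName", "sn", "mail", ""))             // key attribute unset
	fmt.Println(buildAttribs("uid", "givenName", "sn", "mail", "sshPublicKey")) // key attribute set
}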

View File

@@ -209,7 +209,7 @@ func Contexter() macaron.Handler {
if err == nil && len(repo.DefaultBranch) > 0 { if err == nil && len(repo.DefaultBranch) > 0 {
branchName = repo.DefaultBranch branchName = repo.DefaultBranch
} }
prefix := setting.AppURL + path.Join(url.QueryEscape(ownerName), url.QueryEscape(repoName), "src", "branch", branchName) prefix := setting.AppURL + path.Join(ownerName, repoName, "src", "branch", branchName)
c.Header().Set("Content-Type", "text/html") c.Header().Set("Content-Type", "text/html")
c.WriteHeader(http.StatusOK) c.WriteHeader(http.StatusOK)
c.Write([]byte(com.Expand(`<!doctype html> c.Write([]byte(com.Expand(`<!doctype html>

View File

@@ -8,7 +8,6 @@ package context
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net/url"
"path" "path"
"strings" "strings"
@@ -163,7 +162,7 @@ func RetrieveBaseRepo(ctx *Context, repo *models.Repository) {
// ComposeGoGetImport returns go-get-import meta content. // ComposeGoGetImport returns go-get-import meta content.
func ComposeGoGetImport(owner, repo string) string { func ComposeGoGetImport(owner, repo string) string {
return path.Join(setting.Domain, setting.AppSubURL, url.QueryEscape(owner), url.QueryEscape(repo)) return path.Join(setting.Domain, setting.AppSubURL, owner, repo)
} }
// EarlyResponseForGoGetMeta responses appropriate go-get meta with status 200 // EarlyResponseForGoGetMeta responses appropriate go-get meta with status 200
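
The left-hand side of this hunk (and of the earlier ComposeHTTPSCloneURL hunk) escapes owner and repository names before embedding them in generated URLs, which is the go-get fix from #5905/#5907. A minimal sketch using the same url.QueryEscape calls; the assumption that the app URL already ends with a slash matches how setting.AppURL is normally used:

package main

import (
	"fmt"
	"net/url"
	"path"
)

// composeGoGetImport mirrors the escaped form on the left of the hunk above.
func composeGoGetImport(domain, appSubURL, owner, repo string) string {
	return path.Join(domain, appSubURL, url.QueryEscape(owner), url.QueryEscape(repo))
}

// composeHTTPSCloneURL mirrors the earlier repo.go hunk; appURL is assumed to
// already end with a slash.
func composeHTTPSCloneURL(appURL, owner, repo string) string {
	return fmt.Sprintf("%s%s/%s.git", appURL, url.QueryEscape(owner), url.QueryEscape(repo))
}

func main() {
	fmt.Println(composeGoGetImport("try.gitea.io", "", "owner name", "repo"))
	fmt.Println(composeHTTPSCloneURL("https://try.gitea.io/", "owner name", "repo"))
}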

View File

@@ -497,15 +497,12 @@ func authenticate(ctx *context.Context, repository *models.Repository, authoriza
accessMode = models.AccessModeWrite accessMode = models.AccessModeWrite
} }
// ctx.IsSigned is unnecessary here, this will be checked in perm.CanAccess
perm, err := models.GetUserRepoPermission(repository, ctx.User) perm, err := models.GetUserRepoPermission(repository, ctx.User)
if err != nil { if err != nil {
return false return false
} }
if ctx.IsSigned {
canRead := perm.CanAccess(accessMode, models.UnitTypeCode) return perm.CanAccess(accessMode, models.UnitTypeCode)
if canRead {
return true
} }
user, repo, opStr, err := parseToken(authorization) user, repo, opStr, err := parseToken(authorization)
@@ -585,7 +582,7 @@ func parseToken(authorization string) (*models.User, *models.Repository, string,
if err != nil { if err != nil {
return nil, nil, "basic", err return nil, nil, "basic", err
} }
if !u.IsPasswordSet() || !u.ValidatePassword(password) { if !u.ValidatePassword(password) {
return nil, nil, "basic", fmt.Errorf("Basic auth failed") return nil, nil, "basic", fmt.Errorf("Basic auth failed")
} }
return u, nil, "basic", nil return u, nil, "basic", nil

View File

@@ -39,7 +39,6 @@ func decodeJSONError(resp *http.Response) *Response {
func newInternalRequest(url, method string) *httplib.Request { func newInternalRequest(url, method string) *httplib.Request {
req := newRequest(url, method).SetTLSClientConfig(&tls.Config{ req := newRequest(url, method).SetTLSClientConfig(&tls.Config{
InsecureSkipVerify: true, InsecureSkipVerify: true,
ServerName: setting.Domain,
}) })
if setting.Protocol == setting.UnixSocket { if setting.Protocol == setting.UnixSocket {
req.SetTransport(&http.Transport{ req.SetTransport(&http.Transport{

View File

@@ -117,7 +117,7 @@ func (opts *Options) handle(ctx *macaron.Context, log *log.Logger, opt *Options)
if fi.IsDir() { if fi.IsDir() {
// Redirect if missing trailing slash. // Redirect if missing trailing slash.
if !strings.HasSuffix(ctx.Req.URL.Path, "/") { if !strings.HasSuffix(ctx.Req.URL.Path, "/") {
http.Redirect(ctx.Resp, ctx.Req.Request, path.Clean(ctx.Req.URL.Path+"/"), http.StatusFound) http.Redirect(ctx.Resp, ctx.Req.Request, ctx.Req.URL.Path+"/", http.StatusFound)
return true return true
} }
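
The left-hand side of this hunk is the open-redirect fix from #5669: the trailing-slash redirect target is passed through path.Clean, so a request path like //evil.com cannot be echoed back as a protocol-relative redirect to another host. A minimal sketch of the effect:

package main

import (
	"fmt"
	"path"
)

// cleanRedirectTarget applies the same path.Clean the hunk above adds before
// issuing the trailing-slash redirect for directories.
func cleanRedirectTarget(reqPath string) string {
	return path.Clean(reqPath + "/")
}

func main() {
	fmt.Println(cleanRedirectTarget("//evil.com")) // "/evil.com": stays a local path
	fmt.Println(cleanRedirectTarget("/css"))       // "/css"
}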

View File

@@ -98,8 +98,3 @@ func Min(a, b int) int {
} }
return a return a
} }
// IsEmptyString checks if the provided string is empty
func IsEmptyString(s string) bool {
return len(strings.TrimSpace(s)) == 0
}

View File

@@ -77,20 +77,3 @@ func TestIsExternalURL(t *testing.T) {
assert.Equal(t, test.Expected, IsExternalURL(test.RawURL)) assert.Equal(t, test.Expected, IsExternalURL(test.RawURL))
} }
} }
func TestIsEmptyString(t *testing.T) {
cases := []struct {
s string
expected bool
}{
{"", true},
{" ", true},
{" ", true},
{" a", false},
}
for _, v := range cases {
assert.Equal(t, v.expected, IsEmptyString(v.s))
}
}

View File

@@ -655,7 +655,6 @@ ext_issues.desc = Link to an external issue tracker.
issues.desc = Organize bug reports, tasks and milestones. issues.desc = Organize bug reports, tasks and milestones.
issues.new = New Issue issues.new = New Issue
issues.new.title_empty = Title cannot be empty
issues.new.labels = Labels issues.new.labels = Labels
issues.new.no_label = No Label issues.new.no_label = No Label
issues.new.clear_labels = Clear labels issues.new.clear_labels = Clear labels

View File

@@ -859,7 +859,6 @@ pulls.title_wip_desc=`<a href="#">Sāciet virsrakstu ar <strong>%s</strong></a>,
pulls.cannot_merge_work_in_progress=Šis izmaiņu pieprasījums ir atzīmēts, ka pie tā vēl notiek izstrāde. Noņemiet <strong>%s</strong> no virsraksta sākuma, kad tas ir pabeigts. pulls.cannot_merge_work_in_progress=Šis izmaiņu pieprasījums ir atzīmēts, ka pie tā vēl notiek izstrāde. Noņemiet <strong>%s</strong> no virsraksta sākuma, kad tas ir pabeigts.
pulls.data_broken=Izmaiņu pieprasījums ir bojāts, jo dzēsta informācija no atdalītā repozitorija. pulls.data_broken=Izmaiņu pieprasījums ir bojāts, jo dzēsta informācija no atdalītā repozitorija.
pulls.is_checking=Notiek konfliktu pārbaude, mirkli uzgaidiet un atjaunojiet lapu. pulls.is_checking=Notiek konfliktu pārbaude, mirkli uzgaidiet un atjaunojiet lapu.
pulls.blocked_by_approvals=Šim izmaiņu pieprasījumam nav nepieciešamais apstiprinājumu daudzums. %d no %d apstiprinājumi piešķirti.
pulls.can_auto_merge_desc=Šo izmaiņu pieprasījumu var automātiski sapludināt. pulls.can_auto_merge_desc=Šo izmaiņu pieprasījumu var automātiski sapludināt.
pulls.cannot_auto_merge_desc=Šis izmaiņu pieprasījums nevar tikt automātiski sapludināts konfliktu dēļ. pulls.cannot_auto_merge_desc=Šis izmaiņu pieprasījums nevar tikt automātiski sapludināts konfliktu dēļ.
pulls.cannot_auto_merge_helper=Sapludiniet manuāli, lai atrisinātu konfliktus. pulls.cannot_auto_merge_helper=Sapludiniet manuāli, lai atrisinātu konfliktus.
@@ -868,7 +867,6 @@ pulls.no_merge_helper=Lai sapludinātu šo izmaiņu pieprasījumu, iespējojiet
pulls.no_merge_wip=Šo izmaiņu pieprasījumu nav iespējams sapludināt, jo tas ir atzīmēts, ka darbs pie tā vēl nav pabeigts. pulls.no_merge_wip=Šo izmaiņu pieprasījumu nav iespējams sapludināt, jo tas ir atzīmēts, ka darbs pie tā vēl nav pabeigts.
pulls.merge_pull_request=Izmaiņu pieprasījuma sapludināšana pulls.merge_pull_request=Izmaiņu pieprasījuma sapludināšana
pulls.rebase_merge_pull_request=Pārbāzēt un sapludināt pulls.rebase_merge_pull_request=Pārbāzēt un sapludināt
pulls.rebase_merge_commit_pull_request=Pārbāzēt un sapludināt (--no-ff)
pulls.squash_merge_pull_request=Saspiest un sapludināt pulls.squash_merge_pull_request=Saspiest un sapludināt
pulls.invalid_merge_option=Nav iespējams izmantot šādu sapludināšanas veidu šim izmaiņu pieprasījumam. pulls.invalid_merge_option=Nav iespējams izmantot šādu sapludināšanas veidu šim izmaiņu pieprasījumam.
pulls.open_unmerged_pull_exists=`Jūs nevarat veikt atkārtotas atvēršanas darbību, jo jau eksistē izmaiņu pieprasījums (#%d) ar šādu sapludināšanas informāciju.` pulls.open_unmerged_pull_exists=`Jūs nevarat veikt atkārtotas atvēršanas darbību, jo jau eksistē izmaiņu pieprasījums (#%d) ar šādu sapludināšanas informāciju.`
@@ -1014,7 +1012,6 @@ settings.pulls_desc=Iespējot repozitorija izmaiņu pieprasījumus
settings.pulls.ignore_whitespace=Pārbaudot konfliktus, ignorēt izmaiņas atstarpēs settings.pulls.ignore_whitespace=Pārbaudot konfliktus, ignorēt izmaiņas atstarpēs
settings.pulls.allow_merge_commits=Iespējot revīziju sapludināšanu settings.pulls.allow_merge_commits=Iespējot revīziju sapludināšanu
settings.pulls.allow_rebase_merge=Iespējot pārbāzēšanu sapludinot revīzijas settings.pulls.allow_rebase_merge=Iespējot pārbāzēšanu sapludinot revīzijas
settings.pulls.allow_rebase_merge_commit=Iespējot pārbāzēšanu sapludinot revīzijas (--no-ff)
settings.pulls.allow_squash_commits=Iespējot saspiešanu sapludinot revīzijas settings.pulls.allow_squash_commits=Iespējot saspiešanu sapludinot revīzijas
settings.admin_settings=Administratora iestatījumi settings.admin_settings=Administratora iestatījumi
settings.admin_enable_health_check=Iespējot veselības pārbaudi (git fsck) šim repozitorijam settings.admin_enable_health_check=Iespējot veselības pārbaudi (git fsck) šim repozitorijam
@@ -1101,7 +1098,6 @@ settings.event_issue_comment_desc=Problēmas komentārs pievienots, labots vai d
settings.event_release=Laidiens settings.event_release=Laidiens
settings.event_release_desc=Publicēts, atjaunots vai dzēsts laidiens repozitorijā. settings.event_release_desc=Publicēts, atjaunots vai dzēsts laidiens repozitorijā.
settings.event_pull_request=Izmaiņu pieprasījums settings.event_pull_request=Izmaiņu pieprasījums
settings.event_pull_request_desc=Izmaiņu pieprasījums izveidots, slēgts, atkārtoti atvērts, labots, apstiprināts, noraidīts, recenzēts, piešķirts, pievienots vai noņemts atbildīgais, pievienota etiķete, noņemta etiķete, pievienots vai noņemts atskaites punkts.
settings.event_push=Izmaiņu nosūtīšana settings.event_push=Izmaiņu nosūtīšana
settings.event_push_desc=Git izmaiņu nosūtīšana uz repozitoriju. settings.event_push_desc=Git izmaiņu nosūtīšana uz repozitoriju.
settings.event_repository=Repozitorijs settings.event_repository=Repozitorijs
@@ -1152,10 +1148,6 @@ settings.protect_merge_whitelist_committers=Iespējot sapludināšanas ierobežo
settings.protect_merge_whitelist_committers_desc=Atļaut tikai noteiktiem lietotājiem vai komandām sapludināt izmaiņu pieprasījumus šajā atzarā. settings.protect_merge_whitelist_committers_desc=Atļaut tikai noteiktiem lietotājiem vai komandām sapludināt izmaiņu pieprasījumus šajā atzarā.
settings.protect_merge_whitelist_users=Lietotāji, kas var veikt izmaiņu sapludināšanu: settings.protect_merge_whitelist_users=Lietotāji, kas var veikt izmaiņu sapludināšanu:
settings.protect_merge_whitelist_teams=Komandas, kas var veikt izmaiņu sapludināšanu: settings.protect_merge_whitelist_teams=Komandas, kas var veikt izmaiņu sapludināšanu:
settings.protect_required_approvals=Vajadzīgi apstiprinājumi:
settings.protect_required_approvals_desc=Atļaut tikai noteiktiem lietotājiem vai komandām sapludināt izmaiņu pieprasījumu, kam veikts noteikts daudzums pozitīvu recenziju.
settings.protect_approvals_whitelist_users=Lietotāji, kas var veikt recenzijas:
settings.protect_approvals_whitelist_teams=Komandas, kas var veikt recenzijas:
settings.add_protected_branch=Iespējot aizsargāšanu settings.add_protected_branch=Iespējot aizsargāšanu
settings.delete_protected_branch=Atspējot aizsargāšanu settings.delete_protected_branch=Atspējot aizsargāšanu
settings.update_protect_branch_success=Atzara aizsardzība atzaram '%s' tika saglabāta. settings.update_protect_branch_success=Atzara aizsardzība atzaram '%s' tika saglabāta.
@@ -1166,7 +1158,6 @@ settings.default_branch_desc=Norādiet noklusēto repozitorija atzaru izmaiņu p
settings.choose_branch=Izvēlieties atzaru… settings.choose_branch=Izvēlieties atzaru…
settings.no_protected_branch=Nav neviena aizsargātā atzara. settings.no_protected_branch=Nav neviena aizsargātā atzara.
settings.edit_protected_branch=Labot settings.edit_protected_branch=Labot
settings.protected_branch_required_approvals_min=Pieprasīto recenziju skaits nevar būt negatīvs.
diff.browse_source=Pārlūkot izejas kodu diff.browse_source=Pārlūkot izejas kodu
diff.parent=vecāks diff.parent=vecāks

View File

@@ -85,7 +85,7 @@ func sudo() macaron.Handler {
} }
if len(sudo) > 0 { if len(sudo) > 0 {
if ctx.IsSigned && ctx.User.IsAdmin { if ctx.User.IsAdmin {
user, err := models.GetUserByName(sudo) user, err := models.GetUserByName(sudo)
if err != nil { if err != nil {
if models.IsErrUserNotExist(err) { if models.IsErrUserNotExist(err) {
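
The left-hand side of this hunk is the fix from #5872/#5884: check ctx.IsSigned before touching ctx.User, because an anonymous request has no user and the old code dereferenced nil when a Sudo header arrived without a login. A stripped-down sketch of the guard:

package main

import "fmt"

// user and apiContext are minimal stand-ins; User is nil for anonymous
// requests, which is exactly the case the guard protects against.
type user struct{ IsAdmin bool }

type apiContext struct {
	IsSigned bool
	User     *user
}

// canSudo mirrors the left-hand side above: IsSigned is checked first, so
// ctx.User is never dereferenced when nobody is signed in.
func canSudo(ctx *apiContext) bool {
	return ctx.IsSigned && ctx.User.IsAdmin
}

func main() {
	fmt.Println(canSudo(&apiContext{}))                                           // false, no nil dereference
	fmt.Println(canSudo(&apiContext{IsSigned: true, User: &user{IsAdmin: true}})) // true
}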

View File

@@ -16,30 +16,6 @@ import (
// GetTree get the tree of a repository. // GetTree get the tree of a repository.
func GetTree(ctx *context.APIContext) { func GetTree(ctx *context.APIContext) {
// swagger:operation GET /repos/{owner}/{repo}/git/trees/{sha} repository GetTree
// ---
// summary: Gets the tree of a repository.
// produces:
// - application/json
// parameters:
// - name: owner
// in: path
// description: owner of the repo
// type: string
// required: true
// - name: repo
// in: path
// description: name of the repo
// type: string
// required: true
// - name: sha
// in: path
// description: sha of the commit
// type: string
// required: true
// responses:
// "200":
// "$ref": "#/responses/GitTreeResponse"
sha := ctx.Params("sha") sha := ctx.Params("sha")
if len(sha) == 0 { if len(sha) == 0 {
ctx.Error(400, "sha not provided", nil) ctx.Error(400, "sha not provided", nil)

View File

@@ -133,10 +133,3 @@ type swaggerResponseAttachment struct {
//in: body //in: body
Body api.Attachment `json:"body"` Body api.Attachment `json:"body"`
} }
// GitTreeResponse
// swagger:response GitTreeResponse
type swaggerGitTreeResponse struct {
//in: body
Body api.GitTreeResponse `json:"body"`
}

View File

@@ -201,7 +201,7 @@ func Diff(ctx *context.Context) {
commitID = commit.ID.String() commitID = commit.ID.String()
} }
statuses, err := models.GetLatestCommitStatus(ctx.Repo.Repository, commitID, 0) statuses, err := models.GetLatestCommitStatus(ctx.Repo.Repository, ctx.Repo.Commit.ID.String(), 0)
if err != nil { if err != nil {
log.Error(3, "GetLatestCommitStatus: %v", err) log.Error(3, "GetLatestCommitStatus: %v", err)
} }

View File

@@ -163,11 +163,7 @@ func editFilePost(ctx *context.Context, form auth.EditRepoFileForm, isNewFile bo
branchName = form.NewBranchName branchName = form.NewBranchName
} }
form.TreePath = cleanUploadFileName(form.TreePath) form.TreePath = strings.Trim(path.Clean("/"+form.TreePath), " /")
if len(form.TreePath) == 0 {
ctx.Error(500, "Upload file name is invalid")
return
}
treeNames, treePaths := getParentTreeFields(form.TreePath) treeNames, treePaths := getParentTreeFields(form.TreePath)
ctx.Data["TreePath"] = form.TreePath ctx.Data["TreePath"] = form.TreePath
@@ -377,13 +373,6 @@ func DeleteFile(ctx *context.Context) {
func DeleteFilePost(ctx *context.Context, form auth.DeleteRepoFileForm) { func DeleteFilePost(ctx *context.Context, form auth.DeleteRepoFileForm) {
ctx.Data["PageIsDelete"] = true ctx.Data["PageIsDelete"] = true
ctx.Data["BranchLink"] = ctx.Repo.RepoLink + "/src/" + ctx.Repo.BranchNameSubURL() ctx.Data["BranchLink"] = ctx.Repo.RepoLink + "/src/" + ctx.Repo.BranchNameSubURL()
ctx.Repo.TreePath = cleanUploadFileName(ctx.Repo.TreePath)
if len(ctx.Repo.TreePath) == 0 {
ctx.Error(500, "Delete file name is invalid")
return
}
ctx.Data["TreePath"] = ctx.Repo.TreePath ctx.Data["TreePath"] = ctx.Repo.TreePath
canCommit := renderCommitRights(ctx) canCommit := renderCommitRights(ctx)
@@ -488,12 +477,7 @@ func UploadFilePost(ctx *context.Context, form auth.UploadRepoFileForm) {
branchName = form.NewBranchName branchName = form.NewBranchName
} }
form.TreePath = cleanUploadFileName(form.TreePath) form.TreePath = strings.Trim(path.Clean("/"+form.TreePath), " /")
if len(form.TreePath) == 0 {
ctx.Error(500, "Upload file name is invalid")
return
}
treeNames, treePaths := getParentTreeFields(form.TreePath) treeNames, treePaths := getParentTreeFields(form.TreePath)
if len(treeNames) == 0 { if len(treeNames) == 0 {
// We must at least have one element for user to input. // We must at least have one element for user to input.
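
The left-hand side of these hunks routes every user-supplied tree path through cleanUploadFileName and rejects an empty result, which is the fix for #5631 (prevent DeleteFilePost doing arbitrary deletion). A sketch of what such a helper can look like; the .git check is an assumption about an extra guard, not something visible in this diff:

package main

import (
	"fmt"
	"path"
	"strings"
)

// cleanUploadFileName rebases the path on "/" so ".." segments cannot climb
// out of the repository, then strips the leading slash and surrounding
// spaces. An empty result tells the caller to reject the request.
func cleanUploadFileName(name string) string {
	name = strings.Trim(path.Clean("/"+name), " /")
	// Assumed extra guard: refuse paths that would touch the .git directory.
	for _, part := range strings.Split(name, "/") {
		if strings.ToLower(part) == ".git" {
			return ""
		}
	}
	return name
}

func main() {
	fmt.Printf("%q\n", cleanUploadFileName("../../etc/passwd")) // "etc/passwd": climb neutralised
	fmt.Printf("%q\n", cleanUploadFileName("/"))                // "": rejected by the caller
	fmt.Printf("%q\n", cleanUploadFileName(".git/config"))      // "": rejected
}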

View File

@@ -355,7 +355,7 @@ func setTemplateIfExists(ctx *context.Context, ctxDataKey string, possibleFiles
} }
} }
// NewIssue render creating issue page // NewIssue render createing issue page
func NewIssue(ctx *context.Context) { func NewIssue(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("repo.issues.new") ctx.Data["Title"] = ctx.Tr("repo.issues.new")
ctx.Data["PageIsIssueList"] = true ctx.Data["PageIsIssueList"] = true
@@ -494,11 +494,6 @@ func NewIssuePost(ctx *context.Context, form auth.CreateIssueForm) {
return return
} }
if util.IsEmptyString(form.Title) {
ctx.RenderWithErr(ctx.Tr("repo.issues.new.title_empty"), tplIssueNew, form)
return
}
issue := &models.Issue{ issue := &models.Issue{
RepoID: repo.ID, RepoID: repo.ID,
Title: form.Title, Title: form.Title,

View File

@@ -22,7 +22,6 @@ import (
"code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/notification" "code.gitea.io/gitea/modules/notification"
"code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
"github.com/Unknwon/com" "github.com/Unknwon/com"
) )
@@ -861,16 +860,6 @@ func CompareAndPullRequestPost(ctx *context.Context, form auth.CreateIssueForm)
return return
} }
if util.IsEmptyString(form.Title) {
PrepareCompareDiff(ctx, headUser, headRepo, headGitRepo, prInfo, baseBranch, headBranch)
if ctx.Written() {
return
}
ctx.RenderWithErr(ctx.Tr("repo.issues.new.title_empty"), tplComparePull, form)
return
}
patch, err := headGitRepo.GetPatch(prInfo.MergeBase, headBranch) patch, err := headGitRepo.GetPatch(prInfo.MergeBase, headBranch)
if err != nil { if err != nil {
ctx.ServerError("GetPatch", err) ctx.ServerError("GetPatch", err)

View File

@@ -341,11 +341,6 @@ func NewWikiPost(ctx *context.Context, form auth.NewWikiForm) {
return return
} }
if util.IsEmptyString(form.Title) {
ctx.RenderWithErr(ctx.Tr("repo.issues.new.title_empty"), tplWikiNew, form)
return
}
wikiName := models.NormalizeWikiName(form.Title) wikiName := models.NormalizeWikiName(form.Title)
if err := ctx.Repo.Repository.AddWikiPage(ctx.User, wikiName, form.Content, form.Message); err != nil { if err := ctx.Repo.Repository.AddWikiPage(ctx.User, wikiName, form.Content, form.Message); err != nil {
if models.IsErrWikiReservedName(err) { if models.IsErrWikiReservedName(err) {

View File

@@ -106,7 +106,7 @@ func NewMacaron() *macaron.Macaron {
Langs: setting.Langs, Langs: setting.Langs,
Names: setting.Names, Names: setting.Names,
DefaultLang: "en-US", DefaultLang: "en-US",
Redirect: false, Redirect: true,
})) }))
m.Use(cache.Cacher(cache.Options{ m.Use(cache.Cacher(cache.Options{
Adapter: setting.CacheService.Adapter, Adapter: setting.CacheService.Adapter,
@@ -643,7 +643,7 @@ func RegisterRoutes(m *macaron.Macaron) {
} }
ctx.Data["CommitsCount"] = ctx.Repo.CommitsCount ctx.Data["CommitsCount"] = ctx.Repo.CommitsCount
}) })
}, ignSignIn, context.RepoAssignment(), context.UnitTypes(), reqRepoReleaseReader) }, context.RepoAssignment(), context.UnitTypes(), reqRepoReleaseReader)
m.Group("/:username/:reponame", func() { m.Group("/:username/:reponame", func() {
m.Post("/topics", repo.TopicsPost) m.Post("/topics", repo.TopicsPost)

View File

@@ -115,8 +115,7 @@ func SignInOpenIDPost(ctx *context.Context, form auth.SignInOpenIDForm) {
redirectTo := setting.AppURL + "user/login/openid" redirectTo := setting.AppURL + "user/login/openid"
url, err := openid.RedirectURL(id, redirectTo, setting.AppURL) url, err := openid.RedirectURL(id, redirectTo, setting.AppURL)
if err != nil { if err != nil {
log.Error(1, "Error in OpenID redirect URL: %s, %v", redirectTo, err.Error()) ctx.RenderWithErr(err.Error(), tplSignInOpenID, &form)
ctx.RenderWithErr(fmt.Sprintf("Unable to find OpenID provider in %s", redirectTo), tplSignInOpenID, &form)
return return
} }

View File

@@ -100,7 +100,7 @@
<dt>{{.i18n.Tr "admin.dashboard.mspan_structures_usage"}}</dt> <dt>{{.i18n.Tr "admin.dashboard.mspan_structures_usage"}}</dt>
<dd>{{.SysStatus.MSpanInuse}}</dd> <dd>{{.SysStatus.MSpanInuse}}</dd>
<dt>{{.i18n.Tr "admin.dashboard.mspan_structures_obtained"}}</dt> <dt>{{.i18n.Tr "admin.dashboard.mspan_structures_obtained"}}</dt>
<dd>{{.SysStatus.MSpanSys}}</dd> <dd>{{.SysStatus.HeapSys}}</dd>
<dt>{{.i18n.Tr "admin.dashboard.mcache_structures_usage"}}</dt> <dt>{{.i18n.Tr "admin.dashboard.mcache_structures_usage"}}</dt>
<dd>{{.SysStatus.MCacheInuse}}</dd> <dd>{{.SysStatus.MCacheInuse}}</dd>
<dt>{{.i18n.Tr "admin.dashboard.mcache_structures_obtained"}}</dt> <dt>{{.i18n.Tr "admin.dashboard.mcache_structures_obtained"}}</dt>

View File

@@ -54,7 +54,7 @@
<div class="ui stackable secondary menu mobile--margin-between-items mobile--no-negative-margins"> <div class="ui stackable secondary menu mobile--margin-between-items mobile--no-negative-margins">
{{if and .PullRequestCtx.Allowed .IsViewBranch}} {{if and .PullRequestCtx.Allowed .IsViewBranch}}
<div class="fitted item"> <div class="fitted item">
<a href="{{.BaseRepo.Link}}/compare/{{.BaseRepo.DefaultBranch | EscapePound}}...{{ if .Repository.IsFork }}{{.Repository.Owner.Name}}{{ else }}{{ .SignedUserName }}{{ end }}:{{.BranchName | EscapePound}}"> <a href="{{.BaseRepo.Link}}/compare/{{.BaseRepo.DefaultBranch | EscapePound}}...{{.Repository.Owner.Name}}:{{.BranchName | EscapePound}}">
<button class="ui green tiny compact button"><i class="octicon octicon-git-compare"></i></button> <button class="ui green tiny compact button"><i class="octicon octicon-git-compare"></i></button>
</a> </a>
</div> </div>

View File

@@ -1663,46 +1663,6 @@
} }
} }
}, },
"/repos/{owner}/{repo}/git/trees/{sha}": {
"get": {
"produces": [
"application/json"
],
"tags": [
"repository"
],
"summary": "Gets the tree of a repository.",
"operationId": "GetTree",
"parameters": [
{
"type": "string",
"description": "owner of the repo",
"name": "owner",
"in": "path",
"required": true
},
{
"type": "string",
"description": "name of the repo",
"name": "repo",
"in": "path",
"required": true
},
{
"type": "string",
"description": "sha of the commit",
"name": "sha",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"$ref": "#/responses/GitTreeResponse"
}
}
}
},
"/repos/{owner}/{repo}/hooks": { "/repos/{owner}/{repo}/hooks": {
"get": { "get": {
"produces": [ "produces": [
@@ -7080,38 +7040,6 @@
}, },
"x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea" "x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea"
}, },
"GitEntry": {
"description": "GitEntry represents a git tree",
"type": "object",
"properties": {
"mode": {
"type": "string",
"x-go-name": "Mode"
},
"path": {
"type": "string",
"x-go-name": "Path"
},
"sha": {
"type": "string",
"x-go-name": "SHA"
},
"size": {
"type": "integer",
"format": "int64",
"x-go-name": "Size"
},
"type": {
"type": "string",
"x-go-name": "Type"
},
"url": {
"type": "string",
"x-go-name": "URL"
}
},
"x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea"
},
"GitObject": { "GitObject": {
"type": "object", "type": "object",
"title": "GitObject represents a Git object.", "title": "GitObject represents a Git object.",
@@ -7131,32 +7059,6 @@
}, },
"x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea" "x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea"
}, },
"GitTreeResponse": {
"description": "GitTreeResponse returns a git tree",
"type": "object",
"properties": {
"sha": {
"type": "string",
"x-go-name": "SHA"
},
"tree": {
"type": "array",
"items": {
"$ref": "#/definitions/GitEntry"
},
"x-go-name": "Entries"
},
"truncated": {
"type": "boolean",
"x-go-name": "Truncated"
},
"url": {
"type": "string",
"x-go-name": "URL"
}
},
"x-go-package": "code.gitea.io/gitea/vendor/code.gitea.io/sdk/gitea"
},
"Issue": { "Issue": {
"description": "Issue represents an issue in a repository", "description": "Issue represents an issue in a repository",
"type": "object", "type": "object",
@@ -8298,12 +8200,6 @@
} }
} }
}, },
"GitTreeResponse": {
"description": "GitTreeResponse",
"schema": {
"$ref": "#/definitions/GitTreeResponse"
}
},
"Hook": { "Hook": {
"description": "Hook", "description": "Hook",
"schema": { "schema": {

View File

@@ -822,7 +822,7 @@ func (db *postgres) SqlType(c *core.Column) string {
case core.NVarchar: case core.NVarchar:
res = core.Varchar res = core.Varchar
case core.Uuid: case core.Uuid:
return core.Uuid res = core.Uuid
case core.Blob, core.TinyBlob, core.MediumBlob, core.LongBlob: case core.Blob, core.TinyBlob, core.MediumBlob, core.LongBlob:
return core.Bytea return core.Bytea
case core.Double: case core.Double:
@@ -834,10 +834,6 @@ func (db *postgres) SqlType(c *core.Column) string {
res = t res = t
} }
if strings.EqualFold(res, "bool") {
// for bool, we don't need length information
return res
}
hasLen1 := (c.Length > 0) hasLen1 := (c.Length > 0)
hasLen2 := (c.Length2 > 0) hasLen2 := (c.Length2 > 0)
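
The left-hand side of this vendored xorm hunk is part of the postgres dump fix tracked by #5680/#5692: uuid types are returned as-is and bool columns never get a "(length)" suffix, which postgres rejects. A standalone sketch of that length-suffix rule:

package main

import (
	"fmt"
	"strings"
)

// sqlTypeWithLen sketches the rule the hunk above adds to the postgres
// dialect: booleans (and, via the early return, uuid) keep their bare type
// name instead of gaining a length suffix.
func sqlTypeWithLen(res string, length int) string {
	if strings.EqualFold(res, "bool") {
		return res // no length information for booleans
	}
	if length > 0 {
		return fmt.Sprintf("%s(%d)", res, length)
	}
	return res
}

func main() {
	fmt.Println(sqlTypeWithLen("bool", 1))      // bool
	fmt.Println(sqlTypeWithLen("VARCHAR", 255)) // VARCHAR(255)
}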

View File

@@ -481,8 +481,7 @@ func (engine *Engine) dumpTables(tables []*core.Table, w io.Writer, tp ...core.D
} }
cols := table.ColumnsSeq() cols := table.ColumnsSeq()
colNames := engine.dialect.Quote(strings.Join(cols, engine.dialect.Quote(", "))) colNames := dialect.Quote(strings.Join(cols, dialect.Quote(", ")))
destColNames := dialect.Quote(strings.Join(cols, dialect.Quote(", ")))
rows, err := engine.DB().Query("SELECT " + colNames + " FROM " + engine.Quote(table.Name)) rows, err := engine.DB().Query("SELECT " + colNames + " FROM " + engine.Quote(table.Name))
if err != nil { if err != nil {
@@ -497,7 +496,7 @@ func (engine *Engine) dumpTables(tables []*core.Table, w io.Writer, tp ...core.D
return err return err
} }
_, err = io.WriteString(w, "INSERT INTO "+dialect.Quote(table.Name)+" ("+destColNames+") VALUES (") _, err = io.WriteString(w, "INSERT INTO "+dialect.Quote(table.Name)+" ("+colNames+") VALUES (")
if err != nil { if err != nil {
return err return err
} }

vendor/gopkg.in/ldap.v2/LICENSE (generated, vendored): 43 changed lines
View File

@@ -1,22 +1,27 @@
(left-hand side of the diff)

The MIT License (MIT)

Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com)
Portions copyright (c) 2015-2016 go-ldap Authors

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

(right-hand side of the diff)

Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
View File

@@ -1,13 +0,0 @@
// +build go1.4
package ldap
import (
"sync/atomic"
)
// For compilers that support it, we just use the underlying sync/atomic.Value
// type.
type atomicValue struct {
atomic.Value
}

View File

@@ -1,28 +0,0 @@
// +build !go1.4
package ldap
import (
"sync"
)
// This is a helper type that emulates the use of the "sync/atomic.Value"
// struct that's available in Go 1.4 and up.
type atomicValue struct {
value interface{}
lock sync.RWMutex
}
func (av *atomicValue) Store(val interface{}) {
av.lock.Lock()
av.value = val
av.lock.Unlock()
}
func (av *atomicValue) Load() interface{} {
av.lock.RLock()
ret := av.value
av.lock.RUnlock()
return ret
}

vendor/gopkg.in/ldap.v2/conn.go (generated, vendored): 69 changed lines
View File

@@ -11,7 +11,6 @@ import (
 	"log"
 	"net"
 	"sync"
-	"sync/atomic"
 	"time"
 	"gopkg.in/asn1-ber.v1"
@@ -83,18 +82,20 @@ const (
 type Conn struct {
 	conn                net.Conn
 	isTLS               bool
-	closing             uint32
-	closeErr            atomicValue
+	isClosing           bool
+	closeErr            error
 	isStartingTLS       bool
 	Debug               debugging
-	chanConfirm         chan struct{}
+	chanConfirm         chan bool
 	messageContexts     map[int64]*messageContext
 	chanMessage         chan *messagePacket
 	chanMessageID       chan int64
-	wgSender            sync.WaitGroup
 	wgClose             sync.WaitGroup
+	once                sync.Once
 	outstandingRequests uint
 	messageMutex        sync.Mutex
-	requestTimeout      int64
+	requestTimeout      time.Duration
 }

 var _ Client = &Conn{}
@@ -141,7 +142,7 @@ func DialTLS(network, addr string, config *tls.Config) (*Conn, error) {
 func NewConn(conn net.Conn, isTLS bool) *Conn {
 	return &Conn{
 		conn:            conn,
-		chanConfirm:     make(chan struct{}),
+		chanConfirm:     make(chan bool),
 		chanMessageID:   make(chan int64),
 		chanMessage:     make(chan *messagePacket, 10),
 		messageContexts: map[int64]*messageContext{},
@@ -157,22 +158,12 @@ func (l *Conn) Start() {
 	l.wgClose.Add(1)
 }

-// isClosing returns whether or not we're currently closing.
-func (l *Conn) isClosing() bool {
-	return atomic.LoadUint32(&l.closing) == 1
-}
-
-// setClosing sets the closing value to true
-func (l *Conn) setClosing() bool {
-	return atomic.CompareAndSwapUint32(&l.closing, 0, 1)
-}
-
 // Close closes the connection.
 func (l *Conn) Close() {
-	l.messageMutex.Lock()
-	defer l.messageMutex.Unlock()
-	l.wgSender.Wait()
-	if l.setClosing() {
+	l.once.Do(func() {
+		l.isClosing = true
 		l.Debug.Printf("Sending quit message and waiting for confirmation")
 		l.chanMessage <- &messagePacket{Op: MessageQuit}
 		<-l.chanConfirm
@@ -180,26 +171,28 @@ func (l *Conn) Close() {
 		l.Debug.Printf("Closing network connection")
 		if err := l.conn.Close(); err != nil {
-			log.Println(err)
+			log.Print(err)
 		}
 		l.wgClose.Done()
-	}
+	})
 	l.wgClose.Wait()
 }

 // SetTimeout sets the time after a request is sent that a MessageTimeout triggers
 func (l *Conn) SetTimeout(timeout time.Duration) {
 	if timeout > 0 {
-		atomic.StoreInt64(&l.requestTimeout, int64(timeout))
+		l.requestTimeout = timeout
 	}
 }

 // Returns the next available messageID
 func (l *Conn) nextMessageID() int64 {
-	if l.chanMessageID != nil {
-		if messageID, ok := <-l.chanMessageID; ok {
-			return messageID
-		}
+	if messageID, ok := <-l.chanMessageID; ok {
+		return messageID
 	}
 	return 0
 }
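The rewritten Close in the added (+) lines leans on sync.Once so that concurrent callers run the quit-and-cleanup sequence exactly once. A generic sketch of that idempotent-close pattern, reduced to the parts visible in the hunk (not the vendored implementation itself):

package main

import (
	"fmt"
	"sync"
)

// conn is a stand-in for the diff's Conn type, reduced to the close logic.
type conn struct {
	once      sync.Once
	isClosing bool
	done      chan struct{}
}

// Close is safe to call from many goroutines: sync.Once runs the shutdown
// sequence exactly once, no matter how many callers race into it.
func (c *conn) Close() {
	c.once.Do(func() {
		c.isClosing = true
		close(c.done) // signal reader/processor loops to stop
		fmt.Println("connection shut down")
	})
}

func main() {
	c := &conn{done: make(chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Close()
		}()
	}
	wg.Wait() // "connection shut down" is printed only once
}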
@@ -265,7 +258,7 @@ func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) {
 }

 func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) {
-	if l.isClosing() {
+	if l.isClosing {
 		return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed"))
 	}
 	l.messageMutex.Lock()
@@ -304,7 +297,7 @@ func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags)
 func (l *Conn) finishMessage(msgCtx *messageContext) {
 	close(msgCtx.done)
-	if l.isClosing() {
+	if l.isClosing {
 		return
 	}
@@ -323,12 +316,12 @@ func (l *Conn) finishMessage(msgCtx *messageContext) {
 }

 func (l *Conn) sendProcessMessage(message *messagePacket) bool {
-	l.messageMutex.Lock()
-	defer l.messageMutex.Unlock()
-	if l.isClosing() {
+	if l.isClosing {
 		return false
 	}
-	l.wgSender.Add(1)
 	l.chanMessage <- message
-	l.wgSender.Done()
 	return true
 }
@@ -340,14 +333,15 @@ func (l *Conn) processMessages() {
 		for messageID, msgCtx := range l.messageContexts {
 			// If we are closing due to an error, inform anyone who
 			// is waiting about the error.
-			if l.isClosing() && l.closeErr.Load() != nil {
-				msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)})
+			if l.isClosing && l.closeErr != nil {
+				msgCtx.sendResponse(&PacketResponse{Error: l.closeErr})
 			}
 			l.Debug.Printf("Closing channel for MessageID %d", messageID)
 			close(msgCtx.responses)
 			delete(l.messageContexts, messageID)
 		}
 		close(l.chanMessageID)
+		l.chanConfirm <- true
 		close(l.chanConfirm)
 	}()
@@ -356,7 +350,11 @@ func (l *Conn) processMessages() {
 		select {
 		case l.chanMessageID <- messageID:
 			messageID++
-		case message := <-l.chanMessage:
+		case message, ok := <-l.chanMessage:
+			if !ok {
+				l.Debug.Printf("Shutting down - message channel is closed")
+				return
+			}
 			switch message.Op {
 			case MessageQuit:
 				l.Debug.Printf("Shutting down - quit message received")
@@ -379,15 +377,14 @@ func (l *Conn) processMessages() {
 				l.messageContexts[message.MessageID] = message.Context

 				// Add timeout if defined
-				requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout))
-				if requestTimeout > 0 {
+				if l.requestTimeout > 0 {
 					go func() {
 						defer func() {
 							if err := recover(); err != nil {
 								log.Printf("ldap: recovered panic in RequestTimeout: %v", err)
 							}
 						}()
-						time.Sleep(requestTimeout)
+						time.Sleep(l.requestTimeout)
 						timeoutMessage := &messagePacket{
 							Op:        MessageTimeout,
 							MessageID: message.MessageID,
@@ -400,7 +397,7 @@ func (l *Conn) processMessages() {
 			if msgCtx, ok := l.messageContexts[message.MessageID]; ok {
 				msgCtx.sendResponse(&PacketResponse{message.Packet, nil})
 			} else {
-				log.Printf("Received unexpected message %d, %v", message.MessageID, l.isClosing())
+				log.Printf("Received unexpected message %d, %v", message.MessageID, l.isClosing)
 				ber.PrintPacket(message.Packet)
 			}
 		case MessageTimeout:
@@ -442,8 +439,8 @@ func (l *Conn) reader() {
 		packet, err := ber.ReadPacket(l.conn)
 		if err != nil {
 			// A read error is expected here if we are closing the connection...
-			if !l.isClosing() {
-				l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err))
+			if !l.isClosing {
+				l.closeErr = fmt.Errorf("unable to read LDAP response packet: %s", err)
 				l.Debug.Printf("reader error: %s", err.Error())
 			}
 			return
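The removed (-) lines read requestTimeout through sync/atomic because SetTimeout can race with processMessages, so the time.Duration is stored as its int64 nanosecond count; the added (+) lines keep a plain field. A small sketch of the atomic variant (illustrative only, not the vendored code):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type timeouts struct {
	requestTimeout int64 // nanoseconds, accessed atomically
}

// SetTimeout publishes a new timeout without a mutex.
func (t *timeouts) SetTimeout(d time.Duration) {
	if d > 0 {
		atomic.StoreInt64(&t.requestTimeout, int64(d))
	}
}

// Timeout reads the current value back as a time.Duration.
func (t *timeouts) Timeout() time.Duration {
	return time.Duration(atomic.LoadInt64(&t.requestTimeout))
}

func main() {
	var t timeouts
	t.SetTimeout(5 * time.Second)
	fmt.Println(t.Timeout()) // prints 5s
}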

vendor/gopkg.in/ldap.v2/control.go (generated, vendored): 12 changed lines

@@ -334,18 +334,18 @@ func DecodeControl(packet *ber.Packet) Control {
 		for _, child := range sequence.Children {
 			if child.Tag == 0 {
 				//Warning
-				warningPacket := child.Children[0]
-				packet := ber.DecodePacket(warningPacket.Data.Bytes())
+				child := child.Children[0]
+				packet := ber.DecodePacket(child.Data.Bytes())
 				val, ok := packet.Value.(int64)
 				if ok {
-					if warningPacket.Tag == 0 {
+					if child.Tag == 0 {
 						//timeBeforeExpiration
 						c.Expire = val
-						warningPacket.Value = c.Expire
-					} else if warningPacket.Tag == 1 {
+						child.Value = c.Expire
+					} else if child.Tag == 1 {
 						//graceAuthNsRemaining
 						c.Grace = val
-						warningPacket.Value = c.Grace
+						child.Value = c.Grace
 					}
 				}
 			} else if child.Tag == 1 {

vendor/gopkg.in/ldap.v2/debug.go (generated, vendored): 2 changed lines

@@ -6,7 +6,7 @@ import (
 	"gopkg.in/asn1-ber.v1"
 )

-// debugging type
+// debbuging type
 // - has a Printf method to write the debug output
 type debugging bool

vendor/gopkg.in/ldap.v2/dn.go (generated, vendored): 103 changed lines

@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 //
-// File contains DN parsing functionality
+// File contains DN parsing functionallity
 //
 // https://tools.ietf.org/html/rfc4514
 //
@@ -52,7 +52,7 @@ import (
 	"fmt"
 	"strings"
-	"gopkg.in/asn1-ber.v1"
+	ber "gopkg.in/asn1-ber.v1"
 )

 // AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514
@@ -83,19 +83,9 @@ func ParseDN(str string) (*DN, error) {
 	attribute := new(AttributeTypeAndValue)
 	escaping := false
-	unescapedTrailingSpaces := 0
-	stringFromBuffer := func() string {
-		s := buffer.String()
-		s = s[0 : len(s)-unescapedTrailingSpaces]
-		buffer.Reset()
-		unescapedTrailingSpaces = 0
-		return s
-	}
 	for i := 0; i < len(str); i++ {
 		char := str[i]
 		if escaping {
-			unescapedTrailingSpaces = 0
 			escaping = false
 			switch char {
 			case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\':
@@ -117,10 +107,10 @@ func ParseDN(str string) (*DN, error) {
 			buffer.WriteByte(dst[0])
 			i++
 		} else if char == '\\' {
-			unescapedTrailingSpaces = 0
 			escaping = true
 		} else if char == '=' {
-			attribute.Type = stringFromBuffer()
+			attribute.Type = buffer.String()
+			buffer.Reset()
 			// Special case: If the first character in the value is # the
 			// following data is BER encoded so we can just fast forward
 			// and decode.
@@ -143,10 +133,7 @@ func ParseDN(str string) (*DN, error) {
 			}
 		} else if char == ',' || char == '+' {
 			// We're done with this RDN or value, push it
-			if len(attribute.Type) == 0 {
-				return nil, errors.New("incomplete type, value pair")
-			}
-			attribute.Value = stringFromBuffer()
+			attribute.Value = buffer.String()
 			rdn.Attributes = append(rdn.Attributes, attribute)
 			attribute = new(AttributeTypeAndValue)
 			if char == ',' {
@@ -154,17 +141,8 @@ func ParseDN(str string) (*DN, error) {
 				rdn = new(RelativeDN)
 				rdn.Attributes = make([]*AttributeTypeAndValue, 0)
 			}
-		} else if char == ' ' && buffer.Len() == 0 {
-			// ignore unescaped leading spaces
-			continue
+			buffer.Reset()
 		} else {
-			if char == ' ' {
-				// Track unescaped spaces in case they are trailing and we need to remove them
-				unescapedTrailingSpaces++
-			} else {
-				// Reset if we see a non-space char
-				unescapedTrailingSpaces = 0
-			}
 			buffer.WriteByte(char)
 		}
 	}
@@ -172,76 +150,9 @@ func ParseDN(str string) (*DN, error) {
 		if len(attribute.Type) == 0 {
 			return nil, errors.New("DN ended with incomplete type, value pair")
 		}
-		attribute.Value = stringFromBuffer()
+		attribute.Value = buffer.String()
 		rdn.Attributes = append(rdn.Attributes, attribute)
 		dn.RDNs = append(dn.RDNs, rdn)
 	}
 	return dn, nil
 }
-
-// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
-// Returns true if they have the same number of relative distinguished names
-// and corresponding relative distinguished names (by position) are the same.
-func (d *DN) Equal(other *DN) bool {
-	if len(d.RDNs) != len(other.RDNs) {
-		return false
-	}
-	for i := range d.RDNs {
-		if !d.RDNs[i].Equal(other.RDNs[i]) {
-			return false
-		}
-	}
-	return true
-}
-
-// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN.
-// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com"
-// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com"
-// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com"
-func (d *DN) AncestorOf(other *DN) bool {
-	if len(d.RDNs) >= len(other.RDNs) {
-		return false
-	}
-	// Take the last `len(d.RDNs)` RDNs from the other DN to compare against
-	otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):]
-	for i := range d.RDNs {
-		if !d.RDNs[i].Equal(otherRDNs[i]) {
-			return false
-		}
-	}
-	return true
-}
-
-// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch).
-// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues
-// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type.
-// The order of attributes is not significant.
-// Case of attribute types is not significant.
-func (r *RelativeDN) Equal(other *RelativeDN) bool {
-	if len(r.Attributes) != len(other.Attributes) {
-		return false
-	}
-	return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes)
-}
-
-func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool {
-	for _, attr := range attrs {
-		found := false
-		for _, myattr := range r.Attributes {
-			if myattr.Equal(attr) {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
-
-// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue
-// Case of the attribute type is not significant
-func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool {
-	return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value
-}
vendor/gopkg.in/ldap.v2/error.go (generated, vendored): 7 changed lines

@@ -97,13 +97,6 @@ var LDAPResultCodeMap = map[uint8]string{
 	LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited",
 	LDAPResultAffectsMultipleDSAs:       "Affects Multiple DSAs",
 	LDAPResultOther:                     "Other",
-
-	ErrorNetwork:            "Network Error",
-	ErrorFilterCompile:      "Filter Compile Error",
-	ErrorFilterDecompile:    "Filter Decompile Error",
-	ErrorDebugging:          "Debugging Error",
-	ErrorUnexpectedMessage:  "Unexpected Message",
-	ErrorUnexpectedResponse: "Unexpected Response",
 }

 func getLDAPResultCode(packet *ber.Packet) (code uint8, description string) {
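The removed map entries attach human-readable text to the library's own error codes, the same codes NewError wraps in the conn.go hunks (for example ErrorNetwork for "connection closed"). A hedged sketch of inspecting such an error; the *ldap.Error type and its ResultCode field are assumed from the v2 API and do not appear in this hunk:

package main

import (
	"errors"
	"fmt"

	"gopkg.in/ldap.v2"
)

func main() {
	err := ldap.NewError(ldap.ErrorNetwork, errors.New("ldap: connection closed"))
	fmt.Println(err) // the printed message includes the mapped description when one exists

	// Assumed v2 API: errors built by NewError are *ldap.Error values
	// carrying the numeric result code.
	if lerr, ok := err.(*ldap.Error); ok {
		fmt.Println("result code:", lerr.ResultCode)
	}
}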

vendor/gopkg.in/ldap.v2/filter.go (generated, vendored): 5 changed lines

@@ -82,10 +82,7 @@ func CompileFilter(filter string) (*ber.Packet, error) {
 	if err != nil {
 		return nil, err
 	}
-	switch {
-	case pos > len(filter):
-		return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter"))
-	case pos < len(filter):
+	if pos != len(filter) {
 		return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:])))
 	}
 	return packet, nil
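Both sides of this hunk reject a filter with leftover input once the outer expression is parsed; the added (+) lines simply collapse the pos > len(filter) case. A quick usage sketch of the exported CompileFilter showing the "extra at end" error path (illustrative only):

package main

import (
	"fmt"

	"gopkg.in/ldap.v2"
)

func main() {
	// A well-formed filter compiles without error.
	if _, err := ldap.CompileFilter("(cn=admin)"); err != nil {
		fmt.Println("unexpected:", err)
	}

	// The trailing ")" is left over after parsing, which triggers the
	// "finished compiling filter with extra at end" error in both versions.
	if _, err := ldap.CompileFilter("(cn=admin))"); err != nil {
		fmt.Println("rejected:", err)
	}
}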

vendor/gopkg.in/ldap.v2/ldap.go (generated, vendored): 57 changed lines

@@ -9,7 +9,7 @@ import (
 	"io/ioutil"
 	"os"
-	"gopkg.in/asn1-ber.v1"
+	ber "gopkg.in/asn1-ber.v1"
 )

 // LDAP Application Codes
@@ -153,47 +153,16 @@ func addLDAPDescriptions(packet *ber.Packet) (err error) {
 func addControlDescriptions(packet *ber.Packet) {
 	packet.Description = "Controls"
 	for _, child := range packet.Children {
-		var value *ber.Packet
-		controlType := ""
 		child.Description = "Control"
-		switch len(child.Children) {
-		case 0:
-			// at least one child is required for control type
-			continue
-		case 1:
-			// just type, no criticality or value
-			controlType = child.Children[0].Value.(string)
-			child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
-		case 2:
-			controlType = child.Children[0].Value.(string)
-			child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
-			// Children[1] could be criticality or value (both are optional)
-			// duck-type on whether this is a boolean
-			if _, ok := child.Children[1].Value.(bool); ok {
-				child.Children[1].Description = "Criticality"
-			} else {
-				child.Children[1].Description = "Control Value"
-				value = child.Children[1]
-			}
-		case 3:
-			// criticality and value present
-			controlType = child.Children[0].Value.(string)
-			child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")"
-			child.Children[1].Description = "Criticality"
-			child.Children[2].Description = "Control Value"
-			value = child.Children[2]
-		default:
-			// more than 3 children is invalid
-			continue
-		}
-
-		if value == nil {
-			continue
-		}
-
-		switch controlType {
+		child.Children[0].Description = "Control Type (" + ControlTypeMap[child.Children[0].Value.(string)] + ")"
+		value := child.Children[1]
+		if len(child.Children) == 3 {
+			child.Children[1].Description = "Criticality"
+			value = child.Children[2]
+		}
+		value.Description = "Control Value"
+
+		switch child.Children[0].Value.(string) {
 		case ControlTypePaging:
 			value.Description += " (Paging)"
 			if value.Value != nil {
@@ -219,18 +188,18 @@ func addControlDescriptions(packet *ber.Packet) {
 				for _, child := range sequence.Children {
 					if child.Tag == 0 {
 						//Warning
-						warningPacket := child.Children[0]
-						packet := ber.DecodePacket(warningPacket.Data.Bytes())
+						child := child.Children[0]
+						packet := ber.DecodePacket(child.Data.Bytes())
 						val, ok := packet.Value.(int64)
 						if ok {
-							if warningPacket.Tag == 0 {
+							if child.Tag == 0 {
 								//timeBeforeExpiration
 								value.Description += " (TimeBeforeExpiration)"
-								warningPacket.Value = val
-							} else if warningPacket.Tag == 1 {
+								child.Value = val
+							} else if child.Tag == 1 {
 								//graceAuthNsRemaining
 								value.Description += " (GraceAuthNsRemaining)"
-								warningPacket.Value = val
+								child.Value = val
 							}
 						}
 					} else if child.Tag == 1 {


@@ -135,10 +135,10 @@ func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*Pa
 	extendedResponse := packet.Children[1]
 	for _, child := range extendedResponse.Children {
 		if child.Tag == 11 {
-			passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes())
-			if len(passwordModifyResponseValue.Children) == 1 {
-				if passwordModifyResponseValue.Children[0].Tag == 0 {
-					result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes())
+			passwordModifyReponseValue := ber.DecodePacket(child.Data.Bytes())
+			if len(passwordModifyReponseValue.Children) == 1 {
+				if passwordModifyReponseValue.Children[0].Tag == 0 {
+					result.GeneratedPassword = ber.DecodeString(passwordModifyReponseValue.Children[0].Data.Bytes())
 				}
 			}
 		}
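The loop above decodes the optional generated-password field of the password-modify extended response into GeneratedPassword. A hedged end-to-end sketch; ldap.Dial and ldap.NewPasswordModifyRequest are assumed from the v2 API, only PasswordModify and GeneratedPassword appear in the hunk:

package main

import (
	"fmt"
	"log"

	"gopkg.in/ldap.v2"
)

func main() {
	// Hypothetical server address for illustration only.
	l, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// An empty new password asks the server to generate one; that value is
	// what the decoding loop in the hunk copies into GeneratedPassword.
	req := ldap.NewPasswordModifyRequest("uid=alice,dc=example,dc=com", "oldSecret", "")
	res, err := l.PasswordModify(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("server-generated password:", res.GeneratedPassword)
}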