	Graceful: Xorm, RepoIndexer, Cron and Others (#9282)
* Change graceful to use a singleton obtained through GetManager instead of a global.
* Graceful: Make TestPullRequests shutdownable
* Graceful: Make the cron tasks graceful
* Graceful: AddTestPullRequest run in graceful ctx
* Graceful: SyncMirrors shutdown
* Graceful: SetDefaultContext for Xorm to be HammerContext
* Avoid starting graceful for migrate commands and checkout
* Graceful: DeliverHooks now can be shutdown
* Fix multiple syncing errors in modules/sync/UniqueQueue & Make UniqueQueue closable
* Begin the process of making the repo indexer shutdown gracefully
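The same shape recurs throughout the hunks below: a long-running task now takes a context.Context and polls ctx.Done() between units of work, so shutdown can interrupt it at a safe boundary. A minimal, self-contained sketch of that pattern (the names here are illustrative, not from the PR):

package main

import (
	"context"
	"fmt"
)

// processAll stands in for tasks like GitFsck or SyncExternalUsers:
// it checks for cancellation between items, never mid-item.
func processAll(ctx context.Context, items []string) error {
	for _, item := range items {
		select {
		case <-ctx.Done():
			return fmt.Errorf("aborted due to shutdown at %q", item)
		default:
		}
		fmt.Println("processing", item)
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate a shutdown signal arriving before any work happens
	fmt.Println(processAll(ctx, []string{"repo1", "repo2"}))
}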
@@ -5,6 +5,8 @@
 package cmd
 
 import (
+	"context"
+
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/models/migrations"
 	"code.gitea.io/gitea/modules/log"
@@ -32,7 +34,7 @@ func runMigrate(ctx *cli.Context) error {
 	log.Trace("Log path: %s", setting.LogRootPath)
 	setting.InitDBConfig()
 
-	if err := models.NewEngine(migrations.Migrate); err != nil {
+	if err := models.NewEngine(context.Background(), migrations.Migrate); err != nil {
 		log.Fatal("Failed to initialize ORM engine: %v", err)
 		return err
 	}
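Note that for one-shot commands like migrate the graceful manager is deliberately not started (per the commit message, "Avoid starting graceful for migrate commands and checkout"): a plain context.Background() is handed to NewEngine, which keeps these commands free of the manager's signal handling and server wait groups.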

cmd/web.go (10 changed lines)

@@ -5,6 +5,7 @@
 package cmd
 
 import (
+	"context"
 	"fmt"
 	"net/http"
 	_ "net/http/pprof" // Used for debugging if enabled and a web server is running
@@ -96,6 +97,10 @@ func runLetsEncryptFallbackHandler(w http.ResponseWriter, r *http.Request) {
 }
 
 func runWeb(ctx *cli.Context) error {
+	managerCtx, cancel := context.WithCancel(context.Background())
+	graceful.InitManager(managerCtx)
+	defer cancel()
+
 	if os.Getppid() > 1 && len(os.Getenv("LISTEN_FDS")) > 0 {
 		log.Info("Restarting Gitea on PID: %d from parent PID: %d", os.Getpid(), os.Getppid())
 	} else {
@@ -108,7 +113,7 @@ func runWeb(ctx *cli.Context) error {
 	}
 
 	// Perform global initialization
-	routers.GlobalInit()
+	routers.GlobalInit(graceful.GetManager().HammerContext())
 
 	// Set up Macaron
 	m := routes.NewMacaron()
@@ -199,8 +204,7 @@ func runWeb(ctx *cli.Context) error {
 		log.Critical("Failed to start server: %v", err)
 	}
 	log.Info("HTTP Listener: %s Closed", listenAddr)
-	graceful.Manager.WaitForServers()
-	graceful.Manager.WaitForTerminate()
+	<-graceful.GetManager().Done()
 	log.Info("PID: %d Gitea Web Finished", os.Getpid())
 	log.Close()
 	return nil
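A minimal sketch of the start-up/shutdown flow this hunk introduces, using a hypothetical doServe helper in place of Gitea's real listener setup:

package main

import (
	"context"
	"log"

	"code.gitea.io/gitea/modules/graceful"
)

// doServe is a hypothetical stand-in for starting Gitea's listeners;
// they should watch ctx and stop when it is cancelled.
func doServe(ctx context.Context) {}

func main() {
	// Create the manager exactly once, at process start, from a cancellable context.
	managerCtx, cancel := context.WithCancel(context.Background())
	graceful.InitManager(managerCtx)
	defer cancel()

	// Long-lived initialization receives the HammerContext: it survives a
	// graceful shutdown and is only cancelled when the manager "hammers".
	doServe(graceful.GetManager().HammerContext())

	// Done() closes only after shutdown, hammer and terminate have all run,
	// so blocking here replaces the old WaitForServers + WaitForTerminate pair.
	// (A real server blocks here until a termination signal arrives.)
	<-graceful.GetManager().Done()
	log.Println("web finished")
}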
@@ -28,13 +28,13 @@ func runHTTPSWithTLSConfig(network, listenAddr string, tlsConfig *tls.Config, m
 
 // NoHTTPRedirector tells our cleanup routine that we will not be using a fallback http redirector
 func NoHTTPRedirector() {
-	graceful.Manager.InformCleanup()
+	graceful.GetManager().InformCleanup()
 }
 
 // NoMainListener tells our cleanup routine that we will not be using a possibly provided listener
 // for our main HTTP/HTTPS service
 func NoMainListener() {
-	graceful.Manager.InformCleanup()
+	graceful.GetManager().InformCleanup()
 }
 
 func runFCGI(network, listenAddr string, m http.Handler) error {
@@ -5,6 +5,7 @@ Checkout a PR and load the tests data into sqlite database
 */
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"io/ioutil"
@@ -92,7 +93,7 @@ func runPR() {
 	//x, err = xorm.NewEngine("sqlite3", "file::memory:?cache=shared")
 
 	var helper testfixtures.Helper = &testfixtures.SQLite{}
-	models.NewEngine(func(_ *xorm.Engine) error {
+	models.NewEngine(context.Background(), func(_ *xorm.Engine) error {
 		return nil
 	})
 	models.HasEngine = true
@@ -5,6 +5,7 @@
 package integrations
 
 import (
+	"context"
 	"net/http"
 	"os"
 	"strings"
@@ -147,7 +148,7 @@ func TestLDAPUserSync(t *testing.T) {
 	}
 	defer prepareTestEnv(t)()
 	addAuthSourceLDAP(t, "")
-	models.SyncExternalUsers()
+	models.SyncExternalUsers(context.Background())
 
 	session := loginUser(t, "user1")
 	// Check if users exists
@@ -206,7 +207,8 @@ func TestLDAPUserSSHKeySync(t *testing.T) {
 	}
 	defer prepareTestEnv(t)()
 	addAuthSourceLDAP(t, "sshPublicKey")
-	models.SyncExternalUsers()
+
+	models.SyncExternalUsers(context.Background())
 
 	// Check if users has SSH keys synced
 	for _, u := range gitLDAPUsers {
@@ -6,6 +6,7 @@ package integrations
 
 import (
 	"bytes"
+	"context"
 	"database/sql"
 	"encoding/json"
 	"fmt"
@@ -24,6 +25,7 @@ import (
 
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/base"
+	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/routers"
 	"code.gitea.io/gitea/routers/routes"
@@ -55,6 +57,10 @@ func NewNilResponseRecorder() *NilResponseRecorder {
 }
 
 func TestMain(m *testing.M) {
+	managerCtx, cancel := context.WithCancel(context.Background())
+	graceful.InitManager(managerCtx)
+	defer cancel()
+
 	initIntegrationTest()
 	mac = routes.NewMacaron()
 	routes.RegisterRoutes(mac)
@@ -171,7 +177,7 @@ func initIntegrationTest() {
 		}
 		defer db.Close()
 	}
-	routers.GlobalInit()
+	routers.GlobalInit(graceful.GetManager().HammerContext())
 }
 
 func prepareTestEnv(t testing.TB, skip ...int) func() {
@@ -6,6 +6,7 @@ package migrations
 
 import (
 	"compress/gzip"
+	"context"
 	"database/sql"
 	"fmt"
 	"io/ioutil"
@@ -220,7 +221,7 @@ func doMigrationTest(t *testing.T, version string) {
 	err := models.SetEngine()
 	assert.NoError(t, err)
 
-	err = models.NewEngine(wrappedMigrate)
+	err = models.NewEngine(context.Background(), wrappedMigrate)
 	assert.NoError(t, err)
 	currentEngine.Close()
 }
@@ -5,6 +5,7 @@
 package models
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -525,7 +526,8 @@ func (deletedBranch *DeletedBranch) LoadUser() {
 }
 
 // RemoveOldDeletedBranches removes old deleted branches
-func RemoveOldDeletedBranches() {
+func RemoveOldDeletedBranches(ctx context.Context) {
+	// Nothing to do for shutdown or terminate
 	log.Trace("Doing: DeletedBranchesCleanup")
 
 	deleteBefore := time.Now().Add(-setting.Cron.DeletedBranchesCleanup.OlderThan)
@@ -6,6 +6,7 @@
 package models
 
 import (
+	"context"
 	"database/sql"
 	"errors"
 	"fmt"
@@ -164,11 +165,13 @@ func SetEngine() (err error) {
 }
 
 // NewEngine initializes a new xorm.Engine
-func NewEngine(migrateFunc func(*xorm.Engine) error) (err error) {
+func NewEngine(ctx context.Context, migrateFunc func(*xorm.Engine) error) (err error) {
 	if err = SetEngine(); err != nil {
 		return err
 	}
 
+	x.SetDefaultContext(ctx)
+
 	if err = x.Ping(); err != nil {
 		return err
 	}
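What SetDefaultContext buys is that queries issued without an explicit session context run under the supplied context, so they are cancelled rather than outliving a shutdown. A minimal sketch, assuming an in-memory SQLite engine and a driver that honors query contexts (this toy setup is not from the PR):

package main

import (
	"context"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
)

func main() {
	engine, err := xorm.NewEngine("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	engine.SetDefaultContext(ctx) // as in NewEngine above

	_, err = engine.Exec("CREATE TABLE t (id INTEGER)")
	fmt.Println("before cancel:", err) // <nil>

	cancel()
	_, err = engine.Exec("INSERT INTO t VALUES (1)")
	fmt.Println("after cancel:", err) // expected: context canceled
}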
@@ -68,11 +68,12 @@ func GetUnmergedPullRequestsByBaseInfo(repoID int64, branch string) ([]*PullRequ
 		Find(&prs)
 }
 
-// GetPullRequestsByCheckStatus returns all pull requests according the special checking status.
-func GetPullRequestsByCheckStatus(status PullRequestStatus) ([]*PullRequest, error) {
-	prs := make([]*PullRequest, 0, 10)
-	return prs, x.
+// GetPullRequestIDsByCheckStatus returns all pull requests according the special checking status.
+func GetPullRequestIDsByCheckStatus(status PullRequestStatus) ([]int64, error) {
+	prs := make([]int64, 0, 10)
+	return prs, x.Table("pull_request").
 		Where("status=?", status).
+		Cols("pull_request.id").
 		Find(&prs)
 }
 
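The rename reflects a narrowing of the query: instead of hydrating full *PullRequest rows, only pull_request.id is selected, presumably so the shutdownable test-pull-request machinery mentioned in the commit message can queue plain int64 IDs and reload each request fresh when it is actually processed.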
@@ -7,6 +7,7 @@ package models
 
 import (
 	"bytes"
+	"context"
 	"crypto/md5"
 	"errors"
 	"fmt"
@@ -2098,19 +2099,27 @@ func DeleteRepositoryArchives() error {
 }
 
 // DeleteOldRepositoryArchives deletes old repository archives.
-func DeleteOldRepositoryArchives() {
+func DeleteOldRepositoryArchives(ctx context.Context) {
 	log.Trace("Doing: ArchiveCleanup")
 
-	if err := x.Where("id > 0").Iterate(new(Repository), deleteOldRepositoryArchives); err != nil {
+	if err := x.Where("id > 0").Iterate(new(Repository), func(idx int, bean interface{}) error {
+		return deleteOldRepositoryArchives(ctx, idx, bean)
+	}); err != nil {
 		log.Error("ArchiveClean: %v", err)
 	}
 }
 
-func deleteOldRepositoryArchives(idx int, bean interface{}) error {
+func deleteOldRepositoryArchives(ctx context.Context, idx int, bean interface{}) error {
 	repo := bean.(*Repository)
 	basePath := filepath.Join(repo.RepoPath(), "archives")
 
 	for _, ty := range []string{"zip", "targz"} {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("Aborted due to shutdown:\nin delete of old repository archives %v\nat delete file %s", repo, ty)
+		default:
+		}
+
 		path := filepath.Join(basePath, ty)
 		file, err := os.Open(path)
 		if err != nil {
@@ -2133,6 +2142,11 @@ func deleteOldRepositoryArchives(idx int, bean interface{}) error {
 		minimumOldestTime := time.Now().Add(-setting.Cron.ArchiveCleanup.OlderThan)
 		for _, info := range files {
 			if info.ModTime().Before(minimumOldestTime) && !info.IsDir() {
+				select {
+				case <-ctx.Done():
+					return fmt.Errorf("Aborted due to shutdown:\nin delete of old repository archives %v\nat delete file %s - %s", repo, ty, info.Name())
+				default:
+				}
 				toDelete := filepath.Join(path, info.Name())
 				// This is a best-effort purge, so we do not check error codes to confirm removal.
 				if err = os.Remove(toDelete); err != nil {
@@ -2226,13 +2240,17 @@ func SyncRepositoryHooks() error {
 }
 
 // GitFsck calls 'git fsck' to check repository health.
-func GitFsck() {
+func GitFsck(ctx context.Context) {
 	log.Trace("Doing: GitFsck")
 
 	if err := x.
 		Where("id>0 AND is_fsck_enabled=?", true).BufferSize(setting.Database.IterateBufferSize).
 		Iterate(new(Repository),
 			func(idx int, bean interface{}) error {
+				select {
+				case <-ctx.Done():
+					return fmt.Errorf("Aborted due to shutdown")
+				default:
+				}
 				repo := bean.(*Repository)
 				repoPath := repo.RepoPath()
 				log.Trace("Running health check on repository %s", repoPath)
@@ -2278,13 +2296,19 @@ type repoChecker struct {
 	desc                 string
 }
 
-func repoStatsCheck(checker *repoChecker) {
+func repoStatsCheck(ctx context.Context, checker *repoChecker) {
 	results, err := x.Query(checker.querySQL)
 	if err != nil {
 		log.Error("Select %s: %v", checker.desc, err)
 		return
 	}
 	for _, result := range results {
+		select {
+		case <-ctx.Done():
+			log.Warn("CheckRepoStats: Aborting due to shutdown")
+			return
+		default:
+		}
 		id := com.StrTo(result["id"]).MustInt64()
 		log.Trace("Updating %s: %d", checker.desc, id)
 		_, err = x.Exec(checker.correctSQL, id, id)
@@ -2295,7 +2319,7 @@ func repoStatsCheck(checker *repoChecker) {
 }
 
 // CheckRepoStats checks the repository stats
-func CheckRepoStats() {
+func CheckRepoStats(ctx context.Context) {
 	log.Trace("Doing: CheckRepoStats")
 
 	checkers := []*repoChecker{
@@ -2331,7 +2355,13 @@ func CheckRepoStats() {
 		},
 	}
 	for i := range checkers {
-		repoStatsCheck(checkers[i])
+		select {
+		case <-ctx.Done():
+			log.Warn("CheckRepoStats: Aborting due to shutdown")
+			return
+		default:
+			repoStatsCheck(ctx, checkers[i])
+		}
 	}
 
 	// ***** START: Repository.NumClosedIssues *****
@@ -2341,6 +2371,12 @@ func CheckRepoStats() {
 		log.Error("Select %s: %v", desc, err)
 	} else {
 		for _, result := range results {
+			select {
+			case <-ctx.Done():
+				log.Warn("CheckRepoStats: Aborting due to shutdown")
+				return
+			default:
+			}
 			id := com.StrTo(result["id"]).MustInt64()
 			log.Trace("Updating %s: %d", desc, id)
 			_, err = x.Exec("UPDATE `repository` SET num_closed_issues=(SELECT COUNT(*) FROM `issue` WHERE repo_id=? AND is_closed=? AND is_pull=?) WHERE id=?", id, true, false, id)
@@ -2358,6 +2394,12 @@ func CheckRepoStats() {
 		log.Error("Select %s: %v", desc, err)
 	} else {
 		for _, result := range results {
+			select {
+			case <-ctx.Done():
+				log.Warn("CheckRepoStats: Aborting due to shutdown")
+				return
+			default:
+			}
 			id := com.StrTo(result["id"]).MustInt64()
 			log.Trace("Updating %s: %d", desc, id)
 			_, err = x.Exec("UPDATE `repository` SET num_closed_pulls=(SELECT COUNT(*) FROM `issue` WHERE repo_id=? AND is_closed=? AND is_pull=?) WHERE id=?", id, true, true, id)
@@ -2375,6 +2417,12 @@ func CheckRepoStats() {
 		log.Error("Select repository count 'num_forks': %v", err)
 	} else {
 		for _, result := range results {
+			select {
+			case <-ctx.Done():
+				log.Warn("CheckRepoStats: Aborting due to shutdown")
+				return
+			default:
+			}
 			id := com.StrTo(result["id"]).MustInt64()
 			log.Trace("Updating repository count 'num_forks': %d", id)
 
@@ -4,6 +4,12 @@
 
 package models
 
+import (
+	"fmt"
+
+	"xorm.io/builder"
+)
+
 // RepoIndexerStatus status of a repo's entry in the repo indexer
 // For now, implicitly refers to default branch
 type RepoIndexerStatus struct {
@@ -12,6 +18,31 @@ type RepoIndexerStatus struct {
 	CommitSha string `xorm:"VARCHAR(40)"`
 }
 
+// GetUnindexedRepos returns repos which do not have an indexer status
+func GetUnindexedRepos(maxRepoID int64, page, pageSize int) ([]int64, error) {
+	ids := make([]int64, 0, 50)
+	cond := builder.Cond(builder.IsNull{
+		"repo_indexer_status.id",
+	})
+	sess := x.Table("repository").Join("LEFT OUTER", "repo_indexer_status", "repository.id = repo_indexer_status.repo_id")
+	if maxRepoID > 0 {
+		cond = builder.And(cond, builder.Lte{
+			"repository.id": maxRepoID,
+		})
+	}
+	if page >= 0 && pageSize > 0 {
+		start := 0
+		if page > 0 {
+			start = (page - 1) * pageSize
+		}
+		sess.Limit(pageSize, start)
+	}
+
+	sess.Where(cond).Cols("repository.id").Desc("repository.id")
+	err := sess.Find(&ids)
+	return ids, err
+}
+
 // GetIndexerStatus loads repo codes indxer status
 func (repo *Repository) GetIndexerStatus() error {
 	if repo.IndexerStatus != nil {
@@ -31,15 +62,21 @@ func (repo *Repository) GetIndexerStatus() error {
 // UpdateIndexerStatus updates indexer status
 func (repo *Repository) UpdateIndexerStatus(sha string) error {
 	if err := repo.GetIndexerStatus(); err != nil {
-		return err
+		return fmt.Errorf("UpdateIndexerStatus: Unable to getIndexerStatus for repo: %s/%s Error: %v", repo.MustOwnerName(), repo.Name, err)
 	}
 	if len(repo.IndexerStatus.CommitSha) == 0 {
 		repo.IndexerStatus.CommitSha = sha
 		_, err := x.Insert(repo.IndexerStatus)
-		return err
+		if err != nil {
+			return fmt.Errorf("UpdateIndexerStatus: Unable to insert repoIndexerStatus for repo: %s/%s Sha: %s Error: %v", repo.MustOwnerName(), repo.Name, sha, err)
+		}
+		return nil
	}
 	repo.IndexerStatus.CommitSha = sha
 	_, err := x.ID(repo.IndexerStatus.ID).Cols("commit_sha").
 		Update(repo.IndexerStatus)
-	return err
+	if err != nil {
+		return fmt.Errorf("UpdateIndexerStatus: Unable to update repoIndexerStatus for repo: %s/%s Sha: %s Error: %v", repo.MustOwnerName(), repo.Name, sha, err)
+	}
+	return nil
 }
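A sketch of how a populate loop might consume GetUnindexedRepos (illustrative only; the indexer's own loop lives elsewhere in the PR). Since each successfully indexed repository gains a repo_indexer_status row, it drops out of the result set, so repeatedly fetching the first page drains the backlog:

package main

import "fmt"

// pending simulates repository IDs that still lack an indexer status row.
var pending = map[int64]bool{1: true, 2: true, 3: true}

// getUnindexedRepos is a toy stand-in for models.GetUnindexedRepos,
// returning IDs <= maxRepoID in descending order, like Desc("repository.id").
func getUnindexedRepos(maxRepoID int64, page, pageSize int) ([]int64, error) {
	ids := make([]int64, 0, pageSize)
	for id := maxRepoID; id > 0 && len(ids) < pageSize; id-- {
		if pending[id] {
			ids = append(ids, id)
		}
	}
	return ids, nil
}

func indexOne(id int64) error {
	fmt.Println("indexing repo", id)
	delete(pending, id) // the real code inserts a repo_indexer_status row
	return nil
}

func main() {
	for {
		ids, err := getUnindexedRepos(3, 1, 2)
		if err != nil || len(ids) == 0 {
			break // backlog drained (or error)
		}
		for _, id := range ids {
			if err := indexOne(id); err != nil {
				return // avoid spinning on a repo that keeps failing
			}
		}
	}
}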
@@ -7,6 +7,7 @@ package models
 
 import (
 	"container/list"
+	"context"
 	"crypto/md5"
 	"crypto/sha256"
 	"crypto/subtle"
@@ -1695,7 +1696,7 @@ func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []str
 }
 
 // SyncExternalUsers is used to synchronize users with external authorization source
-func SyncExternalUsers() {
+func SyncExternalUsers(ctx context.Context) {
 	log.Trace("Doing: SyncExternalUsers")
 
 	ls, err := LoginSources()
@@ -1710,6 +1711,12 @@ func SyncExternalUsers() {
 		if !s.IsActived || !s.IsSyncEnabled {
 			continue
 		}
+		select {
+		case <-ctx.Done():
+			log.Warn("SyncExternalUsers: Aborted due to shutdown before update of %s", s.Name)
+			return
+		default:
+		}
 
 		if s.IsLDAP() {
 			log.Trace("Doing: SyncExternalUsers[%s]", s.Name)
@@ -1727,6 +1734,12 @@ func SyncExternalUsers() {
 				log.Error("SyncExternalUsers: %v", err)
 				return
 			}
+			select {
+			case <-ctx.Done():
+				log.Warn("SyncExternalUsers: Aborted due to shutdown before update of %s", s.Name)
+				return
+			default:
+			}
 
 			sr, err := s.LDAP().SearchEntries()
 			if err != nil {
@@ -1735,6 +1748,19 @@ func SyncExternalUsers() {
 			}
 
 			for _, su := range sr {
+				select {
+				case <-ctx.Done():
+					log.Warn("SyncExternalUsers: Aborted due to shutdown at update of %s before completed update of users", s.Name)
+					// Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed
+					if sshKeysNeedUpdate {
+						err = RewriteAllPublicKeys()
+						if err != nil {
+							log.Error("RewriteAllPublicKeys: %v", err)
+						}
+					}
+					return
+				default:
+				}
 				if len(su.Username) == 0 {
 					continue
 				}
@@ -1819,6 +1845,13 @@ func SyncExternalUsers() {
 				}
 			}
 
+			select {
+			case <-ctx.Done():
+				log.Warn("SyncExternalUsers: Aborted due to shutdown at update of %s before delete users", s.Name)
+				return
+			default:
+			}
+
 			// Deactivate users not present in LDAP
 			if updateExisting {
 				for _, usr := range users {
@@ -6,9 +6,11 @@
 package cron
 
 import (
+	"context"
 	"time"
 
 	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/migrations"
 	"code.gitea.io/gitea/modules/setting"
@@ -37,17 +39,19 @@ var taskStatusTable = sync.NewStatusTable()
 type Func func()
 
 // WithUnique wrap a cron func with an unique running check
-func WithUnique(name string, body Func) Func {
+func WithUnique(name string, body func(context.Context)) Func {
 	return func() {
 		if !taskStatusTable.StartIfNotRunning(name) {
 			return
 		}
 		defer taskStatusTable.Stop(name)
-		body()
+		graceful.GetManager().RunWithShutdownContext(body)
 	}
 }
 
 // NewContext begins cron tasks
+// Each cron task is run within the shutdown context as a running server
+// AtShutdown the cron server is stopped
 func NewContext() {
 	var (
 		entry *cron.Entry
@@ -129,6 +133,7 @@ func NewContext() {
 	go WithUnique(updateMigrationPosterID, migrations.UpdateMigrationPosterID)()
 
 	c.Start()
+	graceful.GetManager().RunAtShutdown(context.Background(), c.Stop)
 }
 
 // ListTasks returns all running cron tasks.
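A minimal sketch of the reworked WithUnique contract: cron bodies now take a context.Context, and the wrapper hands them to the graceful manager so shutdown can cancel them. The plumbing here is stubbed out for illustration:

package main

import (
	"context"
	"fmt"
)

// withUnique mirrors the shape of cron.WithUnique above. The real code
// first checks taskStatusTable.StartIfNotRunning(name), then calls
// graceful.GetManager().RunWithShutdownContext(body), which registers the
// task in the running-server wait group and passes it the shutdown context.
func withUnique(name string, body func(context.Context)) func() {
	return func() {
		body(context.Background()) // stand-in for RunWithShutdownContext
	}
}

func main() {
	task := withUnique("check_repo_stats", func(ctx context.Context) {
		select {
		case <-ctx.Done():
			fmt.Println("aborted by shutdown")
		default:
			fmt.Println("running CheckRepoStats")
		}
	})
	task()
}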
@@ -106,7 +106,8 @@ func SetExecutablePath(path string) error {
 }
 
 // Init initializes git module
-func Init() error {
+func Init(ctx context.Context) error {
+	DefaultContext = ctx
 	// Git requires setting user.name and user.email in order to commit changes.
 	for configKey, defaultValue := range map[string]string{"user.name": "Gitea", "user.email": "gitea@fake.local"} {
 		if stdout, stderr, err := process.GetManager().Exec("git.Init(get setting)", GitExecutable, "config", "--get", configKey); err != nil || strings.TrimSpace(stdout) == "" {
@@ -5,6 +5,7 @@
 package git
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"testing"
@@ -16,7 +17,7 @@ func fatalTestError(fmtStr string, args ...interface{}) {
 }
 
 func TestMain(m *testing.M) {
-	if err := Init(); err != nil {
+	if err := Init(context.Background()); err != nil {
 		fatalTestError("Init failed: %v", err)
 	}
 
@@ -62,7 +62,7 @@ func (ctx *ChannelContext) Value(key interface{}) interface{} {
 // ShutdownContext returns a context.Context that is Done at shutdown
 // Callers using this context should ensure that they are registered as a running server
 // in order that they are waited for.
-func (g *gracefulManager) ShutdownContext() context.Context {
+func (g *Manager) ShutdownContext() context.Context {
 	return &ChannelContext{
 		done: g.IsShutdown(),
 		err:  ErrShutdown,
@@ -72,7 +72,7 @@ func (g *gracefulManager) ShutdownContext() context.Context {
 // HammerContext returns a context.Context that is Done at hammer
 // Callers using this context should ensure that they are registered as a running server
 // in order that they are waited for.
-func (g *gracefulManager) HammerContext() context.Context {
+func (g *Manager) HammerContext() context.Context {
 	return &ChannelContext{
 		done: g.IsHammer(),
 		err:  ErrHammer,
@@ -82,7 +82,7 @@ func (g *gracefulManager) HammerContext() context.Context {
 // TerminateContext returns a context.Context that is Done at terminate
 // Callers using this context should ensure that they are registered as a terminating server
 // in order that they are waited for.
-func (g *gracefulManager) TerminateContext() context.Context {
+func (g *Manager) TerminateContext() context.Context {
 	return &ChannelContext{
 		done: g.IsTerminate(),
 		err:  ErrTerminate,
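ChannelContext, which these three methods wrap around the shutdown/hammer/terminate channels, adapts a plain close-on-event channel into a context.Context. A standalone re-implementation of the idea, for illustration:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// channelContext pairs a close-on-event channel with a fixed error so it
// can be passed anywhere a context.Context is expected.
type channelContext struct {
	done <-chan struct{}
	err  error
}

func (c *channelContext) Deadline() (time.Time, bool)        { return time.Time{}, false }
func (c *channelContext) Done() <-chan struct{}              { return c.done }
func (c *channelContext) Value(key interface{}) interface{}  { return nil }
func (c *channelContext) Err() error {
	select {
	case <-c.done:
		return c.err
	default:
		return nil
	}
}

var _ context.Context = (*channelContext)(nil) // compile-time interface check

func main() {
	shutdown := make(chan struct{})
	ctx := &channelContext{done: shutdown, err: errors.New("process is shutting down")}
	fmt.Println(ctx.Err()) // <nil> while running
	close(shutdown)
	fmt.Println(ctx.Err()) // the shutdown error once the channel closes
}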
@@ -6,9 +6,9 @@ package graceful
 
 import (
 	"context"
+	"sync"
 	"time"
 
-	"code.gitea.io/gitea/modules/git"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/setting"
@@ -34,14 +34,24 @@ const (
 const numberOfServersToCreate = 3
 
 // Manager represents the graceful server manager interface
-var Manager *gracefulManager
+var manager *Manager
 
-func init() {
-	Manager = newGracefulManager(context.Background())
-	// Set the git default context to the HammerContext
-	git.DefaultContext = Manager.HammerContext()
-	// Set the process default context to the HammerContext
-	process.DefaultContext = Manager.HammerContext()
+var initOnce = sync.Once{}
+
+// GetManager returns the Manager
+func GetManager() *Manager {
+	InitManager(context.Background())
+	return manager
+}
+
+// InitManager creates the graceful manager in the provided context
+func InitManager(ctx context.Context) {
+	initOnce.Do(func() {
+		manager = newGracefulManager(ctx)
+
+		// Set the process default context to the HammerContext
+		process.DefaultContext = manager.HammerContext()
+	})
 }
 
 // CallbackWithContext is combined runnable and context to watch to see if the caller has finished
@@ -61,7 +71,7 @@ type RunnableWithShutdownFns func(atShutdown, atTerminate func(context.Context,
 // Please note that use of the atShutdown and atTerminate callbacks will create go-routines that will wait till their respective signals
 // - users must therefore be careful to only call these as necessary.
 // If run is not expected to run indefinitely RunWithShutdownChan is likely to be more appropriate.
-func (g *gracefulManager) RunWithShutdownFns(run RunnableWithShutdownFns) {
+func (g *Manager) RunWithShutdownFns(run RunnableWithShutdownFns) {
 	g.runningServerWaitGroup.Add(1)
 	defer g.runningServerWaitGroup.Done()
 	run(func(ctx context.Context, atShutdown func()) {
@@ -90,7 +100,7 @@ type RunnableWithShutdownChan func(atShutdown <-chan struct{}, atTerminate Callb
 // (Optionally IsHammer may be waited for instead however, this should be avoided if possible.)
 // The callback function provided to atTerminate must return once termination is complete.
 // Please note that use of the atTerminate function will create a go-routine that will wait till terminate - users must therefore be careful to only call this as necessary.
-func (g *gracefulManager) RunWithShutdownChan(run RunnableWithShutdownChan) {
+func (g *Manager) RunWithShutdownChan(run RunnableWithShutdownChan) {
 	g.runningServerWaitGroup.Add(1)
 	defer g.runningServerWaitGroup.Done()
 	run(g.IsShutdown(), func(ctx context.Context, atTerminate func()) {
@@ -101,14 +111,14 @@ func (g *gracefulManager) RunWithShutdownChan(run RunnableWithShutdownChan) {
 // RunWithShutdownContext takes a function that has a context to watch for shutdown.
 // After the provided context is Done(), the main function must return once shutdown is complete.
 // (Optionally the HammerContext may be obtained and waited for however, this should be avoided if possible.)
-func (g *gracefulManager) RunWithShutdownContext(run func(context.Context)) {
+func (g *Manager) RunWithShutdownContext(run func(context.Context)) {
 	g.runningServerWaitGroup.Add(1)
 	defer g.runningServerWaitGroup.Done()
 	run(g.ShutdownContext())
 }
 
 // RunAtTerminate adds to the terminate wait group and creates a go-routine to run the provided function at termination
-func (g *gracefulManager) RunAtTerminate(ctx context.Context, terminate func()) {
+func (g *Manager) RunAtTerminate(ctx context.Context, terminate func()) {
 	g.terminateWaitGroup.Add(1)
 	go func() {
 		select {
@@ -121,7 +131,7 @@ func (g *gracefulManager) RunAtTerminate(ctx context.Context, terminate func())
 }
 
 // RunAtShutdown creates a go-routine to run the provided function at shutdown
-func (g *gracefulManager) RunAtShutdown(ctx context.Context, shutdown func()) {
+func (g *Manager) RunAtShutdown(ctx context.Context, shutdown func()) {
 	go func() {
 		select {
 		case <-g.IsShutdown():
@@ -132,7 +142,7 @@ func (g *gracefulManager) RunAtShutdown(ctx context.Context, shutdown func()) {
 }
 
 // RunAtHammer creates a go-routine to run the provided function at shutdown
-func (g *gracefulManager) RunAtHammer(ctx context.Context, hammer func()) {
+func (g *Manager) RunAtHammer(ctx context.Context, hammer func()) {
 	go func() {
 		select {
 		case <-g.IsHammer():
@@ -141,7 +151,7 @@ func (g *gracefulManager) RunAtHammer(ctx context.Context, hammer func()) {
 		}
 	}()
 }
-func (g *gracefulManager) doShutdown() {
+func (g *Manager) doShutdown() {
 	if !g.setStateTransition(stateRunning, stateShuttingDown) {
 		return
 	}
@@ -158,48 +168,47 @@ func (g *gracefulManager) doShutdown() {
 		g.doHammerTime(0)
 		<-time.After(1 * time.Second)
 		g.doTerminate()
+		g.WaitForTerminate()
+		g.lock.Lock()
+		close(g.done)
+		g.lock.Unlock()
 	}()
 }
 
-func (g *gracefulManager) doHammerTime(d time.Duration) {
+func (g *Manager) doHammerTime(d time.Duration) {
 	time.Sleep(d)
+	g.lock.Lock()
 	select {
 	case <-g.hammer:
 	default:
 		log.Warn("Setting Hammer condition")
 		close(g.hammer)
 	}
+	g.lock.Unlock()
 }
 
-func (g *gracefulManager) doTerminate() {
+func (g *Manager) doTerminate() {
 	if !g.setStateTransition(stateShuttingDown, stateTerminate) {
 		return
 	}
 	g.lock.Lock()
-	close(g.terminate)
+	select {
+	case <-g.terminate:
+	default:
+		log.Warn("Terminating")
+		close(g.terminate)
+	}
 	g.lock.Unlock()
 }
 
 // IsChild returns if the current process is a child of previous Gitea process
-func (g *gracefulManager) IsChild() bool {
+func (g *Manager) IsChild() bool {
 	return g.isChild
 }
 
 // IsShutdown returns a channel which will be closed at shutdown.
 // The order of closure is IsShutdown, IsHammer (potentially), IsTerminate
-func (g *gracefulManager) IsShutdown() <-chan struct{} {
-	g.lock.RLock()
-	if g.shutdown == nil {
-		g.lock.RUnlock()
-		g.lock.Lock()
-		if g.shutdown == nil {
-			g.shutdown = make(chan struct{})
-		}
-		defer g.lock.Unlock()
-		return g.shutdown
-	}
-	defer g.lock.RUnlock()
+func (g *Manager) IsShutdown() <-chan struct{} {
 	return g.shutdown
 }
 
@@ -207,65 +216,43 @@ func (g *gracefulManager) IsShutdown() <-chan struct{} {
 // The order of closure is IsShutdown, IsHammer (potentially), IsTerminate
 // Servers running within the running server wait group should respond to IsHammer
 // if not shutdown already
-func (g *gracefulManager) IsHammer() <-chan struct{} {
-	g.lock.RLock()
-	if g.hammer == nil {
-		g.lock.RUnlock()
-		g.lock.Lock()
-		if g.hammer == nil {
-			g.hammer = make(chan struct{})
-		}
-		defer g.lock.Unlock()
-		return g.hammer
-	}
-	defer g.lock.RUnlock()
+func (g *Manager) IsHammer() <-chan struct{} {
 	return g.hammer
 }
 
 // IsTerminate returns a channel which will be closed at terminate
 // The order of closure is IsShutdown, IsHammer (potentially), IsTerminate
 // IsTerminate will only close once all running servers have stopped
-func (g *gracefulManager) IsTerminate() <-chan struct{} {
-	g.lock.RLock()
-	if g.terminate == nil {
-		g.lock.RUnlock()
-		g.lock.Lock()
-		if g.terminate == nil {
-			g.terminate = make(chan struct{})
-		}
-		defer g.lock.Unlock()
-		return g.terminate
-	}
-	defer g.lock.RUnlock()
+func (g *Manager) IsTerminate() <-chan struct{} {
 	return g.terminate
 }
 
 // ServerDone declares a running server done and subtracts one from the
 // running server wait group. Users probably do not want to call this
 // and should use one of the RunWithShutdown* functions
-func (g *gracefulManager) ServerDone() {
+func (g *Manager) ServerDone() {
 	g.runningServerWaitGroup.Done()
 }
 
 // WaitForServers waits for all running servers to finish. Users should probably
 // instead use AtTerminate or IsTerminate
-func (g *gracefulManager) WaitForServers() {
+func (g *Manager) WaitForServers() {
 	g.runningServerWaitGroup.Wait()
 }
 
 // WaitForTerminate waits for all terminating actions to finish.
 // Only the main go-routine should use this
-func (g *gracefulManager) WaitForTerminate() {
+func (g *Manager) WaitForTerminate() {
 	g.terminateWaitGroup.Wait()
 }
 
-func (g *gracefulManager) getState() state {
+func (g *Manager) getState() state {
 	g.lock.RLock()
 	defer g.lock.RUnlock()
 	return g.state
 }
 
-func (g *gracefulManager) setStateTransition(old, new state) bool {
+func (g *Manager) setStateTransition(old, new state) bool {
 	if old != g.getState() {
 		return false
 	}
@@ -279,7 +266,7 @@ func (g *gracefulManager) setStateTransition(old, new state) bool {
 	return true
 }
 
-func (g *gracefulManager) setState(st state) {
+func (g *Manager) setState(st state) {
 	g.lock.Lock()
 	defer g.lock.Unlock()
 
@@ -288,6 +275,31 @@ func (g *gracefulManager) setState(st state) {
 
 // InformCleanup tells the cleanup wait group that we have either taken a listener
 // or will not be taking a listener
-func (g *gracefulManager) InformCleanup() {
+func (g *Manager) InformCleanup() {
 	g.createServerWaitGroup.Done()
 }
+
+// Done allows the manager to be viewed as a context.Context, it returns a channel that is closed when the server is finished terminating
+func (g *Manager) Done() <-chan struct{} {
+	return g.done
+}
+
+// Err allows the manager to be viewed as a context.Context done at Terminate, it returns ErrTerminate
+func (g *Manager) Err() error {
+	select {
+	case <-g.Done():
+		return ErrTerminate
+	default:
+		return nil
+	}
+}
+
+// Value allows the manager to be viewed as a context.Context done at Terminate, it has no values
+func (g *Manager) Value(key interface{}) interface{} {
+	return nil
+}
+
+// Deadline returns nil as there is no fixed Deadline for the manager, it allows the manager to be viewed as a context.Context
+func (g *Manager) Deadline() (deadline time.Time, ok bool) {
+	return
+}
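Because Manager now provides Done, Err, Value and Deadline, it satisfies context.Context and can be handed directly to any context-aware API. A sketch of what that enables (waitOrWork is illustrative, not from this diff):

package main

import (
	"context"
	"fmt"

	"code.gitea.io/gitea/modules/graceful"
)

// waitOrWork accepts any context.Context, including the graceful Manager.
func waitOrWork(ctx context.Context) {
	select {
	case <-ctx.Done():
		fmt.Println("terminated:", ctx.Err()) // ErrTerminate once done closes
	default:
		fmt.Println("still running")
	}
}

func main() {
	// The assignment compiles because *Manager implements the full interface.
	var ctx context.Context = graceful.GetManager()
	waitOrWork(ctx)
}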
| @@ -19,7 +19,8 @@ import ( | |||||||
| 	"code.gitea.io/gitea/modules/setting" | 	"code.gitea.io/gitea/modules/setting" | ||||||
| ) | ) | ||||||
|  |  | ||||||
| type gracefulManager struct { | // Manager manages the graceful shutdown process | ||||||
|  | type Manager struct { | ||||||
| 	isChild                bool | 	isChild                bool | ||||||
| 	forked                 bool | 	forked                 bool | ||||||
| 	lock                   *sync.RWMutex | 	lock                   *sync.RWMutex | ||||||
| @@ -27,27 +28,37 @@ type gracefulManager struct { | |||||||
| 	shutdown               chan struct{} | 	shutdown               chan struct{} | ||||||
| 	hammer                 chan struct{} | 	hammer                 chan struct{} | ||||||
| 	terminate              chan struct{} | 	terminate              chan struct{} | ||||||
|  | 	done                   chan struct{} | ||||||
| 	runningServerWaitGroup sync.WaitGroup | 	runningServerWaitGroup sync.WaitGroup | ||||||
| 	createServerWaitGroup  sync.WaitGroup | 	createServerWaitGroup  sync.WaitGroup | ||||||
| 	terminateWaitGroup     sync.WaitGroup | 	terminateWaitGroup     sync.WaitGroup | ||||||
| } | } | ||||||
|  |  | ||||||
| func newGracefulManager(ctx context.Context) *gracefulManager { | func newGracefulManager(ctx context.Context) *Manager { | ||||||
| 	manager := &gracefulManager{ | 	manager := &Manager{ | ||||||
| 		isChild: len(os.Getenv(listenFDs)) > 0 && os.Getppid() > 1, | 		isChild: len(os.Getenv(listenFDs)) > 0 && os.Getppid() > 1, | ||||||
| 		lock:    &sync.RWMutex{}, | 		lock:    &sync.RWMutex{}, | ||||||
| 	} | 	} | ||||||
| 	manager.createServerWaitGroup.Add(numberOfServersToCreate) | 	manager.createServerWaitGroup.Add(numberOfServersToCreate) | ||||||
| 	manager.Run(ctx) | 	manager.start(ctx) | ||||||
| 	return manager | 	return manager | ||||||
| } | } | ||||||
|  |  | ||||||
| func (g *gracefulManager) Run(ctx context.Context) { | func (g *Manager) start(ctx context.Context) { | ||||||
|  | 	// Make channels | ||||||
|  | 	g.terminate = make(chan struct{}) | ||||||
|  | 	g.shutdown = make(chan struct{}) | ||||||
|  | 	g.hammer = make(chan struct{}) | ||||||
|  | 	g.done = make(chan struct{}) | ||||||
|  |  | ||||||
|  | 	// Set the running state & handle signals | ||||||
| 	g.setState(stateRunning) | 	g.setState(stateRunning) | ||||||
| 	go g.handleSignals(ctx) | 	go g.handleSignals(ctx) | ||||||
| 	c := make(chan struct{}) |  | ||||||
|  | 	// Handle clean up of unused provided listeners	and delayed start-up | ||||||
|  | 	startupDone := make(chan struct{}) | ||||||
| 	go func() { | 	go func() { | ||||||
| 		defer close(c) | 		defer close(startupDone) | ||||||
| 		// Wait till we're done getting all of the listeners and then close | 		// Wait till we're done getting all of the listeners and then close | ||||||
| 		// the unused ones | 		// the unused ones | ||||||
| 		g.createServerWaitGroup.Wait() | 		g.createServerWaitGroup.Wait() | ||||||
| @@ -58,9 +69,19 @@ func (g *gracefulManager) Run(ctx context.Context) { | |||||||
| 	if setting.StartupTimeout > 0 { | 	if setting.StartupTimeout > 0 { | ||||||
| 		go func() { | 		go func() { | ||||||
| 			select { | 			select { | ||||||
| 			case <-c: | 			case <-startupDone: | ||||||
| 				return | 				return | ||||||
| 			case <-g.IsShutdown(): | 			case <-g.IsShutdown(): | ||||||
|  | 				func() { | ||||||
|  | 					// When the waitgroup counter goes negative it will panic; we don't care about this so we can just ignore it. | ||||||
|  | 					defer func() { | ||||||
|  | 						_ = recover() | ||||||
|  | 					}() | ||||||
|  | 					// Ensure that the createServerWaitGroup stops waiting | ||||||
|  | 					for { | ||||||
|  | 						g.createServerWaitGroup.Done() | ||||||
|  | 					} | ||||||
|  | 				}() | ||||||
| 				return | 				return | ||||||
| 			case <-time.After(setting.StartupTimeout): | 			case <-time.After(setting.StartupTimeout): | ||||||
| 				log.Error("Startup took too long! Shutting down") | 				log.Error("Startup took too long! Shutting down") | ||||||
| @@ -70,7 +91,7 @@ func (g *gracefulManager) Run(ctx context.Context) { | |||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| func (g *gracefulManager) handleSignals(ctx context.Context) { | func (g *Manager) handleSignals(ctx context.Context) { | ||||||
| 	signalChannel := make(chan os.Signal, 1) | 	signalChannel := make(chan os.Signal, 1) | ||||||
|  |  | ||||||
| 	signal.Notify( | 	signal.Notify( | ||||||
| @@ -123,7 +144,7 @@ func (g *gracefulManager) handleSignals(ctx context.Context) { | |||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| func (g *gracefulManager) doFork() error { | func (g *Manager) doFork() error { | ||||||
| 	g.lock.Lock() | 	g.lock.Lock() | ||||||
| 	if g.forked { | 	if g.forked { | ||||||
| 		g.lock.Unlock() | 		g.lock.Unlock() | ||||||
| @@ -139,7 +160,9 @@ func (g *gracefulManager) doFork() error { | |||||||
| 	return err | 	return err | ||||||
| } | } | ||||||
|  |  | ||||||
| func (g *gracefulManager) RegisterServer() { | // RegisterServer registers the running of a listening server; on Unix this means that the parent process can now die. | ||||||
|  | // Any call to RegisterServer must be matched by a call to ServerDone | ||||||
|  | func (g *Manager) RegisterServer() { | ||||||
| 	KillParent() | 	KillParent() | ||||||
| 	g.runningServerWaitGroup.Add(1) | 	g.runningServerWaitGroup.Add(1) | ||||||
| } | } | ||||||
|   | |||||||
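One detail in the startup-timeout hunk above deserves a closer look: a sync.WaitGroup cannot be cancelled, so the only way to release a goroutine blocked in Wait() is to call Done() until the counter underflows and panics, then swallow the panic. Here is the trick in isolation, as a standalone sketch:

package gracefuldemo

import "sync"

// forceRelease unblocks anything stuck in wg.Wait() by driving the
// counter to zero and past it. Decrementing below zero panics with
// "sync: negative WaitGroup counter"; that panic is recovered and
// serves as the loop's only exit condition.
func forceRelease(wg *sync.WaitGroup) {
	defer func() {
		_ = recover() // the underflow panic is expected; ignore it
	}()
	for {
		wg.Done()
	}
}

It is a blunt instrument, acceptable here only because the process is shutting down and the wait group will never be waited on again.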
| @@ -30,7 +30,8 @@ const ( | |||||||
| 	acceptHammerCode = svc.Accepted(hammerCode) | 	acceptHammerCode = svc.Accepted(hammerCode) | ||||||
| ) | ) | ||||||
|  |  | ||||||
| type gracefulManager struct { | // Manager manages the graceful shutdown process | ||||||
|  | type Manager struct { | ||||||
| 	ctx                    context.Context | 	ctx                    context.Context | ||||||
| 	isChild                bool | 	isChild                bool | ||||||
| 	lock                   *sync.RWMutex | 	lock                   *sync.RWMutex | ||||||
| @@ -38,27 +39,37 @@ type gracefulManager struct { | |||||||
| 	shutdown               chan struct{} | 	shutdown               chan struct{} | ||||||
| 	hammer                 chan struct{} | 	hammer                 chan struct{} | ||||||
| 	terminate              chan struct{} | 	terminate              chan struct{} | ||||||
|  | 	done                   chan struct{} | ||||||
| 	runningServerWaitGroup sync.WaitGroup | 	runningServerWaitGroup sync.WaitGroup | ||||||
| 	createServerWaitGroup  sync.WaitGroup | 	createServerWaitGroup  sync.WaitGroup | ||||||
| 	terminateWaitGroup     sync.WaitGroup | 	terminateWaitGroup     sync.WaitGroup | ||||||
| } | } | ||||||
|  |  | ||||||
| func newGracefulManager(ctx context.Context) *gracefulManager { | func newGracefulManager(ctx context.Context) *Manager { | ||||||
| 	manager := &gracefulManager{ | 	manager := &Manager{ | ||||||
| 		isChild: false, | 		isChild: false, | ||||||
| 		lock:    &sync.RWMutex{}, | 		lock:    &sync.RWMutex{}, | ||||||
| 		ctx:     ctx, | 		ctx:     ctx, | ||||||
| 	} | 	} | ||||||
| 	manager.createServerWaitGroup.Add(numberOfServersToCreate) | 	manager.createServerWaitGroup.Add(numberOfServersToCreate) | ||||||
| 	manager.Run() | 	manager.start() | ||||||
| 	return manager | 	return manager | ||||||
| } | } | ||||||
|  |  | ||||||
| func (g *gracefulManager) Run() { | func (g *Manager) start() { | ||||||
|  | 	// Make channels | ||||||
|  | 	g.terminate = make(chan struct{}) | ||||||
|  | 	g.shutdown = make(chan struct{}) | ||||||
|  | 	g.hammer = make(chan struct{}) | ||||||
|  | 	g.done = make(chan struct{}) | ||||||
|  |  | ||||||
|  | 	// Set the running state | ||||||
| 	g.setState(stateRunning) | 	g.setState(stateRunning) | ||||||
| 	if skip, _ := strconv.ParseBool(os.Getenv("SKIP_MINWINSVC")); skip { | 	if skip, _ := strconv.ParseBool(os.Getenv("SKIP_MINWINSVC")); skip { | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | 	// Make SVC process | ||||||
| 	run := svc.Run | 	run := svc.Run | ||||||
| 	isInteractive, err := svc.IsAnInteractiveSession() | 	isInteractive, err := svc.IsAnInteractiveSession() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| @@ -71,8 +82,8 @@ func (g *gracefulManager) Run() { | |||||||
| 	go run(WindowsServiceName, g) | 	go run(WindowsServiceName, g) | ||||||
| } | } | ||||||
|  |  | ||||||
| // Execute makes gracefulManager implement svc.Handler | // Execute makes Manager implement svc.Handler | ||||||
| func (g *gracefulManager) Execute(args []string, changes <-chan svc.ChangeRequest, status chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) { | func (g *Manager) Execute(args []string, changes <-chan svc.ChangeRequest, status chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) { | ||||||
| 	if setting.StartupTimeout > 0 { | 	if setting.StartupTimeout > 0 { | ||||||
| 		status <- svc.Status{State: svc.StartPending} | 		status <- svc.Status{State: svc.StartPending} | ||||||
| 	} else { | 	} else { | ||||||
| @@ -141,11 +152,13 @@ hammerLoop: | |||||||
| 	return false, 0 | 	return false, 0 | ||||||
| } | } | ||||||
|  |  | ||||||
| func (g *gracefulManager) RegisterServer() { | // RegisterServer registers the running of a listening server. | ||||||
|  | // Any call to RegisterServer must be matched by a call to ServerDone | ||||||
|  | func (g *Manager) RegisterServer() { | ||||||
| 	g.runningServerWaitGroup.Add(1) | 	g.runningServerWaitGroup.Add(1) | ||||||
| } | } | ||||||
|  |  | ||||||
| func (g *gracefulManager) awaitServer(limit time.Duration) bool { | func (g *Manager) awaitServer(limit time.Duration) bool { | ||||||
| 	c := make(chan struct{}) | 	c := make(chan struct{}) | ||||||
| 	go func() { | 	go func() { | ||||||
| 		defer close(c) | 		defer close(c) | ||||||
|   | |||||||
| @@ -101,7 +101,7 @@ func CloseProvidedListeners() error { | |||||||
| // creates a new one using net.Listen. | // creates a new one using net.Listen. | ||||||
| func GetListener(network, address string) (net.Listener, error) { | func GetListener(network, address string) (net.Listener, error) { | ||||||
| 	// Add a deferral to say that we've tried to grab a listener | 	// Add a deferral to say that we've tried to grab a listener | ||||||
| 	defer Manager.InformCleanup() | 	defer GetManager().InformCleanup() | ||||||
| 	switch network { | 	switch network { | ||||||
| 	case "tcp", "tcp4", "tcp6": | 	case "tcp", "tcp4", "tcp6": | ||||||
| 		tcpAddr, err := net.ResolveTCPAddr(network, address) | 		tcpAddr, err := net.ResolveTCPAddr(network, address) | ||||||
|   | |||||||
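From here on, every call site swaps the old package-level Manager variable for GetManager(). The accessor is presumably a once-guarded singleton along these lines; this is a sketch under that assumption, not necessarily the exact implementation:

package gracefuldemo

import (
	"context"
	"sync"
)

type Manager struct{ /* channels, wait groups, state ... */ }

func newGracefulManager(ctx context.Context) *Manager { return &Manager{} }

var (
	initOnce sync.Once
	manager  *Manager
)

// InitManager creates the singleton with the given context.
// Only the first call has any effect.
func InitManager(ctx context.Context) {
	initOnce.Do(func() {
		manager = newGracefulManager(ctx)
	})
}

// GetManager returns the singleton, lazily initializing it with a
// background context if InitManager was never called explicitly.
func GetManager() *Manager {
	InitManager(context.Background())
	return manager
}

The singleton keeps initialization order flexible: callers that own a lifecycle can seed it explicitly, while helpers like GetListener can still reach it without any plumbing.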
| @@ -22,7 +22,7 @@ var killParent sync.Once | |||||||
| // KillParent sends the kill signal to the parent process if we are a child | // KillParent sends the kill signal to the parent process if we are a child | ||||||
| func KillParent() { | func KillParent() { | ||||||
| 	killParent.Do(func() { | 	killParent.Do(func() { | ||||||
| 		if Manager.IsChild() { | 		if GetManager().IsChild() { | ||||||
| 			ppid := syscall.Getppid() | 			ppid := syscall.Getppid() | ||||||
| 			if ppid > 1 { | 			if ppid > 1 { | ||||||
| 				_ = syscall.Kill(ppid, syscall.SIGTERM) | 				_ = syscall.Kill(ppid, syscall.SIGTERM) | ||||||
|   | |||||||
| @@ -47,7 +47,7 @@ type Server struct { | |||||||
|  |  | ||||||
| // NewServer creates a server on network at provided address | // NewServer creates a server on network at provided address | ||||||
| func NewServer(network, address string) *Server { | func NewServer(network, address string) *Server { | ||||||
| 	if Manager.IsChild() { | 	if GetManager().IsChild() { | ||||||
| 		log.Info("Restarting new server: %s:%s on PID: %d", network, address, os.Getpid()) | 		log.Info("Restarting new server: %s:%s on PID: %d", network, address, os.Getpid()) | ||||||
| 	} else { | 	} else { | ||||||
| 		log.Info("Starting new server: %s:%s on PID: %d", network, address, os.Getpid()) | 		log.Info("Starting new server: %s:%s on PID: %d", network, address, os.Getpid()) | ||||||
| @@ -138,12 +138,12 @@ func (srv *Server) ListenAndServeTLSConfig(tlsConfig *tls.Config, serve ServeFun | |||||||
| func (srv *Server) Serve(serve ServeFunction) error { | func (srv *Server) Serve(serve ServeFunction) error { | ||||||
| 	defer log.Debug("Serve() returning... (PID: %d)", syscall.Getpid()) | 	defer log.Debug("Serve() returning... (PID: %d)", syscall.Getpid()) | ||||||
| 	srv.setState(stateRunning) | 	srv.setState(stateRunning) | ||||||
| 	Manager.RegisterServer() | 	GetManager().RegisterServer() | ||||||
| 	err := serve(srv.listener) | 	err := serve(srv.listener) | ||||||
| 	log.Debug("Waiting for connections to finish... (PID: %d)", syscall.Getpid()) | 	log.Debug("Waiting for connections to finish... (PID: %d)", syscall.Getpid()) | ||||||
| 	srv.wg.Wait() | 	srv.wg.Wait() | ||||||
| 	srv.setState(stateTerminate) | 	srv.setState(stateTerminate) | ||||||
| 	Manager.ServerDone() | 	GetManager().ServerDone() | ||||||
| 	// use of closed means that the listeners are closed - i.e. we should be shutting down - return nil | 	// use of closed means that the listeners are closed - i.e. we should be shutting down - return nil | ||||||
| 	if err != nil && strings.Contains(err.Error(), "use of closed") { | 	if err != nil && strings.Contains(err.Error(), "use of closed") { | ||||||
| 		return nil | 		return nil | ||||||
|   | |||||||
| @@ -14,15 +14,15 @@ import ( | |||||||
| // awaitShutdown waits for the shutdown signal from the Manager | // awaitShutdown waits for the shutdown signal from the Manager | ||||||
| func (srv *Server) awaitShutdown() { | func (srv *Server) awaitShutdown() { | ||||||
| 	select { | 	select { | ||||||
| 	case <-Manager.IsShutdown(): | 	case <-GetManager().IsShutdown(): | ||||||
| 		// Shutdown | 		// Shutdown | ||||||
| 		srv.doShutdown() | 		srv.doShutdown() | ||||||
| 	case <-Manager.IsHammer(): | 	case <-GetManager().IsHammer(): | ||||||
| 		// Hammer | 		// Hammer | ||||||
| 		srv.doShutdown() | 		srv.doShutdown() | ||||||
| 		srv.doHammer() | 		srv.doHammer() | ||||||
| 	} | 	} | ||||||
| 	<-Manager.IsHammer() | 	<-GetManager().IsHammer() | ||||||
| 	srv.doHammer() | 	srv.doHammer() | ||||||
| } | } | ||||||
|  |  | ||||||
|   | |||||||
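awaitShutdown above encodes a two-phase stop: a soft shutdown during which in-flight work drains, then a "hammer" that forces whatever is left to end. A standalone worker with the same shape, assuming both channels are closed by some manager (illustrative, not Gitea code):

package gracefuldemo

// worker processes jobs until shutdown begins, keeps draining queued
// jobs through the graceful phase, and abandons the rest at the hammer.
func worker(jobs <-chan func(), shutdown, hammer <-chan struct{}) {
	for {
		select {
		case job := <-jobs:
			job()
		case <-shutdown:
			// Phase 1: graceful - keep working through the backlog.
			for {
				select {
				case job := <-jobs:
					job()
				case <-hammer:
					// Phase 2: hard deadline reached - stop immediately.
					return
				}
			}
		}
	}
}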
| @@ -6,6 +6,7 @@ package code | |||||||
|  |  | ||||||
| import ( | import ( | ||||||
| 	"fmt" | 	"fmt" | ||||||
|  | 	"os" | ||||||
| 	"strconv" | 	"strconv" | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"time" | 	"time" | ||||||
| @@ -34,10 +35,11 @@ func InitRepoIndexer() { | |||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 	waitChannel := make(chan time.Duration) | 	waitChannel := make(chan time.Duration) | ||||||
|  | 	// FIXME: graceful: This should use a persistable queue | ||||||
| 	repoIndexerOperationQueue = make(chan repoIndexerOperation, setting.Indexer.UpdateQueueLength) | 	repoIndexerOperationQueue = make(chan repoIndexerOperation, setting.Indexer.UpdateQueueLength) | ||||||
| 	go func() { | 	go func() { | ||||||
| 		start := time.Now() | 		start := time.Now() | ||||||
| 		log.Info("Initializing Repository Indexer") | 		log.Info("PID: %d: Initializing Repository Indexer", os.Getpid()) | ||||||
| 		initRepoIndexer(populateRepoIndexerAsynchronously) | 		initRepoIndexer(populateRepoIndexerAsynchronously) | ||||||
| 		go processRepoIndexerOperationQueue() | 		go processRepoIndexerOperationQueue() | ||||||
| 		waitChannel <- time.Since(start) | 		waitChannel <- time.Since(start) | ||||||
| @@ -45,7 +47,7 @@ func InitRepoIndexer() { | |||||||
| 	if setting.Indexer.StartupTimeout > 0 { | 	if setting.Indexer.StartupTimeout > 0 { | ||||||
| 		go func() { | 		go func() { | ||||||
| 			timeout := setting.Indexer.StartupTimeout | 			timeout := setting.Indexer.StartupTimeout | ||||||
| 			if graceful.Manager.IsChild() && setting.GracefulHammerTime > 0 { | 			if graceful.GetManager().IsChild() && setting.GracefulHammerTime > 0 { | ||||||
| 				timeout += setting.GracefulHammerTime | 				timeout += setting.GracefulHammerTime | ||||||
| 			} | 			} | ||||||
| 			select { | 			select { | ||||||
| @@ -70,13 +72,6 @@ func populateRepoIndexerAsynchronously() error { | |||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	// if there is any existing repo indexer metadata in the DB, delete it |  | ||||||
| 	// since we are starting afresh. Also, xorm requires deletes to have a |  | ||||||
| 	// condition, and we want to delete everything, thus 1=1. |  | ||||||
| 	if err := models.DeleteAllRecords("repo_indexer_status"); err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	var maxRepoID int64 | 	var maxRepoID int64 | ||||||
| 	if maxRepoID, err = models.GetMaxID("repository"); err != nil { | 	if maxRepoID, err = models.GetMaxID("repository"); err != nil { | ||||||
| 		return err | 		return err | ||||||
| @@ -87,44 +82,59 @@ func populateRepoIndexerAsynchronously() error { | |||||||
|  |  | ||||||
| // populateRepoIndexer populate the repo indexer with pre-existing data. This | // populateRepoIndexer populate the repo indexer with pre-existing data. This | ||||||
| // should only be run when the indexer is created for the first time. | // should only be run when the indexer is created for the first time. | ||||||
|  | // FIXME: graceful: This should use a persistable queue | ||||||
| func populateRepoIndexer(maxRepoID int64) { | func populateRepoIndexer(maxRepoID int64) { | ||||||
| 	log.Info("Populating the repo indexer with existing repositories") | 	log.Info("Populating the repo indexer with existing repositories") | ||||||
|  |  | ||||||
|  | 	isShutdown := graceful.GetManager().IsShutdown() | ||||||
|  |  | ||||||
| 	// start with the maximum existing repo ID and work backwards, so that we | 	// start with the maximum existing repo ID and work backwards, so that we | ||||||
| 	// don't include repos that are created after gitea starts; such repos will | 	// don't include repos that are created after gitea starts; such repos will | ||||||
| 	// already be added to the indexer, and we don't need to add them again. | 	// already be added to the indexer, and we don't need to add them again. | ||||||
| 	for maxRepoID > 0 { | 	for maxRepoID > 0 { | ||||||
| 		repos := make([]*models.Repository, 0, models.RepositoryListDefaultPageSize) | 		select { | ||||||
| 		err := models.FindByMaxID(maxRepoID, models.RepositoryListDefaultPageSize, &repos) | 		case <-isShutdown: | ||||||
|  | 			log.Info("Repository Indexer population shut down before completion") | ||||||
|  | 			return | ||||||
|  | 		default: | ||||||
|  | 		} | ||||||
|  | 		ids, err := models.GetUnindexedRepos(maxRepoID, 0, 50) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			log.Error("populateRepoIndexer: %v", err) | 			log.Error("populateRepoIndexer: %v", err) | ||||||
| 			return | 			return | ||||||
| 		} else if len(repos) == 0 { | 		} else if len(ids) == 0 { | ||||||
| 			break | 			break | ||||||
| 		} | 		} | ||||||
| 		for _, repo := range repos { | 		for _, id := range ids { | ||||||
|  | 			select { | ||||||
|  | 			case <-isShutdown: | ||||||
|  | 				log.Info("Repository Indexer population shut down before completion") | ||||||
|  | 				return | ||||||
|  | 			default: | ||||||
|  | 			} | ||||||
| 			repoIndexerOperationQueue <- repoIndexerOperation{ | 			repoIndexerOperationQueue <- repoIndexerOperation{ | ||||||
| 				repoID:  repo.ID, | 				repoID:  id, | ||||||
| 				deleted: false, | 				deleted: false, | ||||||
| 			} | 			} | ||||||
| 			maxRepoID = repo.ID - 1 | 			maxRepoID = id - 1 | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	log.Info("Done populating the repo indexer with existing repositories") | 	log.Info("Done (re)populating the repo indexer with existing repositories") | ||||||
| } | } | ||||||
|  |  | ||||||
| func updateRepoIndexer(repoID int64) error { | func updateRepoIndexer(repoID int64) error { | ||||||
| 	repo, err := models.GetRepositoryByID(repoID) | 	repo, err := models.GetRepositoryByID(repoID) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return fmt.Errorf("UpdateRepoIndexer: Unable to GetRepositoryByID: %d, Error: %v", repoID, err) | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	sha, err := getDefaultBranchSha(repo) | 	sha, err := getDefaultBranchSha(repo) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return fmt.Errorf("UpdateRepoIndexer: Unable to GetDefaultBranchSha for: %s/%s, Error: %v", repo.MustOwnerName(), repo.Name, err) | ||||||
| 	} | 	} | ||||||
| 	changes, err := getRepoChanges(repo, sha) | 	changes, err := getRepoChanges(repo, sha) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return fmt.Errorf("UpdateRepoIndexer: Unable to GetRepoChanges for: %s/%s Sha: %s Error: %v", repo.MustOwnerName(), repo.Name, sha, err) | ||||||
| 	} else if changes == nil { | 	} else if changes == nil { | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
| @@ -132,16 +142,16 @@ func updateRepoIndexer(repoID int64) error { | |||||||
| 	batch := RepoIndexerBatch() | 	batch := RepoIndexerBatch() | ||||||
| 	for _, update := range changes.Updates { | 	for _, update := range changes.Updates { | ||||||
| 		if err := addUpdate(update, repo, batch); err != nil { | 		if err := addUpdate(update, repo, batch); err != nil { | ||||||
| 			return err | 			return fmt.Errorf("UpdateRepoIndexer: Unable to addUpdate to: %s/%s Sha: %s, update: %s(%s) Error: %v", repo.MustOwnerName(), repo.Name, sha, update.Filename, update.BlobSha, err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	for _, filename := range changes.RemovedFilenames { | 	for _, filename := range changes.RemovedFilenames { | ||||||
| 		if err := addDelete(filename, repo, batch); err != nil { | 		if err := addDelete(filename, repo, batch); err != nil { | ||||||
| 			return err | 			return fmt.Errorf("UpdateRepoIndexer: Unable to addDelete to: %s/%s Sha: %s, filename: %s Error: %v", repo.MustOwnerName(), repo.Name, sha, filename, err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	if err = batch.Flush(); err != nil { | 	if err = batch.Flush(); err != nil { | ||||||
| 		return err | 		return fmt.Errorf("UpdateRepoIndexer: Unable to flush batch to indexer for repo: %s/%s Error: %v", repo.MustOwnerName(), repo.Name, err) | ||||||
| 	} | 	} | ||||||
| 	return repo.UpdateIndexerStatus(sha) | 	return repo.UpdateIndexerStatus(sha) | ||||||
| } | } | ||||||
| @@ -322,20 +332,26 @@ func nonGenesisChanges(repo *models.Repository, revision string) (*repoChanges, | |||||||
|  |  | ||||||
| func processRepoIndexerOperationQueue() { | func processRepoIndexerOperationQueue() { | ||||||
| 	for { | 	for { | ||||||
| 		op := <-repoIndexerOperationQueue | 		select { | ||||||
| 		var err error | 		case op := <-repoIndexerOperationQueue: | ||||||
| 		if op.deleted { | 			var err error | ||||||
| 			if err = deleteRepoFromIndexer(op.repoID); err != nil { | 			if op.deleted { | ||||||
| 				log.Error("deleteRepoFromIndexer: %v", err) | 				if err = deleteRepoFromIndexer(op.repoID); err != nil { | ||||||
|  | 					log.Error("DeleteRepoFromIndexer: %v", err) | ||||||
|  | 				} | ||||||
|  | 			} else { | ||||||
|  | 				if err = updateRepoIndexer(op.repoID); err != nil { | ||||||
|  | 					log.Error("updateRepoIndexer: %v", err) | ||||||
|  | 				} | ||||||
| 			} | 			} | ||||||
| 		} else { | 			for _, watcher := range op.watchers { | ||||||
| 			if err = updateRepoIndexer(op.repoID); err != nil { | 				watcher <- err | ||||||
| 				log.Error("updateRepoIndexer: %v", err) |  | ||||||
| 			} | 			} | ||||||
|  | 		case <-graceful.GetManager().IsShutdown(): | ||||||
|  | 			log.Info("PID: %d Repository indexer queue processing stopped", os.Getpid()) | ||||||
|  | 			return | ||||||
| 		} | 		} | ||||||
| 		for _, watcher := range op.watchers { |  | ||||||
| 			watcher <- err |  | ||||||
| 		} |  | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
|   | |||||||
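Both loops in populateRepoIndexer poll for shutdown with a select whose default case is empty, which tests the channel without ever blocking between items. The idiom in isolation, as a sketch:

package gracefuldemo

// processAll handles items one at a time, checking between items
// whether the isShutdown channel has been closed.
func processAll(items []int64, isShutdown <-chan struct{}, handle func(int64)) {
	for _, item := range items {
		select {
		case <-isShutdown:
			// A closed channel receives immediately, so once shutdown
			// starts this case wins on the very next iteration.
			return
		default:
			// Not shutting down: fall through to the work below.
		}
		handle(item)
	}
}

An interrupted pass is cheap to abandon here because population now works from GetUnindexedRepos, so the next start simply resumes with whatever was never indexed.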
| @@ -5,9 +5,13 @@ | |||||||
| package code | package code | ||||||
|  |  | ||||||
| import ( | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"os" | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"sync" | 	"sync" | ||||||
|  |  | ||||||
|  | 	"code.gitea.io/gitea/models" | ||||||
|  | 	"code.gitea.io/gitea/modules/graceful" | ||||||
| 	"code.gitea.io/gitea/modules/log" | 	"code.gitea.io/gitea/modules/log" | ||||||
| 	"code.gitea.io/gitea/modules/setting" | 	"code.gitea.io/gitea/modules/setting" | ||||||
|  |  | ||||||
| @@ -104,21 +108,50 @@ func (update RepoIndexerUpdate) AddToFlushingBatch(batch rupture.FlushingBatch) | |||||||
| func initRepoIndexer(populateIndexer func() error) { | func initRepoIndexer(populateIndexer func() error) { | ||||||
| 	indexer, err := openIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion) | 	indexer, err := openIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		log.Fatal("InitRepoIndexer: %v", err) | 		log.Fatal("InitRepoIndexer %s: %v", setting.Indexer.RepoPath, err) | ||||||
| 	} | 	} | ||||||
| 	if indexer != nil { | 	if indexer != nil { | ||||||
| 		indexerHolder.set(indexer) | 		indexerHolder.set(indexer) | ||||||
|  | 		closeAtTerminate() | ||||||
|  |  | ||||||
|  | 		// Continue population from where it left off | ||||||
|  | 		if err = populateIndexer(); err != nil { | ||||||
|  | 			log.Fatal("PopulateRepoIndex: %v", err) | ||||||
|  | 		} | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if err = createRepoIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion); err != nil { | 	if err = createRepoIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion); err != nil { | ||||||
| 		log.Fatal("CreateRepoIndexer: %v", err) | 		log.Fatal("CreateRepoIndexer: %v", err) | ||||||
| 	} | 	} | ||||||
|  | 	closeAtTerminate() | ||||||
|  |  | ||||||
|  | 	// if there is any existing repo indexer metadata in the DB, delete it | ||||||
|  | 	// since we are starting afresh. Also, xorm requires deletes to have a | ||||||
|  | 	// condition, and we want to delete everything, thus 1=1. | ||||||
|  | 	if err := models.DeleteAllRecords("repo_indexer_status"); err != nil { | ||||||
|  | 		log.Fatal("DeleteAllRepoIndexerStatus: %v", err) | ||||||
|  | 	} | ||||||
|  |  | ||||||
| 	if err = populateIndexer(); err != nil { | 	if err = populateIndexer(); err != nil { | ||||||
| 		log.Fatal("PopulateRepoIndex: %v", err) | 		log.Fatal("PopulateRepoIndex: %v", err) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
|  | func closeAtTerminate() { | ||||||
|  | 	graceful.GetManager().RunAtTerminate(context.Background(), func() { | ||||||
|  | 		log.Debug("Closing repo indexer") | ||||||
|  | 		indexer := indexerHolder.get() | ||||||
|  | 		if indexer != nil { | ||||||
|  | 			err := indexer.Close() | ||||||
|  | 			if err != nil { | ||||||
|  | 				log.Error("Error whilst closing the repository indexer: %v", err) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		log.Info("PID: %d Repository Indexer closed", os.Getpid()) | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  |  | ||||||
| // createRepoIndexer create a repo indexer if one does not already exist | // createRepoIndexer create a repo indexer if one does not already exist | ||||||
| func createRepoIndexer(path string, latestVersion int) error { | func createRepoIndexer(path string, latestVersion int) error { | ||||||
| 	docMapping := bleve.NewDocumentMapping() | 	docMapping := bleve.NewDocumentMapping() | ||||||
|   | |||||||
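closeAtTerminate hands the indexer's Close to RunAtTerminate so that it runs at the very end of the process's life. A plausible shape for such a hook runner, assuming a terminate channel plus a wait group that termination waits on (a sketch, not the actual manager code):

package gracefuldemo

import (
	"context"
	"sync"
)

type hookManager struct {
	terminate          chan struct{}  // closed when terminate begins
	terminateWaitGroup sync.WaitGroup // drained before the process exits
}

// RunAtTerminate schedules callback for the terminate phase, unless
// ctx is cancelled first. Termination blocks until it has run.
func (m *hookManager) RunAtTerminate(ctx context.Context, callback func()) {
	m.terminateWaitGroup.Add(1)
	go func() {
		defer m.terminateWaitGroup.Done()
		select {
		case <-m.terminate:
			callback()
		case <-ctx.Done():
			// The caller gave up before terminate; skip the callback.
		}
	}()
}

Closing at terminate rather than shutdown matters here: the queue processor above only stops at shutdown, so the index has to outlive it until everything has quiesced.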
| @@ -172,7 +172,7 @@ func InitIssueIndexer(syncReindex bool) { | |||||||
| 	} else if setting.Indexer.StartupTimeout > 0 { | 	} else if setting.Indexer.StartupTimeout > 0 { | ||||||
| 		go func() { | 		go func() { | ||||||
| 			timeout := setting.Indexer.StartupTimeout | 			timeout := setting.Indexer.StartupTimeout | ||||||
| 			if graceful.Manager.IsChild() && setting.GracefulHammerTime > 0 { | 			if graceful.GetManager().IsChild() && setting.GracefulHammerTime > 0 { | ||||||
| 				timeout += setting.GracefulHammerTime | 				timeout += setting.GracefulHammerTime | ||||||
| 			} | 			} | ||||||
| 			select { | 			select { | ||||||
|   | |||||||
| @@ -5,21 +5,28 @@ | |||||||
| package migrations | package migrations | ||||||
|  |  | ||||||
| import ( | import ( | ||||||
|  | 	"context" | ||||||
|  |  | ||||||
| 	"code.gitea.io/gitea/models" | 	"code.gitea.io/gitea/models" | ||||||
| 	"code.gitea.io/gitea/modules/log" | 	"code.gitea.io/gitea/modules/log" | ||||||
| 	"code.gitea.io/gitea/modules/structs" | 	"code.gitea.io/gitea/modules/structs" | ||||||
| ) | ) | ||||||
|  |  | ||||||
| // UpdateMigrationPosterID updates all migrated repositories' issues and comments posterID | // UpdateMigrationPosterID updates all migrated repositories' issues and comments posterID | ||||||
| func UpdateMigrationPosterID() { | func UpdateMigrationPosterID(ctx context.Context) { | ||||||
| 	for _, gitService := range structs.SupportedFullGitService { | 	for _, gitService := range structs.SupportedFullGitService { | ||||||
| 		if err := updateMigrationPosterIDByGitService(gitService); err != nil { | 		select { | ||||||
|  | 		case <-ctx.Done(): | ||||||
|  | 			log.Warn("UpdateMigrationPosterID aborted due to shutdown before %s", gitService.Name()) | ||||||
|  | 			return | ||||||
|  | 		default: | ||||||
|  | 		} | ||||||
|  | 		if err := updateMigrationPosterIDByGitService(ctx, gitService); err != nil { | ||||||
| 			log.Error("updateMigrationPosterIDByGitService failed: %v", err) | 			log.Error("updateMigrationPosterIDByGitService failed: %v", err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| func updateMigrationPosterIDByGitService(tp structs.GitServiceType) error { | func updateMigrationPosterIDByGitService(ctx context.Context, tp structs.GitServiceType) error { | ||||||
| 	provider := tp.Name() | 	provider := tp.Name() | ||||||
| 	if len(provider) == 0 { | 	if len(provider) == 0 { | ||||||
| 		return nil | 		return nil | ||||||
| @@ -28,6 +35,13 @@ func updateMigrationPosterIDByGitService(tp structs.GitServiceType) error { | |||||||
| 	const batchSize = 100 | 	const batchSize = 100 | ||||||
| 	var start int | 	var start int | ||||||
| 	for { | 	for { | ||||||
|  | 		select { | ||||||
|  | 		case <-ctx.Done(): | ||||||
|  | 			log.Warn("UpdateMigrationPosterIDByGitService(%s) aborted due to shutdown", tp.Name()) | ||||||
|  | 			return nil | ||||||
|  | 		default: | ||||||
|  | 		} | ||||||
|  |  | ||||||
| 		users, err := models.FindExternalUsersByProvider(models.FindExternalUserOptions{ | 		users, err := models.FindExternalUsersByProvider(models.FindExternalUserOptions{ | ||||||
| 			Provider: provider, | 			Provider: provider, | ||||||
| 			Start:    start, | 			Start:    start, | ||||||
| @@ -38,6 +52,12 @@ func updateMigrationPosterIDByGitService(tp structs.GitServiceType) error { | |||||||
| 		} | 		} | ||||||
|  |  | ||||||
| 		for _, user := range users { | 		for _, user := range users { | ||||||
|  | 			select { | ||||||
|  | 			case <-ctx.Done(): | ||||||
|  | 				log.Warn("UpdateMigrationPosterIDByGitService(%s) aborted due to shutdown", tp.Name()) | ||||||
|  | 				return nil | ||||||
|  | 			default: | ||||||
|  | 			} | ||||||
| 			externalUserID := user.ExternalID | 			externalUserID := user.ExternalID | ||||||
| 			if err := models.UpdateMigrationsByType(tp, externalUserID, user.UserID); err != nil { | 			if err := models.UpdateMigrationsByType(tp, externalUserID, user.UserID); err != nil { | ||||||
| 				log.Error("UpdateMigrationsByType type %s external user id %v to local user id %v failed: %v", tp.Name(), user.ExternalID, user.UserID, err) | 				log.Error("UpdateMigrationsByType type %s external user id %v to local user id %v failed: %v", tp.Name(), user.ExternalID, user.UserID, err) | ||||||
|   | |||||||
| @@ -24,5 +24,5 @@ func listen(server *ssh.Server) { | |||||||
|  |  | ||||||
| // Unused informs our cleanup routine that we will not be using a ssh port | // Unused informs our cleanup routine that we will not be using a ssh port | ||||||
| func Unused() { | func Unused() { | ||||||
| 	graceful.Manager.InformCleanup() | 	graceful.GetManager().InformCleanup() | ||||||
| } | } | ||||||
|   | |||||||
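Unused exists so the SSH module can report that one of the expected listeners will never be created; newGracefulManager seeds createServerWaitGroup with numberOfServersToCreate up front, and InformCleanup is presumably the matching decrement. A sketch under that assumption:

package gracefuldemo

import "sync"

type listenerManager struct {
	createServerWaitGroup sync.WaitGroup // seeded with the expected listener count
}

// InformCleanup records that one expected listener has been dealt
// with, whether it was actually created or deliberately skipped, so
// the startup wait group can eventually drain.
func (g *listenerManager) InformCleanup() {
	g.createServerWaitGroup.Done()
}

Without such a call, a disabled SSH server would leave the wait group counting forever and the cleanup goroutine in start() would never run.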
| @@ -1,4 +1,5 @@ | |||||||
| // Copyright 2016 The Gogs Authors. All rights reserved. | // Copyright 2016 The Gogs Authors. All rights reserved. | ||||||
|  | // Copyright 2019 The Gitea Authors. All rights reserved. | ||||||
| // Use of this source code is governed by a MIT-style | // Use of this source code is governed by a MIT-style | ||||||
| // license that can be found in the LICENSE file. | // license that can be found in the LICENSE file. | ||||||
|  |  | ||||||
| @@ -15,8 +16,9 @@ import ( | |||||||
| // This queue is particularly useful for preventing duplicated task | // This queue is particularly useful for preventing duplicated task | ||||||
| // of same purpose. | // of same purpose. | ||||||
| type UniqueQueue struct { | type UniqueQueue struct { | ||||||
| 	table *StatusTable | 	table  *StatusTable | ||||||
| 	queue chan string | 	queue  chan string | ||||||
|  | 	closed chan struct{} | ||||||
| } | } | ||||||
|  |  | ||||||
| // NewUniqueQueue initializes and returns a new UniqueQueue object. | // NewUniqueQueue initializes and returns a new UniqueQueue object. | ||||||
| @@ -26,11 +28,43 @@ func NewUniqueQueue(queueLength int) *UniqueQueue { | |||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	return &UniqueQueue{ | 	return &UniqueQueue{ | ||||||
| 		table: NewStatusTable(), | 		table:  NewStatusTable(), | ||||||
| 		queue: make(chan string, queueLength), | 		queue:  make(chan string, queueLength), | ||||||
|  | 		closed: make(chan struct{}), | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
|  | // Close closes this queue | ||||||
|  | func (q *UniqueQueue) Close() { | ||||||
|  | 	select { | ||||||
|  | 	case <-q.closed: | ||||||
|  | 	default: | ||||||
|  | 		q.table.lock.Lock() | ||||||
|  | 		select { | ||||||
|  | 		case <-q.closed: | ||||||
|  | 		default: | ||||||
|  | 			close(q.closed) | ||||||
|  | 		} | ||||||
|  | 		q.table.lock.Unlock() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // IsClosed returns a channel that is closed when this Queue is closed | ||||||
|  | func (q *UniqueQueue) IsClosed() <-chan struct{} { | ||||||
|  | 	return q.closed | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // IDs returns the current ids in the pool | ||||||
|  | func (q *UniqueQueue) IDs() []interface{} { | ||||||
|  | 	q.table.lock.Lock() | ||||||
|  | 	defer q.table.lock.Unlock() | ||||||
|  | 	ids := make([]interface{}, 0, len(q.table.pool)) | ||||||
|  | 	for id := range q.table.pool { | ||||||
|  | 		ids = append(ids, id) | ||||||
|  | 	} | ||||||
|  | 	return ids | ||||||
|  | } | ||||||
|  |  | ||||||
| // Queue returns channel of queue for retrieving instances. | // Queue returns channel of queue for retrieving instances. | ||||||
| func (q *UniqueQueue) Queue() <-chan string { | func (q *UniqueQueue) Queue() <-chan string { | ||||||
| 	return q.queue | 	return q.queue | ||||||
| @@ -45,18 +79,22 @@ func (q *UniqueQueue) Exist(id interface{}) bool { | |||||||
| // AddFunc adds new instance to the queue with a custom runnable function, | // AddFunc adds new instance to the queue with a custom runnable function, | ||||||
| // the queue is blocked until the function exits. | // the queue is blocked until the function exits. | ||||||
| func (q *UniqueQueue) AddFunc(id interface{}, fn func()) { | func (q *UniqueQueue) AddFunc(id interface{}, fn func()) { | ||||||
| 	if q.Exist(id) { |  | ||||||
| 		return |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	idStr := com.ToStr(id) | 	idStr := com.ToStr(id) | ||||||
| 	q.table.lock.Lock() | 	q.table.lock.Lock() | ||||||
|  | 	if _, ok := q.table.pool[idStr]; ok { | ||||||
|  | 		q.table.lock.Unlock() | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
| 	q.table.pool[idStr] = struct{}{} | 	q.table.pool[idStr] = struct{}{} | ||||||
| 	if fn != nil { | 	if fn != nil { | ||||||
| 		fn() | 		fn() | ||||||
| 	} | 	} | ||||||
| 	q.table.lock.Unlock() | 	q.table.lock.Unlock() | ||||||
| 	q.queue <- idStr | 	select { | ||||||
|  | 	case <-q.closed: | ||||||
|  | 		return | ||||||
|  | 	case q.queue <- idStr: | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| // Add adds new instance to the queue. | // Add adds new instance to the queue. | ||||||
|   | |||||||
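Close on the UniqueQueue is a check-lock-check dance: a lock-free fast path when the queue is already closed, then a second check under the table lock so two concurrent calls cannot both close the channel, which would panic. The idiom in isolation:

package gracefuldemo

import "sync"

type closer struct {
	mu     sync.Mutex
	closed chan struct{} // make(chan struct{}) at construction
}

// Close is safe to call any number of times from any goroutine.
func (c *closer) Close() {
	select {
	case <-c.closed:
		return // already closed: cheap, lock-free exit
	default:
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	select {
	case <-c.closed:
		// Another goroutine won the race while we took the lock.
	default:
		close(c.closed) // exactly one caller ever reaches this line
	}
}

sync.Once would do the same job with less ceremony; reusing the table lock here has the side effect of ordering Close against AddFunc, which holds the same lock.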
| @@ -5,6 +5,7 @@ | |||||||
| package webhook | package webhook | ||||||
|  |  | ||||||
| import ( | import ( | ||||||
|  | 	"context" | ||||||
| 	"crypto/tls" | 	"crypto/tls" | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 	"io/ioutil" | 	"io/ioutil" | ||||||
| @@ -16,6 +17,7 @@ import ( | |||||||
| 	"time" | 	"time" | ||||||
|  |  | ||||||
| 	"code.gitea.io/gitea/models" | 	"code.gitea.io/gitea/models" | ||||||
|  | 	"code.gitea.io/gitea/modules/graceful" | ||||||
| 	"code.gitea.io/gitea/modules/log" | 	"code.gitea.io/gitea/modules/log" | ||||||
| 	"code.gitea.io/gitea/modules/setting" | 	"code.gitea.io/gitea/modules/setting" | ||||||
| 	"github.com/gobwas/glob" | 	"github.com/gobwas/glob" | ||||||
| @@ -145,8 +147,14 @@ func Deliver(t *models.HookTask) error { | |||||||
| } | } | ||||||
|  |  | ||||||
| // DeliverHooks checks and delivers undelivered hooks. | // DeliverHooks checks and delivers undelivered hooks. | ||||||
| // TODO: shoot more hooks at same time. | // FIXME: graceful: This would likely benefit from either a worker pool with dummy queue | ||||||
| func DeliverHooks() { | // or a full queue. Then more hooks could be sent at the same time. | ||||||
|  | func DeliverHooks(ctx context.Context) { | ||||||
|  | 	select { | ||||||
|  | 	case <-ctx.Done(): | ||||||
|  | 		return | ||||||
|  | 	default: | ||||||
|  | 	} | ||||||
| 	tasks, err := models.FindUndeliveredHookTasks() | 	tasks, err := models.FindUndeliveredHookTasks() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		log.Error("DeliverHooks: %v", err) | 		log.Error("DeliverHooks: %v", err) | ||||||
| @@ -155,33 +163,50 @@ func DeliverHooks() { | |||||||
|  |  | ||||||
| 	// Update hook task status. | 	// Update hook task status. | ||||||
| 	for _, t := range tasks { | 	for _, t := range tasks { | ||||||
|  | 		select { | ||||||
|  | 		case <-ctx.Done(): | ||||||
|  | 			return | ||||||
|  | 		default: | ||||||
|  | 		} | ||||||
| 		if err = Deliver(t); err != nil { | 		if err = Deliver(t); err != nil { | ||||||
| 			log.Error("deliver: %v", err) | 			log.Error("deliver: %v", err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	// Start listening on new hook requests. | 	// Start listening on new hook requests. | ||||||
| 	for repoIDStr := range hookQueue.Queue() { | 	for { | ||||||
| 		log.Trace("DeliverHooks [repo_id: %v]", repoIDStr) | 		select { | ||||||
| 		hookQueue.Remove(repoIDStr) | 		case <-ctx.Done(): | ||||||
|  | 			hookQueue.Close() | ||||||
|  | 			return | ||||||
|  | 		case repoIDStr := <-hookQueue.Queue(): | ||||||
|  | 			log.Trace("DeliverHooks [repo_id: %v]", repoIDStr) | ||||||
|  | 			hookQueue.Remove(repoIDStr) | ||||||
|  |  | ||||||
| 		repoID, err := com.StrTo(repoIDStr).Int64() | 			repoID, err := com.StrTo(repoIDStr).Int64() | ||||||
| 		if err != nil { | 			if err != nil { | ||||||
| 			log.Error("Invalid repo ID: %s", repoIDStr) | 				log.Error("Invalid repo ID: %s", repoIDStr) | ||||||
| 			continue | 				continue | ||||||
| 		} | 			} | ||||||
|  |  | ||||||
| 		tasks, err := models.FindRepoUndeliveredHookTasks(repoID) | 			tasks, err := models.FindRepoUndeliveredHookTasks(repoID) | ||||||
| 		if err != nil { | 			if err != nil { | ||||||
| 			log.Error("Get repository [%d] hook tasks: %v", repoID, err) | 				log.Error("Get repository [%d] hook tasks: %v", repoID, err) | ||||||
| 			continue | 				continue | ||||||
| 		} | 			} | ||||||
| 		for _, t := range tasks { | 			for _, t := range tasks { | ||||||
| 			if err = Deliver(t); err != nil { | 				select { | ||||||
| 				log.Error("deliver: %v", err) | 				case <-ctx.Done(): | ||||||
|  | 					return | ||||||
|  | 				default: | ||||||
|  | 				} | ||||||
|  | 				if err = Deliver(t); err != nil { | ||||||
|  | 					log.Error("deliver: %v", err) | ||||||
|  | 				} | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| } | } | ||||||
|  |  | ||||||
| var ( | var ( | ||||||
| @@ -234,5 +259,5 @@ func InitDeliverHooks() { | |||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	go DeliverHooks() | 	go graceful.GetManager().RunWithShutdownContext(DeliverHooks) | ||||||
| } | } | ||||||
|   | |||||||
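InitDeliverHooks now launches DeliverHooks through RunWithShutdownContext, which evidently takes a func(context.Context) and supplies a context that dies when shutdown begins. One way such a helper could look, under that assumption (a sketch, not the actual manager code):

package gracefuldemo

import "context"

type ctxManager struct {
	shutdown chan struct{} // closed when shutdown begins
}

// RunWithShutdownContext calls f with a context that is cancelled as
// soon as the manager starts shutting down, so every select on
// ctx.Done() inside f fires at the right moment.
func (m *ctxManager) RunWithShutdownContext(f func(context.Context)) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		select {
		case <-m.shutdown:
			cancel()
		case <-ctx.Done():
			// f returned and the deferred cancel fired; stop watching.
		}
	}()
	f(ctx)
}

This is why DeliverHooks, SyncMirrors and TestPullRequests could all change their signatures to accept a plain context without knowing anything about the graceful package themselves.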
| @@ -19,6 +19,7 @@ import ( | |||||||
| 	"code.gitea.io/gitea/modules/context" | 	"code.gitea.io/gitea/modules/context" | ||||||
| 	"code.gitea.io/gitea/modules/cron" | 	"code.gitea.io/gitea/modules/cron" | ||||||
| 	"code.gitea.io/gitea/modules/git" | 	"code.gitea.io/gitea/modules/git" | ||||||
|  | 	"code.gitea.io/gitea/modules/graceful" | ||||||
| 	"code.gitea.io/gitea/modules/log" | 	"code.gitea.io/gitea/modules/log" | ||||||
| 	"code.gitea.io/gitea/modules/process" | 	"code.gitea.io/gitea/modules/process" | ||||||
| 	"code.gitea.io/gitea/modules/setting" | 	"code.gitea.io/gitea/modules/setting" | ||||||
| @@ -171,10 +172,10 @@ func Dashboard(ctx *context.Context) { | |||||||
| 			err = models.ReinitMissingRepositories() | 			err = models.ReinitMissingRepositories() | ||||||
| 		case syncExternalUsers: | 		case syncExternalUsers: | ||||||
| 			success = ctx.Tr("admin.dashboard.sync_external_users_started") | 			success = ctx.Tr("admin.dashboard.sync_external_users_started") | ||||||
| 			go models.SyncExternalUsers() | 			go graceful.GetManager().RunWithShutdownContext(models.SyncExternalUsers) | ||||||
| 		case gitFsck: | 		case gitFsck: | ||||||
| 			success = ctx.Tr("admin.dashboard.git_fsck_started") | 			success = ctx.Tr("admin.dashboard.git_fsck_started") | ||||||
| 			go models.GitFsck() | 			go graceful.GetManager().RunWithShutdownContext(models.GitFsck) | ||||||
| 		case deleteGeneratedRepositoryAvatars: | 		case deleteGeneratedRepositoryAvatars: | ||||||
| 			success = ctx.Tr("admin.dashboard.delete_generated_repository_avatars_success") | 			success = ctx.Tr("admin.dashboard.delete_generated_repository_avatars_success") | ||||||
| 			err = models.RemoveRandomAvatars() | 			err = models.RemoveRandomAvatars() | ||||||
|   | |||||||
| @@ -5,6 +5,7 @@ | |||||||
| package routers | package routers | ||||||
|  |  | ||||||
| import ( | import ( | ||||||
|  | 	"context" | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"time" | 	"time" | ||||||
|  |  | ||||||
| @@ -53,11 +54,11 @@ func NewServices() { | |||||||
| } | } | ||||||
|  |  | ||||||
| // In case of problems connecting to DB, retry connection. Eg, PGSQL in Docker Container on Synology | // In case of problems connecting to DB, retry connection. Eg, PGSQL in Docker Container on Synology | ||||||
| func initDBEngine() (err error) { | func initDBEngine(ctx context.Context) (err error) { | ||||||
| 	log.Info("Beginning ORM engine initialization.") | 	log.Info("Beginning ORM engine initialization.") | ||||||
| 	for i := 0; i < setting.Database.DBConnectRetries; i++ { | 	for i := 0; i < setting.Database.DBConnectRetries; i++ { | ||||||
| 		log.Info("ORM engine initialization attempt #%d/%d...", i+1, setting.Database.DBConnectRetries) | 		log.Info("ORM engine initialization attempt #%d/%d...", i+1, setting.Database.DBConnectRetries) | ||||||
| 		if err = models.NewEngine(migrations.Migrate); err == nil { | 		if err = models.NewEngine(ctx, migrations.Migrate); err == nil { | ||||||
| 			break | 			break | ||||||
| 		} else if i == setting.Database.DBConnectRetries-1 { | 		} else if i == setting.Database.DBConnectRetries-1 { | ||||||
| 			return err | 			return err | ||||||
| @@ -71,9 +72,9 @@ func initDBEngine() (err error) { | |||||||
| } | } | ||||||
|  |  | ||||||
| // GlobalInit is for global configuration reload-able. | // GlobalInit is for global configuration reload-able. | ||||||
| func GlobalInit() { | func GlobalInit(ctx context.Context) { | ||||||
| 	setting.NewContext() | 	setting.NewContext() | ||||||
| 	if err := git.Init(); err != nil { | 	if err := git.Init(ctx); err != nil { | ||||||
| 		log.Fatal("Git module init failed: %v", err) | 		log.Fatal("Git module init failed: %v", err) | ||||||
| 	} | 	} | ||||||
| 	setting.CheckLFSVersion() | 	setting.CheckLFSVersion() | ||||||
| @@ -88,7 +89,7 @@ func GlobalInit() { | |||||||
| 		highlight.NewContext() | 		highlight.NewContext() | ||||||
| 		external.RegisterParsers() | 		external.RegisterParsers() | ||||||
| 		markup.Init() | 		markup.Init() | ||||||
| 		if err := initDBEngine(); err == nil { | 		if err := initDBEngine(ctx); err == nil { | ||||||
| 			log.Info("ORM engine initialization successful!") | 			log.Info("ORM engine initialization successful!") | ||||||
| 		} else { | 		} else { | ||||||
| 			log.Fatal("ORM engine initialization failed: %v", err) | 			log.Fatal("ORM engine initialization failed: %v", err) | ||||||
|   | |||||||
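initDBEngine now threads the context through to models.NewEngine, though the retry sleep between attempts is not shown being interrupted. A fully shutdown-aware retry loop would abort the wait as well, roughly like this (a sketch, not what the hunk above does):

package gracefuldemo

import (
	"context"
	"time"
)

// retryWithContext runs attempt up to retries times with backoff
// between tries, giving up early if ctx is cancelled.
func retryWithContext(ctx context.Context, retries int, backoff time.Duration, attempt func(context.Context) error) error {
	var err error
	for i := 0; i < retries; i++ {
		if err = attempt(ctx); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(backoff):
			// Backoff elapsed; try the next attempt.
		}
	}
	return err
}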
| @@ -16,6 +16,7 @@ import ( | |||||||
| 	"code.gitea.io/gitea/modules/base" | 	"code.gitea.io/gitea/modules/base" | ||||||
| 	"code.gitea.io/gitea/modules/context" | 	"code.gitea.io/gitea/modules/context" | ||||||
| 	"code.gitea.io/gitea/modules/generate" | 	"code.gitea.io/gitea/modules/generate" | ||||||
|  | 	"code.gitea.io/gitea/modules/graceful" | ||||||
| 	"code.gitea.io/gitea/modules/log" | 	"code.gitea.io/gitea/modules/log" | ||||||
| 	"code.gitea.io/gitea/modules/setting" | 	"code.gitea.io/gitea/modules/setting" | ||||||
| 	"code.gitea.io/gitea/modules/user" | 	"code.gitea.io/gitea/modules/user" | ||||||
| @@ -351,7 +352,7 @@ func InstallPost(ctx *context.Context, form auth.InstallForm) { | |||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	GlobalInit() | 	GlobalInit(graceful.GetManager().HammerContext()) | ||||||
|  |  | ||||||
| 	// Create admin account | 	// Create admin account | ||||||
| 	if len(form.AdminName) > 0 { | 	if len(form.AdminName) > 0 { | ||||||
|   | |||||||
| @@ -5,11 +5,14 @@ | |||||||
| package mirror | package mirror | ||||||
|  |  | ||||||
| import ( | import ( | ||||||
|  | 	"context" | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 	"net/url" | 	"net/url" | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"time" | 	"time" | ||||||
|  |  | ||||||
|  | 	"code.gitea.io/gitea/modules/graceful" | ||||||
|  |  | ||||||
| 	"code.gitea.io/gitea/models" | 	"code.gitea.io/gitea/models" | ||||||
| 	"code.gitea.io/gitea/modules/cache" | 	"code.gitea.io/gitea/modules/cache" | ||||||
| 	"code.gitea.io/gitea/modules/git" | 	"code.gitea.io/gitea/modules/git" | ||||||
| @@ -294,29 +297,38 @@ func Password(m *models.Mirror) string { | |||||||
| } | } | ||||||
|  |  | ||||||
| // Update checks and updates mirror repositories. | // Update checks and updates mirror repositories. | ||||||
| func Update() { | func Update(ctx context.Context) { | ||||||
| 	log.Trace("Doing: Update") | 	log.Trace("Doing: Update") | ||||||
|  |  | ||||||
| 	if err := models.MirrorsIterate(func(idx int, bean interface{}) error { | 	if err := models.MirrorsIterate(func(idx int, bean interface{}) error { | ||||||
| 		m := bean.(*models.Mirror) | 		m := bean.(*models.Mirror) | ||||||
| 		if m.Repo == nil { | 		if m.Repo == nil { | ||||||
| 			log.Error("Disconnected mirror repository found: %d", m.ID) | 			log.Error("Disconnected mirror repository found: %d", m.ID) | ||||||
| 			return nil | 			return nil | ||||||
| 		} | 		} | ||||||
|  | 		select { | ||||||
| 		mirrorQueue.Add(m.RepoID) | 		case <-ctx.Done(): | ||||||
| 		return nil | 			return fmt.Errorf("Aborted due to shutdown") | ||||||
|  | 		default: | ||||||
|  | 			mirrorQueue.Add(m.RepoID) | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
| 	}); err != nil { | 	}); err != nil { | ||||||
| 		log.Error("Update: %v", err) | 		log.Error("Update: %v", err) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| // SyncMirrors checks and syncs mirrors. | // SyncMirrors checks and syncs mirrors. | ||||||
| // TODO: sync more mirrors at same time. | // FIXME: graceful: this should be a persistable queue | ||||||
| func SyncMirrors() { | func SyncMirrors(ctx context.Context) { | ||||||
| 	// Start listening on new sync requests. | 	// Start listening on new sync requests. | ||||||
| 	for repoID := range mirrorQueue.Queue() { | 	for { | ||||||
| 		syncMirror(repoID) | 		select { | ||||||
|  | 		case <-ctx.Done(): | ||||||
|  | 			mirrorQueue.Close() | ||||||
|  | 			return | ||||||
|  | 		case repoID := <-mirrorQueue.Queue(): | ||||||
|  | 			syncMirror(repoID) | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| @@ -416,7 +428,7 @@ func syncMirror(repoID string) { | |||||||
|  |  | ||||||
| // InitSyncMirrors initializes a go routine to sync the mirrors | // InitSyncMirrors initializes a go routine to sync the mirrors | ||||||
| func InitSyncMirrors() { | func InitSyncMirrors() { | ||||||
| 	go SyncMirrors() | 	go graceful.GetManager().RunWithShutdownContext(SyncMirrors) | ||||||
| } | } | ||||||
|  |  | ||||||
| // StartToMirror adds repoID to mirror queue | // StartToMirror adds repoID to mirror queue | ||||||
|   | |||||||
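Update aborts the MirrorsIterate pass by returning an error from the callback, the usual way to stop an iterator that takes no context of its own. The shape of that pattern, sketched with an illustrative iterator:

package gracefuldemo

import (
	"context"
	"errors"
)

var errShutdown = errors.New("aborted due to shutdown")

// iterate calls fn for each id and stops at the first error,
// standing in for a database iterator like MirrorsIterate.
func iterate(ids []int64, fn func(int64) error) error {
	for _, id := range ids {
		if err := fn(id); err != nil {
			return err
		}
	}
	return nil
}

// queueAll enqueues ids until ctx is cancelled, using the callback's
// error return as the only way to break out of the iterator early.
func queueAll(ctx context.Context, ids []int64, enqueue func(int64)) error {
	return iterate(ids, func(id int64) error {
		select {
		case <-ctx.Done():
			return errShutdown
		default:
			enqueue(id)
			return nil
		}
	})
}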
| @@ -6,6 +6,7 @@ | |||||||
| package pull | package pull | ||||||
|  |  | ||||||
| import ( | import ( | ||||||
|  | 	"context" | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 	"io/ioutil" | 	"io/ioutil" | ||||||
| 	"os" | 	"os" | ||||||
| @@ -16,6 +17,7 @@ import ( | |||||||
|  |  | ||||||
| 	"code.gitea.io/gitea/models" | 	"code.gitea.io/gitea/models" | ||||||
| 	"code.gitea.io/gitea/modules/git" | 	"code.gitea.io/gitea/modules/git" | ||||||
|  | 	"code.gitea.io/gitea/modules/graceful" | ||||||
| 	"code.gitea.io/gitea/modules/log" | 	"code.gitea.io/gitea/modules/log" | ||||||
| 	"code.gitea.io/gitea/modules/setting" | 	"code.gitea.io/gitea/modules/setting" | ||||||
| 	"code.gitea.io/gitea/modules/sync" | 	"code.gitea.io/gitea/modules/sync" | ||||||
| @@ -151,65 +153,53 @@ func manuallyMerged(pr *models.PullRequest) bool { | |||||||
|  |  | ||||||
| // TestPullRequests checks and tests untested patches of pull requests. | // TestPullRequests checks and tests untested patches of pull requests. | ||||||
| // TODO: test more pull requests at the same time. | // TODO: test more pull requests at the same time. | ||||||
| func TestPullRequests() { | func TestPullRequests(ctx context.Context) { | ||||||
| 	prs, err := models.GetPullRequestsByCheckStatus(models.PullRequestStatusChecking) |  | ||||||
| 	if err != nil { |  | ||||||
| 		log.Error("Find Checking PRs: %v", err) |  | ||||||
| 		return |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	var checkedPRs = make(map[int64]struct{}) | 	go func() { | ||||||
|  | 		prs, err := models.GetPullRequestIDsByCheckStatus(models.PullRequestStatusChecking) | ||||||
| 	// Update pull request status. | 		if err != nil { | ||||||
| 	for _, pr := range prs { | 			log.Error("Find Checking PRs: %v", err) | ||||||
| 		checkedPRs[pr.ID] = struct{}{} | 			return | ||||||
| 		if err := pr.GetBaseRepo(); err != nil { |  | ||||||
| 			log.Error("GetBaseRepo: %v", err) |  | ||||||
| 			continue |  | ||||||
| 		} | 		} | ||||||
| 		if manuallyMerged(pr) { | 		for _, prID := range prs { | ||||||
| 			continue | 			select { | ||||||
|  | 			case <-ctx.Done(): | ||||||
|  | 				return | ||||||
|  | 			default: | ||||||
|  | 				pullRequestQueue.Add(prID) | ||||||
|  | 			} | ||||||
| 		} | 		} | ||||||
| 		if err := TestPatch(pr); err != nil { | 	}() | ||||||
| 			log.Error("testPatch: %v", err) |  | ||||||
| 			continue |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		checkAndUpdateStatus(pr) |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	// Start listening on new test requests. | 	// Start listening on new test requests. | ||||||
| 	for prID := range pullRequestQueue.Queue() { | 	for { | ||||||
| 		log.Trace("TestPullRequests[%v]: processing test task", prID) | 		select { | ||||||
| 		pullRequestQueue.Remove(prID) | 		case prID := <-pullRequestQueue.Queue(): | ||||||
|  | 			log.Trace("TestPullRequests[%v]: processing test task", prID) | ||||||
|  | 			pullRequestQueue.Remove(prID) | ||||||
|  |  | ||||||
| 		id := com.StrTo(prID).MustInt64() | 			id := com.StrTo(prID).MustInt64() | ||||||
| 		if _, ok := checkedPRs[id]; ok { |  | ||||||
| 			continue |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		pr, err := models.GetPullRequestByID(id) | 			pr, err := models.GetPullRequestByID(id) | ||||||
| 		if err != nil { | 			if err != nil { | ||||||
| 			log.Error("GetPullRequestByID[%s]: %v", prID, err) | 				log.Error("GetPullRequestByID[%s]: %v", prID, err) | ||||||
| 			continue | 				continue | ||||||
| 		} else if manuallyMerged(pr) { | 			} else if manuallyMerged(pr) { | ||||||
| 			continue | 				continue | ||||||
|  | 			} else if err = TestPatch(pr); err != nil { | ||||||
|  | 				log.Error("testPatch[%d]: %v", pr.ID, err) | ||||||
|  | 				continue | ||||||
|  | 			} | ||||||
|  | 			checkAndUpdateStatus(pr) | ||||||
|  | 		case <-ctx.Done(): | ||||||
|  | 			pullRequestQueue.Close() | ||||||
|  | 			log.Info("PID: %d Pull Request testing shut down", os.Getpid()) | ||||||
|  | 			return | ||||||
| 		} | 		} | ||||||
| 		pr.Status = models.PullRequestStatusChecking |  | ||||||
| 		if err := pr.Update(); err != nil { |  | ||||||
| 			log.Error("testPatch[%d]: Unable to update status to Checking Status %v", pr.ID, err) |  | ||||||
| 			continue |  | ||||||
| 		} |  | ||||||
| 		if err = TestPatch(pr); err != nil { |  | ||||||
| 			log.Error("testPatch[%d]: %v", pr.ID, err) |  | ||||||
| 			continue |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		checkAndUpdateStatus(pr) |  | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| // Init runs the task queue to test all the checking status pull requests | // Init runs the task queue to test all the checking status pull requests | ||||||
| func Init() { | func Init() { | ||||||
| 	go TestPullRequests() | 	go graceful.GetManager().RunWithShutdownContext(TestPullRequests) | ||||||
| } | } | ||||||
|   | |||||||
| @@ -5,12 +5,14 @@ | |||||||
| package pull | package pull | ||||||
|  |  | ||||||
| import ( | import ( | ||||||
|  | 	"context" | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 	"os" | 	"os" | ||||||
| 	"path" | 	"path" | ||||||
|  |  | ||||||
| 	"code.gitea.io/gitea/models" | 	"code.gitea.io/gitea/models" | ||||||
| 	"code.gitea.io/gitea/modules/git" | 	"code.gitea.io/gitea/modules/git" | ||||||
|  | 	"code.gitea.io/gitea/modules/graceful" | ||||||
| 	"code.gitea.io/gitea/modules/log" | 	"code.gitea.io/gitea/modules/log" | ||||||
| 	"code.gitea.io/gitea/modules/notification" | 	"code.gitea.io/gitea/modules/notification" | ||||||
| 	issue_service "code.gitea.io/gitea/services/issue" | 	issue_service "code.gitea.io/gitea/services/issue" | ||||||
| @@ -54,6 +56,7 @@ func checkForInvalidation(requests models.PullRequestList, repoID int64, doer *m | |||||||
| 		return fmt.Errorf("git.OpenRepository: %v", err) | 		return fmt.Errorf("git.OpenRepository: %v", err) | ||||||
| 	} | 	} | ||||||
| 	go func() { | 	go func() { | ||||||
|  | 		// FIXME: graceful: We need to tell the manager we're doing something... | ||||||
| 		err := requests.InvalidateCodeComments(doer, gitRepo, branch) | 		err := requests.InvalidateCodeComments(doer, gitRepo, branch) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			log.Error("PullRequestList.InvalidateCodeComments: %v", err) | 			log.Error("PullRequestList.InvalidateCodeComments: %v", err) | ||||||
| @@ -79,39 +82,45 @@ func addHeadRepoTasks(prs []*models.PullRequest) { | |||||||
| // and generate new patch for testing as needed. | // and generate new patch for testing as needed. | ||||||
| func AddTestPullRequestTask(doer *models.User, repoID int64, branch string, isSync bool) { | func AddTestPullRequestTask(doer *models.User, repoID int64, branch string, isSync bool) { | ||||||
| 	log.Trace("AddTestPullRequestTask [head_repo_id: %d, head_branch: %s]: finding pull requests", repoID, branch) | 	log.Trace("AddTestPullRequestTask [head_repo_id: %d, head_branch: %s]: finding pull requests", repoID, branch) | ||||||
| 	prs, err := models.GetUnmergedPullRequestsByHeadInfo(repoID, branch) | 	graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) { | ||||||
| 	if err != nil { | 		// There is no sensible way to shut this down ":-(" | ||||||
| 		log.Error("Find pull requests [head_repo_id: %d, head_branch: %s]: %v", repoID, branch, err) | 		// If you don't let it run all the way then you will lose data | ||||||
| 		return | 		// FIXME: graceful: AddTestPullRequestTask needs to become a queue! | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	if isSync { | 		prs, err := models.GetUnmergedPullRequestsByHeadInfo(repoID, branch) | ||||||
| 		requests := models.PullRequestList(prs) | 		if err != nil { | ||||||
| 		if err = requests.LoadAttributes(); err != nil { | 			log.Error("Find pull requests [head_repo_id: %d, head_branch: %s]: %v", repoID, branch, err) | ||||||
| 			log.Error("PullRequestList.LoadAttributes: %v", err) | 			return | ||||||
| 		} | 		} | ||||||
| 		if invalidationErr := checkForInvalidation(requests, repoID, doer, branch); invalidationErr != nil { |  | ||||||
| 			log.Error("checkForInvalidation: %v", invalidationErr) | 		if isSync { | ||||||
| 		} | 			requests := models.PullRequestList(prs) | ||||||
| 		if err == nil { | 			if err = requests.LoadAttributes(); err != nil { | ||||||
| 			for _, pr := range prs { | 				log.Error("PullRequestList.LoadAttributes: %v", err) | ||||||
| 				pr.Issue.PullRequest = pr | 			} | ||||||
| 				notification.NotifyPullRequestSynchronized(doer, pr) | 			if invalidationErr := checkForInvalidation(requests, repoID, doer, branch); invalidationErr != nil { | ||||||
|  | 				log.Error("checkForInvalidation: %v", invalidationErr) | ||||||
|  | 			} | ||||||
|  | 			if err == nil { | ||||||
|  | 				for _, pr := range prs { | ||||||
|  | 					pr.Issue.PullRequest = pr | ||||||
|  | 					notification.NotifyPullRequestSynchronized(doer, pr) | ||||||
|  | 				} | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	addHeadRepoTasks(prs) | 		addHeadRepoTasks(prs) | ||||||
|  |  | ||||||
| 	log.Trace("AddTestPullRequestTask [base_repo_id: %d, base_branch: %s]: finding pull requests", repoID, branch) | 		log.Trace("AddTestPullRequestTask [base_repo_id: %d, base_branch: %s]: finding pull requests", repoID, branch) | ||||||
| 	prs, err = models.GetUnmergedPullRequestsByBaseInfo(repoID, branch) | 		prs, err = models.GetUnmergedPullRequestsByBaseInfo(repoID, branch) | ||||||
| 	if err != nil { | 		if err != nil { | ||||||
| 		log.Error("Find pull requests [base_repo_id: %d, base_branch: %s]: %v", repoID, branch, err) | 			log.Error("Find pull requests [base_repo_id: %d, base_branch: %s]: %v", repoID, branch, err) | ||||||
| 		return | 			return | ||||||
| 	} | 		} | ||||||
| 	for _, pr := range prs { | 		for _, pr := range prs { | ||||||
| 		AddToTaskQueue(pr) | 			AddToTaskQueue(pr) | ||||||
| 	} | 		} | ||||||
|  | 	}) | ||||||
| } | } | ||||||
|  |  | ||||||
| // PushToBaseRepo pushes commits from branches of head repository to | // PushToBaseRepo pushes commits from branches of head repository to | ||||||
|   | |||||||