diff --git a/models/models.go b/models/models.go
index 3b3d8ec30..f746f680a 100644
--- a/models/models.go
+++ b/models/models.go
@@ -368,3 +368,9 @@ func DumpDatabase(filePath string, dbType string) error {
 	}
 	return x.DumpTablesToFile(tbs, filePath)
 }
+
+// MaxBatchInsertSize returns the table's max batch insert size
+func MaxBatchInsertSize(bean interface{}) int {
+	t := x.TableInfo(bean)
+	return 999 / len(t.ColumnsSeq())
+}
diff --git a/modules/migrations/base/uploader.go b/modules/migrations/base/uploader.go
index 9d2fd2af6..8c1d64922 100644
--- a/modules/migrations/base/uploader.go
+++ b/modules/migrations/base/uploader.go
@@ -7,6 +7,7 @@ package base
 
 // Uploader uploads all the informations of one repository
 type Uploader interface {
+	MaxBatchInsertSize(tp string) int
 	CreateRepo(repo *Repository, opts MigrateOptions) error
 	CreateMilestones(milestones ...*Milestone) error
 	CreateReleases(releases ...*Release) error
diff --git a/modules/migrations/gitea.go b/modules/migrations/gitea.go
index bfc5e4903..1df824c94 100644
--- a/modules/migrations/gitea.go
+++ b/modules/migrations/gitea.go
@@ -53,6 +53,25 @@ func NewGiteaLocalUploader(doer *models.User, repoOwner, repoName string) *Gitea
 	}
 }
 
+// MaxBatchInsertSize returns the table's max batch insert size
+func (g *GiteaLocalUploader) MaxBatchInsertSize(tp string) int {
+	switch tp {
+	case "issue":
+		return models.MaxBatchInsertSize(new(models.Issue))
+	case "comment":
+		return models.MaxBatchInsertSize(new(models.Comment))
+	case "milestone":
+		return models.MaxBatchInsertSize(new(models.Milestone))
+	case "label":
+		return models.MaxBatchInsertSize(new(models.Label))
+	case "release":
+		return models.MaxBatchInsertSize(new(models.Release))
+	case "pullrequest":
+		return models.MaxBatchInsertSize(new(models.PullRequest))
+	}
+	return 10
+}
+
 // CreateRepo creates a repository
 func (g *GiteaLocalUploader) CreateRepo(repo *base.Repository, opts base.MigrateOptions) error {
 	owner, err := models.GetUserByName(g.repoOwner)
diff --git a/modules/migrations/migrate.go b/modules/migrations/migrate.go
index ce8f9b802..5adf7f805 100644
--- a/modules/migrations/migrate.go
+++ b/modules/migrations/migrate.go
@@ -91,8 +91,16 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 			return err
 		}
 
-		if err := uploader.CreateMilestones(milestones...); err != nil {
-			return err
+		msBatchSize := uploader.MaxBatchInsertSize("milestone")
+		for len(milestones) > 0 {
+			if len(milestones) < msBatchSize {
+				msBatchSize = len(milestones)
+			}
+
+			if err := uploader.CreateMilestones(milestones[:msBatchSize]...); err != nil {
+				return err
+			}
+			milestones = milestones[msBatchSize:]
 		}
 	}
 
@@ -103,8 +111,16 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 			return err
 		}
 
-		if err := uploader.CreateLabels(labels...); err != nil {
-			return err
+		lbBatchSize := uploader.MaxBatchInsertSize("label")
+		for len(labels) > 0 {
+			if len(labels) < lbBatchSize {
+				lbBatchSize = len(labels)
+			}
+
+			if err := uploader.CreateLabels(labels[:lbBatchSize]...); err != nil {
+				return err
+			}
+			labels = labels[lbBatchSize:]
 		}
 	}
 
@@ -115,15 +131,27 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 			return err
 		}
 
-		if err := uploader.CreateReleases(releases...); err != nil {
-			return err
+		relBatchSize := uploader.MaxBatchInsertSize("release")
+		for len(releases) > 0 {
+			if len(releases) < relBatchSize {
+				relBatchSize = len(releases)
+			}
+
+			if err := uploader.CreateReleases(releases[:relBatchSize]...); err != nil {
+				return err
+			}
+			releases = releases[relBatchSize:]
 		}
 	}
 
+	var commentBatchSize = uploader.MaxBatchInsertSize("comment")
+
 	if opts.Issues {
 		log.Trace("migrating issues and comments")
+		var issueBatchSize = uploader.MaxBatchInsertSize("issue")
+
 		for i := 1; ; i++ {
-			issues, isEnd, err := downloader.GetIssues(i, 100)
+			issues, isEnd, err := downloader.GetIssues(i, issueBatchSize)
 			if err != nil {
 				return err
 			}
@@ -141,7 +169,7 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 				continue
 			}
 
-			var allComments = make([]*base.Comment, 0, 100)
+			var allComments = make([]*base.Comment, 0, commentBatchSize)
 			for _, issue := range issues {
 				comments, err := downloader.GetComments(issue.Number)
 				if err != nil {
@@ -154,11 +182,12 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 				}
 				allComments = append(allComments, comments...)
 
-				if len(allComments) >= 100 {
-					if err := uploader.CreateComments(allComments...); err != nil {
+				if len(allComments) >= commentBatchSize {
+					if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
 						return err
 					}
-					allComments = make([]*base.Comment, 0, 100)
+
+					allComments = allComments[commentBatchSize:]
 				}
 			}
 
@@ -176,8 +205,9 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 
 	if opts.PullRequests {
 		log.Trace("migrating pull requests and comments")
+		var prBatchSize = uploader.MaxBatchInsertSize("pullrequest")
 		for i := 1; ; i++ {
-			prs, err := downloader.GetPullRequests(i, 100)
+			prs, err := downloader.GetPullRequests(i, prBatchSize)
 			if err != nil {
 				return err
 			}
@@ -195,7 +225,7 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 				continue
 			}
 
-			var allComments = make([]*base.Comment, 0, 100)
+			var allComments = make([]*base.Comment, 0, commentBatchSize)
 			for _, pr := range prs {
 				comments, err := downloader.GetComments(pr.Number)
 				if err != nil {
@@ -209,11 +239,11 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 
 				allComments = append(allComments, comments...)
 
-				if len(allComments) >= 100 {
-					if err := uploader.CreateComments(allComments...); err != nil {
+				if len(allComments) >= commentBatchSize {
+					if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
 						return err
 					}
-					allComments = make([]*base.Comment, 0, 100)
+					allComments = allComments[commentBatchSize:]
 				}
 			}
 			if len(allComments) > 0 {
@@ -222,7 +252,7 @@ func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts
 				}
 			}
 
-			if len(prs) < 100 {
+			if len(prs) < prBatchSize {
 				break
 			}
 		}
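
Note on the sizing formula: `MaxBatchInsertSize` divides 999, SQLite's default per-statement bound-parameter limit, by the table's column count, so a single multi-row INSERT never binds more variables than the most restrictive supported database allows. The snippet below is a minimal, self-contained sketch of the paging loop the hunks above repeat for milestones, labels, releases and comments; `maxBatchInsertSize` and `insertBatch` are hypothetical stand-ins for illustration, not Gitea's API.

```go
// Minimal sketch of the batching pattern used in the patch, with
// hypothetical names; it is not Gitea's actual code.
package main

import "fmt"

// maxBatchInsertSize mirrors the idea behind models.MaxBatchInsertSize:
// SQLite's default limit of 999 bound parameters per statement, divided
// by the number of columns a single row consumes.
func maxBatchInsertSize(columns int) int {
	return 999 / columns
}

// insertBatch stands in for an uploader.Create* call.
func insertBatch(items []string) error {
	fmt.Printf("inserting %d items\n", len(items))
	return nil
}

func main() {
	items := make([]string, 250)
	for i := range items {
		items[i] = fmt.Sprintf("item-%d", i)
	}

	// Same loop shape as the milestone/label/release blocks in migrate.go:
	// shrink the batch size on the last partial batch, insert the head of
	// the slice, then advance past it.
	batchSize := maxBatchInsertSize(10) // e.g. a 10-column table -> 99 rows per batch
	for len(items) > 0 {
		if len(items) < batchSize {
			batchSize = len(items)
		}
		if err := insertBatch(items[:batchSize]); err != nil {
			panic(err)
		}
		items = items[batchSize:]
	}
}
```

The final partial batch is handled by shrinking `batchSize` once the remaining slice is smaller than it, so the loop never slices past the end of the data.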