
Graceful Queues: Issue Indexing and Tasks (#9363)

* Queue: Add generic graceful queues with settings

* Queue & Setting: Add worker pool implementation

* Queue: Add worker settings

* Queue: Make resizing worker pools

* Queue: Add name variable to queues

* Queue: Add monitoring

* Queue: Improve logging

* Issues: Gracefulise the issues indexer

Remove the old now unused specific queues

* Task: Move to generic queue and gracefulise

* Issues: Standardise the issues indexer queue settings

* Fix test

* Queue: Allow Redis to connect to unix

* Prevent deadlock during early shutdown of issue indexer

* Add MaxWorker settings to queues

* Merge branch 'master' into graceful-queues

* Update modules/indexer/issues/indexer.go

Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>

* Update modules/indexer/issues/indexer.go

Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>

* Update modules/queue/queue_channel.go

Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>

* Update modules/queue/queue_disk.go

* Update modules/queue/queue_disk_channel.go

Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>

* Rename queue.Description to queue.ManagedQueue as per @guillep2k

* Cancel pool workers when removed

* Remove dependency on queue from setting

* Update modules/queue/queue_redis.go

Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>

* As per @guillep2k add mutex locks on shutdown/terminate

* move unlocking out of setInternal

* Add warning if number of workers < 0

* Small changes as per @guillep2k

* Report "no redis host specified" instead of "not found"

* Clean up documentation for queues

* Update docs/content/doc/advanced/config-cheat-sheet.en-us.md

* Update modules/indexer/issues/indexer_test.go

* Ensure that persistable channel queue is added to manager

* Rename QUEUE_NAME REDIS_QUEUE_NAME

* Revert "Rename QUEUE_NAME REDIS_QUEUE_NAME"

This reverts commit 1f83b4fc9b.

Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: Lauris BH <lauris@nix.lv>
Co-authored-by: techknowlogick <matti@mdranta.net>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
zeripath authored 3 years ago; committed by Antoine GIRARD
Commit 62eb1b0f25
35 changed files (lines changed in parentheses):

1. custom/conf/app.ini.sample (33)
2. docs/content/doc/advanced/config-cheat-sheet.en-us.md (20)
3. integrations/issue_test.go (9)
4. modules/indexer/issues/db.go (4)
5. modules/indexer/issues/indexer.go (199)
6. modules/indexer/issues/indexer_test.go (4)
7. modules/indexer/issues/queue.go (25)
8. modules/indexer/issues/queue_channel.go (62)
9. modules/indexer/issues/queue_disk.go (104)
10. modules/indexer/issues/queue_redis.go (146)
11. modules/queue/manager.go (270)
12. modules/queue/queue.go (133)
13. modules/queue/queue_channel.go (106)
14. modules/queue/queue_channel_test.go (91)
15. modules/queue/queue_disk.go (213)
16. modules/queue/queue_disk_channel.go (193)
17. modules/queue/queue_disk_channel_test.go (117)
18. modules/queue/queue_disk_test.go (126)
19. modules/queue/queue_redis.go (234)
20. modules/queue/queue_test.go (43)
21. modules/queue/queue_wrapped.go (206)
22. modules/queue/setting.go (75)
23. modules/queue/workerpool.go (325)
24. modules/setting/queue.go (159)
25. modules/setting/setting.go (1)
26. modules/setting/task.go (27)
27. modules/task/queue.go (14)
28. modules/task/queue_channel.go (48)
29. modules/task/queue_redis.go (130)
30. modules/task/task.go (41)
31. options/locale/locale_en-US.ini (50)
32. routers/admin/admin.go (127)
33. routers/routes/routes.go (12)
34. templates/admin/monitor.tmpl (28)
35. templates/admin/queue.tmpl (147)

custom/conf/app.ini.sample (33)

@@ -382,6 +382,39 @@ REPO_INDEXER_INCLUDE =
; A comma separated list of glob patterns to exclude from the index; default is empty
REPO_INDEXER_EXCLUDE =
[queue]
; Specific queues can be individually configured with [queue.name]. [queue] provides defaults
;
; General queue type, currently supported: persistable-channel, channel, level, redis, dummy
; defaults to persistable-channel
TYPE = persistable-channel
; data-dir for storing persistable queues and level queues, individual queues will be named by their type
DATADIR = queues/
; Default queue length before a channel queue will block
LENGTH = 20
; Batch size to send for batched queues
BATCH_LENGTH = 20
; Connection string for redis queues.
CONN_STR = "addrs=127.0.0.1:6379 db=0"
; Provide the suffix of the default redis queue name - specific queues can be overridden in their [queue.name] sections.
QUEUE_NAME = "_queue"
; If the queue cannot be created at startup - level queues may need a timeout at startup - wrap the queue:
WRAP_IF_NECESSARY = true
; Attempt to create the wrapped queue at most this many times
MAX_ATTEMPTS = 10
; Timeout for queue creation
TIMEOUT = 15m30s
; Create a pool with this many workers
WORKERS = 1
; Dynamically scale the worker pool up to this many workers
MAX_WORKERS = 10
; Add boost workers when the queue blocks for BLOCK_TIMEOUT
BLOCK_TIMEOUT = 1s
; Remove the boost workers after BOOST_TIMEOUT
BOOST_TIMEOUT = 5m
; During a boost add BOOST_WORKERS
BOOST_WORKERS = 5
[admin]
; Disallow regular (non-admin) users from creating organizations.
DISABLE_REGULAR_ORG_CREATION = false
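Since specific queues can be configured in `[queue.name]` sections (per the comment above), an individual queue such as the issue indexer's can be tuned on its own. A minimal sketch (the section name matches the `issue_indexer` queue registered by this PR; the values are purely illustrative):

```ini
; Hypothetical per-queue override: run the issue indexer queue on redis
; with a larger worker pool than the [queue] defaults above.
[queue.issue_indexer]
TYPE = redis
CONN_STR = "addrs=127.0.0.1:6379 db=0"
WORKERS = 2
MAX_WORKERS = 20
```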

docs/content/doc/advanced/config-cheat-sheet.en-us.md (20)

@@ -226,6 +226,7 @@ relation to port exhaustion.
- `ISSUE_INDEXER_TYPE`: **bleve**: Issue indexer type, currently supported: bleve or db; if db is chosen, the issue indexer settings below are ignored.
- `ISSUE_INDEXER_PATH`: **indexers/issues.bleve**: Index file used for issue search.
- The next 4 configuration values are deprecated and should be set in `queue.issue_indexer`; they are kept for backwards compatibility:
- `ISSUE_INDEXER_QUEUE_TYPE`: **levelqueue**: Issue indexer queue, currently supports: `channel`, `levelqueue`, `redis`.
- `ISSUE_INDEXER_QUEUE_DIR`: **indexers/issues.queue**: When `ISSUE_INDEXER_QUEUE_TYPE` is `levelqueue`, this is the path where the queue will be saved.
- `ISSUE_INDEXER_QUEUE_CONN_STR`: **addrs=127.0.0.1:6379 db=0**: When `ISSUE_INDEXER_QUEUE_TYPE` is `redis`, this will store the redis connection string.
@@ -239,6 +240,24 @@ relation to port exhaustion.
- `MAX_FILE_SIZE`: **1048576**: Maximum size in bytes of files to be indexed.
- `STARTUP_TIMEOUT`: **30s**: If the indexer takes longer than this timeout to start - fail. (This timeout will be added to the hammer time above for child processes - as bleve will not start until the previous parent is shutdown.) Set to zero to never timeout.
## Queue (`queue` and `queue.*`)
- `TYPE`: **persistable-channel**: General queue type, currently supported: `persistable-channel`, `channel`, `level`, `redis`, `dummy`
- `DATADIR`: **queues/**: Base DataDir for storing persistent and level queues. `DATADIR` for individual queues can be set in `queue.name` sections but will default to `DATADIR/`**`name`**.
- `LENGTH`: **20**: Maximal queue size before channel queues block
- `BATCH_LENGTH`: **20**: Batch data before passing to the handler
- `CONN_STR`: **addrs=127.0.0.1:6379 db=0**: Connection string for the redis queue type.
- `QUEUE_NAME`: **_queue**: The suffix for default redis queue name. Individual queues will default to **`name`**`QUEUE_NAME` (e.g. `issue_indexer_queue`) but can be overridden in the specific `queue.name` section.
- `WRAP_IF_NECESSARY`: **true**: Will wrap queues with a timeoutable queue if the selected queue is not ready to be created (only relevant for the level queue).
- `MAX_ATTEMPTS`: **10**: Maximum number of attempts to create the wrapped queue
- `TIMEOUT`: **GRACEFUL_HAMMER_TIME + 30s**: Timeout the creation of the wrapped queue if it takes longer than this to create.
- Queues by default come with a dynamically scaling worker pool. The following settings configure this:
- `WORKERS`: **1**: Number of initial workers for the queue.
- `MAX_WORKERS`: **10**: Maximum number of worker go-routines for the queue.
- `BLOCK_TIMEOUT`: **1s**: If the queue blocks for this time, boost the number of workers - the `BLOCK_TIMEOUT` will then be doubled before boosting again whilst the boost is ongoing (so 1s, then 2s, then 4s, and so on).
- `BOOST_TIMEOUT`: **5m**: Boost workers will timeout after this long.
- `BOOST_WORKERS`: **5**: This many workers will be added to the worker pool if there is a boost.
## Admin (`admin`)
- `DEFAULT_EMAIL_NOTIFICATIONS`: **enabled**: Default configuration for email notifications for users (user configurable). Options: enabled, onmention, disabled
@@ -614,6 +633,7 @@ You may redefine `ELEMENT`, `ALLOW_ATTR`, and `REGEXP` multiple times; each time
## Task (`task`)
- Task queue configuration has been moved to `queue.task`; however, the below configuration values are kept for backwards compatibility:
- `QUEUE_TYPE`: **channel**: Task queue type, could be `channel` or `redis`.
- `QUEUE_LENGTH`: **1000**: Task queue length, available only when `QUEUE_TYPE` is `channel`.
- `QUEUE_CONN_STR`: **addrs=127.0.0.1:6379 db=0**: Task queue connection string, available only when `QUEUE_TYPE` is `redis`. If the redis connection requires a password, use `addrs=127.0.0.1:6379 password=123 db=0`.
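To sketch what that migration looks like, a legacy task configuration and its assumed equivalent under the generic queue settings might be (the `queue.task` keys are assumed to mirror the generic `[queue]` keys documented above):

```ini
; Legacy form, still honoured for backwards compatibility
[task]
QUEUE_TYPE = redis
QUEUE_CONN_STR = "addrs=127.0.0.1:6379 db=0"

; Assumed modern equivalent using the generic queue settings
[queue.task]
TYPE = redis
CONN_STR = "addrs=127.0.0.1:6379 db=0"
```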

integrations/issue_test.go (9)

@@ -11,8 +11,10 @@ import (
"strconv"
"strings"
"testing"
"time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/indexer/issues"
"code.gitea.io/gitea/modules/references"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/test"
@@ -87,7 +89,12 @@ func TestViewIssuesKeyword(t *testing.T) {
defer prepareTestEnv(t)()
repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 1}).(*models.Repository)
issue := models.AssertExistsAndLoadBean(t, &models.Issue{
RepoID: repo.ID,
Index: 1,
}).(*models.Issue)
issues.UpdateIssueIndexer(issue)
time.Sleep(time.Second * 1)
const keyword = "first"
req := NewRequestf(t, "GET", "%s/issues?q=%s", repo.RelLink(), keyword)
resp := MakeRequest(t, req, http.StatusOK)

modules/indexer/issues/db.go (4)

@@ -25,6 +25,10 @@ func (db *DBIndexer) Delete(ids ...int64) error {
return nil
}
// Close dummy function
func (db *DBIndexer) Close() {
}
// Search dummy function
func (db *DBIndexer) Search(kw string, repoIDs []int64, limit, start int) (*SearchResult, error) {
total, ids, err := models.SearchIssueIDsByKeyword(kw, repoIDs, limit, start)

modules/indexer/issues/indexer.go (199)

@@ -5,12 +5,16 @@
package issues
import (
"context"
"fmt"
"os"
"sync"
"time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/graceful"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/queue"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
)
@@ -44,12 +48,14 @@ type Indexer interface {
Index(issue []*IndexerData) error
Delete(ids ...int64) error
Search(kw string, repoIDs []int64, limit, start int) (*SearchResult, error)
Close()
}
type indexerHolder struct {
indexer Indexer
mutex sync.RWMutex
cond *sync.Cond
indexer Indexer
mutex sync.RWMutex
cond *sync.Cond
cancelled bool
}
func newIndexerHolder() *indexerHolder {
@@ -58,6 +64,13 @@ func newIndexerHolder() *indexerHolder {
return h
}
func (h *indexerHolder) cancel() {
h.mutex.Lock()
defer h.mutex.Unlock()
h.cancelled = true
h.cond.Broadcast()
}
func (h *indexerHolder) set(indexer Indexer) {
h.mutex.Lock()
defer h.mutex.Unlock()
@@ -68,16 +81,15 @@ func (h *indexerHolder) set(indexer Indexer) {
func (h *indexerHolder) get() Indexer {
h.mutex.RLock()
defer h.mutex.RUnlock()
if h.indexer == nil {
if h.indexer == nil && !h.cancelled {
h.cond.Wait()
}
return h.indexer
}
var (
issueIndexerChannel = make(chan *IndexerData, setting.Indexer.UpdateQueueLength)
// issueIndexerQueue queue of issue ids to be updated
issueIndexerQueue Queue
issueIndexerQueue queue.Queue
holder = newIndexerHolder()
)
@@ -85,90 +97,99 @@ var (
// all issue index done.
func InitIssueIndexer(syncReindex bool) {
waitChannel := make(chan time.Duration)
// Create the Queue
switch setting.Indexer.IssueType {
case "bleve":
handler := func(data ...queue.Data) {
indexer := holder.get()
if indexer == nil {
log.Error("Issue indexer handler: unable to get indexer!")
return
}
iData := make([]*IndexerData, 0, setting.Indexer.IssueQueueBatchNumber)
for _, datum := range data {
indexerData, ok := datum.(*IndexerData)
if !ok {
log.Error("Unable to process provided datum: %v - not possible to cast to IndexerData", datum)
continue
}
log.Trace("IndexerData Process: %d %v %t", indexerData.ID, indexerData.IDs, indexerData.IsDelete)
if indexerData.IsDelete {
_ = indexer.Delete(indexerData.IDs...)
continue
}
iData = append(iData, indexerData)
}
if err := indexer.Index(iData); err != nil {
log.Error("Error whilst indexing: %v Error: %v", iData, err)
}
}
issueIndexerQueue = queue.CreateQueue("issue_indexer", handler, &IndexerData{})
if issueIndexerQueue == nil {
log.Fatal("Unable to create issue indexer queue")
}
default:
issueIndexerQueue = &queue.DummyQueue{}
}
// Create the Indexer
go func() {
start := time.Now()
log.Info("Initializing Issue Indexer")
log.Info("PID %d: Initializing Issue Indexer: %s", os.Getpid(), setting.Indexer.IssueType)
var populate bool
var dummyQueue bool
switch setting.Indexer.IssueType {
case "bleve":
issueIndexer := NewBleveIndexer(setting.Indexer.IssuePath)
exist, err := issueIndexer.Init()
if err != nil {
log.Fatal("Unable to initialize Bleve Issue Indexer: %v", err)
}
populate = !exist
holder.set(issueIndexer)
graceful.GetManager().RunWithShutdownFns(func(_, atTerminate func(context.Context, func())) {
issueIndexer := NewBleveIndexer(setting.Indexer.IssuePath)
exist, err := issueIndexer.Init()
if err != nil {
holder.cancel()
log.Fatal("Unable to initialize Bleve Issue Indexer: %v", err)
}
populate = !exist
holder.set(issueIndexer)
atTerminate(context.Background(), func() {
log.Debug("Closing issue indexer")
issueIndexer := holder.get()
if issueIndexer != nil {
issueIndexer.Close()
}
log.Info("PID: %d Issue Indexer closed", os.Getpid())
})
log.Debug("Created Bleve Indexer")
})
case "db":
issueIndexer := &DBIndexer{}
holder.set(issueIndexer)
dummyQueue = true
default:
holder.cancel()
log.Fatal("Unknown issue indexer type: %s", setting.Indexer.IssueType)
}
if dummyQueue {
issueIndexerQueue = &DummyQueue{}
} else {
var err error
switch setting.Indexer.IssueQueueType {
case setting.LevelQueueType:
issueIndexerQueue, err = NewLevelQueue(
holder.get(),
setting.Indexer.IssueQueueDir,
setting.Indexer.IssueQueueBatchNumber)
if err != nil {
log.Fatal(
"Unable create level queue for issue queue dir: %s batch number: %d : %v",
setting.Indexer.IssueQueueDir,
setting.Indexer.IssueQueueBatchNumber,
err)
}
case setting.ChannelQueueType:
issueIndexerQueue = NewChannelQueue(holder.get(), setting.Indexer.IssueQueueBatchNumber)
case setting.RedisQueueType:
addrs, pass, idx, err := parseConnStr(setting.Indexer.IssueQueueConnStr)
if err != nil {
log.Fatal("Unable to parse connection string for RedisQueueType: %s : %v",
setting.Indexer.IssueQueueConnStr,
err)
}
issueIndexerQueue, err = NewRedisQueue(addrs, pass, idx, holder.get(), setting.Indexer.IssueQueueBatchNumber)
if err != nil {
log.Fatal("Unable to create RedisQueue: %s : %v",
setting.Indexer.IssueQueueConnStr,
err)
}
default:
log.Fatal("Unsupported indexer queue type: %v",
setting.Indexer.IssueQueueType)
}
go func() {
err = issueIndexerQueue.Run()
if err != nil {
log.Error("issueIndexerQueue.Run: %v", err)
}
}()
}
go func() {
for data := range issueIndexerChannel {
_ = issueIndexerQueue.Push(data)
}
}()
// Start processing the queue
go graceful.GetManager().RunWithShutdownFns(issueIndexerQueue.Run)
// Populate the index
if populate {
if syncReindex {
populateIssueIndexer()
graceful.GetManager().RunWithShutdownContext(populateIssueIndexer)
} else {
go populateIssueIndexer()
go graceful.GetManager().RunWithShutdownContext(populateIssueIndexer)
}
}
waitChannel <- time.Since(start)
close(waitChannel)
}()
if syncReindex {
<-waitChannel
select {
case <-waitChannel:
case <-graceful.GetManager().IsShutdown():
}
} else if setting.Indexer.StartupTimeout > 0 {
go func() {
timeout := setting.Indexer.StartupTimeout
@@ -178,7 +199,12 @@ func InitIssueIndexer(syncReindex bool) {
select {
case duration := <-waitChannel:
log.Info("Issue Indexer Initialization took %v", duration)
case <-graceful.GetManager().IsShutdown():
log.Warn("Shutdown occurred before issue index initialisation was complete")
case <-time.After(timeout):
if shutdownable, ok := issueIndexerQueue.(queue.Shutdownable); ok {
shutdownable.Terminate()
}
log.Fatal("Issue Indexer Initialization timed-out after: %v", timeout)
}
}()
@@ -186,8 +212,14 @@ func InitIssueIndexer(syncReindex bool) {
}
// populateIssueIndexer populates the issue indexer with issue data
func populateIssueIndexer() {
func populateIssueIndexer(ctx context.Context) {
for page := 1; ; page++ {
select {
case <-ctx.Done():
log.Warn("Issue Indexer population shutdown before completion")
return
default:
}
repos, _, err := models.SearchRepositoryByName(&models.SearchRepoOptions{
Page: page,
PageSize: models.RepositoryListDefaultPageSize,
@@ -200,10 +232,17 @@ func populateIssueIndexer() {
continue
}
if len(repos) == 0 {
log.Debug("Issue Indexer population complete")
return
}
for _, repo := range repos {
select {
case <-ctx.Done():
log.Info("Issue Indexer population shutdown before completion")
return
default:
}
UpdateRepoIndexer(repo)
}
}
@@ -237,13 +276,17 @@ func UpdateIssueIndexer(issue *models.Issue) {
comments = append(comments, comment.Content)
}
}
issueIndexerChannel <- &IndexerData{
indexerData := &IndexerData{
ID: issue.ID,
RepoID: issue.RepoID,
Title: issue.Title,
Content: issue.Content,
Comments: comments,
}
log.Debug("Adding to channel: %v", indexerData)
if err := issueIndexerQueue.Push(indexerData); err != nil {
log.Error("Unable to push to issue indexer: %v: Error: %v", indexerData, err)
}
}
// DeleteRepoIssueIndexer deletes repo's all issues indexes
@@ -258,17 +301,25 @@ func DeleteRepoIssueIndexer(repo *models.Repository) {
if len(ids) == 0 {
return
}
issueIndexerChannel <- &IndexerData{
indexerData := &IndexerData{
IDs: ids,
IsDelete: true,
}
if err := issueIndexerQueue.Push(indexerData); err != nil {
log.Error("Unable to push to issue indexer: %v: Error: %v", indexerData, err)
}
}
// SearchIssuesByKeyword search issue ids by keywords and repo id
func SearchIssuesByKeyword(repoIDs []int64, keyword string) ([]int64, error) {
var issueIDs []int64
res, err := holder.get().Search(keyword, repoIDs, 1000, 0)
indexer := holder.get()
if indexer == nil {
log.Error("SearchIssuesByKeyword(): unable to get indexer!")
return nil, fmt.Errorf("unable to get issue indexer")
}
res, err := indexer.Search(keyword, repoIDs, 1000, 0)
if err != nil {
return nil, err
}
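One note on the holder above: its constructor body is elided by the hunk at `newIndexerHolder()`, but since `get()` calls `cond.Wait()` while holding only `mutex.RLock()`, and `set()`/`cancel()` broadcast under the write lock, the condition variable must be built on the read-locker. A plausible reconstruction (an assumption, not shown in the diff):

```go
// Sketch of the elided constructor: the cond uses mutex.RLocker()
// because get() waits while holding only the read lock.
func newIndexerHolder() *indexerHolder {
	h := &indexerHolder{}
	h.cond = sync.NewCond(h.mutex.RLocker())
	return h
}
```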

modules/indexer/issues/indexer_test.go (4)

@@ -15,6 +15,8 @@ import (
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/setting"
"gopkg.in/ini.v1"
"github.com/stretchr/testify/assert"
)
@@ -24,6 +26,7 @@ func TestMain(m *testing.M) {
func TestBleveSearchIssues(t *testing.T) {
assert.NoError(t, models.PrepareTestDatabase())
setting.Cfg = ini.Empty()
tmpIndexerDir, err := ioutil.TempDir("", "issues-indexer")
if err != nil {
@@ -41,6 +44,7 @@ func TestBleveSearchIssues(t *testing.T) {
}()
setting.Indexer.IssueType = "bleve"
setting.NewQueueService()
InitIssueIndexer(true)
defer func() {
indexer := holder.get()

modules/indexer/issues/queue.go (25)

@@ -1,25 +0,0 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package issues
// Queue defines an interface to save an issue indexer queue
type Queue interface {
Run() error
Push(*IndexerData) error
}
// DummyQueue represents an empty queue
type DummyQueue struct {
}
// Run starts to run the queue
func (b *DummyQueue) Run() error {
return nil
}
// Push pushes data to indexer
func (b *DummyQueue) Push(*IndexerData) error {
return nil
}

modules/indexer/issues/queue_channel.go (62)

@@ -1,62 +0,0 @@
// Copyright 2018 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package issues
import (
"time"
"code.gitea.io/gitea/modules/setting"
)
// ChannelQueue implements
type ChannelQueue struct {
queue chan *IndexerData
indexer Indexer
batchNumber int
}
// NewChannelQueue create a memory channel queue
func NewChannelQueue(indexer Indexer, batchNumber int) *ChannelQueue {
return &ChannelQueue{
queue: make(chan *IndexerData, setting.Indexer.UpdateQueueLength),
indexer: indexer,
batchNumber: batchNumber,
}
}
// Run starts to run the queue
func (c *ChannelQueue) Run() error {
var i int
var datas = make([]*IndexerData, 0, c.batchNumber)
for {
select {
case data := <-c.queue:
if data.IsDelete {
_ = c.indexer.Delete(data.IDs...)
continue
}
datas = append(datas, data)
if len(datas) >= c.batchNumber {
_ = c.indexer.Index(datas)
// TODO: save the point
datas = make([]*IndexerData, 0, c.batchNumber)
}
case <-time.After(time.Millisecond * 100):
i++
if i >= 3 && len(datas) > 0 {
_ = c.indexer.Index(datas)
// TODO: save the point
datas = make([]*IndexerData, 0, c.batchNumber)
}
}
}
}
// Push will push the indexer data to queue
func (c *ChannelQueue) Push(data *IndexerData) error {
c.queue <- data
return nil
}

modules/indexer/issues/queue_disk.go (104)

@@ -1,104 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package issues
import (
"encoding/json"
"time"
"code.gitea.io/gitea/modules/log"
"gitea.com/lunny/levelqueue"
)
var (
_ Queue = &LevelQueue{}
)
// LevelQueue implements a disk library queue
type LevelQueue struct {
indexer Indexer
queue *levelqueue.Queue
batchNumber int
}
// NewLevelQueue creates a ledis local queue
func NewLevelQueue(indexer Indexer, dataDir string, batchNumber int) (*LevelQueue, error) {
queue, err := levelqueue.Open(dataDir)
if err != nil {
return nil, err
}
return &LevelQueue{
indexer: indexer,
queue: queue,
batchNumber: batchNumber,
}, nil
}
// Run starts to run the queue
func (l *LevelQueue) Run() error {
var i int
var datas = make([]*IndexerData, 0, l.batchNumber)
for {
i++
if len(datas) > l.batchNumber || (len(datas) > 0 && i > 3) {
_ = l.indexer.Index(datas)
datas = make([]*IndexerData, 0, l.batchNumber)
i = 0
continue
}
bs, err := l.queue.RPop()
if err != nil {
if err != levelqueue.ErrNotFound {
log.Error("RPop: %v", err)
}
time.Sleep(time.Millisecond * 100)
continue
}
if len(bs) == 0 {
time.Sleep(time.Millisecond * 100)
continue
}
var data IndexerData
err = json.Unmarshal(bs, &data)
if err != nil {
log.Error("Unmarshal: %v", err)
time.Sleep(time.Millisecond * 100)
continue
}
log.Trace("LevelQueue: task found: %#v", data)
if data.IsDelete {
if data.ID > 0 {
if err = l.indexer.Delete(data.ID); err != nil {
log.Error("indexer.Delete: %v", err)
}
} else if len(data.IDs) > 0 {
if err = l.indexer.Delete(data.IDs...); err != nil {
log.Error("indexer.Delete: %v", err)
}
}
time.Sleep(time.Millisecond * 10)
continue
}
datas = append(datas, &data)
time.Sleep(time.Millisecond * 10)
}
}
// Push will push the indexer data to queue
func (l *LevelQueue) Push(data *IndexerData) error {
bs, err := json.Marshal(data)
if err != nil {
return err
}
return l.queue.LPush(bs)
}

modules/indexer/issues/queue_redis.go (146)

@@ -1,146 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package issues
import (
"encoding/json"
"errors"
"strconv"
"strings"
"time"
"code.gitea.io/gitea/modules/log"
"github.com/go-redis/redis"
)
var (
_ Queue = &RedisQueue{}
)
type redisClient interface {
RPush(key string, args ...interface{}) *redis.IntCmd
LPop(key string) *redis.StringCmd
Ping() *redis.StatusCmd
}
// RedisQueue redis queue
type RedisQueue struct {
client redisClient
queueName string
indexer Indexer
batchNumber int
}
func parseConnStr(connStr string) (addrs, password string, dbIdx int, err error) {
fields := strings.Fields(connStr)
for _, f := range fields {
items := strings.SplitN(f, "=", 2)
if len(items) < 2 {
continue
}
switch strings.ToLower(items[0]) {
case "addrs":
addrs = items[1]
case "password":
password = items[1]
case "db":
dbIdx, err = strconv.Atoi(items[1])
if err != nil {
return
}
}
}
return
}
// NewRedisQueue creates single redis or cluster redis queue
func NewRedisQueue(addrs string, password string, dbIdx int, indexer Indexer, batchNumber int) (*RedisQueue, error) {
dbs := strings.Split(addrs, ",")
var queue = RedisQueue{
queueName: "issue_indexer_queue",
indexer: indexer,
batchNumber: batchNumber,
}
if len(dbs) == 0 {
return nil, errors.New("no redis host found")
} else if len(dbs) == 1 {
queue.client = redis.NewClient(&redis.Options{
Addr: strings.TrimSpace(dbs[0]), // use default Addr
Password: password, // no password set
DB: dbIdx, // use default DB
})
} else {
queue.client = redis.NewClusterClient(&redis.ClusterOptions{
Addrs: dbs,
})
}
if err := queue.client.Ping().Err(); err != nil {
return nil, err
}
return &queue, nil
}
// Run runs the redis queue
func (r *RedisQueue) Run() error {
var i int
var datas = make([]*IndexerData, 0, r.batchNumber)
for {
bs, err := r.client.LPop(r.queueName).Bytes()
if err != nil && err != redis.Nil {
log.Error("LPop faile: %v", err)
time.Sleep(time.Millisecond * 100)
continue
}
i++
if len(datas) > r.batchNumber || (len(datas) > 0 && i > 3) {
_ = r.indexer.Index(datas)
datas = make([]*IndexerData, 0, r.batchNumber)
i = 0
}
if len(bs) == 0 {
time.Sleep(time.Millisecond * 100)
continue
}
var data IndexerData
err = json.Unmarshal(bs, &data)
if err != nil {
log.Error("Unmarshal: %v", err)
time.Sleep(time.Millisecond * 100)
continue
}
log.Trace("RedisQueue: task found: %#v", data)
if data.IsDelete {
if data.ID > 0 {
if err = r.indexer.Delete(data.ID); err != nil {
log.Error("indexer.Delete: %v", err)
}
} else if len(data.IDs) > 0 {
if err = r.indexer.Delete(data.IDs...); err != nil {
log.Error("indexer.Delete: %v", err)
}
}
time.Sleep(time.Millisecond * 100)
continue
}
datas = append(datas, &data)
time.Sleep(time.Millisecond * 100)
}
}
// Push implements Queue
func (r *RedisQueue) Push(data *IndexerData) error {
bs, err := json.Marshal(data)
if err != nil {
return err
}
return r.client.RPush(r.queueName, bs).Err()
}

modules/queue/manager.go (270)

@@ -0,0 +1,270 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package queue
import (
"context"
"encoding/json"
"fmt"
"reflect"
"sort"
"sync"
"time"
"code.gitea.io/gitea/modules/log"
)
var manager *Manager
// Manager is a queue manager
type Manager struct {
mutex sync.Mutex
counter int64
Queues map[int64]*ManagedQueue
}
// ManagedQueue represents a queue managed by the Manager, with its pool and configuration.
type ManagedQueue struct {
mutex sync.Mutex
QID int64
Queue Queue
Type Type
Name string
Configuration interface{}
ExemplarType string
Pool ManagedPool
counter int64
PoolWorkers map[int64]*PoolWorkers
}
// ManagedPool is a simple interface to get certain details from a worker pool
type ManagedPool interface {
AddWorkers(number int, timeout time.Duration) context.CancelFunc
NumberOfWorkers() int
MaxNumberOfWorkers() int
SetMaxNumberOfWorkers(int)
BoostTimeout() time.Duration
BlockTimeout() time.Duration
BoostWorkers() int
SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration)
}
// ManagedQueueList implements the sort.Interface
type ManagedQueueList []*ManagedQueue
// PoolWorkers represents a group of workers in a queue's worker pool.
type PoolWorkers struct {
PID int64
Workers int
Start time.Time
Timeout time.Time
HasTimeout bool
Cancel context.CancelFunc
}
// PoolWorkersList implements the sort.Interface
type PoolWorkersList []*PoolWorkers
func init() {
_ = GetManager()
}
// GetManager returns a Manager and initializes one as singleton if there's none yet
func GetManager() *Manager {
if manager == nil {
manager = &Manager{
Queues: make(map[int64]*ManagedQueue),
}
}
return manager
}
// Add adds a queue to this manager
func (m *Manager) Add(queue Queue,
t Type,
configuration,
exemplar interface{},
pool ManagedPool) int64 {
cfg, _ := json.Marshal(configuration)
mq := &ManagedQueue{
Queue: queue,
Type: t,
Configuration: string(cfg),
ExemplarType: reflect.TypeOf(exemplar).String(),
PoolWorkers: make(map[int64]*PoolWorkers),
Pool: pool,
}
m.mutex.Lock()
m.counter++
mq.QID = m.counter
mq.Name = fmt.Sprintf("queue-%d", mq.QID)
if named, ok := queue.(Named); ok {
mq.Name = named.Name()
}
m.Queues[mq.QID] = mq
m.mutex.Unlock()
log.Trace("Queue Manager registered: %s (QID: %d)", mq.Name, mq.QID)
return mq.QID
}
// Remove a queue from the Manager
func (m *Manager) Remove(qid int64) {
m.mutex.Lock()
delete(m.Queues, qid)
m.mutex.Unlock()
log.Trace("Queue Manager removed: QID: %d", qid)
}
// GetManagedQueue by qid
func (m *Manager) GetManagedQueue(qid int64) *ManagedQueue {
m.mutex.Lock()
defer m.mutex.Unlock()
return m.Queues[qid]
}
// ManagedQueues returns the managed queues
func (m *Manager) ManagedQueues() []*ManagedQueue {
m.mutex.Lock()
mqs := make([]*ManagedQueue, 0, len(m.Queues))
for _, mq := range m.Queues {
mqs = append(mqs, mq)
}
m.mutex.Unlock()
sort.Sort(ManagedQueueList(mqs))
return mqs
}
// Workers returns the poolworkers
func (q *ManagedQueue) Workers() []*PoolWorkers {
q.mutex.Lock()
workers := make([]*PoolWorkers, 0, len(q.PoolWorkers))
for _, worker := range q.PoolWorkers {
workers = append(workers, worker)
}
q.mutex.Unlock()
sort.Sort(PoolWorkersList(workers))
return workers
}
// RegisterWorkers registers workers to this queue
func (q *ManagedQueue) RegisterWorkers(number int, start time.Time, hasTimeout bool, timeout time.Time, cancel context.CancelFunc) int64 {
q.mutex.Lock()
defer q.mutex.Unlock()
q.counter++
q.PoolWorkers[q.counter] = &PoolWorkers{
PID: q.counter,
Workers: number,
Start: start,
Timeout: timeout,
HasTimeout: hasTimeout,
Cancel: cancel,
}
return q.counter
}
// CancelWorkers cancels pooled workers with pid
func (q *ManagedQueue) CancelWorkers(pid int64) {
q.mutex.Lock()
pw, ok := q.PoolWorkers[pid]
q.mutex.Unlock()
if !ok {
return
}
pw.Cancel()
}
// RemoveWorkers deletes pooled workers with pid
func (q *ManagedQueue) RemoveWorkers(pid int64) {
q.mutex.Lock()
pw, ok := q.PoolWorkers[pid]
delete(q.PoolWorkers, pid)
q.mutex.Unlock()
if ok && pw.Cancel != nil {
pw.Cancel()
}
}
// AddWorkers adds workers to the queue if it has registered an add worker function
func (q *ManagedQueue) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
if q.Pool != nil {
// the cancel will be added to the pool workers description above
return q.Pool.AddWorkers(number, timeout)
}
return nil
}
// NumberOfWorkers returns the number of workers in the queue
func (q *ManagedQueue) NumberOfWorkers() int {
if q.Pool != nil {
return q.Pool.NumberOfWorkers()
}
return -1
}
// MaxNumberOfWorkers returns the maximum number of workers for the pool
func (q *ManagedQueue) MaxNumberOfWorkers() int {
if q.Pool != nil {
return q.Pool.MaxNumberOfWorkers()
}
return 0
}
// BoostWorkers returns the number of workers for a boost
func (q *ManagedQueue) BoostWorkers() int {
if q.Pool != nil {
return q.Pool.BoostWorkers()
}
return -1
}
// BoostTimeout returns the timeout of the next boost
func (q *ManagedQueue) BoostTimeout() time.Duration {
if q.Pool != nil {
return q.Pool.BoostTimeout()
}
return 0
}
// BlockTimeout returns the timeout until the next boost
func (q *ManagedQueue) BlockTimeout() time.Duration {
if q.Pool != nil {
return q.Pool.BlockTimeout()
}
return 0
}
// SetSettings sets the settable boost values
func (q *ManagedQueue) SetSettings(maxNumberOfWorkers, boostWorkers int, timeout time.Duration) {
if q.Pool != nil {
q.Pool.SetSettings(maxNumberOfWorkers, boostWorkers, timeout)
}
}
func (l ManagedQueueList) Len() int {
return len(l)
}
func (l ManagedQueueList) Less(i, j int) bool {
return l[i].Name < l[j].Name
}
func (l ManagedQueueList) Swap(i, j int) {
l[i], l[j] = l[j], l[i]
}
func (l PoolWorkersList) Len() int {
return len(l)
}
func (l PoolWorkersList) Less(i, j int) bool {
return l[i].Start.Before(l[j].Start)
}
func (l PoolWorkersList) Swap(i, j int) {
l[i], l[j] = l[j], l[i]
}
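Everything registered with the manager can then be enumerated, which is what the new admin monitoring pages build on. A minimal sketch of a caller (the helper below is illustrative, not code from this PR):

```go
import (
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/queue"
)

// logQueueStatus lists every managed queue with its worker counts.
// NumberOfWorkers returns -1 for queues without a worker pool.
func logQueueStatus() {
	for _, mq := range queue.GetManager().ManagedQueues() {
		log.Info("%s (QID %d): workers %d/%d",
			mq.Name, mq.QID, mq.NumberOfWorkers(), mq.MaxNumberOfWorkers())
	}
}
```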

modules/queue/queue.go (133)

@@ -0,0 +1,133 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package queue
import (
"context"
"encoding/json"
"fmt"
"reflect"
)
// ErrInvalidConfiguration is returned when a queue is given invalid configuration
type ErrInvalidConfiguration struct {
cfg interface{}
err error
}
func (err ErrInvalidConfiguration) Error() string {
if err.err != nil {
return fmt.Sprintf("Invalid Configuration Argument: %v: Error: %v", err.cfg, err.err)
}
return fmt.Sprintf("Invalid Configuration Argument: %v", err.cfg)
}
// IsErrInvalidConfiguration checks if an error is an ErrInvalidConfiguration
func IsErrInvalidConfiguration(err error) bool {
_, ok := err.(ErrInvalidConfiguration)
return ok
}
// Type is a type of Queue
type Type string
// Data defines a type of queueable data
type Data interface{}
// HandlerFunc is a function that takes a variable amount of data and processes it
type HandlerFunc func(...Data)
// NewQueueFunc is a function that creates a queue
type NewQueueFunc func(handler HandlerFunc, config interface{}, exemplar interface{}) (Queue, error)
// Shutdownable represents a queue that can be shutdown
type Shutdownable interface {
Shutdown()
Terminate()
}
// Named represents a queue with a name
type Named interface {
Name() string
}
// Queue defines an interface for a generic queue
type Queue interface {
Run(atShutdown, atTerminate func(context.Context, func()))
Push(Data) error
}
// DummyQueueType is the type for the dummy queue
const DummyQueueType Type = "dummy"
// NewDummyQueue creates a new DummyQueue
func NewDummyQueue(handler HandlerFunc, opts, exemplar interface{}) (Queue, error) {
return &DummyQueue{}, nil
}
// DummyQueue represents an empty queue
type DummyQueue struct {
}
// Run starts to run the queue
func (b *DummyQueue) Run(_, _ func(context.Context, func())) {}
// Push pushes data to the queue
func (b *DummyQueue) Push(Data) error {
return nil
}
func toConfig(exemplar, cfg interface{}) (interface{}, error) {
if reflect.TypeOf(cfg).AssignableTo(reflect.TypeOf(exemplar)) {
return cfg, nil
}
configBytes, ok := cfg.([]byte)
if !ok {
configStr, ok := cfg.(string)
if !ok {
return nil, ErrInvalidConfiguration{cfg: cfg}
}
configBytes = []byte(configStr)
}
newVal := reflect.New(reflect.TypeOf(exemplar))
if err := json.Unmarshal(configBytes, newVal.Interface()); err != nil {
return nil, ErrInvalidConfiguration{cfg: cfg, err: err}
}
return newVal.Elem().Interface(), nil
}
var queuesMap = map[Type]NewQueueFunc{DummyQueueType: NewDummyQueue}
// RegisteredTypes provides the list of registered queue types
func RegisteredTypes() []Type {
types := make([]Type, len(queuesMap))
i := 0
for key := range queuesMap {
types[i] = key
i++
}
return types
}
// RegisteredTypesAsString provides the list of registered queue types as strings
func RegisteredTypesAsString() []string {
types := make([]string, len(queuesMap))
i := 0
for key := range queuesMap {
types[i] = string(key)
i++
}
return types
}
// NewQueue takes a queue Type, a HandlerFunc, some options and possibly an exemplar, and returns a Queue or an error
func NewQueue(queueType Type, handlerFunc HandlerFunc, opts, exemplar interface{}) (Queue, error) {
newFn, ok := queuesMap[queueType]
if !ok {
return nil, fmt.Errorf("Unsupported queue type: %v", queueType)
}
return newFn(handlerFunc, opts, exemplar)
}
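Putting the registry together with the channel queue that follows, creating and running a queue looks roughly like this sketch (the payload type and handler are hypothetical; the no-op shutdown callbacks mirror the ones used in the tests further down):

```go
package example

import (
	"context"
	"time"

	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/queue"
)

type myPayload struct{ ID int64 }

func runExampleQueue() error {
	// The handler receives batches of Data; assert back to the exemplar type.
	handler := func(data ...queue.Data) {
		for _, datum := range data {
			if p, ok := datum.(*myPayload); ok {
				log.Info("processing %d", p.ID)
			}
		}
	}
	q, err := queue.NewQueue(queue.ChannelQueueType, handler,
		queue.ChannelQueueConfiguration{
			QueueLength:  20,
			BatchLength:  2,
			Workers:      1,
			MaxWorkers:   10,
			BlockTimeout: time.Second,
			BoostTimeout: 5 * time.Minute,
			BoostWorkers: 5,
			Name:         "example",
		}, &myPayload{})
	if err != nil {
		return err
	}
	// Real callers pass graceful shutdown/terminate hooks here.
	nilFn := func(_ context.Context, _ func()) {}
	go q.Run(nilFn, nilFn)
	return q.Push(&myPayload{ID: 1})
}
```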

modules/queue/queue_channel.go (106)

@@ -0,0 +1,106 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package queue
import (
"context"
"fmt"
"reflect"
"time"
"code.gitea.io/gitea/modules/log"
)
// ChannelQueueType is the type for channel queue
const ChannelQueueType Type = "channel"
// ChannelQueueConfiguration is the configuration for a ChannelQueue
type ChannelQueueConfiguration struct {
QueueLength int
BatchLength int
Workers int
MaxWorkers int
BlockTimeout time.Duration
BoostTimeout time.Duration
BoostWorkers int
Name string
}
// ChannelQueue implements Queue using a Go channel and a worker pool
type ChannelQueue struct {
pool *WorkerPool
exemplar interface{}
workers int
name string
}
// NewChannelQueue creates a memory channel queue
func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
configInterface, err := toConfig(ChannelQueueConfiguration{}, cfg)
if err != nil {
return nil, err
}
config := configInterface.(ChannelQueueConfiguration)
if config.BatchLength == 0 {
config.BatchLength = 1
}
dataChan := make(chan Data, config.QueueLength)
ctx, cancel := context.WithCancel(context.Background())
queue := &ChannelQueue{
pool: &WorkerPool{
baseCtx: ctx,
cancel: cancel,
batchLength: config.BatchLength,
handle: handle,
dataChan: dataChan,
blockTimeout: config.BlockTimeout,
boostTimeout: config.BoostTimeout,
boostWorkers: config.BoostWorkers,
maxNumberOfWorkers: config.MaxWorkers,
},
exemplar: exemplar,
workers: config.Workers,
name: config.Name,
}
queue.pool.qid = GetManager().Add(queue, ChannelQueueType, config, exemplar, queue.pool)
return queue, nil
}
// Run starts to run the queue
func (c *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
atShutdown(context.Background(), func() {
log.Warn("ChannelQueue: %s is not shutdownable!", c.name)
})
atTerminate(context.Background(), func() {
log.Warn("ChannelQueue: %s is not terminatable!", c.name)
})
go func() {
_ = c.pool.AddWorkers(c.workers, 0)
}()
}
// Push will push data into the queue
func (c *ChannelQueue) Push(data Data) error {
if c.exemplar != nil {
// Assert data is of same type as c.exemplar
t := reflect.TypeOf(data)
exemplarType := reflect.TypeOf(c.exemplar)
if !t.AssignableTo(exemplarType) || data == nil {
return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in queue: %s", data, c.exemplar, c.name)
}
}
c.pool.Push(data)
return nil
}
// Name returns the name of this queue
func (c *ChannelQueue) Name() string {
return c.name
}
func init() {
queuesMap[ChannelQueueType] = NewChannelQueue
}

modules/queue/queue_channel_test.go (91)

@@ -0,0 +1,91 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package queue
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestChannelQueue(t *testing.T) {
handleChan := make(chan *testData)
handle := func(data ...Data) {
for _, datum := range data {
testDatum := datum.(*testData)
handleChan <- testDatum
}
}
nilFn := func(_ context.Context, _ func()) {}
queue, err := NewChannelQueue(handle,
ChannelQueueConfiguration{
QueueLength: 20,
Workers: 1,
MaxWorkers: 10,
BlockTimeout: 1 * time.Second,
BoostTimeout: 5 * time.Minute,
BoostWorkers: 5,
}, &testData{})
assert.NoError(t, err)
go queue.Run(nilFn, nilFn)
test1 := testData{"A", 1}
go queue.Push(&test1)
result1 := <-handleChan
assert.Equal(t, test1.TestString, result1.TestString)
assert.Equal(t, test1.TestInt, result1.TestInt)
err = queue.Push(test1)
assert.Error(t, err)
}
func TestChannelQueue_Batch(t *testing.T) {
handleChan := make(chan *testData)
handle := func(data ...Data) {
assert.True(t, len(data) == 2)
for _, datum := range data {
testDatum := datum.(*testData)
handleChan <- testDatum
}
}
nilFn := func(_ context.Context, _ func()) {}
queue, err := NewChannelQueue(handle,
ChannelQueueConfiguration{
QueueLength: 20,
BatchLength: 2,
Workers: 1,
MaxWorkers: 10,
BlockTimeout: 1 * time.Second,
BoostTimeout: 5 * time.Minute,
BoostWorkers: 5,
}, &testData{})
assert.NoError(t, err)
go queue.Run(nilFn, nilFn)
test1 := testData{"A", 1}
test2 := testData{"B", 2}
queue.Push(&test1)
go queue.Push(&test2)
result1 := <-handleChan
assert.Equal(t, test1.TestString, result1.TestString)
assert.Equal(t, test1.TestInt, result1.TestInt)
result2 := <-handleChan
assert.Equal(t, test2.TestString, result2.TestString)
assert.Equal(t, test2.TestInt, result2.TestInt)
err = queue.Push(test1)
assert.Error(t, err)
}

modules/queue/queue_disk.go (213)

@@ -0,0 +1,213 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package queue
import (
"context"
"encoding/json"
"fmt"
"reflect"
"sync"
"time"
"code.gitea.io/gitea/modules/log"
"gitea.com/lunny/levelqueue"
)
// LevelQueueType is the type for level queue
const LevelQueueType Type = "level"
// LevelQueueConfiguration is the configuration for a LevelQueue
type LevelQueueConfiguration struct {
DataDir string
QueueLength int
BatchLength int
Workers int
MaxWorkers int
BlockTimeout time.Duration
BoostTimeout time.Duration
BoostWorkers int
Name string
}
// LevelQueue implements a disk-backed queue using levelqueue
type LevelQueue struct {
pool *WorkerPool
queue *levelqueue.Queue
closed chan struct{}
terminated chan struct{}
lock sync.Mutex
exemplar interface{}
workers int
name string
}
// NewLevelQueue creates a local disk queue backed by levelqueue
func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
configInterface, err := toConfig(LevelQueueConfiguration{}, cfg)
if err != nil {
return nil, err
}
config := configInterface.(LevelQueueConfiguration)
internal, err := levelqueue.Open(config.DataDir)
if err != nil {
return nil, err
}
dataChan := make(chan Data, config.QueueLength)
ctx, cancel := context.WithCancel(context.Background())
queue := &LevelQueue{
pool: &WorkerPool{
baseCtx: ctx,
cancel: cancel,
batchLength: config.BatchLength,
handle: handle,
dataChan: dataChan,
blockTimeout: config.BlockTimeout,
boostTimeout: config.BoostTimeout,
boostWorkers: config.BoostWorkers,
maxNumberOfWorkers: config.MaxWorkers,
},
queue: internal,
exemplar: exemplar,
closed: make(chan struct{}),
terminated: make(chan struct{}),
workers: config.Workers,
name: config.Name,
}
queue.pool.qid = GetManager().Add(queue, LevelQueueType, config, exemplar, queue.pool)
return queue, nil
}
// Run starts to run the queue
func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
atShutdown(context.Background(), l.Shutdown)
atTerminate(context.Background(), l.Terminate)
go func() {
_ = l.pool.AddWorkers(l.workers, 0)
}()
go l.readToChan()
log.Trace("LevelQueue: %s Waiting til closed", l.name)
<-l.closed
log.Trace("LevelQueue: %s Waiting til done", l.name)
l.pool.Wait()
log.Trace("LevelQueue: %s Waiting til cleaned", l.name)
ctx, cancel := context.WithCancel(context.Background())
atTerminate(ctx, cancel)
l.pool.CleanUp(ctx)
cancel()
log.Trace("LevelQueue: %s Cleaned", l.name)
}
func (l *LevelQueue) readToChan() {
for {
select {
case <-l.closed:
// tell the pool to shutdown.
l.pool.cancel()
return
default:
bs, err := l.queue.RPop()
if err != nil {
if err != levelqueue.ErrNotFound {
log.Error("LevelQueue: %s Error on RPop: %v", l.name, err)
}
time.Sleep(time.Millisecond * 100)
continue
}
if len(bs) == 0 {
time.Sleep(time.Millisecond * 100)