[dbnode] Aggregate() using only FSTs where possible
prateek committed Apr 18, 2019
1 parent 637dc24 commit 2ce7b21
Showing 21 changed files with 3,737 additions and 552 deletions.
17 changes: 16 additions & 1 deletion src/dbnode/generated-source-files.mk
@@ -172,7 +172,9 @@ genny-map-storage-index-aggregation-results: genny-map-storage-index-aggregate-v

# generation rule for all generated arraypools
.PHONY: genny-arraypool-all
genny-arraypool-all: genny-arraypool-node-segments
genny-arraypool-all: \
genny-arraypool-node-segments \
genny-arraypool-aggregate-results-entry \

# arraypool generation rule for ./network/server/tchannelthrift/node/segmentsArrayPool
.PHONY: genny-arraypool-node-segments
@@ -186,6 +188,19 @@ genny-arraypool-node-segments:
rename_type_middle=Segments \
rename_constructor=newSegmentsArrayPool

# arraypool generation rule for ./storage/index/AggregateResultsEntryArrayPool
.PHONY: genny-arraypool-aggregate-results-entry
genny-arraypool-aggregate-results-entry:
cd $(m3x_package_path) && make genny-arraypool \
pkg=index \
elem_type=AggregateResultsEntry \
target_package=$(m3db_package)/src/dbnode/storage/index \
out_file=aggregate_results_entry_arraypool_gen.go \
rename_type_prefix=AggregateResultsEntry \
rename_type_middle=AggregateResultsEntry \
rename_constructor=NewAggregateResultsEntryArrayPool \
rename_gen_types=true \
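
For context, a minimal sketch of how the generated pool would typically be consumed, assuming the m3x arraypool template's usual Opts/Init/Get/Put surface; the option values and usage below are illustrative, not part of this diff:

pool := index.NewAggregateResultsEntryArrayPool(index.AggregateResultsEntryArrayPoolOpts{
	Capacity: 256, // hypothetical sizing
})
pool.Init()

entries := pool.Get() // borrow a zero-length []AggregateResultsEntry
entries = append(entries, index.AggregateResultsEntry{})
// ... use entries, e.g. hand them to AggregateResults.AddFields ...
pool.Put(entries) // return the backing array for reuse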

# generation rule for all generated leakcheckpools
.PHONY: genny-leakcheckpool-all
genny-leakcheckpool-all: \
2 changes: 1 addition & 1 deletion src/dbnode/generated/mocks/generate.go
@@ -24,7 +24,6 @@
//go:generate sh -c "mockgen -package=xio $PACKAGE/src/dbnode/x/xio SegmentReader,SegmentReaderPool | genclean -pkg $PACKAGE/src/dbnode/x/xio -out $GOPATH/src/$PACKAGE/src/dbnode/x/xio/io_mock.go"
//go:generate sh -c "mockgen -package=digest -destination=$GOPATH/src/$PACKAGE/src/dbnode/digest/digest_mock.go $PACKAGE/src/dbnode/digest ReaderWithDigest"
//go:generate sh -c "mockgen -package=series $PACKAGE/src/dbnode/storage/series DatabaseSeries,QueryableBlockRetriever | genclean -pkg $PACKAGE/src/dbnode/storage/series -out $GOPATH/src/$PACKAGE/src/dbnode/storage/series/series_mock.go"
//go:generate sh -c "mockgen -package=index $PACKAGE/src/dbnode/storage/index QueryResults,AggregateResults,Block,OnIndexSeries | genclean -pkg $PACKAGE/src/dbnode/storage/index -out $GOPATH/src/$PACKAGE/src/dbnode/storage/index/index_mock.go"

// mockgen rules for generating mocks for unexported interfaces (file mode)
//go:generate sh -c "mockgen -package=encoding -destination=$GOPATH/src/$PACKAGE/src/dbnode/encoding/encoding_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/encoding/types.go"
@@ -41,5 +40,6 @@
//go:generate sh -c "mockgen -package=namespace -destination=$GOPATH/src/$PACKAGE/src/dbnode/storage/namespace/namespace_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/storage/namespace/types.go"
//go:generate sh -c "mockgen -package=runtime -destination=$GOPATH/src/$PACKAGE/src/dbnode/runtime/runtime_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/runtime/types.go"
//go:generate sh -c "mockgen -package=ts -destination=$GOPATH/src/$PACKAGE/src/dbnode/ts/write_batch_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/ts/types.go"
//go:generate sh -c "mockgen -package=index -destination=$GOPATH/src/$PACKAGE/src/dbnode/storage/index/index_mock.go -source=$GOPATH/src/$PACKAGE/src/dbnode/storage/index/types.go"

package mocks
134 changes: 96 additions & 38 deletions src/dbnode/storage/index.go
@@ -41,6 +41,7 @@ import (
"github.com/m3db/m3/src/dbnode/storage/index/convert"
"github.com/m3db/m3/src/dbnode/storage/namespace"
"github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/m3ninx/idx"
m3ninxindex "github.com/m3db/m3/src/m3ninx/index"
"github.com/m3db/m3/src/m3ninx/index/segment"
"github.com/m3db/m3/src/m3ninx/index/segment/builder"
@@ -72,6 +73,10 @@ const (
nsIndexReportStatsInterval = 10 * time.Second
)

var (
allQuery = idx.NewAllQuery()
)

// nolint: maligned
type nsIndex struct {
state nsIndexState
@@ -167,6 +172,23 @@ type newNamespaceIndexOpts struct {
newBlockFn newBlockFn
}

// execBlockQueryFn executes a query against the given block whilst tracking state.
type execBlockQueryFn func(
cancellable *resource.CancellableLifetime,
block index.Block,
query index.Query,
opts index.QueryOptions,
state *asyncQueryExecState,
results index.BaseResults,
)

// asyncQueryExecState tracks the async execution errors and results for a query.
type asyncQueryExecState struct {
sync.Mutex
multiErr xerrors.MultiError
exhaustive bool
}
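
Factoring the per-block work into an execBlockQueryFn callback lets Query and AggregateQuery share a single fan-out, timeout, and error-merging loop in query(); each entry point only picks its callback. A stripped-down serial sketch of that dispatch pattern (runOverBlocks is a hypothetical helper, assuming the index package import; the real loop below runs blocks concurrently and collects every error into a MultiError):

// Serial sketch: apply fn to each block, ANDing exhaustiveness.
func runOverBlocks(
	blocks []index.Block,
	fn func(b index.Block) (bool, error),
) (bool, error) {
	var (
		exhaustive = true
		lastErr    error
	)
	for _, b := range blocks {
		blockExhaustive, err := fn(b)
		if err != nil {
			lastErr = err // simplified: the real code accumulates all errors
			continue
		}
		exhaustive = exhaustive && blockExhaustive
	}
	return exhaustive, lastErr
}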

// newNamespaceIndex returns a new namespaceIndex for the provided namespace.
func newNamespaceIndex(
nsMD namespace.Metadata,
@@ -265,6 +287,7 @@ func newNamespaceIndexWithOptions(
queryWorkersPool: newIndexOpts.opts.QueryIDsWorkerPool(),
metrics: newNamespaceIndexMetrics(indexOpts, instrumentOpts),
}

if runtimeOptsMgr != nil {
idx.runtimeOptsListener = runtimeOptsMgr.RegisterListener(idx)
}
@@ -868,7 +891,7 @@ func (i *nsIndex) Query(
results.Reset(i.nsMetadata.ID(), index.QueryResultsOptions{
SizeLimit: opts.Limit,
})
exhaustive, err := i.query(ctx, query, results, opts)
exhaustive, err := i.query(ctx, query, results, opts, i.execBlockQueryFn)
if err != nil {
return index.QueryResult{}, err
}
@@ -890,7 +913,12 @@ func (i *nsIndex) AggregateQuery(
TermFilter: opts.TermFilter,
Type: opts.Type,
})
exhaustive, err := i.query(ctx, query, results, opts.QueryOptions)
// Use the appropriate fn to query the underlying blocks.
fn := i.execBlockQueryFn
if query.Equal(allQuery) {
fn = i.execBlockAggregateQueryFn
}
exhaustive, err := i.query(ctx, query, results, opts.QueryOptions, fn)
if err != nil {
return index.AggregateQueryResult{}, err
}
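
This dispatch is the heart of the change: a match-all query applies no per-document filtering, so aggregation can be served straight from the segment FSTs via the block aggregate path. A hypothetical caller-side sketch; nsIdx, ctx, and the option shapes are assumptions based on what is visible in this diff:

// Aggregate all fields/terms in the namespace; with this change the
// match-all query never touches documents, only the FSTs.
query := index.Query{Query: idx.NewAllQuery()}
result, err := nsIdx.AggregateQuery(ctx, query, index.AggregationOptions{
	QueryOptions: index.QueryOptions{Limit: 1000},
})
if err != nil {
	return err
}
// result is assumed to carry the aggregated fields/terms plus an
// exhaustive flag indicating whether the limit cut the aggregation short.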
@@ -905,6 +933,7 @@ func (i *nsIndex) query(
query index.Query,
results index.BaseResults,
opts index.QueryOptions,
execBlockFn execBlockQueryFn,
) (bool, error) {
// Capture start before needing to acquire lock.
start := i.nowFn()
@@ -940,50 +969,19 @@ func (i *nsIndex) query(
}

var (
deadline = start.Add(timeout)
wg sync.WaitGroup

// State contains concurrent mutable state for async execution below.
state = struct {
sync.Mutex
multiErr xerrors.MultiError
exhaustive bool
}{
state = asyncQueryExecState{
exhaustive: true,
}
deadline = start.Add(timeout)
wg sync.WaitGroup
)

// Create a cancellable lifetime and cancel it at end of this method so that
// no child async task modifies the result after this method returns.
cancellable := resource.NewCancellableLifetime()
defer cancellable.Cancel()

execBlockQuery := func(block index.Block) {
blockExhaustive, err := block.Query(cancellable, query, opts, results)
if err == index.ErrUnableToQueryBlockClosed {
// NB(r): Because we query this block outside of the results lock, it's
// possible this block may get closed if it slides out of retention, in
// that case those results are no longer considered valid and outside of
// retention regardless, so this is a non-issue.
err = nil
}

state.Lock()
defer state.Unlock()

if err != nil {
state.multiErr = state.multiErr.Add(err)
return
}

if blockExhaustive {
return
}

// If block had more data but we stopped early, need to notify caller.
state.exhaustive = false
}

for _, block := range blocks {
// Capture block for async query execution below.
block := block
@@ -1009,7 +1007,7 @@ func (i *nsIndex) query(
// No timeout, just wait blockingly for a worker.
wg.Add(1)
i.queryWorkersPool.Go(func() {
execBlockQuery(block)
execBlockFn(cancellable, block, query, opts, &state, results)
wg.Done()
})
continue
@@ -1020,7 +1018,7 @@ func (i *nsIndex) query(
if timeLeft := deadline.Sub(i.nowFn()); timeLeft > 0 {
wg.Add(1)
timedOut := !i.queryWorkersPool.GoWithTimeout(func() {
execBlockQuery(block)
execBlockFn(cancellable, block, query, opts, &state, results)
wg.Done()
}, timeLeft)

@@ -1085,6 +1083,66 @@ func (i *nsIndex) query(
return exhaustive, nil
}
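
When a per-query timeout is set, the loop above waits at most the remaining deadline for a worker and records a timeout error for the block rather than stalling the whole query. A self-contained toy of that scheduling pattern; goWithTimeout and the channel-based worker pool here are hypothetical stand-ins, not this package's worker pool:

package main

import (
	"fmt"
	"time"
)

// goWithTimeout (hypothetical) tries to claim a worker slot within timeout;
// on success the work runs asynchronously and the slot is released when done.
func goWithTimeout(work func(), timeout time.Duration, workers chan struct{}) bool {
	select {
	case workers <- struct{}{}:
		go func() {
			defer func() { <-workers }()
			work()
		}()
		return true
	case <-time.After(timeout):
		return false
	}
}

func main() {
	workers := make(chan struct{}, 1) // a single worker
	deadline := time.Now().Add(20 * time.Millisecond)
	timedOut := 0
	for i := 0; i < 3; i++ {
		timeLeft := time.Until(deadline)
		if timeLeft <= 0 || !goWithTimeout(func() {
			time.Sleep(30 * time.Millisecond) // simulated per-block query
		}, timeLeft, workers) {
			timedOut++ // analogous to recording a per-block timeout error
		}
	}
	fmt.Println(timedOut, "of 3 blocks timed out waiting for a worker")
}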

func (i *nsIndex) execBlockQueryFn(
cancellable *resource.CancellableLifetime,
block index.Block,
query index.Query,
opts index.QueryOptions,
state *asyncQueryExecState,
results index.BaseResults,
) {
blockExhaustive, err := block.Query(cancellable, query, opts, results)
if err == index.ErrUnableToQueryBlockClosed {
// NB(r): Because we query this block outside of the results lock, it's
// possible this block may get closed if it slides out of retention, in
// that case those results are no longer considered valid and outside of
// retention regardless, so this is a non-issue.
err = nil
}

state.Lock()
defer state.Unlock()

if err != nil {
state.multiErr = state.multiErr.Add(err)
}
state.exhaustive = state.exhaustive && blockExhaustive
}

func (i *nsIndex) execBlockAggregateQueryFn(
cancellable *resource.CancellableLifetime,
block index.Block,
query index.Query,
opts index.QueryOptions,
state *asyncQueryExecState,
results index.BaseResults,
) {
aggResults, ok := results.(index.AggregateResults)
if !ok { // should never happen
state.Lock()
state.multiErr = state.multiErr.Add(
fmt.Errorf("unknown results type [%T] received during aggregation", results))
state.Unlock()
return
}

blockExhaustive, err := block.Aggregate(cancellable, opts, aggResults)
if err == index.ErrUnableToQueryBlockClosed {
// NB(r): Because we query this block outside of the results lock, it's
// possible this block may get closed if it slides out of retention, in
// that case those results are no longer considered valid and outside of
// retention regardless, so this is a non-issue.
err = nil
}

state.Lock()
defer state.Unlock()
if err != nil {
state.multiErr = state.multiErr.Add(err)
}
state.exhaustive = state.exhaustive && blockExhaustive
}
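
Both callbacks share one merge discipline: errors accumulate into the state's MultiError while exhaustiveness ANDs across blocks, so a single truncated block marks the whole response non-exhaustive. A toy illustration (not code from this diff):

state := asyncQueryExecState{exhaustive: true}
for _, blockExhaustive := range []bool{true, false, true} {
	state.Lock()
	state.exhaustive = state.exhaustive && blockExhaustive
	state.Unlock()
}
// state.exhaustive is now false: one block stopped early, so the caller
// is told the results may be incomplete.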

func (i *nsIndex) timeoutForQueryWithRLock(
ctx context.Context,
) time.Duration {
43 changes: 43 additions & 0 deletions src/dbnode/storage/index/aggregate_results.go
@@ -104,6 +104,49 @@ func (r *aggregatedResults) AddDocuments(batch []doc.Document) (int, error) {
return size, err
}

func (r *aggregatedResults) AggregateResultsOptions() AggregateResultsOptions {
return r.aggregateOpts
}

func (r *aggregatedResults) AddFields(batch []AggregateResultsEntry) int {
r.Lock()
for _, entry := range batch {
f := entry.Field
aggValues, ok := r.resultsMap.Get(f)
if !ok {
aggValues = r.valuesPool.Get()
// we can avoid the copy because we assume ownership of the passed ident.ID,
// but still need to finalize it.
r.resultsMap.SetUnsafe(f, aggValues, AggregateResultsMapSetUnsafeOptions{
NoCopyKey: true,
NoFinalizeKey: false,
})
} else {
// because we already have an entry for this field, we release the ident back to
// the underlying pool.
f.Finalize()
}
valuesMap := aggValues.Map()
for _, t := range entry.Terms {
if !valuesMap.Contains(t) {
// we can avoid the copy because we assume ownership of the passed ident.ID,
// but still need to finalize it.
valuesMap.SetUnsafe(t, struct{}{}, AggregateValuesMapSetUnsafeOptions{
NoCopyKey: true,
NoFinalizeKey: false,
})
} else {
// because we already have an entry for this term, we release the ident back to
// the underlying pool.
t.Finalize()
}
}
}
size := r.resultsMap.Len()
r.Unlock()
return size
}
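
A hypothetical usage sketch of the ownership contract described in the comments above; the IDs and the results variable are illustrative:

// AddFields assumes ownership of every ident.ID in the batch: IDs for new
// fields/terms are retained without copying; duplicates are finalized back
// to their pools. Callers must not reuse or Finalize the IDs afterwards.
entry := index.AggregateResultsEntry{
	Field: ident.StringID("city"),
	Terms: []ident.ID{ident.StringID("abc"), ident.StringID("def")},
}
size := results.AddFields([]index.AggregateResultsEntry{entry})
// size reports the number of distinct fields accumulated so far.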

func (r *aggregatedResults) addDocumentsBatchWithLock(
batch []doc.Document,
) error {
