storage,server: remove .context()
tbg committed Jul 19, 2016
1 parent 310c7d4 commit 84396b3
Showing 8 changed files with 81 additions and 94 deletions.
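At a glance: the commit drops the Node.context()/Replica.context() plumbing and switches the context-flavored log calls (log.Infoc, log.Warningc, log.Errorc, log.Fatalc) to their format-string counterparts, tagging each message with the receiver via %s instead; Node gains a fmt.Stringer implementation for this. Below is a minimal, self-contained sketch of the pattern, using the standard library log package and a simplified Node type (both assumptions for illustration, not CockroachDB's actual types):

```go
package main

import (
	"fmt"
	"log"
)

// Node is a trimmed-down stand-in for server.Node; only the ID matters
// for the logging pattern shown here.
type Node struct {
	NodeID int32
}

// String implements fmt.Stringer, mirroring the method this commit adds:
// log lines are prefixed with "node=<id>" via %s instead of threading a
// NodeID-carrying context into the logger.
func (n *Node) String() string {
	return fmt.Sprintf("node=%d", n.NodeID)
}

func main() {
	n := &Node{NodeID: 1}
	engines := []string{"rocksdb"}
	// Before: log.Infoc(n.context(ctx), "Started node with %v engine(s)", engines)
	// After, the pattern this commit adopts (stdlib log used here to stay
	// self-contained):
	log.Printf("%s: started with %v engine(s)", n, engines)
}
```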
16 changes: 6 additions & 10 deletions server/node.go
@@ -253,13 +253,9 @@ func NewNode(
 	return n
 }
 
-// context returns a context encapsulating the NodeID, derived from the
-// supplied context (which is not allowed to be nil).
-func (n *Node) context(ctx context.Context) context.Context {
-	if ctx == nil {
-		panic("ctx cannot be nil")
-	}
-	return ctx // TODO(tschottdorf): see #1779
+// String implements fmt.Stringer.
+func (n *Node) String() string {
+	return fmt.Sprintf("node=%d", n.Descriptor.NodeID)
 }
 
 // initDescriptor initializes the node descriptor with the server
@@ -352,7 +348,7 @@ func (n *Node) start(addr net.Addr, engines []engine.Engine, attrs roachpb.Attri
 	// Record node started event.
 	n.recordJoinEvent()
 
-	log.Infoc(n.context(context.TODO()), "Started node with %v engine(s) and attributes %v", engines, attrs.Attrs)
+	log.Infof("%s: started with %v engine(s) and attributes %v", n, engines, attrs.Attrs)
 	return nil
 }
 
@@ -713,7 +709,7 @@ func (n *Node) recordJoinEvent() {
 			}{n.Descriptor, n.ClusterID, n.startedAt},
 		)
 	}); err != nil {
-		log.Warningc(n.context(context.TODO()), "unable to log %s event for node %d: %s", logEventType, n.Descriptor.NodeID, err)
+		log.Warningf("%s: unable to log %s event: %s", n, logEventType, err)
 	} else {
 		return
 	}
@@ -791,7 +787,7 @@ func (n *Node) Batch(
 		}
 	}
 	defer sp.Finish()
-	traceCtx := opentracing.ContextWithSpan(n.context(ctx), sp)
+	traceCtx := opentracing.ContextWithSpan(ctx, sp)
 
 	tStart := timeutil.Now()
 	var pErr *roachpb.Error
8 changes: 4 additions & 4 deletions storage/intent_resolver.go
@@ -268,7 +268,7 @@ func (ir *intentResolver) processIntentsAsync(r *Replica, intents []intentsWithA
 		return
 	}
 	now := r.store.Clock().Now()
-	ctx := r.context(context.TODO())
+	ctx := context.TODO()
 	stopper := r.store.Stopper()
 
 	for _, item := range intents {
@@ -299,11 +299,11 @@ func (ir *intentResolver) processIntentsAsync(r *Replica, intents []intentsWithA
 			// poison.
 			if err := ir.resolveIntents(ctxWithTimeout, r, resolveIntents,
 				true /* wait */, true /* poison */); err != nil {
-				log.Warningc(ctxWithTimeout, "failed to resolve intents: %s", err)
+				log.Warningf("%s: failed to resolve intents: %s", r, err)
 				return
 			}
 			if pushErr != nil {
-				log.Warningc(ctxWithTimeout, "failed to push during intent resolution: %s", pushErr)
+				log.Warningf("%s: failed to push during intent resolution: %s", r, pushErr)
 				return
 			}
 		}); err != nil {
@@ -325,7 +325,7 @@ func (ir *intentResolver) processIntentsAsync(r *Replica, intents []intentsWithA
 			// not make it back to the client.
 			if err := ir.resolveIntents(ctxWithTimeout, r, item.intents,
 				true /* wait */, false /* !poison */); err != nil {
-				log.Warningc(ctxWithTimeout, "failed to resolve intents: %s", err)
+				log.Warningf("%s: failed to resolve intents: %s", r, err)
 				return
 			}
 
2 changes: 1 addition & 1 deletion storage/intent_resolver_test.go
@@ -38,7 +38,7 @@ func TestPushTransactionsWithNonPendingIntent(t *testing.T) {
 
 	intents := []roachpb.Intent{{Span: roachpb.Span{Key: roachpb.Key("a")}, Status: roachpb.ABORTED}}
 	if _, pErr := tc.store.intentResolver.maybePushTransactions(
-		tc.rng.context(context.Background()), intents, roachpb.Header{}, roachpb.PUSH_TOUCH, true); !testutils.IsPError(pErr, "unexpected aborted/resolved intent") {
+		context.Background(), intents, roachpb.Header{}, roachpb.PUSH_TOUCH, true); !testutils.IsPError(pErr, "unexpected aborted/resolved intent") {
 		t.Errorf("expected error on aborted/resolved intent, but got %s", pErr)
 	}
 }
2 changes: 1 addition & 1 deletion storage/queue.go
@@ -455,7 +455,7 @@ func (bq *baseQueue) processReplica(repl *Replica, clock *hlc.Clock) error {
 	}
 
 	sp := repl.store.Tracer().StartSpan(fmt.Sprintf("%s:%d", bq.name, repl.RangeID))
-	ctx := opentracing.ContextWithSpan(repl.context(context.Background()), sp)
+	ctx := opentracing.ContextWithSpan(context.Background(), sp)
 	defer sp.Finish()
 
 	// If the queue requires a replica to have the range lease in
22 changes: 7 additions & 15 deletions storage/replica.go
@@ -457,14 +457,6 @@ func (r *Replica) setReplicaIDLocked(replicaID roachpb.ReplicaID) error {
 	return nil
 }
 
-// context returns a context with information about this range, derived from
-// the supplied context (which is not allowed to be nil). It is only relevant
-// when commands need to be executed on this range in the absence of a
-// pre-existing context, such as during range scanner operations.
-func (r *Replica) context(ctx context.Context) context.Context {
-	return r.store.context(ctx) // TODO(tschottdorf): see #1779
-}
-
 // GetMaxBytes atomically gets the range maximum byte limit.
 func (r *Replica) GetMaxBytes() int64 {
 	r.mu.Lock()
@@ -1758,7 +1750,7 @@ func (r *Replica) refurbishPendingCmdLocked(cmd *pendingCmd) *roachpb.Error {
 // updating only the applied index.
 func (r *Replica) processRaftCommand(idKey storagebase.CmdIDKey, index uint64, raftCmd roachpb.RaftCommand) *roachpb.Error {
 	if index == 0 {
-		log.Fatalc(r.context(context.TODO()), "processRaftCommand requires a non-zero index")
+		log.Fatalf("%s: processRaftCommand requires a non-zero index", r)
 	}
 
 	var forcedErr *roachpb.Error
@@ -1874,7 +1866,7 @@ func (r *Replica) processRaftCommand(idKey storagebase.CmdIDKey, index uint64, r
 	r.mu.Unlock()
 	if ctx == nil {
 		// TODO(tschottdorf): consider the Trace situation here.
-		ctx = r.context(context.Background())
+		ctx = context.Background()
 	}
 
 	log.Trace(ctx, "applying batch")
@@ -1893,7 +1885,7 @@ func (r *Replica) processRaftCommand(idKey storagebase.CmdIDKey, index uint64, r
 		cmd.done <- roachpb.ResponseWithError{Reply: br, Err: err}
 		close(cmd.done)
 	} else if err != nil && log.V(1) {
-		log.Errorc(r.context(context.TODO()), "error executing raft command: %s", err)
+		log.Errorf("%s: error executing raft command: %s", r, err)
 	}
 
 	return err
@@ -2452,7 +2444,7 @@ func (r *Replica) maybeGossipFirstRange() *roachpb.Error {
 		return nil
 	}
 
-	ctx := r.context(context.TODO())
+	ctx := context.Background()
 
 	// When multiple nodes are initialized with overlapping Gossip addresses, they all
 	// will attempt to gossip their cluster ID. This is a fairly obvious misconfiguration,
@@ -2532,7 +2524,7 @@ func (r *Replica) maybeGossipSystemConfig() {
 		return
 	}
 
-	ctx := r.context(context.TODO())
+	ctx := context.Background()
 	// TODO(marc): check for bad split in the middle of the SystemConfig span.
 	kvs, hash, err := r.loadSystemConfigSpan()
 	if err != nil {
@@ -2584,7 +2576,7 @@ func newReplicaCorruptionError(errs ...error) *roachpb.ReplicaCorruptionError {
 // range, store, node or cluster with corresponding actions taken.
 func (r *Replica) maybeSetCorrupt(pErr *roachpb.Error) *roachpb.Error {
 	if cErr, ok := pErr.GetDetail().(*roachpb.ReplicaCorruptionError); ok {
-		log.Errorc(r.context(context.TODO()), "stalling replica due to: %s", cErr.ErrorMsg)
+		log.Errorf("%s: stalling replica due to: %s", r, cErr.ErrorMsg)
 		cErr.Processed = true
 		return roachpb.NewError(cErr)
 	}
@@ -2602,7 +2594,7 @@ func (r *Replica) loadSystemConfigSpan() ([]roachpb.KeyValue, []byte, error) {
 	ba.Timestamp = r.store.Clock().Now()
 	ba.Add(&roachpb.ScanRequest{Span: keys.SystemConfigSpan})
 	br, intents, pErr :=
-		r.executeBatch(r.context(context.TODO()), storagebase.CmdIDKey(""), r.store.Engine(), nil, ba)
+		r.executeBatch(context.Background(), storagebase.CmdIDKey(""), r.store.Engine(), nil, ba)
 	if pErr != nil {
 		return nil, nil, pErr.GoError()
 	}
6 changes: 3 additions & 3 deletions storage/replica_command.go
@@ -774,7 +774,7 @@ func (r *Replica) runCommitTrigger(ctx context.Context, batch engine.Batch, ms *
 
 	if err := func() error {
 		if ct.GetSplitTrigger() != nil {
-			if err := r.splitTrigger(r.context(ctx), batch, ms, ct.SplitTrigger, txn.Timestamp); err != nil {
+			if err := r.splitTrigger(ctx, batch, ms, ct.SplitTrigger, txn.Timestamp); err != nil {
 				return err
 			}
 			*ms = enginepb.MVCCStats{} // clear stats, as split recomputed.
@@ -1677,7 +1677,7 @@ func (r *Replica) applyNewLeaseLocked(
 func (r *Replica) CheckConsistency(
 	args roachpb.CheckConsistencyRequest, desc *roachpb.RangeDescriptor,
 ) (roachpb.CheckConsistencyResponse, *roachpb.Error) {
-	ctx := r.context(context.TODO())
+	ctx := context.TODO()
 	key := desc.StartKey.AsRawKey()
 	endKey := desc.EndKey.AsRawKey()
 	id := uuid.MakeV4()
@@ -2855,7 +2855,7 @@ func (r *Replica) changeReplicasTrigger(ctx context.Context, batch engine.Batch,
 	_ = r.store.Stopper().RunAsyncTask(func() {
 		// Create a new context because this is an asynchronous task and we
 		// don't want to share the trace.
-		ctx := r.context(context.Background())
+		ctx := context.Background()
 		if hasLease, pErr := r.getLeaseForGossip(ctx); hasLease {
 			r.mu.Lock()
 			defer r.mu.Unlock()
3 changes: 1 addition & 2 deletions storage/replica_range_lease.go
@@ -129,8 +129,7 @@ func (p *pendingLeaseRequest) InitOrJoinRequest(
 		// checks from normal request machinery, (e.g. the command queue).
 		// Note that the command itself isn't traced, but usually the caller
 		// waiting for the result has an active Trace.
-		ch, _, err := replica.proposeRaftCommand(
-			replica.context(context.Background()), ba)
+		ch, _, err := replica.proposeRaftCommand(context.Background(), ba)
 		if err != nil {
 			execPErr = roachpb.NewError(err)
 		} else {
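One detail worth noting (my gloss, not part of the commit): the replacement contexts are deliberately mixed. context.TODO() marks call sites that should eventually receive a caller-supplied context (processIntentsAsync, CheckConsistency), while context.Background() is used where a fresh root context is genuinely intended, such as the async gossip task in changeReplicasTrigger. Both return an empty, non-nil context; the difference is documented intent:

```go
package main

import (
	"context"
	"fmt"
)

func main() {
	// A deliberate root context: appropriate for work detached from any
	// caller, like the RunAsyncTask closure in this diff.
	bg := context.Background()

	// A placeholder: behaviorally identical, but signals that the call
	// site should eventually take a context from its caller.
	todo := context.TODO()

	fmt.Println(bg)   // prints "context.Background"
	fmt.Println(todo) // prints "context.TODO"
}
```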