RFC: Update test (will squash if it's accepted)
Signed-off-by: Wei Fu <fuweid89@gmail.com>
fuweid committed Jul 26, 2024
1 parent f2636ee commit bcbe3ca
Showing 1 changed file with 77 additions and 32 deletions.
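The core idea of the diff below: rather than hard-coding compaction and HashKV revisions (22, 26, 32, 33, 41), the test now records the tombstone revisions while building the dataset and derives the target revisions from two boolean flags. A minimal standalone sketch of that selection rule, assuming the tombstones are recorded in ascending order with at least three put revisions after each; pickRev is a hypothetical helper for illustration only, the real test inlines this logic:

package main

import "fmt"

// pickRev mirrors the selection rule used by the updated test table: start
// from a recorded tombstone revision and, when a non-tombstone revision is
// wanted, step a few revisions past it (the dataset builder is expected to
// guarantee several puts between consecutive tombstones).
func pickRev(tombstone int64, onTombstone bool) int64 {
	if onTombstone {
		return tombstone
	}
	return tombstone + 3
}

func main() {
	tombstones := []int64{22, 33} // e.g. deletes issued at revisions 22 and 33
	fmt.Println(pickRev(tombstones[0], true))  // compact on the tombstone itself -> 22
	fmt.Println(pickRev(tombstones[1], false)) // hash a non-tombstone revision -> 36
}

With this rule, the four table cases (tombstone vs. non-tombstone for the compaction and the HashKV revision) stay valid even if the dataset builder later shifts the absolute revision numbers.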
109 changes: 77 additions & 32 deletions tests/e2e/hashkv_test.go
@@ -31,28 +31,24 @@ import (

func TestVerifyHashKVAfterCompact(t *testing.T) {
tests := []struct {
compactedOnRev int64
hashKVOnRev int64
compactedOnTombstoneRev bool
hashKVOnTombstoneRev bool
}{
{
compactedOnRev: 33, // tombstone
hashKVOnRev: 33, // tombstone
compactedOnTombstoneRev: true,
hashKVOnTombstoneRev: true,
},
{
compactedOnRev: 22, // tombstone
hashKVOnRev: 33, // tombstone
compactedOnTombstoneRev: false,
hashKVOnTombstoneRev: true,
},
{
compactedOnRev: 33, // tombstone
hashKVOnRev: 41, // non-tombstone
compactedOnTombstoneRev: true,
hashKVOnTombstoneRev: false,
},
{
compactedOnRev: 26, // non-tombstone
hashKVOnRev: 33, // tombstone
},
{
compactedOnRev: 32, // non-tombstone
hashKVOnRev: 41, // non-tombstone
compactedOnTombstoneRev: false,
hashKVOnTombstoneRev: false,
},
}

@@ -85,17 +81,32 @@ func TestVerifyHashKVAfterCompact(t *testing.T) {

clus, cliCfg := newClusterForHashKV(t, scenario.ClusterVersion)

newTestKeySetInCluster(t, clus, cliCfg, scenario.OnlyOneKey)
dataset := newTestDatasetInCluster(t, clus, cliCfg, scenario.OnlyOneKey)

// ensure we can pick the right revisions relative to the tombstones
require.True(t, len(dataset.tombstones) > 2)
require.True(t, dataset.tombstones[0]+3 < dataset.tombstones[1])
require.True(t, dataset.tombstones[1]+3 < dataset.tombstones[2])

compactedOnRev := dataset.tombstones[0]
if !tt.compactedOnTombstoneRev {
compactedOnRev += 3
}

hashKVOnRev := dataset.tombstones[1]
if !tt.hashKVOnTombstoneRev {
hashKVOnRev += 3
}

cli, err := fe2e.NewEtcdctl(cliCfg, clus.EndpointsGRPC())
require.NoError(t, err)

t.Logf("COMPACT rev=%d", tt.compactedOnRev)
_, err = cli.Compact(ctx, tt.compactedOnRev, config.CompactOption{Physical: true})
t.Logf("COMPACT on rev=%d", compactedOnRev)
_, err = cli.Compact(ctx, compactedOnRev, config.CompactOption{Physical: true})
require.NoError(t, err)

t.Logf("HashKV on rev=%d", tt.hashKVOnRev)
resp, err := cli.HashKV(ctx, tt.hashKVOnRev)
t.Logf("HashKV on rev=%d", hashKVOnRev)
resp, err := cli.HashKV(ctx, hashKVOnRev)
require.NoError(t, err)

require.Len(t, resp, 3)
@@ -109,6 +120,13 @@ func TestVerifyHashKVAfterCompact(t *testing.T) {
}
}

type datasetInfo struct {
keys map[string]struct{}

tombstones []int64
latestRevision int64
}

// key: "foo"
// modified: 41
// generations:
@@ -137,34 +155,42 @@ func TestVerifyHashKVAfterCompact(t *testing.T) {
// generations:
//
// {34, 1}
func newTestKeySetInCluster(t *testing.T, clus *fe2e.EtcdProcessCluster, cliCfg fe2e.ClientConfig, onlyOneKey bool) {
func newTestDatasetInCluster(t *testing.T, clus *fe2e.EtcdProcessCluster, cliCfg fe2e.ClientConfig, onlyOneKey bool) *datasetInfo {
t.Helper()
c := newClient(t, clus.EndpointsGRPC(), cliCfg)
defer c.Close()

info := &datasetInfo{
keys: make(map[string]struct{}),
latestRevision: 1,
}

ctx := context.Background()
key := "foo"
totalRev := 41

deleteOnRev := 11 // 22, 33
lastRevision := int64(1)
for i := 2; i <= totalRev; i++ {
require.Equal(t, int64(i-1), lastRevision)
require.Equal(t, int64(i-1), info.latestRevision)

if i%deleteOnRev == 0 {
t.Logf("DELETEing key=%s", key)
resp, derr := c.Delete(ctx, key)
require.NoError(t, derr)
lastRevision = resp.Header.Revision
info.latestRevision = resp.Header.Revision
info.tombstones = append(info.tombstones, resp.Header.Revision)
continue
}

info.keys[key] = struct{}{}

value := fmt.Sprintf("%d", i)
ops := []clientv3.Op{clientv3.OpPut(key, value)}

logMsg := fmt.Sprintf("PUTing key=%s", key)
if i%deleteOnRev == 1 && !onlyOneKey {
key2 := fmt.Sprintf("%s-%d", key, i/deleteOnRev)
info.keys[key2] = struct{}{}
ops = append(ops, clientv3.OpPut(key2, value))
logMsg = fmt.Sprintf("%s,key=%s", logMsg, key2)
}
@@ -174,31 +200,36 @@ func newTestKeySetInCluster(t *testing.T, clus *fe2e.EtcdProcessCluster, cliCfg
require.NoError(t, terr)
require.True(t, resp.Succeeded)
require.Len(t, resp.Responses, len(ops))
lastRevision = resp.Header.Revision
info.latestRevision = resp.Header.Revision
}
return info
}

func TestVerifyHashKVAfterTwoCompactions_MixVersions(t *testing.T) {
clus, cliCfg := newClusterForHashKV(t, fe2e.QuorumLastVersion)

newTestKeySet2InCluster(t, clus, cliCfg)
dataset := newTestDataset2InCluster(t, clus, cliCfg)

require.True(t, len(dataset.tombstones) > 1)
require.True(t, dataset.tombstones[0]+3 < dataset.tombstones[1])
require.True(t, dataset.tombstones[0]+3 < dataset.latestRevision)

cli, err := fe2e.NewEtcdctl(cliCfg, clus.EndpointsGRPC())
require.NoError(t, err)

ctx := context.Background()

firstCompactOnRev := int64(6)
firstCompactOnRev := dataset.tombstones[0]
t.Logf("COMPACT rev=%d", firstCompactOnRev)
_, err = cli.Compact(ctx, firstCompactOnRev, config.CompactOption{Physical: true})
require.NoError(t, err)

secondCompactOnRev := int64(10)
secondCompactOnRev := firstCompactOnRev + 3
t.Logf("COMPACT rev=%d", secondCompactOnRev)
_, err = cli.Compact(ctx, secondCompactOnRev, config.CompactOption{Physical: true})
require.NoError(t, err)

for hashKVOnRev := int64(10); hashKVOnRev <= 14; hashKVOnRev++ {
for hashKVOnRev := secondCompactOnRev; hashKVOnRev <= dataset.latestRevision; hashKVOnRev++ {
t.Logf("HashKV on rev=%d", hashKVOnRev)
resp, err := cli.HashKV(ctx, hashKVOnRev)
require.NoError(t, err)
@@ -239,13 +270,22 @@ func TestVerifyHashKVAfterTwoCompactions_MixVersions(t *testing.T) {
//
// {empty}
// {8, 0}, {9, 0}, {12, 0}[tombstone]
func newTestKeySet2InCluster(t *testing.T, clus *fe2e.EtcdProcessCluster, cliCfg fe2e.ClientConfig) {
func newTestDataset2InCluster(t *testing.T, clus *fe2e.EtcdProcessCluster, cliCfg fe2e.ClientConfig) *datasetInfo {
t.Helper()
c := newClient(t, clus.EndpointsGRPC(), cliCfg)
defer c.Close()

ctx := context.Background()

info := &datasetInfo{
keys: map[string]struct{}{
"foo": {},
"foo1": {},
"foo2": {},
"foo3": {},
},
}

_, err := c.Put(ctx, "foo", "2")
require.NoError(t, err)

@@ -258,11 +298,12 @@ func newTestKeySet2InCluster(t *testing.T, clus *fe2e.EtcdProcessCluster, cliCfg
_, err = c.Put(ctx, "foo1", "5")
require.NoError(t, err)

_, err = c.Txn(ctx).Then(
tresp, err := c.Txn(ctx).Then(
clientv3.OpDelete("foo"),
clientv3.OpDelete("foo1"),
).Commit()
require.NoError(t, err)
info.tombstones = append(info.tombstones, tresp.Header.Revision)

_, err = c.Put(ctx, "foo2", "7")
require.NoError(t, err)
@@ -276,18 +317,22 @@ func newTestKeySet2InCluster(t *testing.T, clus *fe2e.EtcdProcessCluster, cliCfg
_, err = c.Put(ctx, "foo2", "10")
require.NoError(t, err)

_, err = c.Delete(ctx, "foo2")
dresp, err := c.Delete(ctx, "foo2")
require.NoError(t, err)
info.tombstones = append(info.tombstones, dresp.Header.Revision)

_, err = c.Delete(ctx, "foo3")
dresp, err = c.Delete(ctx, "foo3")
require.NoError(t, err)
info.tombstones = append(info.tombstones, dresp.Header.Revision)

_, err = c.Put(ctx, "foo", "13")
require.NoError(t, err)

resp, err := c.Put(ctx, "foo", "14")
require.NoError(t, err)
require.Equal(t, int64(14), resp.Header.Revision)
info.latestRevision = resp.Header.Revision
return info
}

func newClusterForHashKV(t *testing.T, clusVersion fe2e.ClusterVersion) (*fe2e.EtcdProcessCluster, fe2e.ClientConfig) {