diff --git a/README.md b/README.md
index a10a5c6..568b79c 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,104 @@
-##### Version 2.0.1
-> add rate-limit logging to proxy access to store
+bfs
+==============
+`bfs` is a small-file storage system implemented in golang, based on facebook haystack.
-##### Version 2.0.0
-> 1. limit the bfs cache to 1M or less
+---------------------------------------
+ * [Features](#features)
+ * [Installation](#installation)
+ * [Cluster](#cluster)
+ * [API](#api)
+ * [More](#more)
-# bfs
-distributed file system(small file storage) writen by golang according to facebook haystack.
+---------------------------------------
+
+## Features
+ * High throughput and low latency
+ * Fault tolerance
+ * Efficient
+ * Easy to maintain
+
+## Installation
+
+### 1. Install hbase and zookeeper
+
+ * Refer to the hbase website. For installation and startup, see [here](https://hbase.apache.org/).
+ * Refer to the zookeeper website. For installation and startup, see [here](http://zookeeper.apache.org/).
+
+### 2. Set up the golang and python environments
+
+ * Refer to the golang website. For installation, see [here](https://golang.org/doc/install).
+ * Refer to the python website. For installation, see [here](https://www.python.org/).
+
+### 3. Install gosnowflake
+
+ * See [here](https://github.com/Terry-Mao/gosnowflake)
+
+### 4. Deployment
+1. Download bfs and its dependencies
+```sh
+$ go get -u github.com/Terry-Mao/bfs
+$ cd /data/apps/go/src/github.com/Terry-Mao/bfs
+$ go get ./...
+```
+
+2. Install the directory, store, pitchfork and proxy modules (adjust the config files to your actual machine environment)
+```sh
+$ cd $GOPATH/src/github.com/Terry-Mao/bfs/directory
+$ go install
+$ cp directory.toml $GOPATH/bin/directory.toml
+$ cd ../store/
+$ go install
+$ cp store.toml $GOPATH/bin/store.toml
+$ cd ../pitchfork/
+$ go install
+$ cp pitchfork.toml $GOPATH/bin/pitchfork.toml
+$ cd ../proxy
+$ go install
+$ cp proxy.toml $GOPATH/bin/proxy.toml
+
+```
+At this point the whole environment is set up!
+
+### 5. Startup
+```sh
+$ cd $GOPATH/bin
+$ nohup $GOPATH/bin/directory -c $GOPATH/bin/directory.toml &
+$ nohup $GOPATH/bin/store -c $GOPATH/bin/store.toml &
+$ nohup $GOPATH/bin/pitchfork -c $GOPATH/bin/pitchfork.toml &
+$ nohup $GOPATH/bin/proxy -c $GOPATH/bin/proxy.toml &
+$ cd $GOPATH/src/github.com/Terry-Mao/bfs/ops
+$ nohup python runserver.py &
+```
+
+### 6. Testing
+ * For bfs initialization and storage allocation, see [here](https://github.com/Terry-Mao/bfs/doc/ops.md)
+ * For making requests to bfs, see [here](https://github.com/Terry-Mao/bfs/doc/proxy.md)
+
+## Cluster
+
+![bfs architecture](http://i0.hdslb.com/bfs/active/bfs_server.png)
+
+### directory
+
+ * directory schedules requests evenly across the cluster and manages metadata; metadata is stored in hbase and file keys are generated by gosnowflake
+
+### store
+
+ * store handles the physical storage of files
+
+### pitchfork
+
+ * pitchfork monitors the service status, availability and disk state of the stores
+
+### proxy
+
+ * proxy acts as the access proxy for bfs storage and maintains bucket-related state
+
+### ops
+
+ * ops is the admin web console of bfs, handling maintenance work such as storage allocation, expansion and compaction
+
+## API
+[API documentation](https://github.com/Terry-Mao/bfs/blob/master/doc/api.md)
+## More
\ No newline at end of file
diff --git a/directory/conf/config.go b/directory/conf/config.go
index c3031c3..665eef1 100644
--- a/directory/conf/config.go
+++ b/directory/conf/config.go
@@ -1,10 +1,13 @@
 package conf
 
 import (
-	"github.com/BurntSushi/toml"
 	"io/ioutil"
 	"os"
 	"time"
+
+	xtime "bfs/libs/time"
+
+	"github.com/BurntSushi/toml"
 )
 
 type Config struct {
@@ -34,12 +37,24 @@ type Zookeeper struct {
 	GroupRoot string
 }
 
+// HBase config.
 type HBase struct {
-	Addr       string
-	MaxActive  int
-	MaxIdle    int
-	Timeout    duration
-	LvsTimeout duration
+	ZookeeperHbase *ZookeeperHbase
+	// default "" means use default hbase zk path.
It should correspond to server config + Master string + Meta string + TestRowKey string + DialTimeout xtime.Duration // 0 means no dial timeout + ReadTimeout xtime.Duration + ReadsTimeout xtime.Duration + WriteTimeout xtime.Duration + WritesTimeout xtime.Duration +} + +type ZookeeperHbase struct { + Root string + Addrs []string + Timeout xtime.Duration } // Code to implement the TextUnmarshaler interface for `duration`: diff --git a/directory/directory.go b/directory/directory.go index 48de174..dbcdf5e 100644 --- a/directory/directory.go +++ b/directory/directory.go @@ -19,7 +19,7 @@ const ( retrySleep = time.Second * 1 ) -// Directory +// Directory . // id means store serverid; vid means volume id; gid means group id type Directory struct { // STORE @@ -34,15 +34,15 @@ type Directory struct { volume map[int32]*meta.VolumeState // volume_id:volume_state volumeStore map[int32][]string // volume_id:store_server_id - genkey *snowflake.Genkey // snowflake client for gen key - hBase *hbase.HBaseClient // hBase client - dispatcher *Dispatcher // dispatch for write or read reqs + genkey *snowflake.Genkey // snowflake client for gen key + hBase *hbase.Client // hBase client + dispatcher *Dispatcher // dispatch for write or read reqs config *conf.Config zk *myzk.Zookeeper } -// NewDirectory +// NewDirectory . func NewDirectory(config *conf.Config) (d *Directory, err error) { d = &Directory{} d.config = config @@ -52,10 +52,8 @@ func NewDirectory(config *conf.Config) (d *Directory, err error) { if d.genkey, err = snowflake.NewGenkey(config.Snowflake.ZkAddrs, config.Snowflake.ZkPath, config.Snowflake.ZkTimeout.Duration, config.Snowflake.WorkId); err != nil { return } - if err = hbase.Init(config); err != nil { - return - } - d.hBase = hbase.NewHBaseClient() + + d.hBase = hbase.NewClient(config.HBase) d.dispatcher = NewDispatcher() go d.SyncZookeeper() return @@ -259,7 +257,7 @@ func (d *Directory) GetStores(bucket, filename string) (n *meta.Needle, f *meta. stores = make([]string, 0, len(svrs)) for _, store = range svrs { if storeMeta, ok = d.store[store]; !ok { - log.Errorf("store cannot match store:", store) + log.Errorf("store cannot match store:%s", store) continue } if !storeMeta.CanRead() { @@ -283,8 +281,8 @@ func (d *Directory) UploadStores(bucket string, f *meta.File) (n *meta.Needle, s storeMeta *meta.Store ok bool ) - if vid, err = d.dispatcher.VolumeId(d.group, d.storeVolume); err != nil { - log.Errorf("dispatcher.VolumeId error(%v)", err) + if vid, err = d.dispatcher.VolumeID(d.group, d.storeVolume); err != nil { + log.Errorf("dispatcher.VolumeID error(%v)", err) err = errors.ErrStoreNotAvailable return } diff --git a/directory/directory.toml b/directory/directory.toml index ef84909..ba16f21 100644 --- a/directory/directory.toml +++ b/directory/directory.toml @@ -23,7 +23,7 @@ ZkPath = "/gosnowflake-servers" ZkTimeout = "15s" # workid -WorkId = 0 +WorkId = 1 [zookeeper] # zookeeper cluster addrs, multiple addrs split by ",". @@ -47,14 +47,14 @@ GroupRoot = "/group" PullInterval = "10s" [hbase] -# addr 172.16.13.90:9090 -Addr = "172.16.13.90:9090" - -# Note that you must specify a number here. -MaxActive = 100 - -# Note that you must specify a number here. -MaxIdle = 100 - -# Note that you must specify a number here. 
-Timeout = "1s" +master = "" +meta = "" +dialTimeout = "1s" +readTimeout = "10s" +readsTimeout = "10s" +writeTimeout = "10s" +writesTimeout = "10s" + [hbase.zookeeperHbase] + root = "" + addrs = ["localhost:2181"] + timeout = "30s" \ No newline at end of file diff --git a/directory/directory_test.go b/directory/directory_test.go index fad8f7f..26aa98b 100644 --- a/directory/directory_test.go +++ b/directory/directory_test.go @@ -2,26 +2,30 @@ package main import ( "testing" - "time" + // "time" + + "bfs/directory/conf" + dzk "bfs/directory/zk" ) func TestDirectory(t *testing.T) { var ( err error - config *Config - zk *Zookeeper + config *conf.Config + zk *dzk.Zookeeper d *Directory ) - if config, err = NewConfig("./directory.conf"); err != nil { + if config, err = conf.NewConfig("./directory.toml"); err != nil { t.Errorf("NewConfig() error(%v)", err) t.FailNow() } - if zk, err = NewZookeeper([]string{"localhost:2181"}, time.Second*1, "/rack", "/volume", "/group"); err != nil { + if zk, err = dzk.NewZookeeper(config); err != nil { t.Errorf("NewZookeeper() error(%v)", err) t.FailNow() } - if d, err = NewDirectory(config, zk); err != nil { + defer zk.Close() + if d, err = NewDirectory(config); err != nil { t.Errorf("NewDirectory() error(%v)", err) t.FailNow() } @@ -29,7 +33,7 @@ func TestDirectory(t *testing.T) { t.Errorf("syncStores() error(%v)", err) t.FailNow() } - if _, err = d.syncGroups(); err != nil { + if err = d.syncGroups(); err != nil { t.Errorf("syncGroups() error(%v)", err) t.FailNow() } diff --git a/directory/dispatcher.go b/directory/dispatcher.go index 8937019..412db90 100644 --- a/directory/dispatcher.go +++ b/directory/dispatcher.go @@ -3,18 +3,20 @@ package main import ( "bfs/libs/errors" "bfs/libs/meta" - log "github.com/golang/glog" "math/rand" "sync" "time" + + log "github.com/golang/glog" ) -// Dispatcher +// Dispatcher , // get raw data and processed into memory for http reqs type Dispatcher struct { - gids []int // for write eg: gid:1;2 gids: [1,1,2,2,2,2,2] - rand *rand.Rand - rlock sync.Mutex + gids []int // for write eg: gid:1;2 gids: [1,1,2,2,2,2,2] + wrtVids map[string]int32 // choose most suitable written volume, always order by rest space. + rand *rand.Rand + rlock sync.Mutex } const ( @@ -23,9 +25,10 @@ const ( spaceBenchmark = meta.MaxBlockOffset // 1 volume addDelayBenchmark = 100 // 100ms <100ms means no load, -Score==0 baseAddDelay = 100 // 1s score: -(1000/baseAddDelay)*addDelayBenchmark == -1000 + minFreeSpace = 10 * 1024 * 1024 // 10M * PaddingSize every volume must have 80M left. PaddingSize:8 ) -// NewDispatcher +// NewDispatcher . func NewDispatcher() (d *Dispatcher) { d = new(Dispatcher) d.rand = rand.New(rand.NewSource(time.Now().UnixNano())) @@ -41,6 +44,7 @@ func (d *Dispatcher) Update(group map[int][]string, i int vid int32 gids []int + wrtVids map[string]int32 sid string stores []string restSpace, minScore, score int @@ -50,6 +54,7 @@ func (d *Dispatcher) Update(group map[int][]string, volumeState *meta.VolumeState ) gids = []int{} + wrtVids = map[string]int32{} for gid, stores = range group { write = true // check all stores can writeable by the group. 
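The `gids` field documented above ("gid:1;2 gids: [1,1,2,2,2,2,2]") encodes score-weighted write balancing: each writable group id is repeated in proportion to its score, and a uniform random pick over the expanded slice then favours groups with more free space and lower write delay. The sketch below only illustrates that selection idea; the helper names (`weightedGids`, `pickGid`) and the literal weights are assumptions for the example, not the project's actual `calScore` output.

```go
package main

import (
	"fmt"
	"math/rand"
)

// weightedGids expands each group id into the slice in proportion to its
// weight, mirroring the "gid:1;2 gids: [1,1,2,2,2,2,2]" comment above.
// The weights here are illustrative; the real dispatcher derives them
// from free-space and write-delay scores.
func weightedGids(weights map[int]int) (gids []int) {
	for gid, w := range weights {
		for i := 0; i < w; i++ {
			gids = append(gids, gid)
		}
	}
	return
}

// pickGid does the uniform random pick over the expanded slice, so a
// group repeated five times is five times as likely to be chosen.
func pickGid(r *rand.Rand, gids []int) (int, bool) {
	if len(gids) == 0 {
		return 0, false // no writable group available
	}
	return gids[r.Intn(len(gids))], true
}

func main() {
	r := rand.New(rand.NewSource(1))
	gids := weightedGids(map[int]int{1: 2, 2: 5})
	if gid, ok := pickGid(r, gids); ok {
		fmt.Println("write goes to group", gid)
	}
}
```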
@@ -83,6 +88,12 @@ func (d *Dispatcher) Update(group map[int][]string, totalAdd = totalAdd + volumeState.TotalWriteProcessed restSpace = restSpace + int(volumeState.FreeSpace) totalAddDelay = totalAddDelay + volumeState.TotalWriteDelay + // cacl most suitable written vid + if volumeState.FreeSpace > minFreeSpace { + if value, ok := wrtVids[sid]; !ok || vid < value { + wrtVids[sid] = vid + } + } } score = d.calScore(int(totalAdd), int(totalAddDelay), restSpace) if score < minScore || minScore == 0 { @@ -94,6 +105,7 @@ func (d *Dispatcher) Update(group map[int][]string, } } d.gids = gids + d.wrtVids = wrtVids return } @@ -116,13 +128,11 @@ func (d *Dispatcher) calScore(totalAdd, totalAddDelay, restSpace int) (score int return } -// VolumeId get a volume id. -func (d *Dispatcher) VolumeId(group map[int][]string, storeVolume map[string][]int32) (vid int32, err error) { +// VolumeID get a volume id. +func (d *Dispatcher) VolumeID(group map[int][]string, storeVolume map[string][]int32) (vid int32, err error) { var ( - sid string stores []string gid int - vids []int32 ) if len(d.gids) == 0 { err = errors.ErrStoreNotAvailable @@ -136,8 +146,6 @@ func (d *Dispatcher) VolumeId(group map[int][]string, storeVolume map[string][]i err = errors.ErrZookeeperDataError return } - sid = stores[0] - vids = storeVolume[sid] - vid = vids[d.rand.Intn(len(vids))] + vid = d.wrtVids[stores[0]] return } diff --git a/directory/dispatcher_test.go b/directory/dispatcher_test.go index debe56e..722a885 100644 --- a/directory/dispatcher_test.go +++ b/directory/dispatcher_test.go @@ -1,6 +1,8 @@ package main import ( + "bfs/directory/conf" + dzk "bfs/directory/zk" "testing" "time" // "fmt" @@ -9,27 +11,35 @@ import ( func TestDispatcher(t *testing.T) { var ( err error - config *Config - zk *Zookeeper + config *conf.Config + zk *dzk.Zookeeper d *Directory ds *Dispatcher + vid int32 ) - if config, err = NewConfig("./directory.conf"); err != nil { + if config, err = conf.NewConfig("./directory.toml"); err != nil { t.Errorf("NewConfig() error(%v)", err) return } - if zk, err = NewZookeeper([]string{"localhost:2181"}, time.Second*1, "/rack", "/volume", "/group"); err != nil { + if zk, err = dzk.NewZookeeper(config); err != nil { t.Errorf("NewZookeeper() error(%v)", err) t.FailNow() } - if d, err = NewDirectory(config, zk); err != nil { + defer zk.Close() + if d, err = NewDirectory(config); err != nil { t.Errorf("NewDirectory() error(%v)", err) t.FailNow() } - ds = NewDispatcher(d) - if err = ds.Update(); err != nil { + time.Sleep(5 * time.Second) + ds = NewDispatcher() + if err = ds.Update(d.group, d.store, d.volume, d.storeVolume); err != nil { t.Errorf("Update() error(%v)", err) t.FailNow() } + if vid, err = ds.VolumeID(d.group, d.storeVolume); err != nil { + t.Errorf("Update() error(%v)", err) + t.FailNow() + } + t.Logf("vid:%v", vid) } diff --git a/directory/hbase/file.go b/directory/hbase/file.go new file mode 100644 index 0000000..29c27c7 --- /dev/null +++ b/directory/hbase/file.go @@ -0,0 +1,160 @@ +package hbase + +import ( + "bfs/libs/errors" + "bfs/libs/gohbase/hrpc" + "bfs/libs/meta" + "bytes" + "context" + "encoding/binary" + "time" + + log "github.com/golang/glog" +) + +var ( + _familyFile = "bfsfile" // file info column family + _columnKey = "key" + _columnSha1 = "sha1" + _columnMine = "mine" + _columnStatus = "status" +) + +func (c *Client) getFile(bucket, filename string) (f *meta.File, err error) { + g, err := hrpc.NewGet(context.Background(), c.tableName(bucket), []byte(filename)) + if err != nil { + 
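+		// constructing the Get request failed; log it with bucket/filename context and return the error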
log.Errorf("Client.getFile.NewGet(%v,%v) error:%v", bucket, filename, err.Error()) + return + } + result, err := c.c.Get(g) + if err != nil { + log.Errorf("Client.getFile.Get(%v,%v) error:%v", bucket, filename, err.Error()) + return + } + if result == nil || len(result.Cells) == 0 { + err = errors.ErrNeedleNotExist + return + } + f = &meta.File{ + Filename: filename, + } + for _, cell := range result.Cells { + if cell == nil { + continue + } + if bytes.Equal(cell.Family, []byte(_familyFile)) { + if bytes.Equal(cell.Qualifier, []byte(_columnKey)) { + f.Key = int64(binary.BigEndian.Uint64(cell.Value)) + } else if bytes.Equal(cell.Qualifier, []byte(_columnSha1)) { + f.Sha1 = string(cell.Value) + } else if bytes.Equal(cell.Qualifier, []byte(_columnMine)) { + f.Mine = string(cell.Value) + } else if bytes.Equal(cell.Qualifier, []byte(_columnStatus)) { + f.Status = int32(binary.BigEndian.Uint32(cell.Value)) + } else if bytes.Equal(cell.Qualifier, []byte(_columnUpdateTime)) { + f.MTime = int64(binary.BigEndian.Uint64(cell.Value)) + } + } + } + return +} + +func (c *Client) existFile(bucket, filename string) (exist bool, err error) { + g, err := hrpc.NewGet(context.Background(), c.tableName(bucket), []byte(filename)) + if err != nil { + log.Errorf("Client.existFile.NewGet(%v,%v) error:%v", bucket, filename, err.Error()) + return + } + result, err := c.c.Get(g) + if err != nil { + log.Errorf("Client.existFile.Get(%v,%v) error:%v", bucket, filename, err.Error()) + return + } + if result == nil || len(result.Cells) == 0 { + return + } + exist = true + return +} + +func (c *Client) putFile(bucket string, f *meta.File) (err error) { + var ( + kbuf = make([]byte, 8) + stbuf = make([]byte, 4) + ubuf = make([]byte, 8) + mutate *hrpc.Mutate + exist bool + ) + exist, err = c.existFile(bucket, f.Filename) + if err != nil { + log.Errorf("Client.putFile.existFile(%v,%v) error:%v", bucket, f.Filename, err.Error()) + return + } + if exist { + if err = c.updateFile(bucket, f.Filename, f.Sha1); err != nil { + return + } + err = errors.ErrNeedleExist + return + } + binary.BigEndian.PutUint64(kbuf, uint64(f.Key)) + binary.BigEndian.PutUint32(stbuf, uint32(f.Status)) + binary.BigEndian.PutUint64(ubuf, uint64(f.MTime)) + values := map[string]map[string][]byte{ + _familyFile: map[string][]byte{ + _columnKey: kbuf, + _columnSha1: []byte(f.Sha1), + _columnMine: []byte(f.Mine), + _columnStatus: stbuf, + _columnUpdateTime: ubuf, + }, + } + if mutate, err = hrpc.NewPut(context.Background(), c.tableName(bucket), []byte(f.Filename), values); err != nil { + log.Errorf("Client.putFile.NewPut(%v,%v) error:%v", bucket, f.Filename, err.Error()) + return + } + if _, err = c.c.Put(mutate); err != nil { + log.Errorf("Client.putFile.Put(%v,%v) error:%v", bucket, f.Filename, err.Error()) + } + return +} + +func (c *Client) delFile(bucket, filename string) (err error) { + var ( + mutate *hrpc.Mutate + ) + if mutate, err = hrpc.NewDel(context.Background(), c.tableName(bucket), []byte(filename), nil); err != nil { + log.Errorf("Client.delFile.NewDel(%v,%v) error:%v", bucket, filename, err.Error()) + return + } + if _, err = c.c.Delete(mutate); err != nil { + log.Errorf("Client.delFile.Delete(%v,%v) error:%v", bucket, filename, err.Error()) + } + return +} + +func (c *Client) updateFile(bucket, filename, sha1 string) (err error) { + var ( + ubuf = make([]byte, 8) + mutate *hrpc.Mutate + ) + binary.BigEndian.PutUint64(ubuf, uint64(time.Now().UnixNano())) + values := map[string]map[string][]byte{ + _familyFile: map[string][]byte{ + 
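+			// an overwrite only refreshes the sha1 and update_time columns; key, mine and status keep their original values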
_columnSha1: []byte(sha1), + _columnUpdateTime: ubuf, + }, + } + if mutate, err = hrpc.NewPut(context.Background(), c.tableName(bucket), []byte(filename), values); err != nil { + log.Errorf("Client.updateFile.NewPut(%v,%v) error:%v", bucket, filename, err.Error()) + return + } + if _, err = c.c.Put(mutate); err != nil { + log.Errorf("Client.updateFile.Put(%v,%v) error:%v", bucket, filename, err.Error()) + } + return +} + +func (c *Client) tableName(bucket string) []byte { + return []byte(_prefix + bucket) +} diff --git a/directory/hbase/file_test.go b/directory/hbase/file_test.go new file mode 100644 index 0000000..b00766b --- /dev/null +++ b/directory/hbase/file_test.go @@ -0,0 +1,48 @@ +package hbase + +import ( + "bfs/libs/errors" + "bfs/libs/meta" + "testing" + "time" +) + +func TestGetFile(t *testing.T) { + c := getClient() + f, err := c.getFile("test", "guhaotest111.jpg") + if err != nil && err != errors.ErrNeedleNotExist { + t.Fatalf("err:%v", err.Error()) + } + t.Logf("f: %v", f) +} + +func TestPutFile(t *testing.T) { + c := getClient() + err := c.putFile("test", &meta.File{ + Filename: "guhaotest111.jpg", + Key: 1234567, + Sha1: "12312312312312312", + Mine: "image/jpg", + Status: 123, + MTime: time.Now().Unix(), + }) + if err != nil { + t.Fatalf("err:%v", err.Error()) + } +} + +func TestDelFile(t *testing.T) { + c := getClient() + if err := c.delFile("test", "guhaotest111.jpg"); err != nil { + t.Fatalf("err:%v", err.Error()) + } +} + +func TestExistFile(t *testing.T) { + c := getClient() + exist, err := c.existFile("test", "guhaotest111.jpg") + if err != errors.ErrNeedleExist { + t.Fatalf("err:%v", err.Error()) + } + t.Logf("pass:err:%v", exist) +} diff --git a/directory/hbase/hbase.go b/directory/hbase/hbase.go deleted file mode 100644 index 8999a36..0000000 --- a/directory/hbase/hbase.go +++ /dev/null @@ -1,368 +0,0 @@ -package hbase - -import ( - "bfs/directory/hbase/hbasethrift" - "bfs/libs/errors" - "bfs/libs/meta" - "bytes" - "crypto/sha1" - "encoding/binary" - "time" - - log "github.com/golang/glog" -) - -const ( - _prefix = "bucket_" -) - -var ( - _table = []byte("bfsmeta") // default bucket - - _familyBasic = []byte("basic") // basic store info column family - _columnVid = []byte("vid") - _columnCookie = []byte("cookie") - _columnUpdateTime = []byte("update_time") - - _familyFile = []byte("bfsfile") // file info column family - _columnKey = []byte("key") - _columnSha1 = []byte("sha1") - _columnMine = []byte("mine") - _columnStatus = []byte("status") - // _columnUpdateTime = []byte("update_time") -) - -type HBaseClient struct { -} - -// NewHBaseClient -func NewHBaseClient() *HBaseClient { - return &HBaseClient{} -} - -// Get get needle from hbase -func (h *HBaseClient) Get(bucket, filename string) (n *meta.Needle, f *meta.File, err error) { - if f, err = h.getFile(bucket, filename); err != nil { - return - } - if n, err = h.getNeedle(f.Key); err == errors.ErrNeedleNotExist { - log.Warningf("table not match: bucket: %s filename: %s", bucket, filename) - h.delFile(bucket, filename) - } - return -} - -// Put put file and needle into hbase -func (h *HBaseClient) Put(bucket string, f *meta.File, n *meta.Needle) (err error) { - if err = h.putFile(bucket, f); err != nil { - return - } - if err = h.putNeedle(n); err != errors.ErrNeedleExist && err != nil { - log.Warningf("table not match: bucket: %s filename: %s", bucket, f.Filename) - h.delFile(bucket, f.Filename) - } - return -} - -// Del del file and needle from hbase -func (h *HBaseClient) Del(bucket, filename string) (err 
error) { - var ( - f *meta.File - ) - if f, err = h.getFile(bucket, filename); err != nil { - return - } - if err = h.delFile(bucket, filename); err != nil { - return - } - err = h.delNeedle(f.Key) - return -} - -// getNeedle get meta data from hbase.bfsmeta -func (h *HBaseClient) getNeedle(key int64) (n *meta.Needle, err error) { - var ( - ks []byte - c *hbasethrift.THBaseServiceClient - r *hbasethrift.TResult_ - cv *hbasethrift.TColumnValue - ) - if c, err = hbasePool.Get(); err != nil { - log.Errorf("hbasePool.Get() error(%v)", err) - return - } - ks = h.key(key) - if r, err = c.Get(_table, &hbasethrift.TGet{Row: ks}); err != nil { - hbasePool.Put(c, true) - return - } - hbasePool.Put(c, false) - if len(r.ColumnValues) == 0 { - err = errors.ErrNeedleNotExist - return - } - n = new(meta.Needle) - n.Key = key - for _, cv = range r.ColumnValues { - if cv == nil { - continue - } - if bytes.Equal(cv.Family, _familyBasic) { - if bytes.Equal(cv.Qualifier, _columnVid) { - n.Vid = int32(binary.BigEndian.Uint32(cv.Value)) - } else if bytes.Equal(cv.Qualifier, _columnCookie) { - n.Cookie = int32(binary.BigEndian.Uint32(cv.Value)) - } else if bytes.Equal(cv.Qualifier, _columnUpdateTime) { - n.MTime = int64(binary.BigEndian.Uint64(cv.Value)) - } - } - } - return -} - -// putNeedle overwriting is bug, banned -func (h *HBaseClient) putNeedle(n *meta.Needle) (err error) { - var ( - ks []byte - vbuf = make([]byte, 4) - cbuf = make([]byte, 4) - ubuf = make([]byte, 8) - exist bool - c *hbasethrift.THBaseServiceClient - ) - if c, err = hbasePool.Get(); err != nil { - log.Errorf("hbasePool.Get() error(%v)", err) - return - } - ks = h.key(n.Key) - if exist, err = c.Exists(_table, &hbasethrift.TGet{Row: ks}); err != nil { - hbasePool.Put(c, true) - return - } - if exist { - hbasePool.Put(c, false) - return errors.ErrNeedleExist - } - binary.BigEndian.PutUint32(vbuf, uint32(n.Vid)) - binary.BigEndian.PutUint32(cbuf, uint32(n.Cookie)) - binary.BigEndian.PutUint64(ubuf, uint64(n.MTime)) - if err = c.Put(_table, &hbasethrift.TPut{ - Row: ks, - ColumnValues: []*hbasethrift.TColumnValue{ - &hbasethrift.TColumnValue{ - Family: _familyBasic, - Qualifier: _columnVid, - Value: vbuf, - }, - &hbasethrift.TColumnValue{ - Family: _familyBasic, - Qualifier: _columnCookie, - Value: cbuf, - }, - &hbasethrift.TColumnValue{ - Family: _familyBasic, - Qualifier: _columnUpdateTime, - Value: ubuf, - }, - }, - }); err != nil { - hbasePool.Put(c, true) - return - } - hbasePool.Put(c, false) - return -} - -// delNeedle delete the hbase.bfsmeta colume vid and cookie by the key. -func (h *HBaseClient) delNeedle(key int64) (err error) { - var ( - ks []byte - c *hbasethrift.THBaseServiceClient - ) - if c, err = hbasePool.Get(); err != nil { - log.Errorf("hbasePool.Get() error(%v)", err) - return - } - ks = h.key(key) - if err = c.DeleteSingle(_table, &hbasethrift.TDelete{ - Row: ks, - }); err != nil { - hbasePool.Put(c, true) - return - } - hbasePool.Put(c, false) - return -} - -// getFile get file data from hbase.bucket_xxx. 
-func (h *HBaseClient) getFile(bucket, filename string) (f *meta.File, err error) { - var ( - ks []byte - c *hbasethrift.THBaseServiceClient - r *hbasethrift.TResult_ - cv *hbasethrift.TColumnValue - ) - if c, err = hbasePool.Get(); err != nil { - log.Errorf("hbasePool.Get() error(%v)", err) - return - } - ks = []byte(filename) - if r, err = c.Get(h.tableName(bucket), &hbasethrift.TGet{Row: ks}); err != nil { - hbasePool.Put(c, true) - return - } - hbasePool.Put(c, false) - if len(r.ColumnValues) == 0 { - err = errors.ErrNeedleNotExist - return - } - f = new(meta.File) - f.Filename = filename - for _, cv = range r.ColumnValues { - if cv == nil { - continue - } - if bytes.Equal(cv.Family, _familyFile) { - if bytes.Equal(cv.Qualifier, _columnKey) { - f.Key = int64(binary.BigEndian.Uint64(cv.Value)) - } else if bytes.Equal(cv.Qualifier, _columnSha1) { - f.Sha1 = string(cv.GetValue()) - } else if bytes.Equal(cv.Qualifier, _columnMine) { - f.Mine = string(cv.GetValue()) - } else if bytes.Equal(cv.Qualifier, _columnStatus) { - f.Status = int32(binary.BigEndian.Uint32(cv.Value)) - } else if bytes.Equal(cv.Qualifier, _columnUpdateTime) { - f.MTime = int64(binary.BigEndian.Uint64(cv.Value)) - } - } - } - return -} - -// putFile overwriting is bug, banned -func (h *HBaseClient) putFile(bucket string, f *meta.File) (err error) { - var ( - ks []byte - kbuf = make([]byte, 8) - stbuf = make([]byte, 4) - ubuf = make([]byte, 8) - exist bool - c *hbasethrift.THBaseServiceClient - ) - if c, err = hbasePool.Get(); err != nil { - log.Errorf("hbasePool.Get() error(%v)", err) - return - } - ks = []byte(f.Filename) - if exist, err = c.Exists(h.tableName(bucket), &hbasethrift.TGet{Row: ks}); err != nil { - hbasePool.Put(c, true) - return - } - if exist { - err = h.updateFile(c, bucket, f.Filename, f.Sha1) - hbasePool.Put(c, err != nil) - return errors.ErrNeedleExist - } - binary.BigEndian.PutUint64(kbuf, uint64(f.Key)) - binary.BigEndian.PutUint32(stbuf, uint32(f.Status)) - binary.BigEndian.PutUint64(ubuf, uint64(f.MTime)) - if err = c.Put(h.tableName(bucket), &hbasethrift.TPut{ - Row: ks, - ColumnValues: []*hbasethrift.TColumnValue{ - &hbasethrift.TColumnValue{ - Family: _familyFile, - Qualifier: _columnKey, - Value: kbuf, - }, - &hbasethrift.TColumnValue{ - Family: _familyFile, - Qualifier: _columnSha1, - Value: []byte(f.Sha1), - }, - &hbasethrift.TColumnValue{ - Family: _familyFile, - Qualifier: _columnMine, - Value: []byte(f.Mine), - }, - &hbasethrift.TColumnValue{ - Family: _familyFile, - Qualifier: _columnStatus, - Value: stbuf, - }, - &hbasethrift.TColumnValue{ - Family: _familyFile, - Qualifier: _columnUpdateTime, - Value: ubuf, - }, - }, - }); err != nil { - hbasePool.Put(c, true) - return - } - hbasePool.Put(c, false) - return -} - -// updateFile overwriting is bug, banned -func (h *HBaseClient) updateFile(c *hbasethrift.THBaseServiceClient, bucket, filename, sha1 string) (err error) { - var ( - ks []byte - ubuf = make([]byte, 8) - ) - ks = []byte(filename) - binary.BigEndian.PutUint64(ubuf, uint64(time.Now().UnixNano())) - err = c.Put(h.tableName(bucket), &hbasethrift.TPut{ - Row: ks, - ColumnValues: []*hbasethrift.TColumnValue{ - &hbasethrift.TColumnValue{ - Family: _familyFile, - Qualifier: _columnSha1, - Value: []byte(sha1), - }, - &hbasethrift.TColumnValue{ - Family: _familyFile, - Qualifier: _columnUpdateTime, - Value: ubuf, - }, - }, - }) - return -} - -// delFile delete file from hbase.bucket_xxx. 
-func (h *HBaseClient) delFile(bucket, filename string) (err error) { - var ( - ks []byte - c *hbasethrift.THBaseServiceClient - ) - if c, err = hbasePool.Get(); err != nil { - log.Errorf("hbasePool.Get() error(%v)", err) - return - } - ks = []byte(filename) - if err = c.DeleteSingle(h.tableName(bucket), &hbasethrift.TDelete{ - Row: ks, - }); err != nil { - hbasePool.Put(c, true) - return - } - hbasePool.Put(c, false) - return -} - -// key hbase bfsmeta -func (h *HBaseClient) key(key int64) []byte { - var ( - sb [sha1.Size]byte - b []byte - ) - b = make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(key)) - sb = sha1.Sum(b) - return sb[:] -} - -// tableName name of bucket table -func (h *HBaseClient) tableName(bucket string) []byte { - return []byte(_prefix + bucket) -} diff --git a/directory/hbase/hbase_b_test.go b/directory/hbase/hbase_b_test.go deleted file mode 100644 index 2eb8fce..0000000 --- a/directory/hbase/hbase_b_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package hbase - -import ( - "bfs/libs/meta" - "math/rand" - "testing" - "time" -) - -func BenchmarkHbasePut(b *testing.B) { - var ( - err error - h = NewHBaseClient() - m = &meta.Needle{} - t int64 - ) - ch := make(chan int64, 1000000) - if err = Init("172.16.13.90:9090", 5*time.Second, 200, 200); err != nil { - b.Errorf("Init failed") - b.FailNow() - } - for j := 0; j < 1000000; j++ { - k := int64(time.Now().UnixNano()) - ch <- k - } - b.ResetTimer() - b.SetParallelism(8) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - t = <-ch - m.Key = t - if err = h.Put(m); err != nil { - continue - } - } - }) -} - -func BenchmarkHbaseGet(b *testing.B) { - var ( - err error - h = NewHBaseClient() - t int64 - r *rand.Rand - ) - if err = Init("172.16.13.90:9090", 5*time.Second, 200, 200); err != nil { - b.Errorf("Init failed") - b.FailNow() - } - r = rand.New(rand.NewSource(time.Now().UnixNano())) - b.ResetTimer() - b.SetParallelism(8) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - t = r.Int63n(1000000) - if _, err = h.Get(t); err != nil { - b.Errorf("Put() error(%v)", err) - b.FailNow() - } - } - }) -} diff --git a/directory/hbase/hbase_client.go b/directory/hbase/hbase_client.go new file mode 100644 index 0000000..d019e6d --- /dev/null +++ b/directory/hbase/hbase_client.go @@ -0,0 +1,96 @@ +package hbase + +import ( + "strings" + "time" + + "bfs/directory/conf" + "bfs/libs/errors" + "bfs/libs/gohbase" + hconf "bfs/libs/gohbase/conf" + "bfs/libs/gohbase/hbase" + "bfs/libs/meta" + + log "github.com/golang/glog" +) + +const ( + _family = "hbase_client" + _prefix = "bucket_" +) + +// Client hbase client. +type Client struct { + c gohbase.Client + testCell *hbase.HBaseCell + addr string +} + +// NewClient new a hbase client. 
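+// It builds the underlying gohbase client from the zookeeper quorum, root path, master/meta addresses and dial timeout in conf.HBase, and falls back to "test" when no TestRowKey is configured.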
+func NewClient(c *conf.HBase, options ...gohbase.Option) *Client { + var testRowKey string + if c.TestRowKey != "" { + testRowKey = c.TestRowKey + } else { + testRowKey = "test" + } + return &Client{ + c: gohbase.NewClient(hconf.NewConf( + c.ZookeeperHbase.Addrs, + c.ZookeeperHbase.Root, + c.Master, + c.Meta, + time.Duration(c.ZookeeperHbase.Timeout), + 0, + 0, + time.Duration(c.DialTimeout), + ), options...), + testCell: &hbase.HBaseCell{ + "test", + testRowKey, + "test", + "test", + "test", + }, + addr: strings.Join(c.ZookeeperHbase.Addrs, ","), + } +} + +// Put put file and needle into hbase +func (c *Client) Put(bucket string, f *meta.File, n *meta.Needle) (err error) { + if err = c.putFile(bucket, f); err != nil { + return + } + if err = c.putNeedle(n); err != nil && err != errors.ErrNeedleExist { + log.Warningf("table not match: bucket: %s filename: %s", bucket, f.Filename) + c.delFile(bucket, f.Filename) + } + return +} + +// Get get needle from hbase +func (c *Client) Get(bucket, filename string) (n *meta.Needle, f *meta.File, err error) { + if f, err = c.getFile(bucket, filename); err != nil { + return + } + if n, err = c.getNeedle(f.Key); err == errors.ErrNeedleNotExist { + log.Warningf("table not match: bucket: %s filename: %s", bucket, filename) + c.delFile(bucket, filename) + } + return +} + +// Del del file and needle from hbase +func (c *Client) Del(bucket, filename string) (err error) { + var ( + f *meta.File + ) + if f, err = c.getFile(bucket, filename); err != nil { + return + } + if err = c.delFile(bucket, filename); err != nil { + return + } + err = c.delNeedle(f.Key) + return +} diff --git a/directory/hbase/hbase_client_test.go b/directory/hbase/hbase_client_test.go new file mode 100644 index 0000000..d7da200 --- /dev/null +++ b/directory/hbase/hbase_client_test.go @@ -0,0 +1,72 @@ +package hbase + +import ( + "bfs/directory/conf" + "bfs/libs/errors" + "bfs/libs/meta" + xtime "bfs/libs/time" + "fmt" + "testing" + "time" +) + +func getClient() *Client { + d, err := time.ParseDuration("1s") + if err != nil { + panic(err) + } + return NewClient(&conf.HBase{ + Master: "", + Meta: "", + TestRowKey: "", + DialTimeout: xtime.Duration(d), + ReadTimeout: xtime.Duration(d), + ReadsTimeout: xtime.Duration(d), + WriteTimeout: xtime.Duration(d), + WritesTimeout: xtime.Duration(d), + ZookeeperHbase: &conf.ZookeeperHbase{ + Root: "", + Addrs: []string{"localhost:2181"}, + Timeout: xtime.Duration(d), + }, + }) +} + +func TestGet(t *testing.T) { + c := getClient() + fmt.Println(c.c) + n, f, err := c.Get("test", "guhaotest123.jpg") + if err != nil && err != errors.ErrNeedleNotExist { + t.Fatalf("err:%v", err.Error()) + } + t.Logf("vid:%v,key:%v,cookie:%v,fkey:%v", n.Vid, n.Key, n.Cookie, f.Key) +} + +func TestPut(t *testing.T) { + c := getClient() + mf := &meta.File{ + Filename: "guhaotest111.jpg", + Key: 1234567, + Sha1: "12312312312312312", + Mine: "image/jpg", + Status: 123, + MTime: time.Now().Unix(), + } + mn := &meta.Needle{ + Key: 1234567, + Cookie: 123333, + Vid: 1, + MTime: time.Now().Unix(), + } + if err := c.Put("test", mf, mn); err != nil { + t.Fatalf("err:%v", err.Error()) + } + t.Logf("pass:%v", mf) +} + +func TestDelete(t *testing.T) { + c := getClient() + if err := c.Del("test", "guhaotest.jpg"); err != nil { + t.Fatalf("err:%v", err.Error()) + } +} diff --git a/directory/hbase/hbase_test.go b/directory/hbase/hbase_test.go deleted file mode 100644 index 6590ffd..0000000 --- a/directory/hbase/hbase_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package hbase - -import ( - 
"bfs/libs/meta" - "fmt" - "testing" - "time" -) - -func TestHbase(t *testing.T) { - var ( - err error - m, n *meta.Needle - ) - if err = Init("172.16.13.90:9090", 5*time.Second, 10, 10); err != nil { - t.Errorf("Init failed") - t.FailNow() - } - - h := NewHBaseClient() - m = new(meta.Needle) - m.Key = 445 - m.Vid = 55 - m.Cookie = 5 - n = new(meta.Needle) - if err = h.Put(m); err != nil { - t.Errorf("error: %v", err) - t.FailNow() - } - if n, err = h.Get(m.Key); err != nil { - t.Errorf("error: %v", err) - t.FailNow() - } - fmt.Println("Get success", n) - if err = h.Del(m.Key); err != nil { - t.Errorf("error:%v", err) - t.FailNow() - } - if n, err = h.Get(m.Key); err != nil { - t.Errorf("error: %v", err) - t.FailNow() - } - fmt.Println("Get success", n) -} diff --git a/directory/hbase/hbasethrift/constants.go b/directory/hbase/hbasethrift/constants.go deleted file mode 100644 index f9267d4..0000000 --- a/directory/hbase/hbasethrift/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package hbasethrift - -import ( - "bytes" - "fmt" - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -func init() { -} diff --git a/directory/hbase/hbasethrift/thbaseservice.go b/directory/hbase/hbasethrift/thbaseservice.go deleted file mode 100644 index 215df10..0000000 --- a/directory/hbase/hbasethrift/thbaseservice.go +++ /dev/null @@ -1,7429 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package hbasethrift - -import ( - "bytes" - "fmt" - "git.apache.org/thrift.git/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type THBaseService interface { - // Test for the existence of columns in the table, as specified in the TGet. - // - // @return true if the specified TGet matches one or more keys, false if not - // - // Parameters: - // - Table: the table to check on - // - Tget: the TGet to check for - Exists(table []byte, tget *TGet) (r bool, err error) - // Method for getting data from a row. - // - // If the row cannot be found an empty Result is returned. - // This can be checked by the empty field of the TResult - // - // @return the result - // - // Parameters: - // - Table: the table to get from - // - Tget: the TGet to fetch - Get(table []byte, tget *TGet) (r *TResult_, err error) - // Method for getting multiple rows. - // - // If a row cannot be found there will be a null - // value in the result list for that TGet at the - // same position. - // - // So the Results are in the same order as the TGets. - // - // Parameters: - // - Table: the table to get from - // - Tgets: a list of TGets to fetch, the Result list - // will have the Results at corresponding positions - // or null if there was an error - GetMultiple(table []byte, tgets []*TGet) (r []*TResult_, err error) - // Commit a TPut to a table. - // - // Parameters: - // - Table: the table to put data in - // - Tput: the TPut to put - Put(table []byte, tput *TPut) (err error) - // Atomically checks if a row/family/qualifier value matches the expected - // value. If it does, it adds the TPut. 
- // - // @return true if the new put was executed, false otherwise - // - // Parameters: - // - Table: to check in and put to - // - Row: row to check - // - Family: column family to check - // - Qualifier: column qualifier to check - // - Value: the expected value, if not provided the - // check is for the non-existence of the - // column in question - // - Tput: the TPut to put if the check succeeds - CheckAndPut(table []byte, row []byte, family []byte, qualifier []byte, value []byte, tput *TPut) (r bool, err error) - // Commit a List of Puts to the table. - // - // Parameters: - // - Table: the table to put data in - // - Tputs: a list of TPuts to commit - PutMultiple(table []byte, tputs []*TPut) (err error) - // Deletes as specified by the TDelete. - // - // Note: "delete" is a reserved keyword and cannot be used in Thrift - // thus the inconsistent naming scheme from the other functions. - // - // Parameters: - // - Table: the table to delete from - // - Tdelete: the TDelete to delete - DeleteSingle(table []byte, tdelete *TDelete) (err error) - // Bulk commit a List of TDeletes to the table. - // - // Throws a TIOError if any of the deletes fail. - // - // Always returns an empty list for backwards compatibility. - // - // Parameters: - // - Table: the table to delete from - // - Tdeletes: list of TDeletes to delete - DeleteMultiple(table []byte, tdeletes []*TDelete) (r []*TDelete, err error) - // Atomically checks if a row/family/qualifier value matches the expected - // value. If it does, it adds the delete. - // - // @return true if the new delete was executed, false otherwise - // - // Parameters: - // - Table: to check in and delete from - // - Row: row to check - // - Family: column family to check - // - Qualifier: column qualifier to check - // - Value: the expected value, if not provided the - // check is for the non-existence of the - // column in question - // - Tdelete: the TDelete to execute if the check succeeds - CheckAndDelete(table []byte, row []byte, family []byte, qualifier []byte, value []byte, tdelete *TDelete) (r bool, err error) - // Parameters: - // - Table: the table to increment the value on - // - Tincrement: the TIncrement to increment - Increment(table []byte, tincrement *TIncrement) (r *TResult_, err error) - // Parameters: - // - Table: the table to append the value on - // - Tappend: the TAppend to append - Append(table []byte, tappend *TAppend) (r *TResult_, err error) - // Get a Scanner for the provided TScan object. - // - // @return Scanner Id to be used with other scanner procedures - // - // Parameters: - // - Table: the table to get the Scanner for - // - Tscan: the scan object to get a Scanner for - OpenScanner(table []byte, tscan *TScan) (r int32, err error) - // Grabs multiple rows from a Scanner. - // - // @return Between zero and numRows TResults - // - // Parameters: - // - ScannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function. - // - NumRows: number of rows to return - GetScannerRows(scannerId int32, numRows int32) (r []*TResult_, err error) - // Closes the scanner. Should be called to free server side resources timely. - // Typically close once the scanner is not needed anymore, i.e. after looping - // over it to get all the required rows. - // - // Parameters: - // - ScannerId: the Id of the Scanner to close * - CloseScanner(scannerId int32) (err error) - // mutateRow performs multiple mutations atomically on a single row. 
- // - // Parameters: - // - Table: table to apply the mutations - // - TrowMutations: mutations to apply - MutateRow(table []byte, trowMutations *TRowMutations) (err error) - // Get results for the provided TScan object. - // This helper function opens a scanner, get the results and close the scanner. - // - // @return between zero and numRows TResults - // - // Parameters: - // - Table: the table to get the Scanner for - // - Tscan: the scan object to get a Scanner for - // - NumRows: number of rows to return - GetScannerResults(table []byte, tscan *TScan, numRows int32) (r []*TResult_, err error) -} - -type THBaseServiceClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewTHBaseServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *THBaseServiceClient { - return &THBaseServiceClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewTHBaseServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *THBaseServiceClient { - return &THBaseServiceClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Test for the existence of columns in the table, as specified in the TGet. -// -// @return true if the specified TGet matches one or more keys, false if not -// -// Parameters: -// - Table: the table to check on -// - Tget: the TGet to check for -func (p *THBaseServiceClient) Exists(table []byte, tget *TGet) (r bool, err error) { - if err = p.sendExists(table, tget); err != nil { - return - } - return p.recvExists() -} - -func (p *THBaseServiceClient) sendExists(table []byte, tget *TGet) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("exists", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceExistsArgs{ - Table: table, - Tget: tget, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvExists() (value bool, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "exists" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "exists failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "exists failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error21 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error22 error - error22, err = error21.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error22 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "exists failed: invalid message type") - return - } - result := THBaseServiceExistsResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - 
return - } - value = result.GetSuccess() - return -} - -// Method for getting data from a row. -// -// If the row cannot be found an empty Result is returned. -// This can be checked by the empty field of the TResult -// -// @return the result -// -// Parameters: -// - Table: the table to get from -// - Tget: the TGet to fetch -func (p *THBaseServiceClient) Get(table []byte, tget *TGet) (r *TResult_, err error) { - if err = p.sendGet(table, tget); err != nil { - return - } - return p.recvGet() -} - -func (p *THBaseServiceClient) sendGet(table []byte, tget *TGet) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("get", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceGetArgs{ - Table: table, - Tget: tget, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvGet() (value *TResult_, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "get" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "get failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "get failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error23 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error24 error - error24, err = error23.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error24 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "get failed: invalid message type") - return - } - result := THBaseServiceGetResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - value = result.GetSuccess() - return -} - -// Method for getting multiple rows. -// -// If a row cannot be found there will be a null -// value in the result list for that TGet at the -// same position. -// -// So the Results are in the same order as the TGets. 
-// -// Parameters: -// - Table: the table to get from -// - Tgets: a list of TGets to fetch, the Result list -// will have the Results at corresponding positions -// or null if there was an error -func (p *THBaseServiceClient) GetMultiple(table []byte, tgets []*TGet) (r []*TResult_, err error) { - if err = p.sendGetMultiple(table, tgets); err != nil { - return - } - return p.recvGetMultiple() -} - -func (p *THBaseServiceClient) sendGetMultiple(table []byte, tgets []*TGet) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("getMultiple", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceGetMultipleArgs{ - Table: table, - Tgets: tgets, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvGetMultiple() (value []*TResult_, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "getMultiple" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getMultiple failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getMultiple failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error25 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error26 error - error26, err = error25.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error26 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getMultiple failed: invalid message type") - return - } - result := THBaseServiceGetMultipleResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - value = result.GetSuccess() - return -} - -// Commit a TPut to a table. 
-// -// Parameters: -// - Table: the table to put data in -// - Tput: the TPut to put -func (p *THBaseServiceClient) Put(table []byte, tput *TPut) (err error) { - if err = p.sendPut(table, tput); err != nil { - return - } - return p.recvPut() -} - -func (p *THBaseServiceClient) sendPut(table []byte, tput *TPut) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("put", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServicePutArgs{ - Table: table, - Tput: tput, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvPut() (err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "put" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "put failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "put failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error27 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error28 error - error28, err = error27.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error28 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "put failed: invalid message type") - return - } - result := THBaseServicePutResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - return -} - -// Atomically checks if a row/family/qualifier value matches the expected -// value. If it does, it adds the TPut. 
-// -// @return true if the new put was executed, false otherwise -// -// Parameters: -// - Table: to check in and put to -// - Row: row to check -// - Family: column family to check -// - Qualifier: column qualifier to check -// - Value: the expected value, if not provided the -// check is for the non-existence of the -// column in question -// - Tput: the TPut to put if the check succeeds -func (p *THBaseServiceClient) CheckAndPut(table []byte, row []byte, family []byte, qualifier []byte, value []byte, tput *TPut) (r bool, err error) { - if err = p.sendCheckAndPut(table, row, family, qualifier, value, tput); err != nil { - return - } - return p.recvCheckAndPut() -} - -func (p *THBaseServiceClient) sendCheckAndPut(table []byte, row []byte, family []byte, qualifier []byte, value []byte, tput *TPut) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("checkAndPut", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceCheckAndPutArgs{ - Table: table, - Row: row, - Family: family, - Qualifier: qualifier, - Value: value, - Tput: tput, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvCheckAndPut() (value bool, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "checkAndPut" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "checkAndPut failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "checkAndPut failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error29 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error30 error - error30, err = error29.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error30 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "checkAndPut failed: invalid message type") - return - } - result := THBaseServiceCheckAndPutResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - value = result.GetSuccess() - return -} - -// Commit a List of Puts to the table. 
-// -// Parameters: -// - Table: the table to put data in -// - Tputs: a list of TPuts to commit -func (p *THBaseServiceClient) PutMultiple(table []byte, tputs []*TPut) (err error) { - if err = p.sendPutMultiple(table, tputs); err != nil { - return - } - return p.recvPutMultiple() -} - -func (p *THBaseServiceClient) sendPutMultiple(table []byte, tputs []*TPut) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("putMultiple", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServicePutMultipleArgs{ - Table: table, - Tputs: tputs, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvPutMultiple() (err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "putMultiple" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "putMultiple failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "putMultiple failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error31 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error32 error - error32, err = error31.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error32 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "putMultiple failed: invalid message type") - return - } - result := THBaseServicePutMultipleResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - return -} - -// Deletes as specified by the TDelete. -// -// Note: "delete" is a reserved keyword and cannot be used in Thrift -// thus the inconsistent naming scheme from the other functions. 
-// -// Parameters: -// - Table: the table to delete from -// - Tdelete: the TDelete to delete -func (p *THBaseServiceClient) DeleteSingle(table []byte, tdelete *TDelete) (err error) { - if err = p.sendDeleteSingle(table, tdelete); err != nil { - return - } - return p.recvDeleteSingle() -} - -func (p *THBaseServiceClient) sendDeleteSingle(table []byte, tdelete *TDelete) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("deleteSingle", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceDeleteSingleArgs{ - Table: table, - Tdelete: tdelete, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvDeleteSingle() (err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "deleteSingle" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "deleteSingle failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "deleteSingle failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error33 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error34 error - error34, err = error33.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error34 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "deleteSingle failed: invalid message type") - return - } - result := THBaseServiceDeleteSingleResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - return -} - -// Bulk commit a List of TDeletes to the table. -// -// Throws a TIOError if any of the deletes fail. -// -// Always returns an empty list for backwards compatibility. 
-// -// Parameters: -// - Table: the table to delete from -// - Tdeletes: list of TDeletes to delete -func (p *THBaseServiceClient) DeleteMultiple(table []byte, tdeletes []*TDelete) (r []*TDelete, err error) { - if err = p.sendDeleteMultiple(table, tdeletes); err != nil { - return - } - return p.recvDeleteMultiple() -} - -func (p *THBaseServiceClient) sendDeleteMultiple(table []byte, tdeletes []*TDelete) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("deleteMultiple", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceDeleteMultipleArgs{ - Table: table, - Tdeletes: tdeletes, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvDeleteMultiple() (value []*TDelete, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "deleteMultiple" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "deleteMultiple failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "deleteMultiple failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error35 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error36 error - error36, err = error35.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error36 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "deleteMultiple failed: invalid message type") - return - } - result := THBaseServiceDeleteMultipleResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - value = result.GetSuccess() - return -} - -// Atomically checks if a row/family/qualifier value matches the expected -// value. If it does, it adds the delete. 
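A sketch of deleteMultiple follows the same pattern; per the comment above, the returned list is always empty and kept only for backwards compatibility, so callers normally discard it. The TDelete field name is assumed from hbase.thrift.

```go
// deleteRows is a hypothetical helper: one deleteMultiple call for a set of
// row keys.
func deleteRows(c *THBaseServiceClient, table string, rows [][]byte) error {
	tdeletes := make([]*TDelete, 0, len(rows))
	for _, row := range rows {
		tdeletes = append(tdeletes, &TDelete{Row: row})
	}
	// The returned []*TDelete is always empty (backwards compatibility only).
	_, err := c.DeleteMultiple([]byte(table), tdeletes)
	return err
}
```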
-// -// @return true if the new delete was executed, false otherwise -// -// Parameters: -// - Table: to check in and delete from -// - Row: row to check -// - Family: column family to check -// - Qualifier: column qualifier to check -// - Value: the expected value, if not provided the -// check is for the non-existence of the -// column in question -// - Tdelete: the TDelete to execute if the check succeeds -func (p *THBaseServiceClient) CheckAndDelete(table []byte, row []byte, family []byte, qualifier []byte, value []byte, tdelete *TDelete) (r bool, err error) { - if err = p.sendCheckAndDelete(table, row, family, qualifier, value, tdelete); err != nil { - return - } - return p.recvCheckAndDelete() -} - -func (p *THBaseServiceClient) sendCheckAndDelete(table []byte, row []byte, family []byte, qualifier []byte, value []byte, tdelete *TDelete) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("checkAndDelete", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceCheckAndDeleteArgs{ - Table: table, - Row: row, - Family: family, - Qualifier: qualifier, - Value: value, - Tdelete: tdelete, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvCheckAndDelete() (value bool, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "checkAndDelete" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "checkAndDelete failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "checkAndDelete failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error37 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error38 error - error38, err = error37.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error38 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "checkAndDelete failed: invalid message type") - return - } - result := THBaseServiceCheckAndDeleteResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - value = result.GetSuccess() - return -} - -// Parameters: -// - Table: the table to increment the value on -// - Tincrement: the TIncrement to increment -func (p *THBaseServiceClient) Increment(table []byte, tincrement *TIncrement) (r *TResult_, err error) { - if err = p.sendIncrement(table, tincrement); err != nil { - return - } - return p.recvIncrement() -} - -func (p *THBaseServiceClient) sendIncrement(table []byte, tincrement *TIncrement) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("increment", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceIncrementArgs{ - Table: table, - Tincrement: tincrement, - } - if err = args.Write(oprot); err != 
nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvIncrement() (value *TResult_, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "increment" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "increment failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "increment failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error39 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error40 error - error40, err = error39.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error40 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "increment failed: invalid message type") - return - } - result := THBaseServiceIncrementResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - value = result.GetSuccess() - return -} - -// Parameters: -// - Table: the table to append the value on -// - Tappend: the TAppend to append -func (p *THBaseServiceClient) Append(table []byte, tappend *TAppend) (r *TResult_, err error) { - if err = p.sendAppend(table, tappend); err != nil { - return - } - return p.recvAppend() -} - -func (p *THBaseServiceClient) sendAppend(table []byte, tappend *TAppend) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("append", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceAppendArgs{ - Table: table, - Tappend: tappend, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvAppend() (value *TResult_, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "append" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "append failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "append failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error41 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error42 error - error42, err = error41.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error42 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "append failed: invalid message type") - return - } - result := THBaseServiceAppendResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - 
value = result.GetSuccess() - return -} - -// Get a Scanner for the provided TScan object. -// -// @return Scanner Id to be used with other scanner procedures -// -// Parameters: -// - Table: the table to get the Scanner for -// - Tscan: the scan object to get a Scanner for -func (p *THBaseServiceClient) OpenScanner(table []byte, tscan *TScan) (r int32, err error) { - if err = p.sendOpenScanner(table, tscan); err != nil { - return - } - return p.recvOpenScanner() -} - -func (p *THBaseServiceClient) sendOpenScanner(table []byte, tscan *TScan) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("openScanner", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceOpenScannerArgs{ - Table: table, - Tscan: tscan, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvOpenScanner() (value int32, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "openScanner" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "openScanner failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "openScanner failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error43 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error44 error - error44, err = error43.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error44 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "openScanner failed: invalid message type") - return - } - result := THBaseServiceOpenScannerResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - value = result.GetSuccess() - return -} - -// Grabs multiple rows from a Scanner. -// -// @return Between zero and numRows TResults -// -// Parameters: -// - ScannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function. 
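The checkAndDelete wrapper above is an atomic compare-and-delete: the delete runs only when the current cell value matches, and a nil expected value turns the check into a non-existence test. A hedged sketch, with placeholder column and family names; increment and append follow the same single-call shape and return a *TResult_.

```go
// deleteIfMatches is a hypothetical helper around checkAndDelete.
func deleteIfMatches(c *THBaseServiceClient, table, row, expected []byte) (bool, error) {
	// Delete the row only if basic:content currently equals expected;
	// pass expected == nil to require that the column does not exist.
	tdel := &TDelete{Row: row}
	return c.CheckAndDelete(table, row, []byte("basic"), []byte("content"), expected, tdel)
}
```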
-// - NumRows: number of rows to return -func (p *THBaseServiceClient) GetScannerRows(scannerId int32, numRows int32) (r []*TResult_, err error) { - if err = p.sendGetScannerRows(scannerId, numRows); err != nil { - return - } - return p.recvGetScannerRows() -} - -func (p *THBaseServiceClient) sendGetScannerRows(scannerId int32, numRows int32) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("getScannerRows", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceGetScannerRowsArgs{ - ScannerId: scannerId, - NumRows: numRows, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvGetScannerRows() (value []*TResult_, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "getScannerRows" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getScannerRows failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getScannerRows failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error45 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error46 error - error46, err = error45.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error46 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getScannerRows failed: invalid message type") - return - } - result := THBaseServiceGetScannerRowsResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } else if result.Ia != nil { - err = result.Ia - return - } - value = result.GetSuccess() - return -} - -// Closes the scanner. Should be called to free server side resources timely. -// Typically close once the scanner is not needed anymore, i.e. after looping -// over it to get all the required rows. 
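openScanner, getScannerRows and closeScanner form a three-step lifecycle. Below is a sketch of paging through a row range, assuming the standard TScan field names (StartRow, StopRow); the scanner is closed via defer so the server-side resources mentioned above are released even when an error cuts the loop short.

```go
// scanRange is a hypothetical sketch of the scanner lifecycle.
func scanRange(c *THBaseServiceClient, table, start, stop []byte) ([]*TResult_, error) {
	const batch = 100
	sid, err := c.OpenScanner(table, &TScan{StartRow: start, StopRow: stop})
	if err != nil {
		return nil, err
	}
	// Free the server-side scanner even if an error short-circuits the loop.
	defer c.CloseScanner(sid)

	var all []*TResult_
	for {
		rows, err := c.GetScannerRows(sid, batch)
		if err != nil {
			return nil, err
		}
		all = append(all, rows...)
		if len(rows) < batch { // fewer than requested means the scan is exhausted
			return all, nil
		}
	}
}
```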
-// -// Parameters: -// - ScannerId: the Id of the Scanner to close * -func (p *THBaseServiceClient) CloseScanner(scannerId int32) (err error) { - if err = p.sendCloseScanner(scannerId); err != nil { - return - } - return p.recvCloseScanner() -} - -func (p *THBaseServiceClient) sendCloseScanner(scannerId int32) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("closeScanner", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceCloseScannerArgs{ - ScannerId: scannerId, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvCloseScanner() (err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "closeScanner" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "closeScanner failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "closeScanner failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error47 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error48 error - error48, err = error47.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error48 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "closeScanner failed: invalid message type") - return - } - result := THBaseServiceCloseScannerResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } else if result.Ia != nil { - err = result.Ia - return - } - return -} - -// mutateRow performs multiple mutations atomically on a single row. 
-// -// Parameters: -// - Table: table to apply the mutations -// - TrowMutations: mutations to apply -func (p *THBaseServiceClient) MutateRow(table []byte, trowMutations *TRowMutations) (err error) { - if err = p.sendMutateRow(table, trowMutations); err != nil { - return - } - return p.recvMutateRow() -} - -func (p *THBaseServiceClient) sendMutateRow(table []byte, trowMutations *TRowMutations) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("mutateRow", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceMutateRowArgs{ - Table: table, - TrowMutations: trowMutations, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvMutateRow() (err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "mutateRow" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "mutateRow failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "mutateRow failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error49 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error50 error - error50, err = error49.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error50 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "mutateRow failed: invalid message type") - return - } - result := THBaseServiceMutateRowResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - return -} - -// Get results for the provided TScan object. -// This helper function opens a scanner, get the results and close the scanner. 
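mutateRow applies everything carried in one TRowMutations atomically to a single row. A sketch that pairs a put with a column delete; the TRowMutations/TMutation/TColumn field names (Row, Mutations, Put, DeleteSingle, Family, Qualifier) are assumed from the standard hbase.thrift union and structs, and the column names are placeholders.

```go
// swapColumn is a hypothetical helper: within one row, write a new column
// and drop an old one as a single atomic row mutation.
func swapColumn(c *THBaseServiceClient, table, row, newVal []byte) error {
	put := &TPut{
		Row: row,
		ColumnValues: []*TColumnValue{
			{Family: []byte("basic"), Qualifier: []byte("new"), Value: newVal},
		},
	}
	del := &TDelete{
		Row:     row,
		Columns: []*TColumn{{Family: []byte("basic"), Qualifier: []byte("old")}},
	}
	// Both mutations are applied atomically to the single row.
	return c.MutateRow(table, &TRowMutations{
		Row:       row,
		Mutations: []*TMutation{{Put: put}, {DeleteSingle: del}},
	})
}
```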
-// -// @return between zero and numRows TResults -// -// Parameters: -// - Table: the table to get the Scanner for -// - Tscan: the scan object to get a Scanner for -// - NumRows: number of rows to return -func (p *THBaseServiceClient) GetScannerResults(table []byte, tscan *TScan, numRows int32) (r []*TResult_, err error) { - if err = p.sendGetScannerResults(table, tscan, numRows); err != nil { - return - } - return p.recvGetScannerResults() -} - -func (p *THBaseServiceClient) sendGetScannerResults(table []byte, tscan *TScan, numRows int32) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("getScannerResults", thrift.CALL, p.SeqId); err != nil { - return - } - args := THBaseServiceGetScannerResultsArgs{ - Table: table, - Tscan: tscan, - NumRows: numRows, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush() -} - -func (p *THBaseServiceClient) recvGetScannerResults() (value []*TResult_, err error) { - iprot := p.InputProtocol - if iprot == nil { - iprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.InputProtocol = iprot - } - method, mTypeId, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return - } - if method != "getScannerResults" { - err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "getScannerResults failed: wrong method name") - return - } - if p.SeqId != seqId { - err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "getScannerResults failed: out of sequence response") - return - } - if mTypeId == thrift.EXCEPTION { - error51 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") - var error52 error - error52, err = error51.Read(iprot) - if err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - err = error52 - return - } - if mTypeId != thrift.REPLY { - err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "getScannerResults failed: invalid message type") - return - } - result := THBaseServiceGetScannerResultsResult{} - if err = result.Read(iprot); err != nil { - return - } - if err = iprot.ReadMessageEnd(); err != nil { - return - } - if result.Io != nil { - err = result.Io - return - } - value = result.GetSuccess() - return -} - -type THBaseServiceProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler THBaseService -} - -func (p *THBaseServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *THBaseServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *THBaseServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewTHBaseServiceProcessor(handler THBaseService) *THBaseServiceProcessor { - - self53 := &THBaseServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self53.processorMap["exists"] = &tHBaseServiceProcessorExists{handler: handler} - self53.processorMap["get"] = &tHBaseServiceProcessorGet{handler: handler} - self53.processorMap["getMultiple"] = &tHBaseServiceProcessorGetMultiple{handler: handler} - self53.processorMap["put"] = &tHBaseServiceProcessorPut{handler: handler} - 
self53.processorMap["checkAndPut"] = &tHBaseServiceProcessorCheckAndPut{handler: handler} - self53.processorMap["putMultiple"] = &tHBaseServiceProcessorPutMultiple{handler: handler} - self53.processorMap["deleteSingle"] = &tHBaseServiceProcessorDeleteSingle{handler: handler} - self53.processorMap["deleteMultiple"] = &tHBaseServiceProcessorDeleteMultiple{handler: handler} - self53.processorMap["checkAndDelete"] = &tHBaseServiceProcessorCheckAndDelete{handler: handler} - self53.processorMap["increment"] = &tHBaseServiceProcessorIncrement{handler: handler} - self53.processorMap["append"] = &tHBaseServiceProcessorAppend{handler: handler} - self53.processorMap["openScanner"] = &tHBaseServiceProcessorOpenScanner{handler: handler} - self53.processorMap["getScannerRows"] = &tHBaseServiceProcessorGetScannerRows{handler: handler} - self53.processorMap["closeScanner"] = &tHBaseServiceProcessorCloseScanner{handler: handler} - self53.processorMap["mutateRow"] = &tHBaseServiceProcessorMutateRow{handler: handler} - self53.processorMap["getScannerResults"] = &tHBaseServiceProcessorGetScannerResults{handler: handler} - return self53 -} - -func (p *THBaseServiceProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x54 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x54.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, x54 - -} - -type tHBaseServiceProcessorExists struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorExists) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceExistsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("exists", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceExistsResult{} - var retval bool - var err2 error - if retval, err2 = p.handler.Exists(args.Table, args.Tget); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing exists: "+err2.Error()) - oprot.WriteMessageBegin("exists", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = &retval - } - if err2 = oprot.WriteMessageBegin("exists", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorGet struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorGet) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceGetArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := 
thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("get", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceGetResult{} - var retval *TResult_ - var err2 error - if retval, err2 = p.handler.Get(args.Table, args.Tget); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get: "+err2.Error()) - oprot.WriteMessageBegin("get", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("get", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorGetMultiple struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorGetMultiple) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceGetMultipleArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getMultiple", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceGetMultipleResult{} - var retval []*TResult_ - var err2 error - if retval, err2 = p.handler.GetMultiple(args.Table, args.Tgets); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getMultiple: "+err2.Error()) - oprot.WriteMessageBegin("getMultiple", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getMultiple", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorPut struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorPut) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServicePutArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("put", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServicePutResult{} - var err2 error - if err2 = p.handler.Put(args.Table, args.Tput); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing put: "+err2.Error()) - oprot.WriteMessageBegin("put", thrift.EXCEPTION, seqId) - 
x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } - if err2 = oprot.WriteMessageBegin("put", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorCheckAndPut struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorCheckAndPut) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceCheckAndPutArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("checkAndPut", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceCheckAndPutResult{} - var retval bool - var err2 error - if retval, err2 = p.handler.CheckAndPut(args.Table, args.Row, args.Family, args.Qualifier, args.Value, args.Tput); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing checkAndPut: "+err2.Error()) - oprot.WriteMessageBegin("checkAndPut", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = &retval - } - if err2 = oprot.WriteMessageBegin("checkAndPut", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorPutMultiple struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorPutMultiple) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServicePutMultipleArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("putMultiple", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServicePutMultipleResult{} - var err2 error - if err2 = p.handler.PutMultiple(args.Table, args.Tputs); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing putMultiple: "+err2.Error()) - oprot.WriteMessageBegin("putMultiple", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } - if err2 = oprot.WriteMessageBegin("putMultiple", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorDeleteSingle struct { - handler THBaseService -} - -func 
(p *tHBaseServiceProcessorDeleteSingle) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceDeleteSingleArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("deleteSingle", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceDeleteSingleResult{} - var err2 error - if err2 = p.handler.DeleteSingle(args.Table, args.Tdelete); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing deleteSingle: "+err2.Error()) - oprot.WriteMessageBegin("deleteSingle", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } - if err2 = oprot.WriteMessageBegin("deleteSingle", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorDeleteMultiple struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorDeleteMultiple) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceDeleteMultipleArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("deleteMultiple", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceDeleteMultipleResult{} - var retval []*TDelete - var err2 error - if retval, err2 = p.handler.DeleteMultiple(args.Table, args.Tdeletes); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing deleteMultiple: "+err2.Error()) - oprot.WriteMessageBegin("deleteMultiple", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("deleteMultiple", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorCheckAndDelete struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorCheckAndDelete) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceCheckAndDeleteArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("checkAndDelete", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceCheckAndDeleteResult{} - var 
retval bool - var err2 error - if retval, err2 = p.handler.CheckAndDelete(args.Table, args.Row, args.Family, args.Qualifier, args.Value, args.Tdelete); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing checkAndDelete: "+err2.Error()) - oprot.WriteMessageBegin("checkAndDelete", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = &retval - } - if err2 = oprot.WriteMessageBegin("checkAndDelete", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorIncrement struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorIncrement) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceIncrementArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("increment", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceIncrementResult{} - var retval *TResult_ - var err2 error - if retval, err2 = p.handler.Increment(args.Table, args.Tincrement); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing increment: "+err2.Error()) - oprot.WriteMessageBegin("increment", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("increment", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorAppend struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorAppend) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceAppendArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("append", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceAppendResult{} - var retval *TResult_ - var err2 error - if retval, err2 = p.handler.Append(args.Table, args.Tappend); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing append: "+err2.Error()) - oprot.WriteMessageBegin("append", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = retval - } - if err2 = 
oprot.WriteMessageBegin("append", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorOpenScanner struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorOpenScanner) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceOpenScannerArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("openScanner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceOpenScannerResult{} - var retval int32 - var err2 error - if retval, err2 = p.handler.OpenScanner(args.Table, args.Tscan); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing openScanner: "+err2.Error()) - oprot.WriteMessageBegin("openScanner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = &retval - } - if err2 = oprot.WriteMessageBegin("openScanner", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorGetScannerRows struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorGetScannerRows) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceGetScannerRowsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getScannerRows", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceGetScannerRowsResult{} - var retval []*TResult_ - var err2 error - if retval, err2 = p.handler.GetScannerRows(args.ScannerId, args.NumRows); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - case *TIllegalArgument: - result.Ia = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getScannerRows: "+err2.Error()) - oprot.WriteMessageBegin("getScannerRows", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getScannerRows", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorCloseScanner struct { - handler THBaseService -} - 
-func (p *tHBaseServiceProcessorCloseScanner) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceCloseScannerArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("closeScanner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceCloseScannerResult{} - var err2 error - if err2 = p.handler.CloseScanner(args.ScannerId); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - case *TIllegalArgument: - result.Ia = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing closeScanner: "+err2.Error()) - oprot.WriteMessageBegin("closeScanner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } - if err2 = oprot.WriteMessageBegin("closeScanner", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorMutateRow struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorMutateRow) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceMutateRowArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("mutateRow", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceMutateRowResult{} - var err2 error - if err2 = p.handler.MutateRow(args.Table, args.TrowMutations); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing mutateRow: "+err2.Error()) - oprot.WriteMessageBegin("mutateRow", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } - if err2 = oprot.WriteMessageBegin("mutateRow", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -type tHBaseServiceProcessorGetScannerResults struct { - handler THBaseService -} - -func (p *tHBaseServiceProcessorGetScannerResults) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := THBaseServiceGetScannerResultsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("getScannerResults", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return false, err - } - - iprot.ReadMessageEnd() - result := THBaseServiceGetScannerResultsResult{} - var retval []*TResult_ - var err2 error - if retval, err2 = 
p.handler.GetScannerResults(args.Table, args.Tscan, args.NumRows); err2 != nil { - switch v := err2.(type) { - case *TIOError: - result.Io = v - default: - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getScannerResults: "+err2.Error()) - oprot.WriteMessageBegin("getScannerResults", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush() - return true, err2 - } - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("getScannerResults", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Table: the table to check on -// - Tget: the TGet to check for -type THBaseServiceExistsArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tget *TGet `thrift:"tget,2,required" json:"tget"` -} - -func NewTHBaseServiceExistsArgs() *THBaseServiceExistsArgs { - return &THBaseServiceExistsArgs{} -} - -func (p *THBaseServiceExistsArgs) GetTable() []byte { - return p.Table -} - -var THBaseServiceExistsArgs_Tget_DEFAULT *TGet - -func (p *THBaseServiceExistsArgs) GetTget() *TGet { - if !p.IsSetTget() { - return THBaseServiceExistsArgs_Tget_DEFAULT - } - return p.Tget -} -func (p *THBaseServiceExistsArgs) IsSetTget() bool { - return p.Tget != nil -} - -func (p *THBaseServiceExistsArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTget bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTget = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTget { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tget is not set")) - } - return nil -} - -func (p *THBaseServiceExistsArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceExistsArgs) readField2(iprot thrift.TProtocol) error { - p.Tget = &TGet{} - if err := p.Tget.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tget), err) - } - return nil -} - -func (p *THBaseServiceExistsArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("exists_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", 
p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceExistsArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceExistsArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tget", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tget: ", p), err) - } - if err := p.Tget.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tget), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tget: ", p), err) - } - return err -} - -func (p *THBaseServiceExistsArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceExistsArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceExistsResult struct { - Success *bool `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceExistsResult() *THBaseServiceExistsResult { - return &THBaseServiceExistsResult{} -} - -var THBaseServiceExistsResult_Success_DEFAULT bool - -func (p *THBaseServiceExistsResult) GetSuccess() bool { - if !p.IsSetSuccess() { - return THBaseServiceExistsResult_Success_DEFAULT - } - return *p.Success -} - -var THBaseServiceExistsResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceExistsResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceExistsResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceExistsResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceExistsResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceExistsResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceExistsResult) readField0(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return 
thrift.PrependError("error reading field 0: ", err) - } else { - p.Success = &v - } - return nil -} - -func (p *THBaseServiceExistsResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceExistsResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("exists_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceExistsResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.BOOL, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteBool(bool(*p.Success)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceExistsResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceExistsResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceExistsResult(%+v)", *p) -} - -// Attributes: -// - Table: the table to get from -// - Tget: the TGet to fetch -type THBaseServiceGetArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tget *TGet `thrift:"tget,2,required" json:"tget"` -} - -func NewTHBaseServiceGetArgs() *THBaseServiceGetArgs { - return &THBaseServiceGetArgs{} -} - -func (p *THBaseServiceGetArgs) GetTable() []byte { - return p.Table -} - -var THBaseServiceGetArgs_Tget_DEFAULT *TGet - -func (p *THBaseServiceGetArgs) GetTget() *TGet { - if !p.IsSetTget() { - return THBaseServiceGetArgs_Tget_DEFAULT - } - return p.Tget -} -func (p *THBaseServiceGetArgs) IsSetTget() bool { - return p.Tget != nil -} - -func (p *THBaseServiceGetArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTget bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := 
p.readField2(iprot); err != nil { - return err - } - issetTget = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTget { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tget is not set")) - } - return nil -} - -func (p *THBaseServiceGetArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceGetArgs) readField2(iprot thrift.TProtocol) error { - p.Tget = &TGet{} - if err := p.Tget.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tget), err) - } - return nil -} - -func (p *THBaseServiceGetArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("get_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceGetArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceGetArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tget", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tget: ", p), err) - } - if err := p.Tget.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tget), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tget: ", p), err) - } - return err -} - -func (p *THBaseServiceGetArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceGetArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceGetResult struct { - Success *TResult_ `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceGetResult() *THBaseServiceGetResult { - return &THBaseServiceGetResult{} -} - -var THBaseServiceGetResult_Success_DEFAULT *TResult_ - -func (p *THBaseServiceGetResult) GetSuccess() *TResult_ { - if !p.IsSetSuccess() { - return THBaseServiceGetResult_Success_DEFAULT - } - return p.Success -} - -var THBaseServiceGetResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceGetResult) GetIo() 
*TIOError { - if !p.IsSetIo() { - return THBaseServiceGetResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceGetResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceGetResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceGetResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceGetResult) readField0(iprot thrift.TProtocol) error { - p.Success = &TResult_{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *THBaseServiceGetResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceGetResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("get_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceGetResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceGetResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceGetResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceGetResult(%+v)", *p) -} - -// Attributes: -// - 
Table: the table to get from -// - Tgets: a list of TGets to fetch, the Result list -// will have the Results at corresponding positions -// or null if there was an error -type THBaseServiceGetMultipleArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tgets []*TGet `thrift:"tgets,2,required" json:"tgets"` -} - -func NewTHBaseServiceGetMultipleArgs() *THBaseServiceGetMultipleArgs { - return &THBaseServiceGetMultipleArgs{} -} - -func (p *THBaseServiceGetMultipleArgs) GetTable() []byte { - return p.Table -} - -func (p *THBaseServiceGetMultipleArgs) GetTgets() []*TGet { - return p.Tgets -} -func (p *THBaseServiceGetMultipleArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTgets bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTgets = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTgets { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tgets is not set")) - } - return nil -} - -func (p *THBaseServiceGetMultipleArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceGetMultipleArgs) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TGet, 0, size) - p.Tgets = tSlice - for i := 0; i < size; i++ { - _elem55 := &TGet{} - if err := _elem55.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem55), err) - } - p.Tgets = append(p.Tgets, _elem55) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *THBaseServiceGetMultipleArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getMultiple_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceGetMultipleArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceGetMultipleArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tgets", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tgets: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tgets)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tgets { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tgets: ", p), err) - } - return err -} - -func (p *THBaseServiceGetMultipleArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceGetMultipleArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceGetMultipleResult struct { - Success []*TResult_ `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceGetMultipleResult() *THBaseServiceGetMultipleResult { - return &THBaseServiceGetMultipleResult{} -} - -var THBaseServiceGetMultipleResult_Success_DEFAULT []*TResult_ - -func (p *THBaseServiceGetMultipleResult) GetSuccess() []*TResult_ { - return p.Success -} - -var THBaseServiceGetMultipleResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceGetMultipleResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceGetMultipleResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceGetMultipleResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceGetMultipleResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceGetMultipleResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceGetMultipleResult) readField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TResult_, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem56 := &TResult_{} - if err := _elem56.Read(iprot); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem56), err) - } - p.Success = append(p.Success, _elem56) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *THBaseServiceGetMultipleResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceGetMultipleResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getMultiple_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceGetMultipleResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceGetMultipleResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceGetMultipleResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceGetMultipleResult(%+v)", *p) -} - -// Attributes: -// - Table: the table to put data in -// - Tput: the TPut to put -type THBaseServicePutArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tput *TPut `thrift:"tput,2,required" json:"tput"` -} - -func NewTHBaseServicePutArgs() *THBaseServicePutArgs { - return &THBaseServicePutArgs{} -} - -func (p *THBaseServicePutArgs) GetTable() []byte { - return p.Table -} - -var THBaseServicePutArgs_Tput_DEFAULT *TPut - -func (p *THBaseServicePutArgs) GetTput() *TPut { - if !p.IsSetTput() { - return THBaseServicePutArgs_Tput_DEFAULT - } - return p.Tput -} -func (p *THBaseServicePutArgs) IsSetTput() bool { - return p.Tput != nil -} - -func (p *THBaseServicePutArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read 
error: ", p), err) - } - - var issetTable bool = false - var issetTput bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTput = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTput { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tput is not set")) - } - return nil -} - -func (p *THBaseServicePutArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServicePutArgs) readField2(iprot thrift.TProtocol) error { - p.Tput = &TPut{} - if err := p.Tput.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tput), err) - } - return nil -} - -func (p *THBaseServicePutArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("put_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServicePutArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServicePutArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tput", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tput: ", p), err) - } - if err := p.Tput.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tput), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tput: ", p), err) - } - return err -} - -func (p *THBaseServicePutArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServicePutArgs(%+v)", *p) -} - -// Attributes: -// - Io -type THBaseServicePutResult struct { - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServicePutResult() 
*THBaseServicePutResult { - return &THBaseServicePutResult{} -} - -var THBaseServicePutResult_Io_DEFAULT *TIOError - -func (p *THBaseServicePutResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServicePutResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServicePutResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServicePutResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServicePutResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServicePutResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("put_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServicePutResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServicePutResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServicePutResult(%+v)", *p) -} - -// Attributes: -// - Table: to check in and put to -// - Row: row to check -// - Family: column family to check -// - Qualifier: column qualifier to check -// - Value: the expected value, if not provided the -// check is for the non-existence of the -// column in question -// - Tput: the TPut to put if the check succeeds -type THBaseServiceCheckAndPutArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Row []byte `thrift:"row,2,required" json:"row"` - Family []byte `thrift:"family,3,required" json:"family"` - Qualifier []byte `thrift:"qualifier,4,required" json:"qualifier"` - Value []byte `thrift:"value,5" json:"value"` - Tput *TPut `thrift:"tput,6,required" json:"tput"` -} - -func NewTHBaseServiceCheckAndPutArgs() *THBaseServiceCheckAndPutArgs { - return &THBaseServiceCheckAndPutArgs{} -} - -func (p *THBaseServiceCheckAndPutArgs) GetTable() []byte { - return p.Table -} - -func (p 
*THBaseServiceCheckAndPutArgs) GetRow() []byte { - return p.Row -} - -func (p *THBaseServiceCheckAndPutArgs) GetFamily() []byte { - return p.Family -} - -func (p *THBaseServiceCheckAndPutArgs) GetQualifier() []byte { - return p.Qualifier -} - -func (p *THBaseServiceCheckAndPutArgs) GetValue() []byte { - return p.Value -} - -var THBaseServiceCheckAndPutArgs_Tput_DEFAULT *TPut - -func (p *THBaseServiceCheckAndPutArgs) GetTput() *TPut { - if !p.IsSetTput() { - return THBaseServiceCheckAndPutArgs_Tput_DEFAULT - } - return p.Tput -} -func (p *THBaseServiceCheckAndPutArgs) IsSetTput() bool { - return p.Tput != nil -} - -func (p *THBaseServiceCheckAndPutArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetRow bool = false - var issetFamily bool = false - var issetQualifier bool = false - var issetTput bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetRow = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - issetFamily = true - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - issetQualifier = true - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - issetTput = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetRow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Row is not set")) - } - if !issetFamily { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Family is not set")) - } - if !issetQualifier { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Qualifier is not set")) - } - if !issetTput { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tput is not set")) - } - return nil -} - -func (p *THBaseServiceCheckAndPutArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceCheckAndPutArgs) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Row = v - } - return nil -} - -func (p *THBaseServiceCheckAndPutArgs) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Family = v - } - return nil -} - -func (p *THBaseServiceCheckAndPutArgs) readField4(iprot thrift.TProtocol) error { - if v, 
err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.Qualifier = v - } - return nil -} - -func (p *THBaseServiceCheckAndPutArgs) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *THBaseServiceCheckAndPutArgs) readField6(iprot thrift.TProtocol) error { - p.Tput = &TPut{} - if err := p.Tput.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tput), err) - } - return nil -} - -func (p *THBaseServiceCheckAndPutArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("checkAndPut_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceCheckAndPutArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndPutArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("row", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:row: ", p), err) - } - if err := oprot.WriteBinary(p.Row); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.row (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:row: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndPutArgs) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("family", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:family: ", p), err) - } - if err := oprot.WriteBinary(p.Family); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.family (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:family: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndPutArgs) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("qualifier", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:qualifier: ", p), err) - } - if err := oprot.WriteBinary(p.Qualifier); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.qualifier (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:qualifier: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndPutArgs) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:value: ", p), err) - } - if err := oprot.WriteBinary(p.Value); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:value: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndPutArgs) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tput", thrift.STRUCT, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:tput: ", p), err) - } - if err := p.Tput.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tput), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:tput: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndPutArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceCheckAndPutArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceCheckAndPutResult struct { - Success *bool `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceCheckAndPutResult() *THBaseServiceCheckAndPutResult { - return &THBaseServiceCheckAndPutResult{} -} - -var THBaseServiceCheckAndPutResult_Success_DEFAULT bool - -func (p *THBaseServiceCheckAndPutResult) GetSuccess() bool { - if !p.IsSetSuccess() { - return THBaseServiceCheckAndPutResult_Success_DEFAULT - } - return *p.Success -} - -var THBaseServiceCheckAndPutResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceCheckAndPutResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceCheckAndPutResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceCheckAndPutResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceCheckAndPutResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceCheckAndPutResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceCheckAndPutResult) readField0(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } 
else { - p.Success = &v - } - return nil -} - -func (p *THBaseServiceCheckAndPutResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceCheckAndPutResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("checkAndPut_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceCheckAndPutResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.BOOL, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteBool(bool(*p.Success)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceCheckAndPutResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceCheckAndPutResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceCheckAndPutResult(%+v)", *p) -} - -// Attributes: -// - Table: the table to put data in -// - Tputs: a list of TPuts to commit -type THBaseServicePutMultipleArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tputs []*TPut `thrift:"tputs,2,required" json:"tputs"` -} - -func NewTHBaseServicePutMultipleArgs() *THBaseServicePutMultipleArgs { - return &THBaseServicePutMultipleArgs{} -} - -func (p *THBaseServicePutMultipleArgs) GetTable() []byte { - return p.Table -} - -func (p *THBaseServicePutMultipleArgs) GetTputs() []*TPut { - return p.Tputs -} -func (p *THBaseServicePutMultipleArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTputs bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTputs = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - 
if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTputs { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tputs is not set")) - } - return nil -} - -func (p *THBaseServicePutMultipleArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServicePutMultipleArgs) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TPut, 0, size) - p.Tputs = tSlice - for i := 0; i < size; i++ { - _elem57 := &TPut{} - if err := _elem57.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem57), err) - } - p.Tputs = append(p.Tputs, _elem57) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *THBaseServicePutMultipleArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("putMultiple_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServicePutMultipleArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServicePutMultipleArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tputs", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tputs: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tputs)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tputs { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tputs: ", p), err) - } - return err -} - -func (p *THBaseServicePutMultipleArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServicePutMultipleArgs(%+v)", *p) -} - -// Attributes: -// - Io -type 
THBaseServicePutMultipleResult struct { - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServicePutMultipleResult() *THBaseServicePutMultipleResult { - return &THBaseServicePutMultipleResult{} -} - -var THBaseServicePutMultipleResult_Io_DEFAULT *TIOError - -func (p *THBaseServicePutMultipleResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServicePutMultipleResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServicePutMultipleResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServicePutMultipleResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServicePutMultipleResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServicePutMultipleResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("putMultiple_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServicePutMultipleResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServicePutMultipleResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServicePutMultipleResult(%+v)", *p) -} - -// Attributes: -// - Table: the table to delete from -// - Tdelete: the TDelete to delete -type THBaseServiceDeleteSingleArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tdelete *TDelete `thrift:"tdelete,2,required" json:"tdelete"` -} - -func NewTHBaseServiceDeleteSingleArgs() *THBaseServiceDeleteSingleArgs { - return &THBaseServiceDeleteSingleArgs{} -} - -func (p *THBaseServiceDeleteSingleArgs) GetTable() []byte { - return p.Table -} - -var THBaseServiceDeleteSingleArgs_Tdelete_DEFAULT *TDelete - -func (p *THBaseServiceDeleteSingleArgs) GetTdelete() *TDelete { - if !p.IsSetTdelete() { - return THBaseServiceDeleteSingleArgs_Tdelete_DEFAULT - } - return 
p.Tdelete -} -func (p *THBaseServiceDeleteSingleArgs) IsSetTdelete() bool { - return p.Tdelete != nil -} - -func (p *THBaseServiceDeleteSingleArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTdelete bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTdelete = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTdelete { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tdelete is not set")) - } - return nil -} - -func (p *THBaseServiceDeleteSingleArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceDeleteSingleArgs) readField2(iprot thrift.TProtocol) error { - p.Tdelete = &TDelete{ - DeleteType: 1, - } - if err := p.Tdelete.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tdelete), err) - } - return nil -} - -func (p *THBaseServiceDeleteSingleArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("deleteSingle_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceDeleteSingleArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceDeleteSingleArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tdelete", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tdelete: ", p), err) - } - if err := p.Tdelete.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tdelete), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:tdelete: ", p), err) - } - return err -} - -func (p *THBaseServiceDeleteSingleArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceDeleteSingleArgs(%+v)", *p) -} - -// Attributes: -// - Io -type THBaseServiceDeleteSingleResult struct { - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceDeleteSingleResult() *THBaseServiceDeleteSingleResult { - return &THBaseServiceDeleteSingleResult{} -} - -var THBaseServiceDeleteSingleResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceDeleteSingleResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceDeleteSingleResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceDeleteSingleResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceDeleteSingleResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceDeleteSingleResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceDeleteSingleResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("deleteSingle_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceDeleteSingleResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceDeleteSingleResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceDeleteSingleResult(%+v)", *p) -} - -// Attributes: -// - Table: the table to delete from -// - Tdeletes: list of TDeletes to delete -type THBaseServiceDeleteMultipleArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tdeletes []*TDelete `thrift:"tdeletes,2,required" json:"tdeletes"` -} - -func NewTHBaseServiceDeleteMultipleArgs() *THBaseServiceDeleteMultipleArgs { - return 
&THBaseServiceDeleteMultipleArgs{} -} - -func (p *THBaseServiceDeleteMultipleArgs) GetTable() []byte { - return p.Table -} - -func (p *THBaseServiceDeleteMultipleArgs) GetTdeletes() []*TDelete { - return p.Tdeletes -} -func (p *THBaseServiceDeleteMultipleArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTdeletes bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTdeletes = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTdeletes { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tdeletes is not set")) - } - return nil -} - -func (p *THBaseServiceDeleteMultipleArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceDeleteMultipleArgs) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TDelete, 0, size) - p.Tdeletes = tSlice - for i := 0; i < size; i++ { - _elem58 := &TDelete{ - DeleteType: 1, - } - if err := _elem58.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem58), err) - } - p.Tdeletes = append(p.Tdeletes, _elem58) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *THBaseServiceDeleteMultipleArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("deleteMultiple_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceDeleteMultipleArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), 
err) - } - return err -} - -func (p *THBaseServiceDeleteMultipleArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tdeletes", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tdeletes: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tdeletes)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tdeletes { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tdeletes: ", p), err) - } - return err -} - -func (p *THBaseServiceDeleteMultipleArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceDeleteMultipleArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceDeleteMultipleResult struct { - Success []*TDelete `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceDeleteMultipleResult() *THBaseServiceDeleteMultipleResult { - return &THBaseServiceDeleteMultipleResult{} -} - -var THBaseServiceDeleteMultipleResult_Success_DEFAULT []*TDelete - -func (p *THBaseServiceDeleteMultipleResult) GetSuccess() []*TDelete { - return p.Success -} - -var THBaseServiceDeleteMultipleResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceDeleteMultipleResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceDeleteMultipleResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceDeleteMultipleResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceDeleteMultipleResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceDeleteMultipleResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceDeleteMultipleResult) readField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TDelete, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem59 := &TDelete{ - DeleteType: 1, - } - if err := _elem59.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem59), err) - } - p.Success = append(p.Success, _elem59) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *THBaseServiceDeleteMultipleResult) 
readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceDeleteMultipleResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("deleteMultiple_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceDeleteMultipleResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceDeleteMultipleResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceDeleteMultipleResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceDeleteMultipleResult(%+v)", *p) -} - -// Attributes: -// - Table: to check in and delete from -// - Row: row to check -// - Family: column family to check -// - Qualifier: column qualifier to check -// - Value: the expected value, if not provided the -// check is for the non-existence of the -// column in question -// - Tdelete: the TDelete to execute if the check succeeds -type THBaseServiceCheckAndDeleteArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Row []byte `thrift:"row,2,required" json:"row"` - Family []byte `thrift:"family,3,required" json:"family"` - Qualifier []byte `thrift:"qualifier,4,required" json:"qualifier"` - Value []byte `thrift:"value,5" json:"value"` - Tdelete *TDelete `thrift:"tdelete,6,required" json:"tdelete"` -} - -func NewTHBaseServiceCheckAndDeleteArgs() *THBaseServiceCheckAndDeleteArgs { - return &THBaseServiceCheckAndDeleteArgs{} -} - -func (p *THBaseServiceCheckAndDeleteArgs) GetTable() []byte { - return p.Table -} - -func (p *THBaseServiceCheckAndDeleteArgs) GetRow() []byte { - return p.Row -} - -func (p *THBaseServiceCheckAndDeleteArgs) GetFamily() []byte { - return p.Family -} - 
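The Attributes comment above describes checkAndDelete as an atomic compare-and-delete: the supplied TDelete is executed only if the cell at row/family/qualifier currently holds the expected Value, or, when Value is omitted, only if that cell does not exist. A minimal sketch of just that guard logic in plain Go, assuming a toy in-memory cell map (the names cellStore and checkAndDelete below are hypothetical and are not part of the generated Thrift bindings):

```go
package main

import (
	"bytes"
	"fmt"
)

// cellStore is a toy stand-in for one row's columns: "family:qualifier" -> value.
type cellStore map[string][]byte

// checkAndDelete applies the delete only when the guard holds:
// expected == nil requires the cell to be absent; otherwise the stored
// value must equal expected. The returned bool mirrors the success field
// of the generated checkAndDelete result struct.
func checkAndDelete(cells cellStore, family, qualifier string, expected []byte) bool {
	key := family + ":" + qualifier
	current, exists := cells[key]
	if expected == nil {
		if exists {
			return false // non-existence check failed
		}
	} else if !exists || !bytes.Equal(current, expected) {
		return false // value check failed
	}
	delete(cells, key) // check succeeded, perform the delete
	return true
}

func main() {
	row := cellStore{"basic:meta": []byte("v1")}
	fmt.Println(checkAndDelete(row, "basic", "meta", []byte("v2"))) // false: value differs
	fmt.Println(checkAndDelete(row, "basic", "meta", []byte("v1"))) // true: cell deleted
}
```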
-func (p *THBaseServiceCheckAndDeleteArgs) GetQualifier() []byte { - return p.Qualifier -} - -func (p *THBaseServiceCheckAndDeleteArgs) GetValue() []byte { - return p.Value -} - -var THBaseServiceCheckAndDeleteArgs_Tdelete_DEFAULT *TDelete - -func (p *THBaseServiceCheckAndDeleteArgs) GetTdelete() *TDelete { - if !p.IsSetTdelete() { - return THBaseServiceCheckAndDeleteArgs_Tdelete_DEFAULT - } - return p.Tdelete -} -func (p *THBaseServiceCheckAndDeleteArgs) IsSetTdelete() bool { - return p.Tdelete != nil -} - -func (p *THBaseServiceCheckAndDeleteArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetRow bool = false - var issetFamily bool = false - var issetQualifier bool = false - var issetTdelete bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetRow = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - issetFamily = true - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - issetQualifier = true - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - issetTdelete = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetRow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Row is not set")) - } - if !issetFamily { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Family is not set")) - } - if !issetQualifier { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Qualifier is not set")) - } - if !issetTdelete { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tdelete is not set")) - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteArgs) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Row = v - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteArgs) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Family = v - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteArgs) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error 
reading field 4: ", err) - } else { - p.Qualifier = v - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteArgs) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteArgs) readField6(iprot thrift.TProtocol) error { - p.Tdelete = &TDelete{ - DeleteType: 1, - } - if err := p.Tdelete.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tdelete), err) - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("checkAndDelete_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndDeleteArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("row", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:row: ", p), err) - } - if err := oprot.WriteBinary(p.Row); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.row (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:row: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndDeleteArgs) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("family", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:family: ", p), err) - } - if err := oprot.WriteBinary(p.Family); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.family (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:family: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndDeleteArgs) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("qualifier", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:qualifier: ", p), err) - } - if err := oprot.WriteBinary(p.Qualifier); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.qualifier (4) field 
write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:qualifier: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndDeleteArgs) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:value: ", p), err) - } - if err := oprot.WriteBinary(p.Value); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:value: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndDeleteArgs) writeField6(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tdelete", thrift.STRUCT, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:tdelete: ", p), err) - } - if err := p.Tdelete.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tdelete), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:tdelete: ", p), err) - } - return err -} - -func (p *THBaseServiceCheckAndDeleteArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceCheckAndDeleteArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceCheckAndDeleteResult struct { - Success *bool `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceCheckAndDeleteResult() *THBaseServiceCheckAndDeleteResult { - return &THBaseServiceCheckAndDeleteResult{} -} - -var THBaseServiceCheckAndDeleteResult_Success_DEFAULT bool - -func (p *THBaseServiceCheckAndDeleteResult) GetSuccess() bool { - if !p.IsSetSuccess() { - return THBaseServiceCheckAndDeleteResult_Success_DEFAULT - } - return *p.Success -} - -var THBaseServiceCheckAndDeleteResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceCheckAndDeleteResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceCheckAndDeleteResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceCheckAndDeleteResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceCheckAndDeleteResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceCheckAndDeleteResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteResult) readField0(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 0: ", 
err) - } else { - p.Success = &v - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("checkAndDelete_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceCheckAndDeleteResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.BOOL, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteBool(bool(*p.Success)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceCheckAndDeleteResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceCheckAndDeleteResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceCheckAndDeleteResult(%+v)", *p) -} - -// Attributes: -// - Table: the table to increment the value on -// - Tincrement: the TIncrement to increment -type THBaseServiceIncrementArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tincrement *TIncrement `thrift:"tincrement,2,required" json:"tincrement"` -} - -func NewTHBaseServiceIncrementArgs() *THBaseServiceIncrementArgs { - return &THBaseServiceIncrementArgs{} -} - -func (p *THBaseServiceIncrementArgs) GetTable() []byte { - return p.Table -} - -var THBaseServiceIncrementArgs_Tincrement_DEFAULT *TIncrement - -func (p *THBaseServiceIncrementArgs) GetTincrement() *TIncrement { - if !p.IsSetTincrement() { - return THBaseServiceIncrementArgs_Tincrement_DEFAULT - } - return p.Tincrement -} -func (p *THBaseServiceIncrementArgs) IsSetTincrement() bool { - return p.Tincrement != nil -} - -func (p *THBaseServiceIncrementArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTincrement bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if 
fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTincrement = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTincrement { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tincrement is not set")) - } - return nil -} - -func (p *THBaseServiceIncrementArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceIncrementArgs) readField2(iprot thrift.TProtocol) error { - p.Tincrement = &TIncrement{} - if err := p.Tincrement.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tincrement), err) - } - return nil -} - -func (p *THBaseServiceIncrementArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("increment_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceIncrementArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceIncrementArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tincrement", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tincrement: ", p), err) - } - if err := p.Tincrement.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tincrement), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tincrement: ", p), err) - } - return err -} - -func (p *THBaseServiceIncrementArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceIncrementArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceIncrementResult struct { - Success *TResult_ `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceIncrementResult() *THBaseServiceIncrementResult { - return 
&THBaseServiceIncrementResult{} -} - -var THBaseServiceIncrementResult_Success_DEFAULT *TResult_ - -func (p *THBaseServiceIncrementResult) GetSuccess() *TResult_ { - if !p.IsSetSuccess() { - return THBaseServiceIncrementResult_Success_DEFAULT - } - return p.Success -} - -var THBaseServiceIncrementResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceIncrementResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceIncrementResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceIncrementResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceIncrementResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceIncrementResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceIncrementResult) readField0(iprot thrift.TProtocol) error { - p.Success = &TResult_{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *THBaseServiceIncrementResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceIncrementResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("increment_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceIncrementResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceIncrementResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := 
p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceIncrementResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceIncrementResult(%+v)", *p) -} - -// Attributes: -// - Table: the table to append the value on -// - Tappend: the TAppend to append -type THBaseServiceAppendArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tappend *TAppend `thrift:"tappend,2,required" json:"tappend"` -} - -func NewTHBaseServiceAppendArgs() *THBaseServiceAppendArgs { - return &THBaseServiceAppendArgs{} -} - -func (p *THBaseServiceAppendArgs) GetTable() []byte { - return p.Table -} - -var THBaseServiceAppendArgs_Tappend_DEFAULT *TAppend - -func (p *THBaseServiceAppendArgs) GetTappend() *TAppend { - if !p.IsSetTappend() { - return THBaseServiceAppendArgs_Tappend_DEFAULT - } - return p.Tappend -} -func (p *THBaseServiceAppendArgs) IsSetTappend() bool { - return p.Tappend != nil -} - -func (p *THBaseServiceAppendArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTappend bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTappend = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTappend { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tappend is not set")) - } - return nil -} - -func (p *THBaseServiceAppendArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceAppendArgs) readField2(iprot thrift.TProtocol) error { - p.Tappend = &TAppend{} - if err := p.Tappend.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tappend), err) - } - return nil -} - -func (p *THBaseServiceAppendArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("append_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p 
*THBaseServiceAppendArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceAppendArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tappend", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tappend: ", p), err) - } - if err := p.Tappend.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tappend), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tappend: ", p), err) - } - return err -} - -func (p *THBaseServiceAppendArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceAppendArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceAppendResult struct { - Success *TResult_ `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceAppendResult() *THBaseServiceAppendResult { - return &THBaseServiceAppendResult{} -} - -var THBaseServiceAppendResult_Success_DEFAULT *TResult_ - -func (p *THBaseServiceAppendResult) GetSuccess() *TResult_ { - if !p.IsSetSuccess() { - return THBaseServiceAppendResult_Success_DEFAULT - } - return p.Success -} - -var THBaseServiceAppendResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceAppendResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceAppendResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceAppendResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceAppendResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceAppendResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceAppendResult) readField0(iprot thrift.TProtocol) error { - p.Success = &TResult_{} - if err := p.Success.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *THBaseServiceAppendResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - 
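Each of the *Args structs deleted in this hunk follows the same generated pattern that is visible in the append_args reader above: loop over incoming fields until STOP, set an isset flag for every required field that arrives, skip unknown field ids, and reject the struct with an INVALID_DATA error if any required flag is still false. A stripped-down sketch of that pattern, decoupled from the Thrift runtime (the field and decodeAppendArgs names are invented for illustration):

```go
package main

import "fmt"

// field is a toy stand-in for what ReadFieldBegin/ReadBinary yield:
// a numeric field id plus its raw payload.
type field struct {
	id      int16
	payload []byte
}

// appendArgs mirrors the shape of the generated append_args struct;
// both members are required in the IDL.
type appendArgs struct {
	table   []byte
	tappend []byte
}

// decodeAppendArgs replays the generated Read pattern: consume fields in
// any order, remember which required ones were seen, ignore unknown ids,
// and fail if a required field never arrived.
func decodeAppendArgs(fields []field) (*appendArgs, error) {
	var (
		args         appendArgs
		issetTable   bool
		issetTappend bool
	)
	for _, f := range fields {
		switch f.id {
		case 1:
			args.table = f.payload
			issetTable = true
		case 2:
			args.tappend = f.payload
			issetTappend = true
		default:
			// unknown field id: skipped, like iprot.Skip in the generated code
		}
	}
	if !issetTable {
		return nil, fmt.Errorf("required field Table is not set")
	}
	if !issetTappend {
		return nil, fmt.Errorf("required field Tappend is not set")
	}
	return &args, nil
}

func main() {
	_, err := decodeAppendArgs([]field{{id: 1, payload: []byte("some_table")}})
	fmt.Println(err) // required field Tappend is not set
}
```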
-func (p *THBaseServiceAppendResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("append_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceAppendResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := p.Success.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceAppendResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceAppendResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceAppendResult(%+v)", *p) -} - -// Attributes: -// - Table: the table to get the Scanner for -// - Tscan: the scan object to get a Scanner for -type THBaseServiceOpenScannerArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tscan *TScan `thrift:"tscan,2,required" json:"tscan"` -} - -func NewTHBaseServiceOpenScannerArgs() *THBaseServiceOpenScannerArgs { - return &THBaseServiceOpenScannerArgs{} -} - -func (p *THBaseServiceOpenScannerArgs) GetTable() []byte { - return p.Table -} - -var THBaseServiceOpenScannerArgs_Tscan_DEFAULT *TScan - -func (p *THBaseServiceOpenScannerArgs) GetTscan() *TScan { - if !p.IsSetTscan() { - return THBaseServiceOpenScannerArgs_Tscan_DEFAULT - } - return p.Tscan -} -func (p *THBaseServiceOpenScannerArgs) IsSetTscan() bool { - return p.Tscan != nil -} - -func (p *THBaseServiceOpenScannerArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTscan bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTscan = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := 
iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTscan { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tscan is not set")) - } - return nil -} - -func (p *THBaseServiceOpenScannerArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceOpenScannerArgs) readField2(iprot thrift.TProtocol) error { - p.Tscan = &TScan{ - MaxVersions: 1, - } - if err := p.Tscan.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tscan), err) - } - return nil -} - -func (p *THBaseServiceOpenScannerArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("openScanner_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceOpenScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceOpenScannerArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tscan", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tscan: ", p), err) - } - if err := p.Tscan.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tscan), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tscan: ", p), err) - } - return err -} - -func (p *THBaseServiceOpenScannerArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceOpenScannerArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceOpenScannerResult struct { - Success *int32 `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceOpenScannerResult() *THBaseServiceOpenScannerResult { - return &THBaseServiceOpenScannerResult{} -} - -var THBaseServiceOpenScannerResult_Success_DEFAULT int32 - -func (p *THBaseServiceOpenScannerResult) GetSuccess() int32 { - if !p.IsSetSuccess() { - return THBaseServiceOpenScannerResult_Success_DEFAULT - } - return *p.Success -} - -var THBaseServiceOpenScannerResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceOpenScannerResult) GetIo() *TIOError { - if !p.IsSetIo() { - return 
THBaseServiceOpenScannerResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceOpenScannerResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceOpenScannerResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceOpenScannerResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceOpenScannerResult) readField0(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - p.Success = &v - } - return nil -} - -func (p *THBaseServiceOpenScannerResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceOpenScannerResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("openScanner_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceOpenScannerResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.I32, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteI32(int32(*p.Success)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceOpenScannerResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceOpenScannerResult) String() string { - if p == nil { - return "" - } - return 
fmt.Sprintf("THBaseServiceOpenScannerResult(%+v)", *p) -} - -// Attributes: -// - ScannerId: the Id of the Scanner to return rows from. This is an Id returned from the openScanner function. -// - NumRows: number of rows to return -type THBaseServiceGetScannerRowsArgs struct { - ScannerId int32 `thrift:"scannerId,1,required" json:"scannerId"` - NumRows int32 `thrift:"numRows,2" json:"numRows"` -} - -func NewTHBaseServiceGetScannerRowsArgs() *THBaseServiceGetScannerRowsArgs { - return &THBaseServiceGetScannerRowsArgs{ - NumRows: 1, - } -} - -func (p *THBaseServiceGetScannerRowsArgs) GetScannerId() int32 { - return p.ScannerId -} - -func (p *THBaseServiceGetScannerRowsArgs) GetNumRows() int32 { - return p.NumRows -} -func (p *THBaseServiceGetScannerRowsArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetScannerId bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetScannerId = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetScannerId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ScannerId is not set")) - } - return nil -} - -func (p *THBaseServiceGetScannerRowsArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ScannerId = v - } - return nil -} - -func (p *THBaseServiceGetScannerRowsArgs) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.NumRows = v - } - return nil -} - -func (p *THBaseServiceGetScannerRowsArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getScannerRows_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceGetScannerRowsArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("scannerId", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:scannerId: ", p), err) - } - if err := oprot.WriteI32(int32(p.ScannerId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.scannerId (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:scannerId: ", p), err) - } - return err -} - -func (p *THBaseServiceGetScannerRowsArgs) 
writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("numRows", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:numRows: ", p), err) - } - if err := oprot.WriteI32(int32(p.NumRows)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.numRows (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:numRows: ", p), err) - } - return err -} - -func (p *THBaseServiceGetScannerRowsArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceGetScannerRowsArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -// - Ia: if the scannerId is invalid -type THBaseServiceGetScannerRowsResult struct { - Success []*TResult_ `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` - Ia *TIllegalArgument `thrift:"ia,2" json:"ia,omitempty"` -} - -func NewTHBaseServiceGetScannerRowsResult() *THBaseServiceGetScannerRowsResult { - return &THBaseServiceGetScannerRowsResult{} -} - -var THBaseServiceGetScannerRowsResult_Success_DEFAULT []*TResult_ - -func (p *THBaseServiceGetScannerRowsResult) GetSuccess() []*TResult_ { - return p.Success -} - -var THBaseServiceGetScannerRowsResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceGetScannerRowsResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceGetScannerRowsResult_Io_DEFAULT - } - return p.Io -} - -var THBaseServiceGetScannerRowsResult_Ia_DEFAULT *TIllegalArgument - -func (p *THBaseServiceGetScannerRowsResult) GetIa() *TIllegalArgument { - if !p.IsSetIa() { - return THBaseServiceGetScannerRowsResult_Ia_DEFAULT - } - return p.Ia -} -func (p *THBaseServiceGetScannerRowsResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceGetScannerRowsResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceGetScannerRowsResult) IsSetIa() bool { - return p.Ia != nil -} - -func (p *THBaseServiceGetScannerRowsResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceGetScannerRowsResult) readField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TResult_, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem60 := &TResult_{} - if err := _elem60.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem60), err) - } - p.Success = append(p.Success, _elem60) - } - if err := iprot.ReadListEnd(); err != 
nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *THBaseServiceGetScannerRowsResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceGetScannerRowsResult) readField2(iprot thrift.TProtocol) error { - p.Ia = &TIllegalArgument{} - if err := p.Ia.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Ia), err) - } - return nil -} - -func (p *THBaseServiceGetScannerRowsResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getScannerRows_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceGetScannerRowsResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceGetScannerRowsResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceGetScannerRowsResult) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetIa() { - if err := oprot.WriteFieldBegin("ia", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ia: ", p), err) - } - if err := p.Ia.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Ia), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ia: ", p), err) - } - } - return err -} - -func (p *THBaseServiceGetScannerRowsResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceGetScannerRowsResult(%+v)", *p) -} - -// Attributes: -// - ScannerId: the Id of the Scanner to close * -type 
THBaseServiceCloseScannerArgs struct { - ScannerId int32 `thrift:"scannerId,1,required" json:"scannerId"` -} - -func NewTHBaseServiceCloseScannerArgs() *THBaseServiceCloseScannerArgs { - return &THBaseServiceCloseScannerArgs{} -} - -func (p *THBaseServiceCloseScannerArgs) GetScannerId() int32 { - return p.ScannerId -} -func (p *THBaseServiceCloseScannerArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetScannerId bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetScannerId = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetScannerId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ScannerId is not set")) - } - return nil -} - -func (p *THBaseServiceCloseScannerArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ScannerId = v - } - return nil -} - -func (p *THBaseServiceCloseScannerArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("closeScanner_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceCloseScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("scannerId", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:scannerId: ", p), err) - } - if err := oprot.WriteI32(int32(p.ScannerId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.scannerId (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:scannerId: ", p), err) - } - return err -} - -func (p *THBaseServiceCloseScannerArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceCloseScannerArgs(%+v)", *p) -} - -// Attributes: -// - Io -// - Ia: if the scannerId is invalid -type THBaseServiceCloseScannerResult struct { - Io *TIOError `thrift:"io,1" json:"io,omitempty"` - Ia *TIllegalArgument `thrift:"ia,2" json:"ia,omitempty"` -} - -func NewTHBaseServiceCloseScannerResult() *THBaseServiceCloseScannerResult { - return &THBaseServiceCloseScannerResult{} -} - -var THBaseServiceCloseScannerResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceCloseScannerResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceCloseScannerResult_Io_DEFAULT - } - return p.Io -} - -var THBaseServiceCloseScannerResult_Ia_DEFAULT *TIllegalArgument - -func (p *THBaseServiceCloseScannerResult) 
GetIa() *TIllegalArgument { - if !p.IsSetIa() { - return THBaseServiceCloseScannerResult_Ia_DEFAULT - } - return p.Ia -} -func (p *THBaseServiceCloseScannerResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceCloseScannerResult) IsSetIa() bool { - return p.Ia != nil -} - -func (p *THBaseServiceCloseScannerResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceCloseScannerResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceCloseScannerResult) readField2(iprot thrift.TProtocol) error { - p.Ia = &TIllegalArgument{} - if err := p.Ia.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Ia), err) - } - return nil -} - -func (p *THBaseServiceCloseScannerResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("closeScanner_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceCloseScannerResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceCloseScannerResult) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetIa() { - if err := oprot.WriteFieldBegin("ia", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ia: ", p), err) - } - if err := p.Ia.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Ia), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ia: ", p), err) - } - } - return err -} - -func (p *THBaseServiceCloseScannerResult) String() string { - if p == nil { - return "" - } - return 
fmt.Sprintf("THBaseServiceCloseScannerResult(%+v)", *p) -} - -// Attributes: -// - Table: table to apply the mutations -// - TrowMutations: mutations to apply -type THBaseServiceMutateRowArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - TrowMutations *TRowMutations `thrift:"trowMutations,2,required" json:"trowMutations"` -} - -func NewTHBaseServiceMutateRowArgs() *THBaseServiceMutateRowArgs { - return &THBaseServiceMutateRowArgs{} -} - -func (p *THBaseServiceMutateRowArgs) GetTable() []byte { - return p.Table -} - -var THBaseServiceMutateRowArgs_TrowMutations_DEFAULT *TRowMutations - -func (p *THBaseServiceMutateRowArgs) GetTrowMutations() *TRowMutations { - if !p.IsSetTrowMutations() { - return THBaseServiceMutateRowArgs_TrowMutations_DEFAULT - } - return p.TrowMutations -} -func (p *THBaseServiceMutateRowArgs) IsSetTrowMutations() bool { - return p.TrowMutations != nil -} - -func (p *THBaseServiceMutateRowArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTrowMutations bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTrowMutations = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTrowMutations { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TrowMutations is not set")) - } - return nil -} - -func (p *THBaseServiceMutateRowArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceMutateRowArgs) readField2(iprot thrift.TProtocol) error { - p.TrowMutations = &TRowMutations{} - if err := p.TrowMutations.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TrowMutations), err) - } - return nil -} - -func (p *THBaseServiceMutateRowArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("mutateRow_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceMutateRowArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write 
field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceMutateRowArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("trowMutations", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:trowMutations: ", p), err) - } - if err := p.TrowMutations.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TrowMutations), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:trowMutations: ", p), err) - } - return err -} - -func (p *THBaseServiceMutateRowArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceMutateRowArgs(%+v)", *p) -} - -// Attributes: -// - Io -type THBaseServiceMutateRowResult struct { - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceMutateRowResult() *THBaseServiceMutateRowResult { - return &THBaseServiceMutateRowResult{} -} - -var THBaseServiceMutateRowResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceMutateRowResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceMutateRowResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceMutateRowResult) IsSetIo() bool { - return p.Io != nil -} - -func (p *THBaseServiceMutateRowResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceMutateRowResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceMutateRowResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("mutateRow_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceMutateRowResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:io: ", p), err) - } - if err := p.Io.Write(oprot); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err) - } - } - return err -} - -func (p *THBaseServiceMutateRowResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceMutateRowResult(%+v)", *p) -} - -// Attributes: -// - Table: the table to get the Scanner for -// - Tscan: the scan object to get a Scanner for -// - NumRows: number of rows to return -type THBaseServiceGetScannerResultsArgs struct { - Table []byte `thrift:"table,1,required" json:"table"` - Tscan *TScan `thrift:"tscan,2,required" json:"tscan"` - NumRows int32 `thrift:"numRows,3" json:"numRows"` -} - -func NewTHBaseServiceGetScannerResultsArgs() *THBaseServiceGetScannerResultsArgs { - return &THBaseServiceGetScannerResultsArgs{ - NumRows: 1, - } -} - -func (p *THBaseServiceGetScannerResultsArgs) GetTable() []byte { - return p.Table -} - -var THBaseServiceGetScannerResultsArgs_Tscan_DEFAULT *TScan - -func (p *THBaseServiceGetScannerResultsArgs) GetTscan() *TScan { - if !p.IsSetTscan() { - return THBaseServiceGetScannerResultsArgs_Tscan_DEFAULT - } - return p.Tscan -} - -func (p *THBaseServiceGetScannerResultsArgs) GetNumRows() int32 { - return p.NumRows -} -func (p *THBaseServiceGetScannerResultsArgs) IsSetTscan() bool { - return p.Tscan != nil -} - -func (p *THBaseServiceGetScannerResultsArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTable bool = false - var issetTscan bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetTable = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetTscan = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTable { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Table is not set")) - } - if !issetTscan { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Tscan is not set")) - } - return nil -} - -func (p *THBaseServiceGetScannerResultsArgs) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Table = v - } - return nil -} - -func (p *THBaseServiceGetScannerResultsArgs) readField2(iprot thrift.TProtocol) error { - p.Tscan = &TScan{ - MaxVersions: 1, - } - if err := p.Tscan.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Tscan), err) - } - return nil -} - -func (p *THBaseServiceGetScannerResultsArgs) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.NumRows = v - } - return nil -} - 
-func (p *THBaseServiceGetScannerResultsArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getScannerResults_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceGetScannerResultsArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("table", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:table: ", p), err) - } - if err := oprot.WriteBinary(p.Table); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.table (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:table: ", p), err) - } - return err -} - -func (p *THBaseServiceGetScannerResultsArgs) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("tscan", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tscan: ", p), err) - } - if err := p.Tscan.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Tscan), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tscan: ", p), err) - } - return err -} - -func (p *THBaseServiceGetScannerResultsArgs) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("numRows", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:numRows: ", p), err) - } - if err := oprot.WriteI32(int32(p.NumRows)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.numRows (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:numRows: ", p), err) - } - return err -} - -func (p *THBaseServiceGetScannerResultsArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("THBaseServiceGetScannerResultsArgs(%+v)", *p) -} - -// Attributes: -// - Success -// - Io -type THBaseServiceGetScannerResultsResult struct { - Success []*TResult_ `thrift:"success,0" json:"success,omitempty"` - Io *TIOError `thrift:"io,1" json:"io,omitempty"` -} - -func NewTHBaseServiceGetScannerResultsResult() *THBaseServiceGetScannerResultsResult { - return &THBaseServiceGetScannerResultsResult{} -} - -var THBaseServiceGetScannerResultsResult_Success_DEFAULT []*TResult_ - -func (p *THBaseServiceGetScannerResultsResult) GetSuccess() []*TResult_ { - return p.Success -} - -var THBaseServiceGetScannerResultsResult_Io_DEFAULT *TIOError - -func (p *THBaseServiceGetScannerResultsResult) GetIo() *TIOError { - if !p.IsSetIo() { - return THBaseServiceGetScannerResultsResult_Io_DEFAULT - } - return p.Io -} -func (p *THBaseServiceGetScannerResultsResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *THBaseServiceGetScannerResultsResult) IsSetIo() bool { - return p.Io != nil -} - -func (p 
*THBaseServiceGetScannerResultsResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if err := p.readField0(iprot); err != nil { - return err - } - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *THBaseServiceGetScannerResultsResult) readField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TResult_, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem61 := &TResult_{} - if err := _elem61.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem61), err) - } - p.Success = append(p.Success, _elem61) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *THBaseServiceGetScannerResultsResult) readField1(iprot thrift.TProtocol) error { - p.Io = &TIOError{} - if err := p.Io.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Io), err) - } - return nil -} - -func (p *THBaseServiceGetScannerResultsResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("getScannerResults_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField0(oprot); err != nil { - return err - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *THBaseServiceGetScannerResultsResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *THBaseServiceGetScannerResultsResult) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetIo() { - if err := oprot.WriteFieldBegin("io", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 
1:io: ", p), err)
-    }
-    if err := p.Io.Write(oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Io), err)
-    }
-    if err := oprot.WriteFieldEnd(); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:io: ", p), err)
-    }
-  }
-  return err
-}
-
-func (p *THBaseServiceGetScannerResultsResult) String() string {
-  if p == nil {
-    return ""
-  }
-  return fmt.Sprintf("THBaseServiceGetScannerResultsResult(%+v)", *p)
-}
diff --git a/directory/hbase/hbasethrift/ttypes.go b/directory/hbase/hbasethrift/ttypes.go
deleted file mode 100644
index eaf6bec..0000000
--- a/directory/hbase/hbasethrift/ttypes.go
+++ /dev/null
@@ -1,4315 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package hbasethrift
-
-import (
-  "bytes"
-  "fmt"
-  "git.apache.org/thrift.git/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-//Specify type of delete:
-// - DELETE_COLUMN means exactly one version will be removed,
-// - DELETE_COLUMNS means previous versions will also be removed.
-type TDeleteType int64
-
-const (
-  TDeleteType_DELETE_COLUMN  TDeleteType = 0
-  TDeleteType_DELETE_COLUMNS TDeleteType = 1
-)
-
-func (p TDeleteType) String() string {
-  switch p {
-  case TDeleteType_DELETE_COLUMN:
-    return "DELETE_COLUMN"
-  case TDeleteType_DELETE_COLUMNS:
-    return "DELETE_COLUMNS"
-  }
-  return ""
-}
-
-func TDeleteTypeFromString(s string) (TDeleteType, error) {
-  switch s {
-  case "DELETE_COLUMN":
-    return TDeleteType_DELETE_COLUMN, nil
-  case "DELETE_COLUMNS":
-    return TDeleteType_DELETE_COLUMNS, nil
-  }
-  return TDeleteType(0), fmt.Errorf("not a valid TDeleteType string")
-}
-
-func TDeleteTypePtr(v TDeleteType) *TDeleteType { return &v }
-
-func (p TDeleteType) MarshalText() ([]byte, error) {
-  return []byte(p.String()), nil
-}
-
-func (p *TDeleteType) UnmarshalText(text []byte) error {
-  q, err := TDeleteTypeFromString(string(text))
-  if err != nil {
-    return err
-  }
-  *p = q
-  return nil
-}
-
-//Specify Durability:
-// - SKIP_WAL means do not write the Mutation to the WAL.
-// - ASYNC_WAL means write the Mutation to the WAL asynchronously,
-// - SYNC_WAL means write the Mutation to the WAL synchronously,
-// - FSYNC_WAL means Write the Mutation to the WAL synchronously and force the entries to disk.
-type TDurability int64 - -const ( - TDurability_SKIP_WAL TDurability = 1 - TDurability_ASYNC_WAL TDurability = 2 - TDurability_SYNC_WAL TDurability = 3 - TDurability_FSYNC_WAL TDurability = 4 -) - -func (p TDurability) String() string { - switch p { - case TDurability_SKIP_WAL: - return "SKIP_WAL" - case TDurability_ASYNC_WAL: - return "ASYNC_WAL" - case TDurability_SYNC_WAL: - return "SYNC_WAL" - case TDurability_FSYNC_WAL: - return "FSYNC_WAL" - } - return "" -} - -func TDurabilityFromString(s string) (TDurability, error) { - switch s { - case "SKIP_WAL": - return TDurability_SKIP_WAL, nil - case "ASYNC_WAL": - return TDurability_ASYNC_WAL, nil - case "SYNC_WAL": - return TDurability_SYNC_WAL, nil - case "FSYNC_WAL": - return TDurability_FSYNC_WAL, nil - } - return TDurability(0), fmt.Errorf("not a valid TDurability string") -} - -func TDurabilityPtr(v TDurability) *TDurability { return &v } - -func (p TDurability) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *TDurability) UnmarshalText(text []byte) error { - q, err := TDurabilityFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -// Attributes: -// - MinStamp -// - MaxStamp -type TTimeRange struct { - MinStamp int64 `thrift:"minStamp,1,required" json:"minStamp"` - MaxStamp int64 `thrift:"maxStamp,2,required" json:"maxStamp"` -} - -func NewTTimeRange() *TTimeRange { - return &TTimeRange{} -} - -func (p *TTimeRange) GetMinStamp() int64 { - return p.MinStamp -} - -func (p *TTimeRange) GetMaxStamp() int64 { - return p.MaxStamp -} -func (p *TTimeRange) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetMinStamp bool = false - var issetMaxStamp bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetMinStamp = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetMaxStamp = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetMinStamp { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MinStamp is not set")) - } - if !issetMaxStamp { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxStamp is not set")) - } - return nil -} - -func (p *TTimeRange) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.MinStamp = v - } - return nil -} - -func (p *TTimeRange) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.MaxStamp = v - } - return nil -} - -func (p *TTimeRange) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TTimeRange"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return 
err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TTimeRange) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("minStamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:minStamp: ", p), err) - } - if err := oprot.WriteI64(int64(p.MinStamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.minStamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:minStamp: ", p), err) - } - return err -} - -func (p *TTimeRange) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("maxStamp", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxStamp: ", p), err) - } - if err := oprot.WriteI64(int64(p.MaxStamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxStamp (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxStamp: ", p), err) - } - return err -} - -func (p *TTimeRange) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TTimeRange(%+v)", *p) -} - -// Addresses a single cell or multiple cells -// in a HBase table by column family and optionally -// a column qualifier and timestamp -// -// Attributes: -// - Family -// - Qualifier -// - Timestamp -type TColumn struct { - Family []byte `thrift:"family,1,required" json:"family"` - Qualifier []byte `thrift:"qualifier,2" json:"qualifier,omitempty"` - Timestamp *int64 `thrift:"timestamp,3" json:"timestamp,omitempty"` -} - -func NewTColumn() *TColumn { - return &TColumn{} -} - -func (p *TColumn) GetFamily() []byte { - return p.Family -} - -var TColumn_Qualifier_DEFAULT []byte - -func (p *TColumn) GetQualifier() []byte { - return p.Qualifier -} - -var TColumn_Timestamp_DEFAULT int64 - -func (p *TColumn) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return TColumn_Timestamp_DEFAULT - } - return *p.Timestamp -} -func (p *TColumn) IsSetQualifier() bool { - return p.Qualifier != nil -} - -func (p *TColumn) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *TColumn) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetFamily bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetFamily = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetFamily { - return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Family is not set")) - } - return nil -} - -func (p *TColumn) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Family = v - } - return nil -} - -func (p *TColumn) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Qualifier = v - } - return nil -} - -func (p *TColumn) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *TColumn) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TColumn"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TColumn) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("family", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:family: ", p), err) - } - if err := oprot.WriteBinary(p.Family); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.family (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:family: ", p), err) - } - return err -} - -func (p *TColumn) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetQualifier() { - if err := oprot.WriteFieldBegin("qualifier", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:qualifier: ", p), err) - } - if err := oprot.WriteBinary(p.Qualifier); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.qualifier (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:qualifier: ", p), err) - } - } - return err -} - -func (p *TColumn) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:timestamp: ", p), err) - } - } - return err -} - -func (p *TColumn) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TColumn(%+v)", *p) -} - -// Represents a single cell and its value. 
-// -// Attributes: -// - Family -// - Qualifier -// - Value -// - Timestamp -// - Tags -type TColumnValue struct { - Family []byte `thrift:"family,1,required" json:"family"` - Qualifier []byte `thrift:"qualifier,2,required" json:"qualifier"` - Value []byte `thrift:"value,3,required" json:"value"` - Timestamp *int64 `thrift:"timestamp,4" json:"timestamp,omitempty"` - Tags []byte `thrift:"tags,5" json:"tags,omitempty"` -} - -func NewTColumnValue() *TColumnValue { - return &TColumnValue{} -} - -func (p *TColumnValue) GetFamily() []byte { - return p.Family -} - -func (p *TColumnValue) GetQualifier() []byte { - return p.Qualifier -} - -func (p *TColumnValue) GetValue() []byte { - return p.Value -} - -var TColumnValue_Timestamp_DEFAULT int64 - -func (p *TColumnValue) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return TColumnValue_Timestamp_DEFAULT - } - return *p.Timestamp -} - -var TColumnValue_Tags_DEFAULT []byte - -func (p *TColumnValue) GetTags() []byte { - return p.Tags -} -func (p *TColumnValue) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *TColumnValue) IsSetTags() bool { - return p.Tags != nil -} - -func (p *TColumnValue) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetFamily bool = false - var issetQualifier bool = false - var issetValue bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetFamily = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetQualifier = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - issetValue = true - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetFamily { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Family is not set")) - } - if !issetQualifier { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Qualifier is not set")) - } - if !issetValue { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Value is not set")) - } - return nil -} - -func (p *TColumnValue) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Family = v - } - return nil -} - -func (p *TColumnValue) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Qualifier = v - } - return nil -} - -func (p *TColumnValue) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Value = v - } - return nil -} - -func (p *TColumnValue) readField4(iprot 
thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *TColumnValue) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.Tags = v - } - return nil -} - -func (p *TColumnValue) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TColumnValue"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TColumnValue) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("family", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:family: ", p), err) - } - if err := oprot.WriteBinary(p.Family); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.family (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:family: ", p), err) - } - return err -} - -func (p *TColumnValue) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("qualifier", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:qualifier: ", p), err) - } - if err := oprot.WriteBinary(p.Qualifier); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.qualifier (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:qualifier: ", p), err) - } - return err -} - -func (p *TColumnValue) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("value", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:value: ", p), err) - } - if err := oprot.WriteBinary(p.Value); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:value: ", p), err) - } - return err -} - -func (p *TColumnValue) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:timestamp: ", p), err) - } - } - return err -} - -func (p *TColumnValue) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := 
oprot.WriteFieldBegin("tags", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:tags: ", p), err) - } - if err := oprot.WriteBinary(p.Tags); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.tags (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:tags: ", p), err) - } - } - return err -} - -func (p *TColumnValue) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TColumnValue(%+v)", *p) -} - -// Represents a single cell and the amount to increment it by -// -// Attributes: -// - Family -// - Qualifier -// - Amount -type TColumnIncrement struct { - Family []byte `thrift:"family,1,required" json:"family"` - Qualifier []byte `thrift:"qualifier,2,required" json:"qualifier"` - Amount int64 `thrift:"amount,3" json:"amount,omitempty"` -} - -func NewTColumnIncrement() *TColumnIncrement { - return &TColumnIncrement{ - Amount: 1, - } -} - -func (p *TColumnIncrement) GetFamily() []byte { - return p.Family -} - -func (p *TColumnIncrement) GetQualifier() []byte { - return p.Qualifier -} - -var TColumnIncrement_Amount_DEFAULT int64 = 1 - -func (p *TColumnIncrement) GetAmount() int64 { - return p.Amount -} -func (p *TColumnIncrement) IsSetAmount() bool { - return p.Amount != TColumnIncrement_Amount_DEFAULT -} - -func (p *TColumnIncrement) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetFamily bool = false - var issetQualifier bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetFamily = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetQualifier = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetFamily { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Family is not set")) - } - if !issetQualifier { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Qualifier is not set")) - } - return nil -} - -func (p *TColumnIncrement) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Family = v - } - return nil -} - -func (p *TColumnIncrement) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.Qualifier = v - } - return nil -} - -func (p *TColumnIncrement) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Amount = v - } - return nil -} - -func (p *TColumnIncrement) Write(oprot thrift.TProtocol) error { - if err := 
oprot.WriteStructBegin("TColumnIncrement"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TColumnIncrement) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("family", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:family: ", p), err) - } - if err := oprot.WriteBinary(p.Family); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.family (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:family: ", p), err) - } - return err -} - -func (p *TColumnIncrement) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("qualifier", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:qualifier: ", p), err) - } - if err := oprot.WriteBinary(p.Qualifier); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.qualifier (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:qualifier: ", p), err) - } - return err -} - -func (p *TColumnIncrement) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetAmount() { - if err := oprot.WriteFieldBegin("amount", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:amount: ", p), err) - } - if err := oprot.WriteI64(int64(p.Amount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.amount (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:amount: ", p), err) - } - } - return err -} - -func (p *TColumnIncrement) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TColumnIncrement(%+v)", *p) -} - -// if no Result is found, row and columnValues will not be set. 
-// -// Attributes: -// - Row -// - ColumnValues -type TResult_ struct { - Row []byte `thrift:"row,1" json:"row,omitempty"` - ColumnValues []*TColumnValue `thrift:"columnValues,2,required" json:"columnValues"` -} - -func NewTResult_() *TResult_ { - return &TResult_{} -} - -var TResult__Row_DEFAULT []byte - -func (p *TResult_) GetRow() []byte { - return p.Row -} - -func (p *TResult_) GetColumnValues() []*TColumnValue { - return p.ColumnValues -} -func (p *TResult_) IsSetRow() bool { - return p.Row != nil -} - -func (p *TResult_) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetColumnValues bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetColumnValues = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetColumnValues { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ColumnValues is not set")) - } - return nil -} - -func (p *TResult_) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Row = v - } - return nil -} - -func (p *TResult_) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TColumnValue, 0, size) - p.ColumnValues = tSlice - for i := 0; i < size; i++ { - _elem0 := &TColumnValue{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.ColumnValues = append(p.ColumnValues, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *TResult_) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TResult"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TResult_) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetRow() { - if err := oprot.WriteFieldBegin("row", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:row: ", p), err) - } - if err := oprot.WriteBinary(p.Row); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.row (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:row: ", 
p), err) - } - } - return err -} - -func (p *TResult_) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("columnValues", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:columnValues: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ColumnValues)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.ColumnValues { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:columnValues: ", p), err) - } - return err -} - -func (p *TResult_) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TResult_(%+v)", *p) -} - -// Attributes: -// - Labels -type TAuthorization struct { - Labels []string `thrift:"labels,1" json:"labels,omitempty"` -} - -func NewTAuthorization() *TAuthorization { - return &TAuthorization{} -} - -var TAuthorization_Labels_DEFAULT []string - -func (p *TAuthorization) GetLabels() []string { - return p.Labels -} -func (p *TAuthorization) IsSetLabels() bool { - return p.Labels != nil -} - -func (p *TAuthorization) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *TAuthorization) readField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]string, 0, size) - p.Labels = tSlice - for i := 0; i < size; i++ { - var _elem1 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _elem1 = v - } - p.Labels = append(p.Labels, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *TAuthorization) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TAuthorization"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TAuthorization) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetLabels() { - if err := oprot.WriteFieldBegin("labels", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write field begin error 1:labels: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRING, len(p.Labels)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Labels { - if err := oprot.WriteString(string(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:labels: ", p), err) - } - } - return err -} - -func (p *TAuthorization) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TAuthorization(%+v)", *p) -} - -// Attributes: -// - Expression -type TCellVisibility struct { - Expression *string `thrift:"expression,1" json:"expression,omitempty"` -} - -func NewTCellVisibility() *TCellVisibility { - return &TCellVisibility{} -} - -var TCellVisibility_Expression_DEFAULT string - -func (p *TCellVisibility) GetExpression() string { - if !p.IsSetExpression() { - return TCellVisibility_Expression_DEFAULT - } - return *p.Expression -} -func (p *TCellVisibility) IsSetExpression() bool { - return p.Expression != nil -} - -func (p *TCellVisibility) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *TCellVisibility) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Expression = &v - } - return nil -} - -func (p *TCellVisibility) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TCellVisibility"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TCellVisibility) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetExpression() { - if err := oprot.WriteFieldBegin("expression", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:expression: ", p), err) - } - if err := oprot.WriteString(string(*p.Expression)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.expression (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:expression: ", p), err) - } - } - return err -} - -func (p *TCellVisibility) String() string { - if p == nil { - return "" - } - return 
fmt.Sprintf("TCellVisibility(%+v)", *p) -} - -// Used to perform Get operations on a single row. -// -// The scope can be further narrowed down by specifying a list of -// columns or column families. -// -// To get everything for a row, instantiate a Get object with just the row to get. -// To further define the scope of what to get you can add a timestamp or time range -// with an optional maximum number of versions to return. -// -// If you specify a time range and a timestamp the range is ignored. -// Timestamps on TColumns are ignored. -// -// Attributes: -// - Row -// - Columns -// - Timestamp -// - TimeRange -// - MaxVersions -// - FilterString -// - Attributes -// - Authorizations -type TGet struct { - Row []byte `thrift:"row,1,required" json:"row"` - Columns []*TColumn `thrift:"columns,2" json:"columns,omitempty"` - Timestamp *int64 `thrift:"timestamp,3" json:"timestamp,omitempty"` - TimeRange *TTimeRange `thrift:"timeRange,4" json:"timeRange,omitempty"` - MaxVersions *int32 `thrift:"maxVersions,5" json:"maxVersions,omitempty"` - FilterString []byte `thrift:"filterString,6" json:"filterString,omitempty"` - Attributes map[string][]byte `thrift:"attributes,7" json:"attributes,omitempty"` - Authorizations *TAuthorization `thrift:"authorizations,8" json:"authorizations,omitempty"` -} - -func NewTGet() *TGet { - return &TGet{} -} - -func (p *TGet) GetRow() []byte { - return p.Row -} - -var TGet_Columns_DEFAULT []*TColumn - -func (p *TGet) GetColumns() []*TColumn { - return p.Columns -} - -var TGet_Timestamp_DEFAULT int64 - -func (p *TGet) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return TGet_Timestamp_DEFAULT - } - return *p.Timestamp -} - -var TGet_TimeRange_DEFAULT *TTimeRange - -func (p *TGet) GetTimeRange() *TTimeRange { - if !p.IsSetTimeRange() { - return TGet_TimeRange_DEFAULT - } - return p.TimeRange -} - -var TGet_MaxVersions_DEFAULT int32 - -func (p *TGet) GetMaxVersions() int32 { - if !p.IsSetMaxVersions() { - return TGet_MaxVersions_DEFAULT - } - return *p.MaxVersions -} - -var TGet_FilterString_DEFAULT []byte - -func (p *TGet) GetFilterString() []byte { - return p.FilterString -} - -var TGet_Attributes_DEFAULT map[string][]byte - -func (p *TGet) GetAttributes() map[string][]byte { - return p.Attributes -} - -var TGet_Authorizations_DEFAULT *TAuthorization - -func (p *TGet) GetAuthorizations() *TAuthorization { - if !p.IsSetAuthorizations() { - return TGet_Authorizations_DEFAULT - } - return p.Authorizations -} -func (p *TGet) IsSetColumns() bool { - return p.Columns != nil -} - -func (p *TGet) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *TGet) IsSetTimeRange() bool { - return p.TimeRange != nil -} - -func (p *TGet) IsSetMaxVersions() bool { - return p.MaxVersions != nil -} - -func (p *TGet) IsSetFilterString() bool { - return p.FilterString != nil -} - -func (p *TGet) IsSetAttributes() bool { - return p.Attributes != nil -} - -func (p *TGet) IsSetAuthorizations() bool { - return p.Authorizations != nil -} - -func (p *TGet) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRow bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetRow = true - 
case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 7: - if err := p.readField7(iprot); err != nil { - return err - } - case 8: - if err := p.readField8(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Row is not set")) - } - return nil -} - -func (p *TGet) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Row = v - } - return nil -} - -func (p *TGet) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TColumn, 0, size) - p.Columns = tSlice - for i := 0; i < size; i++ { - _elem2 := &TColumn{} - if err := _elem2.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.Columns = append(p.Columns, _elem2) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *TGet) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *TGet) readField4(iprot thrift.TProtocol) error { - p.TimeRange = &TTimeRange{} - if err := p.TimeRange.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TimeRange), err) - } - return nil -} - -func (p *TGet) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.MaxVersions = &v - } - return nil -} - -func (p *TGet) readField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.FilterString = v - } - return nil -} - -func (p *TGet) readField7(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return thrift.PrependError("error reading map begin: ", err) - } - tMap := make(map[string][]byte, size) - p.Attributes = tMap - for i := 0; i < size; i++ { - var _key3 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _key3 = v - } - var _val4 []byte - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _val4 = v - } - p.Attributes[_key3] = _val4 - } - if err := iprot.ReadMapEnd(); err != nil { - return thrift.PrependError("error reading map end: ", err) - } - return nil -} - -func (p *TGet) readField8(iprot thrift.TProtocol) error { - p.Authorizations = &TAuthorization{} - if err := 
p.Authorizations.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Authorizations), err) - } - return nil -} - -func (p *TGet) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TGet"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TGet) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("row", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:row: ", p), err) - } - if err := oprot.WriteBinary(p.Row); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.row (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:row: ", p), err) - } - return err -} - -func (p *TGet) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetColumns() { - if err := oprot.WriteFieldBegin("columns", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:columns: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Columns { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:columns: ", p), err) - } - } - return err -} - -func (p *TGet) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:timestamp: ", p), err) - } - } - return err -} - -func (p *TGet) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetTimeRange() { - if err := oprot.WriteFieldBegin("timeRange", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:timeRange: ", p), err) - } - if err := p.TimeRange.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TimeRange), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 4:timeRange: ", p), err) - } - } - return err -} - -func (p *TGet) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxVersions() { - if err := oprot.WriteFieldBegin("maxVersions", thrift.I32, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:maxVersions: ", p), err) - } - if err := oprot.WriteI32(int32(*p.MaxVersions)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxVersions (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:maxVersions: ", p), err) - } - } - return err -} - -func (p *TGet) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetFilterString() { - if err := oprot.WriteFieldBegin("filterString", thrift.STRING, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:filterString: ", p), err) - } - if err := oprot.WriteBinary(p.FilterString); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.filterString (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:filterString: ", p), err) - } - } - return err -} - -func (p *TGet) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetAttributes() { - if err := oprot.WriteFieldBegin("attributes", thrift.MAP, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:attributes: ", p), err) - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Attributes)); err != nil { - return thrift.PrependError("error writing map begin: ", err) - } - for k, v := range p.Attributes { - if err := oprot.WriteString(string(k)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - if err := oprot.WriteBinary(v); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - } - if err := oprot.WriteMapEnd(); err != nil { - return thrift.PrependError("error writing map end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:attributes: ", p), err) - } - } - return err -} - -func (p *TGet) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetAuthorizations() { - if err := oprot.WriteFieldBegin("authorizations", thrift.STRUCT, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:authorizations: ", p), err) - } - if err := p.Authorizations.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Authorizations), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:authorizations: ", p), err) - } - } - return err -} - -func (p *TGet) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TGet(%+v)", *p) -} - -// Used to perform Put operations for a single row. -// -// Add column values to this object and they'll be added. -// You can provide a default timestamp if the column values -// don't have one. If you don't provide a default timestamp -// the current time is inserted. -// -// You can specify how this Put should be written to the write-ahead Log (WAL) -// by changing the durability. If you don't provide durability, it defaults to -// column family's default setting for durability. 
-// -// Attributes: -// - Row -// - ColumnValues -// - Timestamp -// - Attributes -// - Durability -// - CellVisibility -type TPut struct { - Row []byte `thrift:"row,1,required" json:"row"` - ColumnValues []*TColumnValue `thrift:"columnValues,2,required" json:"columnValues"` - Timestamp *int64 `thrift:"timestamp,3" json:"timestamp,omitempty"` - // unused field # 4 - Attributes map[string][]byte `thrift:"attributes,5" json:"attributes,omitempty"` - Durability *TDurability `thrift:"durability,6" json:"durability,omitempty"` - CellVisibility *TCellVisibility `thrift:"cellVisibility,7" json:"cellVisibility,omitempty"` -} - -func NewTPut() *TPut { - return &TPut{} -} - -func (p *TPut) GetRow() []byte { - return p.Row -} - -func (p *TPut) GetColumnValues() []*TColumnValue { - return p.ColumnValues -} - -var TPut_Timestamp_DEFAULT int64 - -func (p *TPut) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return TPut_Timestamp_DEFAULT - } - return *p.Timestamp -} - -var TPut_Attributes_DEFAULT map[string][]byte - -func (p *TPut) GetAttributes() map[string][]byte { - return p.Attributes -} - -var TPut_Durability_DEFAULT TDurability - -func (p *TPut) GetDurability() TDurability { - if !p.IsSetDurability() { - return TPut_Durability_DEFAULT - } - return *p.Durability -} - -var TPut_CellVisibility_DEFAULT *TCellVisibility - -func (p *TPut) GetCellVisibility() *TCellVisibility { - if !p.IsSetCellVisibility() { - return TPut_CellVisibility_DEFAULT - } - return p.CellVisibility -} -func (p *TPut) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *TPut) IsSetAttributes() bool { - return p.Attributes != nil -} - -func (p *TPut) IsSetDurability() bool { - return p.Durability != nil -} - -func (p *TPut) IsSetCellVisibility() bool { - return p.CellVisibility != nil -} - -func (p *TPut) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRow bool = false - var issetColumnValues bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetRow = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetColumnValues = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 7: - if err := p.readField7(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Row is not set")) - } - if !issetColumnValues { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ColumnValues is not set")) - } - return nil -} - -func (p *TPut) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Row = v - } - 
return nil -} - -func (p *TPut) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TColumnValue, 0, size) - p.ColumnValues = tSlice - for i := 0; i < size; i++ { - _elem5 := &TColumnValue{} - if err := _elem5.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) - } - p.ColumnValues = append(p.ColumnValues, _elem5) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *TPut) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *TPut) readField5(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return thrift.PrependError("error reading map begin: ", err) - } - tMap := make(map[string][]byte, size) - p.Attributes = tMap - for i := 0; i < size; i++ { - var _key6 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _key6 = v - } - var _val7 []byte - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _val7 = v - } - p.Attributes[_key6] = _val7 - } - if err := iprot.ReadMapEnd(); err != nil { - return thrift.PrependError("error reading map end: ", err) - } - return nil -} - -func (p *TPut) readField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - temp := TDurability(v) - p.Durability = &temp - } - return nil -} - -func (p *TPut) readField7(iprot thrift.TProtocol) error { - p.CellVisibility = &TCellVisibility{} - if err := p.CellVisibility.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CellVisibility), err) - } - return nil -} - -func (p *TPut) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TPut"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TPut) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("row", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:row: ", p), err) - } - if err := oprot.WriteBinary(p.Row); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.row (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:row: ", p), err) - } - return err -} - -func (p *TPut) writeField2(oprot thrift.TProtocol) (err error) { - 
if err := oprot.WriteFieldBegin("columnValues", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:columnValues: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ColumnValues)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.ColumnValues { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:columnValues: ", p), err) - } - return err -} - -func (p *TPut) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:timestamp: ", p), err) - } - } - return err -} - -func (p *TPut) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetAttributes() { - if err := oprot.WriteFieldBegin("attributes", thrift.MAP, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:attributes: ", p), err) - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Attributes)); err != nil { - return thrift.PrependError("error writing map begin: ", err) - } - for k, v := range p.Attributes { - if err := oprot.WriteString(string(k)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - if err := oprot.WriteBinary(v); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - } - if err := oprot.WriteMapEnd(); err != nil { - return thrift.PrependError("error writing map end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:attributes: ", p), err) - } - } - return err -} - -func (p *TPut) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetDurability() { - if err := oprot.WriteFieldBegin("durability", thrift.I32, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:durability: ", p), err) - } - if err := oprot.WriteI32(int32(*p.Durability)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.durability (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:durability: ", p), err) - } - } - return err -} - -func (p *TPut) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetCellVisibility() { - if err := oprot.WriteFieldBegin("cellVisibility", thrift.STRUCT, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:cellVisibility: ", p), err) - } - if err := p.CellVisibility.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CellVisibility), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:cellVisibility: ", p), err) - } - } - return err -} - -func (p *TPut) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TPut(%+v)", *p) -} - -// Used to perform Delete operations on a single row. -// -// The scope can be further narrowed down by specifying a list of -// columns or column families as TColumns. -// -// Specifying only a family in a TColumn will delete the whole family. -// If a timestamp is specified all versions with a timestamp less than -// or equal to this will be deleted. If no timestamp is specified the -// current time will be used. -// -// Specifying a family and a column qualifier in a TColumn will delete only -// this qualifier. If a timestamp is specified only versions equal -// to this timestamp will be deleted. If no timestamp is specified the -// most recent version will be deleted. To delete all previous versions, -// specify the DELETE_COLUMNS TDeleteType. -// -// The top level timestamp is only used if a complete row should be deleted -// (i.e. no columns are passed) and if it is specified it works the same way -// as if you had added a TColumn for every column family and this timestamp -// (i.e. all versions older than or equal in all column families will be deleted) -// -// You can specify how this Delete should be written to the write-ahead Log (WAL) -// by changing the durability. If you don't provide durability, it defaults to -// column family's default setting for durability. 
-// -// Attributes: -// - Row -// - Columns -// - Timestamp -// - DeleteType -// - Attributes -// - Durability -type TDelete struct { - Row []byte `thrift:"row,1,required" json:"row"` - Columns []*TColumn `thrift:"columns,2" json:"columns,omitempty"` - Timestamp *int64 `thrift:"timestamp,3" json:"timestamp,omitempty"` - DeleteType TDeleteType `thrift:"deleteType,4" json:"deleteType,omitempty"` - // unused field # 5 - Attributes map[string][]byte `thrift:"attributes,6" json:"attributes,omitempty"` - Durability *TDurability `thrift:"durability,7" json:"durability,omitempty"` -} - -func NewTDelete() *TDelete { - return &TDelete{ - DeleteType: 1, - } -} - -func (p *TDelete) GetRow() []byte { - return p.Row -} - -var TDelete_Columns_DEFAULT []*TColumn - -func (p *TDelete) GetColumns() []*TColumn { - return p.Columns -} - -var TDelete_Timestamp_DEFAULT int64 - -func (p *TDelete) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return TDelete_Timestamp_DEFAULT - } - return *p.Timestamp -} - -var TDelete_DeleteType_DEFAULT TDeleteType = 1 - -func (p *TDelete) GetDeleteType() TDeleteType { - return p.DeleteType -} - -var TDelete_Attributes_DEFAULT map[string][]byte - -func (p *TDelete) GetAttributes() map[string][]byte { - return p.Attributes -} - -var TDelete_Durability_DEFAULT TDurability - -func (p *TDelete) GetDurability() TDurability { - if !p.IsSetDurability() { - return TDelete_Durability_DEFAULT - } - return *p.Durability -} -func (p *TDelete) IsSetColumns() bool { - return p.Columns != nil -} - -func (p *TDelete) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *TDelete) IsSetDeleteType() bool { - return p.DeleteType != TDelete_DeleteType_DEFAULT -} - -func (p *TDelete) IsSetAttributes() bool { - return p.Attributes != nil -} - -func (p *TDelete) IsSetDurability() bool { - return p.Durability != nil -} - -func (p *TDelete) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRow bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetRow = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 7: - if err := p.readField7(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Row is not set")) - } - return nil -} - -func (p *TDelete) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Row = v - } - return nil -} - -func (p *TDelete) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return 
thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TColumn, 0, size) - p.Columns = tSlice - for i := 0; i < size; i++ { - _elem8 := &TColumn{} - if err := _elem8.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) - } - p.Columns = append(p.Columns, _elem8) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *TDelete) readField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.Timestamp = &v - } - return nil -} - -func (p *TDelete) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - temp := TDeleteType(v) - p.DeleteType = temp - } - return nil -} - -func (p *TDelete) readField6(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return thrift.PrependError("error reading map begin: ", err) - } - tMap := make(map[string][]byte, size) - p.Attributes = tMap - for i := 0; i < size; i++ { - var _key9 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _key9 = v - } - var _val10 []byte - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _val10 = v - } - p.Attributes[_key9] = _val10 - } - if err := iprot.ReadMapEnd(); err != nil { - return thrift.PrependError("error reading map end: ", err) - } - return nil -} - -func (p *TDelete) readField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - temp := TDurability(v) - p.Durability = &temp - } - return nil -} - -func (p *TDelete) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TDelete"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TDelete) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("row", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:row: ", p), err) - } - if err := oprot.WriteBinary(p.Row); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.row (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:row: ", p), err) - } - return err -} - -func (p *TDelete) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetColumns() { - if err := oprot.WriteFieldBegin("columns", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 
2:columns: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Columns { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:columns: ", p), err) - } - } - return err -} - -func (p *TDelete) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:timestamp: ", p), err) - } - } - return err -} - -func (p *TDelete) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDeleteType() { - if err := oprot.WriteFieldBegin("deleteType", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:deleteType: ", p), err) - } - if err := oprot.WriteI32(int32(p.DeleteType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.deleteType (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:deleteType: ", p), err) - } - } - return err -} - -func (p *TDelete) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetAttributes() { - if err := oprot.WriteFieldBegin("attributes", thrift.MAP, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:attributes: ", p), err) - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Attributes)); err != nil { - return thrift.PrependError("error writing map begin: ", err) - } - for k, v := range p.Attributes { - if err := oprot.WriteString(string(k)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - if err := oprot.WriteBinary(v); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - } - if err := oprot.WriteMapEnd(); err != nil { - return thrift.PrependError("error writing map end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:attributes: ", p), err) - } - } - return err -} - -func (p *TDelete) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetDurability() { - if err := oprot.WriteFieldBegin("durability", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:durability: ", p), err) - } - if err := oprot.WriteI32(int32(*p.Durability)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.durability (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:durability: ", p), err) - } - } - return err -} - -func (p *TDelete) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TDelete(%+v)", *p) -} - -// Used to perform Increment operations for a single row. -// -// You can specify how this Increment should be written to the write-ahead Log (WAL) -// by changing the durability. If you don't provide durability, it defaults to -// column family's default setting for durability. -// -// Attributes: -// - Row -// - Columns -// - Attributes -// - Durability -// - CellVisibility -type TIncrement struct { - Row []byte `thrift:"row,1,required" json:"row"` - Columns []*TColumnIncrement `thrift:"columns,2,required" json:"columns"` - // unused field # 3 - Attributes map[string][]byte `thrift:"attributes,4" json:"attributes,omitempty"` - Durability *TDurability `thrift:"durability,5" json:"durability,omitempty"` - CellVisibility *TCellVisibility `thrift:"cellVisibility,6" json:"cellVisibility,omitempty"` -} - -func NewTIncrement() *TIncrement { - return &TIncrement{} -} - -func (p *TIncrement) GetRow() []byte { - return p.Row -} - -func (p *TIncrement) GetColumns() []*TColumnIncrement { - return p.Columns -} - -var TIncrement_Attributes_DEFAULT map[string][]byte - -func (p *TIncrement) GetAttributes() map[string][]byte { - return p.Attributes -} - -var TIncrement_Durability_DEFAULT TDurability - -func (p *TIncrement) GetDurability() TDurability { - if !p.IsSetDurability() { - return TIncrement_Durability_DEFAULT - } - return *p.Durability -} - -var TIncrement_CellVisibility_DEFAULT *TCellVisibility - -func (p *TIncrement) GetCellVisibility() *TCellVisibility { - if !p.IsSetCellVisibility() { - return TIncrement_CellVisibility_DEFAULT - } - return p.CellVisibility -} -func (p *TIncrement) IsSetAttributes() bool { - return p.Attributes != nil -} - -func (p *TIncrement) IsSetDurability() bool { - return p.Durability != nil -} - -func (p *TIncrement) IsSetCellVisibility() bool { - return p.CellVisibility != nil -} - -func (p *TIncrement) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRow bool = false - var issetColumns bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetRow = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetColumns = true - case 4: - 
if err := p.readField4(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Row is not set")) - } - if !issetColumns { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Columns is not set")) - } - return nil -} - -func (p *TIncrement) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Row = v - } - return nil -} - -func (p *TIncrement) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TColumnIncrement, 0, size) - p.Columns = tSlice - for i := 0; i < size; i++ { - _elem11 := &TColumnIncrement{ - Amount: 1, - } - if err := _elem11.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem11), err) - } - p.Columns = append(p.Columns, _elem11) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *TIncrement) readField4(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return thrift.PrependError("error reading map begin: ", err) - } - tMap := make(map[string][]byte, size) - p.Attributes = tMap - for i := 0; i < size; i++ { - var _key12 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _key12 = v - } - var _val13 []byte - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _val13 = v - } - p.Attributes[_key12] = _val13 - } - if err := iprot.ReadMapEnd(); err != nil { - return thrift.PrependError("error reading map end: ", err) - } - return nil -} - -func (p *TIncrement) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - temp := TDurability(v) - p.Durability = &temp - } - return nil -} - -func (p *TIncrement) readField6(iprot thrift.TProtocol) error { - p.CellVisibility = &TCellVisibility{} - if err := p.CellVisibility.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CellVisibility), err) - } - return nil -} - -func (p *TIncrement) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TIncrement"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write 
field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TIncrement) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("row", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:row: ", p), err) - } - if err := oprot.WriteBinary(p.Row); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.row (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:row: ", p), err) - } - return err -} - -func (p *TIncrement) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("columns", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:columns: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Columns { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:columns: ", p), err) - } - return err -} - -func (p *TIncrement) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetAttributes() { - if err := oprot.WriteFieldBegin("attributes", thrift.MAP, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:attributes: ", p), err) - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Attributes)); err != nil { - return thrift.PrependError("error writing map begin: ", err) - } - for k, v := range p.Attributes { - if err := oprot.WriteString(string(k)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - if err := oprot.WriteBinary(v); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - } - if err := oprot.WriteMapEnd(); err != nil { - return thrift.PrependError("error writing map end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:attributes: ", p), err) - } - } - return err -} - -func (p *TIncrement) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetDurability() { - if err := oprot.WriteFieldBegin("durability", thrift.I32, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:durability: ", p), err) - } - if err := oprot.WriteI32(int32(*p.Durability)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.durability (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:durability: ", p), err) - } - } - return err -} - -func (p *TIncrement) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetCellVisibility() { - if err := oprot.WriteFieldBegin("cellVisibility", thrift.STRUCT, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:cellVisibility: ", p), err) - } - if err := p.CellVisibility.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CellVisibility), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:cellVisibility: ", p), err) - } - } - return err -} - -func (p *TIncrement) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TIncrement(%+v)", *p) -} - -// Attributes: -// - Row -// - Columns -// - Attributes -// - Durability -// - CellVisibility -type TAppend struct { - Row []byte `thrift:"row,1,required" json:"row"` - Columns []*TColumnValue `thrift:"columns,2,required" json:"columns"` - Attributes map[string][]byte `thrift:"attributes,3" json:"attributes,omitempty"` - Durability *TDurability `thrift:"durability,4" json:"durability,omitempty"` - CellVisibility *TCellVisibility `thrift:"cellVisibility,5" json:"cellVisibility,omitempty"` -} - -func NewTAppend() *TAppend { - return &TAppend{} -} - -func (p *TAppend) GetRow() []byte { - return p.Row -} - -func (p *TAppend) GetColumns() []*TColumnValue { - return p.Columns -} - -var TAppend_Attributes_DEFAULT map[string][]byte - -func (p *TAppend) GetAttributes() map[string][]byte { - return p.Attributes -} - -var TAppend_Durability_DEFAULT TDurability - -func (p *TAppend) GetDurability() TDurability { - if !p.IsSetDurability() { - return TAppend_Durability_DEFAULT - } - return *p.Durability -} - -var TAppend_CellVisibility_DEFAULT *TCellVisibility - -func (p *TAppend) GetCellVisibility() *TCellVisibility { - if !p.IsSetCellVisibility() { - return TAppend_CellVisibility_DEFAULT - } - return p.CellVisibility -} -func (p *TAppend) IsSetAttributes() bool { - return p.Attributes != nil -} - -func (p *TAppend) IsSetDurability() bool { - return p.Durability != nil -} - -func (p *TAppend) IsSetCellVisibility() bool { - return p.CellVisibility != nil -} - -func (p *TAppend) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRow bool = false - var issetColumns bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if 
fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetRow = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetColumns = true - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Row is not set")) - } - if !issetColumns { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Columns is not set")) - } - return nil -} - -func (p *TAppend) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Row = v - } - return nil -} - -func (p *TAppend) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TColumnValue, 0, size) - p.Columns = tSlice - for i := 0; i < size; i++ { - _elem14 := &TColumnValue{} - if err := _elem14.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem14), err) - } - p.Columns = append(p.Columns, _elem14) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *TAppend) readField3(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return thrift.PrependError("error reading map begin: ", err) - } - tMap := make(map[string][]byte, size) - p.Attributes = tMap - for i := 0; i < size; i++ { - var _key15 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _key15 = v - } - var _val16 []byte - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _val16 = v - } - p.Attributes[_key15] = _val16 - } - if err := iprot.ReadMapEnd(); err != nil { - return thrift.PrependError("error reading map end: ", err) - } - return nil -} - -func (p *TAppend) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - temp := TDurability(v) - p.Durability = &temp - } - return nil -} - -func (p *TAppend) readField5(iprot thrift.TProtocol) error { - p.CellVisibility = &TCellVisibility{} - if err := p.CellVisibility.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CellVisibility), err) - } - return nil -} - -func (p *TAppend) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TAppend"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - 
if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TAppend) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("row", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:row: ", p), err) - } - if err := oprot.WriteBinary(p.Row); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.row (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:row: ", p), err) - } - return err -} - -func (p *TAppend) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("columns", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:columns: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Columns { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:columns: ", p), err) - } - return err -} - -func (p *TAppend) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetAttributes() { - if err := oprot.WriteFieldBegin("attributes", thrift.MAP, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:attributes: ", p), err) - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Attributes)); err != nil { - return thrift.PrependError("error writing map begin: ", err) - } - for k, v := range p.Attributes { - if err := oprot.WriteString(string(k)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - if err := oprot.WriteBinary(v); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - } - if err := oprot.WriteMapEnd(); err != nil { - return thrift.PrependError("error writing map end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:attributes: ", p), err) - } - } - return err -} - -func (p *TAppend) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetDurability() { - if err := oprot.WriteFieldBegin("durability", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:durability: ", p), err) - } - if err := oprot.WriteI32(int32(*p.Durability)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.durability (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:durability: ", p), err) - } - } - return err -} - -func (p *TAppend) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetCellVisibility() { - if err := oprot.WriteFieldBegin("cellVisibility", thrift.STRUCT, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:cellVisibility: ", p), err) - } - if err := p.CellVisibility.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CellVisibility), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:cellVisibility: ", p), err) - } - } - return err -} - -func (p *TAppend) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TAppend(%+v)", *p) -} - -// Any timestamps in the columns are ignored, use timeRange to select by timestamp. -// Max versions defaults to 1. -// -// Attributes: -// - StartRow -// - StopRow -// - Columns -// - Caching -// - MaxVersions -// - TimeRange -// - FilterString -// - BatchSize -// - Attributes -// - Authorizations -// - Reversed -type TScan struct { - StartRow []byte `thrift:"startRow,1" json:"startRow,omitempty"` - StopRow []byte `thrift:"stopRow,2" json:"stopRow,omitempty"` - Columns []*TColumn `thrift:"columns,3" json:"columns,omitempty"` - Caching *int32 `thrift:"caching,4" json:"caching,omitempty"` - MaxVersions int32 `thrift:"maxVersions,5" json:"maxVersions,omitempty"` - TimeRange *TTimeRange `thrift:"timeRange,6" json:"timeRange,omitempty"` - FilterString []byte `thrift:"filterString,7" json:"filterString,omitempty"` - BatchSize *int32 `thrift:"batchSize,8" json:"batchSize,omitempty"` - Attributes map[string][]byte `thrift:"attributes,9" json:"attributes,omitempty"` - Authorizations *TAuthorization `thrift:"authorizations,10" json:"authorizations,omitempty"` - Reversed *bool `thrift:"reversed,11" json:"reversed,omitempty"` -} - -func NewTScan() *TScan { - return &TScan{ - MaxVersions: 1, - } -} - -var TScan_StartRow_DEFAULT []byte - -func (p *TScan) GetStartRow() []byte { - return p.StartRow -} - -var TScan_StopRow_DEFAULT []byte - -func (p *TScan) GetStopRow() []byte { - return p.StopRow -} - -var TScan_Columns_DEFAULT []*TColumn - -func (p *TScan) GetColumns() []*TColumn { - return p.Columns -} - -var TScan_Caching_DEFAULT int32 - -func (p *TScan) GetCaching() int32 { - if !p.IsSetCaching() { - return TScan_Caching_DEFAULT - } - return *p.Caching -} - -var TScan_MaxVersions_DEFAULT int32 = 1 - -func (p *TScan) GetMaxVersions() int32 { - return p.MaxVersions -} - -var TScan_TimeRange_DEFAULT *TTimeRange - -func (p *TScan) GetTimeRange() *TTimeRange { - if !p.IsSetTimeRange() { - 
return TScan_TimeRange_DEFAULT - } - return p.TimeRange -} - -var TScan_FilterString_DEFAULT []byte - -func (p *TScan) GetFilterString() []byte { - return p.FilterString -} - -var TScan_BatchSize_DEFAULT int32 - -func (p *TScan) GetBatchSize() int32 { - if !p.IsSetBatchSize() { - return TScan_BatchSize_DEFAULT - } - return *p.BatchSize -} - -var TScan_Attributes_DEFAULT map[string][]byte - -func (p *TScan) GetAttributes() map[string][]byte { - return p.Attributes -} - -var TScan_Authorizations_DEFAULT *TAuthorization - -func (p *TScan) GetAuthorizations() *TAuthorization { - if !p.IsSetAuthorizations() { - return TScan_Authorizations_DEFAULT - } - return p.Authorizations -} - -var TScan_Reversed_DEFAULT bool - -func (p *TScan) GetReversed() bool { - if !p.IsSetReversed() { - return TScan_Reversed_DEFAULT - } - return *p.Reversed -} -func (p *TScan) IsSetStartRow() bool { - return p.StartRow != nil -} - -func (p *TScan) IsSetStopRow() bool { - return p.StopRow != nil -} - -func (p *TScan) IsSetColumns() bool { - return p.Columns != nil -} - -func (p *TScan) IsSetCaching() bool { - return p.Caching != nil -} - -func (p *TScan) IsSetMaxVersions() bool { - return p.MaxVersions != TScan_MaxVersions_DEFAULT -} - -func (p *TScan) IsSetTimeRange() bool { - return p.TimeRange != nil -} - -func (p *TScan) IsSetFilterString() bool { - return p.FilterString != nil -} - -func (p *TScan) IsSetBatchSize() bool { - return p.BatchSize != nil -} - -func (p *TScan) IsSetAttributes() bool { - return p.Attributes != nil -} - -func (p *TScan) IsSetAuthorizations() bool { - return p.Authorizations != nil -} - -func (p *TScan) IsSetReversed() bool { - return p.Reversed != nil -} - -func (p *TScan) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - case 3: - if err := p.readField3(iprot); err != nil { - return err - } - case 4: - if err := p.readField4(iprot); err != nil { - return err - } - case 5: - if err := p.readField5(iprot); err != nil { - return err - } - case 6: - if err := p.readField6(iprot); err != nil { - return err - } - case 7: - if err := p.readField7(iprot); err != nil { - return err - } - case 8: - if err := p.readField8(iprot); err != nil { - return err - } - case 9: - if err := p.readField9(iprot); err != nil { - return err - } - case 10: - if err := p.readField10(iprot); err != nil { - return err - } - case 11: - if err := p.readField11(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *TScan) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.StartRow = v - } - return nil -} - -func (p *TScan) readField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return 
thrift.PrependError("error reading field 2: ", err) - } else { - p.StopRow = v - } - return nil -} - -func (p *TScan) readField3(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TColumn, 0, size) - p.Columns = tSlice - for i := 0; i < size; i++ { - _elem17 := &TColumn{} - if err := _elem17.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) - } - p.Columns = append(p.Columns, _elem17) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *TScan) readField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.Caching = &v - } - return nil -} - -func (p *TScan) readField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.MaxVersions = v - } - return nil -} - -func (p *TScan) readField6(iprot thrift.TProtocol) error { - p.TimeRange = &TTimeRange{} - if err := p.TimeRange.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TimeRange), err) - } - return nil -} - -func (p *TScan) readField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.FilterString = v - } - return nil -} - -func (p *TScan) readField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.BatchSize = &v - } - return nil -} - -func (p *TScan) readField9(iprot thrift.TProtocol) error { - _, _, size, err := iprot.ReadMapBegin() - if err != nil { - return thrift.PrependError("error reading map begin: ", err) - } - tMap := make(map[string][]byte, size) - p.Attributes = tMap - for i := 0; i < size; i++ { - var _key18 string - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _key18 = v - } - var _val19 []byte - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 0: ", err) - } else { - _val19 = v - } - p.Attributes[_key18] = _val19 - } - if err := iprot.ReadMapEnd(); err != nil { - return thrift.PrependError("error reading map end: ", err) - } - return nil -} - -func (p *TScan) readField10(iprot thrift.TProtocol) error { - p.Authorizations = &TAuthorization{} - if err := p.Authorizations.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Authorizations), err) - } - return nil -} - -func (p *TScan) readField11(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 11: ", err) - } else { - p.Reversed = &v - } - return nil -} - -func (p *TScan) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TScan"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := 
p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - if err := p.writeField10(oprot); err != nil { - return err - } - if err := p.writeField11(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TScan) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetStartRow() { - if err := oprot.WriteFieldBegin("startRow", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:startRow: ", p), err) - } - if err := oprot.WriteBinary(p.StartRow); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.startRow (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:startRow: ", p), err) - } - } - return err -} - -func (p *TScan) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetStopRow() { - if err := oprot.WriteFieldBegin("stopRow", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:stopRow: ", p), err) - } - if err := oprot.WriteBinary(p.StopRow); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.stopRow (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:stopRow: ", p), err) - } - } - return err -} - -func (p *TScan) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetColumns() { - if err := oprot.WriteFieldBegin("columns", thrift.LIST, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:columns: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Columns)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Columns { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:columns: ", p), err) - } - } - return err -} - -func (p *TScan) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetCaching() { - if err := oprot.WriteFieldBegin("caching", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:caching: ", p), err) - } - if err := oprot.WriteI32(int32(*p.Caching)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.caching (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:caching: ", p), err) - } - } - return err -} - -func (p *TScan) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetMaxVersions() { - if err := oprot.WriteFieldBegin("maxVersions", thrift.I32, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:maxVersions: ", p), err) - } - if err := 
oprot.WriteI32(int32(p.MaxVersions)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxVersions (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:maxVersions: ", p), err) - } - } - return err -} - -func (p *TScan) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetTimeRange() { - if err := oprot.WriteFieldBegin("timeRange", thrift.STRUCT, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:timeRange: ", p), err) - } - if err := p.TimeRange.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TimeRange), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:timeRange: ", p), err) - } - } - return err -} - -func (p *TScan) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetFilterString() { - if err := oprot.WriteFieldBegin("filterString", thrift.STRING, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:filterString: ", p), err) - } - if err := oprot.WriteBinary(p.FilterString); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.filterString (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:filterString: ", p), err) - } - } - return err -} - -func (p *TScan) writeField8(oprot thrift.TProtocol) (err error) { - if p.IsSetBatchSize() { - if err := oprot.WriteFieldBegin("batchSize", thrift.I32, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:batchSize: ", p), err) - } - if err := oprot.WriteI32(int32(*p.BatchSize)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.batchSize (8) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:batchSize: ", p), err) - } - } - return err -} - -func (p *TScan) writeField9(oprot thrift.TProtocol) (err error) { - if p.IsSetAttributes() { - if err := oprot.WriteFieldBegin("attributes", thrift.MAP, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:attributes: ", p), err) - } - if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Attributes)); err != nil { - return thrift.PrependError("error writing map begin: ", err) - } - for k, v := range p.Attributes { - if err := oprot.WriteString(string(k)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) - } - if err := oprot.WriteBinary(v); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) - } - } - if err := oprot.WriteMapEnd(); err != nil { - return thrift.PrependError("error writing map end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:attributes: ", p), err) - } - } - return err -} - -func (p *TScan) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetAuthorizations() { - if err := oprot.WriteFieldBegin("authorizations", thrift.STRUCT, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:authorizations: ", p), err) - } - if err := p.Authorizations.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Authorizations), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:authorizations: ", p), err) - } - } - return err -} - -func (p *TScan) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetReversed() { - if err := oprot.WriteFieldBegin("reversed", thrift.BOOL, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:reversed: ", p), err) - } - if err := oprot.WriteBool(bool(*p.Reversed)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.reversed (11) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:reversed: ", p), err) - } - } - return err -} - -func (p *TScan) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TScan(%+v)", *p) -} - -// Atomic mutation for the specified row. It can be either Put or Delete. -// -// Attributes: -// - Put -// - DeleteSingle -type TMutation struct { - Put *TPut `thrift:"put,1" json:"put,omitempty"` - DeleteSingle *TDelete `thrift:"deleteSingle,2" json:"deleteSingle,omitempty"` -} - -func NewTMutation() *TMutation { - return &TMutation{} -} - -var TMutation_Put_DEFAULT *TPut - -func (p *TMutation) GetPut() *TPut { - if !p.IsSetPut() { - return TMutation_Put_DEFAULT - } - return p.Put -} - -var TMutation_DeleteSingle_DEFAULT *TDelete - -func (p *TMutation) GetDeleteSingle() *TDelete { - if !p.IsSetDeleteSingle() { - return TMutation_DeleteSingle_DEFAULT - } - return p.DeleteSingle -} -func (p *TMutation) CountSetFieldsTMutation() int { - count := 0 - if p.IsSetPut() { - count++ - } - if p.IsSetDeleteSingle() { - count++ - } - return count - -} - -func (p *TMutation) IsSetPut() bool { - return p.Put != nil -} - -func (p *TMutation) IsSetDeleteSingle() bool { - return p.DeleteSingle != nil -} - -func (p *TMutation) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *TMutation) readField1(iprot 
thrift.TProtocol) error { - p.Put = &TPut{} - if err := p.Put.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Put), err) - } - return nil -} - -func (p *TMutation) readField2(iprot thrift.TProtocol) error { - p.DeleteSingle = &TDelete{ - DeleteType: 1, - } - if err := p.DeleteSingle.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DeleteSingle), err) - } - return nil -} - -func (p *TMutation) Write(oprot thrift.TProtocol) error { - if c := p.CountSetFieldsTMutation(); c != 1 { - return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) - } - if err := oprot.WriteStructBegin("TMutation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TMutation) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetPut() { - if err := oprot.WriteFieldBegin("put", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:put: ", p), err) - } - if err := p.Put.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Put), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:put: ", p), err) - } - } - return err -} - -func (p *TMutation) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetDeleteSingle() { - if err := oprot.WriteFieldBegin("deleteSingle", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:deleteSingle: ", p), err) - } - if err := p.DeleteSingle.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DeleteSingle), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:deleteSingle: ", p), err) - } - } - return err -} - -func (p *TMutation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TMutation(%+v)", *p) -} - -// A TRowMutations object is used to apply a number of Mutations to a single row. 
-// -// Attributes: -// - Row -// - Mutations -type TRowMutations struct { - Row []byte `thrift:"row,1,required" json:"row"` - Mutations []*TMutation `thrift:"mutations,2,required" json:"mutations"` -} - -func NewTRowMutations() *TRowMutations { - return &TRowMutations{} -} - -func (p *TRowMutations) GetRow() []byte { - return p.Row -} - -func (p *TRowMutations) GetMutations() []*TMutation { - return p.Mutations -} -func (p *TRowMutations) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRow bool = false - var issetMutations bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - issetRow = true - case 2: - if err := p.readField2(iprot); err != nil { - return err - } - issetMutations = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Row is not set")) - } - if !issetMutations { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Mutations is not set")) - } - return nil -} - -func (p *TRowMutations) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Row = v - } - return nil -} - -func (p *TRowMutations) readField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*TMutation, 0, size) - p.Mutations = tSlice - for i := 0; i < size; i++ { - _elem20 := &TMutation{} - if err := _elem20.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem20), err) - } - p.Mutations = append(p.Mutations, _elem20) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *TRowMutations) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TRowMutations"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TRowMutations) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("row", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:row: ", p), err) - } - if err := oprot.WriteBinary(p.Row); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.row (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T write field end error 1:row: ", p), err) - } - return err -} - -func (p *TRowMutations) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("mutations", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:mutations: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Mutations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Mutations { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:mutations: ", p), err) - } - return err -} - -func (p *TRowMutations) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TRowMutations(%+v)", *p) -} - -// A TIOError exception signals that an error occurred communicating -// to the HBase master or a HBase region server. Also used to return -// more general HBase error conditions. -// -// Attributes: -// - Message -type TIOError struct { - Message *string `thrift:"message,1" json:"message,omitempty"` -} - -func NewTIOError() *TIOError { - return &TIOError{} -} - -var TIOError_Message_DEFAULT string - -func (p *TIOError) GetMessage() string { - if !p.IsSetMessage() { - return TIOError_Message_DEFAULT - } - return *p.Message -} -func (p *TIOError) IsSetMessage() bool { - return p.Message != nil -} - -func (p *TIOError) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *TIOError) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Message = &v - } - return nil -} - -func (p *TIOError) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TIOError"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TIOError) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetMessage() { - if err := oprot.WriteFieldBegin("message", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:message: ", p), err) - } - if err := oprot.WriteString(string(*p.Message)); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.message (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:message: ", p), err) - } - } - return err -} - -func (p *TIOError) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TIOError(%+v)", *p) -} - -func (p *TIOError) Error() string { - return p.String() -} - -// A TIllegalArgument exception indicates an illegal or invalid -// argument was passed into a procedure. -// -// Attributes: -// - Message -type TIllegalArgument struct { - Message *string `thrift:"message,1" json:"message,omitempty"` -} - -func NewTIllegalArgument() *TIllegalArgument { - return &TIllegalArgument{} -} - -var TIllegalArgument_Message_DEFAULT string - -func (p *TIllegalArgument) GetMessage() string { - if !p.IsSetMessage() { - return TIllegalArgument_Message_DEFAULT - } - return *p.Message -} -func (p *TIllegalArgument) IsSetMessage() bool { - return p.Message != nil -} - -func (p *TIllegalArgument) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *TIllegalArgument) readField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Message = &v - } - return nil -} - -func (p *TIllegalArgument) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("TIllegalArgument"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *TIllegalArgument) writeField1(oprot thrift.TProtocol) (err error) { - if p.IsSetMessage() { - if err := oprot.WriteFieldBegin("message", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:message: ", p), err) - } - if err := oprot.WriteString(string(*p.Message)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.message (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:message: ", p), err) - } - } - return err -} - -func (p *TIllegalArgument) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TIllegalArgument(%+v)", *p) -} - -func (p *TIllegalArgument) Error() string { - return p.String() -} diff --git a/directory/hbase/hpool.go b/directory/hbase/hpool.go deleted file mode 100644 index bf0f807..0000000 --- a/directory/hbase/hpool.go +++ /dev/null @@ -1,42 
+0,0 @@ -package hbase - -import ( - "bfs/directory/conf" - "bfs/directory/hbase/hbasethrift" - "git.apache.org/thrift.git/lib/go/thrift" - log "github.com/golang/glog" -) - -var ( - hbasePool *Pool - config *conf.Config -) - -func Init(config *conf.Config) error { - config = config - // init hbase thrift pool - hbasePool = New(func() (c *hbasethrift.THBaseServiceClient, err error) { - var trans thrift.TTransport - if trans, err = thrift.NewTSocketTimeout(config.HBase.Addr, config.HBase.Timeout.Duration); err != nil { - log.Error("thrift.NewTSocketTimeout error(%v)", err) - return - } - trans = thrift.NewTFramedTransport(trans) - c = hbasethrift.NewTHBaseServiceClientFactory(trans, thrift.NewTBinaryProtocolFactoryDefault()) - if err = trans.Open(); err != nil { - log.Error("trans.Open error(%v)", err) - } - return - }, func(c *hbasethrift.THBaseServiceClient) error { - if c != nil && c.Transport != nil { - c.Transport.Close() - } - return nil - }, config.HBase.MaxIdle) - hbasePool.MaxActive = config.HBase.MaxActive - hbasePool.IdleTimeout = config.HBase.LvsTimeout.Duration - return nil -} - -func Close() { -} diff --git a/directory/hbase/needle.go b/directory/hbase/needle.go new file mode 100644 index 0000000..439b844 --- /dev/null +++ b/directory/hbase/needle.go @@ -0,0 +1,141 @@ +package hbase + +import ( + "bfs/libs/errors" + "bfs/libs/gohbase/hrpc" + "bfs/libs/meta" + "bytes" + "context" + "crypto/sha1" + "encoding/binary" + + log "github.com/golang/glog" +) + +var ( + _table = []byte("bfsmeta") + + _familyBasic = "basic" // basic store info column family + _columnVid = "vid" + _columnCookie = "cookie" + _columnUpdateTime = "update_time" +) + +func (c *Client) delNeedle(key int64) (err error) { + var ( + mutate *hrpc.Mutate + ) + if mutate, err = hrpc.NewDel(context.Background(), _table, c.key(key), nil); err != nil { + log.Errorf("Client.delNeedle.NewDel(%v) error:%v", key, err.Error()) + return + } + if _, err = c.c.Delete(mutate); err != nil { + log.Errorf("Client.delNeedle.Delete(%v) error:%v", key, err.Error()) + } + return +} + +func (c *Client) putNeedle(n *meta.Needle) (err error) { + var ( + mutate *hrpc.Mutate + vbuf = make([]byte, 4) + cbuf = make([]byte, 4) + ubuf = make([]byte, 8) + exist bool + ) + if exist, err = c.existNeedle(n.Key); err != nil { + return + } + if exist { + err = errors.ErrNeedleExist + return + } + binary.BigEndian.PutUint32(vbuf, uint32(n.Vid)) + binary.BigEndian.PutUint32(cbuf, uint32(n.Cookie)) + binary.BigEndian.PutUint64(ubuf, uint64(n.MTime)) + values := map[string]map[string][]byte{ + _familyBasic: map[string][]byte{ + _columnVid: vbuf, + _columnCookie: cbuf, + _columnUpdateTime: ubuf, + }, + } + if mutate, err = hrpc.NewPut(context.Background(), _table, c.key(n.Key), values); err != nil { + log.Errorf("Client.putNeedle.NewPut(%v) error:%v", n.Key, err.Error()) + return + } + if _, err = c.c.Put(mutate); err != nil { + log.Errorf("Client.putNeedle.Put(%v) error:%v", n.Key, err.Error()) + } + return +} + +func (c *Client) existNeedle(key int64) (exist bool, err error) { + var ( + getter *hrpc.Get + result *hrpc.Result + ) + if getter, err = hrpc.NewGet(context.Background(), _table, c.key(key)); err != nil { + log.Errorf("Client.existNeedle.NewGet(%v) error:%v", key, err.Error()) + return + } + result, err = c.c.Get(getter) + if err != nil { + log.Errorf("Client.existNeedle.Get(%v) error:%v", key, err.Error()) + return + } + if result == nil || len(result.Cells) == 0 { + return + } + exist = true + return +} + +func (c *Client) getNeedle(key 
int64) (n *meta.Needle, err error) { + var ( + getter *hrpc.Get + result *hrpc.Result + ) + if getter, err = hrpc.NewGet(context.Background(), _table, c.key(key)); err != nil { + log.Errorf("Client.getNeedle.NewGet(%v) error:%v", key, err.Error()) + return + } + result, err = c.c.Get(getter) + if err != nil { + log.Errorf("Client.getNeedle.Get(%v) error:%v", key, err.Error()) + return + } + if result == nil || len(result.Cells) == 0 { + err = errors.ErrNeedleNotExist + return + } + n = &meta.Needle{ + Key: key, + } + for _, cell := range result.Cells { + if cell == nil { + continue + } + if bytes.Equal(cell.Family, []byte(_familyBasic)) { + if bytes.Equal(cell.Qualifier, []byte(_columnVid)) { + n.Vid = int32(binary.BigEndian.Uint32(cell.Value)) + } else if bytes.Equal(cell.Qualifier, []byte(_columnCookie)) { + n.Cookie = int32(binary.BigEndian.Uint32(cell.Value)) + } else if bytes.Equal(cell.Qualifier, []byte(_columnUpdateTime)) { + n.MTime = int64(binary.BigEndian.Uint64(cell.Value)) + } + } + } + return +} + +func (c *Client) key(key int64) []byte { + var ( + sb [sha1.Size]byte + b []byte + ) + b = make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(key)) + sb = sha1.Sum(b) + return sb[:] +} diff --git a/directory/hbase/needle_test.go b/directory/hbase/needle_test.go new file mode 100644 index 0000000..bf3eb1c --- /dev/null +++ b/directory/hbase/needle_test.go @@ -0,0 +1,36 @@ +package hbase + +import ( + "bfs/libs/meta" + "testing" + "time" +) + +func TestPutNeedle(t *testing.T) { + c := getClient() + if err := c.putNeedle(&meta.Needle{ + Key: 1234567, + Cookie: 1111111, + Vid: 1, + MTime: time.Now().Unix(), + }); err != nil { + t.Fatalf("err:%v", err.Error()) + } + +} + +func TestGetNeedle(t *testing.T) { + c := getClient() + mn, err := c.getNeedle(1234567) + if err != nil { + t.Fatalf("err:%v", err.Error()) + } + t.Logf("mn:%v", mn) +} + +func TestDelNeedle(t *testing.T) { + c := getClient() + if err := c.delNeedle(1234567); err != nil { + t.Fatalf("err:%v", err.Error()) + } +} diff --git a/directory/hbase/pool.go b/directory/hbase/pool.go deleted file mode 100644 index f415b1e..0000000 --- a/directory/hbase/pool.go +++ /dev/null @@ -1,245 +0,0 @@ -package hbase - -import ( - "bfs/directory/hbase/hbasethrift" - "container/list" - "errors" - "sync" - "time" -) - -var nowFunc = time.Now // for testing - -// ErrPoolExhausted is returned from a pool connection Get method when the -// maximum number of database connections in the pool has been reached. -var ErrPoolExhausted = errors.New("pool: connection pool exhausted") - -// ErrPoolClosed is returned from a pool connection Get method when the -// pool closed. -var ErrPoolClosed = errors.New("pool: get on closed pool") - -// Pool maintains a pool of connections. The application calls the Get method -// to get a connection from the pool and the connection's Close method to -// return the connection's resources to the pool. -// -// The following example shows how to use a pool in a web application. The -// application creates a pool at application startup and makes it available to -// request handlers using a global variable. 
-// -// func newPool() *pool.Pool { -// return &pool.Pool{ -// MaxIdle: 3, -// IdleTimeout: 240 * time.Second, -// Dial: func () (redis.Conn, error) { -// c, err := redis.Dial("tcp", server) -// if err != nil { -// return nil, err -// } -// return c, err -// }, -// TestOnBorrow: func(c redis.Conn, t time.Time) error { -// _, err := c.Do("PING") -// return err -// }, -// } -// } -// -// var ( -// p *pool.Pool -// ) -// -// func main() { -// flag.Parse() -// p = newPool() -// ... -// } -// -// A request handler gets a connection from the pool and closes the connection -// when the handler is done: -// -// func serveHome(w http.ResponseWriter, r *http.Request) { -// conn, err := p.Get() -// if err != nil { -// defer p.Put(conn) -// } -// .... -// } -// -// thrift exmples. -// p = &pool.Pool{ -// Dail: func() (interface{}, error) { -// sock, err := thrift.NewTSocketTimeout(":1011", 15*time.Second) -// if err != nil { -// return nil, err -// } -// tF := thrift.NewTFramedTransportFactory(thrift.NewTTransportFactory()) -// pF := thrift.NewTBinaryProtocolFactoryDefault() -// client := testRpc.NewRpcServiceClientFactory(tF.GetTransport(sock), pF) -// client.Transport.Open() -// return client, nil -// }, -// Close: func(c interface{}) error { -// return c.(*testRpc.RpcServiceClient).Transport.Close() -// }, -// MaxActive: 2, -// MaxIdle: 3, -// //IdleTimeout: 1 * time.Second, -// -type Pool struct { - - // Dial is an application supplied function for creating new connections. - Dial func() (*hbasethrift.THBaseServiceClient, error) - - // Close is an application supplied functoin for closeing connections. - Close func(c *hbasethrift.THBaseServiceClient) error - - // TestOnBorrow is an optional application supplied function for checking - // the health of an idle connection before the connection is used again by - // the application. Argument t is the time that the connection was returned - // to the pool. If the function returns an error, then the connection is - // closed. - TestOnBorrow func(c *hbasethrift.THBaseServiceClient, t time.Time) error - - // Maximum number of idle connections in the pool. - MaxIdle int - - // Maximum number of connections allocated by the pool at a given time. - // When zero, there is no limit on the number of connections in the pool. - MaxActive int - - // Close connections after remaining idle for this duration. If the value - // is zero, then idle connections are not closed. Applications should set - // the timeout to a value less than the server's timeout. - IdleTimeout time.Duration - - // mu protects fields defined below. - mu sync.Mutex - closed bool - active int - - // Stack of idleConn with most recently used at the front. - idle list.List -} - -type idleConn struct { - c *hbasethrift.THBaseServiceClient - t time.Time -} - -// New creates a new pool. This function is deprecated. Applications should -// initialize the Pool fields directly as shown in example. -func New(dialFn func() (*hbasethrift.THBaseServiceClient, error), closeFn func(c *hbasethrift.THBaseServiceClient) error, maxIdle int) *Pool { - return &Pool{Dial: dialFn, Close: closeFn, MaxIdle: maxIdle} -} - -// Get gets a connection. The application must close the returned connection. -// This method always returns a valid connection so that applications can defer -// error handling to the first use of the connection. -func (p *Pool) Get() (*hbasethrift.THBaseServiceClient, error) { - p.mu.Lock() - // if closed - if p.closed { - p.mu.Unlock() - return nil, ErrPoolClosed - } - // Prune stale connections. 
- if timeout := p.IdleTimeout; timeout > 0 { - for i, n := 0, p.idle.Len(); i < n; i++ { - e := p.idle.Back() - if e == nil { - break - } - ic := e.Value.(idleConn) - if ic.t.Add(timeout).After(nowFunc()) { - break - } - p.idle.Remove(e) - p.active -= 1 - p.mu.Unlock() - // ic.c.Close() - p.Close(ic.c) - p.mu.Lock() - } - } - // Get idle connection. - for i, n := 0, p.idle.Len(); i < n; i++ { - e := p.idle.Front() - if e == nil { - break - } - ic := e.Value.(idleConn) - p.idle.Remove(e) - test := p.TestOnBorrow - p.mu.Unlock() - if test == nil || test(ic.c, ic.t) == nil { - return ic.c, nil - } - // ic.c.Close() - p.Close(ic.c) - p.mu.Lock() - p.active -= 1 - } - if p.MaxActive > 0 && p.active >= p.MaxActive { - p.mu.Unlock() - return nil, ErrPoolExhausted - } - // No idle connection, create new. - dial := p.Dial - p.active += 1 - p.mu.Unlock() - c, err := dial() - if err != nil { - p.mu.Lock() - p.active -= 1 - p.mu.Unlock() - c = nil - } - return c, err -} - -// Put adds conn back to the pool, use forceClose to close the connection forcely -func (p *Pool) Put(c *hbasethrift.THBaseServiceClient, forceClose bool) error { - if !forceClose { - p.mu.Lock() - if !p.closed { - p.idle.PushFront(idleConn{t: nowFunc(), c: c}) - if p.idle.Len() > p.MaxIdle { - // remove exceed conn - c = p.idle.Remove(p.idle.Back()).(idleConn).c - } else { - c = nil - } - } - p.mu.Unlock() - } - // close exceed conn - if c != nil { - p.mu.Lock() - p.active -= 1 - p.mu.Unlock() - return p.Close(c) - } - return nil -} - -// ActiveCount returns the number of active connections in the pool. -func (p *Pool) ActiveCount() int { - p.mu.Lock() - active := p.active - p.mu.Unlock() - return active -} - -// Relaase releases the resources used by the pool. -func (p *Pool) Release() error { - p.mu.Lock() - idle := p.idle - p.idle.Init() - p.closed = true - p.active -= idle.Len() - p.mu.Unlock() - for e := idle.Front(); e != nil; e = e.Next() { - p.Close(e.Value.(idleConn).c) - } - return nil -} diff --git a/directory/http_api_test.go b/directory/http_api_test.go index b4cbd03..e121cac 100644 --- a/directory/http_api_test.go +++ b/directory/http_api_test.go @@ -8,32 +8,37 @@ import ( "net/http" "testing" "time" + + "bfs/directory/conf" + dzk "bfs/directory/zk" + "bfs/libs/meta" ) func TestHTTPAPI(t *testing.T) { var ( err error - config *Config - zk *Zookeeper + config *conf.Config + zk *dzk.Zookeeper d *Directory key int64 cookie int32 body []byte url string resp *http.Response - res Response + res meta.Response buf = &bytes.Buffer{} ) - if config, err = NewConfig("./directory.conf"); err != nil { + if config, err = conf.NewConfig("./directory.toml"); err != nil { t.Errorf("NewConfig() error(%v)", err) t.FailNow() } - if zk, err = NewZookeeper([]string{"123.56.108.22:2181"}, time.Second*15, "/rack", "/volume", "/group"); err != nil { + if zk, err = dzk.NewZookeeper(config); err != nil { t.Errorf("NewZookeeper() error(%v)", err) t.FailNow() } - if d, err = NewDirectory(config, zk); err != nil { + defer zk.Close() + if d, err = NewDirectory(config); err != nil { t.Errorf("NewDirectory() error(%v)", err) t.FailNow() } @@ -58,7 +63,7 @@ func TestHTTPAPI(t *testing.T) { t.Errorf("json.Unmarshal error(%v)", err) t.FailNow() } - key = res.Keys[0] + key = res.Key cookie = res.Cookie fmt.Println("put vid:", res.Vid) buf.Reset() diff --git a/directory/main.go b/directory/main.go index 6305bb5..a916c5e 100644 --- a/directory/main.go +++ b/directory/main.go @@ -3,8 +3,9 @@ package main import ( "bfs/directory/conf" "flag" - log 
"github.com/golang/glog" "runtime" + + log "github.com/golang/glog" ) var ( diff --git a/directory/snowflake/gosnowflake.go b/directory/snowflake/gosnowflake.go index 6c8465a..f4f3a44 100644 --- a/directory/snowflake/gosnowflake.go +++ b/directory/snowflake/gosnowflake.go @@ -17,11 +17,8 @@ package snowflake import ( - log "code.google.com/p/log4go" "encoding/json" "errors" - myrpc "github.com/Terry-Mao/gosnowflake/rpc" - "github.com/samuel/go-zookeeper/zk" "math/rand" "net/rpc" "path" @@ -29,6 +26,10 @@ import ( "strconv" "sync" "time" + + myrpc "github.com/Terry-Mao/gosnowflake/rpc" + log "github.com/golang/glog" + "github.com/samuel/go-zookeeper/zk" ) const ( @@ -174,7 +175,7 @@ func (c *Client) client() (*rpc.Client, error) { // watchWorkerId watch the zk node change. func (c *Client) watchWorkerId(workerId int64, workerIdStr string) { workerIdPath := path.Join(zkPath, workerIdStr) - log.Debug("workerIdPath: %s", workerIdPath) + log.Info("workerIdPath: %s", workerIdPath) for { rpcs, _, watch, err := zkConn.ChildrenW(workerIdPath) if err != nil { diff --git a/libs/gohbase/.travis.yml b/libs/gohbase/.travis.yml new file mode 100644 index 0000000..3b004be --- /dev/null +++ b/libs/gohbase/.travis.yml @@ -0,0 +1,12 @@ +language: go +go: + - 1.5.3 +before_install: + - go get golang.org/x/tools/cmd/cover github.com/golang/lint/golint +install: + - go get ./... +after_success: + - make coverdata + - bash <(curl -s https://codecov.io/bash) +script: + - make -j4 check GOTEST_FLAGS=-v diff --git a/libs/gohbase/AUTHORS b/libs/gohbase/AUTHORS new file mode 100644 index 0000000..4eca180 --- /dev/null +++ b/libs/gohbase/AUTHORS @@ -0,0 +1,29 @@ +The GoHBase Authors +------------------- + +GoHBase ("gohbase") was originally written by Benoit Sigoure. + +All contributors are required to sign a "Contributor License Agreement" at + http://opentsdb.net/contributing.html + +The following organizations and people have contributed at least 0.5% of the +current code of GoHBase. +(Please keep both lists sorted alphabetically.) + + + + + +Benoit Sigoure + + + +This list can be obtained at any time with the following script: + +find src test -type f \ +| while read i; do \ + git blame -t $i 2>/dev/null; \ + done \ +| sed 's/^[0-9a-f]\{8\} [^(]*(\([^)]*\) [-+0-9 ]\{14,\}).*/\1/;s/ *$//' \ +| awk '{a[$0]++; t++} END{for(n in a) if (a[n]*100.0/t > 0.5) print n}' \ +| sort diff --git a/libs/gohbase/COPYING b/libs/gohbase/COPYING new file mode 100644 index 0000000..f433b1a --- /dev/null +++ b/libs/gohbase/COPYING @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/libs/gohbase/Makefile b/libs/gohbase/Makefile new file mode 100644 index 0000000..26e411c --- /dev/null +++ b/libs/gohbase/Makefile @@ -0,0 +1,55 @@ +# Copyright (C) 2015 The GoHBase Authors. All rights reserved. +# This file is part of GoHBase. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the COPYING file. + +GO := go +TEST_TIMEOUT := 30s +INTEGRATION_TIMEOUT := 120s +GOTEST_FLAGS := + +DEFAULT_GOPATH := $${GOPATH%%:*} +GOPATH_BIN := $(DEFAULT_GOPATH)/bin +GOLINT := $(GOPATH_BIN)/golint + +all: install + +install: + $(GO) install ./... + +check: vet test fmtcheck lint + +COVER_PKGS := `find ./* -name '*_test.go' | xargs -I{} dirname {} | sort -u` +COVER_MODE := count +coverdata: + echo 'mode: $(COVER_MODE)' >coverage.out + for dir in $(COVER_PKGS); do \ + $(GO) test -covermode=$(COVER_MODE) -coverprofile=cov.out-t $$dir || exit; \ + tail -n +2 cov.out-t >> coverage.out && \ + rm cov.out-t; \ + done; + +coverage: coverdata + $(GO) tool cover -html=coverage.out + rm -f coverage.out + +fmtcheck: + errors=`gofmt -l .`; if test -n "$$errors"; then echo Check these files for style errors:; echo "$$errors"; exit 1; fi + find . -name '*.go' ! -path "./pb/*" -exec ./check_line_len.awk {} + + +vet: + $(GO) vet ./... + +lint: + find ./* -type d ! -name pb | xargs -L 1 $(GOLINT) &>lint; : + if test -s lint; then echo Check these packages for golint:; cat lint; rm lint; exit 1; else rm lint; fi +# The above is ugly, but unfortunately golint doesn't exit 1 when it finds +# lint. 
See https://github.com/golang/lint/issues/65 + +test: + $(GO) test $(GOTEST_FLAGS) -race -timeout=$(TEST_TIMEOUT) ./... + +integration: + $(GO) test $(GOTEST_FLAGS) -race -timeout=$(INTEGRATION_TIMEOUT) -v integration_test.go + +.PHONY: all check coverage coverdata fmtcheck install integration lint test vet diff --git a/libs/gohbase/README.md b/libs/gohbase/README.md new file mode 100644 index 0000000..c00273c --- /dev/null +++ b/libs/gohbase/README.md @@ -0,0 +1,71 @@ +# Golang HBase client [![Build Status](https://travis-ci.org/tsuna/gohbase.svg?branch=master)](https://travis-ci.org/tsuna/gohbase) [![codecov.io](http://codecov.io/github/tsuna/gohbase/coverage.svg?branch=master)](http://codecov.io/github/tsuna/gohbase?branch=master) [![GoDoc](https://godoc.org/github.com/tsuna/gohbase?status.png)](https://godoc.org/github.com/tsuna/gohbase) + +This is a pure-[Go](http://golang.org/) client for [HBase](http://hbase.org). + +Current status: prototype. + +## Supported Versions + +HBase >= 1.0 + +## Installation + + go get github.com/tsuna/gohbase + +## Example Usage + +#### Create a client +```go +client := gohbase.NewClient("localhost") +``` +#### Insert a cell +```go +// Values maps a ColumnFamily -> Qualifiers -> Values. +values := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte{0}}} +putRequest, err := hrpc.NewPutStr(context.Background(), "table", "key", values) +rsp, err := client.Put(putRequest) +``` + +#### Get an entire row +```go +getRequest, err := hrpc.NewGetStr(context.Background(), "table", "row") +getRsp, err := client.Get(getRequest) +``` + +#### Get a specific cell +```go +// Perform a get for the cell with key "15", column family "cf" and qualifier "a" +family := map[string][]string{"cf": []string{"a"}} +getRequest, err := hrpc.NewGetStr(context.Background(), "table", "15", + hrpc.Families(family)) +getRsp, err := client.Get(getRequest) +``` + +#### Get a specific cell with a filter +```go +pFilter := filter.NewKeyOnlyFilter(true) +family := map[string][]string{"cf": []string{"a"}} +getRequest, err := hrpc.NewGetStr(context.Background(), "table", "15", + hrpc.Families(family), hrpc.Filters(pFilter)) +getRsp, err := client.Get(getRequest) +``` + +#### Scan with a filter +```go +pFilter := filter.NewPrefixFilter([]byte("7")) +scanRequest, err := hrpc.NewScanStr(context.Background(), "table", + hrpc.Filters(pFilter)) +scanRsp, err := client.Scan(scanRequest) +``` + +## Contributing + +Any help would be appreciated. Please use +[GerritHub](https://review.gerrithub.io/#/admin/projects/tsuna/gohbase) to +send changes for review, instead of GitHub pull requests. Please sign the +[Contributor License Agreement](https://docs.google.com/spreadsheet/viewform?formkey=dFNiOFROLXJBbFBmMkQtb1hNMWhUUnc6MQ) +when you send your first change for review. + +## License + +Copyright © 2015 The GoHBase Authors. All rights reserved. Use of this source code is governed by the Apache License 2.0 that can be found in the [COPYING](COPYING) file. 
diff --git a/libs/gohbase/b_test.go b/libs/gohbase/b_test.go new file mode 100644 index 0000000..d7b2922 --- /dev/null +++ b/libs/gohbase/b_test.go @@ -0,0 +1,443 @@ +package gohbase_test + +import ( + "context" + "encoding/binary" + "fmt" + "sync" + "testing" + "time" + + "bfs/libs/gohbase" + "bfs/libs/gohbase/conf" + "bfs/libs/gohbase/filter" + "bfs/libs/gohbase/hrpc" + + log "github.com/golang/glog" +) + +func getStopRow(s []byte) []byte { + res := make([]byte, len(s)+20) + copy(res, s) + return res +} + +func TestScan4Split(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + //c := gohbase.NewClient(conf.NewCo + // nf([]string{"172.16.13.94:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + scan, err := hrpc.NewScan(context.Background(), []byte("fuckclient")) + if err != nil { + t.Fatalf("Failed to create Scan request: %s", err) + } + //scan.SetLimit(1) + rsp, err := c.Scan(scan) + if err != nil { + t.Logf("Scan returned an error: %v", err) + } + for i, rspOne := range rsp { + if i%10000 == 0 { + for _, cell := range rspOne.Cells { + t.Log(string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } + } + } + + time.Sleep(5 * time.Second) + + scan, err = hrpc.NewScan(context.Background(), []byte("fuckclient")) + if err != nil { + t.Fatalf("Failed to create Scan request: %s", err) + } + //scan.SetLimit(1) + rsp, err = c.Scan(scan) + if err != nil { + t.Errorf("Scan returned an error: %v", err) + } + for i, rspOne := range rsp { + if i%10000 == 0 { + for _, cell := range rspOne.Cells { + t.Log(string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } + } + } +} + +func TestScanPrefix(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + //c := gohbase.NewClient(conf.NewConf([]string{"172.16.13.94:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + scan, err := hrpc.NewScan(context.Background(), []byte("fuckclient")) + if err != nil { + t.Fatalf("Failed to create Scan request: %s", err) + } + ft := filter.NewPrefixFilter([]byte("row_0")) + scan.SetFilter(ft) + //scan.SetLimit(1) + rsp, err := c.Scan(scan) + if err != nil { + t.Errorf("Scan returned an error: %v", err) + } + for _, rspOne := range rsp { + for _, cell := range rspOne.Cells { + t.Log(string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } + } +} + +func TestDel(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + //c := gohbase.NewClient(conf.NewConf([]string{"172.16.13.94:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + var ( + err error + put *hrpc.Mutate + del *hrpc.Mutate + get *hrpc.Get + rsp *hrpc.Result + ) + + if put, err = hrpc.NewPutStr(context.Background(), "fuckclient", "haha", map[string]map[string][]byte{ + "v": map[string][]byte{ + "c1": []byte("v1"), + "c2": []byte("v2"), + "c3": []byte("v3"), + }, + }); err != nil { + t.Fatalf("Failed to create put request: %s", err) + } + if rsp, err = c.Put(put); err != nil { + t.Errorf("put returned an error: %v", err) + } + + if get, err = hrpc.NewGetStr(context.Background(), "fuckclient", "haha"); err != nil { + t.Fatalf("Failed to create get request: %s", err) + } + if rsp, err = c.Get(get); err != nil { + t.Errorf("get returned an error: %v", err) + } else { + resMap := map[string]string{} + for _, cell := range rsp.Cells { + resMap[fmt.Sprintf("%s:%s", string(cell.Family), 
string(cell.Qualifier))] = string(cell.Value) + } + if resMap["v:c1"] != "v1" || resMap["v:c2"] != "v2" || resMap["v:c3"] != "v3" { + t.Errorf("get does not return just-put-item") + } + } + + if del, err = hrpc.NewDelStr(context.Background(), "fuckclient", "haha", map[string]map[string][]byte{ + "v": map[string][]byte{ + "c1": nil, + }, + }); err != nil { + t.Fatalf("Failed to create Del request: %s", err) + } + if rsp, err = c.Delete(del); err != nil { + t.Errorf("del returned an error: %v", err) + } + if get, err = hrpc.NewGetStr(context.Background(), "fuckclient", "haha"); err != nil { + t.Fatalf("Failed to create get request: %s", err) + } + if rsp, err = c.Get(get); err != nil { + t.Errorf("get returned an error: %v", err) + } else { + resMap := map[string]string{} + for _, cell := range rsp.Cells { + if string(cell.Family) == "v" { + resMap[string(cell.Qualifier)] = string(cell.Value) + } + } + if resMap["c1"] != "" || resMap["c2"] == "" || resMap["c3"] == "" { + t.Errorf("get returned uncorrect value just after del v:c1: %v", resMap) + } + } + + if del, err = hrpc.NewDelStr(context.Background(), "fuckclient", "haha", map[string]map[string][]byte{ + "v": nil, + }); err != nil { + t.Fatalf("Failed to create Del request: %s", err) + } + if rsp, err = c.Delete(del); err != nil { + t.Errorf("del returned an error: %v", err) + } + if get, err = hrpc.NewGetStr(context.Background(), "fuckclient", "haha"); err != nil { + t.Fatalf("Failed to create get request: %s", err) + } + if rsp, err = c.Get(get); err != nil { + t.Errorf("get returned an error: %v", err) + } else { + resMap := map[string]string{} + for _, cell := range rsp.Cells { + if string(cell.Family) == "v" { + resMap[string(cell.Qualifier)] = string(cell.Value) + } + } + if len(resMap) != 0 { + t.Errorf("get returned cf value just after del cf: %v", resMap) + } + } +} + +func TestGetTimeRange(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + //c := gohbase.NewClient(conf.NewConf([]string{"172.16.13.94:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + get, err := hrpc.NewGetStr(context.Background(), "test_tbl1", "row1") + if err != nil { + t.Fatalf("Failed to create get request: %s", err) + } + get.SetTimeRange(hrpc.TimeRange{1469179951934, 1469680333054}) // [). only return latest version which are in this range + rsp, err := c.Get(get) + if err != nil { + t.Errorf("get returned an error: %v", err) + } + for _, cell := range rsp.Cells { + t.Log(string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } +} + +func TestScanTimeRange(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + //c := gohbase.NewClient(conf.NewConf([]string{"172.16.13.94:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + scan, err := hrpc.NewScanRangeStr(context.Background(), "test_tbl1", "row1", "") + if err != nil { + t.Fatalf("Failed to create Scan request: %s", err) + } + ft := filter.NewPrefixFilter([]byte("row1")) + scan.SetFilter(ft) + scan.SetTimeRange(hrpc.TimeRange{1469179951934, 1469680333054}) // [). 
only return latest version which are in this range + rsp, err := c.Scan(scan) + if err != nil { + t.Errorf("Scan returned an error: %v", err) + } + for _, rspOne := range rsp { + for _, cell := range rspOne.Cells { + t.Log(string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } + } +} + +func TestScanPrefix1(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + //c := gohbase.NewClient(conf.NewConf([]string{"172.16.13.94:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + scan, err := hrpc.NewScanRangeStr(context.Background(), "fuckclient", "14771787", "") + if err != nil { + t.Fatalf("Failed to create Scan request: %s", err) + } + ft := filter.NewPrefixFilter([]byte("14771787")) + scan.SetFilter(ft) + rsp, err := c.Scan(scan) + if err != nil { + t.Errorf("Scan returned an error: %v", err) + } + for _, rspOne := range rsp { + for _, cell := range rspOne.Cells { + t.Log(string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } + } +} + +func TestMGet(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + scan, err := hrpc.NewScan(context.Background(), []byte("fuckclient")) + if err != nil { + t.Fatalf("Failed to create Scan request: %s", err) + } + ft := filter.NewMultiRowRangeFilter([]*filter.RowRange{ + filter.NewRowRange([]byte("row_001"), getStopRow([]byte("row_001")), true, true), + filter.NewRowRange([]byte("row_003"), getStopRow([]byte("row_003")), true, true), + filter.NewRowRange([]byte("row_007"), getStopRow([]byte("row_007")), true, true), + }) + scan.SetFilter(ft) + rsp, err := c.Scan(scan) + t.Log("len of rsps is %d", len(rsp)) + if err != nil { + t.Errorf("Scan returned an error: %v", err) + } + for _, rspOne := range rsp { + for _, cell := range rspOne.Cells { + t.Log(string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } + } + /* + b_test.go:30: len of rsps is %d 3 + b_test.go:36: row_001 v c1 v1 + b_test.go:36: row_003 v c1 v1 + b_test.go:36: row_007 v c1 v1 + PASS + ok golang/gohbase 0.138s + */ +} + +func TestConGet(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + for i := 0; i < 10; i++ { + var end = 100 + ids := make([]int, end) + for i := 0; i < end; i++ { + ids[i] = i + 1 + } + gets := make([]hrpc.Call, len(ids)) + for i, id := range ids { + rowKey := fmt.Sprintf("row_%03d", id) + get, err := hrpc.NewGetStr(context.Background(), "fuckclient", rowKey) + if err != nil { + t.Fatalf("Failed to create get request: %s", err) + } + gets[i] = get + } + ctx, _ := context.WithTimeout(context.Background(), 3000*time.Millisecond) + st := time.Now() + ress := c.Go(&hrpc.Calls{ + Calls: gets, + Ctx: ctx, + }) + var cnt int + for _, res := range ress { + if res.Err != nil { + t.Errorf("meet error %v", res.Err) + } + rRes := res.Result + cnt += 1 + for _, cell := range rRes.Cells { + _ = cell + //t.Log(time.Now(), string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } + } + et := time.Now() + t.Log(fmt.Sprintf("start time: %v, end time: %v, ok %d, cost: %d", st, et, cnt, et.UnixNano()-st.UnixNano())) + } +} + +func TestConGet1(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + ids := []int64{121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 
139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248} + type aa struct { + ids []int64 + c int + } + aas := []aa{ + aa{ + ids, + 0, + }, + aa{ + ids[:100], + 1, + }, + aa{ + ids[:77], + 2, + }, + aa{ + ids, + len(ids[:1]) / 2, + }, + aa{ + ids[:2], + (len(ids) - 1) / 2, + }, + aa{ + ids, + (len(ids) + 1) / 2, + }, + } + for _, aai := range aas { + ids := aai.ids + gets := make([]hrpc.Call, len(ids)) + for i, id := range ids { + rowKeyBS := make([]byte, 8) + binary.LittleEndian.PutUint64(rowKeyBS, uint64(id)) + get, err := hrpc.NewGet(context.Background(), []byte("fuckclient"), rowKeyBS) + if err != nil { + t.Fatalf("Failed to create get request: %s", err) + } + gets[i] = get + } + ctx, _ := context.WithTimeout(context.Background(), 3000*time.Millisecond) + st := time.Now() + ress := c.Go(&hrpc.Calls{ + Calls: gets, + Ctx: ctx, + }) + var cnt int + for _, res := range ress { + if res.Err != nil { + t.Errorf("meet error %v", res.Err) + } + rRes := res.Result + cnt += 1 + for _, cell := range rRes.Cells { + _ = cell + //t.Log(time.Now(), string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } + } + et := time.Now() + t.Log(fmt.Sprintf("start time: %v, end time: %v, c %d, total %d, ok %d, cost: %d", st, et, aai.c, len(ids), cnt, et.UnixNano()-st.UnixNano())) + } +} + +func TestBenchmark(t *testing.T) { + c := gohbase.NewClient(conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + keys := []string{ + "row_001", "row_003", "row_007", + } + concurrency := 300 + per := 300 + wg := &sync.WaitGroup{} + wg.Add(concurrency) + allMax := make([]int64, concurrency) + allAvg := make([]int64, concurrency) + for i := 0; i < concurrency; i++ { + go func(i int) { + gets := make([]*hrpc.Get, per*len(keys)) + for j := 0; j < per; j++ { + for k, key := range keys { + get, err := hrpc.NewGetStr(context.Background(), "fuckclient", key) + if err != nil { + log.Error("NewGetStr error for key %s, err is %v", key, err) + continue + } + gets[j*len(keys)+k] = get + } + } + totalTime := int64(0) + maxTime := int64(0) + + //time.Sleep(15 * time.Second) + for _, get := range gets { + st := time.Now() + _, err := c.Get(get) + if err != nil { + log.Error("get meet error, err is %v", err) + continue + } + et := time.Now() + cost := (et.UnixNano() - st.UnixNano()) / int64(time.Millisecond) + if cost > maxTime { + maxTime = int64(cost) + } + totalTime += int64(cost) + //for _, cell := range res.Cells { + // log.Info("worker%2d value: %s-%s-%s-%s, st: %v, et: %v, cost: %d", + // i, string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value), st, et, (et.Nanosecond() - st.Nanosecond()) / 1000000) + //} + } + avg := totalTime / int64(len(gets)) + log.Info("%v worker%2d, count %d, total %d, max %d, avg %d", time.Now(), i, len(gets), totalTime, maxTime, avg) + allMax[i] = maxTime + allAvg[i] = avg + wg.Done() + }(i) + } + wg.Wait() + var allAvgSum, maxAllMax, avgAllAvg int64 + for _, m := range allAvg { + allAvgSum += m + } + avgAllAvg = allAvgSum / int64(concurrency) + 
for _, m := range allMax { + if m > maxAllMax { + maxAllMax = m + } + } + log.Info("max of allMax is %d, avg of allAvg is %d", maxAllMax, avgAllAvg) + time.Sleep(200 * time.Millisecond) +} diff --git a/libs/gohbase/c_test.go b/libs/gohbase/c_test.go new file mode 100644 index 0000000..2682e80 --- /dev/null +++ b/libs/gohbase/c_test.go @@ -0,0 +1,77 @@ +package gohbase + +import ( + "context" + "testing" + "time" + + "bfs/libs/gohbase/conf" + "bfs/libs/gohbase/hrpc" + + log "github.com/golang/glog" +) + +func getStopRow(s []byte) []byte { + res := make([]byte, len(s)+20) + copy(res, s) + return res +} + +func TestMGet1(t *testing.T) { + c := newClient(standardClient, conf.NewConf([]string{"172.16.33.45:2181"}, "", "", "", 30*time.Second, 0, 0, 0)) + keys := []string{ + "row_001", "row_003", "row_007", + } + gets := make([]*hrpc.Get, len(keys)) + for i, key := range keys { + get, err := hrpc.NewGetStr(context.Background(), "fuckclient", key) + if err != nil { + log.Error("NewGetStr error for key %s, err is %v", key, err) + continue + } + gets[i] = get + } + //time.Sleep(15 * time.Second) + for _, get := range gets { + st := time.Now() + res, err := c.Get(get) + if err != nil { + log.Error("get meet error, err is %v", err) + continue + } + et := time.Now() + for _, cell := range res.Cells { + log.Info("%s-%s-%s-%s, st: %v, et: %v, cost: %d", string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value), st, et, (et.Nanosecond()-st.Nanosecond())/1000000) + } + time.Sleep(3 * time.Second) + } + + c.clearAllRegions() + log.Info("%v: clearAllRegions", time.Now()) + time.Sleep(3 * time.Second) + //time.Sleep(30 * time.Second) + log.Info("%v: do second scan", time.Now()) + + gets = make([]*hrpc.Get, len(keys)) + for i, key := range keys { + get, err := hrpc.NewGetStr(context.Background(), "fuckclient", key) + if err != nil { + log.Error("NewGetStr error for key %s, err is %v", key, err) + continue + } + gets[i] = get + } + for _, get := range gets { + st := time.Now() + res, err := c.Get(get) + if err != nil { + log.Error("get meet error, err is %v", err) + continue + } + et := time.Now() + for _, cell := range res.Cells { + log.Info("%s-%s-%s-%s, st: %v, et: %v, cost: %d", string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value), st, et, (et.Nanosecond()-st.Nanosecond())/1000000) + } + } + time.Sleep(1 * time.Second) +} diff --git a/libs/gohbase/check_line_len.awk b/libs/gohbase/check_line_len.awk new file mode 100644 index 0000000..9e4cf59 --- /dev/null +++ b/libs/gohbase/check_line_len.awk @@ -0,0 +1,22 @@ +#!/usr/bin/awk -f + +BEGIN { + max = 100; +} + +# Expand tabs to 4 spaces. +{ + gsub(/\t/, " "); +} + +length() > max { + errors++; + print FILENAME ":" FNR ": Line too long (" length() "/" max ")"; +} + +END { + if (errors >= 125) { + errors = 125; + } + exit errors; +} diff --git a/libs/gohbase/client.go b/libs/gohbase/client.go new file mode 100644 index 0000000..5055a73 --- /dev/null +++ b/libs/gohbase/client.go @@ -0,0 +1,1290 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. 
+ +package gohbase + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "strconv" + "sync" + "time" + + "bfs/libs/gohbase/conf" + "bfs/libs/gohbase/hrpc" + "bfs/libs/gohbase/pb" + "bfs/libs/gohbase/region" + "bfs/libs/gohbase/regioninfo" + "bfs/libs/gohbase/zk" + + "github.com/cznic/b" + log "github.com/golang/glog" + "github.com/golang/protobuf/proto" +) + +// Constants +var ( + // Name of the meta region. + metaTableName = []byte("hbase:meta") + + infoFamily = map[string][]string{ + "info": nil, + } + + // + NoMServer = errors.New("no meta/master") + + // ErrDeadline is returned when the deadline of a request has been exceeded + ErrDeadline = errors.New("deadline exceeded") + + // TableNotFound is returned when attempting to access a table that + // doesn't exist on this cluster. + TableNotFound = errors.New("table not found") + + // Default timeouts + + // How long to wait for a region lookup (either meta lookup or finding + // meta in ZooKeeper). Should be greater than or equal to the ZooKeeper + // session timeout. + regionLookupTimeout = 30 * time.Second + + backoffStart = 16 * time.Millisecond +) + +const ( + standardClient = iota + adminClient +) + +const DefaultBatchCalls = 2 + +type Option func(*client) + +type newRegResult struct { + Client *region.Client + Err error +} + +type CallResult struct { + Result *hrpc.Result + Err error +} + +// region -> client cache. +type regionClientCache struct { + m sync.Mutex + + regionClientMap map[*regioninfo.Info]*region.Client + + // Used to quickly look up all the regioninfos that map to a specific client + clientRegionsMap map[*region.Client][]*regioninfo.Info +} + +func (rcc *regionClientCache) get(r *regioninfo.Info) *region.Client { + rcc.m.Lock() + c := rcc.regionClientMap[r] + rcc.m.Unlock() + return c +} + +func (rcc *regionClientCache) put(r *regioninfo.Info, c *region.Client) { + rcc.m.Lock() + rcc.regionClientMap[r] = c + lst := rcc.clientRegionsMap[c] + var exist bool + for _, ri := range lst { + if ri == r { + // same one + exist = true + break + } + } + if !exist { + rcc.clientRegionsMap[c] = append(lst, r) + } + rcc.m.Unlock() +} + +func (rcc *regionClientCache) del(r *regioninfo.Info) { + rcc.m.Lock() + c := rcc.regionClientMap[r] + + if c != nil { + // c can be nil if the regioninfo is not in the cache + // e.g. it's already been deleted. + delete(rcc.regionClientMap, r) + + var index int + for i, reg := range rcc.clientRegionsMap[c] { + if reg == r { + index = i + } + } + rcc.clientRegionsMap[c] = append( + rcc.clientRegionsMap[c][:index], + rcc.clientRegionsMap[c][index+1:]...) 
+ } + rcc.m.Unlock() +} + +func (rcc *regionClientCache) regionClientDown(reg *regioninfo.Info) []*regioninfo.Info { + var c *region.Client + rcc.m.Lock() + c = rcc.regionClientMap[reg] + // left for rcc.clientDown to release the lock + return rcc.clientDown(c, true) +} + +func (rcc *regionClientCache) clientDown(c *region.Client, havingLock bool) []*regioninfo.Info { + if !havingLock { + rcc.m.Lock() + } + var downRegions []*regioninfo.Info + for _, sharedReg := range rcc.clientRegionsMap[c] { + succ := sharedReg.MarkUnavailable() + delete(rcc.regionClientMap, sharedReg) + if succ { + downRegions = append(downRegions, sharedReg) + } + } + delete(rcc.clientRegionsMap, c) + rcc.m.Unlock() + return downRegions +} + +// for test +func (rcc *regionClientCache) allClientDown() { + rcc.m.Lock() + for _, c := range rcc.regionClientMap { + var downregions []*regioninfo.Info + for _, sharedReg := range rcc.clientRegionsMap[c] { + succ := sharedReg.MarkUnavailable() + delete(rcc.regionClientMap, sharedReg) + if succ { + downregions = append(downregions, sharedReg) + } + } + delete(rcc.clientRegionsMap, c) + } + rcc.m.Unlock() +} + +func (rcc *regionClientCache) checkForClient(host string, port uint16) *region.Client { + rcc.m.Lock() + for client := range rcc.clientRegionsMap { + if client.Host() == host && client.Port() == port { + rcc.m.Unlock() + return client + } + } + rcc.m.Unlock() + return nil +} + +// key -> region cache. +type keyRegionCache struct { + m sync.Mutex + + // Maps a []byte of a region start key to a *regioninfo.Info + regions *b.Tree +} + +func (krc *keyRegionCache) get(key []byte) ([]byte, *regioninfo.Info) { + // When seeking - "The Enumerator's position is possibly after the last item in the tree" + // http://godoc.org/github.com/cznic/b#Tree.Set + krc.m.Lock() + enum, ok := krc.regions.Seek(key) + k, v, err := enum.Prev() + if err == io.EOF && krc.regions.Len() > 0 { + // We're past the end of the tree. Return the last element instead. + // (Without this code we always get a cache miss and create a new client for each req.) + k, v = krc.regions.Last() + err = nil + } else if !ok { + k, v, err = enum.Prev() + } + // TODO: It would be nice if we could do just enum.Get() to avoid the + // unnecessary cost of seeking to the next entry. + krc.m.Unlock() + if err != nil { + return nil, nil + } + return k.([]byte), v.(*regioninfo.Info) +} + +func (krc *keyRegionCache) put(key []byte, reg *regioninfo.Info) *regioninfo.Info { + krc.m.Lock() + // As split case if not that frequent(at least compare to put), we prefer the lazy way - + // remove and update out-of-region when meet error + // Author: We need to remove all the entries that are overlap with the range + // of the new region being added here, if any. + oldV, _ := krc.regions.Put(key, func(interface{}, bool) (interface{}, bool) { + return reg, true + }) + krc.m.Unlock() + if oldV == nil { + return nil + } + return oldV.(*regioninfo.Info) +} + +func (krc *keyRegionCache) del(key []byte) bool { + krc.m.Lock() + success := krc.regions.Delete(key) + krc.m.Unlock() + return success +} + +// A Client provides access to an HBase cluster. +type client struct { + clientType int + + zkquorum []string + + regions keyRegionCache + + // used when: + // 1. mark region as unavailable + // 2. get region from cache + regionsLock sync.Mutex + + // Maps a *regioninfo.Info to the *region.Client that we think currently + // serves it. 
+ clients regionClientCache + + metaRegionInfo *regioninfo.Info + metaClient *region.Client + + adminRegionInfo *regioninfo.Info + adminClient *region.Client + + // The maximum size of the RPC queue in the region client + rpcQueueSize int + + // The timeout before flushing the RPC queue in the region client + flushInterval time.Duration + + // The timeout used when dial to region server. 0 means no-timeout. + dialTimeout time.Duration + + zkClient *zk.ZKClient +} + +func (c *client) Close() error { + c.regionsLock.Lock() + defer c.regionsLock.Unlock() + if c.metaClient != nil { + c.metaClient.Close() + } + if c.adminClient != nil { + c.adminClient.Close() + } + for _, rc := range c.clients.regionClientMap { + if rc != nil { + rc.Close() + } + } + return nil +} + +func (c *client) SetServer(resourceType int, ms *zk.ServerInfo) { + log.Info("SetServer for type (%d) to (%v)", resourceType, ms) + switch resourceType { + case zk.ResourceTypeMaster: + mc := c.adminClient + c.adminClient = nil + go mc.Close() + case zk.ResourceTypeMeta: + mc := c.metaClient + c.metaClient = nil + go mc.Close() + default: + log.Infof("unrecognized resourceType: %d", resourceType) + } +} + +// Client a regular HBase client +type Client interface { + CheckTable(ctx context.Context, table string) error + Scan(s *hrpc.Scan) ([]*hrpc.Result, error) + Get(g *hrpc.Get) (*hrpc.Result, error) + Put(p *hrpc.Mutate) (*hrpc.Result, error) + Delete(d *hrpc.Mutate) (*hrpc.Result, error) + Append(a *hrpc.Mutate) (*hrpc.Result, error) + Increment(i *hrpc.Mutate) (int64, error) + // Calls can only used for general call which means call.CallType().GeneralCall() == true + // notice that the total cost-time is proportional to len(gets) as sequential-execution + Calls(cs []hrpc.Call) []CallResult + // ConCalls will do the cs.Calls concurrently with given concurrency. + // 0 concurrency will result an auto-concurrency: len(cs.Calls) / DefaultBatchCalls, at least 1 + // -1 concurrency is not supported + Go(cs *hrpc.Calls) []CallResult + Close() error +} + +// AdminClient to perform admistrative operations with HMaster +type AdminClient interface { + CreateTable(t *hrpc.CreateTable) (*hrpc.Result, error) + DeleteTable(t *hrpc.DeleteTable) (*hrpc.Result, error) + EnableTable(t *hrpc.EnableTable) (*hrpc.Result, error) + DisableTable(t *hrpc.DisableTable) (*hrpc.Result, error) +} + +// NewClient creates a new HBase client. +// master or meta being empty string means use default zk-path +func NewClient(c *conf.Conf, options ...Option) Client { + return newClient(standardClient, c, options...) +} + +// NewAdminClient creates an admin HBase client. +// master or meta being empty string means use default zk-path +func NewAdminClient(c *conf.Conf, options ...Option) AdminClient { + return newClient(adminClient, c, options...) +} + +func newClient(clientType int, c *conf.Conf, options ...Option) *client { + log.Infof("Creating new client. 
Host: %v", c.Zkquorum) + if c.FlushInterval <= 0 && c.RpcQueueSize < 0 { + log.Errorf("flushInterval (%d) <= 0 and queueSize (%d) < 0", int64(c.FlushInterval), c.RpcQueueSize) + return nil + } + cl := &client{ + clientType: clientType, + regions: keyRegionCache{regions: b.TreeNew(regioninfo.CompareGeneric)}, + clients: regionClientCache{ + regionClientMap: make(map[*regioninfo.Info]*region.Client), + clientRegionsMap: make(map[*region.Client][]*regioninfo.Info), + }, + zkquorum: c.Zkquorum, + //rpcQueueSize: 100, + rpcQueueSize: c.RpcQueueSize, + //flushInterval: 5 * time.Millisecond, //XXX allow for configuring + flushInterval: c.FlushInterval, //XXX allow for configuring + dialTimeout: c.DialTimeout, + metaRegionInfo: ®ioninfo.Info{ + Table: []byte("hbase:meta"), + RegionName: []byte("hbase:meta,,1"), + StopKey: []byte{}, + }, + adminRegionInfo: ®ioninfo.Info{}, + } + for _, option := range options { + option(cl) + } + var ( + useMaster, useMeta bool + resourceType int + serverWatcher zk.ServerWatcher + ) + if clientType == adminClient { + useMaster, useMeta = true, false + resourceType = zk.ResourceTypeMaster + } else { + useMaster, useMeta = false, true + resourceType = zk.ResourceTypeMeta + } + zkClient, err := zk.NewZKClient(c.Zkquorum, c.ZkRoot, c.Master, c.Meta, useMaster, useMeta, c.ZkTimeout) + if err != nil { + log.Errorf("zk.NewZKClient(%v, %s, %s) failed, err is (%v)", c.Zkquorum, c.Master, c.Meta, err) + return nil + } + zkClient.WatchServer(resourceType, serverWatcher) + cl.zkClient = zkClient + return cl +} + +// RpcQueueSize will return an option that will set the size of the RPC queues +// used in a given client +func RpcQueueSize(size int) Option { + return func(c *client) { + c.rpcQueueSize = size + } +} + +// FlushInterval will return an option that will set the timeout for flushing +// the RPC queues used in a given client +func FlushInterval(interval time.Duration) Option { + return func(c *client) { + c.flushInterval = interval + } +} + +// CheckTable returns an error if the given table name doesn't exist. +func (c *client) CheckTable(ctx context.Context, table string) error { + getStr, err := hrpc.NewGetStr(ctx, table, "theKey") + if err == nil { + _, err = c.SendRPC(getStr) + } + return err +} + +// call can only used for general call which means call.CallType().GeneralCall() == true +func (c *client) call(ca hrpc.Call) (*hrpc.Result, error) { + pbmsg, err := c.sendRPC(ca) + if err != nil { + return nil, err + } + ct := ca.CallType() + var result *pb.Result + switch { + case ct == hrpc.CallTypeGet: + r, ok := pbmsg.(*pb.GetResponse) + if !ok { + return nil, fmt.Errorf("sendRPC returned not a GetResponse") + } + result = r.Result + case ct.IsMutate(): + r, ok := pbmsg.(*pb.MutateResponse) + if !ok { + return nil, fmt.Errorf("sendRPC returned not a MutateResponse") + } + result = r.Result + } + return hrpc.ToLocalResult(result), nil +} + +// for test case +func (c *client) clearAllRegions() { + oldMetaClient := c.metaClient + c.metaClient = nil // 不能 markUnAvailable + go oldMetaClient.Close() + c.regionsLock.Lock() + regions := c.regions + regions.m.Lock() + regions.regions.Clear() + regions.m.Unlock() + c.regionsLock.Unlock() +} + +// Scan retrieves the values specified in families from the given range. 
+func (c *client) Scan(s *hrpc.Scan) ([]*hrpc.Result, error) { + var ( + results []*pb.Result + scanres *pb.ScanResponse + rpc *hrpc.Scan + err error + res proto.Message + ) + ctx := s.GetContext() + table := s.Table() + families := s.GetFamilies() + filters := s.GetFilter() + startRow := s.GetStartRow() + stopRow := s.GetStopRow() + limit := s.Limit() + for { + // Make a new Scan RPC for this region + if rpc != nil { + // If it's not the first region, we want to start at whatever the + // last region's StopKey was + startRow = rpc.GetRegionStop() + } + + rpc, err = hrpc.NewScanRange(ctx, table, startRow, stopRow, + hrpc.Families(families), hrpc.Filters(filters)) + if err != nil { + return nil, err + } + + res, err = c.sendRPC(rpc) + if err != nil { + return nil, err + } + scanres = res.(*pb.ScanResponse) + results = append(results, scanres.Results...) + + // TODO: The more_results field of the ScanResponse object was always + // true, so we should figure out if there's a better way to know when + // to move on to the next region than making an extra request and + // seeing if there were no results + enough := false + for len(scanres.Results) != 0 { + rpc = hrpc.NewScanFromID(ctx, table, *scanres.ScannerId, rpc.Key()) + + res, err = c.sendRPC(rpc) + if err != nil { + return nil, err + } + scanres = res.(*pb.ScanResponse) + results = append(results, scanres.Results...) + if limit > 0 && len(results) >= limit { + enough = true + break + } + } + //if scanres != nil && len(scanres.Results) != 0 { + // // means scan is not finished (but has satisfied the requirement) + // rpc = hrpc.NewCloseFromID(ctx, table, *scanres.ScannerId, rpc.Key()) + // res, err = c.sendRPC(rpc) + // // new version hbase will close scanner after iterating and thus close rpc may return "UnknownScannerException" error + // // thus here we do not check err + // // if err != nil { + // // return nil, err + // // } + //} + // but for some hbase versions, it seems that scanner should be closed manually. WTF + rpc = hrpc.NewCloseFromID(ctx, table, *scanres.ScannerId, rpc.Key()) + res, _ = c.sendRPC(rpc) + if enough { + break + } + + // Check to see if this region is the last we should scan (either + // because (1) it's the last region or (3) because its stop_key is + // greater than or equal to the stop_key of this scanner provided + // that (2) we're not trying to scan until the end of the table). + // (1) + if len(rpc.GetRegionStop()) == 0 || + // (2) (3) + len(stopRow) != 0 && bytes.Compare(stopRow, rpc.GetRegionStop()) <= 0 { + break + } + } + if limit > 0 && len(results) > limit { + results = results[:limit] + } + // Do we want to be returning a slice of Result objects or should we just + // put all the Cells into the same Result object? 
+ localResults := make([]*hrpc.Result, len(results)) + for idx, result := range results { + localResults[idx] = hrpc.ToLocalResult(result) + } + return localResults, nil +} + +func (c *client) Get(g *hrpc.Get) (*hrpc.Result, error) { + return c.call(g) +} + +func (c *client) Calls(cs []hrpc.Call) (res []CallResult) { + callsNum := len(cs) + if callsNum == 0 { + return + } + res = make([]CallResult, callsNum) + for i, ca := range cs { + var ( + callResult *hrpc.Result + err error + ) + if ca.CallType().GeneralCall() { + callResult, err = c.call(ca) + } else { + err = hrpc.NotGeneralCallErr + } + res[i] = CallResult{ + Result: callResult, + Err: err, + } + } + return +} + +func (c *client) Go(cs *hrpc.Calls) (res []CallResult) { + type chanElem struct { + res CallResult + offset int + } + callsNum := len(cs.Calls) + var perNum int + if callsNum == 0 { + return + } + concurrency := 1 + (callsNum-1)/DefaultBatchCalls + perNum = DefaultBatchCalls + resChan := make(chan chanElem, callsNum) + res = make([]CallResult, callsNum) + fetcher := func(start int, calls []hrpc.Call, ch chan<- chanElem) { + for i, curCall := range calls { + var ( + callResult *hrpc.Result + err error + ) + if curCall.CallType().GeneralCall() { + callResult, err = c.call(curCall) + } else { + err = hrpc.NotGeneralCallErr + } + ch <- chanElem{ + offset: start + i, + res: CallResult{ + Result: callResult, + Err: err, + }, + } + } + } + start := 0 + for i := 0; i < concurrency-1; i, start = i+1, start+perNum { + go fetcher(start, cs.Calls[start:start+perNum], resChan) + } + go fetcher(start, cs.Calls[start:], resChan) + for i := 0; i < callsNum; i++ { + select { + case curRes := <-resChan: + res[curRes.offset] = curRes.res + case <-cs.Ctx.Done(): + log.Infof("timeout when do Go for (%d) calls", callsNum) + return + } + } + return +} + +func (c *client) Put(p *hrpc.Mutate) (*hrpc.Result, error) { + return c.mutate(p) +} + +func (c *client) Delete(d *hrpc.Mutate) (*hrpc.Result, error) { + return c.mutate(d) +} + +func (c *client) Append(a *hrpc.Mutate) (*hrpc.Result, error) { + return c.mutate(a) +} + +func (c *client) Increment(i *hrpc.Mutate) (int64, error) { + r, err := c.mutate(i) + if err != nil { + return 0, err + } + + if len(r.Cells) != 1 { + return 0, fmt.Errorf("Increment returned %d cells, but we expected exactly one.", + len(r.Cells)) + } + + val := binary.BigEndian.Uint64(r.Cells[0].Value) + return int64(val), nil +} + +func (c *client) mutate(m *hrpc.Mutate) (*hrpc.Result, error) { + return c.call(m) +} + +func (c *client) CreateTable(t *hrpc.CreateTable) (*hrpc.Result, error) { + pbmsg, err := c.sendRPC(t) + if err != nil { + return nil, err + } + + _, ok := pbmsg.(*pb.CreateTableResponse) + if !ok { + return nil, fmt.Errorf("sendRPC returned not a CreateTableResponse") + } + + return &hrpc.Result{}, nil +} + +func (c *client) DeleteTable(t *hrpc.DeleteTable) (*hrpc.Result, error) { + pbmsg, err := c.sendRPC(t) + if err != nil { + return nil, err + } + + _, ok := pbmsg.(*pb.DeleteTableResponse) + if !ok { + return nil, fmt.Errorf("sendRPC returned not a DeleteTableResponse") + } + + return &hrpc.Result{}, nil +} + +func (c *client) EnableTable(t *hrpc.EnableTable) (*hrpc.Result, error) { + pbmsg, err := c.sendRPC(t) + if err != nil { + return nil, err + } + + _, ok := pbmsg.(*pb.EnableTableResponse) + if !ok { + return nil, fmt.Errorf("sendRPC returned not a EnableTableResponse") + } + + return &hrpc.Result{}, nil +} + +func (c *client) DisableTable(t *hrpc.DisableTable) (*hrpc.Result, error) { + pbmsg, err := 
c.sendRPC(t) + if err != nil { + return nil, err + } + + _, ok := pbmsg.(*pb.DisableTableResponse) + if !ok { + return nil, fmt.Errorf("sendRPC returned not a DisableTableResponse") + } + + return &hrpc.Result{}, nil +} + +// Could be removed in favour of above +func (c *client) SendRPC(rpc hrpc.Call) (*hrpc.Result, error) { + pbmsg, err := c.sendRPC(rpc) + + var rsp *hrpc.Result + switch r := pbmsg.(type) { + case *pb.GetResponse: + rsp = hrpc.ToLocalResult(r.Result) + case *pb.MutateResponse: + rsp = hrpc.ToLocalResult(r.Result) + } + + return rsp, err +} + +func (c *client) sendRPC(rpc hrpc.Call) (proto.Message, error) { + // Check the cache for a region that can handle this request + reg := c.getRegionFromCache(rpc.Table(), rpc.Key()) + if reg != nil { + //log.Info("found reg: %v for %s-%s", reg, string(rpc.Table()), string(rpc.Key())) + return c.sendRPCToRegion(rpc, reg) + } else { + return c.findRegionAndSendRPC(rpc) + } +} + +func (c *client) sendRPCToRegion(rpc hrpc.Call, reg *regioninfo.Info) (proto.Message, error) { + // On the first sendRPC to the meta or admin regions, a goroutine must be + // manually kicked off for the meta or admin region client + if c.adminClient == nil && reg == c.adminRegionInfo && !c.adminRegionInfo.IsUnavailable() || + c.metaClient == nil && reg == c.metaRegionInfo && !c.metaRegionInfo.IsUnavailable() { + c.regionsLock.Lock() + if reg == c.metaRegionInfo && !c.metaRegionInfo.IsUnavailable() || + reg == c.adminRegionInfo && !c.adminRegionInfo.IsUnavailable() { + log.Info("reestablish region (%v)", reg) + reg.MarkUnavailable() + go c.reestablishRegion(reg) + } + c.regionsLock.Unlock() + } + // The region was in the cache, check + // if the region is marked as available + if reg.IsUnavailable() { + return c.waitOnRegion(rpc, reg) + } + + rpc.SetRegion(reg) + + // Queue the RPC to be sent to the region + client := c.clientFor(reg) + var err error + if client == nil { + err = errors.New("no client for this region") + } else { + err = client.QueueRPC(rpc) + } + + if err != nil { + // if the err is UnrecoverableErr + if _, ok := err.(region.UnrecoverableError); ok { + // If it was an unrecoverable error, the region client is + // considered dead. + log.Errorf("met UnrecoverableError (%v) when access region (%v)", err, reg) + if reg == c.metaRegionInfo || reg == c.adminRegionInfo { + // If this is the admin client or the meta table, mark the + // region as unavailable and start up a goroutine to + // reconnect if it wasn't already marked as such. + first := reg.MarkUnavailable() + if first { + go c.reestablishRegion(reg) + } + } else { + // Else this is a normal region. Mark all the regions + // sharing this region's client as unavailable, and start + // a goroutine to reconnect for each of them. + downRegions := c.clients.regionClientDown(reg) + for _, downReg := range downRegions { + go c.reestablishRegion(downReg) + } + } + } else { + log.Errorf("met error (%v) when clientFor reg or queueRPC, try to reestablish region (%v)", err, reg) + // There was an error queueing the RPC. + // Mark the region as unavailable. + first := reg.MarkUnavailable() + // If this was the first goroutine to mark the region as + // unavailable, start a goroutine to reestablish a connection + if first { + go c.reestablishRegion(reg) + } + } + + // Block until the region becomes available. 
+ return c.waitOnRegion(rpc, reg) + } + + // Wait for the response + var res hrpc.RPCResult + select { + case res = <-rpc.GetResultChan(): + case <-rpc.GetContext().Done(): + return nil, ErrDeadline + } + + // Check for errors + if _, ok := res.Error.(region.RetryableError); ok { + // There's an error specific to this region, but + // our region client is fine. Mark this region as + // unavailable (as opposed to all regions sharing + // the client), and start a goroutine to reestablish + // it. + log.Errorf("met RetryableError (%v) when access region (%v)", res.Error, reg) + first := reg.MarkUnavailable() + if first { + go c.reestablishRegion(reg) + } + if reg != c.metaRegionInfo && reg != c.adminRegionInfo { + // The client won't be in the cache if this is the + // meta or admin region + // but the reg in regions is left for establishRegion method to del + c.clients.del(reg) + } + return c.waitOnRegion(rpc, reg) + } else if _, ok := res.Error.(region.UnrecoverableError); ok { + // If it was an unrecoverable error, the region client is + // considered dead. + log.Errorf("met UnrecoverableError (%v) when access region (%v)", res.Error, reg) + if reg == c.metaRegionInfo || reg == c.adminRegionInfo { + // If this is the admin client or the meta table, mark the + // region as unavailable and start up a goroutine to + // reconnect if it wasn't already marked as such. + first := reg.MarkUnavailable() + if first { + go c.reestablishRegion(reg) + } + } else { + // Else this is a normal region. Mark all the regions + // sharing this region's client as unavailable, and start + // a goroutine to reconnect for each of them. + downRegions := c.clients.regionClientDown(reg) + for _, downReg := range downRegions { + go c.reestablishRegion(downReg) + } + } + + // Fall through to the case of the region being unavailable, + // which will result in blocking until it's available again. + return c.waitOnRegion(rpc, reg) + } else { + // RPC was successfully sent, or an unknown type of error + // occurred. In either case, return the results. + return res.Msg, res.Error + } +} + +// actually not wait on region but wait on region client after sendRPC +func (c *client) waitOnRegion(rpc hrpc.Call, reg *regioninfo.Info) (proto.Message, error) { + ch := reg.GetAvailabilityChan() + if ch == nil { + // WTF, this region is available? Maybe it was marked as such + // since waitOnRegion was called. + return c.sendRPC(rpc) + } + // The region is unavailable. Wait for it to become available, + // or for the deadline to be exceeded. + select { + case <-ch: + return c.sendRPC(rpc) // rather than sendRPCToRegion as perhaps we should use another region + case <-rpc.GetContext().Done(): + return nil, ErrDeadline + } +} + +// The region was not in the cache, it must be looked up in the meta table +func (c *client) findRegionAndSendRPC(rpc hrpc.Call) (proto.Message, error) { + + backoff := backoffStart + ctx := rpc.GetContext() + for { + // Look up the region in the meta table + reg, host, port, err := c.locateRegion(ctx, rpc.Table(), rpc.Key()) + + if err != nil { + if err == TableNotFound { + return nil, err + } + // There was an error with the meta table. Let's sleep for some + // backoff amount and retry. + backoff, err = sleepAndIncreaseBackoff(ctx, backoff) + if err != nil { + return nil, err + } + continue + } + + // Check that the region wasn't added to + // the cache while we were looking it up. + // If not add it to cache. 
+ c.regionsLock.Lock() + + if existing := c.getRegionFromCache(rpc.Table(), rpc.Key()); existing != nil { + // The region was added to the cache while we were looking it + // up. Send the RPC to the region that was in the cache. + c.regionsLock.Unlock() + return c.sendRPCToRegion(rpc, existing) + } + + log.Infof("region cache miss for (%q) (%q)", rpc.Table(), rpc.Key()) + + // The region wasn't added to the cache while we were looking it + // up. Mark this one as unavailable and add it to the cache. + reg.MarkUnavailable() + c.regions.put(reg.RegionName, reg) // NOTE not all NEW regions are added here as we may find a region from cache + // and then it turns out to be invalid and replaced by another new one which it is fetched from establish-locate + + c.regionsLock.Unlock() + + // Start a goroutine to connect to the region + go c.establishRegion(reg, host, port) + + // Wait for the new region to become + // available, and then send the RPC + return c.waitOnRegion(rpc, reg) + } +} + +// Searches in the regions cache for the region hosting the given row. +func (c *client) getRegionFromCache(table, key []byte) *regioninfo.Info { + if c.clientType == adminClient { + return c.adminRegionInfo + } else if bytes.Equal(table, metaTableName) { + return c.metaRegionInfo + } + regionName := createRegionSearchKey(table, key) + regionKey, region := c.regions.get(regionName) + if region == nil || !isCacheKeyForTable(table, regionKey) { + return nil + } + + if len(region.StopKey) != 0 && + // If the stop key is an empty byte array, it means this region is the + // last region for this table and this key ought to be in that region. + bytes.Compare(key, region.StopKey) >= 0 { + return nil + } + + return region +} + +// Checks whether or not the given cache key is for the given table. +func isCacheKeyForTable(table, cacheKey []byte) bool { + // Check we found an entry that's really for the requested table. + for i := 0; i < len(table); i++ { + if table[i] != cacheKey[i] { + // This table isn't in the map, we found + return false // a key which is for another table. + } + } + + // Make sure we didn't find another key that's for another table + // whose name is a prefix of the table name we were given. + return cacheKey[len(table)] == ',' +} + +// Creates the META key to search for in order to locate the given key. +func createRegionSearchKey(table, key []byte) []byte { + metaKey := make([]byte, 0, len(table)+len(key)+3) + metaKey = append(metaKey, table...) + metaKey = append(metaKey, ',') + metaKey = append(metaKey, key...) + metaKey = append(metaKey, ',') + // ':' is the first byte greater than '9'. We always want to find the + // entry with the greatest timestamp, so by looking right before ':' + // we'll find it. + metaKey = append(metaKey, ':') + return metaKey +} + +// Returns the client currently known to hose the given region, or NULL. +func (c *client) clientFor(region *regioninfo.Info) *region.Client { + if c.clientType == adminClient { + return c.adminClient + } + if region == c.metaRegionInfo { + return c.metaClient + } + return c.clients.get(region) +} + +// Locates the region in which the given row key for the given table is. 
+// all NEW REGIONs are "made" here +func (c *client) locateRegion(ctx context.Context, + table, key []byte) (*regioninfo.Info, string, uint16, error) { + + log.Infof("locate region for table (%q), key (%q)", table, key) // for test + + metaKey := createRegionSearchKey(table, key) + rpc, err := hrpc.NewGetBefore(ctx, metaTableName, metaKey, hrpc.Families(infoFamily)) + if err != nil { + return nil, "", 0, err + } + rpc.SetRegion(c.metaRegionInfo) + resp, err := c.sendRPC(rpc) + + if err != nil { + ch := c.metaRegionInfo.GetAvailabilityChan() + if ch != nil { + select { + case <-ch: + return c.locateRegion(ctx, table, key) + case <-rpc.GetContext().Done(): + return nil, "", 0, ErrDeadline + } + } else { + return nil, "", 0, err + } + } + + metaRow := resp.(*pb.GetResponse) + if metaRow.Result == nil { + return nil, "", 0, TableNotFound + } + + reg, host, port, err := c.parseMetaTableResponse(metaRow) + if err != nil { + log.Errorf("get meta for metaKey (%s) met error(%v)", metaKey, err) + return nil, "", 0, err + } + if !bytes.Equal(table, reg.Table) { + // This would indicate a bug in HBase. + return nil, "", 0, fmt.Errorf("WTF: Meta returned an entry for the wrong table!"+ + " Looked up table=%q key=%q got region=%s", table, key, reg) + } else if len(reg.StopKey) != 0 && + bytes.Compare(key, reg.StopKey) >= 0 { + // This would indicate a hole in the meta table. + return nil, "", 0, fmt.Errorf("WTF: Meta returned an entry for the wrong region!"+ + " Looked up table=%q key=%q got region=%s", table, key, reg) + } + log.Infof("locate region for table (%q), key (%q), found: (%v), (%s), (%d)", table, key, reg, host, port) // for test + return reg, host, port, nil +} + +// parseMetaTableResponse parses the contents of a row from the meta table. +// It's guaranteed to return a region info and a host/port OR return an error. +func (c *client) parseMetaTableResponse(metaRow *pb.GetResponse) ( + *regioninfo.Info, string, uint16, error) { + + var reg *regioninfo.Info + var host string + var port uint16 + + for _, cell := range metaRow.Result.Cell { + switch string(cell.Qualifier) { + case "regioninfo": + var err error + reg, err = regioninfo.InfoFromCell(cell) + if err != nil { + return nil, "", 0, err + } + case "server": + value := cell.Value + if len(value) == 0 { + continue // Empty during NSRE. + } + colon := bytes.IndexByte(value, ':') + if colon < 1 { + // Colon can't be at the beginning. + return nil, "", 0, + fmt.Errorf("broken meta: no colon found in info:server %q", cell) + } + host = string(value[:colon]) + portU64, err := strconv.ParseUint(string(value[colon+1:]), 10, 16) + if err != nil { + return nil, "", 0, err + } + port = uint16(portU64) + default: + // Other kinds of qualifiers: ignore them. + // TODO: If this is the parent of a split region, there are two other + // KVs that could be useful: `info:splitA' and `info:splitB'. + // Need to investigate whether we can use those as a hint to update our + // regions_cache with the daughter regions of the split. + } + } + + if reg == nil { + // There was no regioninfo in the row in meta, this is really not + // expected. + err := fmt.Errorf("Meta seems to be broken, there was no regioninfo in %s", + metaRow) + log.Error(err.Error()) + return nil, "", 0, err + } else if port == 0 { + // Either both `host' and `port' are set, or both aren't. 
+ return nil, "", 0, fmt.Errorf("Meta doesn't have a server location in %s", + metaRow) + } + + return reg, host, port, nil +} + +func (c *client) reestablishRegion(reg *regioninfo.Info) { + c.establishRegion(reg, "", 0) +} + +// ensure the region is valid (bind to a regionclient) +// when used to establish, host/port will be valid; and when used to reestablish, host/port will be zero/empty +func (c *client) establishRegion(originalReg *regioninfo.Info, host string, port uint16) { + log.Infof("establishRegion(%v, %s, %d)", originalReg, host, port) + originalReg.Park4Establish() + var err error + reg := originalReg + backoff := backoffStart + + for { + ctx, _ := context.WithTimeout(context.Background(), regionLookupTimeout) + if port != 0 && err == nil { + reg.DupExtInfo(originalReg) + // If this isn't the admin or meta region, check if a client + // for this host/port already exists + if c.clientType != adminClient && reg != c.metaRegionInfo { + client := c.clients.checkForClient(host, port) + if client != nil && client.GetSendErr() == nil { + // There's already a client, add it to the + // cache and mark the new region as available. + c.clients.put(reg, client) + c.regions.put(reg.RegionName, reg) + if !reg.Equals(originalReg) { + // new region + log.Info("originalReg: (%v), reg: (%v), update to c.regions", originalReg, reg) + c.regions.del(originalReg.RegionName) + c.clients.del(originalReg) + } + originalReg.MarkAvailable() + return + } + } + // Make this channel buffered so that if we time out we don't + // block the newRegion goroutine forever. + ch := make(chan newRegResult, 1) + var clientType region.ClientType + if c.clientType == standardClient { + clientType = region.RegionClient + } else { + clientType = region.MasterClient + } + log.Info("newRegionClient(..., clientType(%v), host(%s), port(%d), rpcQueueSize(%d), flushInterval(%v), dialTimeout(%v))", clientType, host, port, c.rpcQueueSize, c.flushInterval, c.dialTimeout) + go newRegionClient(ctx, ch, clientType, host, port, c.rpcQueueSize, c.flushInterval, c.dialTimeout) + + select { + case res := <-ch: + if res.Err == nil { + if c.clientType == adminClient { + c.adminClient = res.Client + } else if reg == c.metaRegionInfo { + c.metaClient = res.Client + } else { + c.clients.put(reg, res.Client) + c.regions.put(reg.RegionName, reg) + if !reg.Equals(originalReg) { + // Here `reg' is guaranteed to be available, so we + // must publish the region->client mapping first, + // because as soon as we add it to the key->region + // mapping here, concurrent readers are gonna want + // to find the client. + log.Info("originalReg: (%v), reg: (%v), update to c.regions", originalReg, reg) + c.regions.del(originalReg.RegionName) + c.clients.del(originalReg) + } + } + originalReg.MarkAvailable() + return + } else { + err = res.Err + } + case <-ctx.Done(): + log.Infof("region lookup timeout for clientType(%d), host(%d), port(%d)", clientType, host, port) + err = ErrDeadline + } + } + if err != nil { + log.Errorf("met error (%v)", err) + if err == TableNotFound { + c.regions.del(originalReg.RegionName) + c.clients.del(originalReg) + originalReg.MarkAvailable() + return + } + // This will be hit if either there was an error locating the + // region, or the region was located but there was an error + // connecting to it. 
+ backoff, err = sleepAndIncreaseBackoff(ctx, backoff) + if err != nil { + continue + } + } + if c.clientType == adminClient { + host, port, err = c.zkLookup(ctx, zk.ResourceTypeMaster) + } else if reg == c.metaRegionInfo { + host, port, err = c.zkLookup(ctx, zk.ResourceTypeMeta) + } else { + reg, host, port, err = c.locateRegion(ctx, originalReg.Table, originalReg.StartKey) + } + } +} + +func sleepAndIncreaseBackoff(ctx context.Context, backoff time.Duration) (time.Duration, error) { + select { + case <-time.After(backoff): + case <-ctx.Done(): + return 0, ErrDeadline + } + // TODO: Revisit how we back off here. + if backoff < 5000*time.Millisecond { + return backoff * 2, nil + } else { + return backoff + 5000*time.Millisecond, nil + } +} + +func newRegionClient(ctx context.Context, ret chan newRegResult, clientType region.ClientType, + host string, port uint16, queueSize int, queueTimeout, dialTimeout time.Duration) { + c, e := region.NewClient(host, port, clientType, queueSize, queueTimeout, dialTimeout) + select { + case ret <- newRegResult{c, e}: + // Hooray! + case <-ctx.Done(): + // We timed out, too bad, nobody expects this client anymore, ditch it. + c.Close() + } +} + +// zkResult contains the result of a ZooKeeper lookup (when we're looking for +// the meta region or the HMaster). +type zkResult struct { + host string + port uint16 + err error +} + +// Asynchronously looks up the meta region or HMaster in ZooKeeper. +func (c *client) zkLookup(ctx context.Context, resourceType int) (string, uint16, error) { + // We make this a buffered channel so that if we stop waiting due to a + // timeout, we won't block the zkLookupSync() that we start in a + // separate goroutine. + reschan := make(chan *zk.ServerInfo, 1) + go c.zkLookupSync(resourceType, reschan) + select { + case res := <-reschan: + if res == nil { + return "", 0, NoMServer + } else { + return res.Host, res.Port, nil + } + case <-ctx.Done(): + return "", 0, ErrDeadline + } +} + +// Synchronously looks up the meta region or HMaster in ZooKeeper. +func (c *client) zkLookupSync(resourceType int, reschan chan<- *zk.ServerInfo) { + // This is guaranteed to never block as the channel is always buffered. + reschan <- c.zkClient.LocateResource(resourceType) +} diff --git a/libs/gohbase/conf/conf.go b/libs/gohbase/conf/conf.go new file mode 100644 index 0000000..c425389 --- /dev/null +++ b/libs/gohbase/conf/conf.go @@ -0,0 +1,34 @@ +package conf + +import "time" + +// Conf RpcQueueSize <= 0 && FlushInterval <= 0 is not allowed +type Conf struct { + ZkRoot string + Zkquorum []string + Master, Meta string + RpcQueueSize int + ZkTimeout, FlushInterval, DialTimeout time.Duration +} + +func NewConf(zkquorum []string, zkRoot, master, meta string, zkTimeout time.Duration, rpcQueueSize int, flushInterval, dialTimeout time.Duration) (res *Conf) { + // set default value + if rpcQueueSize <= 0 && flushInterval <= 0 { + rpcQueueSize = 1 + } + if dialTimeout == 0 { + dialTimeout = 10 * time.Second + } + res = &Conf{ + ZkRoot: zkRoot, + Zkquorum: zkquorum, + Master: master, + Meta: meta, + ZkTimeout: zkTimeout, + RpcQueueSize: rpcQueueSize, + FlushInterval: flushInterval, + DialTimeout: dialTimeout, + } + + return +} diff --git a/libs/gohbase/discovery_test.go b/libs/gohbase/discovery_test.go new file mode 100644 index 0000000..b763538 --- /dev/null +++ b/libs/gohbase/discovery_test.go @@ -0,0 +1,86 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. 
+// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package gohbase + +import ( + "bytes" + "testing" + + "bfs/libs/gohbase/conf" + "bfs/libs/gohbase/pb" + "bfs/libs/gohbase/regioninfo" + "time" +) + +func TestRegionDiscovery(t *testing.T) { + + client := newClient(standardClient, conf.NewConf([]string{"~invalid.quorum"}, "", "", "", 30*time.Second, 0, 0, 0)) + + reg := client.getRegionFromCache([]byte("test"), []byte("theKey")) + if reg != nil { + t.Errorf("Found region %#v even though the cache was empty?!", reg) + } + + // Inject a "test" table with a single region that covers the entire key + // space (both the start and stop keys are empty). + family := []byte("info") + metaRow := &pb.GetResponse{ + Result: &pb.Result{Cell: []*pb.Cell{ + &pb.Cell{ + Row: []byte("test,,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + Family: family, + Qualifier: []byte("regioninfo"), + Value: []byte("PBUF\b\xc4\xcd\xe9\x99\xe0)\x12\x0f\n\adefault\x12\x04test" + + "\x1a\x00\"\x00(\x000\x008\x00"), + }, + &pb.Cell{ + Row: []byte("test,,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + Family: family, + Qualifier: []byte("seqnumDuringOpen"), + Value: []byte("\x00\x00\x00\x00\x00\x00\x00\x02"), + }, + &pb.Cell{ + Row: []byte("test,,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + Family: family, + Qualifier: []byte("server"), + Value: []byte("localhost:50966"), + }, + &pb.Cell{ + Row: []byte("test,,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + Family: family, + Qualifier: []byte("serverstartcode"), + Value: []byte("\x00\x00\x01N\x02\x92R\xb1"), + }, + }}} + + reg, _, _, err := client.parseMetaTableResponse(metaRow) + if err != nil { + t.Fatalf("Failed to discover region: %s", err) + } + client.regions.put(reg.RegionName, reg) + + reg = client.getRegionFromCache([]byte("test"), []byte("theKey")) + if reg == nil { + t.Fatal("Region not found even though we injected it in the cache.") + } + expected := ®ioninfo.Info{ + Table: []byte("test"), + RegionName: []byte("test,,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + StartKey: []byte(""), + StopKey: []byte(""), + } + if !bytes.Equal(reg.Table, expected.Table) || + !bytes.Equal(reg.RegionName, expected.RegionName) || + !bytes.Equal(reg.StartKey, expected.StartKey) || + !bytes.Equal(reg.StopKey, expected.StopKey) { + t.Errorf("Found region %#v \nbut expected %#v", reg, expected) + } + + reg = client.getRegionFromCache([]byte("notfound"), []byte("theKey")) + if reg != nil { + t.Errorf("Found region %#v even though this table doesn't exist", reg) + } +} diff --git a/libs/gohbase/filter/comparator.go b/libs/gohbase/filter/comparator.go new file mode 100644 index 0000000..5846831 --- /dev/null +++ b/libs/gohbase/filter/comparator.go @@ -0,0 +1,229 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package filter + +import ( + "errors" + + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +const comparatorPath = "org.apache.hadoop.hbase.filter." 
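// Illustrative sketch (not authoritative): each comparator in this file wraps
// its protobuf counterpart, and ConstructPBComparator marshals it and sets
// Name to comparatorPath plus the corresponding Java class name so the HBase
// server side can locate the matching class. For example:
//
//	cmp := NewBinaryComparator(NewByteArrayComparable([]byte("v1")))
//	pbCmp, err := cmp.ConstructPBComparator()
//	// on success, *pbCmp.Name == "org.apache.hadoop.hbase.filter.BinaryComparator"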
+ +// BitComparatorBitwiseOp is TODO +type BitComparatorBitwiseOp int32 + +func (o BitComparatorBitwiseOp) isValid() bool { + return o >= 1 && o <= 3 +} + +// Constants are TODO +const ( + BitComparatorAND BitComparatorBitwiseOp = 1 + BitComparatorOR BitComparatorBitwiseOp = 2 + BitComparatorXOR BitComparatorBitwiseOp = 3 +) + +// Ensure our types implement Comparator correctly. +var _ Comparator = (*BinaryComparator)(nil) +var _ Comparator = (*LongComparator)(nil) +var _ Comparator = (*BinaryPrefixComparator)(nil) +var _ Comparator = (*BitComparator)(nil) +var _ Comparator = (*NullComparator)(nil) +var _ Comparator = (*RegexStringComparator)(nil) +var _ Comparator = (*SubstringComparator)(nil) + +// Comparator is TODO +type Comparator interface { + // ConstructPBComparator creates and returns the comparator encoded in a + // pb.Comparator type + ConstructPBComparator() (*pb.Comparator, error) +} + +// ByteArrayComparable is used across many Comparators. +type ByteArrayComparable pb.ByteArrayComparable + +// NewByteArrayComparable is TODO +func NewByteArrayComparable(value []byte) *ByteArrayComparable { + return &ByteArrayComparable{ + Value: value, + } +} + +func (b *ByteArrayComparable) toPB() *pb.ByteArrayComparable { + return (*pb.ByteArrayComparable)(b) +} + +// BinaryComparator is TODO +type BinaryComparator pb.BinaryComparator + +// NewBinaryComparator is TODO +func NewBinaryComparator(comparable *ByteArrayComparable) *BinaryComparator { + return &BinaryComparator{ + Comparable: comparable.toPB(), + } +} + +// ConstructPBComparator is TODO +func (c *BinaryComparator) ConstructPBComparator() (*pb.Comparator, error) { + serializedComparator, err := proto.Marshal((*pb.BinaryComparator)(c)) + if err != nil { + return nil, err + } + comparator := &pb.Comparator{ + Name: proto.String(comparatorPath + "BinaryComparator"), + SerializedComparator: serializedComparator, + } + return comparator, nil +} + +// LongComparator is TODO +type LongComparator pb.LongComparator + +// NewLongComparator is TODO +func NewLongComparator(comparable *ByteArrayComparable) *LongComparator { + return &LongComparator{ + Comparable: comparable.toPB(), + } +} + +// ConstructPBComparator is TODO +func (c *LongComparator) ConstructPBComparator() (*pb.Comparator, error) { + serializedComparator, err := proto.Marshal((*pb.LongComparator)(c)) + if err != nil { + return nil, err + } + comparator := &pb.Comparator{ + Name: proto.String(comparatorPath + "LongComparator"), + SerializedComparator: serializedComparator, + } + return comparator, nil +} + +// BinaryPrefixComparator is TODO +type BinaryPrefixComparator pb.BinaryPrefixComparator + +// NewBinaryPrefixComparator is TODO +func NewBinaryPrefixComparator(comparable *ByteArrayComparable) *BinaryPrefixComparator { + return &BinaryPrefixComparator{ + Comparable: comparable.toPB(), + } +} + +// ConstructPBComparator is TODO +func (c *BinaryPrefixComparator) ConstructPBComparator() (*pb.Comparator, error) { + serializedComparator, err := proto.Marshal((*pb.BinaryPrefixComparator)(c)) + if err != nil { + return nil, err + } + comparator := &pb.Comparator{ + Name: proto.String(comparatorPath + "BinaryPrefixComparator"), + SerializedComparator: serializedComparator, + } + return comparator, nil +} + +// BitComparator is TODO +type BitComparator pb.BitComparator + +// NewBitComparator is TODO +func NewBitComparator(bitwiseOp BitComparatorBitwiseOp, + comparable *ByteArrayComparable) *BitComparator { + op := pb.BitComparator_BitwiseOp(bitwiseOp) + return &BitComparator{ + 
Comparable: comparable.toPB(), + BitwiseOp: &op, + } +} + +// ConstructPBComparator is TODO +func (c *BitComparator) ConstructPBComparator() (*pb.Comparator, error) { + if !BitComparatorBitwiseOp(*c.BitwiseOp).isValid() { + return nil, errors.New("Invalid bitwise operator specified") + } + serializedComparator, err := proto.Marshal((*pb.BitComparator)(c)) + if err != nil { + return nil, err + } + comparator := &pb.Comparator{ + Name: proto.String(comparatorPath + "BitComparator"), + SerializedComparator: serializedComparator, + } + return comparator, nil +} + +// NullComparator is TODO +type NullComparator struct{} + +// NewNullComparator is TODO +func NewNullComparator() NullComparator { + return NullComparator{} +} + +// ConstructPBComparator is TODO +func (c NullComparator) ConstructPBComparator() (*pb.Comparator, error) { + serializedComparator, err := proto.Marshal(&pb.NullComparator{}) + if err != nil { + return nil, err + } + comparator := &pb.Comparator{ + Name: proto.String(comparatorPath + "NullComparator"), + SerializedComparator: serializedComparator, + } + return comparator, nil +} + +// RegexStringComparator is TODO +type RegexStringComparator pb.RegexStringComparator + +// NewRegexStringComparator is TODO +func NewRegexStringComparator(pattern string, patternFlags int32, + charset, engine string) *RegexStringComparator { + return &RegexStringComparator{ + Pattern: proto.String(pattern), + PatternFlags: proto.Int32(patternFlags), + Charset: proto.String(charset), + Engine: proto.String(engine), + } +} + +// ConstructPBComparator is TODO +func (c *RegexStringComparator) ConstructPBComparator() (*pb.Comparator, error) { + serializedComparator, err := proto.Marshal((*pb.RegexStringComparator)(c)) + if err != nil { + return nil, err + } + comparator := &pb.Comparator{ + Name: proto.String(comparatorPath + "RegexStringComparator"), + SerializedComparator: serializedComparator, + } + return comparator, nil +} + +// SubstringComparator is TODO +type SubstringComparator pb.SubstringComparator + +// NewSubstringComparator is TODO +func NewSubstringComparator(substr string) *SubstringComparator { + return &SubstringComparator{ + Substr: proto.String(substr), + } +} + +// ConstructPBComparator is TODO +func (c *SubstringComparator) ConstructPBComparator() (*pb.Comparator, error) { + serializedComparator, err := proto.Marshal((*pb.SubstringComparator)(c)) + if err != nil { + return nil, err + } + comparator := &pb.Comparator{ + Name: proto.String(comparatorPath + "SubstringComparator"), + SerializedComparator: serializedComparator, + } + return comparator, nil +} diff --git a/libs/gohbase/filter/filter.go b/libs/gohbase/filter/filter.go new file mode 100644 index 0000000..9270d8d --- /dev/null +++ b/libs/gohbase/filter/filter.go @@ -0,0 +1,854 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package filter + +import ( + "errors" + + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +const filterPath = "org.apache.hadoop.hbase.filter." 
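+
+// Most filter types below follow the same convention as comparator.go: the type
+// aliases its protobuf message and ConstructPBFilter marshals it under the fully
+// qualified Java class name. A sketch of composing them (every constructor used
+// here is defined further down in this file):
+//
+//	f := NewList(MustPassAll,
+//		NewPrefixFilter([]byte("row_")),
+//		NewKeyOnlyFilter(true))
+//	pbf, err := f.ConstructPBFilter()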
+ +// ListOperator is TODO +type ListOperator int32 + +func (o ListOperator) isValid() bool { + return o >= 1 && o <= 2 +} + +func (o ListOperator) toPB() *pb.FilterList_Operator { + op := pb.FilterList_Operator(o) + return &op +} + +// Constants is TODO +const ( + MustPassAll ListOperator = 1 + MustPassOne ListOperator = 2 +) + +// CompareType is TODO +type CompareType int32 + +func (c CompareType) isValid() bool { + return c >= 0 && c <= 6 +} + +// Constants is TODO +const ( + Less CompareType = 0 + LessOrEqual CompareType = 1 + Equal CompareType = 2 + NotEqual CompareType = 3 + GreaterOrEqual CompareType = 4 + Greater CompareType = 5 + NoOp CompareType = 6 +) + +// Ensure our types implement Filter correctly. +var _ Filter = (*List)(nil) +var _ Filter = (*ColumnCountGetFilter)(nil) +var _ Filter = (*ColumnPaginationFilter)(nil) +var _ Filter = (*ColumnPrefixFilter)(nil) +var _ Filter = (*ColumnRangeFilter)(nil) +var _ Filter = (*CompareFilter)(nil) +var _ Filter = (*DependentColumnFilter)(nil) +var _ Filter = (*FamilyFilter)(nil) +var _ Filter = (*Wrapper)(nil) +var _ Filter = (*FirstKeyOnlyFilter)(nil) +var _ Filter = (*FirstKeyValueMatchingQualifiersFilter)(nil) +var _ Filter = (*FuzzyRowFilter)(nil) +var _ Filter = (*InclusiveStopFilter)(nil) +var _ Filter = (*KeyOnlyFilter)(nil) +var _ Filter = (*MultipleColumnPrefixFilter)(nil) +var _ Filter = (*PageFilter)(nil) +var _ Filter = (*PrefixFilter)(nil) +var _ Filter = (*QualifierFilter)(nil) +var _ Filter = (*RandomRowFilter)(nil) +var _ Filter = (*RowFilter)(nil) +var _ Filter = (*SingleColumnValueFilter)(nil) +var _ Filter = (*SingleColumnValueExcludeFilter)(nil) +var _ Filter = (*SkipFilter)(nil) +var _ Filter = (*TimestampsFilter)(nil) +var _ Filter = (*ValueFilter)(nil) +var _ Filter = (*WhileMatchFilter)(nil) +var _ Filter = (*AllFilter)(nil) +var _ Filter = (*RowRange)(nil) +var _ Filter = (*MultiRowRangeFilter)(nil) + +// Filter is TODO +type Filter interface { + // ConstructPBFilter creates and returns the filter encoded in a pb.Filter type + // - For most filters this just involves creating the special filter object, + // serializing it, and then creating a standard Filter object with the name and + // serialization inside. + // - For FilterLists this requires creating the protobuf FilterList which contains + // an array []*pb.Filter (meaning we have to create, serialize, create all objects + // in that array), serialize the newly created pb.FilterList and then create a + // pb.Filter object containing that new serialization. + ConstructPBFilter() (*pb.Filter, error) +} + +// BytesBytesPair is a type used in FuzzyRowFilter. Want to avoid users having +// to interact directly with the protobuf generated file so exposing here. +type BytesBytesPair pb.BytesBytesPair + +// NewBytesBytesPair is TODO +func NewBytesBytesPair(first []byte, second []byte) *BytesBytesPair { + return &BytesBytesPair{ + First: first, + Second: second, + } +} + +/* + Each filter below has three primary methods/declarations, each of which can be summarized + as follows - + + 1. Type declaration. Create a new type for each filter. A 'Name' field is required but + you can create as many other fields as you like. These are purely local and will be + transcribed into a pb.Filter type by ConstructPBFilter() + 2. Constructor. Given a few parameters create the above type and return it to the callee. + 3. ConstructPBFilter. Take our local representation of a filter object and create the + appropriate pb.Filter object. Return the pb.Filter object. 
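+
+   PrefixFilter, further down, is one of the simplest end-to-end instances of
+   this pattern: a one-field type alias, a one-argument constructor, and a
+   ConstructPBFilter that marshals it under its Java class name.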
+ + You may define any additional methods you like (see FilterList) but be aware that as soon + as the returned object is type casted to a Filter (e.g. appending it to an array of Filters) + it loses the ability to call those additional functions. +*/ + +// List is TODO +type List pb.FilterList + +// NewList is TODO +func NewList(operator ListOperator, filters ...Filter) *List { + f := &List{ + Operator: operator.toPB(), + } + f.AddFilters(filters...) + return f +} + +// AddFilters is TODO +func (f *List) AddFilters(filters ...Filter) { + for _, filter := range filters { + fpb, err := filter.ConstructPBFilter() + if err != nil { + panic(err) + } + f.Filters = append(f.Filters, fpb) + } +} + +// ConstructPBFilter is TODO +func (f *List) ConstructPBFilter() (*pb.Filter, error) { + if !ListOperator(*f.Operator).isValid() { + return nil, errors.New("Invalid operator specified.") + } + + serializedFilter, err := proto.Marshal((*pb.FilterList)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "FilterList"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// ColumnCountGetFilter is TODO +type ColumnCountGetFilter pb.ColumnCountGetFilter + +// NewColumnCountGetFilter is TODO +func NewColumnCountGetFilter(limit int32) *ColumnCountGetFilter { + return &ColumnCountGetFilter{ + Limit: proto.Int32(limit), + } +} + +// ConstructPBFilter is TODO +func (f *ColumnCountGetFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.ColumnCountGetFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "ColumnCountGetFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// ColumnPaginationFilter is TODO +type ColumnPaginationFilter pb.ColumnPaginationFilter + +// NewColumnPaginationFilter is TODO +func NewColumnPaginationFilter(limit, offset int32, columnOffset []byte) *ColumnPaginationFilter { + return &ColumnPaginationFilter{ + Limit: proto.Int32(limit), + Offset: proto.Int32(offset), + ColumnOffset: columnOffset, + } +} + +// ConstructPBFilter is TODO +func (f *ColumnPaginationFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.ColumnPaginationFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "ColumnPaginationFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// ColumnPrefixFilter is TODO +type ColumnPrefixFilter pb.ColumnPrefixFilter + +// NewColumnPrefixFilter is TODO +func NewColumnPrefixFilter(prefix []byte) *ColumnPrefixFilter { + return &ColumnPrefixFilter{ + Prefix: prefix, + } +} + +// ConstructPBFilter is TODO +func (f *ColumnPrefixFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.ColumnPrefixFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "ColumnPrefixFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// ColumnRangeFilter is TODO +type ColumnRangeFilter pb.ColumnRangeFilter + +// NewColumnRangeFilter is TODO +func NewColumnRangeFilter(minColumn, maxColumn []byte, + minColumnInclusive, maxColumnInclusive bool) *ColumnRangeFilter { + return &ColumnRangeFilter{ + MinColumn: minColumn, + MaxColumn: maxColumn, + MinColumnInclusive: proto.Bool(minColumnInclusive), + MaxColumnInclusive: proto.Bool(maxColumnInclusive), + } +} + +// ConstructPBFilter is TODO 
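+// (for ColumnRangeFilter it simply marshals the min/max bounds and their
+// inclusivity flags under the ColumnRangeFilter class name)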
+func (f *ColumnRangeFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.ColumnRangeFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "ColumnRangeFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// CompareFilter is TODO +type CompareFilter pb.CompareFilter + +// NewCompareFilter is TODO +func NewCompareFilter(compareOp CompareType, comparatorObj Comparator) *CompareFilter { + op := pb.CompareType(compareOp) + obj, err := comparatorObj.ConstructPBComparator() + if err != nil { + panic(err) + } + return &CompareFilter{ + CompareOp: &op, + Comparator: obj, + } +} + +// ConstructPBFilter is TODO +func (f *CompareFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.CompareFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "CompareFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// DependentColumnFilter is TODO +type DependentColumnFilter pb.DependentColumnFilter + +// NewDependentColumnFilter is TODO +func NewDependentColumnFilter(compareFilter *CompareFilter, columnFamily, columnQualifier []byte, + dropDependentColumn bool) *DependentColumnFilter { + return &DependentColumnFilter{ + CompareFilter: (*pb.CompareFilter)(compareFilter), + ColumnFamily: columnFamily, + ColumnQualifier: columnQualifier, + DropDependentColumn: proto.Bool(dropDependentColumn), + } +} + +// ConstructPBFilter is TODO +func (f *DependentColumnFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.DependentColumnFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "DependentColumnFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// FamilyFilter is TODO +type FamilyFilter pb.FamilyFilter + +// NewFamilyFilter is TODO +func NewFamilyFilter(compareFilter *CompareFilter) *FamilyFilter { + return &FamilyFilter{ + CompareFilter: (*pb.CompareFilter)(compareFilter), + } +} + +// ConstructPBFilter is TODO +func (f *FamilyFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.FamilyFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "FamilyFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// Wrapper is TODO +type Wrapper pb.FilterWrapper + +// NewWrapper is TODO +func NewWrapper(wrappedFilter Filter) *Wrapper { + f, err := wrappedFilter.ConstructPBFilter() + if err != nil { + panic(err) + } + return &Wrapper{ + Filter: f, + } +} + +// ConstructPBFilter is TODO +func (f *Wrapper) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.FilterWrapper)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "FilterWrapper"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// FirstKeyOnlyFilter is TODO +type FirstKeyOnlyFilter struct{} + +// NewFirstKeyOnlyFilter is TODO +func NewFirstKeyOnlyFilter() FirstKeyOnlyFilter { + return FirstKeyOnlyFilter{} +} + +// ConstructPBFilter is TODO +func (f FirstKeyOnlyFilter) ConstructPBFilter() (*pb.Filter, error) { + return &pb.Filter{ + Name: proto.String(filterPath + "FirstKeyOnlyFilter"), + SerializedFilter: pb.MustMarshal(&pb.FirstKeyOnlyFilter{}), + }, nil +} + +// 
FirstKeyValueMatchingQualifiersFilter is TODO +type FirstKeyValueMatchingQualifiersFilter pb.FirstKeyValueMatchingQualifiersFilter + +// NewFirstKeyValueMatchingQualifiersFilter is TODO +func NewFirstKeyValueMatchingQualifiersFilter( + qualifiers [][]byte) *FirstKeyValueMatchingQualifiersFilter { + return &FirstKeyValueMatchingQualifiersFilter{ + Qualifiers: qualifiers, + } +} + +// ConstructPBFilter is TODO +func (f *FirstKeyValueMatchingQualifiersFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.FirstKeyValueMatchingQualifiersFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "FirstKeyValueMatchingQualifiersFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// FuzzyRowFilter is TODO +type FuzzyRowFilter pb.FuzzyRowFilter + +// NewFuzzyRowFilter is TODO +func NewFuzzyRowFilter(pairs []*BytesBytesPair) *FuzzyRowFilter { + p := make([]*pb.BytesBytesPair, len(pairs)) + for i, pair := range pairs { + p[i] = (*pb.BytesBytesPair)(pair) + } + return &FuzzyRowFilter{ + FuzzyKeysData: p, + } +} + +// ConstructPBFilter is TODO +func (f *FuzzyRowFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.FuzzyRowFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "FuzzyRowFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// InclusiveStopFilter is TODO +type InclusiveStopFilter pb.InclusiveStopFilter + +// NewInclusiveStopFilter is TODO +func NewInclusiveStopFilter(stopRowKey []byte) *InclusiveStopFilter { + return &InclusiveStopFilter{ + StopRowKey: stopRowKey, + } +} + +// ConstructPBFilter is TODO +func (f *InclusiveStopFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.InclusiveStopFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "InclusiveStopFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// KeyOnlyFilter is TODO +type KeyOnlyFilter pb.KeyOnlyFilter + +// NewKeyOnlyFilter is TODO +func NewKeyOnlyFilter(lenAsVal bool) *KeyOnlyFilter { + return &KeyOnlyFilter{ + LenAsVal: proto.Bool(lenAsVal), + } +} + +// ConstructPBFilter is TODO +func (f *KeyOnlyFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.KeyOnlyFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "KeyOnlyFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// MultipleColumnPrefixFilter is TODO +type MultipleColumnPrefixFilter pb.MultipleColumnPrefixFilter + +// NewMultipleColumnPrefixFilter is TODO +func NewMultipleColumnPrefixFilter(sortedPrefixes [][]byte) *MultipleColumnPrefixFilter { + return &MultipleColumnPrefixFilter{ + SortedPrefixes: sortedPrefixes, + } +} + +// ConstructPBFilter is TODO +func (f *MultipleColumnPrefixFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.MultipleColumnPrefixFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "MultipleColumnPrefixFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// PageFilter is TODO +type PageFilter pb.PageFilter + +// NewPageFilter is TODO +func NewPageFilter(pageSize int64) *PageFilter { + return &PageFilter{ + PageSize: 
proto.Int64(pageSize), + } +} + +// ConstructPBFilter is TODO +func (f *PageFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.PageFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "PageFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// PrefixFilter is TODO +type PrefixFilter pb.PrefixFilter + +// NewPrefixFilter is TODO +func NewPrefixFilter(prefix []byte) *PrefixFilter { + return &PrefixFilter{ + Prefix: prefix, + } +} + +// ConstructPBFilter is TODO +func (f *PrefixFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.PrefixFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "PrefixFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// QualifierFilter is TODO +type QualifierFilter pb.QualifierFilter + +// NewQualifierFilter is TODO +func NewQualifierFilter(compareFilter *CompareFilter) *QualifierFilter { + return &QualifierFilter{ + CompareFilter: (*pb.CompareFilter)(compareFilter), + } +} + +// ConstructPBFilter is TODO +func (f *QualifierFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.QualifierFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "QualifierFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// RandomRowFilter is TODO +type RandomRowFilter pb.RandomRowFilter + +// NewRandomRowFilter is TODO +func NewRandomRowFilter(chance float32) *RandomRowFilter { + return &RandomRowFilter{ + Chance: proto.Float32(chance), + } +} + +// ConstructPBFilter is TODO +func (f *RandomRowFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.RandomRowFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "RandomRowFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// RowFilter is TODO +type RowFilter pb.RowFilter + +// NewRowFilter is TODO +func NewRowFilter(compareFilter *CompareFilter) *RowFilter { + return &RowFilter{ + CompareFilter: (*pb.CompareFilter)(compareFilter), + } +} + +// ConstructPBFilter is TODO +func (f *RowFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.RowFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "RowFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// SingleColumnValueFilter is TODO +type SingleColumnValueFilter pb.SingleColumnValueFilter + +// NewSingleColumnValueFilter is TODO +func NewSingleColumnValueFilter(columnFamily, columnQualifier []byte, compareOp CompareType, + comparatorObj Comparator, filterIfMissing, latestVersionOnly bool) *SingleColumnValueFilter { + obj, err := comparatorObj.ConstructPBComparator() + if err != nil { + panic(err) + } + return &SingleColumnValueFilter{ + ColumnFamily: columnFamily, + ColumnQualifier: columnQualifier, + CompareOp: (*pb.CompareType)(&compareOp), + Comparator: obj, + FilterIfMissing: proto.Bool(filterIfMissing), + LatestVersionOnly: proto.Bool(latestVersionOnly), + } +} + +// ConstructPB is TODO +func (f *SingleColumnValueFilter) ConstructPB() (*pb.SingleColumnValueFilter, error) { + if !CompareType(*f.CompareOp).isValid() { + return nil, errors.New("Invalid compare operation 
specified.") + } + + return (*pb.SingleColumnValueFilter)(f), nil +} + +// ConstructPBFilter is TODO +func (f *SingleColumnValueFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.SingleColumnValueFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "SingleColumnValueFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// SingleColumnValueExcludeFilter is TODO +type SingleColumnValueExcludeFilter pb.SingleColumnValueExcludeFilter + +// NewSingleColumnValueExcludeFilter is TODO +func NewSingleColumnValueExcludeFilter( + filter *SingleColumnValueFilter) *SingleColumnValueExcludeFilter { + return &SingleColumnValueExcludeFilter{ + SingleColumnValueFilter: (*pb.SingleColumnValueFilter)(filter), + } +} + +// ConstructPBFilter is TODO +func (f *SingleColumnValueExcludeFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.SingleColumnValueExcludeFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "SingleColumnValueExcludeFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// SkipFilter is TODO +type SkipFilter pb.SkipFilter + +// NewSkipFilter is TODO +func NewSkipFilter(skippingFilter Filter) *SkipFilter { + f, err := skippingFilter.ConstructPBFilter() + if err != nil { + panic(err) + } + return &SkipFilter{ + Filter: f, + } +} + +// ConstructPBFilter is TODO +func (f *SkipFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.SkipFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "SkipFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// TimestampsFilter is TODO +type TimestampsFilter pb.TimestampsFilter + +// NewTimestampsFilter is TODO +func NewTimestampsFilter(timestamps []int64) *TimestampsFilter { + return &TimestampsFilter{ + Timestamps: timestamps, + } +} + +// ConstructPBFilter is TODO +func (f *TimestampsFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.TimestampsFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "TimestampsFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// ValueFilter is TODO +type ValueFilter pb.ValueFilter + +// NewValueFilter is TODO +func NewValueFilter(compareFilter *CompareFilter) *ValueFilter { + return &ValueFilter{ + CompareFilter: (*pb.CompareFilter)(compareFilter), + } +} + +// ConstructPBFilter is TODO +func (f *ValueFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.ValueFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "ValueFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// WhileMatchFilter is TODO +type WhileMatchFilter pb.WhileMatchFilter + +// NewWhileMatchFilter is TODO +func NewWhileMatchFilter(matchingFilter Filter) *WhileMatchFilter { + f, err := matchingFilter.ConstructPBFilter() + if err != nil { + panic(err) + } + return &WhileMatchFilter{ + Filter: f, + } +} + +// ConstructPBFilter is TODO +func (f *WhileMatchFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.WhileMatchFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: 
proto.String(filterPath + "WhileMatchFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// AllFilter is TODO +type AllFilter struct{} + +// NewAllFilter is TODO +func NewAllFilter() AllFilter { + return AllFilter{} +} + +// ConstructPBFilter is TODO +func (f *AllFilter) ConstructPBFilter() (*pb.Filter, error) { + return &pb.Filter{ + Name: proto.String(filterPath + "FilterAllFilter"), + SerializedFilter: pb.MustMarshal(&pb.FilterAllFilter{}), + }, nil +} + +// RowRange is TODO +type RowRange pb.RowRange + +// NewRowRange is TODO +func NewRowRange(startRow, stopRow []byte, startRowInclusive, stopRowInclusive bool) *RowRange { + return &RowRange{ + StartRow: startRow, + StartRowInclusive: proto.Bool(startRowInclusive), + StopRow: stopRow, + StopRowInclusive: proto.Bool(stopRowInclusive), + } +} + +// ConstructPBFilter is TODO +func (f *RowRange) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.RowRange)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "RowRange"), + SerializedFilter: serializedFilter, + } + return filter, nil +} + +// MultiRowRangeFilter is TODO +type MultiRowRangeFilter pb.MultiRowRangeFilter + +// NewMultiRowRangeFilter is TODO +func NewMultiRowRangeFilter(rowRangeList []*RowRange) *MultiRowRangeFilter { + rangeList := make([]*pb.RowRange, len(rowRangeList)) + for i, rr := range rowRangeList { + rangeList[i] = (*pb.RowRange)(rr) + } + return &MultiRowRangeFilter{ + RowRangeList: rangeList, + } +} + +// ConstructPBFilter is TODO +func (f *MultiRowRangeFilter) ConstructPBFilter() (*pb.Filter, error) { + serializedFilter, err := proto.Marshal((*pb.MultiRowRangeFilter)(f)) + if err != nil { + return nil, err + } + filter := &pb.Filter{ + Name: proto.String(filterPath + "MultiRowRangeFilter"), + SerializedFilter: serializedFilter, + } + return filter, nil +} diff --git a/libs/gohbase/hbase/hbase.go b/libs/gohbase/hbase/hbase.go new file mode 100644 index 0000000..bb8b068 --- /dev/null +++ b/libs/gohbase/hbase/hbase.go @@ -0,0 +1,13 @@ +package hbase + +type HBaseCell struct { + Table string + RowKey string + Family string + Qualifier string + Value string +} + +func (c *HBaseCell) Valid() bool { + return c != nil && c.Table != "" && c.Family != "" && c.Qualifier != "" && c.Value != "" +} diff --git a/libs/gohbase/hrpc/call.go b/libs/gohbase/hrpc/call.go new file mode 100644 index 0000000..dc03e99 --- /dev/null +++ b/libs/gohbase/hrpc/call.go @@ -0,0 +1,219 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. 
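+
+// call.go defines the pieces shared by every RPC in this package: the Call
+// interface and CallType constants, the embedded base struct (table, key,
+// region, result channel, context), RPCResult, the Families/Filters functional
+// options applied by the request constructors, and the Cell/Result types used
+// to expose responses.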
+ +package hrpc + +import ( + "context" + "errors" + "sync" + "unsafe" + + "bfs/libs/gohbase/filter" + "bfs/libs/gohbase/pb" + "bfs/libs/gohbase/regioninfo" + + "github.com/golang/protobuf/proto" +) + +type CallType struct { + Value int + Name string +} + +type TimeRange struct { + From uint64 + To uint64 +} + +func (tr TimeRange) Valid() bool { + return tr.From > 0 && tr.To > 0 +} + +var ( + CallTypeCall CallType = CallType{0, "Call"} + CallTypeGet CallType = CallType{1, "Get"} + CallTypeScan CallType = CallType{2, "Scan"} + // mutate: >= 100 + CallTypeAppend CallType = CallType{100, "Append"} + CallTypeIncrement CallType = CallType{101, "Increment"} + CallTypePut CallType = CallType{102, "Put"} + CallTypeDelete CallType = CallType{103, "Delete"} +) + +var ( + NotGeneralCallErr error = errors.New("not general call") +) + +func (ct CallType) IsMutate() bool { + return ct.Value >= 100 +} + +func (ct CallType) GeneralCall() bool { + return ct != CallTypeScan // && ct != CallTypeIncrement +} + +// Call represents an HBase RPC call. +type Call interface { + Table() []byte + + Key() []byte + + GetRegion() *regioninfo.Info + SetRegion(region *regioninfo.Info) + GetName() string + Serialize() ([]byte, error) + // Returns a newly created (default-state) protobuf in which to store the + // response of this call. + NewResponse() proto.Message + + GetResultChan() chan RPCResult + + GetContext() context.Context + + SetFamilies(fam map[string][]string) error + SetFilter(ft filter.Filter) error + + CallType() CallType +} + +type Calls struct { + Calls []Call + Ctx context.Context +} + +func NewCalls(cs []Call, ctx context.Context) *Calls { + if ctx == nil { + ctx = context.Background() + } + return &Calls{ + Calls: cs, + Ctx: ctx, + } +} + +// RPCResult is struct that will contain both the resulting message from an RPC +// call, and any errors that may have occurred related to making the RPC call. +type RPCResult struct { + Msg proto.Message + Error error +} + +type base struct { + table []byte + + key []byte + + region *regioninfo.Info + + // Protects access to resultch. + resultchLock sync.Mutex + + resultch chan RPCResult + + ctx context.Context + + ct CallType +} + +func (b *base) CallType() CallType { + return b.ct +} + +func (b *base) GetContext() context.Context { + return b.ctx +} + +func (b *base) GetRegion() *regioninfo.Info { + return b.region +} + +func (b *base) SetRegion(region *regioninfo.Info) { + b.region = region +} + +func (b *base) regionSpecifier() *pb.RegionSpecifier { + regionType := pb.RegionSpecifier_REGION_NAME + return &pb.RegionSpecifier{ + Type: ®ionType, + Value: []byte(b.region.RegionName), + } +} + +func applyOptions(call Call, options ...func(Call) error) error { + for _, option := range options { + err := option(call) + if err != nil { + return err + } + } + return nil +} + +func (b *base) Table() []byte { + return b.table +} + +func (b *base) Key() []byte { + return b.key +} + +func (b *base) GetResultChan() chan RPCResult { + b.resultchLock.Lock() + if b.resultch == nil { + // Buffered channels, so that if a writer thread sends a message (or + // reports an error) after the deadline it doesn't block due to the + // requesting thread having moved on. + b.resultch = make(chan RPCResult, 1) + } + b.resultchLock.Unlock() + return b.resultch +} + +// Families is used as a parameter for request creation. Adds families constraint to a request. 
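+// For example:
+//
+//	Families(map[string][]string{"info": {"c1", "c2"}})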
+func Families(fam map[string][]string) func(Call) error { + return func(g Call) error { + return g.SetFamilies(fam) + } +} + +// Filters is used as a parameter for request creation. Adds filters constraint to a request. +func Filters(fl filter.Filter) func(Call) error { + return func(g Call) error { + return g.SetFilter(fl) + } +} + +// Cell is the smallest level of granularity in returned results. +// Represents a single cell in HBase (a row will have one cell for every qualifier). +type Cell pb.Cell + +// Result holds a slice of Cells as well as miscellaneous information about the response. +type Result struct { + Cells []*Cell + Exists *bool + Stale *bool + // Any other variables we want to include. +} + +// ToLocalResult takes a protobuf Result type and converts it to our own +// Result type in constant time. +func ToLocalResult(pbr *pb.Result) *Result { + if pbr == nil { + return &Result{} + } + return &Result{ + // Should all be O(1) operations. + Cells: toLocalCells(pbr), + Exists: pbr.Exists, + Stale: pbr.Stale, + } +} + +func toLocalCells(pbr *pb.Result) []*Cell { + return *(*[]*Cell)(unsafe.Pointer(pbr)) +} + +// We can now define any helper functions on Result that we want. diff --git a/libs/gohbase/hrpc/create.go b/libs/gohbase/hrpc/create.go new file mode 100644 index 0000000..b3d5602 --- /dev/null +++ b/libs/gohbase/hrpc/create.go @@ -0,0 +1,66 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package hrpc + +import ( + "context" + + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +// CreateTable represents a CreateTable HBase call +type CreateTable struct { + tableOp + + columns []string +} + +// NewCreateTable creates a new CreateTable request that will create the given +// table in HBase. For use by the admin client. +func NewCreateTable(ctx context.Context, table []byte, columns []string) *CreateTable { + ct := &CreateTable{ + tableOp: tableOp{base{ + table: table, + ctx: ctx, + }}, + columns: columns, + } + return ct +} + +// GetName returns the name of this RPC call. +func (ct *CreateTable) GetName() string { + return "CreateTable" +} + +// Serialize will convert this HBase call into a slice of bytes to be written to +// the network +func (ct *CreateTable) Serialize() ([]byte, error) { + pbcols := make([]*pb.ColumnFamilySchema, len(ct.columns)) + for i, col := range ct.columns { + pbcols[i] = &pb.ColumnFamilySchema{ + Name: []byte(col), + } + } + ctable := &pb.CreateTableRequest{ + TableSchema: &pb.TableSchema{ + TableName: &pb.TableName{ + Namespace: []byte("default"), + Qualifier: ct.table, + }, + ColumnFamilies: pbcols, + }, + } + return proto.Marshal(ctable) +} + +// NewResponse creates an empty protobuf message to read the response of this +// RPC. +func (ct *CreateTable) NewResponse() proto.Message { + return &pb.CreateTableResponse{} +} diff --git a/libs/gohbase/hrpc/delete.go b/libs/gohbase/hrpc/delete.go new file mode 100644 index 0000000..a923a6a --- /dev/null +++ b/libs/gohbase/hrpc/delete.go @@ -0,0 +1,54 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. 
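+
+// Note that, like the other admin RPCs in this package (CreateTable,
+// DisableTable, EnableTable), the request always targets the "default"
+// namespace.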
+ +package hrpc + +import ( + "context" + + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +// DeleteTable represents a DeleteTable HBase call +type DeleteTable struct { + tableOp +} + +// NewDeleteTable creates a new DeleteTable request that will delete the +// given table in HBase. For use by the admin client. +func NewDeleteTable(ctx context.Context, table []byte) *DeleteTable { + dt := &DeleteTable{ + tableOp{base{ + table: table, + ctx: ctx, + }}, + } + return dt +} + +// GetName returns the name of this RPC call. +func (dt *DeleteTable) GetName() string { + return "DeleteTable" +} + +// Serialize will convert this HBase call into a slice of bytes to be written to +// the network +func (dt *DeleteTable) Serialize() ([]byte, error) { + dtreq := &pb.DeleteTableRequest{ + TableName: &pb.TableName{ + Namespace: []byte("default"), + Qualifier: dt.table, + }, + } + return proto.Marshal(dtreq) +} + +// NewResponse creates an empty protobuf message to read the response of this +// RPC. +func (dt *DeleteTable) NewResponse() proto.Message { + return &pb.DeleteTableResponse{} +} diff --git a/libs/gohbase/hrpc/disable.go b/libs/gohbase/hrpc/disable.go new file mode 100644 index 0000000..d2a2163 --- /dev/null +++ b/libs/gohbase/hrpc/disable.go @@ -0,0 +1,54 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package hrpc + +import ( + "context" + + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +// DisableTable represents a DisableTable HBase call +type DisableTable struct { + tableOp +} + +// NewDisableTable creates a new DisableTable request that will disable the +// given table in HBase. For use by the admin client. +func NewDisableTable(ctx context.Context, table []byte) *DisableTable { + dt := &DisableTable{ + tableOp{base{ + table: table, + ctx: ctx, + }}, + } + return dt +} + +// GetName returns the name of this RPC call. +func (dt *DisableTable) GetName() string { + return "DisableTable" +} + +// Serialize will convert this HBase call into a slice of bytes to be written to +// the network +func (dt *DisableTable) Serialize() ([]byte, error) { + dtreq := &pb.DisableTableRequest{ + TableName: &pb.TableName{ + Namespace: []byte("default"), + Qualifier: dt.table, + }, + } + return proto.Marshal(dtreq) +} + +// NewResponse creates an empty protobuf message to read the response of this +// RPC. +func (dt *DisableTable) NewResponse() proto.Message { + return &pb.DisableTableResponse{} +} diff --git a/libs/gohbase/hrpc/enable.go b/libs/gohbase/hrpc/enable.go new file mode 100644 index 0000000..7ed1b46 --- /dev/null +++ b/libs/gohbase/hrpc/enable.go @@ -0,0 +1,54 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package hrpc + +import ( + "context" + + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +// EnableTable represents a EnableTable HBase call +type EnableTable struct { + tableOp +} + +// NewEnableTable creates a new EnableTable request that will enable the +// given table in HBase. For use by the admin client. 
+func NewEnableTable(ctx context.Context, table []byte) *EnableTable { + et := &EnableTable{ + tableOp{base{ + table: table, + ctx: ctx, + }}, + } + return et +} + +// GetName returns the name of this RPC call. +func (et *EnableTable) GetName() string { + return "EnableTable" +} + +// Serialize will convert this HBase call into a slice of bytes to be written to +// the network +func (et *EnableTable) Serialize() ([]byte, error) { + dtreq := &pb.EnableTableRequest{ + TableName: &pb.TableName{ + Namespace: []byte("default"), + Qualifier: et.table, + }, + } + return proto.Marshal(dtreq) +} + +// NewResponse creates an empty protobuf message to read the response of this +// RPC. +func (et *EnableTable) NewResponse() proto.Message { + return &pb.EnableTableResponse{} +} diff --git a/libs/gohbase/hrpc/get.go b/libs/gohbase/hrpc/get.go new file mode 100644 index 0000000..51cc73a --- /dev/null +++ b/libs/gohbase/hrpc/get.go @@ -0,0 +1,175 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package hrpc + +import ( + "context" + + "bfs/libs/gohbase/filter" + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +// Get represents a Get HBase call. +type Get struct { + base + + families map[string][]string //Maps a column family to a list of qualifiers + + // Return the row for the given key or, if this key doesn't exist, + // whichever key happens to be right before. + closestBefore bool + + // Don't return any KeyValue, just say whether the row key exists in the + // table or not. + existsOnly bool + + timeRange TimeRange + + filters filter.Filter +} + +// NewGet creates a new Get request for the given table and row key. +func NewGet(ctx context.Context, table, key []byte, + options ...func(Call) error) (*Get, error) { + g := &Get{ + base: base{ + table: table, + key: key, + ctx: ctx, + ct: CallTypeGet, + }, + } + err := applyOptions(g, options...) + if err != nil { + return nil, err + } + return g, nil +} + +// NewGetStr creates a new Get request for the given table and row key. +func NewGetStr(ctx context.Context, table, key string, + options ...func(Call) error) (*Get, error) { + return NewGet(ctx, []byte(table), []byte(key), options...) +} + +// NewGetBefore creates a new Get request for the row with a key equal to or +// immediately less than the given key, in the given table. +func NewGetBefore(ctx context.Context, table, key []byte, + options ...func(Call) error) (*Get, error) { + g := &Get{ + base: base{ + table: table, + key: key, + ctx: ctx, + ct: CallTypeGet, + }, + closestBefore: true, + } + err := applyOptions(g, options...) + if err != nil { + return nil, err + } + return g, nil +} + +func (g *Get) SetTimeRange(tr TimeRange) { + g.timeRange = tr +} + +// GetName returns the name of this RPC call. +func (g *Get) GetName() string { + return "Get" +} + +// GetFilter returns the filter of this Get request. +func (g *Get) GetFilter() filter.Filter { + return g.filters +} + +// GetFamilies returns the families to retrieve with this Get request. +func (g *Get) GetFamilies() map[string][]string { + return g.families +} + +// SetFilter sets filter to use for this Get request. +func (g *Get) SetFilter(f filter.Filter) error { + g.filters = f + // TODO: Validation? + return nil +} + +// SetFamilies sets families to retrieve with this Get request. 
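+// It is usually set through the Families call option when constructing the
+// request, e.g. NewGetStr(ctx, table, key, Families(fam)).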
+func (g *Get) SetFamilies(f map[string][]string) error { + g.families = f + // TODO: Validation? + return nil +} + +// ExistsOnly makes this Get request not return any KeyValue, merely whether +// or not the given row key exists in the table. +func (g *Get) ExistsOnly() error { + g.existsOnly = true + return nil +} + +// Serialize serializes this RPC into a buffer. +func (g *Get) Serialize() ([]byte, error) { + get := &pb.GetRequest{ + Region: g.regionSpecifier(), + Get: &pb.Get{ + Row: g.key, + Column: familiesToColumn(g.families), + }, + } + if g.timeRange.Valid() { + from, to := g.timeRange.From, g.timeRange.To + get.Get.TimeRange = &pb.TimeRange{ + From: &from, + To: &to, + } + } + if g.closestBefore { + get.Get.ClosestRowBefore = proto.Bool(true) + } + if g.existsOnly { + get.Get.ExistenceOnly = proto.Bool(true) + } + if g.filters != nil { + pbFilter, err := g.filters.ConstructPBFilter() + if err != nil { + return nil, err + } + get.Get.Filter = pbFilter + } + return proto.Marshal(get) +} + +// NewResponse creates an empty protobuf message to read the response of this +// RPC. +func (g *Get) NewResponse() proto.Message { + return &pb.GetResponse{} +} + +// familiesToColumn takes a map from strings to lists of strings, and converts +// them into protobuf Columns +func familiesToColumn(families map[string][]string) []*pb.Column { + cols := make([]*pb.Column, len(families)) + counter := 0 + for family, qualifiers := range families { + bytequals := make([][]byte, len(qualifiers)) + for i, qual := range qualifiers { + bytequals[i] = []byte(qual) + } + cols[counter] = &pb.Column{ + Family: []byte(family), + Qualifier: bytequals, + } + counter++ + } + return cols +} diff --git a/libs/gohbase/hrpc/hrpc_test.go b/libs/gohbase/hrpc/hrpc_test.go new file mode 100644 index 0000000..2a5f6d4 --- /dev/null +++ b/libs/gohbase/hrpc/hrpc_test.go @@ -0,0 +1,205 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. 
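+
+// These tests exercise request construction (the Get/Scan call options) and
+// benchmark the two Mutate serialization paths: explicit nested
+// family/qualifier maps versus reflection over "hbase"-tagged struct fields.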
+ +package hrpc + +import ( + "bytes" + "context" + "reflect" + "testing" + + "bfs/libs/gohbase/filter" + "bfs/libs/gohbase/regioninfo" +) + +func TestNewGet(t *testing.T) { + ctx := context.Background() + table := "test" + tableb := []byte(table) + key := "45" + keyb := []byte(key) + fam := make(map[string][]string) + fam["info"] = []string{"c1"} + filter1 := filter.NewFirstKeyOnlyFilter() + get, err := NewGet(ctx, tableb, keyb) + if err != nil || !confirmGetAttributes(get, ctx, tableb, keyb, nil, nil) { + t.Errorf("Get1 didn't set attributes correctly.") + } + get, err = NewGetStr(ctx, table, key) + if err != nil || !confirmGetAttributes(get, ctx, tableb, keyb, nil, nil) { + t.Errorf("Get2 didn't set attributes correctly.") + } + get, err = NewGet(ctx, tableb, keyb, Families(fam)) + if err != nil || !confirmGetAttributes(get, ctx, tableb, keyb, fam, nil) { + t.Errorf("Get3 didn't set attributes correctly.") + } + get, err = NewGet(ctx, tableb, keyb, Filters(filter1)) + if err != nil || !confirmGetAttributes(get, ctx, tableb, keyb, nil, filter1) { + t.Errorf("Get4 didn't set attributes correctly.") + } + get, err = NewGet(ctx, tableb, keyb, Filters(filter1), Families(fam)) + if err != nil || !confirmGetAttributes(get, ctx, tableb, keyb, fam, filter1) { + t.Errorf("Get5 didn't set attributes correctly.") + } + get, err = NewGet(ctx, tableb, keyb, Filters(filter1)) + err = Families(fam)(get) + if err != nil || !confirmGetAttributes(get, ctx, tableb, keyb, fam, filter1) { + t.Errorf("Get6 didn't set attributes correctly.") + } + +} + +func confirmGetAttributes(g *Get, ctx context.Context, table, key []byte, + fam map[string][]string, filter1 filter.Filter) bool { + if g.GetContext() != ctx || + !bytes.Equal(g.Table(), table) || + !bytes.Equal(g.Key(), key) || + !reflect.DeepEqual(g.GetFamilies(), fam) || + reflect.TypeOf(g.GetFilter()) != reflect.TypeOf(filter1) { + return false + } + return true +} + +func TestNewScan(t *testing.T) { + ctx := context.Background() + table := "test" + tableb := []byte(table) + fam := make(map[string][]string) + fam["info"] = []string{"c1"} + filter1 := filter.NewFirstKeyOnlyFilter() + start := "0" + stop := "100" + startb := []byte("0") + stopb := []byte("100") + scan, err := NewScan(ctx, tableb) + if err != nil || !confirmScanAttributes(scan, ctx, tableb, nil, nil, nil, nil) { + t.Errorf("Scan1 didn't set attributes correctly.") + } + scan, err = NewScanRange(ctx, tableb, startb, stopb) + if err != nil || !confirmScanAttributes(scan, ctx, tableb, startb, stopb, nil, nil) { + t.Errorf("Scan2 didn't set attributes correctly.") + } + scan, err = NewScanStr(ctx, table) + if err != nil || !confirmScanAttributes(scan, ctx, tableb, nil, nil, nil, nil) { + t.Errorf("Scan3 didn't set attributes correctly.") + } + scan, err = NewScanRangeStr(ctx, table, start, stop) + if err != nil || !confirmScanAttributes(scan, ctx, tableb, startb, stopb, nil, nil) { + t.Errorf("Scan4 didn't set attributes correctly.") + } + scan, err = NewScanRange(ctx, tableb, startb, stopb, Families(fam), Filters(filter1)) + if err != nil || !confirmScanAttributes(scan, ctx, tableb, startb, stopb, fam, filter1) { + t.Errorf("Scan5 didn't set attributes correctly.") + } + scan, err = NewScan(ctx, tableb, Filters(filter1), Families(fam)) + if err != nil || !confirmScanAttributes(scan, ctx, tableb, nil, nil, fam, filter1) { + t.Errorf("Scan6 didn't set attributes correctly.") + } +} + +func confirmScanAttributes(s *Scan, ctx context.Context, table, start, stop []byte, + fam map[string][]string, 
filter1 filter.Filter) bool { + if s.GetContext() != ctx || + !bytes.Equal(s.Table(), table) || + !bytes.Equal(s.GetStartRow(), start) || + !bytes.Equal(s.GetStopRow(), stop) || + !reflect.DeepEqual(s.GetFamilies(), fam) || + reflect.TypeOf(s.GetFilter()) != reflect.TypeOf(filter1) { + return false + } + return true +} + +func BenchmarkMutateSerializeWithNestedMaps(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + data := map[string]map[string][]byte{ + "cf": map[string][]byte{ + "a": []byte{10}, + "b": []byte{20}, + "c": []byte{30, 0}, + "d": []byte{40, 0, 0, 0}, + "e": []byte{50, 0, 0, 0, 0, 0, 0, 0}, + "f": []byte{60}, + "g": []byte{70}, + "h": []byte{80, 0}, + "i": []byte{90, 0, 0, 0}, + "j": []byte{100, 0, 0, 0, 0, 0, 0, 0}, + "k": []byte{0, 0, 220, 66}, + "l": []byte{0, 0, 0, 0, 0, 0, 94, 64}, + "m": []byte{0, 0, 2, 67, 0, 0, 0, 0}, + "n": []byte{0, 0, 0, 0, 0, 128, 97, 64, 0, 0, 0, 0, 0, 0, 0, 0}, + "o": []byte{150}, + "p": []byte{4, 8, 15, 26, 23, 42}, + "q": []byte{1, 1, 3, 5, 8, 13, 21, 34, 55}, + "r": []byte("This is a test string."), + }, + } + mutate, err := NewPutStr(context.Background(), "", "", data) + if err != nil { + b.Errorf("Error creating mutate: %v", err) + } + mutate.SetRegion(®ioninfo.Info{}) + mutate.Serialize() + } +} + +func BenchmarkMutateSerializeWithReflection(b *testing.B) { + b.ReportAllocs() + + type teststr struct { + AnInt int `hbase:"cf:a"` + AnInt8 int8 `hbase:"cf:b"` + AnInt16 int16 `hbase:"cf:c"` + AnInt32 int32 `hbase:"cf:d"` + AnInt64 int64 `hbase:"cf:e"` + AnUInt uint `hbase:"cf:f"` + AnUInt8 uint8 `hbase:"cf:g"` + AnUInt16 uint16 `hbase:"cf:h"` + AnUInt32 uint32 `hbase:"cf:i"` + AnUInt64 uint64 `hbase:"cf:j"` + AFloat32 float32 `hbase:"cf:k"` + AFloat64 float64 `hbase:"cf:l"` + AComplex64 complex64 `hbase:"cf:m"` + AComplex128 complex128 `hbase:"cf:n"` + APointer *int `hbase:"cf:o"` + AnArray [6]uint8 `hbase:"cf:p"` + ASlice []uint8 `hbase:"cf:q"` + AString string `hbase:"cf:r"` + } + + number := 150 + for i := 0; i < b.N; i++ { + str := teststr{ + AnInt: 10, + AnInt8: 20, + AnInt16: 30, + AnInt32: 40, + AnInt64: 50, + AnUInt: 60, + AnUInt8: 70, + AnUInt16: 80, + AnUInt32: 90, + AnUInt64: 100, + AFloat32: 110, + AFloat64: 120, + AComplex64: 130, + AComplex128: 140, + APointer: &number, + AnArray: [6]uint8{4, 8, 15, 26, 23, 42}, + ASlice: []uint8{1, 1, 3, 5, 8, 13, 21, 34, 55}, + AString: "This is a test string.", + } + mutate, err := NewPutStrRef(context.Background(), "", "", str) + if err != nil { + b.Errorf("Error creating mutate: %v", err) + } + mutate.SetRegion(®ioninfo.Info{}) + mutate.Serialize() + } +} diff --git a/libs/gohbase/hrpc/mutate.go b/libs/gohbase/hrpc/mutate.go new file mode 100644 index 0000000..cdb025e --- /dev/null +++ b/libs/gohbase/hrpc/mutate.go @@ -0,0 +1,489 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. 
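+
+// Mutate covers the four write-style RPCs (Put, Delete, Append, Increment).
+// Values can be given either as a map of family -> qualifier -> bytes or as a
+// struct whose fields carry "hbase" tags (the *Ref constructors). A sketch of
+// the map form using the constructors below:
+//
+//	values := map[string]map[string][]byte{"cf": {"qual": []byte("v")}}
+//	put, err := NewPutStr(context.Background(), "table", "key", values)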
+ +package hrpc + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "reflect" + "strings" + "unsafe" + + "bfs/libs/gohbase/filter" + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +var ( + // ErrNotAStruct is returned by any of the *Ref functions when something + // other than a struct is passed in to their data argument + ErrNotAStruct = errors.New("data must be a struct") + + // ErrUnsupportedUints is returned when this message is serialized and uints + // are unsupported on your platform (this will probably never happen) + ErrUnsupportedUints = errors.New("uints are unsupported on your platform") + + // ErrUnsupportedInts is returned when this message is serialized and ints + // are unsupported on your platform (this will probably never happen) + ErrUnsupportedInts = errors.New("ints are unsupported on your platform") +) + +// Mutate represents a mutation on HBase. +type Mutate struct { + base + + row *[]byte + mutationType pb.MutationProto_MutationType //*int32 + + // values is a map of column families to a map of column qualifiers to bytes + values map[string]map[string][]byte + + // data is a struct passed in that has fields tagged to represent HBase + // columns + data interface{} +} + +// baseMutate returns a Mutate struct without the mutationType filled in. +func baseMutate(ctx context.Context, table, key []byte, + values map[string]map[string][]byte, data interface{}, callType CallType) *Mutate { + return &Mutate{ + base: base{ + table: table, + key: key, + ctx: ctx, + ct: callType, + }, + values: values, + data: data, + } +} + +// NewPut creates a new Mutation request to insert the given +// family-column-values in the given row key of the given table. +func NewPut(ctx context.Context, table, key []byte, + values map[string]map[string][]byte) (*Mutate, error) { + m := baseMutate(ctx, table, key, values, nil, CallTypePut) + m.mutationType = pb.MutationProto_PUT + return m, nil +} + +// NewPutStr creates a new Mutation request to insert the given +// family-column-values in the given row key of the given table. +func NewPutStr(ctx context.Context, table, key string, + values map[string]map[string][]byte) (*Mutate, error) { + return NewPut(ctx, []byte(table), []byte(key), values) +} + +// NewPutStrRef creates a new Mutation request to insert the given +// data structure in the given row key of the given table. The `data' +// argument must be a string with fields defined using the "hbase" tag. +func NewPutStrRef(ctx context.Context, table, key string, data interface{}) (*Mutate, error) { + if !isAStruct(data) { + return nil, ErrNotAStruct + } + m := baseMutate(ctx, []byte(table), []byte(key), nil, data, CallTypePut) + m.mutationType = pb.MutationProto_PUT + return m, nil +} + +// NewDel creates a new Mutation request to delete the given +// family-column-values from the given row key of the given table. +func NewDel(ctx context.Context, table, key []byte, + values map[string]map[string][]byte) (*Mutate, error) { + m := baseMutate(ctx, table, key, values, nil, CallTypeDelete) + m.mutationType = pb.MutationProto_DELETE + return m, nil +} + +// NewDelStr creates a new Mutation request to delete the given +// family-column-values from the given row key of the given table. 
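+// Every listed qualifier is serialized with the DELETE_MULTIPLE_VERSIONS
+// delete type.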
+func NewDelStr(ctx context.Context, table, key string, + values map[string]map[string][]byte) (*Mutate, error) { + return NewDel(ctx, []byte(table), []byte(key), values) +} + +// NewDelStrRef creates a new Mutation request to delete the given +// data structure from the given row key of the given table. The `data' +// argument must be a string with fields defined using the "hbase" tag. +func NewDelStrRef(ctx context.Context, table, key string, data interface{}) (*Mutate, error) { + if !isAStruct(data) { + return nil, ErrNotAStruct + } + m := baseMutate(ctx, []byte(table), []byte(key), nil, data, CallTypeDelete) + m.mutationType = pb.MutationProto_DELETE + return m, nil +} + +func NewApp(ctx context.Context, table, key []byte, + values map[string]map[string][]byte) (*Mutate, error) { + m := baseMutate(ctx, table, []byte(key), values, nil, CallTypeAppend) + m.mutationType = pb.MutationProto_APPEND + return m, nil +} + +// NewAppStr creates a new Mutation request to append the given +// family-column-values into the existing cells in HBase (or create them if +// needed), in given row key of the given table. +func NewAppStr(ctx context.Context, table, key string, + values map[string]map[string][]byte) (*Mutate, error) { + return NewApp(ctx, []byte(table), []byte(key), values) +} + +// NewAppStrRef creates a new Mutation request that will append the given values +// to their existing values in HBase under the given table and key. +func NewAppStrRef(ctx context.Context, table, key string, data interface{}) (*Mutate, error) { + if !isAStruct(data) { + return nil, ErrNotAStruct + } + m := baseMutate(ctx, []byte(table), []byte(key), nil, data, CallTypeAppend) + m.mutationType = pb.MutationProto_APPEND + return m, nil +} + +// NewIncStrSingle creates a new Mutation request that will increment the given value +// by amount in HBase under the given table, key, family and qualifier. +func NewIncStrSingle(ctx context.Context, table, key string, family string, + qualifier string, amount int64) (*Mutate, error) { + + buf := new(bytes.Buffer) + err := binary.Write(buf, binary.BigEndian, amount) + if err != nil { + return nil, fmt.Errorf("binary.Write failed: %s", err) + } + + value := map[string]map[string][]byte{family: map[string][]byte{qualifier: buf.Bytes()}} + return NewIncStr(ctx, table, key, value) +} + +func NewInc(ctx context.Context, table, key []byte, + values map[string]map[string][]byte) (*Mutate, error) { + m := baseMutate(ctx, table, key, values, nil, CallTypeIncrement) + m.mutationType = pb.MutationProto_INCREMENT + return m, nil +} + +// NewIncStr creates a new Mutation request that will increment the given values +// in HBase under the given table and key. +func NewIncStr(ctx context.Context, table, key string, + values map[string]map[string][]byte) (*Mutate, error) { + return NewInc(ctx, []byte(table), []byte(key), values) +} + +// NewIncStrRef creates a new Mutation request that will increment the given values +// in HBase under the given table and key. +func NewIncStrRef(ctx context.Context, table, key string, data interface{}) (*Mutate, error) { + if !isAStruct(data) { + return nil, ErrNotAStruct + } + m := baseMutate(ctx, []byte(table), []byte(key), nil, data, CallTypeIncrement) + m.mutationType = pb.MutationProto_INCREMENT + return m, nil +} + +// GetName returns the name of this RPC call. 
+func (m *Mutate) GetName() string { + return "Mutate" +} + +// Serialize converts this mutate object into a protobuf message suitable for +// sending to an HBase server +func (m *Mutate) Serialize() ([]byte, error) { + if m.data == nil { + return m.serialize() + } + return m.serializeWithReflect() +} + +// serialize is a helper function for Serialize. It is used when there is a +// map[string]map[string][]byte to be serialized. +func (m *Mutate) serialize() ([]byte, error) { + // We need to convert everything in the values field + // to a protobuf ColumnValue + bytevalues := make([]*pb.MutationProto_ColumnValue, len(m.values)) + i := 0 + for k, v := range m.values { + qualvals := make([]*pb.MutationProto_ColumnValue_QualifierValue, len(v)) + j := 0 + // And likewise, each item in each column needs to be converted to a + // protobuf QualifierValue + for k1, v1 := range v { + qualvals[j] = &pb.MutationProto_ColumnValue_QualifierValue{ + Qualifier: []byte(k1), + Value: v1, + } + if m.mutationType == pb.MutationProto_DELETE { + tmp := pb.MutationProto_DELETE_MULTIPLE_VERSIONS + qualvals[j].DeleteType = &tmp + } + j++ + } + bytevalues[i] = &pb.MutationProto_ColumnValue{ + Family: []byte(k), + QualifierValue: qualvals, + } + i++ + } + mutate := &pb.MutateRequest{ + Region: m.regionSpecifier(), + Mutation: &pb.MutationProto{ + Row: m.key, + MutateType: &m.mutationType, + ColumnValue: bytevalues, + }, + } + return proto.Marshal(mutate) +} + +// serializeWithReflect is a helper function for Serialize. It is used when +// there is a struct with tagged fields to be serialized. +func (m *Mutate) serializeWithReflect() ([]byte, error) { + typeOf := reflect.TypeOf(m.data) + valueOf := reflect.Indirect(reflect.ValueOf(m.data)) + + columns := make(map[string][]*pb.MutationProto_ColumnValue_QualifierValue) + + for i := 0; i < typeOf.NumField(); i++ { + field := typeOf.Field(i) + if field.PkgPath != "" { + // This is an unexported field of the struct, so we're going to + // ignore it + continue + } + + tagval := field.Tag.Get("hbase") + if tagval == "" { + // If the tag is empty, we're going to ignore this field + continue + } + cnames := strings.SplitN(tagval, ":", 2) + if len(cnames) != 2 { + // If the tag doesn't contain a colon, it's set improperly + return nil, fmt.Errorf("Invalid column family and column qualifier: \"%s\"", cnames) + } + cfamily := cnames[0] + cqualifier := cnames[1] + + binaryValue, err := valueToBytes(valueOf.Field(i)) + if err != nil { + return nil, err + } + + qualVal := &pb.MutationProto_ColumnValue_QualifierValue{ + Qualifier: []byte(cqualifier), + Value: binaryValue, + } + + if m.mutationType == pb.MutationProto_DELETE { + tmp := pb.MutationProto_DELETE_MULTIPLE_VERSIONS + qualVal.DeleteType = &tmp + } + columns[cfamily] = append(columns[cfamily], qualVal) + } + + pbcolumns := make([]*pb.MutationProto_ColumnValue, 0, len(columns)) + for k, v := range columns { + colval := &pb.MutationProto_ColumnValue{ + Family: []byte(k), + QualifierValue: v, + } + pbcolumns = append(pbcolumns, colval) + + } + mutate := &pb.MutateRequest{ + Region: m.regionSpecifier(), + Mutation: &pb.MutationProto{ + Row: m.key, + MutateType: &m.mutationType, + ColumnValue: pbcolumns, + }, + } + return proto.Marshal(mutate) +} + +// valueToBytes will convert a given value from the reflect package into its +// underlying bytes +func valueToBytes(val reflect.Value) ([]byte, error) { + switch val.Kind() { + case reflect.Bool: + if val.Bool() { + return []byte{1}, nil + } + return []byte{0}, nil + + case 
reflect.Uint: + switch unsafe.Sizeof(unsafe.Pointer(val.UnsafeAddr())) { + case 8: + var x uint8 + return valueToBytes(val.Convert(reflect.TypeOf(x))) + case 16: + var x uint16 + return valueToBytes(val.Convert(reflect.TypeOf(x))) + case 32: + var x uint32 + return valueToBytes(val.Convert(reflect.TypeOf(x))) + case 64: + var x uint64 + return valueToBytes(val.Convert(reflect.TypeOf(x))) + default: + return nil, ErrUnsupportedUints + } + + case reflect.Int: + switch unsafe.Sizeof(unsafe.Pointer(val.UnsafeAddr())) { + case 8: + var x uint8 + return valueToBytes(val.Convert(reflect.TypeOf(x))) + case 16: + var x uint16 + return valueToBytes(val.Convert(reflect.TypeOf(x))) + case 32: + var x uint32 + return valueToBytes(val.Convert(reflect.TypeOf(x))) + case 64: + var x uint64 + return valueToBytes(val.Convert(reflect.TypeOf(x))) + default: + return nil, ErrUnsupportedInts + } + + case reflect.Int8: + var x int8 + x = val.Interface().(int8) + memory := (*(*[1]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + case reflect.Uint8: + var x uint8 + x = val.Interface().(uint8) + memory := (*(*[1]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + + case reflect.Int16: + var x int16 + x = val.Interface().(int16) + memory := (*(*[2]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + case reflect.Uint16: + var x uint16 + x = val.Interface().(uint16) + memory := (*(*[2]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + + case reflect.Int32: + var x int32 + x = val.Interface().(int32) + memory := (*(*[4]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + case reflect.Uint32: + var x uint32 + x = val.Interface().(uint32) + memory := (*(*[4]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + case reflect.Float32: + var x float32 + x = val.Interface().(float32) + memory := (*(*[4]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + + case reflect.Int64: + var x int64 + x = val.Interface().(int64) + memory := (*(*[8]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + case reflect.Uint64: + var x uint64 + x = val.Interface().(uint64) + memory := (*(*[8]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + case reflect.Float64: + var x float64 + x = val.Interface().(float64) + memory := (*(*[8]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + case reflect.Complex64: + var x complex64 + x = val.Interface().(complex64) + memory := (*(*[8]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + + case reflect.Complex128: + var x complex128 + x = val.Interface().(complex128) + memory := (*(*[16]byte)(unsafe.Pointer(&x)))[:] + return copyOf(memory), nil + + case reflect.Ptr: + return valueToBytes(val.Elem()) + + case reflect.Array, reflect.Slice: + if val.Len() == 0 { + return []byte{}, nil + } + kind := val.Index(0).Kind() + if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { + // We won't be able to deserialize this later into the correct types, since + // arrays/slices/strings don't have a defined size. + return nil, fmt.Errorf("Slices and arrays of type %s is unsupported", + val.Index(0).Type().Name()) + } + var allbytes []byte + for i := 0; i < val.Len(); i++ { + morebytes, err := valueToBytes(val.Index(i)) + if err != nil { + return nil, err + } + allbytes = append(allbytes, morebytes...) 
+ } + return allbytes, nil + + case reflect.String: + return []byte(val.String()), nil + + // Unhandled types, left here for easy reference + //case reflect.Invalid: + //case reflect.Chan: + //case reflect.Func: + //case reflect.Interface: + //case reflect.Struct: + //case reflect.Map: + //case reflect.Uintptr: + //case reflect.UnsafePointer: + } + return nil, fmt.Errorf("Unsupported type %s, %d", val.Type().Name(), val.Kind()) +} + +func copyOf(memory []byte) []byte { + memcpy := make([]byte, len(memory)) + copy(memcpy, memory) + return memcpy +} + +func isAStruct(data interface{}) bool { + return reflect.TypeOf(data).Kind() == reflect.Struct +} + +// NewResponse creates an empty protobuf message to read the response of this +// RPC. +func (m *Mutate) NewResponse() proto.Message { + return &pb.MutateResponse{} +} + +// SetFilter always returns an error when used on Mutate objects. Do not use. +// Exists solely so Mutate can implement the Call interface. +func (m *Mutate) SetFilter(ft filter.Filter) error { + // Not allowed. Throw an error + return errors.New("Cannot set filter on mutate operation.") +} + +// SetFamilies always returns an error when used on Mutate objects. Do not use. +// Exists solely so Mutate can implement the Call interface. +func (m *Mutate) SetFamilies(fam map[string][]string) error { + // Not allowed. Throw an error + return errors.New("Cannot set families on mutate operation.") +} diff --git a/libs/gohbase/hrpc/scan.go b/libs/gohbase/hrpc/scan.go new file mode 100644 index 0000000..20de448 --- /dev/null +++ b/libs/gohbase/hrpc/scan.go @@ -0,0 +1,224 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package hrpc + +import ( + "context" + + "bfs/libs/gohbase/filter" + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +// Scan represents a scanner on an HBase table. +type Scan struct { + base + + // Maps a column family to a list of qualifiers + families map[string][]string + + closeScanner bool + + startRow []byte + stopRow []byte + + timeRange TimeRange + + scannerID *uint64 + + caching int + limit int + + filters filter.Filter +} + +// NewScan creates a scanner for the given table. +func NewScan(ctx context.Context, table []byte, options ...func(Call) error) (*Scan, error) { + scan := &Scan{ + base: base{ + table: table, + ctx: ctx, + }, + closeScanner: false, + } + err := applyOptions(scan, options...) + if err != nil { + return nil, err + } + return scan, nil +} + +// NewScanRange creates a scanner for the given table and key range. +// The range is half-open, i.e. [startRow; stopRow[ -- stopRow is not +// included in the range. +func NewScanRange(ctx context.Context, table, startRow, stopRow []byte, + options ...func(Call) error) (*Scan, error) { + scan := &Scan{ + base: base{ + table: table, + key: startRow, + ctx: ctx, + }, + closeScanner: false, + startRow: startRow, + stopRow: stopRow, + } + err := applyOptions(scan, options...) + if err != nil { + return nil, err + } + return scan, nil +} + +// NewScanStr creates a scanner for the given table. +func NewScanStr(ctx context.Context, table string, options ...func(Call) error) (*Scan, error) { + return NewScan(ctx, []byte(table), options...) +} + +// NewScanRangeStr creates a scanner for the given table and key range. +// The range is half-open, i.e. [startRow; stopRow[ -- stopRow is not +// included in the range. 
+func NewScanRangeStr(ctx context.Context, table, startRow, stopRow string, + options ...func(Call) error) (*Scan, error) { + return NewScanRange(ctx, []byte(table), []byte(startRow), []byte(stopRow), options...) +} + +// NewScanFromID creates a new Scan request that will return additional +// results from the given scanner ID. This is an internal method, users +// are not expected to deal with scanner IDs. +func NewScanFromID(ctx context.Context, table []byte, + scannerID uint64, startRow []byte) *Scan { + return &Scan{ + base: base{ + table: []byte(table), + key: []byte(startRow), + ctx: ctx, + }, + scannerID: &scannerID, + closeScanner: false, + } +} + +// NewCloseFromID creates a new Scan request that will close the scanner for +// the given scanner ID. This is an internal method, users are not expected +// to deal with scanner IDs. +func NewCloseFromID(ctx context.Context, table []byte, + scannerID uint64, startRow []byte) *Scan { + return &Scan{ + base: base{ + table: []byte(table), + key: []byte(startRow), + ctx: ctx, + }, + scannerID: &scannerID, + closeScanner: true, + } +} + +func (s *Scan) SetTimeRange(tr TimeRange) { + s.timeRange = tr +} + +// GetName returns the name of this RPC call. +func (s *Scan) GetName() string { + return "Scan" +} + +// GetStopRow returns the end key (exclusive) of this scanner. +func (s *Scan) GetStopRow() []byte { + return s.stopRow +} + +// GetStartRow returns the start key (inclusive) of this scanner. +func (s *Scan) GetStartRow() []byte { + return s.startRow +} + +// GetFamilies returns the set families covered by this scanner. +// If no families are specified then all the families are scanned. +func (s *Scan) GetFamilies() map[string][]string { + return s.families +} + +// GetRegionStop returns the stop key of the region currently being scanned. +// This is an internal method, end users are not expected to use it. +func (s *Scan) GetRegionStop() []byte { + return s.region.StopKey +} + +// GetFilter returns the filter set on this scanner. +func (s *Scan) GetFilter() filter.Filter { + return s.filters +} + +// Serialize converts this Scan into a serialized protobuf message ready +// to be sent to an HBase node. +func (s *Scan) Serialize() ([]byte, error) { + scan := &pb.ScanRequest{ + Region: s.regionSpecifier(), + CloseScanner: &s.closeScanner, + } + if s.caching > 0 { + scan.NumberOfRows = proto.Uint32(uint32(s.caching)) + } else { + scan.NumberOfRows = proto.Uint32(20) + } + if s.scannerID == nil { + scan.Scan = &pb.Scan{ + Column: familiesToColumn(s.families), + StartRow: s.startRow, + StopRow: s.stopRow, + } + if s.timeRange.Valid() { + from, to := s.timeRange.From, s.timeRange.To + scan.Scan.TimeRange = &pb.TimeRange{ + From: &from, + To: &to, + } + } + if s.filters != nil { + pbFilter, err := s.filters.ConstructPBFilter() + if err != nil { + return nil, err + } + scan.Scan.Filter = pbFilter + } + } else { + scan.ScannerId = s.scannerID + } + return proto.Marshal(scan) +} + +// NewResponse creates an empty protobuf message to read the response +// of this RPC. +func (s *Scan) NewResponse() proto.Message { + return &pb.ScanResponse{} +} + +// SetFamilies sets the families covered by this scanner. +func (s *Scan) SetFamilies(fam map[string][]string) error { + s.families = fam + return nil +} + +// SetFilter sets the request's filter. 
+func (s *Scan) SetFilter(ft filter.Filter) error { + s.filters = ft + return nil +} + +func (s *Scan) SetCaching(caching int) { + s.caching = caching +} + +func (s *Scan) SetLimit(limit int) { + s.limit = limit +} + +func (s *Scan) Limit() int { + return s.limit +} diff --git a/libs/gohbase/hrpc/tableop.go b/libs/gohbase/hrpc/tableop.go new file mode 100644 index 0000000..754db3a --- /dev/null +++ b/libs/gohbase/hrpc/tableop.go @@ -0,0 +1,29 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package hrpc + +import ( + "errors" + + "bfs/libs/gohbase/filter" +) + +// tableOp represents an administrative operation on a table. +type tableOp struct { + base +} + +// SetFilter always returns an error. +func (to *tableOp) SetFilter(filter.Filter) error { + // Doesn't make sense on this kind of RPC. + return errors.New("Cannot set filter on admin operations.") +} + +// SetFamilies always returns an error. +func (to *tableOp) SetFamilies(map[string][]string) error { + // Doesn't make sense on this kind of RPC. + return errors.New("Cannot set families on admin operations.") +} diff --git a/libs/gohbase/integration_test.go b/libs/gohbase/integration_test.go new file mode 100644 index 0000000..4021078 --- /dev/null +++ b/libs/gohbase/integration_test.go @@ -0,0 +1,592 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +// +build integration + +package gohbase_test + +import ( + "bytes" + "context" + "flag" + "fmt" + "os" + "sync" + "testing" + "time" + + "bfs/libs/gohbase" + "bfs/libs/gohbase/hrpc" + "bfs/libs/gohbase/test" +) + +var host = flag.String("HBase Host", "localhost", "The location where HBase is running") + +const table = "test1" + +func TestMain(m *testing.M) { + err := test.CreateTable(*host, table, []string{"cf", "cf2"}) + if err != nil { + panic(err) + } + res := m.Run() + err = test.DeleteTable(*host, table) + if err != nil { + panic(err) + } + + os.Exit(res) +} + +func TestGet(t *testing.T) { + key := "row1" + val := []byte("1") + headers := map[string][]string{"cf": nil} + if host == nil { + t.Fatal("Host is not set!") + } + + c := gohbase.NewClient(*host) + err := insertKeyValue(c, key, "cf", val) + if err != nil { + t.Errorf("Put returned an error: %v", err) + } + + get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers)) + if err != nil { + t.Fatalf("Failed to create Get request: %s", err) + } + rsp, err := c.Get(get) + if err != nil { + t.Errorf("Get returned an error: %v", err) + } + rsp_value := rsp.Cells[0].Value + if !bytes.Equal(rsp_value, val) { + t.Errorf("Get returned an incorrect result. 
Expected: %v, Got: %v", + val, rsp_value) + } + + get.ExistsOnly() + rsp, err = c.Get(get) + if err != nil { + t.Errorf("Get returned an error: %v", err) + } else if !*rsp.Exists { + t.Error("Get claimed that our row didn't exist") + } + + ctx, _ := context.WithTimeout(context.Background(), 0) + get, err = hrpc.NewGetStr(ctx, table, key, hrpc.Families(headers)) + if err != nil { + t.Fatalf("Failed to create Get request: %s", err) + } + _, err = c.Get(get) + if err != gohbase.ErrDeadline { + t.Errorf("Get ignored the deadline") + } +} + +func TestGetDoesntExist(t *testing.T) { + key := "row1.5" + c := gohbase.NewClient(*host) + headers := map[string][]string{"cf": nil} + get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers)) + rsp, err := c.Get(get) + if err != nil { + t.Errorf("Get returned an error: %v", err) + } else if results := len(rsp.Cells); results != 0 { + t.Errorf("Get expected 0 cells. Received: %d", results) + } + + get.ExistsOnly() + rsp, err = c.Get(get) + if err != nil { + t.Errorf("Get returned an error: %v", err) + } else if *rsp.Exists { + t.Error("Get claimed that our non-existent row exists") + } +} + +func TestGetBadColumnFamily(t *testing.T) { + key := "row1.625" + c := gohbase.NewClient(*host) + err := insertKeyValue(c, key, "cf", []byte("Bad!")) + if err != nil { + t.Errorf("Put returned an error: %v", err) + } + families := map[string][]string{"badcf": nil} + get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(families)) + rsp, err := c.Get(get) + if err == nil { + t.Errorf("Get didn't return an error! (It should have)") + } + if rsp != nil { + t.Errorf("Get expected no result. Received: %v", rsp) + } +} + +func TestGetMultipleCells(t *testing.T) { + key := "row1.75" + c := gohbase.NewClient(*host, gohbase.FlushInterval(time.Millisecond*2)) + err := insertKeyValue(c, key, "cf", []byte("cf")) + if err != nil { + t.Errorf("Put returned an error: %v", err) + } + err = insertKeyValue(c, key, "cf2", []byte("cf2")) + if err != nil { + t.Errorf("Put returned an error: %v", err) + } + + families := map[string][]string{"cf": nil, "cf2": nil} + get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(families)) + rsp, err := c.Get(get) + cells := rsp.Cells + num_results := len(cells) + if num_results != 2 { + t.Errorf("Get expected 2 cells. Received: %d", num_results) + } + for _, cell := range cells { + if !bytes.Equal(cell.Family, cell.Value) { + t.Errorf("Get returned an incorrect result. 
Expected: %v, Received: %v", + cell.Family, cell.Value) + } + } +} + +func TestPut(t *testing.T) { + key := "row2" + values := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("1")}} + if host == nil { + t.Fatal("Host is not set!") + } + c := gohbase.NewClient(*host) + putRequest, err := hrpc.NewPutStr(context.Background(), table, key, values) + if err != nil { + t.Errorf("NewPutStr returned an error: %v", err) + } + _, err = c.Put(putRequest) + if err != nil { + t.Errorf("Put returned an error: %v", err) + } + + ctx, _ := context.WithTimeout(context.Background(), 0) + putRequest, err = hrpc.NewPutStr(ctx, table, key, values) + _, err = c.Put(putRequest) + if err != gohbase.ErrDeadline { + t.Errorf("Put ignored the deadline") + } +} + +func TestPutReflection(t *testing.T) { + key := "row2.25" + number := 150 + data := struct { + AnInt int `hbase:"cf:a"` + AnInt8 int8 `hbase:"cf:b"` + AnInt16 int16 `hbase:"cf:c"` + AnInt32 int32 `hbase:"cf:d"` + AnInt64 int64 `hbase:"cf:e"` + AnUInt uint `hbase:"cf:f"` + AnUInt8 uint8 `hbase:"cf:g"` + AnUInt16 uint16 `hbase:"cf:h"` + AnUInt32 uint32 `hbase:"cf:i"` + AnUInt64 uint64 `hbase:"cf:j"` + AFloat32 float32 `hbase:"cf:k"` + AFloat64 float64 `hbase:"cf:l"` + AComplex64 complex64 `hbase:"cf:m"` + AComplex128 complex128 `hbase:"cf:n"` + APointer *int `hbase:"cf:o"` + AnArray [6]uint8 `hbase:"cf:p"` + ASlice []uint8 `hbase:"cf:q"` + AString string `hbase:"cf:r"` + }{ + AnInt: 10, + AnInt8: 20, + AnInt16: 30, + AnInt32: 40, + AnInt64: 50, + AnUInt: 60, + AnUInt8: 70, + AnUInt16: 80, + AnUInt32: 90, + AnUInt64: 100, + AFloat32: 110, + AFloat64: 120, + AComplex64: 130, + AComplex128: 140, + APointer: &number, + AnArray: [6]uint8{4, 8, 15, 26, 23, 42}, + ASlice: []uint8{1, 1, 3, 5, 8, 13, 21, 34, 55}, + AString: "This is a test string.", + } + + if host == nil { + t.Fatal("Host is not set!") + } + + c := gohbase.NewClient(*host) + putRequest, err := hrpc.NewPutStrRef(context.Background(), table, key, data) + if err != nil { + t.Errorf("NewPutStrRef returned an error: %v", err) + } + + _, err = c.Put(putRequest) + if err != nil { + t.Errorf("Put returned an error: %v", err) + } + + headers := map[string][]string{"cf": nil} + get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers)) + if err != nil { + t.Fatalf("Failed to create Get request: %s", err) + } + rsp, err := c.Get(get) + if err != nil { + t.Errorf("Get returned an error: %v", err) + } + + expected := map[string][]byte{ + "a": []byte{10}, + "b": []byte{20}, + "c": []byte{30, 0}, + "d": []byte{40, 0, 0, 0}, + "e": []byte{50, 0, 0, 0, 0, 0, 0, 0}, + "f": []byte{60}, + "g": []byte{70}, + "h": []byte{80, 0}, + "i": []byte{90, 0, 0, 0}, + "j": []byte{100, 0, 0, 0, 0, 0, 0, 0}, + "k": []byte{0, 0, 220, 66}, + "l": []byte{0, 0, 0, 0, 0, 0, 94, 64}, + "m": []byte{0, 0, 2, 67, 0, 0, 0, 0}, + "n": []byte{0, 0, 0, 0, 0, 128, 97, 64, 0, 0, 0, 0, 0, 0, 0, 0}, + "o": []byte{150}, + "p": []byte{4, 8, 15, 26, 23, 42}, + "q": []byte{1, 1, 3, 5, 8, 13, 21, 34, 55}, + "r": []byte("This is a test string."), + } + + for _, cell := range rsp.Cells { + want, ok := expected[string(cell.Qualifier)] + if !ok { + t.Errorf("Unexpected qualifier: %q in %#v", cell.Qualifier, rsp) + } else if !bytes.Equal(cell.Value, want) { + t.Errorf("qualifier %q didn't match: wanted %q, but got %q", + cell.Qualifier, want, cell.Value) + } + } + +} + +func TestPutMultipleCells(t *testing.T) { + key := "row2.5" + values := map[string]map[string][]byte{"cf": map[string][]byte{}, "cf2": 
map[string][]byte{}} + values["cf"]["a"] = []byte("a") + values["cf"]["b"] = []byte("b") + values["cf2"]["a"] = []byte("a") + c := gohbase.NewClient(*host) + putRequest, err := hrpc.NewPutStr(context.Background(), table, key, values) + _, err = c.Put(putRequest) + if err != nil { + t.Errorf("Put returned an error: %v", err) + } + families := map[string][]string{"cf": nil, "cf2": nil} + get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(families)) + rsp, err := c.Get(get) + if err != nil { + t.Errorf("Get returned an error: %v", err) + } + cells := rsp.Cells + if len(cells) != 3 { + t.Errorf("Get expected 3 cells. Received: %d", len(cells)) + } + for _, cell := range cells { + if !bytes.Equal(cell.Qualifier, cell.Value) { + t.Errorf("Get returned an incorrect result. Expected: %v, Received: %v", + cell.Qualifier, cell.Value) + } + } + +} + +func TestMultiplePutsGetsSequentially(t *testing.T) { + const num_ops = 100 + keyPrefix := "row3" + headers := map[string][]string{"cf": nil} + c := gohbase.NewClient(*host, gohbase.FlushInterval(time.Millisecond)) + err := performNPuts(keyPrefix, num_ops) + if err != nil { + t.Errorf("Put returned an error: %v", err) + } + for i := num_ops - 1; i >= 0; i-- { + key := keyPrefix + fmt.Sprintf("%d", i) + get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers)) + rsp, err := c.Get(get) + if err != nil { + t.Errorf("Get returned an error: %v", err) + } + if len(rsp.Cells) != 1 { + t.Errorf("Incorrect number of cells returned by Get: %d", len(rsp.Cells)) + } + rsp_value := rsp.Cells[0].Value + if !bytes.Equal(rsp_value, []byte(fmt.Sprintf("%d", i))) { + t.Errorf("Get returned an incorrect result. Expected: %v, Got: %v", + []byte(fmt.Sprintf("%d", i)), rsp_value) + } + } +} + +func TestMultiplePutsGetsParallel(t *testing.T) { + const num_ops = 1000 + keyPrefix := "row3.5" + headers := map[string][]string{"cf": nil} + c := gohbase.NewClient(*host) + // TODO: Currently have to CheckTable before initiating the N requests + // otherwise we face runaway client generation - one for each request. + c.CheckTable(context.Background(), table) + var wg sync.WaitGroup + for i := 0; i < num_ops; i++ { + wg.Add(1) + go func(client gohbase.Client, key string) { + defer wg.Done() + err := insertKeyValue(client, key, "cf", []byte(key)) + if err != nil { + t.Errorf("(Parallel) Put returned an error: %v", err) + } + }(c, keyPrefix+fmt.Sprintf("%d", i)) + } + wg.Wait() + // All puts are complete. Now do the same for gets. 
+	for i := num_ops - 1; i >= 0; i-- {
+		wg.Add(1)
+		go func(client gohbase.Client, key string) {
+			defer wg.Done()
+			get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
+			rsp, err := c.Get(get)
+			if err != nil {
+				t.Errorf("(Parallel) Get returned an error: %v", err)
+			} else {
+				rsp_value := rsp.Cells[0].Value
+				if !bytes.Equal(rsp_value, []byte(key)) {
+					t.Errorf("Get returned an incorrect result.")
+				}
+			}
+		}(c, keyPrefix+fmt.Sprintf("%d", i))
+	}
+	wg.Wait()
+}
+
+func TestTimestampIncreasing(t *testing.T) {
+	key := "row4"
+	c := gohbase.NewClient(*host)
+	var oldTime uint64 = 0
+	headers := map[string][]string{"cf": nil}
+	for i := 0; i < 10; i++ {
+		insertKeyValue(c, key, "cf", []byte("1"))
+		get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
+		rsp, err := c.Get(get)
+		if err != nil {
+			t.Errorf("Get returned an error: %v", err)
+			break
+		}
+		newTime := *rsp.Cells[0].Timestamp
+		if newTime <= oldTime {
+			t.Errorf("Timestamps are not increasing. Old Time: %v, New Time: %v",
+				oldTime, newTime)
+		}
+		oldTime = newTime
+		time.Sleep(time.Millisecond)
+	}
+}
+
+func TestAppend(t *testing.T) {
+	key := "row7"
+	c := gohbase.NewClient(*host)
+	// Inserting "Hello"
+	insertErr := insertKeyValue(c, key, "cf", []byte("Hello"))
+	if insertErr != nil {
+		t.Errorf("Put returned an error: %v", insertErr)
+	}
+	// Appending " my name is Dog."
+	values := map[string]map[string][]byte{"cf": map[string][]byte{}}
+	values["cf"]["a"] = []byte(" my name is Dog.")
+	appRequest, err := hrpc.NewAppStr(context.Background(), table, key, values)
+	appRsp, err := c.Append(appRequest)
+	if err != nil {
+		t.Errorf("Append returned an error: %v", err)
+	}
+	if appRsp == nil {
+		t.Errorf("Append didn't return an updated value.")
+	}
+	// Verifying new result is "Hello my name is Dog."
+	result := appRsp.Cells[0].Value
+	if !bytes.Equal([]byte("Hello my name is Dog."), result) {
+		t.Errorf("Append returned an incorrect result. Expected: %v, Received: %v",
+			[]byte("Hello my name is Dog."), result)
+	}
+
+	// Make sure the change was actually committed.
+	headers := map[string][]string{"cf": nil}
+	get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
+	rsp, err := c.Get(get)
+	cells := rsp.Cells
+	if len(cells) != 1 {
+		t.Errorf("Get expected 1 cell. Received: %d", len(cells))
+	}
+	result = cells[0].Value
+	if !bytes.Equal([]byte("Hello my name is Dog."), result) {
+		t.Errorf("Append returned an incorrect result. Expected: %v, Received: %v",
+			[]byte("Hello my name is Dog."), result)
+	}
+}
+
+func TestIncrement(t *testing.T) {
+	c := gohbase.NewClient(*host)
+	key := "row102"
+
+	// test increment
+	incRequest, err := hrpc.NewIncStrSingle(context.Background(), table, key, "cf", "a", 1)
+	result, err := c.Increment(incRequest)
+	if err != nil {
+		t.Fatalf("Increment returned an error: %v", err)
+	}
+
+	if result != 1 {
+		t.Fatalf("Increment's result is %d, want 1", result)
+	}
+
+	incRequest, err = hrpc.NewIncStrSingle(context.Background(), table, key, "cf", "a", 5)
+	result, err = c.Increment(incRequest)
+	if err != nil {
+		t.Fatalf("Increment returned an error: %v", err)
+	}
+
+	if result != 6 {
+		t.Fatalf("Increment's result is %d, want 6", result)
+	}
+}
+
+func TestIncrementParallel(t *testing.T) {
+	c := gohbase.NewClient(*host)
+	key := "row102.5"
+
+	// TODO: Currently have to CheckTable before initiating N requests
+	// otherwise we face runaway client generation - one for each request. 
+	c.CheckTable(context.Background(), table)
+
+	numParallel := 10
+
+	// test increment
+	var wg sync.WaitGroup
+	for i := 0; i < numParallel; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			incRequest, err := hrpc.NewIncStrSingle(context.Background(), table, key, "cf", "a", 1)
+			_, err = c.Increment(incRequest)
+			if err != nil {
+				t.Errorf("Increment returned an error: %v", err)
+			}
+		}()
+	}
+	wg.Wait()
+
+	// do one more to check if there's a correct value
+	incRequest, err := hrpc.NewIncStrSingle(context.Background(), table, key, "cf", "a", 1)
+	result, err := c.Increment(incRequest)
+	if err != nil {
+		t.Fatalf("Increment returned an error: %v", err)
+	}
+
+	if result != int64(numParallel+1) {
+		t.Fatalf("Increment's result is %d, want %d", result, numParallel+1)
+	}
+
+}
+
+// Note: This test currently causes an infinite loop in the client, throwing the error:
+// 2015/06/19 14:34:11 Encountered an error while reading: Failed to read from the RS: EOF
+func TestChangingRegionServers(t *testing.T) {
+	key := "row8"
+	val := []byte("1")
+	headers := map[string][]string{"cf": nil}
+	if host == nil {
+		t.Fatal("Host is not set!")
+	}
+	c := gohbase.NewClient(*host)
+	err := insertKeyValue(c, key, "cf", val)
+	if err != nil {
+		t.Errorf("Put returned an error: %v", err)
+	}
+
+	// RegionServer 1 hosts all the current regions.
+	// Now launch servers 2,3
+	test.LaunchRegionServers([]string{"2", "3"})
+
+	// Now (gracefully) stop servers 1,2.
+	// All regions should now be on server 3.
+	test.StopRegionServers([]string{"1", "2"})
+	get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
+	rsp, err := c.Get(get)
+	if err != nil {
+		t.Errorf("Get returned an error: %v", err)
+	}
+	rsp_value := rsp.Cells[0].Value
+	if !bytes.Equal(rsp_value, val) {
+		t.Errorf("Get returned an incorrect result. Expected: %v, Received: %v",
+			val, rsp_value)
+	}
+
+	// Clean up by re-launching RS1 and closing RS3
+	test.LaunchRegionServers([]string{"1"})
+	test.StopRegionServers([]string{"3"})
+}
+
+func BenchmarkPut(b *testing.B) {
+	b.ReportAllocs()
+	keyPrefix := "row9"
+	err := performNPuts(keyPrefix, b.N)
+	if err != nil {
+		b.Errorf("Put returned an error: %v", err)
+	}
+}
+
+func BenchmarkGet(b *testing.B) {
+	b.ReportAllocs()
+	keyPrefix := "row10"
+	err := performNPuts(keyPrefix, b.N)
+	if err != nil {
+		b.Errorf("Put returned an error: %v", err)
+	}
+	c := gohbase.NewClient(*host)
+	b.ResetTimer()
+	headers := map[string][]string{"cf": nil}
+	for i := 0; i < b.N; i++ {
+		key := keyPrefix + fmt.Sprintf("%d", i)
+		get, _ := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
+		c.Get(get)
+	}
+}
+
+// Helper function. Given a key prefix and num_ops, performs num_ops puts.
+func performNPuts(keyPrefix string, num_ops int) error {
+	c := gohbase.NewClient(*host)
+	for i := 0; i < num_ops; i++ {
+		key := keyPrefix + fmt.Sprintf("%d", i)
+		err := insertKeyValue(c, key, "cf", []byte(fmt.Sprintf("%d", i)))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Helper function. 
Given a client, key, columnFamily, value inserts into the table under column 'a' +func insertKeyValue(c gohbase.Client, key, columnFamily string, value []byte) error { + values := map[string]map[string][]byte{columnFamily: map[string][]byte{}} + values[columnFamily]["a"] = value + putRequest, err := hrpc.NewPutStr(context.Background(), table, key, values) + _, err = c.Put(putRequest) + return err +} diff --git a/libs/gohbase/metacache_test.go b/libs/gohbase/metacache_test.go new file mode 100644 index 0000000..2040023 --- /dev/null +++ b/libs/gohbase/metacache_test.go @@ -0,0 +1,109 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package gohbase + +import ( + "reflect" + "testing" + + "bfs/libs/gohbase/conf" + "bfs/libs/gohbase/region" + "bfs/libs/gohbase/regioninfo" + "time" +) + +func TestMetaCache(t *testing.T) { + client := newClient(standardClient, conf.NewConf([]string{"~invalid.quorum~"}, "", "", "", 30*time.Second, 0, 0, 0)) // We shouldn't connect to ZK. + + reg := client.getRegionFromCache([]byte("test"), []byte("theKey")) + if reg != nil { + t.Errorf("Found region %#v even though the cache was empty?!", reg) + } + + // Inject an entry in the cache. This entry covers the entire key range. + wholeTable := ®ioninfo.Info{ + Table: []byte("test"), + RegionName: []byte("test,,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + StopKey: []byte(""), + } + regClient := ®ion.Client{} + client.regions.put(wholeTable.RegionName, wholeTable) + client.clients.put(wholeTable, regClient) + + reg = client.getRegionFromCache([]byte("test"), []byte("theKey")) + if !reflect.DeepEqual(reg, wholeTable) { + t.Errorf("Found region %#v but expected %#v", reg, wholeTable) + } + reg = client.getRegionFromCache([]byte("test"), []byte("")) // edge case. + if !reflect.DeepEqual(reg, wholeTable) { + t.Errorf("Found region %#v but expected %#v", reg, wholeTable) + } + + // Clear our client. + client = newClient(standardClient, conf.NewConf([]string{"~invalid.quorum~"}, "", "", "", 30*time.Second, 0, 0, 0)) + + // Inject 3 entries in the cache. 
+ region1 := ®ioninfo.Info{ + Table: []byte("test"), + RegionName: []byte("test,,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + StopKey: []byte("foo"), + } + client.regions.put(region1.RegionName, region1) + client.clients.put(region1, regClient) + + region2 := ®ioninfo.Info{ + Table: []byte("test"), + RegionName: []byte("test,foo,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + StopKey: []byte("gohbase"), + } + client.regions.put(region2.RegionName, region2) + client.clients.put(region2, regClient) + + region3 := ®ioninfo.Info{ + Table: []byte("test"), + RegionName: []byte("test,gohbase,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + StopKey: []byte(""), + } + client.regions.put(region3.RegionName, region3) + client.clients.put(region3, regClient) + + testcases := []struct { + key string + reg *regioninfo.Info + }{ + {key: "theKey", reg: region3}, + {key: "", reg: region1}, + {key: "bar", reg: region1}, + {key: "fon\xFF", reg: region1}, + {key: "foo", reg: region2}, + {key: "foo\x00", reg: region2}, + {key: "gohbase", reg: region3}, + } + for i, testcase := range testcases { + reg = client.getRegionFromCache([]byte("test"), []byte(testcase.key)) + if !reflect.DeepEqual(reg, testcase.reg) { + t.Errorf("[#%d] Found region %#v but expected %#v", i, reg, testcase.reg) + } + } + + // Change the last region (maybe it got split). + region3 = ®ioninfo.Info{ + Table: []byte("test"), + RegionName: []byte("test,gohbase,1234567890042.56f833d5569a27c7a43fbf547b4924a4."), + StopKey: []byte("zab"), + } + client.regions.put(region3.RegionName, region3) + client.clients.put(region3, regClient) + + reg = client.getRegionFromCache([]byte("test"), []byte("theKey")) + if !reflect.DeepEqual(reg, region3) { + t.Errorf("Found region %#v but expected %#v", reg, region3) + } + reg = client.getRegionFromCache([]byte("test"), []byte("zoo")) + if reg != nil { + t.Errorf("Shouldn't have found any region yet found %#v", reg) + } +} diff --git a/libs/gohbase/pb/Cell.pb.go b/libs/gohbase/pb/Cell.pb.go new file mode 100644 index 0000000..f594bb9 --- /dev/null +++ b/libs/gohbase/pb/Cell.pb.go @@ -0,0 +1,221 @@ +// Code generated by protoc-gen-go. +// source: Cell.proto +// DO NOT EDIT! + +/* +Package pb is a generated protocol buffer package. + +It is generated from these files: + Cell.proto + Client.proto + ClusterId.proto + ClusterStatus.proto + Comparator.proto + ErrorHandling.proto + Filter.proto + FS.proto + HBase.proto + Master.proto + MultiRowMutation.proto + Quota.proto + RPC.proto + Tracing.proto + ZooKeeper.proto + +It has these top-level messages: + Cell + KeyValue +*/ +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// * +// The type of the key in a Cell +type CellType int32 + +const ( + CellType_MINIMUM CellType = 0 + CellType_PUT CellType = 4 + CellType_DELETE CellType = 8 + CellType_DELETE_COLUMN CellType = 12 + CellType_DELETE_FAMILY CellType = 14 + // MAXIMUM is used when searching; you look from maximum on down. 
+ CellType_MAXIMUM CellType = 255 +) + +var CellType_name = map[int32]string{ + 0: "MINIMUM", + 4: "PUT", + 8: "DELETE", + 12: "DELETE_COLUMN", + 14: "DELETE_FAMILY", + 255: "MAXIMUM", +} +var CellType_value = map[string]int32{ + "MINIMUM": 0, + "PUT": 4, + "DELETE": 8, + "DELETE_COLUMN": 12, + "DELETE_FAMILY": 14, + "MAXIMUM": 255, +} + +func (x CellType) Enum() *CellType { + p := new(CellType) + *p = x + return p +} +func (x CellType) String() string { + return proto.EnumName(CellType_name, int32(x)) +} +func (x *CellType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CellType_value, data, "CellType") + if err != nil { + return err + } + *x = CellType(value) + return nil +} + +// * +// Protocol buffer version of Cell. +type Cell struct { + Row []byte `protobuf:"bytes,1,opt,name=row" json:"row,omitempty"` + Family []byte `protobuf:"bytes,2,opt,name=family" json:"family,omitempty"` + Qualifier []byte `protobuf:"bytes,3,opt,name=qualifier" json:"qualifier,omitempty"` + Timestamp *uint64 `protobuf:"varint,4,opt,name=timestamp" json:"timestamp,omitempty"` + CellType *CellType `protobuf:"varint,5,opt,name=cell_type,enum=pb.CellType" json:"cell_type,omitempty"` + Value []byte `protobuf:"bytes,6,opt,name=value" json:"value,omitempty"` + Tags []byte `protobuf:"bytes,7,opt,name=tags" json:"tags,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cell) Reset() { *m = Cell{} } +func (m *Cell) String() string { return proto.CompactTextString(m) } +func (*Cell) ProtoMessage() {} + +func (m *Cell) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *Cell) GetFamily() []byte { + if m != nil { + return m.Family + } + return nil +} + +func (m *Cell) GetQualifier() []byte { + if m != nil { + return m.Qualifier + } + return nil +} + +func (m *Cell) GetTimestamp() uint64 { + if m != nil && m.Timestamp != nil { + return *m.Timestamp + } + return 0 +} + +func (m *Cell) GetCellType() CellType { + if m != nil && m.CellType != nil { + return *m.CellType + } + return CellType_MINIMUM +} + +func (m *Cell) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *Cell) GetTags() []byte { + if m != nil { + return m.Tags + } + return nil +} + +// * +// Protocol buffer version of KeyValue. 
+// It doesn't have those transient parameters +type KeyValue struct { + Row []byte `protobuf:"bytes,1,req,name=row" json:"row,omitempty"` + Family []byte `protobuf:"bytes,2,req,name=family" json:"family,omitempty"` + Qualifier []byte `protobuf:"bytes,3,req,name=qualifier" json:"qualifier,omitempty"` + Timestamp *uint64 `protobuf:"varint,4,opt,name=timestamp" json:"timestamp,omitempty"` + KeyType *CellType `protobuf:"varint,5,opt,name=key_type,enum=pb.CellType" json:"key_type,omitempty"` + Value []byte `protobuf:"bytes,6,opt,name=value" json:"value,omitempty"` + Tags []byte `protobuf:"bytes,7,opt,name=tags" json:"tags,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} + +func (m *KeyValue) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *KeyValue) GetFamily() []byte { + if m != nil { + return m.Family + } + return nil +} + +func (m *KeyValue) GetQualifier() []byte { + if m != nil { + return m.Qualifier + } + return nil +} + +func (m *KeyValue) GetTimestamp() uint64 { + if m != nil && m.Timestamp != nil { + return *m.Timestamp + } + return 0 +} + +func (m *KeyValue) GetKeyType() CellType { + if m != nil && m.KeyType != nil { + return *m.KeyType + } + return CellType_MINIMUM +} + +func (m *KeyValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *KeyValue) GetTags() []byte { + if m != nil { + return m.Tags + } + return nil +} + +func init() { + proto.RegisterEnum("pb.CellType", CellType_name, CellType_value) +} diff --git a/libs/gohbase/pb/Cell.proto b/libs/gohbase/pb/Cell.proto new file mode 100644 index 0000000..66a3e2d --- /dev/null +++ b/libs/gohbase/pb/Cell.proto @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Cell and KeyValue protos + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "CellProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +/** + * The type of the key in a Cell + */ +enum CellType { + MINIMUM = 0; + PUT = 4; + + DELETE = 8; + DELETE_COLUMN = 12; + DELETE_FAMILY = 14; + + // MAXIMUM is used when searching; you look from maximum on down. + MAXIMUM = 255; +} + +/** + * Protocol buffer version of Cell. + */ +message Cell { + optional bytes row = 1; + optional bytes family = 2; + optional bytes qualifier = 3; + optional uint64 timestamp = 4; + optional CellType cell_type = 5; + optional bytes value = 6; + optional bytes tags = 7; +} + +/** + * Protocol buffer version of KeyValue. 
+ * It doesn't have those transient parameters + */ +message KeyValue { + required bytes row = 1; + required bytes family = 2; + required bytes qualifier = 3; + optional uint64 timestamp = 4; + optional CellType key_type = 5; + optional bytes value = 6; + optional bytes tags = 7; +} diff --git a/libs/gohbase/pb/Client.pb.go b/libs/gohbase/pb/Client.pb.go new file mode 100644 index 0000000..d8508c9 --- /dev/null +++ b/libs/gohbase/pb/Client.pb.go @@ -0,0 +1,1540 @@ +// Code generated by protoc-gen-go. +// source: Client.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// * +// Consistency defines the expected consistency level for an operation. +type Consistency int32 + +const ( + Consistency_STRONG Consistency = 0 + Consistency_TIMELINE Consistency = 1 +) + +var Consistency_name = map[int32]string{ + 0: "STRONG", + 1: "TIMELINE", +} +var Consistency_value = map[string]int32{ + "STRONG": 0, + "TIMELINE": 1, +} + +func (x Consistency) Enum() *Consistency { + p := new(Consistency) + *p = x + return p +} +func (x Consistency) String() string { + return proto.EnumName(Consistency_name, int32(x)) +} +func (x *Consistency) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Consistency_value, data, "Consistency") + if err != nil { + return err + } + *x = Consistency(value) + return nil +} + +type MutationProto_Durability int32 + +const ( + MutationProto_USE_DEFAULT MutationProto_Durability = 0 + MutationProto_SKIP_WAL MutationProto_Durability = 1 + MutationProto_ASYNC_WAL MutationProto_Durability = 2 + MutationProto_SYNC_WAL MutationProto_Durability = 3 + MutationProto_FSYNC_WAL MutationProto_Durability = 4 +) + +var MutationProto_Durability_name = map[int32]string{ + 0: "USE_DEFAULT", + 1: "SKIP_WAL", + 2: "ASYNC_WAL", + 3: "SYNC_WAL", + 4: "FSYNC_WAL", +} +var MutationProto_Durability_value = map[string]int32{ + "USE_DEFAULT": 0, + "SKIP_WAL": 1, + "ASYNC_WAL": 2, + "SYNC_WAL": 3, + "FSYNC_WAL": 4, +} + +func (x MutationProto_Durability) Enum() *MutationProto_Durability { + p := new(MutationProto_Durability) + *p = x + return p +} +func (x MutationProto_Durability) String() string { + return proto.EnumName(MutationProto_Durability_name, int32(x)) +} +func (x *MutationProto_Durability) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MutationProto_Durability_value, data, "MutationProto_Durability") + if err != nil { + return err + } + *x = MutationProto_Durability(value) + return nil +} + +type MutationProto_MutationType int32 + +const ( + MutationProto_APPEND MutationProto_MutationType = 0 + MutationProto_INCREMENT MutationProto_MutationType = 1 + MutationProto_PUT MutationProto_MutationType = 2 + MutationProto_DELETE MutationProto_MutationType = 3 +) + +var MutationProto_MutationType_name = map[int32]string{ + 0: "APPEND", + 1: "INCREMENT", + 2: "PUT", + 3: "DELETE", +} +var MutationProto_MutationType_value = map[string]int32{ + "APPEND": 0, + "INCREMENT": 1, + "PUT": 2, + "DELETE": 3, +} + +func (x MutationProto_MutationType) Enum() *MutationProto_MutationType { + p := new(MutationProto_MutationType) + *p = x + return p +} +func (x MutationProto_MutationType) String() string { + return proto.EnumName(MutationProto_MutationType_name, int32(x)) +} +func (x *MutationProto_MutationType) UnmarshalJSON(data []byte) error { + value, err := 
proto.UnmarshalJSONEnum(MutationProto_MutationType_value, data, "MutationProto_MutationType") + if err != nil { + return err + } + *x = MutationProto_MutationType(value) + return nil +} + +type MutationProto_DeleteType int32 + +const ( + MutationProto_DELETE_ONE_VERSION MutationProto_DeleteType = 0 + MutationProto_DELETE_MULTIPLE_VERSIONS MutationProto_DeleteType = 1 + MutationProto_DELETE_FAMILY MutationProto_DeleteType = 2 + MutationProto_DELETE_FAMILY_VERSION MutationProto_DeleteType = 3 +) + +var MutationProto_DeleteType_name = map[int32]string{ + 0: "DELETE_ONE_VERSION", + 1: "DELETE_MULTIPLE_VERSIONS", + 2: "DELETE_FAMILY", + 3: "DELETE_FAMILY_VERSION", +} +var MutationProto_DeleteType_value = map[string]int32{ + "DELETE_ONE_VERSION": 0, + "DELETE_MULTIPLE_VERSIONS": 1, + "DELETE_FAMILY": 2, + "DELETE_FAMILY_VERSION": 3, +} + +func (x MutationProto_DeleteType) Enum() *MutationProto_DeleteType { + p := new(MutationProto_DeleteType) + *p = x + return p +} +func (x MutationProto_DeleteType) String() string { + return proto.EnumName(MutationProto_DeleteType_name, int32(x)) +} +func (x *MutationProto_DeleteType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MutationProto_DeleteType_value, data, "MutationProto_DeleteType") + if err != nil { + return err + } + *x = MutationProto_DeleteType(value) + return nil +} + +// * +// The protocol buffer version of Authorizations. +type Authorizations struct { + Label []string `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Authorizations) Reset() { *m = Authorizations{} } +func (m *Authorizations) String() string { return proto.CompactTextString(m) } +func (*Authorizations) ProtoMessage() {} + +func (m *Authorizations) GetLabel() []string { + if m != nil { + return m.Label + } + return nil +} + +// * +// The protocol buffer version of CellVisibility. +type CellVisibility struct { + Expression *string `protobuf:"bytes,1,req,name=expression" json:"expression,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CellVisibility) Reset() { *m = CellVisibility{} } +func (m *CellVisibility) String() string { return proto.CompactTextString(m) } +func (*CellVisibility) ProtoMessage() {} + +func (m *CellVisibility) GetExpression() string { + if m != nil && m.Expression != nil { + return *m.Expression + } + return "" +} + +// * +// Container for a list of column qualifier names of a family. +type Column struct { + Family []byte `protobuf:"bytes,1,req,name=family" json:"family,omitempty"` + Qualifier [][]byte `protobuf:"bytes,2,rep,name=qualifier" json:"qualifier,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Column) Reset() { *m = Column{} } +func (m *Column) String() string { return proto.CompactTextString(m) } +func (*Column) ProtoMessage() {} + +func (m *Column) GetFamily() []byte { + if m != nil { + return m.Family + } + return nil +} + +func (m *Column) GetQualifier() [][]byte { + if m != nil { + return m.Qualifier + } + return nil +} + +// * +// The protocol buffer version of Get. +// Unless existence_only is specified, return all the requested data +// for the row that matches exactly, or the one that immediately +// precedes it if closest_row_before is specified. 
+type Get struct { + Row []byte `protobuf:"bytes,1,req,name=row" json:"row,omitempty"` + Column []*Column `protobuf:"bytes,2,rep,name=column" json:"column,omitempty"` + Attribute []*NameBytesPair `protobuf:"bytes,3,rep,name=attribute" json:"attribute,omitempty"` + Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"` + TimeRange *TimeRange `protobuf:"bytes,5,opt,name=time_range" json:"time_range,omitempty"` + MaxVersions *uint32 `protobuf:"varint,6,opt,name=max_versions,def=1" json:"max_versions,omitempty"` + CacheBlocks *bool `protobuf:"varint,7,opt,name=cache_blocks,def=1" json:"cache_blocks,omitempty"` + StoreLimit *uint32 `protobuf:"varint,8,opt,name=store_limit" json:"store_limit,omitempty"` + StoreOffset *uint32 `protobuf:"varint,9,opt,name=store_offset" json:"store_offset,omitempty"` + // The result isn't asked for, just check for + // the existence. + ExistenceOnly *bool `protobuf:"varint,10,opt,name=existence_only,def=0" json:"existence_only,omitempty"` + // If the row to get doesn't exist, return the + // closest row before. + ClosestRowBefore *bool `protobuf:"varint,11,opt,name=closest_row_before,def=0" json:"closest_row_before,omitempty"` + Consistency *Consistency `protobuf:"varint,12,opt,name=consistency,enum=pb.Consistency,def=0" json:"consistency,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Get) Reset() { *m = Get{} } +func (m *Get) String() string { return proto.CompactTextString(m) } +func (*Get) ProtoMessage() {} + +const Default_Get_MaxVersions uint32 = 1 +const Default_Get_CacheBlocks bool = true +const Default_Get_ExistenceOnly bool = false +const Default_Get_ClosestRowBefore bool = false +const Default_Get_Consistency Consistency = Consistency_STRONG + +func (m *Get) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *Get) GetColumn() []*Column { + if m != nil { + return m.Column + } + return nil +} + +func (m *Get) GetAttribute() []*NameBytesPair { + if m != nil { + return m.Attribute + } + return nil +} + +func (m *Get) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Get) GetTimeRange() *TimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *Get) GetMaxVersions() uint32 { + if m != nil && m.MaxVersions != nil { + return *m.MaxVersions + } + return Default_Get_MaxVersions +} + +func (m *Get) GetCacheBlocks() bool { + if m != nil && m.CacheBlocks != nil { + return *m.CacheBlocks + } + return Default_Get_CacheBlocks +} + +func (m *Get) GetStoreLimit() uint32 { + if m != nil && m.StoreLimit != nil { + return *m.StoreLimit + } + return 0 +} + +func (m *Get) GetStoreOffset() uint32 { + if m != nil && m.StoreOffset != nil { + return *m.StoreOffset + } + return 0 +} + +func (m *Get) GetExistenceOnly() bool { + if m != nil && m.ExistenceOnly != nil { + return *m.ExistenceOnly + } + return Default_Get_ExistenceOnly +} + +func (m *Get) GetClosestRowBefore() bool { + if m != nil && m.ClosestRowBefore != nil { + return *m.ClosestRowBefore + } + return Default_Get_ClosestRowBefore +} + +func (m *Get) GetConsistency() Consistency { + if m != nil && m.Consistency != nil { + return *m.Consistency + } + return Default_Get_Consistency +} + +type Result struct { + // Result includes the Cells or else it just has a count of Cells + // that are carried otherwise. 
+ Cell []*Cell `protobuf:"bytes,1,rep,name=cell" json:"cell,omitempty"` + // The below count is set when the associated cells are + // not part of this protobuf message; they are passed alongside + // and then this Message is just a placeholder with metadata. + // The count is needed to know how many to peel off the block of Cells as + // ours. NOTE: This is different from the pb managed cell_count of the + // 'cell' field above which is non-null when the cells are pb'd. + AssociatedCellCount *int32 `protobuf:"varint,2,opt,name=associated_cell_count" json:"associated_cell_count,omitempty"` + // used for Get to check existence only. Not set if existence_only was not set to true + // in the query. + Exists *bool `protobuf:"varint,3,opt,name=exists" json:"exists,omitempty"` + // Whether or not the results are coming from possibly stale data + Stale *bool `protobuf:"varint,4,opt,name=stale,def=0" json:"stale,omitempty"` + // Whether or not the entire result could be returned. Results will be split when + // the RPC chunk size limit is reached. Partial results contain only a subset of the + // cells for a row and must be combined with a result containing the remaining cells + // to form a complete result + Partial *bool `protobuf:"varint,5,opt,name=partial,def=0" json:"partial,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Result) Reset() { *m = Result{} } +func (m *Result) String() string { return proto.CompactTextString(m) } +func (*Result) ProtoMessage() {} + +const Default_Result_Stale bool = false +const Default_Result_Partial bool = false + +func (m *Result) GetCell() []*Cell { + if m != nil { + return m.Cell + } + return nil +} + +func (m *Result) GetAssociatedCellCount() int32 { + if m != nil && m.AssociatedCellCount != nil { + return *m.AssociatedCellCount + } + return 0 +} + +func (m *Result) GetExists() bool { + if m != nil && m.Exists != nil { + return *m.Exists + } + return false +} + +func (m *Result) GetStale() bool { + if m != nil && m.Stale != nil { + return *m.Stale + } + return Default_Result_Stale +} + +func (m *Result) GetPartial() bool { + if m != nil && m.Partial != nil { + return *m.Partial + } + return Default_Result_Partial +} + +// * +// The get request. Perform a single Get operation. +type GetRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + Get *Get `protobuf:"bytes,2,req,name=get" json:"get,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} + +func (m *GetRequest) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +func (m *GetRequest) GetGet() *Get { + if m != nil { + return m.Get + } + return nil +} + +type GetResponse struct { + Result *Result `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} + +func (m *GetResponse) GetResult() *Result { + if m != nil { + return m.Result + } + return nil +} + +// * +// Condition to check if the value of a given cell (row, +// family, qualifier) matches a value via a given comparator. +// +// Condition is used in check and mutate operations. 
+type Condition struct { + Row []byte `protobuf:"bytes,1,req,name=row" json:"row,omitempty"` + Family []byte `protobuf:"bytes,2,req,name=family" json:"family,omitempty"` + Qualifier []byte `protobuf:"bytes,3,req,name=qualifier" json:"qualifier,omitempty"` + CompareType *CompareType `protobuf:"varint,4,req,name=compare_type,enum=pb.CompareType" json:"compare_type,omitempty"` + Comparator *Comparator `protobuf:"bytes,5,req,name=comparator" json:"comparator,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Condition) Reset() { *m = Condition{} } +func (m *Condition) String() string { return proto.CompactTextString(m) } +func (*Condition) ProtoMessage() {} + +func (m *Condition) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *Condition) GetFamily() []byte { + if m != nil { + return m.Family + } + return nil +} + +func (m *Condition) GetQualifier() []byte { + if m != nil { + return m.Qualifier + } + return nil +} + +func (m *Condition) GetCompareType() CompareType { + if m != nil && m.CompareType != nil { + return *m.CompareType + } + return CompareType_LESS +} + +func (m *Condition) GetComparator() *Comparator { + if m != nil { + return m.Comparator + } + return nil +} + +// * +// A specific mutation inside a mutate request. +// It can be an append, increment, put or delete based +// on the mutation type. It can be fully filled in or +// only metadata present because data is being carried +// elsewhere outside of pb. +type MutationProto struct { + Row []byte `protobuf:"bytes,1,opt,name=row" json:"row,omitempty"` + MutateType *MutationProto_MutationType `protobuf:"varint,2,opt,name=mutate_type,enum=pb.MutationProto_MutationType" json:"mutate_type,omitempty"` + ColumnValue []*MutationProto_ColumnValue `protobuf:"bytes,3,rep,name=column_value" json:"column_value,omitempty"` + Timestamp *uint64 `protobuf:"varint,4,opt,name=timestamp" json:"timestamp,omitempty"` + Attribute []*NameBytesPair `protobuf:"bytes,5,rep,name=attribute" json:"attribute,omitempty"` + Durability *MutationProto_Durability `protobuf:"varint,6,opt,name=durability,enum=pb.MutationProto_Durability,def=0" json:"durability,omitempty"` + // For some mutations, a result may be returned, in which case, + // time range can be specified for potential performance gain + TimeRange *TimeRange `protobuf:"bytes,7,opt,name=time_range" json:"time_range,omitempty"` + // The below count is set when the associated cells are NOT + // part of this protobuf message; they are passed alongside + // and then this Message is a placeholder with metadata. The + // count is needed to know how many to peel off the block of Cells as + // ours. NOTE: This is different from the pb managed cell_count of the + // 'cell' field above which is non-null when the cells are pb'd. 
+ AssociatedCellCount *int32 `protobuf:"varint,8,opt,name=associated_cell_count" json:"associated_cell_count,omitempty"` + Nonce *uint64 `protobuf:"varint,9,opt,name=nonce" json:"nonce,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutationProto) Reset() { *m = MutationProto{} } +func (m *MutationProto) String() string { return proto.CompactTextString(m) } +func (*MutationProto) ProtoMessage() {} + +const Default_MutationProto_Durability MutationProto_Durability = MutationProto_USE_DEFAULT + +func (m *MutationProto) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *MutationProto) GetMutateType() MutationProto_MutationType { + if m != nil && m.MutateType != nil { + return *m.MutateType + } + return MutationProto_APPEND +} + +func (m *MutationProto) GetColumnValue() []*MutationProto_ColumnValue { + if m != nil { + return m.ColumnValue + } + return nil +} + +func (m *MutationProto) GetTimestamp() uint64 { + if m != nil && m.Timestamp != nil { + return *m.Timestamp + } + return 0 +} + +func (m *MutationProto) GetAttribute() []*NameBytesPair { + if m != nil { + return m.Attribute + } + return nil +} + +func (m *MutationProto) GetDurability() MutationProto_Durability { + if m != nil && m.Durability != nil { + return *m.Durability + } + return Default_MutationProto_Durability +} + +func (m *MutationProto) GetTimeRange() *TimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *MutationProto) GetAssociatedCellCount() int32 { + if m != nil && m.AssociatedCellCount != nil { + return *m.AssociatedCellCount + } + return 0 +} + +func (m *MutationProto) GetNonce() uint64 { + if m != nil && m.Nonce != nil { + return *m.Nonce + } + return 0 +} + +type MutationProto_ColumnValue struct { + Family []byte `protobuf:"bytes,1,req,name=family" json:"family,omitempty"` + QualifierValue []*MutationProto_ColumnValue_QualifierValue `protobuf:"bytes,2,rep,name=qualifier_value" json:"qualifier_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutationProto_ColumnValue) Reset() { *m = MutationProto_ColumnValue{} } +func (m *MutationProto_ColumnValue) String() string { return proto.CompactTextString(m) } +func (*MutationProto_ColumnValue) ProtoMessage() {} + +func (m *MutationProto_ColumnValue) GetFamily() []byte { + if m != nil { + return m.Family + } + return nil +} + +func (m *MutationProto_ColumnValue) GetQualifierValue() []*MutationProto_ColumnValue_QualifierValue { + if m != nil { + return m.QualifierValue + } + return nil +} + +type MutationProto_ColumnValue_QualifierValue struct { + Qualifier []byte `protobuf:"bytes,1,opt,name=qualifier" json:"qualifier,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Timestamp *uint64 `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"` + DeleteType *MutationProto_DeleteType `protobuf:"varint,4,opt,name=delete_type,enum=pb.MutationProto_DeleteType" json:"delete_type,omitempty"` + Tags []byte `protobuf:"bytes,5,opt,name=tags" json:"tags,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutationProto_ColumnValue_QualifierValue) Reset() { + *m = MutationProto_ColumnValue_QualifierValue{} +} +func (m *MutationProto_ColumnValue_QualifierValue) String() string { return proto.CompactTextString(m) } +func (*MutationProto_ColumnValue_QualifierValue) ProtoMessage() {} + +func (m *MutationProto_ColumnValue_QualifierValue) GetQualifier() []byte { + if m != nil { + return m.Qualifier + } + return nil +} + +func (m 
*MutationProto_ColumnValue_QualifierValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MutationProto_ColumnValue_QualifierValue) GetTimestamp() uint64 { + if m != nil && m.Timestamp != nil { + return *m.Timestamp + } + return 0 +} + +func (m *MutationProto_ColumnValue_QualifierValue) GetDeleteType() MutationProto_DeleteType { + if m != nil && m.DeleteType != nil { + return *m.DeleteType + } + return MutationProto_DELETE_ONE_VERSION +} + +func (m *MutationProto_ColumnValue_QualifierValue) GetTags() []byte { + if m != nil { + return m.Tags + } + return nil +} + +// * +// The mutate request. Perform a single Mutate operation. +// +// Optionally, you can specify a condition. The mutate +// will take place only if the condition is met. Otherwise, +// the mutate will be ignored. In the response result, +// parameter processed is used to indicate if the mutate +// actually happened. +type MutateRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + Mutation *MutationProto `protobuf:"bytes,2,req,name=mutation" json:"mutation,omitempty"` + Condition *Condition `protobuf:"bytes,3,opt,name=condition" json:"condition,omitempty"` + NonceGroup *uint64 `protobuf:"varint,4,opt,name=nonce_group" json:"nonce_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutateRequest) Reset() { *m = MutateRequest{} } +func (m *MutateRequest) String() string { return proto.CompactTextString(m) } +func (*MutateRequest) ProtoMessage() {} + +func (m *MutateRequest) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +func (m *MutateRequest) GetMutation() *MutationProto { + if m != nil { + return m.Mutation + } + return nil +} + +func (m *MutateRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *MutateRequest) GetNonceGroup() uint64 { + if m != nil && m.NonceGroup != nil { + return *m.NonceGroup + } + return 0 +} + +type MutateResponse struct { + Result *Result `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` + // used for mutate to indicate processed only + Processed *bool `protobuf:"varint,2,opt,name=processed" json:"processed,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutateResponse) Reset() { *m = MutateResponse{} } +func (m *MutateResponse) String() string { return proto.CompactTextString(m) } +func (*MutateResponse) ProtoMessage() {} + +func (m *MutateResponse) GetResult() *Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *MutateResponse) GetProcessed() bool { + if m != nil && m.Processed != nil { + return *m.Processed + } + return false +} + +// * +// Instead of get from a table, you can scan it with optional filters. +// You can specify the row key range, time range, the columns/families +// to scan and so on. +// +// This scan is used the first time in a scan request. The response of +// the initial scan will return a scanner id, which should be used to +// fetch result batches later on before it is closed. 
+type Scan struct { + Column []*Column `protobuf:"bytes,1,rep,name=column" json:"column,omitempty"` + Attribute []*NameBytesPair `protobuf:"bytes,2,rep,name=attribute" json:"attribute,omitempty"` + StartRow []byte `protobuf:"bytes,3,opt,name=start_row" json:"start_row,omitempty"` + StopRow []byte `protobuf:"bytes,4,opt,name=stop_row" json:"stop_row,omitempty"` + Filter *Filter `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` + TimeRange *TimeRange `protobuf:"bytes,6,opt,name=time_range" json:"time_range,omitempty"` + MaxVersions *uint32 `protobuf:"varint,7,opt,name=max_versions,def=1" json:"max_versions,omitempty"` + CacheBlocks *bool `protobuf:"varint,8,opt,name=cache_blocks,def=1" json:"cache_blocks,omitempty"` + BatchSize *uint32 `protobuf:"varint,9,opt,name=batch_size" json:"batch_size,omitempty"` + MaxResultSize *uint64 `protobuf:"varint,10,opt,name=max_result_size" json:"max_result_size,omitempty"` + StoreLimit *uint32 `protobuf:"varint,11,opt,name=store_limit" json:"store_limit,omitempty"` + StoreOffset *uint32 `protobuf:"varint,12,opt,name=store_offset" json:"store_offset,omitempty"` + LoadColumnFamiliesOnDemand *bool `protobuf:"varint,13,opt,name=load_column_families_on_demand" json:"load_column_families_on_demand,omitempty"` + Small *bool `protobuf:"varint,14,opt,name=small" json:"small,omitempty"` + Reversed *bool `protobuf:"varint,15,opt,name=reversed,def=0" json:"reversed,omitempty"` + Consistency *Consistency `protobuf:"varint,16,opt,name=consistency,enum=pb.Consistency,def=0" json:"consistency,omitempty"` + Caching *uint32 `protobuf:"varint,17,opt,name=caching" json:"caching,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Scan) Reset() { *m = Scan{} } +func (m *Scan) String() string { return proto.CompactTextString(m) } +func (*Scan) ProtoMessage() {} + +const Default_Scan_MaxVersions uint32 = 1 +const Default_Scan_CacheBlocks bool = true +const Default_Scan_Reversed bool = false +const Default_Scan_Consistency Consistency = Consistency_STRONG + +func (m *Scan) GetColumn() []*Column { + if m != nil { + return m.Column + } + return nil +} + +func (m *Scan) GetAttribute() []*NameBytesPair { + if m != nil { + return m.Attribute + } + return nil +} + +func (m *Scan) GetStartRow() []byte { + if m != nil { + return m.StartRow + } + return nil +} + +func (m *Scan) GetStopRow() []byte { + if m != nil { + return m.StopRow + } + return nil +} + +func (m *Scan) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Scan) GetTimeRange() *TimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *Scan) GetMaxVersions() uint32 { + if m != nil && m.MaxVersions != nil { + return *m.MaxVersions + } + return Default_Scan_MaxVersions +} + +func (m *Scan) GetCacheBlocks() bool { + if m != nil && m.CacheBlocks != nil { + return *m.CacheBlocks + } + return Default_Scan_CacheBlocks +} + +func (m *Scan) GetBatchSize() uint32 { + if m != nil && m.BatchSize != nil { + return *m.BatchSize + } + return 0 +} + +func (m *Scan) GetMaxResultSize() uint64 { + if m != nil && m.MaxResultSize != nil { + return *m.MaxResultSize + } + return 0 +} + +func (m *Scan) GetStoreLimit() uint32 { + if m != nil && m.StoreLimit != nil { + return *m.StoreLimit + } + return 0 +} + +func (m *Scan) GetStoreOffset() uint32 { + if m != nil && m.StoreOffset != nil { + return *m.StoreOffset + } + return 0 +} + +func (m *Scan) GetLoadColumnFamiliesOnDemand() bool { + if m != nil && m.LoadColumnFamiliesOnDemand != nil { + return 
*m.LoadColumnFamiliesOnDemand + } + return false +} + +func (m *Scan) GetSmall() bool { + if m != nil && m.Small != nil { + return *m.Small + } + return false +} + +func (m *Scan) GetReversed() bool { + if m != nil && m.Reversed != nil { + return *m.Reversed + } + return Default_Scan_Reversed +} + +func (m *Scan) GetConsistency() Consistency { + if m != nil && m.Consistency != nil { + return *m.Consistency + } + return Default_Scan_Consistency +} + +func (m *Scan) GetCaching() uint32 { + if m != nil && m.Caching != nil { + return *m.Caching + } + return 0 +} + +// * +// A scan request. Initially, it should specify a scan. Later on, you +// can use the scanner id returned to fetch result batches with a different +// scan request. +// +// The scanner will remain open if there are more results, and it's not +// asked to be closed explicitly. +// +// You can fetch the results and ask the scanner to be closed to save +// a trip if you are not interested in remaining results. +type ScanRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,opt,name=region" json:"region,omitempty"` + Scan *Scan `protobuf:"bytes,2,opt,name=scan" json:"scan,omitempty"` + ScannerId *uint64 `protobuf:"varint,3,opt,name=scanner_id" json:"scanner_id,omitempty"` + NumberOfRows *uint32 `protobuf:"varint,4,opt,name=number_of_rows" json:"number_of_rows,omitempty"` + CloseScanner *bool `protobuf:"varint,5,opt,name=close_scanner" json:"close_scanner,omitempty"` + NextCallSeq *uint64 `protobuf:"varint,6,opt,name=next_call_seq" json:"next_call_seq,omitempty"` + ClientHandlesPartials *bool `protobuf:"varint,7,opt,name=client_handles_partials" json:"client_handles_partials,omitempty"` + ClientHandlesHeartbeats *bool `protobuf:"varint,8,opt,name=client_handles_heartbeats" json:"client_handles_heartbeats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ScanRequest) Reset() { *m = ScanRequest{} } +func (m *ScanRequest) String() string { return proto.CompactTextString(m) } +func (*ScanRequest) ProtoMessage() {} + +func (m *ScanRequest) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +func (m *ScanRequest) GetScan() *Scan { + if m != nil { + return m.Scan + } + return nil +} + +func (m *ScanRequest) GetScannerId() uint64 { + if m != nil && m.ScannerId != nil { + return *m.ScannerId + } + return 0 +} + +func (m *ScanRequest) GetNumberOfRows() uint32 { + if m != nil && m.NumberOfRows != nil { + return *m.NumberOfRows + } + return 0 +} + +func (m *ScanRequest) GetCloseScanner() bool { + if m != nil && m.CloseScanner != nil { + return *m.CloseScanner + } + return false +} + +func (m *ScanRequest) GetNextCallSeq() uint64 { + if m != nil && m.NextCallSeq != nil { + return *m.NextCallSeq + } + return 0 +} + +func (m *ScanRequest) GetClientHandlesPartials() bool { + if m != nil && m.ClientHandlesPartials != nil { + return *m.ClientHandlesPartials + } + return false +} + +func (m *ScanRequest) GetClientHandlesHeartbeats() bool { + if m != nil && m.ClientHandlesHeartbeats != nil { + return *m.ClientHandlesHeartbeats + } + return false +} + +// * +// The scan response. If there are no more results, more_results will +// be false. If it is not specified, it means there are more. +type ScanResponse struct { + // This field is filled in if we are doing cellblocks. A cellblock is made up + // of all Cells serialized out as one cellblock BUT responses from a server + // have their Cells grouped by Result. 
So we can reconstitute the + // Results on the client-side, this field is a list of counts of Cells + // in each Result that makes up the response. For example, if this field + // has 3, 3, 3 in it, then we know that on the client, we are to make + // three Results each of three Cells each. + CellsPerResult []uint32 `protobuf:"varint,1,rep,name=cells_per_result" json:"cells_per_result,omitempty"` + ScannerId *uint64 `protobuf:"varint,2,opt,name=scanner_id" json:"scanner_id,omitempty"` + MoreResults *bool `protobuf:"varint,3,opt,name=more_results" json:"more_results,omitempty"` + Ttl *uint32 `protobuf:"varint,4,opt,name=ttl" json:"ttl,omitempty"` + // If cells are not carried in an accompanying cellblock, then they are pb'd here. + // This field is mutually exclusive with cells_per_result (since the Cells will + // be inside the pb'd Result) + Results []*Result `protobuf:"bytes,5,rep,name=results" json:"results,omitempty"` + Stale *bool `protobuf:"varint,6,opt,name=stale" json:"stale,omitempty"` + // This field is filled in if we are doing cellblocks. In the event that a row + // could not fit all of its cells into a single RPC chunk, the results will be + // returned as partials, and reconstructed into a complete result on the client + // side. This field is a list of flags indicating whether or not the result + // that the cells belong to is a partial result. For example, if this field + // has false, false, true in it, then we know that on the client side, we need to + // make another RPC request since the last result was only a partial. + PartialFlagPerResult []bool `protobuf:"varint,7,rep,name=partial_flag_per_result" json:"partial_flag_per_result,omitempty"` + // A server may choose to limit the number of results returned to the client for + // reasons such as the size in bytes or quantity of results accumulated. This field + // will be true when more results exist in the current region. + MoreResultsInRegion *bool `protobuf:"varint,8,opt,name=more_results_in_region" json:"more_results_in_region,omitempty"` + // This field is filled in if the server is sending back a heartbeat message. + // Heartbeat messages are sent back to the client to prevent the scanner from + // timing out. Seeing a heartbeat message communicates to the Client that the + // server would have continued to scan had the time limit not been reached.
+ HeartbeatMessage *bool `protobuf:"varint,9,opt,name=heartbeat_message" json:"heartbeat_message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ScanResponse) Reset() { *m = ScanResponse{} } +func (m *ScanResponse) String() string { return proto.CompactTextString(m) } +func (*ScanResponse) ProtoMessage() {} + +func (m *ScanResponse) GetCellsPerResult() []uint32 { + if m != nil { + return m.CellsPerResult + } + return nil +} + +func (m *ScanResponse) GetScannerId() uint64 { + if m != nil && m.ScannerId != nil { + return *m.ScannerId + } + return 0 +} + +func (m *ScanResponse) GetMoreResults() bool { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return false +} + +func (m *ScanResponse) GetTtl() uint32 { + if m != nil && m.Ttl != nil { + return *m.Ttl + } + return 0 +} + +func (m *ScanResponse) GetResults() []*Result { + if m != nil { + return m.Results + } + return nil +} + +func (m *ScanResponse) GetStale() bool { + if m != nil && m.Stale != nil { + return *m.Stale + } + return false +} + +func (m *ScanResponse) GetPartialFlagPerResult() []bool { + if m != nil { + return m.PartialFlagPerResult + } + return nil +} + +func (m *ScanResponse) GetMoreResultsInRegion() bool { + if m != nil && m.MoreResultsInRegion != nil { + return *m.MoreResultsInRegion + } + return false +} + +func (m *ScanResponse) GetHeartbeatMessage() bool { + if m != nil && m.HeartbeatMessage != nil { + return *m.HeartbeatMessage + } + return false +} + +// * +// Atomically bulk load multiple HFiles (say from different column families) +// into an open region. +type BulkLoadHFileRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + FamilyPath []*BulkLoadHFileRequest_FamilyPath `protobuf:"bytes,2,rep,name=family_path" json:"family_path,omitempty"` + AssignSeqNum *bool `protobuf:"varint,3,opt,name=assign_seq_num" json:"assign_seq_num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BulkLoadHFileRequest) Reset() { *m = BulkLoadHFileRequest{} } +func (m *BulkLoadHFileRequest) String() string { return proto.CompactTextString(m) } +func (*BulkLoadHFileRequest) ProtoMessage() {} + +func (m *BulkLoadHFileRequest) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +func (m *BulkLoadHFileRequest) GetFamilyPath() []*BulkLoadHFileRequest_FamilyPath { + if m != nil { + return m.FamilyPath + } + return nil +} + +func (m *BulkLoadHFileRequest) GetAssignSeqNum() bool { + if m != nil && m.AssignSeqNum != nil { + return *m.AssignSeqNum + } + return false +} + +type BulkLoadHFileRequest_FamilyPath struct { + Family []byte `protobuf:"bytes,1,req,name=family" json:"family,omitempty"` + Path *string `protobuf:"bytes,2,req,name=path" json:"path,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BulkLoadHFileRequest_FamilyPath) Reset() { *m = BulkLoadHFileRequest_FamilyPath{} } +func (m *BulkLoadHFileRequest_FamilyPath) String() string { return proto.CompactTextString(m) } +func (*BulkLoadHFileRequest_FamilyPath) ProtoMessage() {} + +func (m *BulkLoadHFileRequest_FamilyPath) GetFamily() []byte { + if m != nil { + return m.Family + } + return nil +} + +func (m *BulkLoadHFileRequest_FamilyPath) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +type BulkLoadHFileResponse struct { + Loaded *bool `protobuf:"varint,1,req,name=loaded" json:"loaded,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BulkLoadHFileResponse) Reset() { *m = 
BulkLoadHFileResponse{} } +func (m *BulkLoadHFileResponse) String() string { return proto.CompactTextString(m) } +func (*BulkLoadHFileResponse) ProtoMessage() {} + +func (m *BulkLoadHFileResponse) GetLoaded() bool { + if m != nil && m.Loaded != nil { + return *m.Loaded + } + return false +} + +type CoprocessorServiceCall struct { + Row []byte `protobuf:"bytes,1,req,name=row" json:"row,omitempty"` + ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` + MethodName *string `protobuf:"bytes,3,req,name=method_name" json:"method_name,omitempty"` + Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CoprocessorServiceCall) Reset() { *m = CoprocessorServiceCall{} } +func (m *CoprocessorServiceCall) String() string { return proto.CompactTextString(m) } +func (*CoprocessorServiceCall) ProtoMessage() {} + +func (m *CoprocessorServiceCall) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *CoprocessorServiceCall) GetServiceName() string { + if m != nil && m.ServiceName != nil { + return *m.ServiceName + } + return "" +} + +func (m *CoprocessorServiceCall) GetMethodName() string { + if m != nil && m.MethodName != nil { + return *m.MethodName + } + return "" +} + +func (m *CoprocessorServiceCall) GetRequest() []byte { + if m != nil { + return m.Request + } + return nil +} + +type CoprocessorServiceResult struct { + Value *NameBytesPair `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CoprocessorServiceResult) Reset() { *m = CoprocessorServiceResult{} } +func (m *CoprocessorServiceResult) String() string { return proto.CompactTextString(m) } +func (*CoprocessorServiceResult) ProtoMessage() {} + +func (m *CoprocessorServiceResult) GetValue() *NameBytesPair { + if m != nil { + return m.Value + } + return nil +} + +type CoprocessorServiceRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + Call *CoprocessorServiceCall `protobuf:"bytes,2,req,name=call" json:"call,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CoprocessorServiceRequest) Reset() { *m = CoprocessorServiceRequest{} } +func (m *CoprocessorServiceRequest) String() string { return proto.CompactTextString(m) } +func (*CoprocessorServiceRequest) ProtoMessage() {} + +func (m *CoprocessorServiceRequest) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +func (m *CoprocessorServiceRequest) GetCall() *CoprocessorServiceCall { + if m != nil { + return m.Call + } + return nil +} + +type CoprocessorServiceResponse struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + Value *NameBytesPair `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CoprocessorServiceResponse) Reset() { *m = CoprocessorServiceResponse{} } +func (m *CoprocessorServiceResponse) String() string { return proto.CompactTextString(m) } +func (*CoprocessorServiceResponse) ProtoMessage() {} + +func (m *CoprocessorServiceResponse) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +func (m *CoprocessorServiceResponse) GetValue() *NameBytesPair { + if m != nil { + return m.Value + } + return nil +} + +// Either a Get or a Mutation +type Action struct { + // If part of a multi action, useful aligning + // result with what was originally submitted. 
+ Index *uint32 `protobuf:"varint,1,opt,name=index" json:"index,omitempty"` + Mutation *MutationProto `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"` + Get *Get `protobuf:"bytes,3,opt,name=get" json:"get,omitempty"` + ServiceCall *CoprocessorServiceCall `protobuf:"bytes,4,opt,name=service_call" json:"service_call,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} + +func (m *Action) GetIndex() uint32 { + if m != nil && m.Index != nil { + return *m.Index + } + return 0 +} + +func (m *Action) GetMutation() *MutationProto { + if m != nil { + return m.Mutation + } + return nil +} + +func (m *Action) GetGet() *Get { + if m != nil { + return m.Get + } + return nil +} + +func (m *Action) GetServiceCall() *CoprocessorServiceCall { + if m != nil { + return m.ServiceCall + } + return nil +} + +// * +// Actions to run against a Region. +type RegionAction struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + // When set, run mutations as atomic unit. + Atomic *bool `protobuf:"varint,2,opt,name=atomic" json:"atomic,omitempty"` + Action []*Action `protobuf:"bytes,3,rep,name=action" json:"action,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionAction) Reset() { *m = RegionAction{} } +func (m *RegionAction) String() string { return proto.CompactTextString(m) } +func (*RegionAction) ProtoMessage() {} + +func (m *RegionAction) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +func (m *RegionAction) GetAtomic() bool { + if m != nil && m.Atomic != nil { + return *m.Atomic + } + return false +} + +func (m *RegionAction) GetAction() []*Action { + if m != nil { + return m.Action + } + return nil +} + +// +// Statistics about the current load on the region +type RegionLoadStats struct { + // Percent load on the memstore. Guaranteed to be positive, between 0 and 100. + MemstoreLoad *int32 `protobuf:"varint,1,opt,name=memstoreLoad,def=0" json:"memstoreLoad,omitempty"` + // Percent JVM heap occupancy. Guaranteed to be positive, between 0 and 100. + // We can move this to "ServerLoadStats" should we develop them. + HeapOccupancy *int32 `protobuf:"varint,2,opt,name=heapOccupancy,def=0" json:"heapOccupancy,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionLoadStats) Reset() { *m = RegionLoadStats{} } +func (m *RegionLoadStats) String() string { return proto.CompactTextString(m) } +func (*RegionLoadStats) ProtoMessage() {} + +const Default_RegionLoadStats_MemstoreLoad int32 = 0 +const Default_RegionLoadStats_HeapOccupancy int32 = 0 + +func (m *RegionLoadStats) GetMemstoreLoad() int32 { + if m != nil && m.MemstoreLoad != nil { + return *m.MemstoreLoad + } + return Default_RegionLoadStats_MemstoreLoad +} + +func (m *RegionLoadStats) GetHeapOccupancy() int32 { + if m != nil && m.HeapOccupancy != nil { + return *m.HeapOccupancy + } + return Default_RegionLoadStats_HeapOccupancy +} + +// * +// Either a Result or an Exception NameBytesPair (keyed by +// exception name whose value is the exception stringified) +// or maybe empty if no result and no exception. +type ResultOrException struct { + // If part of a multi call, save original index of the list of all + // passed so can align this response w/ original request. 
+ Index *uint32 `protobuf:"varint,1,opt,name=index" json:"index,omitempty"` + Result *Result `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` + Exception *NameBytesPair `protobuf:"bytes,3,opt,name=exception" json:"exception,omitempty"` + // result if this was a coprocessor service call + ServiceResult *CoprocessorServiceResult `protobuf:"bytes,4,opt,name=service_result" json:"service_result,omitempty"` + // current load on the region + LoadStats *RegionLoadStats `protobuf:"bytes,5,opt,name=loadStats" json:"loadStats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResultOrException) Reset() { *m = ResultOrException{} } +func (m *ResultOrException) String() string { return proto.CompactTextString(m) } +func (*ResultOrException) ProtoMessage() {} + +func (m *ResultOrException) GetIndex() uint32 { + if m != nil && m.Index != nil { + return *m.Index + } + return 0 +} + +func (m *ResultOrException) GetResult() *Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *ResultOrException) GetException() *NameBytesPair { + if m != nil { + return m.Exception + } + return nil +} + +func (m *ResultOrException) GetServiceResult() *CoprocessorServiceResult { + if m != nil { + return m.ServiceResult + } + return nil +} + +func (m *ResultOrException) GetLoadStats() *RegionLoadStats { + if m != nil { + return m.LoadStats + } + return nil +} + +// * +// The result of a RegionAction. +type RegionActionResult struct { + ResultOrException []*ResultOrException `protobuf:"bytes,1,rep,name=resultOrException" json:"resultOrException,omitempty"` + // If the operation failed globally for this region, this exception is set + Exception *NameBytesPair `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionActionResult) Reset() { *m = RegionActionResult{} } +func (m *RegionActionResult) String() string { return proto.CompactTextString(m) } +func (*RegionActionResult) ProtoMessage() {} + +func (m *RegionActionResult) GetResultOrException() []*ResultOrException { + if m != nil { + return m.ResultOrException + } + return nil +} + +func (m *RegionActionResult) GetException() *NameBytesPair { + if m != nil { + return m.Exception + } + return nil +} + +// * +// Execute a list of actions on a given region in order. +// Nothing prevents a request to contain a set of RegionAction on the same region. +// For this reason, the matching between the MultiRequest and the MultiResponse is not +// done by the region specifier but by keeping the order of the RegionActionResult vs. +// the order of the RegionAction.
+type MultiRequest struct { + RegionAction []*RegionAction `protobuf:"bytes,1,rep,name=regionAction" json:"regionAction,omitempty"` + NonceGroup *uint64 `protobuf:"varint,2,opt,name=nonceGroup" json:"nonceGroup,omitempty"` + Condition *Condition `protobuf:"bytes,3,opt,name=condition" json:"condition,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MultiRequest) Reset() { *m = MultiRequest{} } +func (m *MultiRequest) String() string { return proto.CompactTextString(m) } +func (*MultiRequest) ProtoMessage() {} + +func (m *MultiRequest) GetRegionAction() []*RegionAction { + if m != nil { + return m.RegionAction + } + return nil +} + +func (m *MultiRequest) GetNonceGroup() uint64 { + if m != nil && m.NonceGroup != nil { + return *m.NonceGroup + } + return 0 +} + +func (m *MultiRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +type MultiResponse struct { + RegionActionResult []*RegionActionResult `protobuf:"bytes,1,rep,name=regionActionResult" json:"regionActionResult,omitempty"` + // used for mutate to indicate processed only + Processed *bool `protobuf:"varint,2,opt,name=processed" json:"processed,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MultiResponse) Reset() { *m = MultiResponse{} } +func (m *MultiResponse) String() string { return proto.CompactTextString(m) } +func (*MultiResponse) ProtoMessage() {} + +func (m *MultiResponse) GetRegionActionResult() []*RegionActionResult { + if m != nil { + return m.RegionActionResult + } + return nil +} + +func (m *MultiResponse) GetProcessed() bool { + if m != nil && m.Processed != nil { + return *m.Processed + } + return false +} + +func init() { + proto.RegisterEnum("pb.Consistency", Consistency_name, Consistency_value) + proto.RegisterEnum("pb.MutationProto_Durability", MutationProto_Durability_name, MutationProto_Durability_value) + proto.RegisterEnum("pb.MutationProto_MutationType", MutationProto_MutationType_name, MutationProto_MutationType_value) + proto.RegisterEnum("pb.MutationProto_DeleteType", MutationProto_DeleteType_name, MutationProto_DeleteType_value) +} diff --git a/libs/gohbase/pb/Client.proto b/libs/gohbase/pb/Client.proto new file mode 100644 index 0000000..4b577e0 --- /dev/null +++ b/libs/gohbase/pb/Client.proto @@ -0,0 +1,464 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains protocol buffers that are used for Client service. 
+ +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "ClientProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; +import "Filter.proto"; +import "Cell.proto"; +import "Comparator.proto"; + +/** + * The protocol buffer version of Authorizations. + */ +message Authorizations { + repeated string label = 1; +} + +/** + * The protocol buffer version of CellVisibility. + */ +message CellVisibility { + required string expression = 1; +} + +/** + * Container for a list of column qualifier names of a family. + */ +message Column { + required bytes family = 1; + repeated bytes qualifier = 2; +} + +/** + * Consistency defines the expected consistency level for an operation. + */ +enum Consistency { + STRONG = 0; + TIMELINE = 1; +} + +/** + * The protocol buffer version of Get. + * Unless existence_only is specified, return all the requested data + * for the row that matches exactly, or the one that immediately + * precedes it if closest_row_before is specified. + */ +message Get { + required bytes row = 1; + repeated Column column = 2; + repeated NameBytesPair attribute = 3; + optional Filter filter = 4; + optional TimeRange time_range = 5; + optional uint32 max_versions = 6 [default = 1]; + optional bool cache_blocks = 7 [default = true]; + optional uint32 store_limit = 8; + optional uint32 store_offset = 9; + + // The result isn't asked for, just check for + // the existence. + optional bool existence_only = 10 [default = false]; + + // If the row to get doesn't exist, return the + // closest row before. + optional bool closest_row_before = 11 [default = false]; + + optional Consistency consistency = 12 [default = STRONG]; +} + +message Result { + // Result includes the Cells or else it just has a count of Cells + // that are carried otherwise. + repeated Cell cell = 1; + // The below count is set when the associated cells are + // not part of this protobuf message; they are passed alongside + // and then this Message is just a placeholder with metadata. + // The count is needed to know how many to peel off the block of Cells as + // ours. NOTE: This is different from the pb managed cell_count of the + // 'cell' field above which is non-null when the cells are pb'd. + optional int32 associated_cell_count = 2; + + // used for Get to check existence only. Not set if existence_only was not set to true + // in the query. + optional bool exists = 3; + + // Whether or not the results are coming from possibly stale data + optional bool stale = 4 [default = false]; + + // Whether or not the entire result could be returned. Results will be split when + // the RPC chunk size limit is reached. Partial results contain only a subset of the + // cells for a row and must be combined with a result containing the remaining cells + // to form a complete result + optional bool partial = 5 [default = false]; +} + +/** + * The get request. Perform a single Get operation. + */ +message GetRequest { + required RegionSpecifier region = 1; + required Get get = 2; +} + +message GetResponse { + optional Result result = 1; +} + +/** + * Condition to check if the value of a given cell (row, + * family, qualifier) matches a value via a given comparator. + * + * Condition is used in check and mutate operations. 
+ */ +message Condition { + required bytes row = 1; + required bytes family = 2; + required bytes qualifier = 3; + required CompareType compare_type = 4; + required Comparator comparator = 5; +} + + +/** + * A specific mutation inside a mutate request. + * It can be an append, increment, put or delete based + * on the mutation type. It can be fully filled in or + * only metadata present because data is being carried + * elsewhere outside of pb. + */ +message MutationProto { + optional bytes row = 1; + optional MutationType mutate_type = 2; + repeated ColumnValue column_value = 3; + optional uint64 timestamp = 4; + repeated NameBytesPair attribute = 5; + optional Durability durability = 6 [default = USE_DEFAULT]; + + // For some mutations, a result may be returned, in which case, + // time range can be specified for potential performance gain + optional TimeRange time_range = 7; + // The below count is set when the associated cells are NOT + // part of this protobuf message; they are passed alongside + // and then this Message is a placeholder with metadata. The + // count is needed to know how many to peel off the block of Cells as + // ours. NOTE: This is different from the pb managed cell_count of the + // 'cell' field above which is non-null when the cells are pb'd. + optional int32 associated_cell_count = 8; + + optional uint64 nonce = 9; + + enum Durability { + USE_DEFAULT = 0; + SKIP_WAL = 1; + ASYNC_WAL = 2; + SYNC_WAL = 3; + FSYNC_WAL = 4; + } + + enum MutationType { + APPEND = 0; + INCREMENT = 1; + PUT = 2; + DELETE = 3; + } + + enum DeleteType { + DELETE_ONE_VERSION = 0; + DELETE_MULTIPLE_VERSIONS = 1; + DELETE_FAMILY = 2; + DELETE_FAMILY_VERSION = 3; + } + + message ColumnValue { + required bytes family = 1; + repeated QualifierValue qualifier_value = 2; + + message QualifierValue { + optional bytes qualifier = 1; + optional bytes value = 2; + optional uint64 timestamp = 3; + optional DeleteType delete_type = 4; + optional bytes tags = 5; + } + } +} + +/** + * The mutate request. Perform a single Mutate operation. + * + * Optionally, you can specify a condition. The mutate + * will take place only if the condition is met. Otherwise, + * the mutate will be ignored. In the response result, + * parameter processed is used to indicate if the mutate + * actually happened. + */ +message MutateRequest { + required RegionSpecifier region = 1; + required MutationProto mutation = 2; + optional Condition condition = 3; + optional uint64 nonce_group = 4; +} + +message MutateResponse { + optional Result result = 1; + + // used for mutate to indicate processed only + optional bool processed = 2; +} + +/** + * Instead of get from a table, you can scan it with optional filters. + * You can specify the row key range, time range, the columns/families + * to scan and so on. + * + * This scan is used the first time in a scan request. The response of + * the initial scan will return a scanner id, which should be used to + * fetch result batches later on before it is closed. 
+ */ +message Scan { + repeated Column column = 1; + repeated NameBytesPair attribute = 2; + optional bytes start_row = 3; + optional bytes stop_row = 4; + optional Filter filter = 5; + optional TimeRange time_range = 6; + optional uint32 max_versions = 7 [default = 1]; + optional bool cache_blocks = 8 [default = true]; + optional uint32 batch_size = 9; + optional uint64 max_result_size = 10; + optional uint32 store_limit = 11; + optional uint32 store_offset = 12; + optional bool load_column_families_on_demand = 13; /* DO NOT add defaults to load_column_families_on_demand. */ + optional bool small = 14; + optional bool reversed = 15 [default = false]; + optional Consistency consistency = 16 [default = STRONG]; + optional uint32 caching = 17; +} + +/** + * A scan request. Initially, it should specify a scan. Later on, you + * can use the scanner id returned to fetch result batches with a different + * scan request. + * + * The scanner will remain open if there are more results, and it's not + * asked to be closed explicitly. + * + * You can fetch the results and ask the scanner to be closed to save + * a trip if you are not interested in remaining results. + */ +message ScanRequest { + optional RegionSpecifier region = 1; + optional Scan scan = 2; + optional uint64 scanner_id = 3; + optional uint32 number_of_rows = 4; + optional bool close_scanner = 5; + optional uint64 next_call_seq = 6; + optional bool client_handles_partials = 7; + optional bool client_handles_heartbeats = 8; +} + +/** + * The scan response. If there are no more results, more_results will + * be false. If it is not specified, it means there are more. + */ +message ScanResponse { + // This field is filled in if we are doing cellblocks. A cellblock is made up + // of all Cells serialized out as one cellblock BUT responses from a server + // have their Cells grouped by Result. So we can reconstitute the + // Results on the client-side, this field is a list of counts of Cells + // in each Result that makes up the response. For example, if this field + // has 3, 3, 3 in it, then we know that on the client, we are to make + // three Results each of three Cells each. + repeated uint32 cells_per_result = 1; + + optional uint64 scanner_id = 2; + optional bool more_results = 3; + optional uint32 ttl = 4; + // If cells are not carried in an accompanying cellblock, then they are pb'd here. + // This field is mutually exclusive with cells_per_result (since the Cells will + // be inside the pb'd Result) + repeated Result results = 5; + optional bool stale = 6; + + // This field is filled in if we are doing cellblocks. In the event that a row + // could not fit all of its cells into a single RPC chunk, the results will be + // returned as partials, and reconstructed into a complete result on the client + // side. This field is a list of flags indicating whether or not the result + // that the cells belong to is a partial result. For example, if this field + // has false, false, true in it, then we know that on the client side, we need to + // make another RPC request since the last result was only a partial. + repeated bool partial_flag_per_result = 7; + + // A server may choose to limit the number of results returned to the client for + // reasons such as the size in bytes or quantity of results accumulated. This field + // will be true when more results exist in the current region. + optional bool more_results_in_region = 8; + + // This field is filled in if the server is sending back a heartbeat message.
+ // Heartbeat messages are sent back to the client to prevent the scanner from + // timing out. Seeing a heartbeat message communicates to the Client that the + // server would have continued to scan had the time limit not been reached. + optional bool heartbeat_message = 9; +} + +/** + * Atomically bulk load multiple HFiles (say from different column families) + * into an open region. + */ +message BulkLoadHFileRequest { + required RegionSpecifier region = 1; + repeated FamilyPath family_path = 2; + optional bool assign_seq_num = 3; + + message FamilyPath { + required bytes family = 1; + required string path = 2; + } +} + +message BulkLoadHFileResponse { + required bool loaded = 1; +} + +message CoprocessorServiceCall { + required bytes row = 1; + required string service_name = 2; + required string method_name = 3; + required bytes request = 4; +} + +message CoprocessorServiceResult { + optional NameBytesPair value = 1; +} + +message CoprocessorServiceRequest { + required RegionSpecifier region = 1; + required CoprocessorServiceCall call = 2; +} + +message CoprocessorServiceResponse { + required RegionSpecifier region = 1; + required NameBytesPair value = 2; +} + +// Either a Get or a Mutation +message Action { + // If part of a multi action, useful aligning + // result with what was originally submitted. + optional uint32 index = 1; + optional MutationProto mutation = 2; + optional Get get = 3; + optional CoprocessorServiceCall service_call = 4; +} + +/** + * Actions to run against a Region. + */ +message RegionAction { + required RegionSpecifier region = 1; + // When set, run mutations as atomic unit. + optional bool atomic = 2; + repeated Action action = 3; +} + +/* +* Statistics about the current load on the region +*/ +message RegionLoadStats { + // Percent load on the memstore. Guaranteed to be positive, between 0 and 100. + optional int32 memstoreLoad = 1 [default = 0]; + // Percent JVM heap occupancy. Guaranteed to be positive, between 0 and 100. + // We can move this to "ServerLoadStats" should we develop them. + optional int32 heapOccupancy = 2 [default = 0]; +} + +/** + * Either a Result or an Exception NameBytesPair (keyed by + * exception name whose value is the exception stringified) + * or maybe empty if no result and no exception. + */ +message ResultOrException { + // If part of a multi call, save original index of the list of all + // passed so can align this response w/ original request. + optional uint32 index = 1; + optional Result result = 2; + optional NameBytesPair exception = 3; + // result if this was a coprocessor service call + optional CoprocessorServiceResult service_result = 4; + // current load on the region + optional RegionLoadStats loadStats = 5; +} + +/** + * The result of a RegionAction. + */ +message RegionActionResult { + repeated ResultOrException resultOrException = 1; + // If the operation failed globally for this region, this exception is set + optional NameBytesPair exception = 2; +} + +/** + * Execute a list of actions on a given region in order. + * Nothing prevents a request to contain a set of RegionAction on the same region. + * For this reason, the matching between the MultiRequest and the MultiResponse is not + * done by the region specifier but by keeping the order of the RegionActionResult vs. + * the order of the RegionAction.
+ */ +message MultiRequest { + repeated RegionAction regionAction = 1; + optional uint64 nonceGroup = 2; + optional Condition condition = 3; +} + +message MultiResponse { + repeated RegionActionResult regionActionResult = 1; + // used for mutate to indicate processed only + optional bool processed = 2; +} + + +service ClientService { + rpc Get(GetRequest) + returns(GetResponse); + + rpc Mutate(MutateRequest) + returns(MutateResponse); + + rpc Scan(ScanRequest) + returns(ScanResponse); + + rpc BulkLoadHFile(BulkLoadHFileRequest) + returns(BulkLoadHFileResponse); + + rpc ExecService(CoprocessorServiceRequest) + returns(CoprocessorServiceResponse); + + rpc ExecRegionServerService(CoprocessorServiceRequest) + returns(CoprocessorServiceResponse); + + rpc Multi(MultiRequest) + returns(MultiResponse); +} diff --git a/libs/gohbase/pb/ClusterId.pb.go b/libs/gohbase/pb/ClusterId.pb.go new file mode 100644 index 0000000..74bd8f7 --- /dev/null +++ b/libs/gohbase/pb/ClusterId.pb.go @@ -0,0 +1,35 @@ +// Code generated by protoc-gen-go. +// source: ClusterId.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// * +// Content of the '/hbase/hbaseid', cluster id, znode. +// Also cluster of the ${HBASE_ROOTDIR}/hbase.id file. +type ClusterId struct { + // This is the cluster id, a uuid as a String + ClusterId *string `protobuf:"bytes,1,req,name=cluster_id" json:"cluster_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ClusterId) Reset() { *m = ClusterId{} } +func (m *ClusterId) String() string { return proto.CompactTextString(m) } +func (*ClusterId) ProtoMessage() {} + +func (m *ClusterId) GetClusterId() string { + if m != nil && m.ClusterId != nil { + return *m.ClusterId + } + return "" +} + +func init() { +} diff --git a/libs/gohbase/pb/ClusterId.proto b/libs/gohbase/pb/ClusterId.proto new file mode 100644 index 0000000..3d0e494 --- /dev/null +++ b/libs/gohbase/pb/ClusterId.proto @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains protocol buffers that are shared throughout HBase + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "ClusterIdProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +/** + * Content of the '/hbase/hbaseid', cluster id, znode. + * Also cluster of the ${HBASE_ROOTDIR}/hbase.id file. 
+ */ +message ClusterId { + // This is the cluster id, a uuid as a String + required string cluster_id = 1; +} diff --git a/libs/gohbase/pb/ClusterStatus.pb.go b/libs/gohbase/pb/ClusterStatus.pb.go new file mode 100644 index 0000000..93a8507 --- /dev/null +++ b/libs/gohbase/pb/ClusterStatus.pb.go @@ -0,0 +1,669 @@ +// Code generated by protoc-gen-go. +// source: ClusterStatus.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type RegionState_State int32 + +const ( + RegionState_OFFLINE RegionState_State = 0 + RegionState_PENDING_OPEN RegionState_State = 1 + RegionState_OPENING RegionState_State = 2 + RegionState_OPEN RegionState_State = 3 + RegionState_PENDING_CLOSE RegionState_State = 4 + RegionState_CLOSING RegionState_State = 5 + RegionState_CLOSED RegionState_State = 6 + RegionState_SPLITTING RegionState_State = 7 + RegionState_SPLIT RegionState_State = 8 + RegionState_FAILED_OPEN RegionState_State = 9 + RegionState_FAILED_CLOSE RegionState_State = 10 + RegionState_MERGING RegionState_State = 11 + RegionState_MERGED RegionState_State = 12 + RegionState_SPLITTING_NEW RegionState_State = 13 + // region but hasn't be created yet, or master doesn't + // know it's already created + RegionState_MERGING_NEW RegionState_State = 14 +) + +var RegionState_State_name = map[int32]string{ + 0: "OFFLINE", + 1: "PENDING_OPEN", + 2: "OPENING", + 3: "OPEN", + 4: "PENDING_CLOSE", + 5: "CLOSING", + 6: "CLOSED", + 7: "SPLITTING", + 8: "SPLIT", + 9: "FAILED_OPEN", + 10: "FAILED_CLOSE", + 11: "MERGING", + 12: "MERGED", + 13: "SPLITTING_NEW", + 14: "MERGING_NEW", +} +var RegionState_State_value = map[string]int32{ + "OFFLINE": 0, + "PENDING_OPEN": 1, + "OPENING": 2, + "OPEN": 3, + "PENDING_CLOSE": 4, + "CLOSING": 5, + "CLOSED": 6, + "SPLITTING": 7, + "SPLIT": 8, + "FAILED_OPEN": 9, + "FAILED_CLOSE": 10, + "MERGING": 11, + "MERGED": 12, + "SPLITTING_NEW": 13, + "MERGING_NEW": 14, +} + +func (x RegionState_State) Enum() *RegionState_State { + p := new(RegionState_State) + *p = x + return p +} +func (x RegionState_State) String() string { + return proto.EnumName(RegionState_State_name, int32(x)) +} +func (x *RegionState_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RegionState_State_value, data, "RegionState_State") + if err != nil { + return err + } + *x = RegionState_State(value) + return nil +} + +type RegionState struct { + RegionInfo *RegionInfo `protobuf:"bytes,1,req,name=region_info" json:"region_info,omitempty"` + State *RegionState_State `protobuf:"varint,2,req,name=state,enum=pb.RegionState_State" json:"state,omitempty"` + Stamp *uint64 `protobuf:"varint,3,opt,name=stamp" json:"stamp,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionState) Reset() { *m = RegionState{} } +func (m *RegionState) String() string { return proto.CompactTextString(m) } +func (*RegionState) ProtoMessage() {} + +func (m *RegionState) GetRegionInfo() *RegionInfo { + if m != nil { + return m.RegionInfo + } + return nil +} + +func (m *RegionState) GetState() RegionState_State { + if m != nil && m.State != nil { + return *m.State + } + return RegionState_OFFLINE +} + +func (m *RegionState) GetStamp() uint64 { + if m != nil && m.Stamp != nil { + return *m.Stamp + } + return 0 +} + +type RegionInTransition struct { + Spec *RegionSpecifier `protobuf:"bytes,1,req,name=spec" json:"spec,omitempty"` + 
RegionState *RegionState `protobuf:"bytes,2,req,name=region_state" json:"region_state,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionInTransition) Reset() { *m = RegionInTransition{} } +func (m *RegionInTransition) String() string { return proto.CompactTextString(m) } +func (*RegionInTransition) ProtoMessage() {} + +func (m *RegionInTransition) GetSpec() *RegionSpecifier { + if m != nil { + return m.Spec + } + return nil +} + +func (m *RegionInTransition) GetRegionState() *RegionState { + if m != nil { + return m.RegionState + } + return nil +} + +// * +// sequence Id of a store +type StoreSequenceId struct { + FamilyName []byte `protobuf:"bytes,1,req,name=family_name" json:"family_name,omitempty"` + SequenceId *uint64 `protobuf:"varint,2,req,name=sequence_id" json:"sequence_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StoreSequenceId) Reset() { *m = StoreSequenceId{} } +func (m *StoreSequenceId) String() string { return proto.CompactTextString(m) } +func (*StoreSequenceId) ProtoMessage() {} + +func (m *StoreSequenceId) GetFamilyName() []byte { + if m != nil { + return m.FamilyName + } + return nil +} + +func (m *StoreSequenceId) GetSequenceId() uint64 { + if m != nil && m.SequenceId != nil { + return *m.SequenceId + } + return 0 +} + +// * +// contains a sequence id of a region which should be the minimum of its store sequence ids and +// list of sequence ids of the region's stores +type RegionStoreSequenceIds struct { + LastFlushedSequenceId *uint64 `protobuf:"varint,1,req,name=last_flushed_sequence_id" json:"last_flushed_sequence_id,omitempty"` + StoreSequenceId []*StoreSequenceId `protobuf:"bytes,2,rep,name=store_sequence_id" json:"store_sequence_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionStoreSequenceIds) Reset() { *m = RegionStoreSequenceIds{} } +func (m *RegionStoreSequenceIds) String() string { return proto.CompactTextString(m) } +func (*RegionStoreSequenceIds) ProtoMessage() {} + +func (m *RegionStoreSequenceIds) GetLastFlushedSequenceId() uint64 { + if m != nil && m.LastFlushedSequenceId != nil { + return *m.LastFlushedSequenceId + } + return 0 +} + +func (m *RegionStoreSequenceIds) GetStoreSequenceId() []*StoreSequenceId { + if m != nil { + return m.StoreSequenceId + } + return nil +} + +type RegionLoad struct { + // * the region specifier + RegionSpecifier *RegionSpecifier `protobuf:"bytes,1,req,name=region_specifier" json:"region_specifier,omitempty"` + // * the number of stores for the region + Stores *uint32 `protobuf:"varint,2,opt,name=stores" json:"stores,omitempty"` + // * the number of storefiles for the region + Storefiles *uint32 `protobuf:"varint,3,opt,name=storefiles" json:"storefiles,omitempty"` + // * the total size of the store files for the region, uncompressed, in MB + StoreUncompressedSize_MB *uint32 `protobuf:"varint,4,opt,name=store_uncompressed_size_MB" json:"store_uncompressed_size_MB,omitempty"` + // * the current total size of the store files for the region, in MB + StorefileSize_MB *uint32 `protobuf:"varint,5,opt,name=storefile_size_MB" json:"storefile_size_MB,omitempty"` + // * the current size of the memstore for the region, in MB + MemstoreSize_MB *uint32 `protobuf:"varint,6,opt,name=memstore_size_MB" json:"memstore_size_MB,omitempty"` + // * + // The current total size of root-level store file indexes for the region, + // in MB. The same as {@link #rootIndexSizeKB} but in MB. 
+ StorefileIndexSize_MB *uint32 `protobuf:"varint,7,opt,name=storefile_index_size_MB" json:"storefile_index_size_MB,omitempty"` + // * the current total read requests made to region + ReadRequestsCount *uint64 `protobuf:"varint,8,opt,name=read_requests_count" json:"read_requests_count,omitempty"` + // * the current total write requests made to region + WriteRequestsCount *uint64 `protobuf:"varint,9,opt,name=write_requests_count" json:"write_requests_count,omitempty"` + // * the total compacting key values in currently running compaction + TotalCompacting_KVs *uint64 `protobuf:"varint,10,opt,name=total_compacting_KVs" json:"total_compacting_KVs,omitempty"` + // * the completed count of key values in currently running compaction + CurrentCompacted_KVs *uint64 `protobuf:"varint,11,opt,name=current_compacted_KVs" json:"current_compacted_KVs,omitempty"` + // * The current total size of root-level indexes for the region, in KB. + RootIndexSize_KB *uint32 `protobuf:"varint,12,opt,name=root_index_size_KB" json:"root_index_size_KB,omitempty"` + // * The total size of all index blocks, not just the root level, in KB. + TotalStaticIndexSize_KB *uint32 `protobuf:"varint,13,opt,name=total_static_index_size_KB" json:"total_static_index_size_KB,omitempty"` + // * + // The total size of all Bloom filter blocks, not just loaded into the + // block cache, in KB. + TotalStaticBloomSize_KB *uint32 `protobuf:"varint,14,opt,name=total_static_bloom_size_KB" json:"total_static_bloom_size_KB,omitempty"` + // * the most recent sequence Id from cache flush + CompleteSequenceId *uint64 `protobuf:"varint,15,opt,name=complete_sequence_id" json:"complete_sequence_id,omitempty"` + // * The current data locality for region in the regionserver + DataLocality *float32 `protobuf:"fixed32,16,opt,name=data_locality" json:"data_locality,omitempty"` + LastMajorCompactionTs *uint64 `protobuf:"varint,17,opt,name=last_major_compaction_ts,def=0" json:"last_major_compaction_ts,omitempty"` + // * the most recent sequence Id of store from cache flush + StoreCompleteSequenceId []*StoreSequenceId `protobuf:"bytes,18,rep,name=store_complete_sequence_id" json:"store_complete_sequence_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionLoad) Reset() { *m = RegionLoad{} } +func (m *RegionLoad) String() string { return proto.CompactTextString(m) } +func (*RegionLoad) ProtoMessage() {} + +const Default_RegionLoad_LastMajorCompactionTs uint64 = 0 + +func (m *RegionLoad) GetRegionSpecifier() *RegionSpecifier { + if m != nil { + return m.RegionSpecifier + } + return nil +} + +func (m *RegionLoad) GetStores() uint32 { + if m != nil && m.Stores != nil { + return *m.Stores + } + return 0 +} + +func (m *RegionLoad) GetStorefiles() uint32 { + if m != nil && m.Storefiles != nil { + return *m.Storefiles + } + return 0 +} + +func (m *RegionLoad) GetStoreUncompressedSize_MB() uint32 { + if m != nil && m.StoreUncompressedSize_MB != nil { + return *m.StoreUncompressedSize_MB + } + return 0 +} + +func (m *RegionLoad) GetStorefileSize_MB() uint32 { + if m != nil && m.StorefileSize_MB != nil { + return *m.StorefileSize_MB + } + return 0 +} + +func (m *RegionLoad) GetMemstoreSize_MB() uint32 { + if m != nil && m.MemstoreSize_MB != nil { + return *m.MemstoreSize_MB + } + return 0 +} + +func (m *RegionLoad) GetStorefileIndexSize_MB() uint32 { + if m != nil && m.StorefileIndexSize_MB != nil { + return *m.StorefileIndexSize_MB + } + return 0 +} + +func (m *RegionLoad) GetReadRequestsCount() uint64 { + if m != nil && m.ReadRequestsCount != nil { 
+ return *m.ReadRequestsCount + } + return 0 +} + +func (m *RegionLoad) GetWriteRequestsCount() uint64 { + if m != nil && m.WriteRequestsCount != nil { + return *m.WriteRequestsCount + } + return 0 +} + +func (m *RegionLoad) GetTotalCompacting_KVs() uint64 { + if m != nil && m.TotalCompacting_KVs != nil { + return *m.TotalCompacting_KVs + } + return 0 +} + +func (m *RegionLoad) GetCurrentCompacted_KVs() uint64 { + if m != nil && m.CurrentCompacted_KVs != nil { + return *m.CurrentCompacted_KVs + } + return 0 +} + +func (m *RegionLoad) GetRootIndexSize_KB() uint32 { + if m != nil && m.RootIndexSize_KB != nil { + return *m.RootIndexSize_KB + } + return 0 +} + +func (m *RegionLoad) GetTotalStaticIndexSize_KB() uint32 { + if m != nil && m.TotalStaticIndexSize_KB != nil { + return *m.TotalStaticIndexSize_KB + } + return 0 +} + +func (m *RegionLoad) GetTotalStaticBloomSize_KB() uint32 { + if m != nil && m.TotalStaticBloomSize_KB != nil { + return *m.TotalStaticBloomSize_KB + } + return 0 +} + +func (m *RegionLoad) GetCompleteSequenceId() uint64 { + if m != nil && m.CompleteSequenceId != nil { + return *m.CompleteSequenceId + } + return 0 +} + +func (m *RegionLoad) GetDataLocality() float32 { + if m != nil && m.DataLocality != nil { + return *m.DataLocality + } + return 0 +} + +func (m *RegionLoad) GetLastMajorCompactionTs() uint64 { + if m != nil && m.LastMajorCompactionTs != nil { + return *m.LastMajorCompactionTs + } + return Default_RegionLoad_LastMajorCompactionTs +} + +func (m *RegionLoad) GetStoreCompleteSequenceId() []*StoreSequenceId { + if m != nil { + return m.StoreCompleteSequenceId + } + return nil +} + +type ReplicationLoadSink struct { + AgeOfLastAppliedOp *uint64 `protobuf:"varint,1,req,name=ageOfLastAppliedOp" json:"ageOfLastAppliedOp,omitempty"` + TimeStampsOfLastAppliedOp *uint64 `protobuf:"varint,2,req,name=timeStampsOfLastAppliedOp" json:"timeStampsOfLastAppliedOp,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplicationLoadSink) Reset() { *m = ReplicationLoadSink{} } +func (m *ReplicationLoadSink) String() string { return proto.CompactTextString(m) } +func (*ReplicationLoadSink) ProtoMessage() {} + +func (m *ReplicationLoadSink) GetAgeOfLastAppliedOp() uint64 { + if m != nil && m.AgeOfLastAppliedOp != nil { + return *m.AgeOfLastAppliedOp + } + return 0 +} + +func (m *ReplicationLoadSink) GetTimeStampsOfLastAppliedOp() uint64 { + if m != nil && m.TimeStampsOfLastAppliedOp != nil { + return *m.TimeStampsOfLastAppliedOp + } + return 0 +} + +type ReplicationLoadSource struct { + PeerID *string `protobuf:"bytes,1,req,name=peerID" json:"peerID,omitempty"` + AgeOfLastShippedOp *uint64 `protobuf:"varint,2,req,name=ageOfLastShippedOp" json:"ageOfLastShippedOp,omitempty"` + SizeOfLogQueue *uint32 `protobuf:"varint,3,req,name=sizeOfLogQueue" json:"sizeOfLogQueue,omitempty"` + TimeStampOfLastShippedOp *uint64 `protobuf:"varint,4,req,name=timeStampOfLastShippedOp" json:"timeStampOfLastShippedOp,omitempty"` + ReplicationLag *uint64 `protobuf:"varint,5,req,name=replicationLag" json:"replicationLag,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplicationLoadSource) Reset() { *m = ReplicationLoadSource{} } +func (m *ReplicationLoadSource) String() string { return proto.CompactTextString(m) } +func (*ReplicationLoadSource) ProtoMessage() {} + +func (m *ReplicationLoadSource) GetPeerID() string { + if m != nil && m.PeerID != nil { + return *m.PeerID + } + return "" +} + +func (m *ReplicationLoadSource) GetAgeOfLastShippedOp() uint64 { + if m != nil && 
m.AgeOfLastShippedOp != nil { + return *m.AgeOfLastShippedOp + } + return 0 +} + +func (m *ReplicationLoadSource) GetSizeOfLogQueue() uint32 { + if m != nil && m.SizeOfLogQueue != nil { + return *m.SizeOfLogQueue + } + return 0 +} + +func (m *ReplicationLoadSource) GetTimeStampOfLastShippedOp() uint64 { + if m != nil && m.TimeStampOfLastShippedOp != nil { + return *m.TimeStampOfLastShippedOp + } + return 0 +} + +func (m *ReplicationLoadSource) GetReplicationLag() uint64 { + if m != nil && m.ReplicationLag != nil { + return *m.ReplicationLag + } + return 0 +} + +type ServerLoad struct { + // * Number of requests since last report. + NumberOfRequests *uint64 `protobuf:"varint,1,opt,name=number_of_requests" json:"number_of_requests,omitempty"` + // * Total Number of requests from the start of the region server. + TotalNumberOfRequests *uint64 `protobuf:"varint,2,opt,name=total_number_of_requests" json:"total_number_of_requests,omitempty"` + // * the amount of used heap, in MB. + UsedHeap_MB *uint32 `protobuf:"varint,3,opt,name=used_heap_MB" json:"used_heap_MB,omitempty"` + // * the maximum allowable size of the heap, in MB. + MaxHeap_MB *uint32 `protobuf:"varint,4,opt,name=max_heap_MB" json:"max_heap_MB,omitempty"` + // * Information on the load of individual regions. + RegionLoads []*RegionLoad `protobuf:"bytes,5,rep,name=region_loads" json:"region_loads,omitempty"` + // * + // Regionserver-level coprocessors, e.g., WALObserver implementations. + // Region-level coprocessors, on the other hand, are stored inside RegionLoad + // objects. + Coprocessors []*Coprocessor `protobuf:"bytes,6,rep,name=coprocessors" json:"coprocessors,omitempty"` + // * + // Time when incremental (non-total) counts began being calculated (e.g. number_of_requests) + // time is measured as the difference, measured in milliseconds, between the current time + // and midnight, January 1, 1970 UTC. + ReportStartTime *uint64 `protobuf:"varint,7,opt,name=report_start_time" json:"report_start_time,omitempty"` + // * + // Time when report was generated. + // time is measured as the difference, measured in milliseconds, between the current time + // and midnight, January 1, 1970 UTC. + ReportEndTime *uint64 `protobuf:"varint,8,opt,name=report_end_time" json:"report_end_time,omitempty"` + // * + // The port number that this region server is hosing an info server on. + InfoServerPort *uint32 `protobuf:"varint,9,opt,name=info_server_port" json:"info_server_port,omitempty"` + // * + // The replicationLoadSource for the replication Source status of this region server. + ReplLoadSource []*ReplicationLoadSource `protobuf:"bytes,10,rep,name=replLoadSource" json:"replLoadSource,omitempty"` + // * + // The replicationLoadSink for the replication Sink status of this region server. 
+ ReplLoadSink *ReplicationLoadSink `protobuf:"bytes,11,opt,name=replLoadSink" json:"replLoadSink,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServerLoad) Reset() { *m = ServerLoad{} } +func (m *ServerLoad) String() string { return proto.CompactTextString(m) } +func (*ServerLoad) ProtoMessage() {} + +func (m *ServerLoad) GetNumberOfRequests() uint64 { + if m != nil && m.NumberOfRequests != nil { + return *m.NumberOfRequests + } + return 0 +} + +func (m *ServerLoad) GetTotalNumberOfRequests() uint64 { + if m != nil && m.TotalNumberOfRequests != nil { + return *m.TotalNumberOfRequests + } + return 0 +} + +func (m *ServerLoad) GetUsedHeap_MB() uint32 { + if m != nil && m.UsedHeap_MB != nil { + return *m.UsedHeap_MB + } + return 0 +} + +func (m *ServerLoad) GetMaxHeap_MB() uint32 { + if m != nil && m.MaxHeap_MB != nil { + return *m.MaxHeap_MB + } + return 0 +} + +func (m *ServerLoad) GetRegionLoads() []*RegionLoad { + if m != nil { + return m.RegionLoads + } + return nil +} + +func (m *ServerLoad) GetCoprocessors() []*Coprocessor { + if m != nil { + return m.Coprocessors + } + return nil +} + +func (m *ServerLoad) GetReportStartTime() uint64 { + if m != nil && m.ReportStartTime != nil { + return *m.ReportStartTime + } + return 0 +} + +func (m *ServerLoad) GetReportEndTime() uint64 { + if m != nil && m.ReportEndTime != nil { + return *m.ReportEndTime + } + return 0 +} + +func (m *ServerLoad) GetInfoServerPort() uint32 { + if m != nil && m.InfoServerPort != nil { + return *m.InfoServerPort + } + return 0 +} + +func (m *ServerLoad) GetReplLoadSource() []*ReplicationLoadSource { + if m != nil { + return m.ReplLoadSource + } + return nil +} + +func (m *ServerLoad) GetReplLoadSink() *ReplicationLoadSink { + if m != nil { + return m.ReplLoadSink + } + return nil +} + +type LiveServerInfo struct { + Server *ServerName `protobuf:"bytes,1,req,name=server" json:"server,omitempty"` + ServerLoad *ServerLoad `protobuf:"bytes,2,req,name=server_load" json:"server_load,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LiveServerInfo) Reset() { *m = LiveServerInfo{} } +func (m *LiveServerInfo) String() string { return proto.CompactTextString(m) } +func (*LiveServerInfo) ProtoMessage() {} + +func (m *LiveServerInfo) GetServer() *ServerName { + if m != nil { + return m.Server + } + return nil +} + +func (m *LiveServerInfo) GetServerLoad() *ServerLoad { + if m != nil { + return m.ServerLoad + } + return nil +} + +type ClusterStatus struct { + HbaseVersion *HBaseVersionFileContent `protobuf:"bytes,1,opt,name=hbase_version" json:"hbase_version,omitempty"` + LiveServers []*LiveServerInfo `protobuf:"bytes,2,rep,name=live_servers" json:"live_servers,omitempty"` + DeadServers []*ServerName `protobuf:"bytes,3,rep,name=dead_servers" json:"dead_servers,omitempty"` + RegionsInTransition []*RegionInTransition `protobuf:"bytes,4,rep,name=regions_in_transition" json:"regions_in_transition,omitempty"` + ClusterId *ClusterId `protobuf:"bytes,5,opt,name=cluster_id" json:"cluster_id,omitempty"` + MasterCoprocessors []*Coprocessor `protobuf:"bytes,6,rep,name=master_coprocessors" json:"master_coprocessors,omitempty"` + Master *ServerName `protobuf:"bytes,7,opt,name=master" json:"master,omitempty"` + BackupMasters []*ServerName `protobuf:"bytes,8,rep,name=backup_masters" json:"backup_masters,omitempty"` + BalancerOn *bool `protobuf:"varint,9,opt,name=balancer_on" json:"balancer_on,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } +func (m 
*ClusterStatus) String() string { return proto.CompactTextString(m) } +func (*ClusterStatus) ProtoMessage() {} + +func (m *ClusterStatus) GetHbaseVersion() *HBaseVersionFileContent { + if m != nil { + return m.HbaseVersion + } + return nil +} + +func (m *ClusterStatus) GetLiveServers() []*LiveServerInfo { + if m != nil { + return m.LiveServers + } + return nil +} + +func (m *ClusterStatus) GetDeadServers() []*ServerName { + if m != nil { + return m.DeadServers + } + return nil +} + +func (m *ClusterStatus) GetRegionsInTransition() []*RegionInTransition { + if m != nil { + return m.RegionsInTransition + } + return nil +} + +func (m *ClusterStatus) GetClusterId() *ClusterId { + if m != nil { + return m.ClusterId + } + return nil +} + +func (m *ClusterStatus) GetMasterCoprocessors() []*Coprocessor { + if m != nil { + return m.MasterCoprocessors + } + return nil +} + +func (m *ClusterStatus) GetMaster() *ServerName { + if m != nil { + return m.Master + } + return nil +} + +func (m *ClusterStatus) GetBackupMasters() []*ServerName { + if m != nil { + return m.BackupMasters + } + return nil +} + +func (m *ClusterStatus) GetBalancerOn() bool { + if m != nil && m.BalancerOn != nil { + return *m.BalancerOn + } + return false +} + +func init() { + proto.RegisterEnum("pb.RegionState_State", RegionState_State_name, RegionState_State_value) +} diff --git a/libs/gohbase/pb/ClusterStatus.proto b/libs/gohbase/pb/ClusterStatus.proto new file mode 100644 index 0000000..57029ae --- /dev/null +++ b/libs/gohbase/pb/ClusterStatus.proto @@ -0,0 +1,224 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file contains protocol buffers that are used for ClustStatus + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "ClusterStatusProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; +import "ClusterId.proto"; +import "FS.proto"; + +message RegionState { + required RegionInfo region_info = 1; + required State state = 2; + optional uint64 stamp = 3; + enum State { + OFFLINE = 0; // region is in an offline state + PENDING_OPEN = 1; // sent rpc to server to open but has not begun + OPENING = 2; // server has begun to open but not yet done + OPEN = 3; // server opened region and updated meta + PENDING_CLOSE = 4; // sent rpc to server to close but has not begun + CLOSING = 5; // server has begun to close but not yet done + CLOSED = 6; // server closed region and updated meta + SPLITTING = 7; // server started split of a region + SPLIT = 8; // server completed split of a region + FAILED_OPEN = 9; // failed to open, and won't retry any more + FAILED_CLOSE = 10; // failed to close, and won't retry any more + MERGING = 11; // server started merge a region + MERGED = 12; // server completed merge of a region + SPLITTING_NEW = 13; // new region to be created when RS splits a parent + // region but hasn't be created yet, or master doesn't + // know it's already created + MERGING_NEW = 14; // new region to be created when RS merges two + // daughter regions but hasn't be created yet, or + // master doesn't know it's already created + } +} + +message RegionInTransition { + required RegionSpecifier spec = 1; + required RegionState region_state = 2; +} + +/** + * sequence Id of a store + */ +message StoreSequenceId { + required bytes family_name = 1; + required uint64 sequence_id = 2; +} + +/** + * contains a sequence id of a region which should be the minimum of its store sequence ids and + * list of sequence ids of the region's stores + */ +message RegionStoreSequenceIds { + required uint64 last_flushed_sequence_id = 1; + repeated StoreSequenceId store_sequence_id = 2; +} + +message RegionLoad { + /** the region specifier */ + required RegionSpecifier region_specifier = 1; + + /** the number of stores for the region */ + optional uint32 stores = 2; + + /** the number of storefiles for the region */ + optional uint32 storefiles = 3; + + /** the total size of the store files for the region, uncompressed, in MB */ + optional uint32 store_uncompressed_size_MB = 4; + + /** the current total size of the store files for the region, in MB */ + optional uint32 storefile_size_MB = 5; + + /** the current size of the memstore for the region, in MB */ + optional uint32 memstore_size_MB = 6; + + /** + * The current total size of root-level store file indexes for the region, + * in MB. The same as {@link #rootIndexSizeKB} but in MB. + */ + optional uint32 storefile_index_size_MB = 7; + + /** the current total read requests made to region */ + optional uint64 read_requests_count = 8; + + /** the current total write requests made to region */ + optional uint64 write_requests_count = 9; + + /** the total compacting key values in currently running compaction */ + optional uint64 total_compacting_KVs = 10; + + /** the completed count of key values in currently running compaction */ + optional uint64 current_compacted_KVs = 11; + + /** The current total size of root-level indexes for the region, in KB. 
*/ + optional uint32 root_index_size_KB = 12; + + /** The total size of all index blocks, not just the root level, in KB. */ + optional uint32 total_static_index_size_KB = 13; + + /** + * The total size of all Bloom filter blocks, not just loaded into the + * block cache, in KB. + */ + optional uint32 total_static_bloom_size_KB = 14; + + /** the most recent sequence Id from cache flush */ + optional uint64 complete_sequence_id = 15; + + /** The current data locality for region in the regionserver */ + optional float data_locality = 16; + + optional uint64 last_major_compaction_ts = 17 [default = 0]; + + /** the most recent sequence Id of store from cache flush */ + repeated StoreSequenceId store_complete_sequence_id = 18; +} + +/* Server-level protobufs */ + +message ReplicationLoadSink { + required uint64 ageOfLastAppliedOp = 1; + required uint64 timeStampsOfLastAppliedOp = 2; +} + +message ReplicationLoadSource { + required string peerID = 1; + required uint64 ageOfLastShippedOp = 2; + required uint32 sizeOfLogQueue = 3; + required uint64 timeStampOfLastShippedOp = 4; + required uint64 replicationLag = 5; +} + +message ServerLoad { + /** Number of requests since last report. */ + optional uint64 number_of_requests = 1; + + /** Total Number of requests from the start of the region server. */ + optional uint64 total_number_of_requests = 2; + + /** the amount of used heap, in MB. */ + optional uint32 used_heap_MB = 3; + + /** the maximum allowable size of the heap, in MB. */ + optional uint32 max_heap_MB = 4; + + /** Information on the load of individual regions. */ + repeated RegionLoad region_loads = 5; + + /** + * Regionserver-level coprocessors, e.g., WALObserver implementations. + * Region-level coprocessors, on the other hand, are stored inside RegionLoad + * objects. + */ + repeated Coprocessor coprocessors = 6; + + /** + * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests) + * time is measured as the difference, measured in milliseconds, between the current time + * and midnight, January 1, 1970 UTC. + */ + optional uint64 report_start_time = 7; + + /** + * Time when report was generated. + * time is measured as the difference, measured in milliseconds, between the current time + * and midnight, January 1, 1970 UTC. + */ + optional uint64 report_end_time = 8; + + /** + * The port number that this region server is hosing an info server on. + */ + optional uint32 info_server_port = 9; + + /** + * The replicationLoadSource for the replication Source status of this region server. + */ + repeated ReplicationLoadSource replLoadSource = 10; + + /** + * The replicationLoadSink for the replication Sink status of this region server. + */ + optional ReplicationLoadSink replLoadSink = 11; +} + +message LiveServerInfo { + required ServerName server = 1; + required ServerLoad server_load = 2; +} + +message ClusterStatus { + optional HBaseVersionFileContent hbase_version = 1; + repeated LiveServerInfo live_servers = 2; + repeated ServerName dead_servers = 3; + repeated RegionInTransition regions_in_transition = 4; + optional ClusterId cluster_id = 5; + repeated Coprocessor master_coprocessors = 6; + optional ServerName master = 7; + repeated ServerName backup_masters = 8; + optional bool balancer_on = 9; +} diff --git a/libs/gohbase/pb/Comparator.pb.go b/libs/gohbase/pb/Comparator.pb.go new file mode 100644 index 0000000..7371b67 --- /dev/null +++ b/libs/gohbase/pb/Comparator.pb.go @@ -0,0 +1,228 @@ +// Code generated by protoc-gen-go. 
+// source: Comparator.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type BitComparator_BitwiseOp int32 + +const ( + BitComparator_AND BitComparator_BitwiseOp = 1 + BitComparator_OR BitComparator_BitwiseOp = 2 + BitComparator_XOR BitComparator_BitwiseOp = 3 +) + +var BitComparator_BitwiseOp_name = map[int32]string{ + 1: "AND", + 2: "OR", + 3: "XOR", +} +var BitComparator_BitwiseOp_value = map[string]int32{ + "AND": 1, + "OR": 2, + "XOR": 3, +} + +func (x BitComparator_BitwiseOp) Enum() *BitComparator_BitwiseOp { + p := new(BitComparator_BitwiseOp) + *p = x + return p +} +func (x BitComparator_BitwiseOp) String() string { + return proto.EnumName(BitComparator_BitwiseOp_name, int32(x)) +} +func (x *BitComparator_BitwiseOp) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BitComparator_BitwiseOp_value, data, "BitComparator_BitwiseOp") + if err != nil { + return err + } + *x = BitComparator_BitwiseOp(value) + return nil +} + +type Comparator struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + SerializedComparator []byte `protobuf:"bytes,2,opt,name=serialized_comparator" json:"serialized_comparator,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Comparator) Reset() { *m = Comparator{} } +func (m *Comparator) String() string { return proto.CompactTextString(m) } +func (*Comparator) ProtoMessage() {} + +func (m *Comparator) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Comparator) GetSerializedComparator() []byte { + if m != nil { + return m.SerializedComparator + } + return nil +} + +type ByteArrayComparable struct { + Value []byte `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ByteArrayComparable) Reset() { *m = ByteArrayComparable{} } +func (m *ByteArrayComparable) String() string { return proto.CompactTextString(m) } +func (*ByteArrayComparable) ProtoMessage() {} + +func (m *ByteArrayComparable) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type BinaryComparator struct { + Comparable *ByteArrayComparable `protobuf:"bytes,1,req,name=comparable" json:"comparable,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BinaryComparator) Reset() { *m = BinaryComparator{} } +func (m *BinaryComparator) String() string { return proto.CompactTextString(m) } +func (*BinaryComparator) ProtoMessage() {} + +func (m *BinaryComparator) GetComparable() *ByteArrayComparable { + if m != nil { + return m.Comparable + } + return nil +} + +type LongComparator struct { + Comparable *ByteArrayComparable `protobuf:"bytes,1,req,name=comparable" json:"comparable,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LongComparator) Reset() { *m = LongComparator{} } +func (m *LongComparator) String() string { return proto.CompactTextString(m) } +func (*LongComparator) ProtoMessage() {} + +func (m *LongComparator) GetComparable() *ByteArrayComparable { + if m != nil { + return m.Comparable + } + return nil +} + +type BinaryPrefixComparator struct { + Comparable *ByteArrayComparable `protobuf:"bytes,1,req,name=comparable" json:"comparable,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BinaryPrefixComparator) Reset() { *m = BinaryPrefixComparator{} } +func (m *BinaryPrefixComparator) String() 
string { return proto.CompactTextString(m) } +func (*BinaryPrefixComparator) ProtoMessage() {} + +func (m *BinaryPrefixComparator) GetComparable() *ByteArrayComparable { + if m != nil { + return m.Comparable + } + return nil +} + +type BitComparator struct { + Comparable *ByteArrayComparable `protobuf:"bytes,1,req,name=comparable" json:"comparable,omitempty"` + BitwiseOp *BitComparator_BitwiseOp `protobuf:"varint,2,req,name=bitwise_op,enum=pb.BitComparator_BitwiseOp" json:"bitwise_op,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BitComparator) Reset() { *m = BitComparator{} } +func (m *BitComparator) String() string { return proto.CompactTextString(m) } +func (*BitComparator) ProtoMessage() {} + +func (m *BitComparator) GetComparable() *ByteArrayComparable { + if m != nil { + return m.Comparable + } + return nil +} + +func (m *BitComparator) GetBitwiseOp() BitComparator_BitwiseOp { + if m != nil && m.BitwiseOp != nil { + return *m.BitwiseOp + } + return BitComparator_AND +} + +type NullComparator struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *NullComparator) Reset() { *m = NullComparator{} } +func (m *NullComparator) String() string { return proto.CompactTextString(m) } +func (*NullComparator) ProtoMessage() {} + +type RegexStringComparator struct { + Pattern *string `protobuf:"bytes,1,req,name=pattern" json:"pattern,omitempty"` + PatternFlags *int32 `protobuf:"varint,2,req,name=pattern_flags" json:"pattern_flags,omitempty"` + Charset *string `protobuf:"bytes,3,req,name=charset" json:"charset,omitempty"` + Engine *string `protobuf:"bytes,4,opt,name=engine" json:"engine,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegexStringComparator) Reset() { *m = RegexStringComparator{} } +func (m *RegexStringComparator) String() string { return proto.CompactTextString(m) } +func (*RegexStringComparator) ProtoMessage() {} + +func (m *RegexStringComparator) GetPattern() string { + if m != nil && m.Pattern != nil { + return *m.Pattern + } + return "" +} + +func (m *RegexStringComparator) GetPatternFlags() int32 { + if m != nil && m.PatternFlags != nil { + return *m.PatternFlags + } + return 0 +} + +func (m *RegexStringComparator) GetCharset() string { + if m != nil && m.Charset != nil { + return *m.Charset + } + return "" +} + +func (m *RegexStringComparator) GetEngine() string { + if m != nil && m.Engine != nil { + return *m.Engine + } + return "" +} + +type SubstringComparator struct { + Substr *string `protobuf:"bytes,1,req,name=substr" json:"substr,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubstringComparator) Reset() { *m = SubstringComparator{} } +func (m *SubstringComparator) String() string { return proto.CompactTextString(m) } +func (*SubstringComparator) ProtoMessage() {} + +func (m *SubstringComparator) GetSubstr() string { + if m != nil && m.Substr != nil { + return *m.Substr + } + return "" +} + +func init() { + proto.RegisterEnum("pb.BitComparator_BitwiseOp", BitComparator_BitwiseOp_name, BitComparator_BitwiseOp_value) +} diff --git a/libs/gohbase/pb/Comparator.proto b/libs/gohbase/pb/Comparator.proto new file mode 100644 index 0000000..8bdf596 --- /dev/null +++ b/libs/gohbase/pb/Comparator.proto @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains protocol buffers that are used for filters + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "ComparatorProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +// This file contains protocol buffers that are used for comparators (e.g. in filters) + +message Comparator { + required string name = 1; + optional bytes serialized_comparator = 2; +} + +message ByteArrayComparable { + optional bytes value = 1; +} + +message BinaryComparator { + required ByteArrayComparable comparable = 1; +} + +message LongComparator { + required ByteArrayComparable comparable = 1; +} + +message BinaryPrefixComparator { + required ByteArrayComparable comparable = 1; +} + +message BitComparator { + required ByteArrayComparable comparable = 1; + required BitwiseOp bitwise_op = 2; + + enum BitwiseOp { + AND = 1; + OR = 2; + XOR = 3; + } +} + +message NullComparator { +} + +message RegexStringComparator { + required string pattern = 1; + required int32 pattern_flags = 2; + required string charset = 3; + optional string engine = 4; +} + +message SubstringComparator { + required string substr = 1; +} diff --git a/libs/gohbase/pb/ErrorHandling.pb.go b/libs/gohbase/pb/ErrorHandling.pb.go new file mode 100644 index 0000000..2e3129d --- /dev/null +++ b/libs/gohbase/pb/ErrorHandling.pb.go @@ -0,0 +1,130 @@ +// Code generated by protoc-gen-go. +// source: ErrorHandling.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// * +// Protobuf version of a java.lang.StackTraceElement +// so we can serialize exceptions. 
+type StackTraceElementMessage struct { + DeclaringClass *string `protobuf:"bytes,1,opt,name=declaring_class" json:"declaring_class,omitempty"` + MethodName *string `protobuf:"bytes,2,opt,name=method_name" json:"method_name,omitempty"` + FileName *string `protobuf:"bytes,3,opt,name=file_name" json:"file_name,omitempty"` + LineNumber *int32 `protobuf:"varint,4,opt,name=line_number" json:"line_number,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StackTraceElementMessage) Reset() { *m = StackTraceElementMessage{} } +func (m *StackTraceElementMessage) String() string { return proto.CompactTextString(m) } +func (*StackTraceElementMessage) ProtoMessage() {} + +func (m *StackTraceElementMessage) GetDeclaringClass() string { + if m != nil && m.DeclaringClass != nil { + return *m.DeclaringClass + } + return "" +} + +func (m *StackTraceElementMessage) GetMethodName() string { + if m != nil && m.MethodName != nil { + return *m.MethodName + } + return "" +} + +func (m *StackTraceElementMessage) GetFileName() string { + if m != nil && m.FileName != nil { + return *m.FileName + } + return "" +} + +func (m *StackTraceElementMessage) GetLineNumber() int32 { + if m != nil && m.LineNumber != nil { + return *m.LineNumber + } + return 0 +} + +// * +// Cause of a remote failure for a generic exception. Contains +// all the information for a generic exception as well as +// optional info about the error for generic info passing +// (which should be another protobuffed class). +type GenericExceptionMessage struct { + ClassName *string `protobuf:"bytes,1,opt,name=class_name" json:"class_name,omitempty"` + Message *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + ErrorInfo []byte `protobuf:"bytes,3,opt,name=error_info" json:"error_info,omitempty"` + Trace []*StackTraceElementMessage `protobuf:"bytes,4,rep,name=trace" json:"trace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GenericExceptionMessage) Reset() { *m = GenericExceptionMessage{} } +func (m *GenericExceptionMessage) String() string { return proto.CompactTextString(m) } +func (*GenericExceptionMessage) ProtoMessage() {} + +func (m *GenericExceptionMessage) GetClassName() string { + if m != nil && m.ClassName != nil { + return *m.ClassName + } + return "" +} + +func (m *GenericExceptionMessage) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +func (m *GenericExceptionMessage) GetErrorInfo() []byte { + if m != nil { + return m.ErrorInfo + } + return nil +} + +func (m *GenericExceptionMessage) GetTrace() []*StackTraceElementMessage { + if m != nil { + return m.Trace + } + return nil +} + +// * +// Exception sent across the wire when a remote task needs +// to notify other tasks that it failed and why +type ForeignExceptionMessage struct { + Source *string `protobuf:"bytes,1,opt,name=source" json:"source,omitempty"` + GenericException *GenericExceptionMessage `protobuf:"bytes,2,opt,name=generic_exception" json:"generic_exception,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ForeignExceptionMessage) Reset() { *m = ForeignExceptionMessage{} } +func (m *ForeignExceptionMessage) String() string { return proto.CompactTextString(m) } +func (*ForeignExceptionMessage) ProtoMessage() {} + +func (m *ForeignExceptionMessage) GetSource() string { + if m != nil && m.Source != nil { + return *m.Source + } + return "" +} + +func (m *ForeignExceptionMessage) GetGenericException() *GenericExceptionMessage { + if m != nil { + return m.GenericException 
+ } + return nil +} + +func init() { +} diff --git a/libs/gohbase/pb/ErrorHandling.proto b/libs/gohbase/pb/ErrorHandling.proto new file mode 100644 index 0000000..f7bdc68 --- /dev/null +++ b/libs/gohbase/pb/ErrorHandling.proto @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains protocol buffers that are used for error handling + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "ErrorHandlingProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +/** + * Protobuf version of a java.lang.StackTraceElement + * so we can serialize exceptions. + */ +message StackTraceElementMessage { + optional string declaring_class = 1; + optional string method_name = 2; + optional string file_name = 3; + optional int32 line_number = 4; +} + +/** + * Cause of a remote failure for a generic exception. Contains + * all the information for a generic exception as well as + * optional info about the error for generic info passing + * (which should be another protobuffed class). + */ +message GenericExceptionMessage { + optional string class_name = 1; + optional string message = 2; + optional bytes error_info = 3; + repeated StackTraceElementMessage trace = 4; +} + +/** + * Exception sent across the wire when a remote task needs + * to notify other tasks that it failed and why + */ +message ForeignExceptionMessage { + optional string source = 1; + optional GenericExceptionMessage generic_exception = 2; +} diff --git a/libs/gohbase/pb/FS.pb.go b/libs/gohbase/pb/FS.pb.go new file mode 100644 index 0000000..0c35990 --- /dev/null +++ b/libs/gohbase/pb/FS.pb.go @@ -0,0 +1,93 @@ +// Code generated by protoc-gen-go. +// source: FS.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = math.Inf + +type Reference_Range int32 + +const ( + Reference_TOP Reference_Range = 0 + Reference_BOTTOM Reference_Range = 1 +) + +var Reference_Range_name = map[int32]string{ + 0: "TOP", + 1: "BOTTOM", +} +var Reference_Range_value = map[string]int32{ + "TOP": 0, + "BOTTOM": 1, +} + +func (x Reference_Range) Enum() *Reference_Range { + p := new(Reference_Range) + *p = x + return p +} +func (x Reference_Range) String() string { + return proto.EnumName(Reference_Range_name, int32(x)) +} +func (x *Reference_Range) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Reference_Range_value, data, "Reference_Range") + if err != nil { + return err + } + *x = Reference_Range(value) + return nil +} + +// * +// The ${HBASE_ROOTDIR}/hbase.version file content +type HBaseVersionFileContent struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HBaseVersionFileContent) Reset() { *m = HBaseVersionFileContent{} } +func (m *HBaseVersionFileContent) String() string { return proto.CompactTextString(m) } +func (*HBaseVersionFileContent) ProtoMessage() {} + +func (m *HBaseVersionFileContent) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +// * +// Reference file content used when we split an hfile under a region. +type Reference struct { + Splitkey []byte `protobuf:"bytes,1,req,name=splitkey" json:"splitkey,omitempty"` + Range *Reference_Range `protobuf:"varint,2,req,name=range,enum=pb.Reference_Range" json:"range,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reference) Reset() { *m = Reference{} } +func (m *Reference) String() string { return proto.CompactTextString(m) } +func (*Reference) ProtoMessage() {} + +func (m *Reference) GetSplitkey() []byte { + if m != nil { + return m.Splitkey + } + return nil +} + +func (m *Reference) GetRange() Reference_Range { + if m != nil && m.Range != nil { + return *m.Range + } + return Reference_TOP +} + +func init() { + proto.RegisterEnum("pb.Reference_Range", Reference_Range_name, Reference_Range_value) +} diff --git a/libs/gohbase/pb/FS.proto b/libs/gohbase/pb/FS.proto new file mode 100644 index 0000000..9bedb71 --- /dev/null +++ b/libs/gohbase/pb/FS.proto @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file contains protocol buffers that are written into the filesystem + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "FSProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +/** + * The ${HBASE_ROOTDIR}/hbase.version file content + */ +message HBaseVersionFileContent { + required string version = 1; +} + +/** + * Reference file content used when we split an hfile under a region. + */ +message Reference { + required bytes splitkey = 1; + enum Range { + TOP = 0; + BOTTOM = 1; + } + required Range range = 2; +} + diff --git a/libs/gohbase/pb/Filter.pb.go b/libs/gohbase/pb/Filter.pb.go new file mode 100644 index 0000000..0dd3768 --- /dev/null +++ b/libs/gohbase/pb/Filter.pb.go @@ -0,0 +1,665 @@ +// Code generated by protoc-gen-go. +// source: Filter.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type FilterList_Operator int32 + +const ( + FilterList_MUST_PASS_ALL FilterList_Operator = 1 + FilterList_MUST_PASS_ONE FilterList_Operator = 2 +) + +var FilterList_Operator_name = map[int32]string{ + 1: "MUST_PASS_ALL", + 2: "MUST_PASS_ONE", +} +var FilterList_Operator_value = map[string]int32{ + "MUST_PASS_ALL": 1, + "MUST_PASS_ONE": 2, +} + +func (x FilterList_Operator) Enum() *FilterList_Operator { + p := new(FilterList_Operator) + *p = x + return p +} +func (x FilterList_Operator) String() string { + return proto.EnumName(FilterList_Operator_name, int32(x)) +} +func (x *FilterList_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FilterList_Operator_value, data, "FilterList_Operator") + if err != nil { + return err + } + *x = FilterList_Operator(value) + return nil +} + +type Filter struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + SerializedFilter []byte `protobuf:"bytes,2,opt,name=serialized_filter" json:"serialized_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} + +func (m *Filter) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Filter) GetSerializedFilter() []byte { + if m != nil { + return m.SerializedFilter + } + return nil +} + +type ColumnCountGetFilter struct { + Limit *int32 `protobuf:"varint,1,req,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ColumnCountGetFilter) Reset() { *m = ColumnCountGetFilter{} } +func (m *ColumnCountGetFilter) String() string { return proto.CompactTextString(m) } +func (*ColumnCountGetFilter) ProtoMessage() {} + +func (m *ColumnCountGetFilter) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +type ColumnPaginationFilter struct { + Limit *int32 `protobuf:"varint,1,req,name=limit" json:"limit,omitempty"` + Offset *int32 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"` + ColumnOffset []byte `protobuf:"bytes,3,opt,name=column_offset" json:"column_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ColumnPaginationFilter) Reset() { *m = ColumnPaginationFilter{} } +func (m *ColumnPaginationFilter) String() string { return proto.CompactTextString(m) } +func (*ColumnPaginationFilter) 
ProtoMessage() {} + +func (m *ColumnPaginationFilter) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *ColumnPaginationFilter) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *ColumnPaginationFilter) GetColumnOffset() []byte { + if m != nil { + return m.ColumnOffset + } + return nil +} + +type ColumnPrefixFilter struct { + Prefix []byte `protobuf:"bytes,1,req,name=prefix" json:"prefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ColumnPrefixFilter) Reset() { *m = ColumnPrefixFilter{} } +func (m *ColumnPrefixFilter) String() string { return proto.CompactTextString(m) } +func (*ColumnPrefixFilter) ProtoMessage() {} + +func (m *ColumnPrefixFilter) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +type ColumnRangeFilter struct { + MinColumn []byte `protobuf:"bytes,1,opt,name=min_column" json:"min_column,omitempty"` + MinColumnInclusive *bool `protobuf:"varint,2,opt,name=min_column_inclusive" json:"min_column_inclusive,omitempty"` + MaxColumn []byte `protobuf:"bytes,3,opt,name=max_column" json:"max_column,omitempty"` + MaxColumnInclusive *bool `protobuf:"varint,4,opt,name=max_column_inclusive" json:"max_column_inclusive,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ColumnRangeFilter) Reset() { *m = ColumnRangeFilter{} } +func (m *ColumnRangeFilter) String() string { return proto.CompactTextString(m) } +func (*ColumnRangeFilter) ProtoMessage() {} + +func (m *ColumnRangeFilter) GetMinColumn() []byte { + if m != nil { + return m.MinColumn + } + return nil +} + +func (m *ColumnRangeFilter) GetMinColumnInclusive() bool { + if m != nil && m.MinColumnInclusive != nil { + return *m.MinColumnInclusive + } + return false +} + +func (m *ColumnRangeFilter) GetMaxColumn() []byte { + if m != nil { + return m.MaxColumn + } + return nil +} + +func (m *ColumnRangeFilter) GetMaxColumnInclusive() bool { + if m != nil && m.MaxColumnInclusive != nil { + return *m.MaxColumnInclusive + } + return false +} + +type CompareFilter struct { + CompareOp *CompareType `protobuf:"varint,1,req,name=compare_op,enum=pb.CompareType" json:"compare_op,omitempty"` + Comparator *Comparator `protobuf:"bytes,2,opt,name=comparator" json:"comparator,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompareFilter) Reset() { *m = CompareFilter{} } +func (m *CompareFilter) String() string { return proto.CompactTextString(m) } +func (*CompareFilter) ProtoMessage() {} + +func (m *CompareFilter) GetCompareOp() CompareType { + if m != nil && m.CompareOp != nil { + return *m.CompareOp + } + return CompareType_LESS +} + +func (m *CompareFilter) GetComparator() *Comparator { + if m != nil { + return m.Comparator + } + return nil +} + +type DependentColumnFilter struct { + CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` + ColumnFamily []byte `protobuf:"bytes,2,opt,name=column_family" json:"column_family,omitempty"` + ColumnQualifier []byte `protobuf:"bytes,3,opt,name=column_qualifier" json:"column_qualifier,omitempty"` + DropDependentColumn *bool `protobuf:"varint,4,opt,name=drop_dependent_column" json:"drop_dependent_column,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DependentColumnFilter) Reset() { *m = DependentColumnFilter{} } +func (m *DependentColumnFilter) String() string { return proto.CompactTextString(m) } +func (*DependentColumnFilter) ProtoMessage() {} + +func (m 
*DependentColumnFilter) GetCompareFilter() *CompareFilter { + if m != nil { + return m.CompareFilter + } + return nil +} + +func (m *DependentColumnFilter) GetColumnFamily() []byte { + if m != nil { + return m.ColumnFamily + } + return nil +} + +func (m *DependentColumnFilter) GetColumnQualifier() []byte { + if m != nil { + return m.ColumnQualifier + } + return nil +} + +func (m *DependentColumnFilter) GetDropDependentColumn() bool { + if m != nil && m.DropDependentColumn != nil { + return *m.DropDependentColumn + } + return false +} + +type FamilyFilter struct { + CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FamilyFilter) Reset() { *m = FamilyFilter{} } +func (m *FamilyFilter) String() string { return proto.CompactTextString(m) } +func (*FamilyFilter) ProtoMessage() {} + +func (m *FamilyFilter) GetCompareFilter() *CompareFilter { + if m != nil { + return m.CompareFilter + } + return nil +} + +type FilterList struct { + Operator *FilterList_Operator `protobuf:"varint,1,req,name=operator,enum=pb.FilterList_Operator" json:"operator,omitempty"` + Filters []*Filter `protobuf:"bytes,2,rep,name=filters" json:"filters,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FilterList) Reset() { *m = FilterList{} } +func (m *FilterList) String() string { return proto.CompactTextString(m) } +func (*FilterList) ProtoMessage() {} + +func (m *FilterList) GetOperator() FilterList_Operator { + if m != nil && m.Operator != nil { + return *m.Operator + } + return FilterList_MUST_PASS_ALL +} + +func (m *FilterList) GetFilters() []*Filter { + if m != nil { + return m.Filters + } + return nil +} + +type FilterWrapper struct { + Filter *Filter `protobuf:"bytes,1,req,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FilterWrapper) Reset() { *m = FilterWrapper{} } +func (m *FilterWrapper) String() string { return proto.CompactTextString(m) } +func (*FilterWrapper) ProtoMessage() {} + +func (m *FilterWrapper) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +type FirstKeyOnlyFilter struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *FirstKeyOnlyFilter) Reset() { *m = FirstKeyOnlyFilter{} } +func (m *FirstKeyOnlyFilter) String() string { return proto.CompactTextString(m) } +func (*FirstKeyOnlyFilter) ProtoMessage() {} + +type FirstKeyValueMatchingQualifiersFilter struct { + Qualifiers [][]byte `protobuf:"bytes,1,rep,name=qualifiers" json:"qualifiers,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FirstKeyValueMatchingQualifiersFilter) Reset() { *m = FirstKeyValueMatchingQualifiersFilter{} } +func (m *FirstKeyValueMatchingQualifiersFilter) String() string { return proto.CompactTextString(m) } +func (*FirstKeyValueMatchingQualifiersFilter) ProtoMessage() {} + +func (m *FirstKeyValueMatchingQualifiersFilter) GetQualifiers() [][]byte { + if m != nil { + return m.Qualifiers + } + return nil +} + +type FuzzyRowFilter struct { + FuzzyKeysData []*BytesBytesPair `protobuf:"bytes,1,rep,name=fuzzy_keys_data" json:"fuzzy_keys_data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FuzzyRowFilter) Reset() { *m = FuzzyRowFilter{} } +func (m *FuzzyRowFilter) String() string { return proto.CompactTextString(m) } +func (*FuzzyRowFilter) ProtoMessage() {} + +func (m *FuzzyRowFilter) GetFuzzyKeysData() []*BytesBytesPair { + if m != nil { + return m.FuzzyKeysData + } + return nil +} + +type 
InclusiveStopFilter struct { + StopRowKey []byte `protobuf:"bytes,1,opt,name=stop_row_key" json:"stop_row_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InclusiveStopFilter) Reset() { *m = InclusiveStopFilter{} } +func (m *InclusiveStopFilter) String() string { return proto.CompactTextString(m) } +func (*InclusiveStopFilter) ProtoMessage() {} + +func (m *InclusiveStopFilter) GetStopRowKey() []byte { + if m != nil { + return m.StopRowKey + } + return nil +} + +type KeyOnlyFilter struct { + LenAsVal *bool `protobuf:"varint,1,req,name=len_as_val" json:"len_as_val,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KeyOnlyFilter) Reset() { *m = KeyOnlyFilter{} } +func (m *KeyOnlyFilter) String() string { return proto.CompactTextString(m) } +func (*KeyOnlyFilter) ProtoMessage() {} + +func (m *KeyOnlyFilter) GetLenAsVal() bool { + if m != nil && m.LenAsVal != nil { + return *m.LenAsVal + } + return false +} + +type MultipleColumnPrefixFilter struct { + SortedPrefixes [][]byte `protobuf:"bytes,1,rep,name=sorted_prefixes" json:"sorted_prefixes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MultipleColumnPrefixFilter) Reset() { *m = MultipleColumnPrefixFilter{} } +func (m *MultipleColumnPrefixFilter) String() string { return proto.CompactTextString(m) } +func (*MultipleColumnPrefixFilter) ProtoMessage() {} + +func (m *MultipleColumnPrefixFilter) GetSortedPrefixes() [][]byte { + if m != nil { + return m.SortedPrefixes + } + return nil +} + +type PageFilter struct { + PageSize *int64 `protobuf:"varint,1,req,name=page_size" json:"page_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PageFilter) Reset() { *m = PageFilter{} } +func (m *PageFilter) String() string { return proto.CompactTextString(m) } +func (*PageFilter) ProtoMessage() {} + +func (m *PageFilter) GetPageSize() int64 { + if m != nil && m.PageSize != nil { + return *m.PageSize + } + return 0 +} + +type PrefixFilter struct { + Prefix []byte `protobuf:"bytes,1,opt,name=prefix" json:"prefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PrefixFilter) Reset() { *m = PrefixFilter{} } +func (m *PrefixFilter) String() string { return proto.CompactTextString(m) } +func (*PrefixFilter) ProtoMessage() {} + +func (m *PrefixFilter) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +type QualifierFilter struct { + CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *QualifierFilter) Reset() { *m = QualifierFilter{} } +func (m *QualifierFilter) String() string { return proto.CompactTextString(m) } +func (*QualifierFilter) ProtoMessage() {} + +func (m *QualifierFilter) GetCompareFilter() *CompareFilter { + if m != nil { + return m.CompareFilter + } + return nil +} + +type RandomRowFilter struct { + Chance *float32 `protobuf:"fixed32,1,req,name=chance" json:"chance,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RandomRowFilter) Reset() { *m = RandomRowFilter{} } +func (m *RandomRowFilter) String() string { return proto.CompactTextString(m) } +func (*RandomRowFilter) ProtoMessage() {} + +func (m *RandomRowFilter) GetChance() float32 { + if m != nil && m.Chance != nil { + return *m.Chance + } + return 0 +} + +type RowFilter struct { + CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RowFilter) Reset() { *m = RowFilter{} 
} +func (m *RowFilter) String() string { return proto.CompactTextString(m) } +func (*RowFilter) ProtoMessage() {} + +func (m *RowFilter) GetCompareFilter() *CompareFilter { + if m != nil { + return m.CompareFilter + } + return nil +} + +type SingleColumnValueExcludeFilter struct { + SingleColumnValueFilter *SingleColumnValueFilter `protobuf:"bytes,1,req,name=single_column_value_filter" json:"single_column_value_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SingleColumnValueExcludeFilter) Reset() { *m = SingleColumnValueExcludeFilter{} } +func (m *SingleColumnValueExcludeFilter) String() string { return proto.CompactTextString(m) } +func (*SingleColumnValueExcludeFilter) ProtoMessage() {} + +func (m *SingleColumnValueExcludeFilter) GetSingleColumnValueFilter() *SingleColumnValueFilter { + if m != nil { + return m.SingleColumnValueFilter + } + return nil +} + +type SingleColumnValueFilter struct { + ColumnFamily []byte `protobuf:"bytes,1,opt,name=column_family" json:"column_family,omitempty"` + ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier" json:"column_qualifier,omitempty"` + CompareOp *CompareType `protobuf:"varint,3,req,name=compare_op,enum=pb.CompareType" json:"compare_op,omitempty"` + Comparator *Comparator `protobuf:"bytes,4,req,name=comparator" json:"comparator,omitempty"` + FilterIfMissing *bool `protobuf:"varint,5,opt,name=filter_if_missing" json:"filter_if_missing,omitempty"` + LatestVersionOnly *bool `protobuf:"varint,6,opt,name=latest_version_only" json:"latest_version_only,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SingleColumnValueFilter) Reset() { *m = SingleColumnValueFilter{} } +func (m *SingleColumnValueFilter) String() string { return proto.CompactTextString(m) } +func (*SingleColumnValueFilter) ProtoMessage() {} + +func (m *SingleColumnValueFilter) GetColumnFamily() []byte { + if m != nil { + return m.ColumnFamily + } + return nil +} + +func (m *SingleColumnValueFilter) GetColumnQualifier() []byte { + if m != nil { + return m.ColumnQualifier + } + return nil +} + +func (m *SingleColumnValueFilter) GetCompareOp() CompareType { + if m != nil && m.CompareOp != nil { + return *m.CompareOp + } + return CompareType_LESS +} + +func (m *SingleColumnValueFilter) GetComparator() *Comparator { + if m != nil { + return m.Comparator + } + return nil +} + +func (m *SingleColumnValueFilter) GetFilterIfMissing() bool { + if m != nil && m.FilterIfMissing != nil { + return *m.FilterIfMissing + } + return false +} + +func (m *SingleColumnValueFilter) GetLatestVersionOnly() bool { + if m != nil && m.LatestVersionOnly != nil { + return *m.LatestVersionOnly + } + return false +} + +type SkipFilter struct { + Filter *Filter `protobuf:"bytes,1,req,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SkipFilter) Reset() { *m = SkipFilter{} } +func (m *SkipFilter) String() string { return proto.CompactTextString(m) } +func (*SkipFilter) ProtoMessage() {} + +func (m *SkipFilter) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +type TimestampsFilter struct { + Timestamps []int64 `protobuf:"varint,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TimestampsFilter) Reset() { *m = TimestampsFilter{} } +func (m *TimestampsFilter) String() string { return proto.CompactTextString(m) } +func (*TimestampsFilter) ProtoMessage() {} + +func (m *TimestampsFilter) GetTimestamps() []int64 { + if m != nil { + return 
m.Timestamps + } + return nil +} + +type ValueFilter struct { + CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ValueFilter) Reset() { *m = ValueFilter{} } +func (m *ValueFilter) String() string { return proto.CompactTextString(m) } +func (*ValueFilter) ProtoMessage() {} + +func (m *ValueFilter) GetCompareFilter() *CompareFilter { + if m != nil { + return m.CompareFilter + } + return nil +} + +type WhileMatchFilter struct { + Filter *Filter `protobuf:"bytes,1,req,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WhileMatchFilter) Reset() { *m = WhileMatchFilter{} } +func (m *WhileMatchFilter) String() string { return proto.CompactTextString(m) } +func (*WhileMatchFilter) ProtoMessage() {} + +func (m *WhileMatchFilter) GetFilter() *Filter { + if m != nil { + return m.Filter + } + return nil +} + +type FilterAllFilter struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *FilterAllFilter) Reset() { *m = FilterAllFilter{} } +func (m *FilterAllFilter) String() string { return proto.CompactTextString(m) } +func (*FilterAllFilter) ProtoMessage() {} + +type RowRange struct { + StartRow []byte `protobuf:"bytes,1,opt,name=start_row" json:"start_row,omitempty"` + StartRowInclusive *bool `protobuf:"varint,2,opt,name=start_row_inclusive" json:"start_row_inclusive,omitempty"` + StopRow []byte `protobuf:"bytes,3,opt,name=stop_row" json:"stop_row,omitempty"` + StopRowInclusive *bool `protobuf:"varint,4,opt,name=stop_row_inclusive" json:"stop_row_inclusive,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RowRange) Reset() { *m = RowRange{} } +func (m *RowRange) String() string { return proto.CompactTextString(m) } +func (*RowRange) ProtoMessage() {} + +func (m *RowRange) GetStartRow() []byte { + if m != nil { + return m.StartRow + } + return nil +} + +func (m *RowRange) GetStartRowInclusive() bool { + if m != nil && m.StartRowInclusive != nil { + return *m.StartRowInclusive + } + return false +} + +func (m *RowRange) GetStopRow() []byte { + if m != nil { + return m.StopRow + } + return nil +} + +func (m *RowRange) GetStopRowInclusive() bool { + if m != nil && m.StopRowInclusive != nil { + return *m.StopRowInclusive + } + return false +} + +type MultiRowRangeFilter struct { + RowRangeList []*RowRange `protobuf:"bytes,1,rep,name=row_range_list" json:"row_range_list,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MultiRowRangeFilter) Reset() { *m = MultiRowRangeFilter{} } +func (m *MultiRowRangeFilter) String() string { return proto.CompactTextString(m) } +func (*MultiRowRangeFilter) ProtoMessage() {} + +func (m *MultiRowRangeFilter) GetRowRangeList() []*RowRange { + if m != nil { + return m.RowRangeList + } + return nil +} + +func init() { + proto.RegisterEnum("pb.FilterList_Operator", FilterList_Operator_name, FilterList_Operator_value) +} diff --git a/libs/gohbase/pb/Filter.proto b/libs/gohbase/pb/Filter.proto new file mode 100644 index 0000000..18e04f4 --- /dev/null +++ b/libs/gohbase/pb/Filter.proto @@ -0,0 +1,170 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains protocol buffers that are used for filters + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "FilterProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; +import "Comparator.proto"; + +message Filter { + required string name = 1; + optional bytes serialized_filter = 2; +} + +message ColumnCountGetFilter { + required int32 limit = 1; +} + +message ColumnPaginationFilter { + required int32 limit = 1; + optional int32 offset = 2; + optional bytes column_offset = 3; +} + +message ColumnPrefixFilter { + required bytes prefix = 1; +} + +message ColumnRangeFilter { + optional bytes min_column = 1; + optional bool min_column_inclusive = 2; + optional bytes max_column = 3; + optional bool max_column_inclusive = 4; +} + +message CompareFilter { + required CompareType compare_op = 1; + optional Comparator comparator = 2; +} + +message DependentColumnFilter { + required CompareFilter compare_filter = 1; + optional bytes column_family = 2; + optional bytes column_qualifier = 3; + optional bool drop_dependent_column = 4; +} + +message FamilyFilter { + required CompareFilter compare_filter = 1; +} + +message FilterList { + required Operator operator = 1; + repeated Filter filters = 2; + + enum Operator { + MUST_PASS_ALL = 1; + MUST_PASS_ONE = 2; + } +} + +message FilterWrapper { + required Filter filter = 1; +} + +message FirstKeyOnlyFilter { +} + +message FirstKeyValueMatchingQualifiersFilter { + repeated bytes qualifiers = 1; +} + +message FuzzyRowFilter { + repeated BytesBytesPair fuzzy_keys_data = 1; +} + +message InclusiveStopFilter { + optional bytes stop_row_key = 1; +} + +message KeyOnlyFilter { + required bool len_as_val = 1; +} + +message MultipleColumnPrefixFilter { + repeated bytes sorted_prefixes = 1; +} + +message PageFilter { + required int64 page_size = 1; +} + +message PrefixFilter { + optional bytes prefix = 1; +} + +message QualifierFilter { + required CompareFilter compare_filter = 1; +} + +message RandomRowFilter { + required float chance = 1; +} + +message RowFilter { + required CompareFilter compare_filter = 1; +} + +message SingleColumnValueExcludeFilter { + required SingleColumnValueFilter single_column_value_filter = 1; +} + +message SingleColumnValueFilter { + optional bytes column_family = 1; + optional bytes column_qualifier = 2; + required CompareType compare_op = 3; + required Comparator comparator = 4; + optional bool filter_if_missing = 5; + optional bool latest_version_only = 6; +} + +message SkipFilter { + required Filter filter = 1; +} + +message TimestampsFilter { + repeated int64 timestamps = 1 [packed=true]; +} + +message ValueFilter { + required CompareFilter compare_filter = 1; +} + +message WhileMatchFilter { + required Filter filter = 1; +} +message FilterAllFilter { +} + +message RowRange { + optional bytes start_row = 1; + optional bool start_row_inclusive = 2; + optional bytes stop_row = 3; + optional bool stop_row_inclusive =4; +} + +message 
MultiRowRangeFilter { + repeated RowRange row_range_list = 1; +} \ No newline at end of file diff --git a/libs/gohbase/pb/HBase.pb.go b/libs/gohbase/pb/HBase.pb.go new file mode 100644 index 0000000..01e7b52 --- /dev/null +++ b/libs/gohbase/pb/HBase.pb.go @@ -0,0 +1,808 @@ +// Code generated by protoc-gen-go. +// source: HBase.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// Comparison operators +type CompareType int32 + +const ( + CompareType_LESS CompareType = 0 + CompareType_LESS_OR_EQUAL CompareType = 1 + CompareType_EQUAL CompareType = 2 + CompareType_NOT_EQUAL CompareType = 3 + CompareType_GREATER_OR_EQUAL CompareType = 4 + CompareType_GREATER CompareType = 5 + CompareType_NO_OP CompareType = 6 +) + +var CompareType_name = map[int32]string{ + 0: "LESS", + 1: "LESS_OR_EQUAL", + 2: "EQUAL", + 3: "NOT_EQUAL", + 4: "GREATER_OR_EQUAL", + 5: "GREATER", + 6: "NO_OP", +} +var CompareType_value = map[string]int32{ + "LESS": 0, + "LESS_OR_EQUAL": 1, + "EQUAL": 2, + "NOT_EQUAL": 3, + "GREATER_OR_EQUAL": 4, + "GREATER": 5, + "NO_OP": 6, +} + +func (x CompareType) Enum() *CompareType { + p := new(CompareType) + *p = x + return p +} +func (x CompareType) String() string { + return proto.EnumName(CompareType_name, int32(x)) +} +func (x *CompareType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompareType_value, data, "CompareType") + if err != nil { + return err + } + *x = CompareType(value) + return nil +} + +type TimeUnit int32 + +const ( + TimeUnit_NANOSECONDS TimeUnit = 1 + TimeUnit_MICROSECONDS TimeUnit = 2 + TimeUnit_MILLISECONDS TimeUnit = 3 + TimeUnit_SECONDS TimeUnit = 4 + TimeUnit_MINUTES TimeUnit = 5 + TimeUnit_HOURS TimeUnit = 6 + TimeUnit_DAYS TimeUnit = 7 +) + +var TimeUnit_name = map[int32]string{ + 1: "NANOSECONDS", + 2: "MICROSECONDS", + 3: "MILLISECONDS", + 4: "SECONDS", + 5: "MINUTES", + 6: "HOURS", + 7: "DAYS", +} +var TimeUnit_value = map[string]int32{ + "NANOSECONDS": 1, + "MICROSECONDS": 2, + "MILLISECONDS": 3, + "SECONDS": 4, + "MINUTES": 5, + "HOURS": 6, + "DAYS": 7, +} + +func (x TimeUnit) Enum() *TimeUnit { + p := new(TimeUnit) + *p = x + return p +} +func (x TimeUnit) String() string { + return proto.EnumName(TimeUnit_name, int32(x)) +} +func (x *TimeUnit) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TimeUnit_value, data, "TimeUnit") + if err != nil { + return err + } + *x = TimeUnit(value) + return nil +} + +type RegionSpecifier_RegionSpecifierType int32 + +const ( + // <tablename>,<startkey>,<regionId>.<encodedName>
+ RegionSpecifier_REGION_NAME RegionSpecifier_RegionSpecifierType = 1 + // hash of <tablename>,<startkey>,<regionId> + RegionSpecifier_ENCODED_REGION_NAME RegionSpecifier_RegionSpecifierType = 2 +) + +var RegionSpecifier_RegionSpecifierType_name = map[int32]string{ + 1: "REGION_NAME", + 2: "ENCODED_REGION_NAME", +} +var RegionSpecifier_RegionSpecifierType_value = map[string]int32{ + "REGION_NAME": 1, + "ENCODED_REGION_NAME": 2, +} + +func (x RegionSpecifier_RegionSpecifierType) Enum() *RegionSpecifier_RegionSpecifierType { + p := new(RegionSpecifier_RegionSpecifierType) + *p = x + return p +} +func (x RegionSpecifier_RegionSpecifierType) String() string { + return proto.EnumName(RegionSpecifier_RegionSpecifierType_name, int32(x)) +} +func (x *RegionSpecifier_RegionSpecifierType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RegionSpecifier_RegionSpecifierType_value, data, "RegionSpecifier_RegionSpecifierType") + if err != nil { + return err + } + *x = RegionSpecifier_RegionSpecifierType(value) + return nil +} + +type SnapshotDescription_Type int32 + +const ( + SnapshotDescription_DISABLED SnapshotDescription_Type = 0 + SnapshotDescription_FLUSH SnapshotDescription_Type = 1 + SnapshotDescription_SKIPFLUSH SnapshotDescription_Type = 2 +) + +var SnapshotDescription_Type_name = map[int32]string{ + 0: "DISABLED", + 1: "FLUSH", + 2: "SKIPFLUSH", +} +var SnapshotDescription_Type_value = map[string]int32{ + "DISABLED": 0, + "FLUSH": 1, + "SKIPFLUSH": 2, +} + +func (x SnapshotDescription_Type) Enum() *SnapshotDescription_Type { + p := new(SnapshotDescription_Type) + *p = x + return p +} +func (x SnapshotDescription_Type) String() string { + return proto.EnumName(SnapshotDescription_Type_name, int32(x)) +} +func (x *SnapshotDescription_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SnapshotDescription_Type_value, data, "SnapshotDescription_Type") + if err != nil { + return err + } + *x = SnapshotDescription_Type(value) + return nil +} + +// * +// Table Name +type TableName struct { + Namespace []byte `protobuf:"bytes,1,req,name=namespace" json:"namespace,omitempty"` + Qualifier []byte `protobuf:"bytes,2,req,name=qualifier" json:"qualifier,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableName) Reset() { *m = TableName{} } +func (m *TableName) String() string { return proto.CompactTextString(m) } +func (*TableName) ProtoMessage() {} + +func (m *TableName) GetNamespace() []byte { + if m != nil { + return m.Namespace + } + return nil +} + +func (m *TableName) GetQualifier() []byte { + if m != nil { + return m.Qualifier + } + return nil +} + +// * +// Table Schema +// Inspired by the rest TableSchema +type TableSchema struct { + TableName *TableName `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + Attributes []*BytesBytesPair `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty"` + ColumnFamilies []*ColumnFamilySchema `protobuf:"bytes,3,rep,name=column_families" json:"column_families,omitempty"` + Configuration []*NameStringPair `protobuf:"bytes,4,rep,name=configuration" json:"configuration,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableSchema) Reset() { *m = TableSchema{} } +func (m *TableSchema) String() string { return proto.CompactTextString(m) } +func (*TableSchema) ProtoMessage() {} + +func (m *TableSchema) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *TableSchema) GetAttributes() []*BytesBytesPair { + if m != nil { + return m.Attributes + 
} + return nil +} + +func (m *TableSchema) GetColumnFamilies() []*ColumnFamilySchema { + if m != nil { + return m.ColumnFamilies + } + return nil +} + +func (m *TableSchema) GetConfiguration() []*NameStringPair { + if m != nil { + return m.Configuration + } + return nil +} + +// * +// Column Family Schema +// Inspired by the rest ColumSchemaMessage +type ColumnFamilySchema struct { + Name []byte `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Attributes []*BytesBytesPair `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty"` + Configuration []*NameStringPair `protobuf:"bytes,3,rep,name=configuration" json:"configuration,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ColumnFamilySchema) Reset() { *m = ColumnFamilySchema{} } +func (m *ColumnFamilySchema) String() string { return proto.CompactTextString(m) } +func (*ColumnFamilySchema) ProtoMessage() {} + +func (m *ColumnFamilySchema) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *ColumnFamilySchema) GetAttributes() []*BytesBytesPair { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *ColumnFamilySchema) GetConfiguration() []*NameStringPair { + if m != nil { + return m.Configuration + } + return nil +} + +// * +// Protocol buffer version of HRegionInfo. +type RegionInfo struct { + RegionId *uint64 `protobuf:"varint,1,req,name=region_id" json:"region_id,omitempty"` + TableName *TableName `protobuf:"bytes,2,req,name=table_name" json:"table_name,omitempty"` + StartKey []byte `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` + EndKey []byte `protobuf:"bytes,4,opt,name=end_key" json:"end_key,omitempty"` + Offline *bool `protobuf:"varint,5,opt,name=offline" json:"offline,omitempty"` + Split *bool `protobuf:"varint,6,opt,name=split" json:"split,omitempty"` + ReplicaId *int32 `protobuf:"varint,7,opt,name=replica_id,def=0" json:"replica_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionInfo) Reset() { *m = RegionInfo{} } +func (m *RegionInfo) String() string { return proto.CompactTextString(m) } +func (*RegionInfo) ProtoMessage() {} + +const Default_RegionInfo_ReplicaId int32 = 0 + +func (m *RegionInfo) GetRegionId() uint64 { + if m != nil && m.RegionId != nil { + return *m.RegionId + } + return 0 +} + +func (m *RegionInfo) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *RegionInfo) GetStartKey() []byte { + if m != nil { + return m.StartKey + } + return nil +} + +func (m *RegionInfo) GetEndKey() []byte { + if m != nil { + return m.EndKey + } + return nil +} + +func (m *RegionInfo) GetOffline() bool { + if m != nil && m.Offline != nil { + return *m.Offline + } + return false +} + +func (m *RegionInfo) GetSplit() bool { + if m != nil && m.Split != nil { + return *m.Split + } + return false +} + +func (m *RegionInfo) GetReplicaId() int32 { + if m != nil && m.ReplicaId != nil { + return *m.ReplicaId + } + return Default_RegionInfo_ReplicaId +} + +// * +// Protocol buffer for favored nodes +type FavoredNodes struct { + FavoredNode []*ServerName `protobuf:"bytes,1,rep,name=favored_node" json:"favored_node,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FavoredNodes) Reset() { *m = FavoredNodes{} } +func (m *FavoredNodes) String() string { return proto.CompactTextString(m) } +func (*FavoredNodes) ProtoMessage() {} + +func (m *FavoredNodes) GetFavoredNode() []*ServerName { + if m != nil { + return m.FavoredNode + } + return nil +} + +// * +// Container 
protocol buffer to specify a region. +// You can specify region by region name, or the hash +// of the region name, which is known as encoded +// region name. +type RegionSpecifier struct { + Type *RegionSpecifier_RegionSpecifierType `protobuf:"varint,1,req,name=type,enum=pb.RegionSpecifier_RegionSpecifierType" json:"type,omitempty"` + Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionSpecifier) Reset() { *m = RegionSpecifier{} } +func (m *RegionSpecifier) String() string { return proto.CompactTextString(m) } +func (*RegionSpecifier) ProtoMessage() {} + +func (m *RegionSpecifier) GetType() RegionSpecifier_RegionSpecifierType { + if m != nil && m.Type != nil { + return *m.Type + } + return RegionSpecifier_REGION_NAME +} + +func (m *RegionSpecifier) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// * +// A range of time. Both from and to are Java time +// stamp in milliseconds. If you don't specify a time +// range, it means all time. By default, if not +// specified, from = 0, and to = Long.MAX_VALUE +type TimeRange struct { + From *uint64 `protobuf:"varint,1,opt,name=from" json:"from,omitempty"` + To *uint64 `protobuf:"varint,2,opt,name=to" json:"to,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TimeRange) Reset() { *m = TimeRange{} } +func (m *TimeRange) String() string { return proto.CompactTextString(m) } +func (*TimeRange) ProtoMessage() {} + +func (m *TimeRange) GetFrom() uint64 { + if m != nil && m.From != nil { + return *m.From + } + return 0 +} + +func (m *TimeRange) GetTo() uint64 { + if m != nil && m.To != nil { + return *m.To + } + return 0 +} + +// * +// Protocol buffer version of ServerName +type ServerName struct { + HostName *string `protobuf:"bytes,1,req,name=host_name" json:"host_name,omitempty"` + Port *uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + StartCode *uint64 `protobuf:"varint,3,opt,name=start_code" json:"start_code,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServerName) Reset() { *m = ServerName{} } +func (m *ServerName) String() string { return proto.CompactTextString(m) } +func (*ServerName) ProtoMessage() {} + +func (m *ServerName) GetHostName() string { + if m != nil && m.HostName != nil { + return *m.HostName + } + return "" +} + +func (m *ServerName) GetPort() uint32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *ServerName) GetStartCode() uint64 { + if m != nil && m.StartCode != nil { + return *m.StartCode + } + return 0 +} + +type Coprocessor struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Coprocessor) Reset() { *m = Coprocessor{} } +func (m *Coprocessor) String() string { return proto.CompactTextString(m) } +func (*Coprocessor) ProtoMessage() {} + +func (m *Coprocessor) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type NameStringPair struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NameStringPair) Reset() { *m = NameStringPair{} } +func (m *NameStringPair) String() string { return proto.CompactTextString(m) } +func (*NameStringPair) ProtoMessage() {} + +func (m *NameStringPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func 
(m *NameStringPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type NameBytesPair struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NameBytesPair) Reset() { *m = NameBytesPair{} } +func (m *NameBytesPair) String() string { return proto.CompactTextString(m) } +func (*NameBytesPair) ProtoMessage() {} + +func (m *NameBytesPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NameBytesPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type BytesBytesPair struct { + First []byte `protobuf:"bytes,1,req,name=first" json:"first,omitempty"` + Second []byte `protobuf:"bytes,2,req,name=second" json:"second,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BytesBytesPair) Reset() { *m = BytesBytesPair{} } +func (m *BytesBytesPair) String() string { return proto.CompactTextString(m) } +func (*BytesBytesPair) ProtoMessage() {} + +func (m *BytesBytesPair) GetFirst() []byte { + if m != nil { + return m.First + } + return nil +} + +func (m *BytesBytesPair) GetSecond() []byte { + if m != nil { + return m.Second + } + return nil +} + +type NameInt64Pair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NameInt64Pair) Reset() { *m = NameInt64Pair{} } +func (m *NameInt64Pair) String() string { return proto.CompactTextString(m) } +func (*NameInt64Pair) ProtoMessage() {} + +func (m *NameInt64Pair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NameInt64Pair) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +// * +// Description of the snapshot to take +type SnapshotDescription struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Table *string `protobuf:"bytes,2,opt,name=table" json:"table,omitempty"` + CreationTime *int64 `protobuf:"varint,3,opt,name=creation_time,def=0" json:"creation_time,omitempty"` + Type *SnapshotDescription_Type `protobuf:"varint,4,opt,name=type,enum=pb.SnapshotDescription_Type,def=1" json:"type,omitempty"` + Version *int32 `protobuf:"varint,5,opt,name=version" json:"version,omitempty"` + Owner *string `protobuf:"bytes,6,opt,name=owner" json:"owner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotDescription) Reset() { *m = SnapshotDescription{} } +func (m *SnapshotDescription) String() string { return proto.CompactTextString(m) } +func (*SnapshotDescription) ProtoMessage() {} + +const Default_SnapshotDescription_CreationTime int64 = 0 +const Default_SnapshotDescription_Type SnapshotDescription_Type = SnapshotDescription_FLUSH + +func (m *SnapshotDescription) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *SnapshotDescription) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *SnapshotDescription) GetCreationTime() int64 { + if m != nil && m.CreationTime != nil { + return *m.CreationTime + } + return Default_SnapshotDescription_CreationTime +} + +func (m *SnapshotDescription) GetType() SnapshotDescription_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return 
Default_SnapshotDescription_Type +} + +func (m *SnapshotDescription) GetVersion() int32 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func (m *SnapshotDescription) GetOwner() string { + if m != nil && m.Owner != nil { + return *m.Owner + } + return "" +} + +// * +// Description of the distributed procedure to take +type ProcedureDescription struct { + Signature *string `protobuf:"bytes,1,req,name=signature" json:"signature,omitempty"` + Instance *string `protobuf:"bytes,2,opt,name=instance" json:"instance,omitempty"` + CreationTime *int64 `protobuf:"varint,3,opt,name=creation_time,def=0" json:"creation_time,omitempty"` + Configuration []*NameStringPair `protobuf:"bytes,4,rep,name=configuration" json:"configuration,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ProcedureDescription) Reset() { *m = ProcedureDescription{} } +func (m *ProcedureDescription) String() string { return proto.CompactTextString(m) } +func (*ProcedureDescription) ProtoMessage() {} + +const Default_ProcedureDescription_CreationTime int64 = 0 + +func (m *ProcedureDescription) GetSignature() string { + if m != nil && m.Signature != nil { + return *m.Signature + } + return "" +} + +func (m *ProcedureDescription) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +func (m *ProcedureDescription) GetCreationTime() int64 { + if m != nil && m.CreationTime != nil { + return *m.CreationTime + } + return Default_ProcedureDescription_CreationTime +} + +func (m *ProcedureDescription) GetConfiguration() []*NameStringPair { + if m != nil { + return m.Configuration + } + return nil +} + +type EmptyMsg struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *EmptyMsg) Reset() { *m = EmptyMsg{} } +func (m *EmptyMsg) String() string { return proto.CompactTextString(m) } +func (*EmptyMsg) ProtoMessage() {} + +type LongMsg struct { + LongMsg *int64 `protobuf:"varint,1,req,name=long_msg" json:"long_msg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LongMsg) Reset() { *m = LongMsg{} } +func (m *LongMsg) String() string { return proto.CompactTextString(m) } +func (*LongMsg) ProtoMessage() {} + +func (m *LongMsg) GetLongMsg() int64 { + if m != nil && m.LongMsg != nil { + return *m.LongMsg + } + return 0 +} + +type DoubleMsg struct { + DoubleMsg *float64 `protobuf:"fixed64,1,req,name=double_msg" json:"double_msg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DoubleMsg) Reset() { *m = DoubleMsg{} } +func (m *DoubleMsg) String() string { return proto.CompactTextString(m) } +func (*DoubleMsg) ProtoMessage() {} + +func (m *DoubleMsg) GetDoubleMsg() float64 { + if m != nil && m.DoubleMsg != nil { + return *m.DoubleMsg + } + return 0 +} + +type BigDecimalMsg struct { + BigdecimalMsg []byte `protobuf:"bytes,1,req,name=bigdecimal_msg" json:"bigdecimal_msg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BigDecimalMsg) Reset() { *m = BigDecimalMsg{} } +func (m *BigDecimalMsg) String() string { return proto.CompactTextString(m) } +func (*BigDecimalMsg) ProtoMessage() {} + +func (m *BigDecimalMsg) GetBigdecimalMsg() []byte { + if m != nil { + return m.BigdecimalMsg + } + return nil +} + +type UUID struct { + LeastSigBits *uint64 `protobuf:"varint,1,req,name=least_sig_bits" json:"least_sig_bits,omitempty"` + MostSigBits *uint64 `protobuf:"varint,2,req,name=most_sig_bits" json:"most_sig_bits,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UUID) Reset() { *m = UUID{} } +func (m *UUID) 
String() string { return proto.CompactTextString(m) } +func (*UUID) ProtoMessage() {} + +func (m *UUID) GetLeastSigBits() uint64 { + if m != nil && m.LeastSigBits != nil { + return *m.LeastSigBits + } + return 0 +} + +func (m *UUID) GetMostSigBits() uint64 { + if m != nil && m.MostSigBits != nil { + return *m.MostSigBits + } + return 0 +} + +type NamespaceDescriptor struct { + Name []byte `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Configuration []*NameStringPair `protobuf:"bytes,2,rep,name=configuration" json:"configuration,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NamespaceDescriptor) Reset() { *m = NamespaceDescriptor{} } +func (m *NamespaceDescriptor) String() string { return proto.CompactTextString(m) } +func (*NamespaceDescriptor) ProtoMessage() {} + +func (m *NamespaceDescriptor) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *NamespaceDescriptor) GetConfiguration() []*NameStringPair { + if m != nil { + return m.Configuration + } + return nil +} + +// * +// Description of the region server info +type RegionServerInfo struct { + InfoPort *int32 `protobuf:"varint,1,opt,name=infoPort" json:"infoPort,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionServerInfo) Reset() { *m = RegionServerInfo{} } +func (m *RegionServerInfo) String() string { return proto.CompactTextString(m) } +func (*RegionServerInfo) ProtoMessage() {} + +func (m *RegionServerInfo) GetInfoPort() int32 { + if m != nil && m.InfoPort != nil { + return *m.InfoPort + } + return 0 +} + +func init() { + proto.RegisterEnum("pb.CompareType", CompareType_name, CompareType_value) + proto.RegisterEnum("pb.TimeUnit", TimeUnit_name, TimeUnit_value) + proto.RegisterEnum("pb.RegionSpecifier_RegionSpecifierType", RegionSpecifier_RegionSpecifierType_name, RegionSpecifier_RegionSpecifierType_value) + proto.RegisterEnum("pb.SnapshotDescription_Type", SnapshotDescription_Type_name, SnapshotDescription_Type_value) +} diff --git a/libs/gohbase/pb/HBase.proto b/libs/gohbase/pb/HBase.proto new file mode 100644 index 0000000..21cc2c6 --- /dev/null +++ b/libs/gohbase/pb/HBase.proto @@ -0,0 +1,221 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file contains protocol buffers that are shared throughout HBase + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "HBaseProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "Cell.proto"; + +/** + * Table Name + */ +message TableName { + required bytes namespace = 1; + required bytes qualifier = 2; +} + +/** + * Table Schema + * Inspired by the rest TableSchema + */ +message TableSchema { + optional TableName table_name = 1; + repeated BytesBytesPair attributes = 2; + repeated ColumnFamilySchema column_families = 3; + repeated NameStringPair configuration = 4; +} + +/** + * Column Family Schema + * Inspired by the rest ColumSchemaMessage + */ +message ColumnFamilySchema { + required bytes name = 1; + repeated BytesBytesPair attributes = 2; + repeated NameStringPair configuration = 3; +} + +/** + * Protocol buffer version of HRegionInfo. + */ +message RegionInfo { + required uint64 region_id = 1; + required TableName table_name = 2; + optional bytes start_key = 3; + optional bytes end_key = 4; + optional bool offline = 5; + optional bool split = 6; + optional int32 replica_id = 7 [default = 0]; +} + +/** + * Protocol buffer for favored nodes + */ +message FavoredNodes { + repeated ServerName favored_node = 1; +} + +/** + * Container protocol buffer to specify a region. + * You can specify region by region name, or the hash + * of the region name, which is known as encoded + * region name. + */ +message RegionSpecifier { + required RegionSpecifierType type = 1; + required bytes value = 2; + + enum RegionSpecifierType { + // <tablename>,<startkey>,<regionId>.<encodedName> + REGION_NAME = 1; + + // hash of <tablename>,<startkey>,<regionId> + ENCODED_REGION_NAME = 2; + } +} + +/** + * A range of time. Both from and to are Java time + * stamp in milliseconds. If you don't specify a time + * range, it means all time. 
By default, if not + * specified, from = 0, and to = Long.MAX_VALUE + */ +message TimeRange { + optional uint64 from = 1; + optional uint64 to = 2; +} + +/* Comparison operators */ +enum CompareType { + LESS = 0; + LESS_OR_EQUAL = 1; + EQUAL = 2; + NOT_EQUAL = 3; + GREATER_OR_EQUAL = 4; + GREATER = 5; + NO_OP = 6; +} + +/** + * Protocol buffer version of ServerName + */ +message ServerName { + required string host_name = 1; + optional uint32 port = 2; + optional uint64 start_code = 3; +} + +// Comment data structures + +message Coprocessor { + required string name = 1; +} + +message NameStringPair { + required string name = 1; + required string value = 2; +} + +message NameBytesPair { + required string name = 1; + optional bytes value = 2; +} + +message BytesBytesPair { + required bytes first = 1; + required bytes second = 2; +} + +message NameInt64Pair { + optional string name = 1; + optional int64 value = 2; +} + +/** + * Description of the snapshot to take + */ +message SnapshotDescription { + required string name = 1; + optional string table = 2; // not needed for delete, but checked for in taking snapshot + optional int64 creation_time = 3 [default = 0]; + enum Type { + DISABLED = 0; + FLUSH = 1; + SKIPFLUSH = 2; + } + optional Type type = 4 [default = FLUSH]; + optional int32 version = 5; + optional string owner = 6; +} + +/** + * Description of the distributed procedure to take + */ +message ProcedureDescription { + required string signature = 1; // the unique signature of the procedure + optional string instance = 2; // the procedure instance name + optional int64 creation_time = 3 [default = 0]; + repeated NameStringPair configuration = 4; +} + +message EmptyMsg { +} + +enum TimeUnit { + NANOSECONDS = 1; + MICROSECONDS = 2; + MILLISECONDS = 3; + SECONDS = 4; + MINUTES = 5; + HOURS = 6; + DAYS = 7; +} + +message LongMsg { + required int64 long_msg = 1; +} + +message DoubleMsg { + required double double_msg = 1; +} + +message BigDecimalMsg { + required bytes bigdecimal_msg = 1; +} + +message UUID { + required uint64 least_sig_bits = 1; + required uint64 most_sig_bits = 2; +} + +message NamespaceDescriptor { + required bytes name = 1; + repeated NameStringPair configuration = 2; +} + +/** + * Description of the region server info + */ +message RegionServerInfo { + optional int32 infoPort = 1; +} diff --git a/libs/gohbase/pb/Master.pb.go b/libs/gohbase/pb/Master.pb.go new file mode 100644 index 0000000..5efb491 --- /dev/null +++ b/libs/gohbase/pb/Master.pb.go @@ -0,0 +1,1550 @@ +// Code generated by protoc-gen-go. +// source: Master.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = math.Inf + +type GetProcedureResultResponse_State int32 + +const ( + GetProcedureResultResponse_NOT_FOUND GetProcedureResultResponse_State = 0 + GetProcedureResultResponse_RUNNING GetProcedureResultResponse_State = 1 + GetProcedureResultResponse_FINISHED GetProcedureResultResponse_State = 2 +) + +var GetProcedureResultResponse_State_name = map[int32]string{ + 0: "NOT_FOUND", + 1: "RUNNING", + 2: "FINISHED", +} +var GetProcedureResultResponse_State_value = map[string]int32{ + "NOT_FOUND": 0, + "RUNNING": 1, + "FINISHED": 2, +} + +func (x GetProcedureResultResponse_State) Enum() *GetProcedureResultResponse_State { + p := new(GetProcedureResultResponse_State) + *p = x + return p +} +func (x GetProcedureResultResponse_State) String() string { + return proto.EnumName(GetProcedureResultResponse_State_name, int32(x)) +} +func (x *GetProcedureResultResponse_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GetProcedureResultResponse_State_value, data, "GetProcedureResultResponse_State") + if err != nil { + return err + } + *x = GetProcedureResultResponse_State(value) + return nil +} + +type AddColumnRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + ColumnFamilies *ColumnFamilySchema `protobuf:"bytes,2,req,name=column_families" json:"column_families,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddColumnRequest) Reset() { *m = AddColumnRequest{} } +func (m *AddColumnRequest) String() string { return proto.CompactTextString(m) } +func (*AddColumnRequest) ProtoMessage() {} + +func (m *AddColumnRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *AddColumnRequest) GetColumnFamilies() *ColumnFamilySchema { + if m != nil { + return m.ColumnFamilies + } + return nil +} + +type AddColumnResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddColumnResponse) Reset() { *m = AddColumnResponse{} } +func (m *AddColumnResponse) String() string { return proto.CompactTextString(m) } +func (*AddColumnResponse) ProtoMessage() {} + +type DeleteColumnRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + ColumnName []byte `protobuf:"bytes,2,req,name=column_name" json:"column_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteColumnRequest) Reset() { *m = DeleteColumnRequest{} } +func (m *DeleteColumnRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteColumnRequest) ProtoMessage() {} + +func (m *DeleteColumnRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *DeleteColumnRequest) GetColumnName() []byte { + if m != nil { + return m.ColumnName + } + return nil +} + +type DeleteColumnResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteColumnResponse) Reset() { *m = DeleteColumnResponse{} } +func (m *DeleteColumnResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteColumnResponse) ProtoMessage() {} + +type ModifyColumnRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + ColumnFamilies *ColumnFamilySchema `protobuf:"bytes,2,req,name=column_families" json:"column_families,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModifyColumnRequest) Reset() { *m = ModifyColumnRequest{} } +func (m *ModifyColumnRequest) String() string { return 
proto.CompactTextString(m) } +func (*ModifyColumnRequest) ProtoMessage() {} + +func (m *ModifyColumnRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *ModifyColumnRequest) GetColumnFamilies() *ColumnFamilySchema { + if m != nil { + return m.ColumnFamilies + } + return nil +} + +type ModifyColumnResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModifyColumnResponse) Reset() { *m = ModifyColumnResponse{} } +func (m *ModifyColumnResponse) String() string { return proto.CompactTextString(m) } +func (*ModifyColumnResponse) ProtoMessage() {} + +type MoveRegionRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + DestServerName *ServerName `protobuf:"bytes,2,opt,name=dest_server_name" json:"dest_server_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoveRegionRequest) Reset() { *m = MoveRegionRequest{} } +func (m *MoveRegionRequest) String() string { return proto.CompactTextString(m) } +func (*MoveRegionRequest) ProtoMessage() {} + +func (m *MoveRegionRequest) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +func (m *MoveRegionRequest) GetDestServerName() *ServerName { + if m != nil { + return m.DestServerName + } + return nil +} + +type MoveRegionResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoveRegionResponse) Reset() { *m = MoveRegionResponse{} } +func (m *MoveRegionResponse) String() string { return proto.CompactTextString(m) } +func (*MoveRegionResponse) ProtoMessage() {} + +// * +// Dispatch merging the specified regions. +type DispatchMergingRegionsRequest struct { + RegionA *RegionSpecifier `protobuf:"bytes,1,req,name=region_a" json:"region_a,omitempty"` + RegionB *RegionSpecifier `protobuf:"bytes,2,req,name=region_b" json:"region_b,omitempty"` + Forcible *bool `protobuf:"varint,3,opt,name=forcible,def=0" json:"forcible,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DispatchMergingRegionsRequest) Reset() { *m = DispatchMergingRegionsRequest{} } +func (m *DispatchMergingRegionsRequest) String() string { return proto.CompactTextString(m) } +func (*DispatchMergingRegionsRequest) ProtoMessage() {} + +const Default_DispatchMergingRegionsRequest_Forcible bool = false + +func (m *DispatchMergingRegionsRequest) GetRegionA() *RegionSpecifier { + if m != nil { + return m.RegionA + } + return nil +} + +func (m *DispatchMergingRegionsRequest) GetRegionB() *RegionSpecifier { + if m != nil { + return m.RegionB + } + return nil +} + +func (m *DispatchMergingRegionsRequest) GetForcible() bool { + if m != nil && m.Forcible != nil { + return *m.Forcible + } + return Default_DispatchMergingRegionsRequest_Forcible +} + +type DispatchMergingRegionsResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *DispatchMergingRegionsResponse) Reset() { *m = DispatchMergingRegionsResponse{} } +func (m *DispatchMergingRegionsResponse) String() string { return proto.CompactTextString(m) } +func (*DispatchMergingRegionsResponse) ProtoMessage() {} + +type AssignRegionRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AssignRegionRequest) Reset() { *m = AssignRegionRequest{} } +func (m *AssignRegionRequest) String() string { return proto.CompactTextString(m) } +func (*AssignRegionRequest) ProtoMessage() {} + +func (m *AssignRegionRequest) GetRegion() *RegionSpecifier { + if m != nil 
{ + return m.Region + } + return nil +} + +type AssignRegionResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AssignRegionResponse) Reset() { *m = AssignRegionResponse{} } +func (m *AssignRegionResponse) String() string { return proto.CompactTextString(m) } +func (*AssignRegionResponse) ProtoMessage() {} + +type UnassignRegionRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + Force *bool `protobuf:"varint,2,opt,name=force,def=0" json:"force,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UnassignRegionRequest) Reset() { *m = UnassignRegionRequest{} } +func (m *UnassignRegionRequest) String() string { return proto.CompactTextString(m) } +func (*UnassignRegionRequest) ProtoMessage() {} + +const Default_UnassignRegionRequest_Force bool = false + +func (m *UnassignRegionRequest) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +func (m *UnassignRegionRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_UnassignRegionRequest_Force +} + +type UnassignRegionResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *UnassignRegionResponse) Reset() { *m = UnassignRegionResponse{} } +func (m *UnassignRegionResponse) String() string { return proto.CompactTextString(m) } +func (*UnassignRegionResponse) ProtoMessage() {} + +type OfflineRegionRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OfflineRegionRequest) Reset() { *m = OfflineRegionRequest{} } +func (m *OfflineRegionRequest) String() string { return proto.CompactTextString(m) } +func (*OfflineRegionRequest) ProtoMessage() {} + +func (m *OfflineRegionRequest) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +type OfflineRegionResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *OfflineRegionResponse) Reset() { *m = OfflineRegionResponse{} } +func (m *OfflineRegionResponse) String() string { return proto.CompactTextString(m) } +func (*OfflineRegionResponse) ProtoMessage() {} + +type CreateTableRequest struct { + TableSchema *TableSchema `protobuf:"bytes,1,req,name=table_schema" json:"table_schema,omitempty"` + SplitKeys [][]byte `protobuf:"bytes,2,rep,name=split_keys" json:"split_keys,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} } +func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTableRequest) ProtoMessage() {} + +func (m *CreateTableRequest) GetTableSchema() *TableSchema { + if m != nil { + return m.TableSchema + } + return nil +} + +func (m *CreateTableRequest) GetSplitKeys() [][]byte { + if m != nil { + return m.SplitKeys + } + return nil +} + +type CreateTableResponse struct { + ProcId *uint64 `protobuf:"varint,1,opt,name=proc_id" json:"proc_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateTableResponse) Reset() { *m = CreateTableResponse{} } +func (m *CreateTableResponse) String() string { return proto.CompactTextString(m) } +func (*CreateTableResponse) ProtoMessage() {} + +func (m *CreateTableResponse) GetProcId() uint64 { + if m != nil && m.ProcId != nil { + return *m.ProcId + } + return 0 +} + +type DeleteTableRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` 
+} + +func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} } +func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTableRequest) ProtoMessage() {} + +func (m *DeleteTableRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +type DeleteTableResponse struct { + ProcId *uint64 `protobuf:"varint,1,opt,name=proc_id" json:"proc_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteTableResponse) Reset() { *m = DeleteTableResponse{} } +func (m *DeleteTableResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteTableResponse) ProtoMessage() {} + +func (m *DeleteTableResponse) GetProcId() uint64 { + if m != nil && m.ProcId != nil { + return *m.ProcId + } + return 0 +} + +type TruncateTableRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=tableName" json:"tableName,omitempty"` + PreserveSplits *bool `protobuf:"varint,2,opt,name=preserveSplits,def=0" json:"preserveSplits,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TruncateTableRequest) Reset() { *m = TruncateTableRequest{} } +func (m *TruncateTableRequest) String() string { return proto.CompactTextString(m) } +func (*TruncateTableRequest) ProtoMessage() {} + +const Default_TruncateTableRequest_PreserveSplits bool = false + +func (m *TruncateTableRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *TruncateTableRequest) GetPreserveSplits() bool { + if m != nil && m.PreserveSplits != nil { + return *m.PreserveSplits + } + return Default_TruncateTableRequest_PreserveSplits +} + +type TruncateTableResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TruncateTableResponse) Reset() { *m = TruncateTableResponse{} } +func (m *TruncateTableResponse) String() string { return proto.CompactTextString(m) } +func (*TruncateTableResponse) ProtoMessage() {} + +type EnableTableRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnableTableRequest) Reset() { *m = EnableTableRequest{} } +func (m *EnableTableRequest) String() string { return proto.CompactTextString(m) } +func (*EnableTableRequest) ProtoMessage() {} + +func (m *EnableTableRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +type EnableTableResponse struct { + ProcId *uint64 `protobuf:"varint,1,opt,name=proc_id" json:"proc_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnableTableResponse) Reset() { *m = EnableTableResponse{} } +func (m *EnableTableResponse) String() string { return proto.CompactTextString(m) } +func (*EnableTableResponse) ProtoMessage() {} + +func (m *EnableTableResponse) GetProcId() uint64 { + if m != nil && m.ProcId != nil { + return *m.ProcId + } + return 0 +} + +type DisableTableRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DisableTableRequest) Reset() { *m = DisableTableRequest{} } +func (m *DisableTableRequest) String() string { return proto.CompactTextString(m) } +func (*DisableTableRequest) ProtoMessage() {} + +func (m *DisableTableRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +type DisableTableResponse struct { + ProcId *uint64 `protobuf:"varint,1,opt,name=proc_id" json:"proc_id,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *DisableTableResponse) Reset() { *m = DisableTableResponse{} } +func (m *DisableTableResponse) String() string { return proto.CompactTextString(m) } +func (*DisableTableResponse) ProtoMessage() {} + +func (m *DisableTableResponse) GetProcId() uint64 { + if m != nil && m.ProcId != nil { + return *m.ProcId + } + return 0 +} + +type ModifyTableRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + TableSchema *TableSchema `protobuf:"bytes,2,req,name=table_schema" json:"table_schema,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModifyTableRequest) Reset() { *m = ModifyTableRequest{} } +func (m *ModifyTableRequest) String() string { return proto.CompactTextString(m) } +func (*ModifyTableRequest) ProtoMessage() {} + +func (m *ModifyTableRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *ModifyTableRequest) GetTableSchema() *TableSchema { + if m != nil { + return m.TableSchema + } + return nil +} + +type ModifyTableResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModifyTableResponse) Reset() { *m = ModifyTableResponse{} } +func (m *ModifyTableResponse) String() string { return proto.CompactTextString(m) } +func (*ModifyTableResponse) ProtoMessage() {} + +type CreateNamespaceRequest struct { + NamespaceDescriptor *NamespaceDescriptor `protobuf:"bytes,1,req,name=namespaceDescriptor" json:"namespaceDescriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateNamespaceRequest) Reset() { *m = CreateNamespaceRequest{} } +func (m *CreateNamespaceRequest) String() string { return proto.CompactTextString(m) } +func (*CreateNamespaceRequest) ProtoMessage() {} + +func (m *CreateNamespaceRequest) GetNamespaceDescriptor() *NamespaceDescriptor { + if m != nil { + return m.NamespaceDescriptor + } + return nil +} + +type CreateNamespaceResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateNamespaceResponse) Reset() { *m = CreateNamespaceResponse{} } +func (m *CreateNamespaceResponse) String() string { return proto.CompactTextString(m) } +func (*CreateNamespaceResponse) ProtoMessage() {} + +type DeleteNamespaceRequest struct { + NamespaceName *string `protobuf:"bytes,1,req,name=namespaceName" json:"namespaceName,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteNamespaceRequest) Reset() { *m = DeleteNamespaceRequest{} } +func (m *DeleteNamespaceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteNamespaceRequest) ProtoMessage() {} + +func (m *DeleteNamespaceRequest) GetNamespaceName() string { + if m != nil && m.NamespaceName != nil { + return *m.NamespaceName + } + return "" +} + +type DeleteNamespaceResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteNamespaceResponse) Reset() { *m = DeleteNamespaceResponse{} } +func (m *DeleteNamespaceResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteNamespaceResponse) ProtoMessage() {} + +type ModifyNamespaceRequest struct { + NamespaceDescriptor *NamespaceDescriptor `protobuf:"bytes,1,req,name=namespaceDescriptor" json:"namespaceDescriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModifyNamespaceRequest) Reset() { *m = ModifyNamespaceRequest{} } +func (m *ModifyNamespaceRequest) String() string { return proto.CompactTextString(m) } +func (*ModifyNamespaceRequest) ProtoMessage() {} + +func (m *ModifyNamespaceRequest) 
GetNamespaceDescriptor() *NamespaceDescriptor { + if m != nil { + return m.NamespaceDescriptor + } + return nil +} + +type ModifyNamespaceResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModifyNamespaceResponse) Reset() { *m = ModifyNamespaceResponse{} } +func (m *ModifyNamespaceResponse) String() string { return proto.CompactTextString(m) } +func (*ModifyNamespaceResponse) ProtoMessage() {} + +type GetNamespaceDescriptorRequest struct { + NamespaceName *string `protobuf:"bytes,1,req,name=namespaceName" json:"namespaceName,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNamespaceDescriptorRequest) Reset() { *m = GetNamespaceDescriptorRequest{} } +func (m *GetNamespaceDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetNamespaceDescriptorRequest) ProtoMessage() {} + +func (m *GetNamespaceDescriptorRequest) GetNamespaceName() string { + if m != nil && m.NamespaceName != nil { + return *m.NamespaceName + } + return "" +} + +type GetNamespaceDescriptorResponse struct { + NamespaceDescriptor *NamespaceDescriptor `protobuf:"bytes,1,req,name=namespaceDescriptor" json:"namespaceDescriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNamespaceDescriptorResponse) Reset() { *m = GetNamespaceDescriptorResponse{} } +func (m *GetNamespaceDescriptorResponse) String() string { return proto.CompactTextString(m) } +func (*GetNamespaceDescriptorResponse) ProtoMessage() {} + +func (m *GetNamespaceDescriptorResponse) GetNamespaceDescriptor() *NamespaceDescriptor { + if m != nil { + return m.NamespaceDescriptor + } + return nil +} + +type ListNamespaceDescriptorsRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListNamespaceDescriptorsRequest) Reset() { *m = ListNamespaceDescriptorsRequest{} } +func (m *ListNamespaceDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNamespaceDescriptorsRequest) ProtoMessage() {} + +type ListNamespaceDescriptorsResponse struct { + NamespaceDescriptor []*NamespaceDescriptor `protobuf:"bytes,1,rep,name=namespaceDescriptor" json:"namespaceDescriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListNamespaceDescriptorsResponse) Reset() { *m = ListNamespaceDescriptorsResponse{} } +func (m *ListNamespaceDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListNamespaceDescriptorsResponse) ProtoMessage() {} + +func (m *ListNamespaceDescriptorsResponse) GetNamespaceDescriptor() []*NamespaceDescriptor { + if m != nil { + return m.NamespaceDescriptor + } + return nil +} + +type ListTableDescriptorsByNamespaceRequest struct { + NamespaceName *string `protobuf:"bytes,1,req,name=namespaceName" json:"namespaceName,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListTableDescriptorsByNamespaceRequest) Reset() { + *m = ListTableDescriptorsByNamespaceRequest{} +} +func (m *ListTableDescriptorsByNamespaceRequest) String() string { return proto.CompactTextString(m) } +func (*ListTableDescriptorsByNamespaceRequest) ProtoMessage() {} + +func (m *ListTableDescriptorsByNamespaceRequest) GetNamespaceName() string { + if m != nil && m.NamespaceName != nil { + return *m.NamespaceName + } + return "" +} + +type ListTableDescriptorsByNamespaceResponse struct { + TableSchema []*TableSchema `protobuf:"bytes,1,rep,name=tableSchema" json:"tableSchema,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListTableDescriptorsByNamespaceResponse) Reset() { + *m = 
ListTableDescriptorsByNamespaceResponse{} +} +func (m *ListTableDescriptorsByNamespaceResponse) String() string { return proto.CompactTextString(m) } +func (*ListTableDescriptorsByNamespaceResponse) ProtoMessage() {} + +func (m *ListTableDescriptorsByNamespaceResponse) GetTableSchema() []*TableSchema { + if m != nil { + return m.TableSchema + } + return nil +} + +type ListTableNamesByNamespaceRequest struct { + NamespaceName *string `protobuf:"bytes,1,req,name=namespaceName" json:"namespaceName,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListTableNamesByNamespaceRequest) Reset() { *m = ListTableNamesByNamespaceRequest{} } +func (m *ListTableNamesByNamespaceRequest) String() string { return proto.CompactTextString(m) } +func (*ListTableNamesByNamespaceRequest) ProtoMessage() {} + +func (m *ListTableNamesByNamespaceRequest) GetNamespaceName() string { + if m != nil && m.NamespaceName != nil { + return *m.NamespaceName + } + return "" +} + +type ListTableNamesByNamespaceResponse struct { + TableName []*TableName `protobuf:"bytes,1,rep,name=tableName" json:"tableName,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListTableNamesByNamespaceResponse) Reset() { *m = ListTableNamesByNamespaceResponse{} } +func (m *ListTableNamesByNamespaceResponse) String() string { return proto.CompactTextString(m) } +func (*ListTableNamesByNamespaceResponse) ProtoMessage() {} + +func (m *ListTableNamesByNamespaceResponse) GetTableName() []*TableName { + if m != nil { + return m.TableName + } + return nil +} + +type ShutdownRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} } +func (m *ShutdownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutdownRequest) ProtoMessage() {} + +type ShutdownResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShutdownResponse) Reset() { *m = ShutdownResponse{} } +func (m *ShutdownResponse) String() string { return proto.CompactTextString(m) } +func (*ShutdownResponse) ProtoMessage() {} + +type StopMasterRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopMasterRequest) Reset() { *m = StopMasterRequest{} } +func (m *StopMasterRequest) String() string { return proto.CompactTextString(m) } +func (*StopMasterRequest) ProtoMessage() {} + +type StopMasterResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopMasterResponse) Reset() { *m = StopMasterResponse{} } +func (m *StopMasterResponse) String() string { return proto.CompactTextString(m) } +func (*StopMasterResponse) ProtoMessage() {} + +type BalanceRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *BalanceRequest) Reset() { *m = BalanceRequest{} } +func (m *BalanceRequest) String() string { return proto.CompactTextString(m) } +func (*BalanceRequest) ProtoMessage() {} + +type BalanceResponse struct { + BalancerRan *bool `protobuf:"varint,1,req,name=balancer_ran" json:"balancer_ran,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BalanceResponse) Reset() { *m = BalanceResponse{} } +func (m *BalanceResponse) String() string { return proto.CompactTextString(m) } +func (*BalanceResponse) ProtoMessage() {} + +func (m *BalanceResponse) GetBalancerRan() bool { + if m != nil && m.BalancerRan != nil { + return *m.BalancerRan + } + return false +} + +type SetBalancerRunningRequest struct { + On *bool `protobuf:"varint,1,req,name=on" json:"on,omitempty"` + Synchronous *bool `protobuf:"varint,2,opt,name=synchronous" 
json:"synchronous,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetBalancerRunningRequest) Reset() { *m = SetBalancerRunningRequest{} } +func (m *SetBalancerRunningRequest) String() string { return proto.CompactTextString(m) } +func (*SetBalancerRunningRequest) ProtoMessage() {} + +func (m *SetBalancerRunningRequest) GetOn() bool { + if m != nil && m.On != nil { + return *m.On + } + return false +} + +func (m *SetBalancerRunningRequest) GetSynchronous() bool { + if m != nil && m.Synchronous != nil { + return *m.Synchronous + } + return false +} + +type SetBalancerRunningResponse struct { + PrevBalanceValue *bool `protobuf:"varint,1,opt,name=prev_balance_value" json:"prev_balance_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetBalancerRunningResponse) Reset() { *m = SetBalancerRunningResponse{} } +func (m *SetBalancerRunningResponse) String() string { return proto.CompactTextString(m) } +func (*SetBalancerRunningResponse) ProtoMessage() {} + +func (m *SetBalancerRunningResponse) GetPrevBalanceValue() bool { + if m != nil && m.PrevBalanceValue != nil { + return *m.PrevBalanceValue + } + return false +} + +type IsBalancerEnabledRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsBalancerEnabledRequest) Reset() { *m = IsBalancerEnabledRequest{} } +func (m *IsBalancerEnabledRequest) String() string { return proto.CompactTextString(m) } +func (*IsBalancerEnabledRequest) ProtoMessage() {} + +type IsBalancerEnabledResponse struct { + Enabled *bool `protobuf:"varint,1,req,name=enabled" json:"enabled,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsBalancerEnabledResponse) Reset() { *m = IsBalancerEnabledResponse{} } +func (m *IsBalancerEnabledResponse) String() string { return proto.CompactTextString(m) } +func (*IsBalancerEnabledResponse) ProtoMessage() {} + +func (m *IsBalancerEnabledResponse) GetEnabled() bool { + if m != nil && m.Enabled != nil { + return *m.Enabled + } + return false +} + +type RunCatalogScanRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *RunCatalogScanRequest) Reset() { *m = RunCatalogScanRequest{} } +func (m *RunCatalogScanRequest) String() string { return proto.CompactTextString(m) } +func (*RunCatalogScanRequest) ProtoMessage() {} + +type RunCatalogScanResponse struct { + ScanResult *int32 `protobuf:"varint,1,opt,name=scan_result" json:"scan_result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RunCatalogScanResponse) Reset() { *m = RunCatalogScanResponse{} } +func (m *RunCatalogScanResponse) String() string { return proto.CompactTextString(m) } +func (*RunCatalogScanResponse) ProtoMessage() {} + +func (m *RunCatalogScanResponse) GetScanResult() int32 { + if m != nil && m.ScanResult != nil { + return *m.ScanResult + } + return 0 +} + +type EnableCatalogJanitorRequest struct { + Enable *bool `protobuf:"varint,1,req,name=enable" json:"enable,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnableCatalogJanitorRequest) Reset() { *m = EnableCatalogJanitorRequest{} } +func (m *EnableCatalogJanitorRequest) String() string { return proto.CompactTextString(m) } +func (*EnableCatalogJanitorRequest) ProtoMessage() {} + +func (m *EnableCatalogJanitorRequest) GetEnable() bool { + if m != nil && m.Enable != nil { + return *m.Enable + } + return false +} + +type EnableCatalogJanitorResponse struct { + PrevValue *bool `protobuf:"varint,1,opt,name=prev_value" json:"prev_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m 
*EnableCatalogJanitorResponse) Reset() { *m = EnableCatalogJanitorResponse{} } +func (m *EnableCatalogJanitorResponse) String() string { return proto.CompactTextString(m) } +func (*EnableCatalogJanitorResponse) ProtoMessage() {} + +func (m *EnableCatalogJanitorResponse) GetPrevValue() bool { + if m != nil && m.PrevValue != nil { + return *m.PrevValue + } + return false +} + +type IsCatalogJanitorEnabledRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsCatalogJanitorEnabledRequest) Reset() { *m = IsCatalogJanitorEnabledRequest{} } +func (m *IsCatalogJanitorEnabledRequest) String() string { return proto.CompactTextString(m) } +func (*IsCatalogJanitorEnabledRequest) ProtoMessage() {} + +type IsCatalogJanitorEnabledResponse struct { + Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsCatalogJanitorEnabledResponse) Reset() { *m = IsCatalogJanitorEnabledResponse{} } +func (m *IsCatalogJanitorEnabledResponse) String() string { return proto.CompactTextString(m) } +func (*IsCatalogJanitorEnabledResponse) ProtoMessage() {} + +func (m *IsCatalogJanitorEnabledResponse) GetValue() bool { + if m != nil && m.Value != nil { + return *m.Value + } + return false +} + +type SnapshotRequest struct { + Snapshot *SnapshotDescription `protobuf:"bytes,1,req,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } +func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotRequest) ProtoMessage() {} + +func (m *SnapshotRequest) GetSnapshot() *SnapshotDescription { + if m != nil { + return m.Snapshot + } + return nil +} + +type SnapshotResponse struct { + ExpectedTimeout *int64 `protobuf:"varint,1,req,name=expected_timeout" json:"expected_timeout,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } +func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotResponse) ProtoMessage() {} + +func (m *SnapshotResponse) GetExpectedTimeout() int64 { + if m != nil && m.ExpectedTimeout != nil { + return *m.ExpectedTimeout + } + return 0 +} + +type GetCompletedSnapshotsRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetCompletedSnapshotsRequest) Reset() { *m = GetCompletedSnapshotsRequest{} } +func (m *GetCompletedSnapshotsRequest) String() string { return proto.CompactTextString(m) } +func (*GetCompletedSnapshotsRequest) ProtoMessage() {} + +type GetCompletedSnapshotsResponse struct { + Snapshots []*SnapshotDescription `protobuf:"bytes,1,rep,name=snapshots" json:"snapshots,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetCompletedSnapshotsResponse) Reset() { *m = GetCompletedSnapshotsResponse{} } +func (m *GetCompletedSnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*GetCompletedSnapshotsResponse) ProtoMessage() {} + +func (m *GetCompletedSnapshotsResponse) GetSnapshots() []*SnapshotDescription { + if m != nil { + return m.Snapshots + } + return nil +} + +type DeleteSnapshotRequest struct { + Snapshot *SnapshotDescription `protobuf:"bytes,1,req,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } +func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotRequest) ProtoMessage() {} + 
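The generated accessors in this file all follow the same proto2 pattern: optional and required scalar fields are pointers, and every `GetX` method is nil-safe, returning the field's default when the message or the field is unset. A minimal usage sketch, assuming the package is imported from `bfs/libs/gohbase/pb` as this repo's layout suggests:

```go
package main

import (
	"fmt"

	pb "bfs/libs/gohbase/pb" // assumed import path for this vendored package

	"github.com/golang/protobuf/proto"
)

func main() {
	// proto2 scalar fields are pointers, so literals are wrapped with proto.Bool/String/....
	resp := &pb.BalanceResponse{BalancerRan: proto.Bool(true)}
	fmt.Println(resp.GetBalancerRan()) // true

	// Getters are safe on nil messages and unset fields: they fall back to the
	// declared default (here Default_IsSnapshotDoneResponse_Done, i.e. false).
	var done *pb.IsSnapshotDoneResponse
	fmt.Println(done.GetDone()) // false
}
```

This is why callers normally read response fields through the getters instead of dereferencing the struct fields directly.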
+func (m *DeleteSnapshotRequest) GetSnapshot() *SnapshotDescription { + if m != nil { + return m.Snapshot + } + return nil +} + +type DeleteSnapshotResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} } +func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotResponse) ProtoMessage() {} + +type RestoreSnapshotRequest struct { + Snapshot *SnapshotDescription `protobuf:"bytes,1,req,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RestoreSnapshotRequest) Reset() { *m = RestoreSnapshotRequest{} } +func (m *RestoreSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*RestoreSnapshotRequest) ProtoMessage() {} + +func (m *RestoreSnapshotRequest) GetSnapshot() *SnapshotDescription { + if m != nil { + return m.Snapshot + } + return nil +} + +type RestoreSnapshotResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *RestoreSnapshotResponse) Reset() { *m = RestoreSnapshotResponse{} } +func (m *RestoreSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*RestoreSnapshotResponse) ProtoMessage() {} + +// if you don't send the snapshot, then you will get it back +// in the response (if the snapshot is done) so you can check the snapshot +type IsSnapshotDoneRequest struct { + Snapshot *SnapshotDescription `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsSnapshotDoneRequest) Reset() { *m = IsSnapshotDoneRequest{} } +func (m *IsSnapshotDoneRequest) String() string { return proto.CompactTextString(m) } +func (*IsSnapshotDoneRequest) ProtoMessage() {} + +func (m *IsSnapshotDoneRequest) GetSnapshot() *SnapshotDescription { + if m != nil { + return m.Snapshot + } + return nil +} + +type IsSnapshotDoneResponse struct { + Done *bool `protobuf:"varint,1,opt,name=done,def=0" json:"done,omitempty"` + Snapshot *SnapshotDescription `protobuf:"bytes,2,opt,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsSnapshotDoneResponse) Reset() { *m = IsSnapshotDoneResponse{} } +func (m *IsSnapshotDoneResponse) String() string { return proto.CompactTextString(m) } +func (*IsSnapshotDoneResponse) ProtoMessage() {} + +const Default_IsSnapshotDoneResponse_Done bool = false + +func (m *IsSnapshotDoneResponse) GetDone() bool { + if m != nil && m.Done != nil { + return *m.Done + } + return Default_IsSnapshotDoneResponse_Done +} + +func (m *IsSnapshotDoneResponse) GetSnapshot() *SnapshotDescription { + if m != nil { + return m.Snapshot + } + return nil +} + +type IsRestoreSnapshotDoneRequest struct { + Snapshot *SnapshotDescription `protobuf:"bytes,1,opt,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsRestoreSnapshotDoneRequest) Reset() { *m = IsRestoreSnapshotDoneRequest{} } +func (m *IsRestoreSnapshotDoneRequest) String() string { return proto.CompactTextString(m) } +func (*IsRestoreSnapshotDoneRequest) ProtoMessage() {} + +func (m *IsRestoreSnapshotDoneRequest) GetSnapshot() *SnapshotDescription { + if m != nil { + return m.Snapshot + } + return nil +} + +type IsRestoreSnapshotDoneResponse struct { + Done *bool `protobuf:"varint,1,opt,name=done,def=0" json:"done,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsRestoreSnapshotDoneResponse) Reset() { *m = IsRestoreSnapshotDoneResponse{} } +func (m 
*IsRestoreSnapshotDoneResponse) String() string { return proto.CompactTextString(m) } +func (*IsRestoreSnapshotDoneResponse) ProtoMessage() {} + +const Default_IsRestoreSnapshotDoneResponse_Done bool = false + +func (m *IsRestoreSnapshotDoneResponse) GetDone() bool { + if m != nil && m.Done != nil { + return *m.Done + } + return Default_IsRestoreSnapshotDoneResponse_Done +} + +type GetSchemaAlterStatusRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSchemaAlterStatusRequest) Reset() { *m = GetSchemaAlterStatusRequest{} } +func (m *GetSchemaAlterStatusRequest) String() string { return proto.CompactTextString(m) } +func (*GetSchemaAlterStatusRequest) ProtoMessage() {} + +func (m *GetSchemaAlterStatusRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +type GetSchemaAlterStatusResponse struct { + YetToUpdateRegions *uint32 `protobuf:"varint,1,opt,name=yet_to_update_regions" json:"yet_to_update_regions,omitempty"` + TotalRegions *uint32 `protobuf:"varint,2,opt,name=total_regions" json:"total_regions,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSchemaAlterStatusResponse) Reset() { *m = GetSchemaAlterStatusResponse{} } +func (m *GetSchemaAlterStatusResponse) String() string { return proto.CompactTextString(m) } +func (*GetSchemaAlterStatusResponse) ProtoMessage() {} + +func (m *GetSchemaAlterStatusResponse) GetYetToUpdateRegions() uint32 { + if m != nil && m.YetToUpdateRegions != nil { + return *m.YetToUpdateRegions + } + return 0 +} + +func (m *GetSchemaAlterStatusResponse) GetTotalRegions() uint32 { + if m != nil && m.TotalRegions != nil { + return *m.TotalRegions + } + return 0 +} + +type GetTableDescriptorsRequest struct { + TableNames []*TableName `protobuf:"bytes,1,rep,name=table_names" json:"table_names,omitempty"` + Regex *string `protobuf:"bytes,2,opt,name=regex" json:"regex,omitempty"` + IncludeSysTables *bool `protobuf:"varint,3,opt,name=include_sys_tables,def=0" json:"include_sys_tables,omitempty"` + Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetTableDescriptorsRequest) Reset() { *m = GetTableDescriptorsRequest{} } +func (m *GetTableDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*GetTableDescriptorsRequest) ProtoMessage() {} + +const Default_GetTableDescriptorsRequest_IncludeSysTables bool = false + +func (m *GetTableDescriptorsRequest) GetTableNames() []*TableName { + if m != nil { + return m.TableNames + } + return nil +} + +func (m *GetTableDescriptorsRequest) GetRegex() string { + if m != nil && m.Regex != nil { + return *m.Regex + } + return "" +} + +func (m *GetTableDescriptorsRequest) GetIncludeSysTables() bool { + if m != nil && m.IncludeSysTables != nil { + return *m.IncludeSysTables + } + return Default_GetTableDescriptorsRequest_IncludeSysTables +} + +func (m *GetTableDescriptorsRequest) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +type GetTableDescriptorsResponse struct { + TableSchema []*TableSchema `protobuf:"bytes,1,rep,name=table_schema" json:"table_schema,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetTableDescriptorsResponse) Reset() { *m = GetTableDescriptorsResponse{} } +func (m *GetTableDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func 
(*GetTableDescriptorsResponse) ProtoMessage() {} + +func (m *GetTableDescriptorsResponse) GetTableSchema() []*TableSchema { + if m != nil { + return m.TableSchema + } + return nil +} + +type GetTableNamesRequest struct { + Regex *string `protobuf:"bytes,1,opt,name=regex" json:"regex,omitempty"` + IncludeSysTables *bool `protobuf:"varint,2,opt,name=include_sys_tables,def=0" json:"include_sys_tables,omitempty"` + Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetTableNamesRequest) Reset() { *m = GetTableNamesRequest{} } +func (m *GetTableNamesRequest) String() string { return proto.CompactTextString(m) } +func (*GetTableNamesRequest) ProtoMessage() {} + +const Default_GetTableNamesRequest_IncludeSysTables bool = false + +func (m *GetTableNamesRequest) GetRegex() string { + if m != nil && m.Regex != nil { + return *m.Regex + } + return "" +} + +func (m *GetTableNamesRequest) GetIncludeSysTables() bool { + if m != nil && m.IncludeSysTables != nil { + return *m.IncludeSysTables + } + return Default_GetTableNamesRequest_IncludeSysTables +} + +func (m *GetTableNamesRequest) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +type GetTableNamesResponse struct { + TableNames []*TableName `protobuf:"bytes,1,rep,name=table_names" json:"table_names,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetTableNamesResponse) Reset() { *m = GetTableNamesResponse{} } +func (m *GetTableNamesResponse) String() string { return proto.CompactTextString(m) } +func (*GetTableNamesResponse) ProtoMessage() {} + +func (m *GetTableNamesResponse) GetTableNames() []*TableName { + if m != nil { + return m.TableNames + } + return nil +} + +type GetClusterStatusRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetClusterStatusRequest) Reset() { *m = GetClusterStatusRequest{} } +func (m *GetClusterStatusRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterStatusRequest) ProtoMessage() {} + +type GetClusterStatusResponse struct { + ClusterStatus *ClusterStatus `protobuf:"bytes,1,req,name=cluster_status" json:"cluster_status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetClusterStatusResponse) Reset() { *m = GetClusterStatusResponse{} } +func (m *GetClusterStatusResponse) String() string { return proto.CompactTextString(m) } +func (*GetClusterStatusResponse) ProtoMessage() {} + +func (m *GetClusterStatusResponse) GetClusterStatus() *ClusterStatus { + if m != nil { + return m.ClusterStatus + } + return nil +} + +type IsMasterRunningRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsMasterRunningRequest) Reset() { *m = IsMasterRunningRequest{} } +func (m *IsMasterRunningRequest) String() string { return proto.CompactTextString(m) } +func (*IsMasterRunningRequest) ProtoMessage() {} + +type IsMasterRunningResponse struct { + IsMasterRunning *bool `protobuf:"varint,1,req,name=is_master_running" json:"is_master_running,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsMasterRunningResponse) Reset() { *m = IsMasterRunningResponse{} } +func (m *IsMasterRunningResponse) String() string { return proto.CompactTextString(m) } +func (*IsMasterRunningResponse) ProtoMessage() {} + +func (m *IsMasterRunningResponse) GetIsMasterRunning() bool { + if m != nil && m.IsMasterRunning != nil { + return *m.IsMasterRunning + } + return false +} + +type ExecProcedureRequest struct { + Procedure 
*ProcedureDescription `protobuf:"bytes,1,req,name=procedure" json:"procedure,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExecProcedureRequest) Reset() { *m = ExecProcedureRequest{} } +func (m *ExecProcedureRequest) String() string { return proto.CompactTextString(m) } +func (*ExecProcedureRequest) ProtoMessage() {} + +func (m *ExecProcedureRequest) GetProcedure() *ProcedureDescription { + if m != nil { + return m.Procedure + } + return nil +} + +type ExecProcedureResponse struct { + ExpectedTimeout *int64 `protobuf:"varint,1,opt,name=expected_timeout" json:"expected_timeout,omitempty"` + ReturnData []byte `protobuf:"bytes,2,opt,name=return_data" json:"return_data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExecProcedureResponse) Reset() { *m = ExecProcedureResponse{} } +func (m *ExecProcedureResponse) String() string { return proto.CompactTextString(m) } +func (*ExecProcedureResponse) ProtoMessage() {} + +func (m *ExecProcedureResponse) GetExpectedTimeout() int64 { + if m != nil && m.ExpectedTimeout != nil { + return *m.ExpectedTimeout + } + return 0 +} + +func (m *ExecProcedureResponse) GetReturnData() []byte { + if m != nil { + return m.ReturnData + } + return nil +} + +type IsProcedureDoneRequest struct { + Procedure *ProcedureDescription `protobuf:"bytes,1,opt,name=procedure" json:"procedure,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsProcedureDoneRequest) Reset() { *m = IsProcedureDoneRequest{} } +func (m *IsProcedureDoneRequest) String() string { return proto.CompactTextString(m) } +func (*IsProcedureDoneRequest) ProtoMessage() {} + +func (m *IsProcedureDoneRequest) GetProcedure() *ProcedureDescription { + if m != nil { + return m.Procedure + } + return nil +} + +type IsProcedureDoneResponse struct { + Done *bool `protobuf:"varint,1,opt,name=done,def=0" json:"done,omitempty"` + Snapshot *ProcedureDescription `protobuf:"bytes,2,opt,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsProcedureDoneResponse) Reset() { *m = IsProcedureDoneResponse{} } +func (m *IsProcedureDoneResponse) String() string { return proto.CompactTextString(m) } +func (*IsProcedureDoneResponse) ProtoMessage() {} + +const Default_IsProcedureDoneResponse_Done bool = false + +func (m *IsProcedureDoneResponse) GetDone() bool { + if m != nil && m.Done != nil { + return *m.Done + } + return Default_IsProcedureDoneResponse_Done +} + +func (m *IsProcedureDoneResponse) GetSnapshot() *ProcedureDescription { + if m != nil { + return m.Snapshot + } + return nil +} + +type GetProcedureResultRequest struct { + ProcId *uint64 `protobuf:"varint,1,req,name=proc_id" json:"proc_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetProcedureResultRequest) Reset() { *m = GetProcedureResultRequest{} } +func (m *GetProcedureResultRequest) String() string { return proto.CompactTextString(m) } +func (*GetProcedureResultRequest) ProtoMessage() {} + +func (m *GetProcedureResultRequest) GetProcId() uint64 { + if m != nil && m.ProcId != nil { + return *m.ProcId + } + return 0 +} + +type GetProcedureResultResponse struct { + State *GetProcedureResultResponse_State `protobuf:"varint,1,req,name=state,enum=pb.GetProcedureResultResponse_State" json:"state,omitempty"` + StartTime *uint64 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` + LastUpdate *uint64 `protobuf:"varint,3,opt,name=last_update" json:"last_update,omitempty"` + Result []byte `protobuf:"bytes,4,opt,name=result" json:"result,omitempty"` + 
Exception *ForeignExceptionMessage `protobuf:"bytes,5,opt,name=exception" json:"exception,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetProcedureResultResponse) Reset() { *m = GetProcedureResultResponse{} } +func (m *GetProcedureResultResponse) String() string { return proto.CompactTextString(m) } +func (*GetProcedureResultResponse) ProtoMessage() {} + +func (m *GetProcedureResultResponse) GetState() GetProcedureResultResponse_State { + if m != nil && m.State != nil { + return *m.State + } + return GetProcedureResultResponse_NOT_FOUND +} + +func (m *GetProcedureResultResponse) GetStartTime() uint64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *GetProcedureResultResponse) GetLastUpdate() uint64 { + if m != nil && m.LastUpdate != nil { + return *m.LastUpdate + } + return 0 +} + +func (m *GetProcedureResultResponse) GetResult() []byte { + if m != nil { + return m.Result + } + return nil +} + +func (m *GetProcedureResultResponse) GetException() *ForeignExceptionMessage { + if m != nil { + return m.Exception + } + return nil +} + +type SetQuotaRequest struct { + UserName *string `protobuf:"bytes,1,opt,name=user_name" json:"user_name,omitempty"` + UserGroup *string `protobuf:"bytes,2,opt,name=user_group" json:"user_group,omitempty"` + Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + TableName *TableName `protobuf:"bytes,4,opt,name=table_name" json:"table_name,omitempty"` + RemoveAll *bool `protobuf:"varint,5,opt,name=remove_all" json:"remove_all,omitempty"` + BypassGlobals *bool `protobuf:"varint,6,opt,name=bypass_globals" json:"bypass_globals,omitempty"` + Throttle *ThrottleRequest `protobuf:"bytes,7,opt,name=throttle" json:"throttle,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetQuotaRequest) Reset() { *m = SetQuotaRequest{} } +func (m *SetQuotaRequest) String() string { return proto.CompactTextString(m) } +func (*SetQuotaRequest) ProtoMessage() {} + +func (m *SetQuotaRequest) GetUserName() string { + if m != nil && m.UserName != nil { + return *m.UserName + } + return "" +} + +func (m *SetQuotaRequest) GetUserGroup() string { + if m != nil && m.UserGroup != nil { + return *m.UserGroup + } + return "" +} + +func (m *SetQuotaRequest) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +func (m *SetQuotaRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *SetQuotaRequest) GetRemoveAll() bool { + if m != nil && m.RemoveAll != nil { + return *m.RemoveAll + } + return false +} + +func (m *SetQuotaRequest) GetBypassGlobals() bool { + if m != nil && m.BypassGlobals != nil { + return *m.BypassGlobals + } + return false +} + +func (m *SetQuotaRequest) GetThrottle() *ThrottleRequest { + if m != nil { + return m.Throttle + } + return nil +} + +type SetQuotaResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetQuotaResponse) Reset() { *m = SetQuotaResponse{} } +func (m *SetQuotaResponse) String() string { return proto.CompactTextString(m) } +func (*SetQuotaResponse) ProtoMessage() {} + +type MajorCompactionTimestampRequest struct { + TableName *TableName `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MajorCompactionTimestampRequest) Reset() { *m = MajorCompactionTimestampRequest{} } +func (m *MajorCompactionTimestampRequest) String() string { return proto.CompactTextString(m) } +func 
(*MajorCompactionTimestampRequest) ProtoMessage() {} + +func (m *MajorCompactionTimestampRequest) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +type MajorCompactionTimestampForRegionRequest struct { + Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MajorCompactionTimestampForRegionRequest) Reset() { + *m = MajorCompactionTimestampForRegionRequest{} +} +func (m *MajorCompactionTimestampForRegionRequest) String() string { return proto.CompactTextString(m) } +func (*MajorCompactionTimestampForRegionRequest) ProtoMessage() {} + +func (m *MajorCompactionTimestampForRegionRequest) GetRegion() *RegionSpecifier { + if m != nil { + return m.Region + } + return nil +} + +type MajorCompactionTimestampResponse struct { + CompactionTimestamp *int64 `protobuf:"varint,1,req,name=compaction_timestamp" json:"compaction_timestamp,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MajorCompactionTimestampResponse) Reset() { *m = MajorCompactionTimestampResponse{} } +func (m *MajorCompactionTimestampResponse) String() string { return proto.CompactTextString(m) } +func (*MajorCompactionTimestampResponse) ProtoMessage() {} + +func (m *MajorCompactionTimestampResponse) GetCompactionTimestamp() int64 { + if m != nil && m.CompactionTimestamp != nil { + return *m.CompactionTimestamp + } + return 0 +} + +func init() { + proto.RegisterEnum("pb.GetProcedureResultResponse_State", GetProcedureResultResponse_State_name, GetProcedureResultResponse_State_value) +} diff --git a/libs/gohbase/pb/Master.proto b/libs/gohbase/pb/Master.proto new file mode 100644 index 0000000..2638621 --- /dev/null +++ b/libs/gohbase/pb/Master.proto @@ -0,0 +1,652 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// All to do with the Master. Includes schema management since these +// changes are run by the Master process. 
+ +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "MasterProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; +import "Client.proto"; +import "ClusterStatus.proto"; +import "ErrorHandling.proto"; +import "Quota.proto"; + +/* Column-level protobufs */ + +message AddColumnRequest { + required TableName table_name = 1; + required ColumnFamilySchema column_families = 2; +} + +message AddColumnResponse { +} + +message DeleteColumnRequest { + required TableName table_name = 1; + required bytes column_name = 2; +} + +message DeleteColumnResponse { +} + +message ModifyColumnRequest { + required TableName table_name = 1; + required ColumnFamilySchema column_families = 2; +} + +message ModifyColumnResponse { +} + +/* Region-level Protos */ + +message MoveRegionRequest { + required RegionSpecifier region = 1; + optional ServerName dest_server_name = 2; +} + +message MoveRegionResponse { +} + +/** + * Dispatch merging the specified regions. + */ +message DispatchMergingRegionsRequest { + required RegionSpecifier region_a = 1; + required RegionSpecifier region_b = 2; + optional bool forcible = 3 [default = false]; +} + +message DispatchMergingRegionsResponse { +} + +message AssignRegionRequest { + required RegionSpecifier region = 1; +} + +message AssignRegionResponse { +} + +message UnassignRegionRequest { + required RegionSpecifier region = 1; + optional bool force = 2 [default = false]; +} + +message UnassignRegionResponse { +} + +message OfflineRegionRequest { + required RegionSpecifier region = 1; +} + +message OfflineRegionResponse { +} + +/* Table-level protobufs */ + +message CreateTableRequest { + required TableSchema table_schema = 1; + repeated bytes split_keys = 2; +} + +message CreateTableResponse { + optional uint64 proc_id = 1; +} + +message DeleteTableRequest { + required TableName table_name = 1; +} + +message DeleteTableResponse { + optional uint64 proc_id = 1; +} + +message TruncateTableRequest { + required TableName tableName = 1; + optional bool preserveSplits = 2 [default = false]; +} + +message TruncateTableResponse { +} + +message EnableTableRequest { + required TableName table_name = 1; +} + +message EnableTableResponse { + optional uint64 proc_id = 1; +} + +message DisableTableRequest { + required TableName table_name = 1; +} + +message DisableTableResponse { + optional uint64 proc_id = 1; +} + +message ModifyTableRequest { + required TableName table_name = 1; + required TableSchema table_schema = 2; +} + +message ModifyTableResponse { +} + +/* Namespace-level protobufs */ + +message CreateNamespaceRequest { + required NamespaceDescriptor namespaceDescriptor = 1; +} + +message CreateNamespaceResponse { +} + +message DeleteNamespaceRequest { + required string namespaceName = 1; +} + +message DeleteNamespaceResponse { +} + +message ModifyNamespaceRequest { + required NamespaceDescriptor namespaceDescriptor = 1; +} + +message ModifyNamespaceResponse { +} + +message GetNamespaceDescriptorRequest { + required string namespaceName = 1; +} + +message GetNamespaceDescriptorResponse { + required NamespaceDescriptor namespaceDescriptor = 1; +} + +message ListNamespaceDescriptorsRequest { +} + +message ListNamespaceDescriptorsResponse { + repeated NamespaceDescriptor namespaceDescriptor = 1; +} + +message ListTableDescriptorsByNamespaceRequest { + required string namespaceName = 1; +} + +message 
ListTableDescriptorsByNamespaceResponse { + repeated TableSchema tableSchema = 1; +} + +message ListTableNamesByNamespaceRequest { + required string namespaceName = 1; +} + +message ListTableNamesByNamespaceResponse { + repeated TableName tableName = 1; +} + +/* Cluster-level protobufs */ + + +message ShutdownRequest { +} + +message ShutdownResponse { +} + +message StopMasterRequest { +} + +message StopMasterResponse { +} + +message BalanceRequest { +} + +message BalanceResponse { + required bool balancer_ran = 1; +} + +message SetBalancerRunningRequest { + required bool on = 1; + optional bool synchronous = 2; +} + +message SetBalancerRunningResponse { + optional bool prev_balance_value = 1; +} + +message IsBalancerEnabledRequest { +} + +message IsBalancerEnabledResponse { + required bool enabled = 1; +} + +message RunCatalogScanRequest { +} + +message RunCatalogScanResponse { + optional int32 scan_result = 1; +} + +message EnableCatalogJanitorRequest { + required bool enable = 1; +} + +message EnableCatalogJanitorResponse { + optional bool prev_value = 1; +} + +message IsCatalogJanitorEnabledRequest { +} + +message IsCatalogJanitorEnabledResponse { + required bool value = 1; +} + +message SnapshotRequest { + required SnapshotDescription snapshot = 1; +} + +message SnapshotResponse { + required int64 expected_timeout = 1; +} + +message GetCompletedSnapshotsRequest { +} + +message GetCompletedSnapshotsResponse { + repeated SnapshotDescription snapshots = 1; +} + +message DeleteSnapshotRequest { + required SnapshotDescription snapshot = 1; +} + +message DeleteSnapshotResponse { +} + +message RestoreSnapshotRequest { + required SnapshotDescription snapshot = 1; +} + +message RestoreSnapshotResponse { +} + +/* if you don't send the snapshot, then you will get it back + * in the response (if the snapshot is done) so you can check the snapshot + */ +message IsSnapshotDoneRequest { + optional SnapshotDescription snapshot = 1; +} + +message IsSnapshotDoneResponse { + optional bool done = 1 [default = false]; + optional SnapshotDescription snapshot = 2; +} + +message IsRestoreSnapshotDoneRequest { + optional SnapshotDescription snapshot = 1; +} + +message IsRestoreSnapshotDoneResponse { + optional bool done = 1 [default = false]; +} + +message GetSchemaAlterStatusRequest { + required TableName table_name = 1; +} + +message GetSchemaAlterStatusResponse { + optional uint32 yet_to_update_regions = 1; + optional uint32 total_regions = 2; +} + +message GetTableDescriptorsRequest { + repeated TableName table_names = 1; + optional string regex = 2; + optional bool include_sys_tables = 3 [default=false]; + optional string namespace = 4; +} + +message GetTableDescriptorsResponse { + repeated TableSchema table_schema = 1; +} + +message GetTableNamesRequest { + optional string regex = 1; + optional bool include_sys_tables = 2 [default=false]; + optional string namespace = 3; +} + +message GetTableNamesResponse { + repeated TableName table_names = 1; +} + +message GetClusterStatusRequest { +} + +message GetClusterStatusResponse { + required ClusterStatus cluster_status = 1; +} + +message IsMasterRunningRequest { +} + +message IsMasterRunningResponse { + required bool is_master_running = 1; +} + +message ExecProcedureRequest { + required ProcedureDescription procedure = 1; +} + +message ExecProcedureResponse { + optional int64 expected_timeout = 1; + optional bytes return_data = 2; +} + +message IsProcedureDoneRequest { + optional ProcedureDescription procedure = 1; +} + +message IsProcedureDoneResponse { + 
optional bool done = 1 [default = false]; + optional ProcedureDescription snapshot = 2; +} + +message GetProcedureResultRequest { + required uint64 proc_id = 1; +} + +message GetProcedureResultResponse { + enum State { + NOT_FOUND = 0; + RUNNING = 1; + FINISHED = 2; + } + + required State state = 1; + optional uint64 start_time = 2; + optional uint64 last_update = 3; + optional bytes result = 4; + optional ForeignExceptionMessage exception = 5; +} + +message SetQuotaRequest { + optional string user_name = 1; + optional string user_group = 2; + optional string namespace = 3; + optional TableName table_name = 4; + + optional bool remove_all = 5; + optional bool bypass_globals = 6; + optional ThrottleRequest throttle = 7; +} + +message SetQuotaResponse { +} + +message MajorCompactionTimestampRequest { + required TableName table_name = 1; +} + +message MajorCompactionTimestampForRegionRequest { + required RegionSpecifier region = 1; +} + +message MajorCompactionTimestampResponse { + required int64 compaction_timestamp = 1; +} + +service MasterService { + /** Used by the client to get the number of regions that have received the updated schema */ + rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) + returns(GetSchemaAlterStatusResponse); + + /** Get list of TableDescriptors for requested tables. */ + rpc GetTableDescriptors(GetTableDescriptorsRequest) + returns(GetTableDescriptorsResponse); + + /** Get the list of table names. */ + rpc GetTableNames(GetTableNamesRequest) + returns(GetTableNamesResponse); + + /** Return cluster status. */ + rpc GetClusterStatus(GetClusterStatusRequest) + returns(GetClusterStatusResponse); + + /** return true if master is available */ + rpc IsMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse); + + /** Adds a column to the specified table. */ + rpc AddColumn(AddColumnRequest) + returns(AddColumnResponse); + + /** Deletes a column from the specified table. Table must be disabled. */ + rpc DeleteColumn(DeleteColumnRequest) + returns(DeleteColumnResponse); + + /** Modifies an existing column on the specified table. */ + rpc ModifyColumn(ModifyColumnRequest) + returns(ModifyColumnResponse); + + /** Move the region region to the destination server. */ + rpc MoveRegion(MoveRegionRequest) + returns(MoveRegionResponse); + + /** Master dispatch merging the regions */ + rpc DispatchMergingRegions(DispatchMergingRegionsRequest) + returns(DispatchMergingRegionsResponse); + + /** Assign a region to a server chosen at random. */ + rpc AssignRegion(AssignRegionRequest) + returns(AssignRegionResponse); + + /** + * Unassign a region from current hosting regionserver. Region will then be + * assigned to a regionserver chosen at random. Region could be reassigned + * back to the same server. Use MoveRegion if you want + * to control the region movement. + */ + rpc UnassignRegion(UnassignRegionRequest) + returns(UnassignRegionResponse); + + /** + * Offline a region from the assignment manager's in-memory state. The + * region should be in a closed state and there will be no attempt to + * automatically reassign the region as in unassign. This is a special + * method, and should only be used by experts or hbck. 
+ */ + rpc OfflineRegion(OfflineRegionRequest) + returns(OfflineRegionResponse); + + /** Deletes a table */ + rpc DeleteTable(DeleteTableRequest) + returns(DeleteTableResponse); + + /** Truncate a table */ + rpc truncateTable(TruncateTableRequest) + returns(TruncateTableResponse); + + /** Puts the table on-line (only needed if table has been previously taken offline) */ + rpc EnableTable(EnableTableRequest) + returns(EnableTableResponse); + + /** Take table offline */ + rpc DisableTable(DisableTableRequest) + returns(DisableTableResponse); + + /** Modify a table's metadata */ + rpc ModifyTable(ModifyTableRequest) + returns(ModifyTableResponse); + + /** Creates a new table asynchronously */ + rpc CreateTable(CreateTableRequest) + returns(CreateTableResponse); + + /** Shutdown an HBase cluster. */ + rpc Shutdown(ShutdownRequest) + returns(ShutdownResponse); + + /** Stop HBase Master only. Does not shutdown the cluster. */ + rpc StopMaster(StopMasterRequest) + returns(StopMasterResponse); + + /** + * Run the balancer. Will run the balancer and if regions to move, it will + * go ahead and do the reassignments. Can NOT run for various reasons. + * Check logs. + */ + rpc Balance(BalanceRequest) + returns(BalanceResponse); + + /** + * Turn the load balancer on or off. + * If synchronous is true, it waits until current balance() call, if outstanding, to return. + */ + rpc SetBalancerRunning(SetBalancerRunningRequest) + returns(SetBalancerRunningResponse); + + /** + * Query whether the Region Balancer is running. + */ + rpc IsBalancerEnabled(IsBalancerEnabledRequest) + returns(IsBalancerEnabledResponse); + + /** Get a run of the catalog janitor */ + rpc RunCatalogScan(RunCatalogScanRequest) + returns(RunCatalogScanResponse); + + /** + * Enable the catalog janitor on or off. + */ + rpc EnableCatalogJanitor(EnableCatalogJanitorRequest) + returns(EnableCatalogJanitorResponse); + + /** + * Query whether the catalog janitor is enabled. + */ + rpc IsCatalogJanitorEnabled(IsCatalogJanitorEnabledRequest) + returns(IsCatalogJanitorEnabledResponse); + + /** + * Call a master coprocessor endpoint + */ + rpc ExecMasterService(CoprocessorServiceRequest) + returns(CoprocessorServiceResponse); + + /** + * Create a snapshot for the given table. + */ + rpc Snapshot(SnapshotRequest) returns(SnapshotResponse); + + /** + * Get completed snapshots. + * Returns a list of snapshot descriptors for completed snapshots + */ + rpc GetCompletedSnapshots(GetCompletedSnapshotsRequest) returns(GetCompletedSnapshotsResponse); + + /** + * Delete an existing snapshot. This method can also be used to clean up an aborted snapshot. + */ + rpc DeleteSnapshot(DeleteSnapshotRequest) returns(DeleteSnapshotResponse); + + /** + * Determine if the snapshot is done yet. + */ + rpc IsSnapshotDone(IsSnapshotDoneRequest) returns(IsSnapshotDoneResponse); + + /** + * Restore a snapshot + */ + rpc RestoreSnapshot(RestoreSnapshotRequest) returns(RestoreSnapshotResponse); + + /** + * Determine if the snapshot restore is done yet. + */ + rpc IsRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) returns(IsRestoreSnapshotDoneResponse); + + /** + * Execute a distributed procedure. + */ + rpc ExecProcedure(ExecProcedureRequest) returns(ExecProcedureResponse); + + /** + * Execute a distributed procedure with return data. + */ + rpc ExecProcedureWithRet(ExecProcedureRequest) returns(ExecProcedureResponse); + + /** + * Determine if the procedure is done yet. 
+ */ + rpc IsProcedureDone(IsProcedureDoneRequest) returns(IsProcedureDoneResponse); + + /** return true if master is available */ + /** rpc IsMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse); */ + + /** Modify a namespace's metadata */ + rpc ModifyNamespace(ModifyNamespaceRequest) + returns(ModifyNamespaceResponse); + + /** Creates a new namespace synchronously */ + rpc CreateNamespace(CreateNamespaceRequest) + returns(CreateNamespaceResponse); + + /** Deletes namespace synchronously */ + rpc DeleteNamespace(DeleteNamespaceRequest) + returns(DeleteNamespaceResponse); + + /** Get a namespace descriptor by name */ + rpc GetNamespaceDescriptor(GetNamespaceDescriptorRequest) + returns(GetNamespaceDescriptorResponse); + + /** returns a list of namespaces */ + rpc ListNamespaceDescriptors(ListNamespaceDescriptorsRequest) + returns(ListNamespaceDescriptorsResponse); + + /** returns a list of tables for a given namespace*/ + rpc ListTableDescriptorsByNamespace(ListTableDescriptorsByNamespaceRequest) + returns(ListTableDescriptorsByNamespaceResponse); + + /** returns a list of tables for a given namespace*/ + rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest) + returns(ListTableNamesByNamespaceResponse); + + /** Apply the new quota settings */ + rpc SetQuota(SetQuotaRequest) returns(SetQuotaResponse); + + /** Returns the timestamp of the last major compaction */ + rpc getLastMajorCompactionTimestamp(MajorCompactionTimestampRequest) + returns(MajorCompactionTimestampResponse); + + /** Returns the timestamp of the last major compaction */ + rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest) + returns(MajorCompactionTimestampResponse); + + rpc getProcedureResult(GetProcedureResultRequest) + returns(GetProcedureResultResponse); +} diff --git a/libs/gohbase/pb/MultiRowMutation.pb.go b/libs/gohbase/pb/MultiRowMutation.pb.go new file mode 100644 index 0000000..71f7f7a --- /dev/null +++ b/libs/gohbase/pb/MultiRowMutation.pb.go @@ -0,0 +1,71 @@ +// Code generated by protoc-gen-go. +// source: MultiRowMutation.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = math.Inf + +type MultiRowMutationProcessorRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MultiRowMutationProcessorRequest) Reset() { *m = MultiRowMutationProcessorRequest{} } +func (m *MultiRowMutationProcessorRequest) String() string { return proto.CompactTextString(m) } +func (*MultiRowMutationProcessorRequest) ProtoMessage() {} + +type MultiRowMutationProcessorResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MultiRowMutationProcessorResponse) Reset() { *m = MultiRowMutationProcessorResponse{} } +func (m *MultiRowMutationProcessorResponse) String() string { return proto.CompactTextString(m) } +func (*MultiRowMutationProcessorResponse) ProtoMessage() {} + +type MutateRowsRequest struct { + MutationRequest []*MutationProto `protobuf:"bytes,1,rep,name=mutation_request" json:"mutation_request,omitempty"` + NonceGroup *uint64 `protobuf:"varint,2,opt,name=nonce_group" json:"nonce_group,omitempty"` + Nonce *uint64 `protobuf:"varint,3,opt,name=nonce" json:"nonce,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutateRowsRequest) Reset() { *m = MutateRowsRequest{} } +func (m *MutateRowsRequest) String() string { return proto.CompactTextString(m) } +func (*MutateRowsRequest) ProtoMessage() {} + +func (m *MutateRowsRequest) GetMutationRequest() []*MutationProto { + if m != nil { + return m.MutationRequest + } + return nil +} + +func (m *MutateRowsRequest) GetNonceGroup() uint64 { + if m != nil && m.NonceGroup != nil { + return *m.NonceGroup + } + return 0 +} + +func (m *MutateRowsRequest) GetNonce() uint64 { + if m != nil && m.Nonce != nil { + return *m.Nonce + } + return 0 +} + +type MutateRowsResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MutateRowsResponse) Reset() { *m = MutateRowsResponse{} } +func (m *MutateRowsResponse) String() string { return proto.CompactTextString(m) } +func (*MutateRowsResponse) ProtoMessage() {} + +func init() { +} diff --git a/libs/gohbase/pb/MultiRowMutation.proto b/libs/gohbase/pb/MultiRowMutation.proto new file mode 100644 index 0000000..26aae35 --- /dev/null +++ b/libs/gohbase/pb/MultiRowMutation.proto @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import "Client.proto"; +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "MultiRowMutationProtos"; +option java_generate_equals_and_hash = true; +option java_generic_services = true; +option optimize_for = SPEED; + +message MultiRowMutationProcessorRequest{ +} + +message MultiRowMutationProcessorResponse{ +} + +message MutateRowsRequest { + repeated MutationProto mutation_request = 1; + optional uint64 nonce_group = 2; + optional uint64 nonce = 3; +} + +message MutateRowsResponse { +} + +service MultiRowMutationService { + rpc MutateRows(MutateRowsRequest) + returns(MutateRowsResponse); +} \ No newline at end of file diff --git a/libs/gohbase/pb/Quota.pb.go b/libs/gohbase/pb/Quota.pb.go new file mode 100644 index 0000000..5da1a3e --- /dev/null +++ b/libs/gohbase/pb/Quota.pb.go @@ -0,0 +1,282 @@ +// Code generated by protoc-gen-go. +// source: Quota.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type QuotaScope int32 + +const ( + QuotaScope_CLUSTER QuotaScope = 1 + QuotaScope_MACHINE QuotaScope = 2 +) + +var QuotaScope_name = map[int32]string{ + 1: "CLUSTER", + 2: "MACHINE", +} +var QuotaScope_value = map[string]int32{ + "CLUSTER": 1, + "MACHINE": 2, +} + +func (x QuotaScope) Enum() *QuotaScope { + p := new(QuotaScope) + *p = x + return p +} +func (x QuotaScope) String() string { + return proto.EnumName(QuotaScope_name, int32(x)) +} +func (x *QuotaScope) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(QuotaScope_value, data, "QuotaScope") + if err != nil { + return err + } + *x = QuotaScope(value) + return nil +} + +type ThrottleType int32 + +const ( + ThrottleType_REQUEST_NUMBER ThrottleType = 1 + ThrottleType_REQUEST_SIZE ThrottleType = 2 + ThrottleType_WRITE_NUMBER ThrottleType = 3 + ThrottleType_WRITE_SIZE ThrottleType = 4 + ThrottleType_READ_NUMBER ThrottleType = 5 + ThrottleType_READ_SIZE ThrottleType = 6 +) + +var ThrottleType_name = map[int32]string{ + 1: "REQUEST_NUMBER", + 2: "REQUEST_SIZE", + 3: "WRITE_NUMBER", + 4: "WRITE_SIZE", + 5: "READ_NUMBER", + 6: "READ_SIZE", +} +var ThrottleType_value = map[string]int32{ + "REQUEST_NUMBER": 1, + "REQUEST_SIZE": 2, + "WRITE_NUMBER": 3, + "WRITE_SIZE": 4, + "READ_NUMBER": 5, + "READ_SIZE": 6, +} + +func (x ThrottleType) Enum() *ThrottleType { + p := new(ThrottleType) + *p = x + return p +} +func (x ThrottleType) String() string { + return proto.EnumName(ThrottleType_name, int32(x)) +} +func (x *ThrottleType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ThrottleType_value, data, "ThrottleType") + if err != nil { + return err + } + *x = ThrottleType(value) + return nil +} + +type QuotaType int32 + +const ( + QuotaType_THROTTLE QuotaType = 1 +) + +var QuotaType_name = map[int32]string{ + 1: "THROTTLE", +} +var QuotaType_value = map[string]int32{ + "THROTTLE": 1, +} + +func (x QuotaType) Enum() *QuotaType { + p := new(QuotaType) + *p = x + return p +} +func (x QuotaType) String() string { + return proto.EnumName(QuotaType_name, int32(x)) +} +func (x *QuotaType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(QuotaType_value, data, "QuotaType") + if err != nil { + return err + } + *x = QuotaType(value) + return nil +} + +type TimedQuota struct { + TimeUnit *TimeUnit 
`protobuf:"varint,1,req,name=time_unit,enum=pb.TimeUnit" json:"time_unit,omitempty"` + SoftLimit *uint64 `protobuf:"varint,2,opt,name=soft_limit" json:"soft_limit,omitempty"` + Share *float32 `protobuf:"fixed32,3,opt,name=share" json:"share,omitempty"` + Scope *QuotaScope `protobuf:"varint,4,opt,name=scope,enum=pb.QuotaScope,def=2" json:"scope,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TimedQuota) Reset() { *m = TimedQuota{} } +func (m *TimedQuota) String() string { return proto.CompactTextString(m) } +func (*TimedQuota) ProtoMessage() {} + +const Default_TimedQuota_Scope QuotaScope = QuotaScope_MACHINE + +func (m *TimedQuota) GetTimeUnit() TimeUnit { + if m != nil && m.TimeUnit != nil { + return *m.TimeUnit + } + return TimeUnit_NANOSECONDS +} + +func (m *TimedQuota) GetSoftLimit() uint64 { + if m != nil && m.SoftLimit != nil { + return *m.SoftLimit + } + return 0 +} + +func (m *TimedQuota) GetShare() float32 { + if m != nil && m.Share != nil { + return *m.Share + } + return 0 +} + +func (m *TimedQuota) GetScope() QuotaScope { + if m != nil && m.Scope != nil { + return *m.Scope + } + return Default_TimedQuota_Scope +} + +type Throttle struct { + ReqNum *TimedQuota `protobuf:"bytes,1,opt,name=req_num" json:"req_num,omitempty"` + ReqSize *TimedQuota `protobuf:"bytes,2,opt,name=req_size" json:"req_size,omitempty"` + WriteNum *TimedQuota `protobuf:"bytes,3,opt,name=write_num" json:"write_num,omitempty"` + WriteSize *TimedQuota `protobuf:"bytes,4,opt,name=write_size" json:"write_size,omitempty"` + ReadNum *TimedQuota `protobuf:"bytes,5,opt,name=read_num" json:"read_num,omitempty"` + ReadSize *TimedQuota `protobuf:"bytes,6,opt,name=read_size" json:"read_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Throttle) Reset() { *m = Throttle{} } +func (m *Throttle) String() string { return proto.CompactTextString(m) } +func (*Throttle) ProtoMessage() {} + +func (m *Throttle) GetReqNum() *TimedQuota { + if m != nil { + return m.ReqNum + } + return nil +} + +func (m *Throttle) GetReqSize() *TimedQuota { + if m != nil { + return m.ReqSize + } + return nil +} + +func (m *Throttle) GetWriteNum() *TimedQuota { + if m != nil { + return m.WriteNum + } + return nil +} + +func (m *Throttle) GetWriteSize() *TimedQuota { + if m != nil { + return m.WriteSize + } + return nil +} + +func (m *Throttle) GetReadNum() *TimedQuota { + if m != nil { + return m.ReadNum + } + return nil +} + +func (m *Throttle) GetReadSize() *TimedQuota { + if m != nil { + return m.ReadSize + } + return nil +} + +type ThrottleRequest struct { + Type *ThrottleType `protobuf:"varint,1,opt,name=type,enum=pb.ThrottleType" json:"type,omitempty"` + TimedQuota *TimedQuota `protobuf:"bytes,2,opt,name=timed_quota" json:"timed_quota,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ThrottleRequest) Reset() { *m = ThrottleRequest{} } +func (m *ThrottleRequest) String() string { return proto.CompactTextString(m) } +func (*ThrottleRequest) ProtoMessage() {} + +func (m *ThrottleRequest) GetType() ThrottleType { + if m != nil && m.Type != nil { + return *m.Type + } + return ThrottleType_REQUEST_NUMBER +} + +func (m *ThrottleRequest) GetTimedQuota() *TimedQuota { + if m != nil { + return m.TimedQuota + } + return nil +} + +type Quotas struct { + BypassGlobals *bool `protobuf:"varint,1,opt,name=bypass_globals,def=0" json:"bypass_globals,omitempty"` + Throttle *Throttle `protobuf:"bytes,2,opt,name=throttle" json:"throttle,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Quotas) Reset() { 
*m = Quotas{} } +func (m *Quotas) String() string { return proto.CompactTextString(m) } +func (*Quotas) ProtoMessage() {} + +const Default_Quotas_BypassGlobals bool = false + +func (m *Quotas) GetBypassGlobals() bool { + if m != nil && m.BypassGlobals != nil { + return *m.BypassGlobals + } + return Default_Quotas_BypassGlobals +} + +func (m *Quotas) GetThrottle() *Throttle { + if m != nil { + return m.Throttle + } + return nil +} + +type QuotaUsage struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *QuotaUsage) Reset() { *m = QuotaUsage{} } +func (m *QuotaUsage) String() string { return proto.CompactTextString(m) } +func (*QuotaUsage) ProtoMessage() {} + +func init() { + proto.RegisterEnum("pb.QuotaScope", QuotaScope_name, QuotaScope_value) + proto.RegisterEnum("pb.ThrottleType", ThrottleType_name, ThrottleType_value) + proto.RegisterEnum("pb.QuotaType", QuotaType_name, QuotaType_value) +} diff --git a/libs/gohbase/pb/Quota.proto b/libs/gohbase/pb/Quota.proto new file mode 100644 index 0000000..a0b70ae --- /dev/null +++ b/libs/gohbase/pb/Quota.proto @@ -0,0 +1,74 @@ + /** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "QuotaProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; + +enum QuotaScope { + CLUSTER = 1; + MACHINE = 2; +} + +message TimedQuota { + required TimeUnit time_unit = 1; + optional uint64 soft_limit = 2; + optional float share = 3; + optional QuotaScope scope = 4 [default = MACHINE]; +} + +enum ThrottleType { + REQUEST_NUMBER = 1; + REQUEST_SIZE = 2; + WRITE_NUMBER = 3; + WRITE_SIZE = 4; + READ_NUMBER = 5; + READ_SIZE = 6; +} + +message Throttle { + optional TimedQuota req_num = 1; + optional TimedQuota req_size = 2; + + optional TimedQuota write_num = 3; + optional TimedQuota write_size = 4; + + optional TimedQuota read_num = 5; + optional TimedQuota read_size = 6; +} + +message ThrottleRequest { + optional ThrottleType type = 1; + optional TimedQuota timed_quota = 2; +} + +enum QuotaType { + THROTTLE = 1; +} + +message Quotas { + optional bool bypass_globals = 1 [default = false]; + optional Throttle throttle = 2; +} + +message QuotaUsage { +} diff --git a/libs/gohbase/pb/README.txt b/libs/gohbase/pb/README.txt new file mode 100644 index 0000000..1322fcb --- /dev/null +++ b/libs/gohbase/pb/README.txt @@ -0,0 +1,8 @@ +These are the protobuf definition files used by GoHBase. +They were copied from HBase (see under hbase-protocol/src/main/protobuf). + +The following changes were made to those files: + - the package name was changed to "pb". 
+ +The files in this directory are also subject to the Apache License 2.0 and +are copyright of the Apache Software Foundation. diff --git a/libs/gohbase/pb/RPC.pb.go b/libs/gohbase/pb/RPC.pb.go new file mode 100644 index 0000000..056f997 --- /dev/null +++ b/libs/gohbase/pb/RPC.pb.go @@ -0,0 +1,319 @@ +// Code generated by protoc-gen-go. +// source: RPC.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// User Information proto. Included in ConnectionHeader on connection setup +type UserInformation struct { + EffectiveUser *string `protobuf:"bytes,1,req,name=effective_user" json:"effective_user,omitempty"` + RealUser *string `protobuf:"bytes,2,opt,name=real_user" json:"real_user,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserInformation) Reset() { *m = UserInformation{} } +func (m *UserInformation) String() string { return proto.CompactTextString(m) } +func (*UserInformation) ProtoMessage() {} + +func (m *UserInformation) GetEffectiveUser() string { + if m != nil && m.EffectiveUser != nil { + return *m.EffectiveUser + } + return "" +} + +func (m *UserInformation) GetRealUser() string { + if m != nil && m.RealUser != nil { + return *m.RealUser + } + return "" +} + +// Rpc client version info proto. Included in ConnectionHeader on connection setup +type VersionInfo struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + Url *string `protobuf:"bytes,2,req,name=url" json:"url,omitempty"` + Revision *string `protobuf:"bytes,3,req,name=revision" json:"revision,omitempty"` + User *string `protobuf:"bytes,4,req,name=user" json:"user,omitempty"` + Date *string `protobuf:"bytes,5,req,name=date" json:"date,omitempty"` + SrcChecksum *string `protobuf:"bytes,6,req,name=src_checksum" json:"src_checksum,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *VersionInfo) Reset() { *m = VersionInfo{} } +func (m *VersionInfo) String() string { return proto.CompactTextString(m) } +func (*VersionInfo) ProtoMessage() {} + +func (m *VersionInfo) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *VersionInfo) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +func (m *VersionInfo) GetRevision() string { + if m != nil && m.Revision != nil { + return *m.Revision + } + return "" +} + +func (m *VersionInfo) GetUser() string { + if m != nil && m.User != nil { + return *m.User + } + return "" +} + +func (m *VersionInfo) GetDate() string { + if m != nil && m.Date != nil { + return *m.Date + } + return "" +} + +func (m *VersionInfo) GetSrcChecksum() string { + if m != nil && m.SrcChecksum != nil { + return *m.SrcChecksum + } + return "" +} + +// This is sent on connection setup after the connection preamble is sent. +type ConnectionHeader struct { + UserInfo *UserInformation `protobuf:"bytes,1,opt,name=user_info" json:"user_info,omitempty"` + ServiceName *string `protobuf:"bytes,2,opt,name=service_name" json:"service_name,omitempty"` + // Cell block codec we will use sending over optional cell blocks. Server throws exception + // if cannot deal. Null means no codec'ing going on so we are pb all the time (SLOW!!!) 
+ CellBlockCodecClass *string `protobuf:"bytes,3,opt,name=cell_block_codec_class" json:"cell_block_codec_class,omitempty"` + // Compressor we will use if cell block is compressed. Server will throw exception if not supported. + // Class must implement hadoop's CompressionCodec Interface. Can't compress if no codec. + CellBlockCompressorClass *string `protobuf:"bytes,4,opt,name=cell_block_compressor_class" json:"cell_block_compressor_class,omitempty"` + VersionInfo *VersionInfo `protobuf:"bytes,5,opt,name=version_info" json:"version_info,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConnectionHeader) Reset() { *m = ConnectionHeader{} } +func (m *ConnectionHeader) String() string { return proto.CompactTextString(m) } +func (*ConnectionHeader) ProtoMessage() {} + +func (m *ConnectionHeader) GetUserInfo() *UserInformation { + if m != nil { + return m.UserInfo + } + return nil +} + +func (m *ConnectionHeader) GetServiceName() string { + if m != nil && m.ServiceName != nil { + return *m.ServiceName + } + return "" +} + +func (m *ConnectionHeader) GetCellBlockCodecClass() string { + if m != nil && m.CellBlockCodecClass != nil { + return *m.CellBlockCodecClass + } + return "" +} + +func (m *ConnectionHeader) GetCellBlockCompressorClass() string { + if m != nil && m.CellBlockCompressorClass != nil { + return *m.CellBlockCompressorClass + } + return "" +} + +func (m *ConnectionHeader) GetVersionInfo() *VersionInfo { + if m != nil { + return m.VersionInfo + } + return nil +} + +// Optional Cell block Message. Included in client RequestHeader +type CellBlockMeta struct { + // Length of the following cell block. Could calculate it but convenient having it too hand. + Length *uint32 `protobuf:"varint,1,opt,name=length" json:"length,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CellBlockMeta) Reset() { *m = CellBlockMeta{} } +func (m *CellBlockMeta) String() string { return proto.CompactTextString(m) } +func (*CellBlockMeta) ProtoMessage() {} + +func (m *CellBlockMeta) GetLength() uint32 { + if m != nil && m.Length != nil { + return *m.Length + } + return 0 +} + +// At the RPC layer, this message is used to carry +// the server side exception to the RPC client. +type ExceptionResponse struct { + // Class name of the exception thrown from the server + ExceptionClassName *string `protobuf:"bytes,1,opt,name=exception_class_name" json:"exception_class_name,omitempty"` + // Exception stack trace from the server side + StackTrace *string `protobuf:"bytes,2,opt,name=stack_trace" json:"stack_trace,omitempty"` + // Optional hostname. Filled in for some exceptions such as region moved + // where exception gives clue on where the region may have moved. 
+ Hostname *string `protobuf:"bytes,3,opt,name=hostname" json:"hostname,omitempty"` + Port *int32 `protobuf:"varint,4,opt,name=port" json:"port,omitempty"` + // Set if we are NOT to retry on receipt of this exception + DoNotRetry *bool `protobuf:"varint,5,opt,name=do_not_retry" json:"do_not_retry,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExceptionResponse) Reset() { *m = ExceptionResponse{} } +func (m *ExceptionResponse) String() string { return proto.CompactTextString(m) } +func (*ExceptionResponse) ProtoMessage() {} + +func (m *ExceptionResponse) GetExceptionClassName() string { + if m != nil && m.ExceptionClassName != nil { + return *m.ExceptionClassName + } + return "" +} + +func (m *ExceptionResponse) GetStackTrace() string { + if m != nil && m.StackTrace != nil { + return *m.StackTrace + } + return "" +} + +func (m *ExceptionResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func (m *ExceptionResponse) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *ExceptionResponse) GetDoNotRetry() bool { + if m != nil && m.DoNotRetry != nil { + return *m.DoNotRetry + } + return false +} + +// Header sent making a request. +type RequestHeader struct { + // Monotonically increasing call_id to keep track of RPC requests and their response + CallId *uint32 `protobuf:"varint,1,opt,name=call_id" json:"call_id,omitempty"` + TraceInfo *RPCTInfo `protobuf:"bytes,2,opt,name=trace_info" json:"trace_info,omitempty"` + MethodName *string `protobuf:"bytes,3,opt,name=method_name" json:"method_name,omitempty"` + // If true, then a pb Message param follows. + RequestParam *bool `protobuf:"varint,4,opt,name=request_param" json:"request_param,omitempty"` + // If present, then an encoded data block follows. + CellBlockMeta *CellBlockMeta `protobuf:"bytes,5,opt,name=cell_block_meta" json:"cell_block_meta,omitempty"` + // 0 is NORMAL priority. 200 is HIGH. If no priority, treat it as NORMAL. + // See HConstants. + Priority *uint32 `protobuf:"varint,6,opt,name=priority" json:"priority,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} + +func (m *RequestHeader) GetCallId() uint32 { + if m != nil && m.CallId != nil { + return *m.CallId + } + return 0 +} + +func (m *RequestHeader) GetTraceInfo() *RPCTInfo { + if m != nil { + return m.TraceInfo + } + return nil +} + +func (m *RequestHeader) GetMethodName() string { + if m != nil && m.MethodName != nil { + return *m.MethodName + } + return "" +} + +func (m *RequestHeader) GetRequestParam() bool { + if m != nil && m.RequestParam != nil { + return *m.RequestParam + } + return false +} + +func (m *RequestHeader) GetCellBlockMeta() *CellBlockMeta { + if m != nil { + return m.CellBlockMeta + } + return nil +} + +func (m *RequestHeader) GetPriority() uint32 { + if m != nil && m.Priority != nil { + return *m.Priority + } + return 0 +} + +type ResponseHeader struct { + CallId *uint32 `protobuf:"varint,1,opt,name=call_id" json:"call_id,omitempty"` + // If present, then request threw an exception and no response message (else we presume one) + Exception *ExceptionResponse `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` + // If present, then an encoded data block follows. 
+ CellBlockMeta *CellBlockMeta `protobuf:"bytes,3,opt,name=cell_block_meta" json:"cell_block_meta,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} + +func (m *ResponseHeader) GetCallId() uint32 { + if m != nil && m.CallId != nil { + return *m.CallId + } + return 0 +} + +func (m *ResponseHeader) GetException() *ExceptionResponse { + if m != nil { + return m.Exception + } + return nil +} + +func (m *ResponseHeader) GetCellBlockMeta() *CellBlockMeta { + if m != nil { + return m.CellBlockMeta + } + return nil +} + +func init() { +} diff --git a/libs/gohbase/pb/RPC.proto b/libs/gohbase/pb/RPC.proto new file mode 100644 index 0000000..4e29d5b --- /dev/null +++ b/libs/gohbase/pb/RPC.proto @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import "Tracing.proto"; +import "HBase.proto"; + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "RPCProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +// See https://issues.apache.org/jira/browse/HBASE-7898 for high-level +// description of RPC specification. +// +// On connection setup, the client sends six bytes of preamble -- a four +// byte magic, a byte of version, and a byte of authentication type. +// +// We then send a "ConnectionHeader" protobuf of user information and the +// 'protocol' or 'service' that is to be run over this connection as well as +// info such as codecs and compression to use when we send cell blocks(see below). +// This connection header protobuf is prefaced by an int that holds the length +// of this connection header (this is NOT a varint). The pb connection header +// is sent with Message#writeTo. The server throws an exception if it doesn't +// like what it was sent noting what it is objecting too. Otherwise, the server +// says nothing and is open for business. +// +// Hereafter the client makes requests and the server returns responses. +// +// Requests look like this: +// +// +// +// +// +// +// ...where the Request Parameter Message is whatever the method name stipulated +// in the RequestHeader expects; e.g. if the method is a scan, then the pb +// Request Message is a GetRequest, or a ScanRequest. A block of Cells +// optionally follows. The presence of a Request param Message and/or a +// block of Cells will be noted in the RequestHeader. 
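+//
+// As a concrete illustration (this matches what the region client added in
+// this change does in its sendHello and sendRPC functions): the preamble it
+// writes on connect is the six bytes "HBas" 0x00 0x50 -- the magic "HBas",
+// version 0, and 0x50 for simple authentication -- followed by a 4-byte
+// big-endian length and the serialized ConnectionHeader. Each later request
+// is framed as a 4-byte big-endian total length, a varint-delimited
+// RequestHeader and then the varint-delimited request parameter Message.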
+// +// Response is the mirror of the request: +// +// +// +// +// +// +// ...where the Response Message is the response type that goes with the +// method specified when making the request and the follow on Cell blocks may +// or may not be there -- read the response header to find out if one following. +// If an exception, it will be included inside the Response Header. +// +// Any time we write a pb, we do it with Message#writeDelimitedTo EXCEPT when +// the connection header is sent; this is prefaced by an int with its length +// and the pb connection header is then written with Message#writeTo. +// + +// User Information proto. Included in ConnectionHeader on connection setup +message UserInformation { + required string effective_user = 1; + optional string real_user = 2; +} + +// Rpc client version info proto. Included in ConnectionHeader on connection setup +message VersionInfo { + required string version = 1; + required string url = 2; + required string revision = 3; + required string user = 4; + required string date = 5; + required string src_checksum = 6; +} + +// This is sent on connection setup after the connection preamble is sent. +message ConnectionHeader { + optional UserInformation user_info = 1; + optional string service_name = 2; + // Cell block codec we will use sending over optional cell blocks. Server throws exception + // if cannot deal. Null means no codec'ing going on so we are pb all the time (SLOW!!!) + optional string cell_block_codec_class = 3; + // Compressor we will use if cell block is compressed. Server will throw exception if not supported. + // Class must implement hadoop's CompressionCodec Interface. Can't compress if no codec. + optional string cell_block_compressor_class = 4; + optional VersionInfo version_info = 5; +} + +// Optional Cell block Message. Included in client RequestHeader +message CellBlockMeta { + // Length of the following cell block. Could calculate it but convenient having it too hand. + optional uint32 length = 1; +} + +// At the RPC layer, this message is used to carry +// the server side exception to the RPC client. +message ExceptionResponse { + // Class name of the exception thrown from the server + optional string exception_class_name = 1; + // Exception stack trace from the server side + optional string stack_trace = 2; + // Optional hostname. Filled in for some exceptions such as region moved + // where exception gives clue on where the region may have moved. + optional string hostname = 3; + optional int32 port = 4; + // Set if we are NOT to retry on receipt of this exception + optional bool do_not_retry = 5; +} + +// Header sent making a request. +message RequestHeader { + // Monotonically increasing call_id to keep track of RPC requests and their response + optional uint32 call_id = 1; + optional RPCTInfo trace_info = 2; + optional string method_name = 3; + // If true, then a pb Message param follows. + optional bool request_param = 4; + // If present, then an encoded data block follows. + optional CellBlockMeta cell_block_meta = 5; + // 0 is NORMAL priority. 200 is HIGH. If no priority, treat it as NORMAL. + // See HConstants. + optional uint32 priority = 6; +} + +message ResponseHeader { + optional uint32 call_id = 1; + // If present, then request threw an exception and no response message (else we presume one) + optional ExceptionResponse exception = 2; + // If present, then an encoded data block follows. 
+ optional CellBlockMeta cell_block_meta = 3; +} diff --git a/libs/gohbase/pb/Tracing.pb.go b/libs/gohbase/pb/Tracing.pb.go new file mode 100644 index 0000000..ed0ece7 --- /dev/null +++ b/libs/gohbase/pb/Tracing.pb.go @@ -0,0 +1,44 @@ +// Code generated by protoc-gen-go. +// source: Tracing.proto +// DO NOT EDIT! + +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// Used to pass through the information necessary to continue +// a trace after an RPC is made. All we need is the traceid +// (so we know the overarching trace this message is a part of), and +// the id of the current span when this message was sent, so we know +// what span caused the new span we will create when this message is received. +type RPCTInfo struct { + TraceId *int64 `protobuf:"varint,1,opt,name=trace_id" json:"trace_id,omitempty"` + ParentId *int64 `protobuf:"varint,2,opt,name=parent_id" json:"parent_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RPCTInfo) Reset() { *m = RPCTInfo{} } +func (m *RPCTInfo) String() string { return proto.CompactTextString(m) } +func (*RPCTInfo) ProtoMessage() {} + +func (m *RPCTInfo) GetTraceId() int64 { + if m != nil && m.TraceId != nil { + return *m.TraceId + } + return 0 +} + +func (m *RPCTInfo) GetParentId() int64 { + if m != nil && m.ParentId != nil { + return *m.ParentId + } + return 0 +} + +func init() { +} diff --git a/libs/gohbase/pb/Tracing.proto b/libs/gohbase/pb/Tracing.proto new file mode 100644 index 0000000..92c6057 --- /dev/null +++ b/libs/gohbase/pb/Tracing.proto @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "TracingProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +//Used to pass through the information necessary to continue +//a trace after an RPC is made. All we need is the traceid +//(so we know the overarching trace this message is a part of), and +//the id of the current span when this message was sent, so we know +//what span caused the new span we will create when this message is received. +message RPCTInfo { + optional int64 trace_id = 1; + optional int64 parent_id = 2; +} diff --git a/libs/gohbase/pb/ZooKeeper.pb.go b/libs/gohbase/pb/ZooKeeper.pb.go new file mode 100644 index 0000000..c1c8597 --- /dev/null +++ b/libs/gohbase/pb/ZooKeeper.pb.go @@ -0,0 +1,536 @@ +// Code generated by protoc-gen-go. +// source: ZooKeeper.proto +// DO NOT EDIT! 
+ +package pb + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type SplitLogTask_State int32 + +const ( + SplitLogTask_UNASSIGNED SplitLogTask_State = 0 + SplitLogTask_OWNED SplitLogTask_State = 1 + SplitLogTask_RESIGNED SplitLogTask_State = 2 + SplitLogTask_DONE SplitLogTask_State = 3 + SplitLogTask_ERR SplitLogTask_State = 4 +) + +var SplitLogTask_State_name = map[int32]string{ + 0: "UNASSIGNED", + 1: "OWNED", + 2: "RESIGNED", + 3: "DONE", + 4: "ERR", +} +var SplitLogTask_State_value = map[string]int32{ + "UNASSIGNED": 0, + "OWNED": 1, + "RESIGNED": 2, + "DONE": 3, + "ERR": 4, +} + +func (x SplitLogTask_State) Enum() *SplitLogTask_State { + p := new(SplitLogTask_State) + *p = x + return p +} +func (x SplitLogTask_State) String() string { + return proto.EnumName(SplitLogTask_State_name, int32(x)) +} +func (x *SplitLogTask_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SplitLogTask_State_value, data, "SplitLogTask_State") + if err != nil { + return err + } + *x = SplitLogTask_State(value) + return nil +} + +type SplitLogTask_RecoveryMode int32 + +const ( + SplitLogTask_UNKNOWN SplitLogTask_RecoveryMode = 0 + SplitLogTask_LOG_SPLITTING SplitLogTask_RecoveryMode = 1 + SplitLogTask_LOG_REPLAY SplitLogTask_RecoveryMode = 2 +) + +var SplitLogTask_RecoveryMode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "LOG_SPLITTING", + 2: "LOG_REPLAY", +} +var SplitLogTask_RecoveryMode_value = map[string]int32{ + "UNKNOWN": 0, + "LOG_SPLITTING": 1, + "LOG_REPLAY": 2, +} + +func (x SplitLogTask_RecoveryMode) Enum() *SplitLogTask_RecoveryMode { + p := new(SplitLogTask_RecoveryMode) + *p = x + return p +} +func (x SplitLogTask_RecoveryMode) String() string { + return proto.EnumName(SplitLogTask_RecoveryMode_name, int32(x)) +} +func (x *SplitLogTask_RecoveryMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SplitLogTask_RecoveryMode_value, data, "SplitLogTask_RecoveryMode") + if err != nil { + return err + } + *x = SplitLogTask_RecoveryMode(value) + return nil +} + +// Table's current state +type Table_State int32 + +const ( + Table_ENABLED Table_State = 0 + Table_DISABLED Table_State = 1 + Table_DISABLING Table_State = 2 + Table_ENABLING Table_State = 3 +) + +var Table_State_name = map[int32]string{ + 0: "ENABLED", + 1: "DISABLED", + 2: "DISABLING", + 3: "ENABLING", +} +var Table_State_value = map[string]int32{ + "ENABLED": 0, + "DISABLED": 1, + "DISABLING": 2, + "ENABLING": 3, +} + +func (x Table_State) Enum() *Table_State { + p := new(Table_State) + *p = x + return p +} +func (x Table_State) String() string { + return proto.EnumName(Table_State_name, int32(x)) +} +func (x *Table_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Table_State_value, data, "Table_State") + if err != nil { + return err + } + *x = Table_State(value) + return nil +} + +type ReplicationState_State int32 + +const ( + ReplicationState_ENABLED ReplicationState_State = 0 + ReplicationState_DISABLED ReplicationState_State = 1 +) + +var ReplicationState_State_name = map[int32]string{ + 0: "ENABLED", + 1: "DISABLED", +} +var ReplicationState_State_value = map[string]int32{ + "ENABLED": 0, + "DISABLED": 1, +} + +func (x ReplicationState_State) Enum() *ReplicationState_State { + p := new(ReplicationState_State) + *p = x + return p +} +func (x ReplicationState_State) String() string { + return 
proto.EnumName(ReplicationState_State_name, int32(x)) +} +func (x *ReplicationState_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReplicationState_State_value, data, "ReplicationState_State") + if err != nil { + return err + } + *x = ReplicationState_State(value) + return nil +} + +// * +// Content of the meta-region-server znode. +type MetaRegionServer struct { + // The ServerName hosting the meta region currently, or destination server, + // if meta region is in transition. + Server *ServerName `protobuf:"bytes,1,req,name=server" json:"server,omitempty"` + // The major version of the rpc the server speaks. This is used so that + // clients connecting to the cluster can have prior knowledge of what version + // to send to a RegionServer. AsyncHBase will use this to detect versions. + RpcVersion *uint32 `protobuf:"varint,2,opt,name=rpc_version" json:"rpc_version,omitempty"` + // State of the region transition. OPEN means fully operational 'hbase:meta' + State *RegionState_State `protobuf:"varint,3,opt,name=state,enum=pb.RegionState_State" json:"state,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MetaRegionServer) Reset() { *m = MetaRegionServer{} } +func (m *MetaRegionServer) String() string { return proto.CompactTextString(m) } +func (*MetaRegionServer) ProtoMessage() {} + +func (m *MetaRegionServer) GetServer() *ServerName { + if m != nil { + return m.Server + } + return nil +} + +func (m *MetaRegionServer) GetRpcVersion() uint32 { + if m != nil && m.RpcVersion != nil { + return *m.RpcVersion + } + return 0 +} + +func (m *MetaRegionServer) GetState() RegionState_State { + if m != nil && m.State != nil { + return *m.State + } + return RegionState_OFFLINE +} + +// * +// Content of the master znode. +type Master struct { + // The ServerName of the current Master + Master *ServerName `protobuf:"bytes,1,req,name=master" json:"master,omitempty"` + // Major RPC version so that clients can know what version the master can accept. + RpcVersion *uint32 `protobuf:"varint,2,opt,name=rpc_version" json:"rpc_version,omitempty"` + InfoPort *uint32 `protobuf:"varint,3,opt,name=info_port" json:"info_port,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Master) Reset() { *m = Master{} } +func (m *Master) String() string { return proto.CompactTextString(m) } +func (*Master) ProtoMessage() {} + +func (m *Master) GetMaster() *ServerName { + if m != nil { + return m.Master + } + return nil +} + +func (m *Master) GetRpcVersion() uint32 { + if m != nil && m.RpcVersion != nil { + return *m.RpcVersion + } + return 0 +} + +func (m *Master) GetInfoPort() uint32 { + if m != nil && m.InfoPort != nil { + return *m.InfoPort + } + return 0 +} + +// * +// Content of the '/hbase/running', cluster state, znode. +type ClusterUp struct { + // If this znode is present, cluster is up. Currently + // the data is cluster start_date. + StartDate *string `protobuf:"bytes,1,req,name=start_date" json:"start_date,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ClusterUp) Reset() { *m = ClusterUp{} } +func (m *ClusterUp) String() string { return proto.CompactTextString(m) } +func (*ClusterUp) ProtoMessage() {} + +func (m *ClusterUp) GetStartDate() string { + if m != nil && m.StartDate != nil { + return *m.StartDate + } + return "" +} + +// * +// What we write under unassigned up in zookeeper as a region moves through +// open/close, etc., regions. Details a region in transition. 
+type RegionTransition struct { + // Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode() + EventTypeCode *uint32 `protobuf:"varint,1,req,name=event_type_code" json:"event_type_code,omitempty"` + // Full regionname in bytes + RegionName []byte `protobuf:"bytes,2,req,name=region_name" json:"region_name,omitempty"` + CreateTime *uint64 `protobuf:"varint,3,req,name=create_time" json:"create_time,omitempty"` + // The region server where the transition will happen or is happening + ServerName *ServerName `protobuf:"bytes,4,req,name=server_name" json:"server_name,omitempty"` + Payload []byte `protobuf:"bytes,5,opt,name=payload" json:"payload,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RegionTransition) Reset() { *m = RegionTransition{} } +func (m *RegionTransition) String() string { return proto.CompactTextString(m) } +func (*RegionTransition) ProtoMessage() {} + +func (m *RegionTransition) GetEventTypeCode() uint32 { + if m != nil && m.EventTypeCode != nil { + return *m.EventTypeCode + } + return 0 +} + +func (m *RegionTransition) GetRegionName() []byte { + if m != nil { + return m.RegionName + } + return nil +} + +func (m *RegionTransition) GetCreateTime() uint64 { + if m != nil && m.CreateTime != nil { + return *m.CreateTime + } + return 0 +} + +func (m *RegionTransition) GetServerName() *ServerName { + if m != nil { + return m.ServerName + } + return nil +} + +func (m *RegionTransition) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +// * +// WAL SplitLog directory znodes have this for content. Used doing distributed +// WAL splitting. Holds current state and name of server that originated split. +type SplitLogTask struct { + State *SplitLogTask_State `protobuf:"varint,1,req,name=state,enum=pb.SplitLogTask_State" json:"state,omitempty"` + ServerName *ServerName `protobuf:"bytes,2,req,name=server_name" json:"server_name,omitempty"` + Mode *SplitLogTask_RecoveryMode `protobuf:"varint,3,opt,name=mode,enum=pb.SplitLogTask_RecoveryMode,def=0" json:"mode,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SplitLogTask) Reset() { *m = SplitLogTask{} } +func (m *SplitLogTask) String() string { return proto.CompactTextString(m) } +func (*SplitLogTask) ProtoMessage() {} + +const Default_SplitLogTask_Mode SplitLogTask_RecoveryMode = SplitLogTask_UNKNOWN + +func (m *SplitLogTask) GetState() SplitLogTask_State { + if m != nil && m.State != nil { + return *m.State + } + return SplitLogTask_UNASSIGNED +} + +func (m *SplitLogTask) GetServerName() *ServerName { + if m != nil { + return m.ServerName + } + return nil +} + +func (m *SplitLogTask) GetMode() SplitLogTask_RecoveryMode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_SplitLogTask_Mode +} + +// * +// The znode that holds state of table. +type Table struct { + // This is the table's state. If no znode for a table, + // its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class + // for more. + State *Table_State `protobuf:"varint,1,req,name=state,enum=pb.Table_State,def=0" json:"state,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Table) Reset() { *m = Table{} } +func (m *Table) String() string { return proto.CompactTextString(m) } +func (*Table) ProtoMessage() {} + +const Default_Table_State Table_State = Table_ENABLED + +func (m *Table) GetState() Table_State { + if m != nil && m.State != nil { + return *m.State + } + return Default_Table_State +} + +// * +// Used by replication. Holds a replication peer key. 
+type ReplicationPeer struct { + // clusterkey is the concatenation of the slave cluster's + // hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent + Clusterkey *string `protobuf:"bytes,1,req,name=clusterkey" json:"clusterkey,omitempty"` + ReplicationEndpointImpl *string `protobuf:"bytes,2,opt,name=replicationEndpointImpl" json:"replicationEndpointImpl,omitempty"` + Data []*BytesBytesPair `protobuf:"bytes,3,rep,name=data" json:"data,omitempty"` + Configuration []*NameStringPair `protobuf:"bytes,4,rep,name=configuration" json:"configuration,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplicationPeer) Reset() { *m = ReplicationPeer{} } +func (m *ReplicationPeer) String() string { return proto.CompactTextString(m) } +func (*ReplicationPeer) ProtoMessage() {} + +func (m *ReplicationPeer) GetClusterkey() string { + if m != nil && m.Clusterkey != nil { + return *m.Clusterkey + } + return "" +} + +func (m *ReplicationPeer) GetReplicationEndpointImpl() string { + if m != nil && m.ReplicationEndpointImpl != nil { + return *m.ReplicationEndpointImpl + } + return "" +} + +func (m *ReplicationPeer) GetData() []*BytesBytesPair { + if m != nil { + return m.Data + } + return nil +} + +func (m *ReplicationPeer) GetConfiguration() []*NameStringPair { + if m != nil { + return m.Configuration + } + return nil +} + +// * +// Used by replication. Holds whether enabled or disabled +type ReplicationState struct { + State *ReplicationState_State `protobuf:"varint,1,req,name=state,enum=pb.ReplicationState_State" json:"state,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplicationState) Reset() { *m = ReplicationState{} } +func (m *ReplicationState) String() string { return proto.CompactTextString(m) } +func (*ReplicationState) ProtoMessage() {} + +func (m *ReplicationState) GetState() ReplicationState_State { + if m != nil && m.State != nil { + return *m.State + } + return ReplicationState_ENABLED +} + +// * +// Used by replication. Holds the current position in an WAL file. +type ReplicationHLogPosition struct { + Position *int64 `protobuf:"varint,1,req,name=position" json:"position,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplicationHLogPosition) Reset() { *m = ReplicationHLogPosition{} } +func (m *ReplicationHLogPosition) String() string { return proto.CompactTextString(m) } +func (*ReplicationHLogPosition) ProtoMessage() {} + +func (m *ReplicationHLogPosition) GetPosition() int64 { + if m != nil && m.Position != nil { + return *m.Position + } + return 0 +} + +// * +// Used by replication. Used to lock a region server during failover. 
+type ReplicationLock struct { + LockOwner *string `protobuf:"bytes,1,req,name=lock_owner" json:"lock_owner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplicationLock) Reset() { *m = ReplicationLock{} } +func (m *ReplicationLock) String() string { return proto.CompactTextString(m) } +func (*ReplicationLock) ProtoMessage() {} + +func (m *ReplicationLock) GetLockOwner() string { + if m != nil && m.LockOwner != nil { + return *m.LockOwner + } + return "" +} + +// * +// Metadata associated with a table lock in zookeeper +type TableLock struct { + TableName *TableName `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + LockOwner *ServerName `protobuf:"bytes,2,opt,name=lock_owner" json:"lock_owner,omitempty"` + ThreadId *int64 `protobuf:"varint,3,opt,name=thread_id" json:"thread_id,omitempty"` + IsShared *bool `protobuf:"varint,4,opt,name=is_shared" json:"is_shared,omitempty"` + Purpose *string `protobuf:"bytes,5,opt,name=purpose" json:"purpose,omitempty"` + CreateTime *int64 `protobuf:"varint,6,opt,name=create_time" json:"create_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableLock) Reset() { *m = TableLock{} } +func (m *TableLock) String() string { return proto.CompactTextString(m) } +func (*TableLock) ProtoMessage() {} + +func (m *TableLock) GetTableName() *TableName { + if m != nil { + return m.TableName + } + return nil +} + +func (m *TableLock) GetLockOwner() *ServerName { + if m != nil { + return m.LockOwner + } + return nil +} + +func (m *TableLock) GetThreadId() int64 { + if m != nil && m.ThreadId != nil { + return *m.ThreadId + } + return 0 +} + +func (m *TableLock) GetIsShared() bool { + if m != nil && m.IsShared != nil { + return *m.IsShared + } + return false +} + +func (m *TableLock) GetPurpose() string { + if m != nil && m.Purpose != nil { + return *m.Purpose + } + return "" +} + +func (m *TableLock) GetCreateTime() int64 { + if m != nil && m.CreateTime != nil { + return *m.CreateTime + } + return 0 +} + +func init() { + proto.RegisterEnum("pb.SplitLogTask_State", SplitLogTask_State_name, SplitLogTask_State_value) + proto.RegisterEnum("pb.SplitLogTask_RecoveryMode", SplitLogTask_RecoveryMode_name, SplitLogTask_RecoveryMode_value) + proto.RegisterEnum("pb.Table_State", Table_State_name, Table_State_value) + proto.RegisterEnum("pb.ReplicationState_State", ReplicationState_State_name, ReplicationState_State_value) +} diff --git a/libs/gohbase/pb/ZooKeeper.proto b/libs/gohbase/pb/ZooKeeper.proto new file mode 100644 index 0000000..f5ef833 --- /dev/null +++ b/libs/gohbase/pb/ZooKeeper.proto @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// ZNode data in hbase are serialized protobufs with a four byte +// 'magic' 'PBUF' prefix. 
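+// The same 4-byte 'PBUF' magic is checked by InfoFromCell in
+// bfs/libs/gohbase/regioninfo (also added in this change) before it decodes a
+// RegionInfo from an hbase:meta cell: the bytes 'P' 'B' 'U' 'F' read as the
+// big-endian uint32 1346524486.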
+ +package pb; +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "ZooKeeperProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; +import "ClusterStatus.proto"; + +/** + * Content of the meta-region-server znode. + */ +message MetaRegionServer { + // The ServerName hosting the meta region currently, or destination server, + // if meta region is in transition. + required ServerName server = 1; + // The major version of the rpc the server speaks. This is used so that + // clients connecting to the cluster can have prior knowledge of what version + // to send to a RegionServer. AsyncHBase will use this to detect versions. + optional uint32 rpc_version = 2; + + // State of the region transition. OPEN means fully operational 'hbase:meta' + optional RegionState.State state = 3; +} + +/** + * Content of the master znode. + */ +message Master { + // The ServerName of the current Master + required ServerName master = 1; + // Major RPC version so that clients can know what version the master can accept. + optional uint32 rpc_version = 2; + optional uint32 info_port = 3; +} + +/** + * Content of the '/hbase/running', cluster state, znode. + */ +message ClusterUp { + // If this znode is present, cluster is up. Currently + // the data is cluster start_date. + required string start_date = 1; +} + +/** + * What we write under unassigned up in zookeeper as a region moves through + * open/close, etc., regions. Details a region in transition. + */ +message RegionTransition { + // Code for EventType gotten by doing o.a.h.h.EventHandler.EventType.getCode() + required uint32 event_type_code = 1; + // Full regionname in bytes + required bytes region_name = 2; + required uint64 create_time = 3; + // The region server where the transition will happen or is happening + required ServerName server_name = 4; + optional bytes payload = 5; +} + +/** + * WAL SplitLog directory znodes have this for content. Used doing distributed + * WAL splitting. Holds current state and name of server that originated split. + */ +message SplitLogTask { + enum State { + UNASSIGNED = 0; + OWNED = 1; + RESIGNED = 2; + DONE = 3; + ERR = 4; + } + enum RecoveryMode { + UNKNOWN = 0; + LOG_SPLITTING = 1; + LOG_REPLAY = 2; + } + required State state = 1; + required ServerName server_name = 2; + optional RecoveryMode mode = 3 [default = UNKNOWN]; +} + +/** + * The znode that holds state of table. + */ +message Table { + // Table's current state + enum State { + ENABLED = 0; + DISABLED = 1; + DISABLING = 2; + ENABLING = 3; + } + // This is the table's state. If no znode for a table, + // its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class + // for more. + required State state = 1 [default = ENABLED]; +} + +/** + * Used by replication. Holds a replication peer key. + */ +message ReplicationPeer { + // clusterkey is the concatenation of the slave cluster's + // hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent + required string clusterkey = 1; + optional string replicationEndpointImpl = 2; + repeated BytesBytesPair data = 3; + repeated NameStringPair configuration = 4; +} + +/** + * Used by replication. Holds whether enabled or disabled + */ +message ReplicationState { + enum State { + ENABLED = 0; + DISABLED = 1; + } + required State state = 1; +} + +/** + * Used by replication. Holds the current position in an WAL file. 
+ */ +message ReplicationHLogPosition { + required int64 position = 1; +} + +/** + * Used by replication. Used to lock a region server during failover. + */ +message ReplicationLock { + required string lock_owner = 1; +} + +/** + * Metadata associated with a table lock in zookeeper + */ +message TableLock { + optional TableName table_name = 1; + optional ServerName lock_owner = 2; + optional int64 thread_id = 3; + optional bool is_shared = 4; + optional string purpose = 5; + optional int64 create_time = 6; +} diff --git a/libs/gohbase/pb/generate.go b/libs/gohbase/pb/generate.go new file mode 100644 index 0000000..11411e7 --- /dev/null +++ b/libs/gohbase/pb/generate.go @@ -0,0 +1,8 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package pb + +//go:generate sh -c "protoc --go_out=. *.proto" diff --git a/libs/gohbase/pb/marshal.go b/libs/gohbase/pb/marshal.go new file mode 100644 index 0000000..129f28d --- /dev/null +++ b/libs/gohbase/pb/marshal.go @@ -0,0 +1,20 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package pb + +import ( + "github.com/golang/protobuf/proto" +) + +// MustMarshal is like proto.Marshal except it panic()'s if the protobuf +// couldn't be serialized. +func MustMarshal(pb proto.Message) []byte { + b, err := proto.Marshal(pb) + if err != nil { + panic(err) + } + return b +} diff --git a/libs/gohbase/region/client.go b/libs/gohbase/region/client.go new file mode 100644 index 0000000..8d44dcf --- /dev/null +++ b/libs/gohbase/region/client.go @@ -0,0 +1,466 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package region + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + "sync" + "time" + + // log "golang/log4go" + log "github.com/golang/glog" + + "bfs/libs/gohbase/hrpc" + "bfs/libs/gohbase/pb" + + "github.com/golang/protobuf/proto" +) + +// ClientType is a type alias to represent the type of this region client +type ClientType string + +var ( + // ErrShortWrite is used when the writer thread only succeeds in writing + // part of its buffer to the socket, and not all of the buffer was sent + ErrShortWrite = errors.New("short write occurred while writing to socket") + + // ErrMissingCallID is used when HBase sends us a response message for a + // request that we didn't send + ErrMissingCallID = errors.New("HBase responded to a nonsensical call ID") + + // javaRetryableExceptions is a map where all Java exceptions that signify + // the RPC should be sent again are listed (as keys). If a Java exception + // listed here is returned by HBase, the client should attempt to resend + // the RPC message, potentially via a different region client. 
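+	// receiveRpcs wraps errors whose exception class appears in this map in
+	// RetryableError, so callers can tell them apart from UnrecoverableError
+	// and resend the RPC.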
+ javaRetryableExceptions = map[string]struct{}{ + "org.apache.hadoop.hbase.NotServingRegionException": struct{}{}, + "org.apache.hadoop.hbase.exceptions.RegionMovedException": struct{}{}, + "org.apache.hadoop.hbase.exceptions.RegionOpeningException": struct{}{}, + } +) + +const ( + // RegionClient is a ClientType that means this will be a normal client + RegionClient = ClientType("ClientService") + + // MasterClient is a ClientType that means this client will talk to the + // master server + MasterClient = ClientType("MasterService") +) + +// UnrecoverableError is an error that this region.Client can't recover from. +// The connection to the RegionServer has to be closed and all queued and +// outstanding RPCs will be failed / retried. +type UnrecoverableError struct { + error +} + +func (e UnrecoverableError) Error() string { + return e.error.Error() +} + +// RetryableError is an error that indicates the RPC should be retried because +// the error is transient (e.g. a region being momentarily unavailable). +type RetryableError struct { + error +} + +func (e RetryableError) Error() string { + return e.error.Error() +} + +// Client manages a connection to a RegionServer. +type Client struct { + id uint32 + + conn net.Conn + + // Hostname or IP address of the RegionServer. + host string + + // Port of the RegionServer. + port uint16 + + // writeMutex is used to prevent multiple threads from writing to the + // socket at the same time. + writeMutex *sync.Mutex + + // sendErr is set once a write fails. + sendErr error + sendErrLock sync.Mutex + + rpcs []hrpc.Call + + // Once the rpcs list has grown to a large enough size, this channel is + // written to to notify the writer thread that it should stop sleeping and + // process the list + process chan struct{} + + // sentRPCs contains the mapping of sent call IDs to RPC calls, so that when + // a response is received it can be tied to the correct RPC + sentRPCs map[uint32]hrpc.Call + sentRPCsMutex *sync.Mutex + + // <= 0 means only wait for timeout(can not be used combined with <=0 flushInterval); = 1 means flush for every request + rpcQueueSize int + // <= 0 means no flush-timeout + flushInterval time.Duration +} + +// NewClient creates a new RegionClient. +func NewClient(host string, port uint16, ctype ClientType, + queueSize int, flushInterval, dialTimeout time.Duration) (*Client, error) { + addr := fmt.Sprintf("%s:%d", host, port) + // Read/Write Timeout is not needed as actually no direct wait-on-io will happen. + // Non-blocking RPC call is ensured by usage of Context + var ( + conn net.Conn + err error + ) + if int64(dialTimeout) > 0 { + conn, err = net.DialTimeout("tcp", addr, dialTimeout) + } else { + conn, err = net.Dial("tcp", addr) + } + if err != nil { + return nil, + fmt.Errorf("failed to connect to the RegionServer at %s: %s", addr, err) + } + c := &Client{ + conn: conn, + host: host, + port: port, + writeMutex: &sync.Mutex{}, + process: make(chan struct{}), + sentRPCsMutex: &sync.Mutex{}, + sentRPCs: make(map[uint32]hrpc.Call), + rpcQueueSize: queueSize, + flushInterval: flushInterval, + } + err = c.sendHello(ctype) + if err != nil { + return nil, err + } + go c.processRpcs() // Writer goroutine + go c.receiveRpcs() // Reader goroutine + return c, nil +} + +// Close asks this region.Client to close its connection to the RegionServer. +// All queued and outstanding RPCs, if any, will be failed as if a connection +// error had happened. 
+func (c *Client) Close() { + c.setSendErr(errors.New("shutting down")) + c.errorEncountered() +} + +// Host returns the host that this client talks to +func (c *Client) Host() string { + return c.host +} + +// Port returns the port that this client talks over +func (c *Client) Port() uint16 { + return c.port +} + +func (c *Client) GetSendErr() error { + c.sendErrLock.Lock() + err := c.sendErr + c.sendErrLock.Unlock() + return err +} + +func (c *Client) setSendErr(err error) { + c.sendErrLock.Lock() + c.sendErr = err + c.sendErrLock.Unlock() +} + +func (c *Client) processRpcs() { + for { + if c.GetSendErr() != nil { + return + } + + if c.flushInterval > 0 { + select { + case <-time.After(c.flushInterval): + select { + case <-c.process: + // If we got a message on c.process at the same time as our + // timeout elapsed, we'll non-deterministically land in either + // cases of this outer select. Here we double-check whether + // something was written onto c.process, in which case we don't + // grab the lock (see comment below in the other case). + default: + c.writeMutex.Lock() + } + case <-c.process: + // We don't acquire the lock here, because the thread that sent + // something on the process channel will have locked the mutex, + // and will not release it so as to transfer ownership + } + } else { + <-c.process + } + + rpcs := make([]hrpc.Call, len(c.rpcs)) + for i, rpc := range c.rpcs { + rpcs[i] = rpc + } + c.rpcs = nil + c.writeMutex.Unlock() + + for i, rpc := range rpcs { + // If the deadline has been exceeded, don't bother sending the + // request. The function that placed the RPC in our queue should + // stop waiting for a result and return an error. + select { + case _, ok := <-rpc.GetContext().Done(): + if !ok { + continue + } + default: + } + + err := c.sendRPC(rpc) + if err != nil { + _, ok := err.(UnrecoverableError) + if ok { + c.setSendErr(err) + + c.writeMutex.Lock() + c.rpcs = append(c.rpcs, rpcs[i:]...) 
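+					// The unsent RPCs are put back on the queue so that
+					// errorEncountered (called just below) fails them all and
+					// closes the connection.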
+ c.writeMutex.Unlock() + + c.errorEncountered() + return + } + rpc.GetResultChan() <- hrpc.RPCResult{Error: err} + } + } + } +} + +func (c *Client) receiveRpcs() { + var sz [4]byte + for { + err := c.readFully(sz[:]) + if err != nil { + c.setSendErr(err) + c.errorEncountered() + return + } + + buf := make([]byte, binary.BigEndian.Uint32(sz[:])) + err = c.readFully(buf) + if err != nil { + c.setSendErr(err) + c.errorEncountered() + return + } + + resp := &pb.ResponseHeader{} + respLen, nb := proto.DecodeVarint(buf) + buf = buf[nb:] + err = proto.UnmarshalMerge(buf[:respLen], resp) + buf = buf[respLen:] + if err != nil { + // Failed to deserialize the response header + c.setSendErr(err) + c.errorEncountered() + return + } + if resp.CallId == nil { + // Response doesn't have a call ID + log.Error("Response doesn't have a call ID!") + c.setSendErr(ErrMissingCallID) + c.errorEncountered() + return + } + + c.sentRPCsMutex.Lock() + rpc, ok := c.sentRPCs[*resp.CallId] + c.sentRPCsMutex.Unlock() + + if !ok { + log.Error("Received a response with an unexpected call ID: %d", *resp.CallId) + c.sentRPCsMutex.Lock() + for id, call := range c.sentRPCs { + log.Error("\t\t%d: %v", id, call) + } + c.sentRPCsMutex.Unlock() + + c.setSendErr(fmt.Errorf("HBase sent a response with an unexpected call ID: %d", + resp.CallId)) + c.errorEncountered() + return + } + + var rpcResp proto.Message + if resp.Exception == nil { + respLen, nb = proto.DecodeVarint(buf) + buf = buf[nb:] + rpcResp = rpc.NewResponse() + err = proto.UnmarshalMerge(buf, rpcResp) + buf = buf[respLen:] + } else { + javaClass := *resp.Exception.ExceptionClassName + err = fmt.Errorf("HBase Java exception %s: \n%s", javaClass, + *resp.Exception.StackTrace) + if _, ok := javaRetryableExceptions[javaClass]; ok { + // This is a recoverable error. The client should retry. + err = RetryableError{err} + } + } + rpc.GetResultChan() <- hrpc.RPCResult{Msg: rpcResp, Error: err} + + c.sentRPCsMutex.Lock() + delete(c.sentRPCs, *resp.CallId) + c.sentRPCsMutex.Unlock() + } +} + +func (c *Client) errorEncountered() { + c.writeMutex.Lock() + res := hrpc.RPCResult{Error: UnrecoverableError{c.GetSendErr()}} + for _, rpc := range c.rpcs { + rpc.GetResultChan() <- res + } + c.rpcs = nil + c.writeMutex.Unlock() + + c.sentRPCsMutex.Lock() + for _, rpc := range c.sentRPCs { + rpc.GetResultChan() <- res + } + c.sentRPCs = nil + c.sentRPCsMutex.Unlock() + + c.conn.Close() +} + +// Sends the given buffer to the RegionServer. +func (c *Client) write(buf []byte) error { + n, err := c.conn.Write(buf) + + if err != nil { + // There was an error while writing + return err + } + if n != len(buf) { + // We failed to write the entire buffer + // according to io.Writer interface, this case should not happen + return ErrShortWrite + } + return nil +} + +// Tries to read enough data to fully fill up the given buffer. +func (c *Client) readFully(buf []byte) error { + var err error + for read, total := 0, 0; total < len(buf); total += read { + // according to io.Reader interface, n may be less than len(buf) while err is nil + read, err = c.conn.Read(buf[total:]) + if err != nil { + // conn error is considered as unrecoverable error + return UnrecoverableError{fmt.Errorf("Failed to read from the RS: %s", err)} + } else if read == 0 { + return fmt.Errorf("Failed to readFully from RS: expect %d but got %d.", + len(buf), total) + } + } + return nil +} + +// Sends the "hello" message needed when opening a new connection. 
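+// The frame it writes is the 6-byte preamble "HBas" 0x00 0x50 (simple
+// authentication), a 4-byte big-endian length, and then the serialized
+// ConnectionHeader protobuf.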
+func (c *Client) sendHello(ctype ClientType) error { + connHeader := &pb.ConnectionHeader{ + UserInfo: &pb.UserInformation{ + EffectiveUser: proto.String("gopher"), + }, + ServiceName: proto.String(string(ctype)), + //CellBlockCodecClass: "org.apache.hadoop.hbase.codec.KeyValueCodec", + } + data, err := proto.Marshal(connHeader) + if err != nil { + return fmt.Errorf("failed to marshal connection header: %s", err) + } + + const header = "HBas\x00\x50" // \x50 = Simple Auth. + buf := make([]byte, 0, len(header)+4+len(data)) + buf = append(buf, header...) + buf = buf[:len(header)+4] + binary.BigEndian.PutUint32(buf[6:], uint32(len(data))) + buf = append(buf, data...) + + return c.write(buf) +} + +// QueueRPC will add an rpc call to the queue for processing by the writer +// goroutine +func (c *Client) QueueRPC(rpc hrpc.Call) error { + sendErr := c.GetSendErr() + if sendErr != nil { + return sendErr + } + c.writeMutex.Lock() + c.rpcs = append(c.rpcs, rpc) + // < 0 means only flush when timeout; 0 means flush each time + if c.rpcQueueSize > 0 && len(c.rpcs) >= c.rpcQueueSize { + c.process <- struct{}{} + // We don't release the lock here, because we want to transfer ownership + // of the lock to the goroutine that processes the RPCs + } else { + c.writeMutex.Unlock() + } + return nil +} + +// sendRPC sends an RPC out to the wire. +// Returns the response (for now, as the call is synchronous). +func (c *Client) sendRPC(rpc hrpc.Call) error { + // Header. + c.id++ + reqheader := &pb.RequestHeader{ + CallId: &c.id, + MethodName: proto.String(rpc.GetName()), + RequestParam: proto.Bool(true), + } + + payload, err := rpc.Serialize() + if err != nil { + return fmt.Errorf("Failed to serialize RPC: %s", err) + } + payloadLen := proto.EncodeVarint(uint64(len(payload))) + + headerData, err := proto.Marshal(reqheader) + if err != nil { + return fmt.Errorf("Failed to marshal Get request: %s", err) + } + + buf := make([]byte, 5, 4+1+len(headerData)+len(payloadLen)+len(payload)) + binary.BigEndian.PutUint32(buf, uint32(cap(buf)-4)) + buf[4] = byte(len(headerData)) + buf = append(buf, headerData...) + buf = append(buf, payloadLen...) + buf = append(buf, payload...) + + c.sentRPCsMutex.Lock() + c.sentRPCs[c.id] = rpc + c.sentRPCsMutex.Unlock() + + err = c.write(buf) + if err != nil { + return UnrecoverableError{err} + } + + return nil +} diff --git a/libs/gohbase/region/client_test.go b/libs/gohbase/region/client_test.go new file mode 100644 index 0000000..2b301ed --- /dev/null +++ b/libs/gohbase/region/client_test.go @@ -0,0 +1,18 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package region + +import ( + "fmt" + "testing" +) + +func TestErrors(t *testing.T) { + ue := UnrecoverableError{fmt.Errorf("oops")} + if ue.Error() != "oops" { + t.Errorf("Wrong error message. Got %q, wanted %q", ue, "oops") + } +} diff --git a/libs/gohbase/regioninfo/info.go b/libs/gohbase/regioninfo/info.go new file mode 100644 index 0000000..5cdacf9 --- /dev/null +++ b/libs/gohbase/regioninfo/info.go @@ -0,0 +1,255 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +// Package regioninfo contains data structures to represent HBase regions. 
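+// Besides the parsed region boundaries, Info tracks region availability:
+// MarkUnavailable creates a channel that GetAvailabilityChan hands out to
+// waiters, and MarkAvailable closes it once the region is reachable again.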
+package regioninfo + +import ( + "encoding/binary" + "fmt" + "sync" + + "bfs/libs/gohbase/pb" + "bytes" + "time" + + log "github.com/golang/glog" + + "github.com/golang/protobuf/proto" +) + +const minEstablishInterval = int64(time.Duration(500 * time.Millisecond)) + +// Info describes a region. +type Info struct { + // Table name. + Table []byte + + // RegionName. + RegionName []byte + + // StartKey + StartKey []byte + + // StopKey. + StopKey []byte + + // Once a region becomes unreachable, this channel is created, and any + // functions that wish to be notified when the region becomes available + // again can read from this channel, which will be closed when the region + // is available again + available chan struct{} + availableLock sync.Mutex + + lastUnAvailable int64 + curUnAvailable int64 +} + +// InfoFromCell parses a KeyValue from the meta table and creates the +// corresponding Info object. +func InfoFromCell(cell *pb.Cell) (*Info, error) { + value := cell.Value + if len(value) == 0 { + return nil, fmt.Errorf("empty value in %q", cell) + } else if value[0] != 'P' { + return nil, fmt.Errorf("unsupported region info version %d in %q", + value[0], cell) + } + const pbufMagic = 1346524486 // 4 bytes: "PBUF" + magic := binary.BigEndian.Uint32(value) + if magic != pbufMagic { + return nil, fmt.Errorf("invalid magic number in %q", cell) + } + regInfo := &pb.RegionInfo{} + err := proto.UnmarshalMerge(value[4:len(value)-4], regInfo) + if err != nil { + return nil, fmt.Errorf("failed to decode %q: %s", cell, err) + } + return &Info{ + Table: regInfo.TableName.Qualifier, + RegionName: cell.Row, + StartKey: regInfo.StartKey, + StopKey: regInfo.EndKey, + availableLock: sync.Mutex{}, + }, nil +} + +func (i *Info) Equals(i1 *Info) bool { + if i == nil { + return i1 == nil + } + if i1 == nil { + return false + } + return bytes.Equal(i.Table, i1.Table) && bytes.Equal(i.RegionName, i1.RegionName) && bytes.Equal(i.StartKey, i1.StartKey) && bytes.Equal(i.StopKey, i1.StopKey) +} + +// IsUnavailable returns true if this region has been marked as unavailable. +func (i *Info) IsUnavailable() bool { + i.availableLock.Lock() + res := i.available != nil + i.availableLock.Unlock() + return res +} + +// GetAvailabilityChan returns a channel that can be used to wait on for +// notification that a connection to this region has been reestablished. +// If this region is not marked as unavailable, nil will be returned. +func (i *Info) GetAvailabilityChan() <-chan struct{} { + i.availableLock.Lock() + ch := i.available + i.availableLock.Unlock() + return ch +} + +// MarkUnavailable will mark this region as unavailable, by creating the struct +// returned by GetAvailabilityChan. If this region was marked as available +// before this, true will be returned. 
+func (i *Info) MarkUnavailable() bool { + created := false + i.availableLock.Lock() + if i.available == nil { + i.available = make(chan struct{}) + created = true + //i.lastUnAvailable, i.curUnAvailable = i.curUnAvailable, time.Now().UnixNano() + i.lastUnAvailable = i.curUnAvailable + i.curUnAvailable = time.Now().UnixNano() + } + i.availableLock.Unlock() + return created +} + +// MarkAvailable will mark this region as available again, by closing the struct +// returned by GetAvailabilityChan +func (i *Info) MarkAvailable() { + i.availableLock.Lock() + ch := i.available + i.available = nil + close(ch) + i.availableLock.Unlock() +} + +func (i *Info) String() string { + return fmt.Sprintf("*regioninfo.Info{Table: %q, RegionName: %q, StartKey: %q, StopKey: %q, lastUnAvailable: %d. curUnAvailable: %d}", + i.Table, i.RegionName, i.StartKey, i.StopKey, i.lastUnAvailable, i.curUnAvailable) +} + +func (i *Info) Park4Establish() { + if i != nil && i.curUnAvailable != 0 && i.lastUnAvailable != 0 && i.curUnAvailable > i.lastUnAvailable && (i.curUnAvailable-i.lastUnAvailable) < minEstablishInterval { + d := time.Duration(minEstablishInterval - i.curUnAvailable + i.lastUnAvailable) + log.Info("park for establish for (%v)", d) + time.Sleep(d) + } +} + +func (i *Info) DupExtInfo(origInfo *Info) { + if i != nil && origInfo != nil { + i.curUnAvailable = origInfo.curUnAvailable + i.lastUnAvailable = origInfo.lastUnAvailable + } +} + +// CompareGeneric is the same thing as Compare but for interface{}. +func CompareGeneric(a, b interface{}) int { + return Compare(a.([]byte), b.([]byte)) +} + +// Compare compares two region names. +// We can't just use bytes.Compare() because it doesn't play nicely +// with the way META keys are built as the first region has an empty start +// key. Let's assume we know about those 2 regions in our cache: +// .META.,,1 +// tableA,,1273018455182 +// We're given an RPC to execute on "tableA", row "\x00" (1 byte row key +// containing a 0). If we use Compare() to sort the entries in the cache, +// when we search for the entry right before "tableA,\000,:" +// we'll erroneously find ".META.,,1" instead of the entry for first +// region of "tableA". +// +// Since this scheme breaks natural ordering, we need this comparator to +// implement a special version of comparison to handle this scenario. +func Compare(a, b []byte) int { + var length int + if la, lb := len(a), len(b); la < lb { + length = la + } else { + length = lb + } + // Reminder: region names are of the form: + // table_name,start_key,timestamp[.MD5.] + // First compare the table names. + var i int + for i = 0; i < length; i++ { + ai := a[i] // Saves one pointer deference every iteration. + bi := b[i] // Saves one pointer deference every iteration. + if ai != bi { + // The name of the tables differ. + if ai == ',' { + return -1001 // `a' has a smaller table name. a < b + } else if bi == ',' { + return 1001 // `b' has a smaller table name. a > b + } + return int(ai) - int(bi) + } + if ai == ',' { + // Remember: at this point ai == bi. + break // We're done comparing the table names. They're equal. + } + } + + // Now find the last comma in both `a' and `b'. We need to start the + // search from the end as the row key could have an arbitrary number of + // commas and we don't know its length. + aComma := findCommaFromEnd(a, i) + bComma := findCommaFromEnd(b, i) + // If either `a' or `b' is followed immediately by another comma, then + // they are the first region (it's the empty start key). 
+ i++ // No need to check against `length', there MUST be more bytes. + + // Compare keys. + var firstComma int + if aComma < bComma { + firstComma = aComma + } else { + firstComma = bComma + } + for ; i < firstComma; i++ { + ai := a[i] + bi := b[i] + if ai != bi { + // The keys differ. + return int(ai) - int(bi) + } + } + if aComma < bComma { + return -1002 // `a' has a shorter key. a < b + } else if bComma < aComma { + return 1002 // `b' has a shorter key. a > b + } + + // Keys have the same length and have compared identical. Compare the + // rest, which essentially means: use start code as a tie breaker. + for ; /*nothing*/ + i < length; i++ { + ai := a[i] + bi := b[i] + if ai != bi { + // The start codes differ. + return int(ai) - int(bi) + } + } + + return len(a) - len(b) +} + +// Because there is no `LastIndexByte()' in the standard `bytes' package. +func findCommaFromEnd(b []byte, offset int) int { + for i := len(b) - 1; i > offset; i-- { + if b[i] == ',' { + return i + } + } + panic(fmt.Errorf("No comma found in %q after offset %d", b, offset)) +} diff --git a/libs/gohbase/regioninfo/info_test.go b/libs/gohbase/regioninfo/info_test.go new file mode 100644 index 0000000..cc9cb2d --- /dev/null +++ b/libs/gohbase/regioninfo/info_test.go @@ -0,0 +1,146 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +package regioninfo_test + +import ( + "bytes" + "strings" + "testing" + + "bfs/libs/gohbase/pb" + . "bfs/libs/gohbase/regioninfo" + + "github.com/golang/protobuf/proto" +) + +// Test parsing the contents of a cell found in meta. +func TestInfoFromMeta(t *testing.T) { + put := pb.CellType_PUT + regionName := []byte("table,foo,1431921690563.53e41f94d5c3087af0d13259b8c4186d.") + buf := []byte("PBUF\010\303\217\274\251\326)\022\020\n\007default" + + "\022\005table\032\000\"\000(\0000\0008\000") + cell := &pb.Cell{ + Row: regionName, + Family: []byte("info"), + Qualifier: []byte("regioninfo"), + Timestamp: proto.Uint64(1431921690626), + CellType: &put, + } + info, err := InfoFromCell(cell) + if err == nil || !strings.HasPrefix(err.Error(), "empty value") { + t.Errorf("Unexpected error on empty value: %s", err) + } + cell.Value = buf + info, err = InfoFromCell(cell) + if err != nil { + t.Fatalf("Failed to parse cell: %s", err) + } + if !bytes.Equal(info.RegionName, regionName) { + t.Errorf("Unexpected regionName name: %q", info.RegionName) + } + if len(info.StopKey) != 0 { + t.Errorf("Expected empty StopKey but got %q", info.StopKey) + } + + expected := `*regioninfo.Info{Table: "table", RegionName: "table,foo,` + + `1431921690563.53e41f94d5c3087af0d13259b8c4186d.", StopKey: ""}` + if s := info.String(); s != expected { + t.Errorf("Unexpected string representation.\nExpected: %q\n Actual: %q", expected, s) + } + + // Corrupt the protobuf. + buf[4] = 0xFF + _, err = InfoFromCell(cell) + if err == nil || !strings.HasPrefix(err.Error(), "failed to decode") { + t.Errorf("Unexpected error on corrupt protobuf: %s", err) + } + + // Corrupt the magic number. + buf[1] = 0xFF + _, err = InfoFromCell(cell) + if err == nil || !strings.HasPrefix(err.Error(), "invalid magic number") { + t.Errorf("Unexpected error on invalid magic number %s", err) + } + + // Corrupt the magic number (first byte). 
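+	// (InfoFromCell checks the leading 'P' before validating the full magic,
+	// so corrupting byte 0 surfaces as an "unsupported region info version" error.)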
+ buf[0] = 0xFF + _, err = InfoFromCell(cell) + if err == nil || !strings.HasPrefix(err.Error(), "unsupported region info version") { + t.Errorf("Unexpected error on invalid magic number %s", err) + } +} + +func TestCompare(t *testing.T) { + // Test cases from AsyncHBase + testcases := []struct { + a, b []byte // Region names, where a > b + }{{ + // Different table names. + []byte("table,,1234567890"), []byte(".META.,,1234567890"), + }, { + // Different table names but same prefix. + []byte("tabl2,,1234567890"), []byte("tabl1,,1234567890"), + }, { + // Different table names (different lengths). + []byte("table,,1234567890"), []byte("tabl,,1234567890"), + }, { + // Any key is greater than the start key. + []byte("table,foo,1234567890"), []byte("table,,1234567890"), + }, { + // Different keys. + []byte("table,foo,1234567890"), []byte("table,bar,1234567890"), + }, { + // Shorter key is smaller than longer key. + []byte("table,fool,1234567890"), []byte("table,foo,1234567890"), + }, { + // Properly handle keys that contain commas. + []byte("table,a,,c,1234567890"), []byte("table,a,,b,1234567890"), + }, { + // If keys are equal, then start code should break the tie. + []byte("table,foo,1234567891"), []byte("table,foo,1234567890"), + }, { + // Make sure that a start code being a prefix of another is handled. + []byte("table,foo,1234567890"), []byte("table,foo,123456789"), + }, { + // If both are start keys, then start code should break the tie. + []byte("table,,1234567891"), []byte("table,,1234567890"), + }, { + // The value `:' is always greater than any start code. + []byte("table,foo,:"), []byte("table,foo,9999999999"), + }, { + // Issue 27: searching for key "8,\001" and region key is "8". + []byte("table,8,\001,:"), []byte("table,8,1339667458224"), + }} + + for _, tcase := range testcases { + if i := Compare(tcase.a, tcase.b); i <= 0 { + t.Errorf("%q was found to be less than %q (%d)", tcase.a, tcase.b, i) + } + if i := Compare(tcase.b, tcase.a); i >= 0 { + t.Errorf("%q was found to be greater than %q (%d)", tcase.b, tcase.a, i) + } + } + + meta := []byte("hbase:meta,,1") + if i := CompareGeneric(meta, meta); i != 0 { + t.Errorf("%q was found to not be equal to itself (%d)", meta, i) + } +} + +func TestCompareBogusName(t *testing.T) { + defer func() { + expected := `No comma found in "bogus" after offset 5` + v := recover() + if v == nil { + t.Errorf("Should have panic'ed") + } else if e, ok := v.(error); !ok { + t.Errorf("panic'ed with a %T instead of an error (%#v)", v, v) + } else if e.Error() != expected { + t.Errorf("Expected panic(%q) but got %q", expected, e) + } + }() + Compare([]byte("bogus"), []byte("bogus")) +} diff --git a/libs/gohbase/test/test.go b/libs/gohbase/test/test.go new file mode 100644 index 0000000..f49de9f --- /dev/null +++ b/libs/gohbase/test/test.go @@ -0,0 +1,112 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. 
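+//
+// The helpers in this file shell out to the HBase shell and to
+// local-regionservers.sh under $HBASE_HOME, so they are only intended for
+// integration-test environments.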
+ +package test + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "os/exec" + "path" + "strings" + + "bfs/libs/gohbase" + "bfs/libs/gohbase/hrpc" +) + +// This error is returned when the HBASE_HOME environment variable is unset +var errHomeUnset = errors.New("Environment variable HBASE_HOME is not set") + +// getShellCmd returns a new shell subprocess (already started) along with its +// stdin +func getShellCmd() (*exec.Cmd, io.WriteCloser, error) { + hbaseHome := os.Getenv("HBASE_HOME") + if len(hbaseHome) == 0 { + return nil, nil, errHomeUnset + } + hbaseShell := path.Join(hbaseHome, "bin", "hbase") + cmd := exec.Command(hbaseShell, "shell") + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, nil, err + } + + err = cmd.Start() + if err != nil { + stdin.Close() + return nil, nil, err + } + return cmd, stdin, nil +} + +// CreateTable finds the HBase shell via the HBASE_HOME environment variable, +// and creates the given table with the given families +func CreateTable(host, table string, cFamilies []string) error { + // If the table exists, delete it + DeleteTable(host, table) + // Don't check the error, since one will be returned if the table doesn't + // exist + + cmd, stdin, err := getShellCmd() + if err != nil { + return err + } + + var buf bytes.Buffer + buf.WriteString("create '" + table + "'") + + for _, f := range cFamilies { + buf.WriteString(", '") + buf.WriteString(f) + buf.WriteString("'") + } + buf.WriteString("\n") + + stdin.Write(buf.Bytes()) + stdin.Write([]byte("exit\n")) + + return cmd.Wait() +} + +// DeleteTable finds the HBase shell via the HBASE_HOME environment variable, +// and disables and drops the given table +func DeleteTable(host, table string) error { + // TODO: We leak this client. + ac := gohbase.NewAdminClient(host) + dit := hrpc.NewDisableTable(context.Background(), []byte(table)) + _, err := ac.DisableTable(dit) + if err != nil { + if !strings.Contains(err.Error(), "TableNotEnabledException") { + return err + } + } + + det := hrpc.NewDeleteTable(context.Background(), []byte(table)) + _, err = ac.DeleteTable(det) + if err != nil { + return err + } + return nil +} + +// LaunchRegionServers uses the script local-regionservers.sh to create new +// RegionServers. Fails silently if server already exists. +// Ex. LaunchRegions([]string{"2", "3"}) launches two servers with id=2,3 +func LaunchRegionServers(servers []string) { + hh := os.Getenv("HBASE_HOME") + servers = append([]string{"start"}, servers...) + exec.Command(hh+"/bin/local-regionservers.sh", servers...).Run() +} + +// StopRegionServers uses the script local-regionservers.sh to stop existing +// RegionServers. Fails silently if server isn't running. +func StopRegionServers(servers []string) { + hh := os.Getenv("HBASE_HOME") + servers = append([]string{"stop"}, servers...) 
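+	// Unlike getShellCmd, HBASE_HOME is not validated here; an empty value just
+	// makes the exec fail, and that error is ignored like the script's exit status.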
+ exec.Command(hh+"/bin/local-regionservers.sh", servers...).Run() +} diff --git a/libs/gohbase/tools/test_get/main.go b/libs/gohbase/tools/test_get/main.go new file mode 100644 index 0000000..b9cde36 --- /dev/null +++ b/libs/gohbase/tools/test_get/main.go @@ -0,0 +1,74 @@ +package main + +import ( + "context" + "flag" + "fmt" + "strings" + "time" + + "bfs/libs/gohbase" + "bfs/libs/gohbase/conf" + "bfs/libs/gohbase/hrpc" +) + +var ( + zkStr string + table string + key string +) + +func init() { + flag.StringVar(&zkStr, "zk", "", ", joined zk hosts") + flag.StringVar(&table, "table", "", "table name") + flag.StringVar(&key, "key", "", "key to get") +} + +func main() { + flag.Parse() + fmt.Printf("do test_get for zk(%s), table(%s), key:(%s)\n", zkStr, table, key) + var ( + get *hrpc.Get + err error + res *hrpc.Result + st, et int64 + ) + zks := strings.Split(zkStr, ",") + c := gohbase.NewClient(conf.NewConf(zks, "", "", "", 30*time.Second, 0, 0, 0)) + if c == nil { + fmt.Printf("new client get nil client for zks: (%v)\n", zks) + return + } + if get, err = hrpc.NewGetStr(context.Background(), table, key); err != nil { + fmt.Printf("new get met error: (%v)\n", err) + return + } + st = time.Now().UnixNano() + if res, err = c.Get(get); err != nil { + fmt.Printf("get met error: (%v)\n", err) + return + } else { + for _, cell := range res.Cells { + fmt.Printf("%s-%s-%s: %s;", string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + fmt.Println("") + } + } + et = time.Now().UnixNano() + fmt.Printf("get (%s) from (%s) cost %d ns (%d ms)\n", table, key, et-st, (et-st)/1000000) + + for _, n := range []int{10, 1000} { + st = time.Now().UnixNano() + for i := 0; i < n; i++ { + if get, err = hrpc.NewGetStr(context.Background(), table, key); err != nil { + fmt.Printf("new get met error: (%v)\n", err) + return + } + if res, err = c.Get(get); err != nil { + fmt.Printf("get met error: (%v)\n", err) + return + } + } + et = time.Now().UnixNano() + fmt.Printf("get (%s) from (%s) for %d times cost %d ns (%d ms)\n", table, key, n, et-st, (et-st)/1000000) + } +} diff --git a/libs/gohbase/tools/test_get_dapper/main.go b/libs/gohbase/tools/test_get_dapper/main.go new file mode 100644 index 0000000..4dd11c6 --- /dev/null +++ b/libs/gohbase/tools/test_get_dapper/main.go @@ -0,0 +1,68 @@ +package main + +import ( + "context" + "fmt" + "strings" + "time" + + "bfs/libs/gohbase" + "bfs/libs/gohbase/conf" + "bfs/libs/gohbase/hrpc" +) + +var ( + zkStr string = "172.18.4.117:2181,172.18.4.118:2181,172.18.4.119:2181" + testTable string = "test" + spanTable string = "dapper_origin_v1" + testKey string = "test" + spanKey string = "13794899398466741090" + tables = []string{testTable, spanTable} + keys = []string{testKey, spanKey} +) + +func main() { + var ( + get *hrpc.Get + err error + res *hrpc.Result + st, et int64 + ) + zks := strings.Split(zkStr, ",") + c := gohbase.NewClient(conf.NewConf(zks, "", "", "", 30*time.Second, 0, 0, 0)) + if c == nil { + fmt.Printf("new client get nil client for zks: (%v)\n", zks) + return + } + var counter = 0 + for { + time.Sleep(1 * time.Second) + var i int = 0 + for ; i < 2; i++ { + var key = keys[i] + var table = tables[i] + if get, err = hrpc.NewGetStr(context.Background(), table, key); err != nil { + fmt.Printf("new get met error: (%v)\n", err) + continue + } + st = time.Now().UnixNano() + if res, err = c.Get(get); err != nil { + fmt.Printf("get met error: (%v)\n", err) + continue + } else { + for _, cell := range res.Cells { + fmt.Sprintf("%s-%s-%s: %s;", 
string(cell.Row), string(cell.Family), string(cell.Qualifier), string(cell.Value)) + } + } + et = time.Now().UnixNano() + if (et-st)/1000000000 > 1 { + fmt.Printf("get (%s) from (%s) cost %d ns (%d ms)\n", table, key, et-st, (et-st)/1000000) + } + } + counter += 1 + if counter > 30 { + fmt.Printf("time: (%v), done (%d) loops\n", time.Now(), counter) + counter = 0 + } + } +} diff --git a/libs/gohbase/zk/client.go b/libs/gohbase/zk/client.go new file mode 100644 index 0000000..060dc3c --- /dev/null +++ b/libs/gohbase/zk/client.go @@ -0,0 +1,317 @@ +// Copyright (C) 2015 The GoHBase Authors. All rights reserved. +// This file is part of GoHBase. +// Use of this source code is governed by the Apache License 2.0 +// that can be found in the COPYING file. + +// Package zk encapsulates our interactions with ZooKeeper. +package zk + +import ( + "encoding/binary" + "time" + + log "github.com/golang/glog" + + "bfs/libs/gohbase/pb" + "path" + "sync" + + "github.com/golang/protobuf/proto" + "github.com/samuel/go-zookeeper/zk" +) + +const ( + ResourceTypeMaster = iota + ResourceTypeMeta = iota + resourceCount = iota +) + +const ( + ServerStateUp int = 1 + ServerStateDown int = 2 +) + +// ResourceName is a type alias that is used to represent different resources +// in ZooKeeper +type ResourceName string + +var ( + // Meta is a ResourceName that indicates that the location of the Meta + // table is what will be fetched + + // Master is a ResourceName that indicates that the location of the Master + // server is what will be fetched + + defaultNames = [resourceCount]string{ + "/hbase/master", + "/hbase/meta-region-server", + } +) + +type ServerInfo struct { + Host string + Port uint16 +} + +type serverInfo struct { + Host string + Port uint16 + State int + ChangeCount int64 + UpdateTime time.Time +} + +func (ms *serverInfo) Valid() bool { + return ms != nil && ms.Host != "" && ms.Port > 0 && ms.State == ServerStateUp +} + +func (ms *serverInfo) ServerInfo() (res *ServerInfo) { + if ms.Valid() { + res = &ServerInfo{ + Host: ms.Host, + Port: ms.Port, + } + } + return +} + +func (ms *serverInfo) Disable() (changed bool) { + changed = ms.Valid() // only changes when original state is valid + ms.ChangeCount += 1 + ms.UpdateTime = time.Now() + ms.State = ServerStateDown + return +} + +func (ms *serverInfo) Clear() { + ms.ChangeCount += 1 + ms.UpdateTime = time.Now() + ms.State = ServerStateDown + ms.Host = "" + ms.Port = 0 +} + +func (ms *serverInfo) Update(newMS *serverInfo) (changed bool) { + ms.UpdateTime = newMS.UpdateTime + changed = !ms.Equals(newMS) + if changed { + ms.ChangeCount += 1 + } + ms.Host = newMS.Host + ms.Port = newMS.Port + ms.State = newMS.State + return +} + +func (ms *serverInfo) Equals(newMS *serverInfo) bool { + return ms.Host == newMS.Host && ms.Port == newMS.Port && ms.State == newMS.State +} + +type ServerWatcher interface { + // SetServer implementation should ensure it will not block + SetServer(resourceType int, ms *ServerInfo) +} + +type ZKClient struct { + quorum []string + conn *zk.Conn + + watchersLock *sync.Mutex + serverWatchers [resourceCount][]ServerWatcher + watchStopChan [resourceCount]chan struct{} // buf-1 ensures send will not block + resources [resourceCount]string + serverInfos [resourceCount]*serverInfo +} + +func NewZKClient(zks []string, zkRoot, master, meta string, useMaster, useMeta bool, sessionTimeout time.Duration) (res *ZKClient, err error) { + c := &ZKClient{ + quorum: zks, + watchersLock: &sync.Mutex{}, + } + if master == "" { + master = 
defaultNames[ResourceTypeMaster] + } + if meta == "" { + meta = defaultNames[ResourceTypeMeta] + } + if zkRoot != "" { + master = path.Join(zkRoot, master) + meta = path.Join(zkRoot, meta) + } + c.resources[ResourceTypeMaster] = master + c.resources[ResourceTypeMeta] = meta + conn, _, err := zk.Connect(c.quorum, sessionTimeout) + if err != nil { + return // XXX + } + c.conn = conn + for i := 0; i < resourceCount; i++ { + c.watchStopChan[i] = make(chan struct{}, 1) + c.serverInfos[i] = &serverInfo{} + } + err = c.watchServer(useMaster, useMeta) + if err != nil { + return + conn.Close() + } + res = c + return +} + +func (c *ZKClient) watchServer(useMaster, useMeta bool) (err error) { + wg := &sync.WaitGroup{} + wg.Add(resourceCount) + for i := 0; i < resourceCount; i++ { + if (i == ResourceTypeMaster && !useMaster) || (i == ResourceTypeMeta && !useMeta) { + wg.Done() + continue + } + go func(resourceType int, wg *sync.WaitGroup) { + for { + path := c.resources[resourceType] + buf, _, evCh, getErr := c.conn.GetW(path) + sleep := int64(0) + curServerInfo := c.serverInfos[resourceType] + if getErr != nil { + log.Errorf("c.conn.GetW(%s) failed, err is (%v)", path, getErr) + if wg != nil { + err = getErr + wg.Done() + return + } + sleep = 1 + // XXX + } else { + var changed bool + newServerInfo := serverInfoFromContent(buf, resourceType) + if newServerInfo == nil { + log.Errorf("serverInfoFromContent(%v, %d) return nil serverInfo", buf, resourceType) + changed = curServerInfo.Disable() + } else { + changed = curServerInfo.Update(newServerInfo) + } + if changed { + log.Info("server %d change to (%v) as newServerInfo: (%v)", resourceType, curServerInfo, newServerInfo) + for _, watcher := range c.serverWatchers[resourceType] { + watcher.SetServer(resourceType, curServerInfo.ServerInfo()) + } + } + if wg != nil { + wg.Done() + } + for ev := range evCh { + log.Info("resourceType %d receive zk event %v", resourceType, ev) + curServerInfo = c.serverInfos[resourceType] + switch ev.Type { + case zk.EventNodeCreated: + fallthrough + case zk.EventNodeDataChanged: + buf, _, getErr = c.conn.Get(path) + if getErr != nil { + log.Error("failed to get (%s) from zk after event (%v)", path, ev.Type) + continue // XXX + } + newServerInfo := serverInfoFromContent(buf, resourceType) + if newServerInfo == nil { + changed = curServerInfo.Disable() + } else { + changed = curServerInfo.Update(newServerInfo) + } + case zk.EventNodeDeleted: + changed = curServerInfo.Disable() + default: + log.Info("resource type %d receives event %d from zk", resourceType, ev.Type) + } + if changed { + log.Info("server %d change to (%v) as event %v", resourceType, curServerInfo, ev) + for _, watcher := range c.serverWatchers[resourceType] { + watcher.SetServer(resourceType, curServerInfo.ServerInfo()) + } + } + } + log.Warning("evCh is closed!") + } + select { + case <-c.watchStopChan[resourceType]: + log.Info("quit watch for resource type %d as receive signal from stop chan", resourceType) + return + default: + wg = nil + if sleep > 0 { + time.Sleep(time.Duration(sleep) * time.Second) + } + continue + } + } + }(i, wg) + } + wg.Wait() + return +} + +func (c *ZKClient) WatchServer(serverType int, watcher ServerWatcher) { + if serverType < 0 || serverType >= resourceCount || watcher == nil { + return + } + c.watchersLock.Lock() + c.serverWatchers[serverType] = append(c.serverWatchers[serverType], watcher) + c.watchersLock.Unlock() +} + +// LocateResource returns the location of the specified resource. 
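+// It returns nil when resourceType is out of range or the tracked server is
+// currently marked down.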
+func (c *ZKClient) LocateResource(resourceType int) (res *ServerInfo) { + if resourceType < 0 || resourceType >= resourceCount { + return + } + res = c.serverInfos[resourceType].ServerInfo() + log.Infof("LocateResource(%d), return %v", resourceType, res) + return +} + +func serverInfoFromContent(buf []byte, resourceType int) (res *serverInfo) { + if len(buf) == 0 { + log.Errorf("%d was empty!", resourceType) + return + } else if buf[0] != 0xFF { + log.Errorf("The first byte of %d was 0x%x, not 0xFF", resourceType, buf[0]) + return + } + metadataLen := binary.BigEndian.Uint32(buf[1:]) + if metadataLen < 1 || metadataLen > 65000 { + log.Error("Invalid metadata length for %d: %d", resourceType, metadataLen) + return + } + buf = buf[1+4+metadataLen:] + magic := binary.BigEndian.Uint32(buf) + const pbufMagic = 1346524486 // 4 bytes: "PBUF" + if magic != pbufMagic { + log.Error("Invalid magic number for %d: %d", resourceType, magic) + return + } + buf = buf[4:] + var server *pb.ServerName + if resourceType == ResourceTypeMeta { + meta := &pb.MetaRegionServer{} + err := proto.UnmarshalMerge(buf, meta) + if err != nil { + log.Error("Failed to deserialize the MetaRegionServer entry from ZK: %s", err) + return + } + server = meta.Server + } else { + master := &pb.Master{} + err := proto.UnmarshalMerge(buf, master) + if err != nil { + log.Error("Failed to deserialize the Master entry from ZK: %s", err) + return + } + server = master.Master + } + res = &serverInfo{ + Host: *server.HostName, + Port: uint16(*server.Port), + UpdateTime: time.Now(), + State: ServerStateUp, + } + return +} diff --git a/libs/memcache/gomemcache/README.md b/libs/memcache/gomemcache/README.md deleted file mode 100644 index 16e73ea..0000000 --- a/libs/memcache/gomemcache/README.md +++ /dev/null @@ -1,33 +0,0 @@ -gomemcache -====== - -gomemcache is a [Golang](http://golang.org/) client for the [memcache](http://www.memcached.org/) database. - -Documentation -------------- - -- [API Reference](http://godoc.org/github.com/Terry-Mao/gomemcache/memcache) - -Installation ------------- - -Install gomemcache using the "go get" command: - - go get github.com/Terry-Mao/gomemcache/memcache - -The Go distribution is gomemcache's only dependency. - -Contributing ------------- - -Contributions are welcome. - -Before writing code, send mail to iammao@vip.qq.com to discuss what you -plan to do. This gives me a chance to validate the design, avoid duplication of -effort and ensure that the changes fit the goals of the project. Do not start -the discussion with a pull request. - -Reference -------- - -[Redigo](https://github.com/garyburd/redigo) diff --git a/libs/memcache/memcache.go b/libs/memcache/memcache.go index 1fbcef9..6632379 100644 --- a/libs/memcache/memcache.go +++ b/libs/memcache/memcache.go @@ -5,7 +5,7 @@ import ( xtime "bfs/libs/time" - "libs/memcache/gomemcache/memcache" + "bfs/libs/memcache/gomemcache/memcache" ) const ( @@ -102,8 +102,13 @@ func (c *Conn) Get(cmd string, cb func(*memcache.Reply), keys ...string) (err er } // Get2 sends a command to the server for gets data. 
-func (c *Conn) Get2(cmd string, key string) (res *memcache.Reply, err error) { +func (c *Conn) Get2(cmd string, key string) (bs []byte, err error) { + var res *memcache.Reply res, err = c.c.Get(cmd, key) + if err != nil { + return + } + bs = res.Value return } diff --git a/proxy/bfs/bfs.go b/proxy/bfs/bfs.go index d6b2205..2c9f544 100644 --- a/proxy/bfs/bfs.go +++ b/proxy/bfs/bfs.go @@ -1,6 +1,9 @@ package bfs import ( + "bfs/libs/errors" + "bfs/libs/meta" + "bfs/proxy/conf" "bytes" "encoding/json" "fmt" @@ -15,10 +18,6 @@ import ( "strings" "time" - "bfs/libs/errors" - "bfs/libs/meta" - "bfs/proxy/conf" - itime "github.com/Terry-Mao/marmot/time" log "github.com/golang/glog" ) diff --git a/proxy/bucket/bucket.go b/proxy/bucket/bucket.go index 56213ac..dd35704 100644 --- a/proxy/bucket/bucket.go +++ b/proxy/bucket/bucket.go @@ -68,7 +68,7 @@ func New() (b *Bucket, err error) { item.property = _privateWrite item.KeyId = "221bce6492eba70f" item.KeySecret = "6eb80603e85842542f9736eb13b7e3" - item.PurgeCDN = false + item.PurgeCDN = true b.data[item.Name] = item return } diff --git a/proxy/cache/cache.go b/proxy/cache/cache.go index 906c4e5..43744b3 100644 --- a/proxy/cache/cache.go +++ b/proxy/cache/cache.go @@ -9,7 +9,6 @@ import ( "bfs/libs/meta" log "github.com/golang/glog" - gm "golang/gomemcache/memcache" ) // Cache proxy cache. @@ -127,14 +126,12 @@ func (c *Cache) set(key string, bs []byte, expire int32) (err error) { func (c *Cache) get(key string) (bs []byte, err error) { var ( - conn = c.mc.Get() - reply *gm.Reply + conn = c.mc.Get() ) defer conn.Close() - if reply, err = conn.Get2("get", key); err != nil { + if bs, err = conn.Get2("get", key); err != nil { return } - bs = reply.Value return } diff --git a/proxy/conf/config.go b/proxy/conf/config.go index 4b25c6e..78d12be 100644 --- a/proxy/conf/config.go +++ b/proxy/conf/config.go @@ -32,6 +32,8 @@ type Config struct { // qcloud QcloudKeyID string QcloudKeySecret string + // ats server list + Ats *Ats // purge channel PurgeMaxSize int // memcache @@ -41,6 +43,10 @@ type Config struct { Limit *Limit } +type Ats struct { + AtsServerList []string +} + // Limit limit rate type Limit struct { Rate float64 diff --git a/proxy/http_api.go b/proxy/http_api.go index 5c24f7c..6aeff9d 100644 --- a/proxy/http_api.go +++ b/proxy/http_api.go @@ -1,6 +1,12 @@ package main import ( + "bfs/libs/errors" + "bfs/proxy/auth" + "bfs/proxy/bfs" + ibucket "bfs/proxy/bucket" + "bfs/proxy/cdn" + "bfs/proxy/conf" "crypto/sha1" "encoding/hex" "encoding/json" @@ -14,12 +20,6 @@ import ( "strings" "time" - "bfs/libs/errors" - "bfs/proxy/auth" - "bfs/proxy/bfs" - ibucket "bfs/proxy/bucket" - "bfs/proxy/conf" - log "github.com/golang/glog" ) @@ -36,6 +36,7 @@ type server struct { bfs *bfs.Bfs bucket *ibucket.Bucket auth *auth.Auth + cdn *cdn.CDN c *conf.Config srv *Service } @@ -53,6 +54,9 @@ func StartAPI(c *conf.Config) (err error) { if s.auth, err = auth.New(c); err != nil { return } + if s.cdn, err = cdn.New(c); err != nil { + return + } go func() { mux := http.NewServeMux() mux.HandleFunc("/", s.do) @@ -222,7 +226,7 @@ func (s *server) download(item *ibucket.Item, bucket, file string, wr http.Respo } else { if err == errors.ErrNeedleNotExist { status = http.StatusNotFound - } else if err == errors.ErrStoreNotAvailable { + } else if err == errors.ErrStoreNotAvailable || err == errors.ErrServiceUnavailable { status = http.StatusServiceUnavailable } else { status = http.StatusInternalServerError @@ -241,6 +245,7 @@ func retCode(wr http.ResponseWriter, status 
*int) { func (s *server) upload(item *ibucket.Item, bucket, file string, wr http.ResponseWriter, r *http.Request) { var ( ok bool + nofile bool body []byte mine string location string @@ -281,6 +286,7 @@ func (s *server) upload(item *ibucket.Item, bucket, file string, wr http.Respons sha1sum = hex.EncodeToString(sha[:]) // if empty filename or endwith "/": dir if file == "" || strings.HasSuffix(file, "/") { + nofile = true file += sha1sum + "." + ext } if err = s.srv.Upload(bucket, file, mine, sha1sum, body); err != nil && err != errors.ErrNeedleExist { @@ -292,6 +298,10 @@ func (s *server) upload(item *ibucket.Item, bucket, file string, wr http.Respons return } location = s.getURI(bucket, file) + // if upload without filename, file context may be same, no need refresh cdn + if err == errors.ErrNeedleExist && !nofile { + s.cdn.Push(bucket, location, item.PurgeCDN) + } wr.Header().Set("Location", location) wr.Header().Set("ETag", sha1sum) return @@ -320,6 +330,7 @@ func (s *server) delete(item *ibucket.Item, bucket, file string, wr http.Respons } } else { wr.Header().Set("Code", strconv.Itoa(status)) + s.cdn.Push(bucket, s.getURI(bucket, file), item.PurgeCDN) } return } diff --git a/proxy/main.go b/proxy/main.go index e06b92e..9330f8a 100644 --- a/proxy/main.go +++ b/proxy/main.go @@ -1,14 +1,13 @@ package main import ( + "bfs/proxy/conf" "flag" "os" "os/signal" "runtime" "syscall" - "bfs/proxy/conf" - log "github.com/golang/glog" ) diff --git a/proxy/proxy.toml b/proxy/proxy.toml index 7d4912a..5704cff 100644 --- a/proxy/proxy.toml +++ b/proxy/proxy.toml @@ -27,12 +27,17 @@ rate = 150.0 Brust = 50 [mc] -name = "kvo" +name = "bfs" proto = "tcp" -addr = "172.16.33.54:11213" +addr = "localhost:11213" idle = 5 active = 10 dialTimeout = "1s" readTimeout = "1s" writeTimeout = "1s" idleTimeout = "80s" + +[Ats] +AtsServerList = [ "http://localhost:8080" ] + +
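The upload and delete handlers above call into a `cdn` package (`cdn.New`, `cdn.Push`) that is not included in this part of the diff, and `proxy.toml` gains an `[Ats]` block listing Apache Traffic Server nodes. The following is only a minimal sketch of how such a purger could be wired to that config, assuming ATS is reachable over HTTP and honors the non-standard `PURGE` method, and that `location` is the path under which the object is served; the type and URL layout are illustrative, not the actual implementation.

```go
package cdn

import (
	"fmt"
	"net/http"
	"time"

	"bfs/proxy/conf"
)

// CDN issues purge requests against the ATS servers listed in conf.Ats.
type CDN struct {
	c      *conf.Config
	client *http.Client
}

// New returns a CDN purger; it expects conf.Ats to be populated from proxy.toml.
func New(c *conf.Config) (*CDN, error) {
	if c.Ats == nil || len(c.Ats.AtsServerList) == 0 {
		return nil, fmt.Errorf("cdn: no ATS servers configured")
	}
	return &CDN{c: c, client: &http.Client{Timeout: 2 * time.Second}}, nil
}

// Push asks every configured ATS node to drop its cached copy of location.
// When the bucket is not configured to purge (purge == false) it is a no-op.
func (p *CDN) Push(bucket, location string, purge bool) {
	if !purge {
		return
	}
	for _, ats := range p.c.Ats.AtsServerList {
		// location is assumed to already be the public path of the object
		// (e.g. "/bucket/filename"), so it is appended to each ATS endpoint.
		req, err := http.NewRequest("PURGE", ats+location, nil)
		if err != nil {
			continue
		}
		if resp, err := p.client.Do(req); err == nil {
			resp.Body.Close()
		}
	}
}
```

In the real proxy the purge would more likely be queued and handled asynchronously (the `PurgeMaxSize` "purge channel" option in `conf.Config` hints at exactly that), so a slow ATS node cannot stall uploads or deletes.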